Mirror of https://github.com/mod-playerbots/azerothcore-wotlk.git (synced 2026-01-13 01:08:35 +00:00)

Merge branch 'master' of https://github.com/azerothcore/azerothcore-wotlk into dir-restructure

94 deps/jemalloc/CMakeLists.txt (vendored)
@@ -1,4 +1,4 @@
# Copyright (C)
# Copyright (C) 2008-2017 TrinityCore <http://www.trinitycore.org/>
#
# This file is free software; as a special exception the author gives
# unlimited permission to copy and/or distribute it, with or without
@@ -8,14 +8,24 @@
# WITHOUT ANY WARRANTY, to the extent permitted by law; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

# We need to generate the jemalloc_def.h header based on platform-specific settings
if (PLATFORM EQUAL 32)
set(JEM_SIZEDEF 2)
set(JEM_TLSMODEL)
else()
set(JEM_SIZEDEF 3)
set(JEM_TLSMODEL "__attribute__\(\(tls_model\(\"initial-exec\"\)\)\)")
endif()
# We need to generate the jemalloc_def.h header based on platform-specific settings
CHECK_SYMBOL_EXISTS(MADV_FREE "sys/mman.h" HAVE_MADV_FREE)

if (PLATFORM EQUAL 32)
set(JEM_SIZEDEF 2)
set(JEM_TLSMODEL)
set(JEM_VADDRBITS 32)
else()
set(JEM_SIZEDEF 3)
set(JEM_TLSMODEL "__attribute__\(\(tls_model\(\"initial-exec\"\)\)\)")
set(JEM_VADDRBITS 48)
endif()

if (HAVE_MADV_FREE)
set(JEM_MADFREE_DEF "#define")
else()
set(JEM_MADFREE_DEF "#undef")
endif()

# Create the header, so we can use it
configure_file(
@@ -24,31 +34,38 @@ configure_file(
@ONLY
)

# Done, let's continue
set(jemalloc_STAT_SRC
${CMAKE_CURRENT_SOURCE_DIR}/src/arena.c
${CMAKE_CURRENT_SOURCE_DIR}/src/atomic.c
${CMAKE_CURRENT_SOURCE_DIR}/src/base.c
${CMAKE_CURRENT_SOURCE_DIR}/src/bitmap.c
${CMAKE_CURRENT_SOURCE_DIR}/src/chunk.c
${CMAKE_CURRENT_SOURCE_DIR}/src/chunk_dss.c
${CMAKE_CURRENT_SOURCE_DIR}/src/chunk_mmap.c
${CMAKE_CURRENT_SOURCE_DIR}/src/ckh.c
${CMAKE_CURRENT_SOURCE_DIR}/src/ctl.c
${CMAKE_CURRENT_SOURCE_DIR}/src/extent.c
${CMAKE_CURRENT_SOURCE_DIR}/src/hash.c
${CMAKE_CURRENT_SOURCE_DIR}/src/huge.c
${CMAKE_CURRENT_SOURCE_DIR}/src/jemalloc.c
${CMAKE_CURRENT_SOURCE_DIR}/src/mb.c
${CMAKE_CURRENT_SOURCE_DIR}/src/mutex.c
${CMAKE_CURRENT_SOURCE_DIR}/src/prof.c
${CMAKE_CURRENT_SOURCE_DIR}/src/quarantine.c
${CMAKE_CURRENT_SOURCE_DIR}/src/rtree.c
${CMAKE_CURRENT_SOURCE_DIR}/src/stats.c
${CMAKE_CURRENT_SOURCE_DIR}/src/tcache.c
${CMAKE_CURRENT_SOURCE_DIR}/src/tsd.c
${CMAKE_CURRENT_SOURCE_DIR}/src/util.c
)
# Done, let's continue
set(jemalloc_STAT_SRC
${CMAKE_CURRENT_SOURCE_DIR}/src/arena.c
${CMAKE_CURRENT_SOURCE_DIR}/src/background_thread.c
${CMAKE_CURRENT_SOURCE_DIR}/src/base.c
${CMAKE_CURRENT_SOURCE_DIR}/src/bitmap.c
${CMAKE_CURRENT_SOURCE_DIR}/src/ckh.c
${CMAKE_CURRENT_SOURCE_DIR}/src/ctl.c
${CMAKE_CURRENT_SOURCE_DIR}/src/extent.c
${CMAKE_CURRENT_SOURCE_DIR}/src/extent_dss.c
${CMAKE_CURRENT_SOURCE_DIR}/src/extent_mmap.c
${CMAKE_CURRENT_SOURCE_DIR}/src/hash.c
${CMAKE_CURRENT_SOURCE_DIR}/src/hooks.c
${CMAKE_CURRENT_SOURCE_DIR}/src/jemalloc.c
${CMAKE_CURRENT_SOURCE_DIR}/src/jemalloc_cpp.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/large.c
${CMAKE_CURRENT_SOURCE_DIR}/src/malloc_io.c
${CMAKE_CURRENT_SOURCE_DIR}/src/mutex.c
${CMAKE_CURRENT_SOURCE_DIR}/src/mutex_pool.c
${CMAKE_CURRENT_SOURCE_DIR}/src/nstime.c
${CMAKE_CURRENT_SOURCE_DIR}/src/pages.c
${CMAKE_CURRENT_SOURCE_DIR}/src/prng.c
${CMAKE_CURRENT_SOURCE_DIR}/src/prof.c
${CMAKE_CURRENT_SOURCE_DIR}/src/rtree.c
${CMAKE_CURRENT_SOURCE_DIR}/src/spin.c
${CMAKE_CURRENT_SOURCE_DIR}/src/stats.c
${CMAKE_CURRENT_SOURCE_DIR}/src/sz.c
${CMAKE_CURRENT_SOURCE_DIR}/src/tcache.c
${CMAKE_CURRENT_SOURCE_DIR}/src/ticker.c
${CMAKE_CURRENT_SOURCE_DIR}/src/tsd.c
${CMAKE_CURRENT_SOURCE_DIR}/src/witness.c
)

include_directories(
${BUILDDIR}/
@@ -58,3 +75,12 @@ include_directories(
add_definitions(-D_GNU_SOURCE -D_REENTRANT)

add_library(jemalloc STATIC ${jemalloc_STAT_SRC})

target_link_libraries(jemalloc
PUBLIC
${CMAKE_DL_LIBS})

set_target_properties(jemalloc
PROPERTIES
FOLDER
"deps")

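The configure_file() step above stamps JEM_SIZEDEF, JEM_TLSMODEL, JEM_VADDRBITS and JEM_MADFREE_DEF into a generated header. As a rough, hedged illustration only (the template and macro names below are assumptions, not taken from this repository), the result on a 64-bit Linux host with MADV_FREE available could look roughly like this:

/* Hypothetical sketch of the generated jemalloc_def.h contents after
 * configure_file() substitution on a 64-bit Linux build; the macro names
 * are illustrative guesses at how the CMake variables are consumed. */
#define LG_SIZEOF_PTR 3                                                 /* from JEM_SIZEDEF */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))   /* from JEM_TLSMODEL */
#define LG_VADDR 48                                                      /* from JEM_VADDRBITS */
#define JEMALLOC_HAVE_MADVISE_FREE                                       /* JEM_MADFREE_DEF resolved to #define */
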
4 deps/jemalloc/COPYING (vendored)
@@ -1,10 +1,10 @@
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
Copyright (C) 2002-2014 Jason Evans <jasone@canonware.com>.
Copyright (C) 2002-2017 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
Copyright (C) 2009-2014 Facebook, Inc. All rights reserved.
Copyright (C) 2009-2017 Facebook, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

727 deps/jemalloc/ChangeLog (vendored)
@@ -1,10 +1,727 @@
Following are change highlights associated with official releases. Important
bug fixes are all mentioned, but internal enhancements are omitted here for
brevity (even though they are more fun to write about). Much more detail can be
found in the git revision history:
bug fixes are all mentioned, but some internal enhancements are omitted here for
brevity. Much more detail can be found in the git revision history:

https://github.com/jemalloc/jemalloc

* 5.0.1 (July 1, 2017)

This bugfix release fixes several issues, most of which are obscure enough
that typical applications are not impacted.

Bug fixes:
- Update decay->nunpurged before purging, in order to avoid potential update
races and subsequent incorrect purging volume. (@interwq)
- Only abort on dlsym(3) error if the failure impacts an enabled feature (lazy
locking and/or background threads). This mitigates an initialization
failure bug for which we still do not have a clear reproduction test case.
(@interwq)
- Modify tsd management so that it neither crashes nor leaks if a thread's
only allocation activity is to call free() after TLS destructors have been
executed. This behavior was observed when operating with GNU libc, and is
unlikely to be an issue with other libc implementations. (@interwq)
- Mask signals during background thread creation. This prevents signals from
being inadvertently delivered to background threads. (@jasone,
@davidgoldblatt, @interwq)
- Avoid inactivity checks within background threads, in order to prevent
recursive mutex acquisition. (@interwq)
- Fix extent_grow_retained() to use the specified hooks when the
arena.<i>.extent_hooks mallctl is used to override the default hooks.
(@interwq)
- Add missing reentrancy support for custom extent hooks which allocate.
(@interwq)
- Post-fork(2), re-initialize the list of tcaches associated with each arena
to contain no tcaches except the forking thread's. (@interwq)
- Add missing post-fork(2) mutex reinitialization for extent_grow_mtx. This
fixes potential deadlocks after fork(2). (@interwq)
- Enforce minimum autoconf version (currently 2.68), since 2.63 is known to
generate corrupt configure scripts. (@jasone)
- Ensure that the configured page size (--with-lg-page) is no larger than the
configured huge page size (--with-lg-hugepage). (@jasone)

* 5.0.0 (June 13, 2017)

Unlike all previous jemalloc releases, this release does not use naturally
aligned "chunks" for virtual memory management, and instead uses page-aligned
"extents". This change has few externally visible effects, but the internal
impacts are... extensive. Many other internal changes combine to make this
the most cohesively designed version of jemalloc so far, with ample
opportunity for further enhancements.

Continuous integration is now an integral aspect of development thanks to the
efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably
stable on the tested platforms (Linux, FreeBSD, macOS, and Windows). As a
side effect the official release frequency may decrease over time.

New features:
- Implement optional per-CPU arena support; threads choose which arena to use
based on current CPU rather than on fixed thread-->arena associations.
(@interwq)
- Implement two-phase decay of unused dirty pages. Pages transition from
dirty-->muzzy-->clean, where the first phase transition relies on
madvise(... MADV_FREE) semantics, and the second phase transition discards
pages such that they are replaced with demand-zeroed pages on next access.
(@jasone)
- Increase decay time resolution from seconds to milliseconds. (@jasone)
- Implement opt-in per CPU background threads, and use them for asynchronous
decay-driven unused dirty page purging. (@interwq)
- Add mutex profiling, which collects a variety of statistics useful for
diagnosing overhead/contention issues. (@interwq)
- Add C++ new/delete operator bindings. (@djwatson)
- Support manually created arena destruction, such that all data and metadata
are discarded. Add MALLCTL_ARENAS_DESTROYED for accessing merged stats
associated with destroyed arenas. (@jasone)
- Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing
merged/destroyed arena statistics via mallctl. (@jasone)
- Add opt.abort_conf to optionally abort if invalid configuration options are
detected during initialization. (@interwq)
- Add opt.stats_print_opts, so that e.g. JSON output can be selected for the
stats dumped during exit if opt.stats_print is true. (@jasone)
- Add --with-version=VERSION for use when embedding jemalloc into another
project's git repository. (@jasone)
- Add --disable-thp to support cross compiling. (@jasone)
- Add --with-lg-hugepage to support cross compiling. (@jasone)
- Add mallctl interfaces (various authors):
+ background_thread
+ opt.abort_conf
+ opt.retain
+ opt.percpu_arena
+ opt.background_thread
+ opt.{dirty,muzzy}_decay_ms
+ opt.stats_print_opts
+ arena.<i>.initialized
+ arena.<i>.destroy
+ arena.<i>.{dirty,muzzy}_decay_ms
+ arena.<i>.extent_hooks
+ arenas.{dirty,muzzy}_decay_ms
+ arenas.bin.<i>.slab_size
+ arenas.nlextents
+ arenas.lextent.<i>.size
+ arenas.create
+ stats.background_thread.{num_threads,num_runs,run_interval}
+ stats.mutexes.{ctl,background_thread,prof,reset}.
{num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
num_owner_switch}
+ stats.arenas.<i>.{dirty,muzzy}_decay_ms
+ stats.arenas.<i>.uptime
+ stats.arenas.<i>.{pmuzzy,base,internal,resident}
+ stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
+ stats.arenas.<i>.bins.<j>.{nslabs,reslabs,curslabs}
+ stats.arenas.<i>.bins.<j>.mutex.
{num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
num_owner_switch}
+ stats.arenas.<i>.lextents.<j>.{nmalloc,ndalloc,nrequests,curlextents}
+ stats.arenas.i.mutexes.{large,extent_avail,extents_dirty,extents_muzzy,
extents_retained,decay_dirty,decay_muzzy,base,tcache_list}.
{num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
num_owner_switch}
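
A minimal sketch of driving two of the mallctl interfaces listed above through jemalloc's public mallctl() entry point, assuming an unprefixed jemalloc 5.x build; the surrounding program is illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    /* Turn on the 5.0 background purging threads at run time. */
    bool enable = true;
    if (mallctl("background_thread", NULL, NULL, &enable, sizeof(enable)) != 0)
        fprintf(stderr, "background_thread not available in this build\n");

    /* Read back the dirty-page decay time (milliseconds) chosen at startup. */
    ssize_t decay_ms;
    size_t sz = sizeof(decay_ms);
    if (mallctl("opt.dirty_decay_ms", &decay_ms, &sz, NULL, 0) == 0)
        printf("opt.dirty_decay_ms = %zd\n", decay_ms);
    return 0;
}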

Portability improvements:
- Improve reentrant allocation support, such that deadlock is less likely if
e.g. a system library call in turn allocates memory. (@davidtgoldblatt,
@interwq)
- Support static linking of jemalloc with glibc. (@djwatson)

Optimizations and refactors:
- Organize virtual memory as "extents" of virtual memory pages, rather than as
naturally aligned "chunks", and store all metadata in arbitrarily distant
locations. This reduces virtual memory external fragmentation, and will
interact better with huge pages (not yet explicitly supported). (@jasone)
- Fold large and huge size classes together; only small and large size classes
remain. (@jasone)
- Unify the allocation paths, and merge most fast-path branching decisions.
(@davidtgoldblatt, @interwq)
- Embed per thread automatic tcache into thread-specific data, which reduces
conditional branches and dereferences. Also reorganize tcache to increase
fast-path data locality. (@interwq)
- Rewrite atomics to closely model the C11 API, convert various
synchronization from mutex-based to atomic, and use the explicit memory
ordering control to resolve various hypothetical races without increasing
synchronization overhead. (@davidtgoldblatt)
- Extensively optimize rtree via various methods:
+ Add multiple layers of rtree lookup caching, since rtree lookups are now
part of fast-path deallocation. (@interwq)
+ Determine rtree layout at compile time. (@jasone)
+ Make the tree shallower for common configurations. (@jasone)
+ Embed the root node in the top-level rtree data structure, thus avoiding
one level of indirection. (@jasone)
+ Further specialize leaf elements as compared to internal node elements,
and directly embed extent metadata needed for fast-path deallocation.
(@jasone)
+ Ignore leading always-zero address bits (architecture-specific).
(@jasone)
- Reorganize headers (ongoing work) to make them hermetic, and disentangle
various module dependencies. (@davidtgoldblatt)
- Convert various internal data structures such as size class metadata from
boot-time-initialized to compile-time-initialized. Propagate resulting data
structure simplifications, such as making arena metadata fixed-size.
(@jasone)
- Simplify size class lookups when constrained to size classes that are
multiples of the page size. This speeds lookups, but the primary benefit is
complexity reduction in code that was the source of numerous regressions.
(@jasone)
- Lock individual extents when possible for localized extent operations,
rather than relying on a top-level arena lock. (@davidtgoldblatt, @jasone)
- Use first fit layout policy instead of best fit, in order to improve
packing. (@jasone)
- If munmap(2) is not in use, use an exponential series to grow each arena's
virtual memory, so that the number of disjoint virtual memory mappings
remains low. (@jasone)
- Implement per arena base allocators, so that arenas never share any virtual
memory pages. (@jasone)
- Automatically generate private symbol name mangling macros. (@jasone)

Incompatible changes:
- Replace chunk hooks with an expanded/normalized set of extent hooks.
(@jasone)
- Remove ratio-based purging. (@jasone)
- Remove --disable-tcache. (@jasone)
- Remove --disable-tls. (@jasone)
- Remove --enable-ivsalloc. (@jasone)
- Remove --with-lg-size-class-group. (@jasone)
- Remove --with-lg-tiny-min. (@jasone)
- Remove --disable-cc-silence. (@jasone)
- Remove --enable-code-coverage. (@jasone)
- Remove --disable-munmap (replaced by opt.retain). (@jasone)
- Remove Valgrind support. (@jasone)
- Remove quarantine support. (@jasone)
- Remove redzone support. (@jasone)
- Remove mallctl interfaces (various authors):
+ config.munmap
+ config.tcache
+ config.tls
+ config.valgrind
+ opt.lg_chunk
+ opt.purge
+ opt.lg_dirty_mult
+ opt.decay_time
+ opt.quarantine
+ opt.redzone
+ opt.thp
+ arena.<i>.lg_dirty_mult
+ arena.<i>.decay_time
+ arena.<i>.chunk_hooks
+ arenas.initialized
+ arenas.lg_dirty_mult
+ arenas.decay_time
+ arenas.bin.<i>.run_size
+ arenas.nlruns
+ arenas.lrun.<i>.size
+ arenas.nhchunks
+ arenas.hchunk.<i>.size
+ arenas.extend
+ stats.cactive
+ stats.arenas.<i>.lg_dirty_mult
+ stats.arenas.<i>.decay_time
+ stats.arenas.<i>.metadata.{mapped,allocated}
+ stats.arenas.<i>.{npurge,nmadvise,purged}
+ stats.arenas.<i>.huge.{allocated,nmalloc,ndalloc,nrequests}
+ stats.arenas.<i>.bins.<j>.{nruns,reruns,curruns}
+ stats.arenas.<i>.lruns.<j>.{nmalloc,ndalloc,nrequests,curruns}
+ stats.arenas.<i>.hchunks.<j>.{nmalloc,ndalloc,nrequests,curhchunks}

Bug fixes:
- Improve interval-based profile dump triggering to dump only one profile when
a single allocation's size exceeds the interval. (@jasone)
- Use prefixed function names (as controlled by --with-jemalloc-prefix) when
pruning backtrace frames in jeprof. (@jasone)

* 4.5.0 (February 28, 2017)

This is the first release to benefit from much broader continuous integration
testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure
in place for prior releases, it would have caught all of the most serious
regressions fixed by this release.

New features:
- Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for
transparent huge page integration. (@jasone)
- Update zone allocator integration to work with macOS 10.12. (@glandium)
- Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and
EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not
during configuration. (@jasone, @ronawho)

Bug fixes:
- Fix DSS (sbrk(2)-based) allocation. This regression was first released in
4.3.0. (@jasone)
- Handle race in per size class utilization computation. This functionality
was first released in 4.0.0. (@interwq)
- Fix lock order reversal during gdump. (@jasone)
- Fix/refactor tcache synchronization. This regression was first released in
4.0.0. (@jasone)
- Fix various JSON-formatted malloc_stats_print() bugs. This functionality
was first released in 4.3.0. (@jasone)
- Fix huge-aligned allocation. This regression was first released in 4.4.0.
(@jasone)
- When transparent huge page integration is enabled, detect what state pages
start in according to the kernel's current operating mode, and only convert
arena chunks to non-huge during purging if that is not their initial state.
This functionality was first released in 4.4.0. (@jasone)
- Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case.
This regression was first released in 4.0.0. (@jasone, @428desmo)
- Properly detect sparc64 when building for Linux. (@glaubitz)

* 4.4.0 (December 3, 2016)

New features:
- Add configure support for *-*-linux-android. (@cferris1000, @jasone)
- Add the --disable-syscall configure option, for use on systems that place
security-motivated limitations on syscall(2). (@jasone)
- Add support for Debian GNU/kFreeBSD. (@thesam)

Optimizations:
- Add extent serial numbers and use them where appropriate as a sort key that
is higher priority than address, so that the allocation policy prefers older
extents. This tends to improve locality (decrease fragmentation) when
memory grows downward. (@jasone)
- Refactor madvise(2) configuration so that MADV_FREE is detected and utilized
on Linux 4.5 and newer. (@jasone)
- Mark partially purged arena chunks as non-huge-page. This improves
interaction with Linux's transparent huge page functionality. (@jasone)

Bug fixes:
- Fix size class computations for edge conditions involving extremely large
allocations. This regression was first released in 4.0.0. (@jasone,
@ingvarha)
- Remove overly restrictive assertions related to the cactive statistic. This
regression was first released in 4.1.0. (@jasone)
- Implement a more reliable detection scheme for os_unfair_lock on macOS.
(@jszakmeister)

* 4.3.1 (November 7, 2016)

Bug fixes:
- Fix a severe virtual memory leak. This regression was first released in
4.3.0. (@interwq, @jasone)
- Refactor atomic and prng APIs to restore support for 32-bit platforms that
use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone)

* 4.3.0 (November 4, 2016)

This is the first release that passes the test suite for multiple Windows
configurations, thanks in large part to @glandium setting up continuous
integration via AppVeyor (and Travis CI for Linux and OS X).

New features:
- Add "J" (JSON) support to malloc_stats_print(). (@jasone)
- Add Cray compiler support. (@ronawho)
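
A short hedged example of requesting the JSON form added above; malloc_stats_print() is jemalloc's public statistics dump, and passing a NULL write callback routes output through the default malloc_message handler (stderr):

#include <jemalloc/jemalloc.h>

/* Dump allocator statistics in the JSON format added in 4.3.0.
 * "J" selects JSON; the first two arguments select the output sink. */
static void dump_stats_json(void) {
    malloc_stats_print(NULL, NULL, "J");
}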

Optimizations:
- Add/use adaptive spinning for bootstrapping and radix tree node
initialization. (@jasone)

Bug fixes:
- Fix large allocation to search starting in the optimal size class heap,
which can substantially reduce virtual memory churn and fragmentation. This
regression was first released in 4.0.0. (@mjp41, @jasone)
- Fix stats.arenas.<i>.nthreads accounting. (@interwq)
- Fix and simplify decay-based purging. (@jasone)
- Make DSS (sbrk(2)-related) operations lockless, which resolves potential
deadlocks during thread exit. (@jasone)
- Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun,
@jasone)
- Fix over-sized allocation of arena_t (plus associated stats) data
structures. (@jasone, @interwq)
- Fix EXTRA_CFLAGS to not affect configuration. (@jasone)
- Fix a Valgrind integration bug. (@ronawho)
- Disallow 0x5a junk filling when running in Valgrind. (@jasone)
- Fix a file descriptor leak on Linux. This regression was first released in
4.2.0. (@vsarunas, @jasone)
- Fix static linking of jemalloc with glibc. (@djwatson)
- Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This
works around other libraries' system call wrappers performing reentrant
allocation. (@kspinka, @Whissi, @jasone)
- Fix OS X default zone replacement to work with OS X 10.12. (@glandium,
@jasone)
- Fix cached memory management to avoid needless commit/decommit operations
during purging, which resolves permanent virtual memory map fragmentation
issues on Windows. (@mjp41, @jasone)
- Fix TSD fetches to avoid (recursive) allocation. This is relevant to
non-TLS and Windows configurations. (@jasone)
- Fix malloc_conf overriding to work on Windows. (@jasone)
- Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone)

* 4.2.1 (June 8, 2016)

Bug fixes:
- Fix bootstrapping issues for configurations that require allocation during
tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone)
- Fix gettimeofday() version of nstime_update(). (@ronawho)
- Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho)
- Fix potential VM map fragmentation regression. (@jasone)
- Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone)
- Fix heap profiling context leaks in reallocation edge cases. (@jasone)

* 4.2.0 (May 12, 2016)

New features:
- Add the arena.<i>.reset mallctl, which makes it possible to discard all of
an arena's allocations in a single operation. (@jasone)
- Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone)
- Add the --with-version configure option. (@jasone)
- Support --with-lg-page values larger than actual page size. (@jasone)
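
The arena.<i>.reset mallctl listed above takes the arena index embedded in its name; a minimal, hedged sketch of invoking it (the helper and its error handling are illustrative, and the caller must guarantee nothing allocated from that arena is still in use):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Discard every allocation owned by one arena via arena.<i>.reset (4.2.0+). */
static int arena_reset(unsigned arena_ind) {
    char name[64];
    snprintf(name, sizeof(name), "arena.%u.reset", arena_ind);
    /* The reset control takes no old or new value. */
    return mallctl(name, NULL, NULL, NULL, 0);
}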

Optimizations:
- Use pairing heaps rather than red-black trees for various hot data
structures. (@djwatson, @jasone)
- Streamline fast paths of rtree operations. (@jasone)
- Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone)
- Decommit unused virtual memory if the OS does not overcommit. (@jasone)
- Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order
to avoid unfortunate interactions during fork(2). (@jasone)

Bug fixes:
- Fix chunk accounting related to triggering gdump profiles. (@jasone)
- Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone)
- Scale leak report summary according to sampling probability. (@jasone)

* 4.1.1 (May 3, 2016)

This bugfix release resolves a variety of mostly minor issues, though the
bitmap fix is critical for 64-bit Windows.

Bug fixes:
- Fix the linear scan version of bitmap_sfu() to shift by the proper amount
even when sizeof(long) is not the same as sizeof(void *), as on 64-bit
Windows. (@jasone)
- Fix hashing functions to avoid unaligned memory accesses (and resulting
crashes). This is relevant at least to some ARM-based platforms.
(@rkmisra)
- Fix fork()-related lock rank ordering reversals. These reversals were
unlikely to cause deadlocks in practice except when heap profiling was
enabled and active. (@jasone)
- Fix various chunk leaks in OOM code paths. (@jasone)
- Fix malloc_stats_print() to print opt.narenas correctly. (@jasone)
- Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin)
- Fix a variety of test failures that were due to test fragility rather than
core bugs. (@jasone)

* 4.1.0 (February 28, 2016)

This release is primarily about optimizations, but it also incorporates a lot
of portability-motivated refactoring and enhancements. Many people worked on
this release, to an extent that even with the omission here of minor changes
(see git revision history), and of the people who reported and diagnosed
issues, so much of the work was contributed that starting with this release,
changes are annotated with author credits to help reflect the collaborative
effort involved.

New features:
- Implement decay-based unused dirty page purging, a major optimization with
mallctl API impact. This is an alternative to the existing ratio-based
unused dirty page purging, and is intended to eventually become the sole
purging mechanism. New mallctls:
+ opt.purge
+ opt.decay_time
+ arena.<i>.decay
+ arena.<i>.decay_time
+ arenas.decay_time
+ stats.arenas.<i>.decay_time
(@jasone, @cevans87)
- Add --with-malloc-conf, which makes it possible to embed a default
options string during configuration. This was motivated by the desire to
specify --with-malloc-conf=purge:decay , since the default must remain
purge:ratio until the 5.0.0 release. (@jasone)
- Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin)
- Make *allocx() size class overflow behavior defined. The maximum
size class is now less than PTRDIFF_MAX to protect applications against
numerical overflow, and all allocation functions are guaranteed to indicate
errors rather than potentially crashing if the request size exceeds the
maximum size class. (@jasone)
- jeprof:
+ Add raw heap profile support. (@jasone)
+ Add --retain and --exclude for backtrace symbol filtering. (@jasone)
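
The same option string that --with-malloc-conf embeds at configure time can also be supplied by the application itself through jemalloc's malloc_conf symbol (or, at run time, the MALLOC_CONF environment variable, which is processed last and overrides both). A hedged one-line sketch, assuming an unprefixed build and the 4.1-era option names mentioned above:

/* Illustrative only: compile a default option string into the application.
 * Equivalent keys can still be overridden via MALLOC_CONF at run time. */
const char *malloc_conf = "purge:decay,decay_time:10";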

Optimizations:
- Optimize the fast path to combine various bootstrapping and configuration
checks and execute more streamlined code in the common case. (@interwq)
- Use linear scan for small bitmaps (used for small object tracking). In
addition to speeding up bitmap operations on 64-bit systems, this reduces
allocator metadata overhead by approximately 0.2%. (@djwatson)
- Separate arena_avail trees, which substantially speeds up run tree
operations. (@djwatson)
- Use memoization (boot-time-computed table) for run quantization. Separate
arena_avail trees reduced the importance of this optimization. (@jasone)
- Attempt mmap-based in-place huge reallocation. This can dramatically speed
up incremental huge reallocation. (@jasone)

Incompatible changes:
- Make opt.narenas unsigned rather than size_t. (@jasone)

Bug fixes:
- Fix stats.cactive accounting regression. (@rustyx, @jasone)
- Handle unaligned keys in hash(). This caused problems for some ARM systems.
(@jasone, @cferris1000)
- Refactor arenas array. In addition to fixing a fork-related deadlock, this
makes arena lookups faster and simpler. (@jasone)
- Move retained memory allocation out of the default chunk allocation
function, to a location that gets executed even if the application installs
a custom chunk allocation function. This resolves a virtual memory leak.
(@buchgr)
- Fix a potential tsd cleanup leak. (@cferris1000, @jasone)
- Fix run quantization. In practice this bug had no impact unless
applications requested memory with alignment exceeding one page.
(@jasone, @djwatson)
- Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv)
- jeprof:
+ Don't discard curl options if timeout is not defined. (@djwatson)
+ Detect failed profile fetches. (@djwatson)
- Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for
--disable-stats case. (@jasone)

* 4.0.4 (October 24, 2015)

This bugfix release fixes another xallocx() regression. No other regressions
have come to light in over a month, so this is likely a good starting point
for people who prefer to wait for "dot one" releases with all the major issues
shaken out.

Bug fixes:
- Fix xallocx(..., MALLOCX_ZERO to zero the last full trailing page of large
allocations that have been randomly assigned an offset of 0 when
--enable-cache-oblivious configure option is enabled.

* 4.0.3 (September 24, 2015)

This bugfix release continues the trend of xallocx() and heap profiling fixes.

Bug fixes:
- Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large
allocations when --enable-cache-oblivious configure option is enabled.
- Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations
when resizing from/to a size class that is not a multiple of the chunk size.
- Fix prof_tctx_dump_iter() to filter out nodes that were created after heap
profile dumping started.
- Work around a potentially bad thread-specific data initialization
interaction with NPTL (glibc's pthreads implementation).

* 4.0.2 (September 21, 2015)

This bugfix release addresses a few bugs specific to heap profiling.

Bug fixes:
- Fix ixallocx_prof_sample() to never modify nor create sampled small
allocations. xallocx() is in general incapable of moving small allocations,
so this fix removes buggy code without loss of generality.
- Fix irallocx_prof_sample() to always allocate large regions, even when
alignment is non-zero.
- Fix prof_alloc_rollback() to read tdata from thread-specific data rather
than dereferencing a potentially invalid tctx.

* 4.0.1 (September 15, 2015)

This is a bugfix release that is somewhat high risk due to the amount of
refactoring required to address deep xallocx() problems. As a side effect of
these fixes, xallocx() now tries harder to partially fulfill requests for
optional extra space. Note that a couple of minor heap profiling
optimizations are included, but these are better thought of as performance
fixes that were integral to disovering most of the other bugs.

Optimizations:
- Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
fast path when heap profiling is enabled. Additionally, split a special
case out into arena_prof_tctx_reset(), which also avoids chunk metadata
reads.
- Optimize irallocx_prof() to optimistically update the sampler state. The
prior implementation appears to have been a holdover from when
rallocx()/xallocx() functionality was combined as rallocm().

Bug fixes:
- Fix TLS configuration such that it is enabled by default for platforms on
which it works correctly.
- Fix arenas_cache_cleanup() and arena_get_hard() to handle
allocation/deallocation within the application's thread-specific data
cleanup functions even after arenas_cache is torn down.
- Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS.
- Fix chunk purge hook calls for in-place huge shrinking reallocation to
specify the old chunk size rather than the new chunk size. This bug caused
no correctness issues for the default chunk purge function, but was
visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
- Fix heap profiling bugs:
+ Fix heap profiling to distinguish among otherwise identical sample sites
with interposed resets (triggered via the "prof.reset" mallctl). This bug
could cause data structure corruption that would most likely result in a
segfault.
+ Fix irealloc_prof() to prof_alloc_rollback() on OOM.
+ Make one call to prof_active_get_unlocked() per allocation event, and use
the result throughout the relevant functions that handle an allocation
event. Also add a missing check in prof_realloc(). These fixes protect
allocation events against concurrent prof_active changes.
+ Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample()
in the correct order.
+ Fix prof_realloc() to call prof_free_sampled_object() after calling
prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx were
the same, the tctx could have been prematurely destroyed.
- Fix portability bugs:
+ Don't bitshift by negative amounts when encoding/decoding run sizes in
chunk header maps. This affected systems with page sizes greater than 8
KiB.
+ Rename index_t to szind_t to avoid an existing type on Solaris.
+ Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
match glibc and avoid compilation errors when including both
jemalloc/jemalloc.h and malloc.h in C++ code.
+ Don't assume that /bin/sh is appropriate when running size_classes.sh
during configuration.
+ Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM.
+ Link tests to librt if it contains clock_gettime(2).

* 4.0.0 (August 17, 2015)

This version contains many speed and space optimizations, both minor and
major. The major themes are generalization, unification, and simplification.
Although many of these optimizations cause no visible behavior change, their
cumulative effect is substantial.

New features:
- Normalize size class spacing to be consistent across the complete size
range. By default there are four size classes per size doubling, but this
is now configurable via the --with-lg-size-class-group option. Also add the
--with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and
--with-lg-tiny-min options, which can be used to tweak page and size class
settings. Impacts:
+ Worst case performance for incrementally growing/shrinking reallocation
is improved because there are far fewer size classes, and therefore
copying happens less often.
+ Internal fragmentation is limited to 20% for all but the smallest size
classes (those less than four times the quantum). (1B + 4 KiB)
and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation.
+ Chunk fragmentation tends to be lower because there are fewer distinct run
sizes to pack.
- Add support for explicit tcaches. The "tcache.create", "tcache.flush", and
"tcache.destroy" mallctls control tcache lifetime and flushing, and the
MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API
control which tcache is used for each operation.
- Implement per thread heap profiling, as well as the ability to
enable/disable heap profiling on a per thread basis. Add the "prof.reset",
"prof.lg_sample", "thread.prof.name", "thread.prof.active",
"opt.prof_thread_active_init", "prof.thread_active_init", and
"thread.prof.active" mallctls.
- Add support for per arena application-specified chunk allocators, configured
via the "arena.<i>.chunk_hooks" mallctl.
- Refactor huge allocation to be managed by arenas, so that arenas now
function as general purpose independent allocators. This is important in
the context of user-specified chunk allocators, aside from the scalability
benefits. Related new statistics:
+ The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
"stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
mallctls provide high level per arena huge allocation statistics.
+ The "arenas.nhchunks", "arenas.hchunk.<i>.size",
"stats.arenas.<i>.hchunks.<j>.nmalloc",
"stats.arenas.<i>.hchunks.<j>.ndalloc",
"stats.arenas.<i>.hchunks.<j>.nrequests", and
"stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class
statistics.
- Add the 'util' column to malloc_stats_print() output, which reports the
proportion of available regions that are currently in use for each small
size class.
- Add "alloc" and "free" modes for for junk filling (see the "opt.junk"
mallctl), so that it is possible to separately enable junk filling for
allocation versus deallocation.
- Add the jemalloc-config script, which provides information about how
jemalloc was configured, and how to integrate it into application builds.
- Add metadata statistics, which are accessible via the "stats.metadata",
"stats.arenas.<i>.metadata.mapped", and
"stats.arenas.<i>.metadata.allocated" mallctls.
- Add the "stats.resident" mallctl, which reports the upper limit of
physically resident memory mapped by the allocator.
- Add per arena control over unused dirty page purging, via the
"arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
"stats.arenas.<i>.lg_dirty_mult" mallctls.
- Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
feature on/off during program execution.
- Add sdallocx(), which implements sized deallocation. The primary
optimization over dallocx() is the removal of a metadata read, which often
suffers an L1 cache miss.
- Add missing header includes in jemalloc/jemalloc.h, so that applications
only have to #include <jemalloc/jemalloc.h>.
- Add support for additional platforms:
+ Bitrig
+ Cygwin
+ DragonFlyBSD
+ iOS
+ OpenBSD
+ OpenRISC/or1k
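
Two of the 4.0.0 features listed above, explicit tcaches and sized deallocation, compose naturally; a minimal, hedged sketch of that flow (the helper is illustrative, error handling is abbreviated, and an unprefixed build is assumed):

#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Create an explicit tcache, route one allocation through it via the
 * MALLOCX_TCACHE() flag, then release it with sized deallocation. */
static void tcache_demo(void) {
    unsigned tc;
    size_t sz = sizeof(tc);
    if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
        return;
    void *p = mallocx(4096, MALLOCX_TCACHE(tc) | MALLOCX_ZERO);
    if (p != NULL)
        sdallocx(p, 4096, MALLOCX_TCACHE(tc));
    mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
}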

Optimizations:
- Maintain dirty runs in per arena LRUs rather than in per arena trees of
dirty-run-containing chunks. In practice this change significantly reduces
dirty page purging volume.
- Integrate whole chunks into the unused dirty page purging machinery. This
reduces the cost of repeated huge allocation/deallocation, because it
effectively introduces a cache of chunks.
- Split the arena chunk map into two separate arrays, in order to increase
cache locality for the frequently accessed bits.
- Move small run metadata out of runs, into arena chunk headers. This reduces
run fragmentation, smaller runs reduce external fragmentation for small size
classes, and packed (less uniformly aligned) metadata layout improves CPU
cache set distribution.
- Randomly distribute large allocation base pointer alignment relative to page
boundaries in order to more uniformly utilize CPU cache sets. This can be
disabled via the --disable-cache-oblivious configure option, and queried via
the "config.cache_oblivious" mallctl.
- Micro-optimize the fast paths for the public API functions.
- Refactor thread-specific data to reside in a single structure. This assures
that only a single TLS read is necessary per call into the public API.
- Implement in-place huge allocation growing and shrinking.
- Refactor rtree (radix tree for chunk lookups) to be lock-free, and make
additional optimizations that reduce maximum lookup depth to one or two
levels. This resolves what was a concurrency bottleneck for per arena huge
allocation, because a global data structure is critical for determining
which arenas own which huge allocations.

Incompatible changes:
- Replace --enable-cc-silence with --disable-cc-silence to suppress spurious
warnings by default.
- Assure that the constness of malloc_usable_size()'s return type matches that
of the system implementation.
- Change the heap profile dump format to support per thread heap profiling,
rename pprof to jeprof, and enhance it with the --thread=<n> option. As a
result, the bundled jeprof must now be used rather than the upstream
(gperftools) pprof.
- Disable "opt.prof_final" by default, in order to avoid atexit(3), which can
internally deadlock on some platforms.
- Change the "arenas.nlruns" mallctl type from size_t to unsigned.
- Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with
"stats.arenas.<i>.bins.<j>.curregs".
- Ignore MALLOC_CONF in set{uid,gid,cap} binaries.
- Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the
MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage.

Removed features:
- Remove the *allocm() API, which is superseded by the *allocx() API.
- Remove the --enable-dss options, and make dss non-optional on all platforms
which support sbrk(2).
- Remove the "arenas.purge" mallctl, which was obsoleted by the
"arena.<i>.purge" mallctl in 3.1.0.
- Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically
detects whether it is running inside Valgrind.
- Remove the "stats.huge.allocated", "stats.huge.nmalloc", and
"stats.huge.ndalloc" mallctls.
- Remove the --enable-mremap option.
- Remove the "stats.chunks.current", "stats.chunks.total", and
"stats.chunks.high" mallctls.

Bug fixes:
- Fix the cactive statistic to decrease (rather than increase) when active
memory decreases. This regression was first released in 3.5.0.
- Fix OOM handling in memalign() and valloc(). A variant of this bug existed
in all releases since 2.0.0, which introduced these functions.
- Fix an OOM-related regression in arena_tcache_fill_small(), which could
cause cache corruption on OOM. This regression was present in all releases
from 2.2.0 through 3.6.0.
- Fix size class overflow handling for malloc(), posix_memalign(), memalign(),
calloc(), and realloc() when profiling is enabled.
- Fix the "arena.<i>.dss" mallctl to return an error if "primary" or
"secondary" precedence is specified, but sbrk(2) is not supported.
- Fix fallback lg_floor() implementations to handle extremely large inputs.
- Ensure the default purgeable zone is after the default zone on OS X.
- Fix latent bugs in atomic_*().
- Fix the "arena.<i>.dss" mallctl to handle read-only calls.
- Fix tls_model configuration to enable the initial-exec model when possible.
- Mark malloc_conf as a weak symbol so that the application can override it.
- Correctly detect glibc's adaptive pthread mutexes.
- Fix the --without-export configure option.

* 3.6.0 (March 31, 2014)

This version contains a critical bug fix for a regression present in 3.5.0 and
@@ -21,7 +738,7 @@ found in the git revision history:
backtracing to be reliable.
- Use dss allocation precedence for huge allocations as well as small/large
allocations.
- Fix test assertion failure message formatting. This bug did not manifect on
- Fix test assertion failure message formatting. This bug did not manifest on
x86_64 systems because of implementation subtleties in va_list.
- Fix inconsequential test failures for hash and SFMT code.

@@ -516,7 +1233,7 @@ found in the git revision history:
- Make it possible for the application to manually flush a thread's cache, via
the "tcache.flush" mallctl.
- Base maximum dirty page count on proportion of active memory.
- Compute various addtional run-time statistics, including per size class
- Compute various additional run-time statistics, including per size class
statistics for large objects.
- Expose malloc_stats_print(), which can be called repeatedly by the
application.

14 deps/jemalloc/README (vendored)
@@ -3,12 +3,12 @@ fragmentation avoidance and scalable concurrency support. jemalloc first came
into use as the FreeBSD libc allocator in 2005, and since then it has found its
way into numerous applications that rely on its predictable behavior. In 2010
jemalloc development efforts broadened to include developer support features
such as heap profiling, Valgrind integration, and extensive monitoring/tuning
hooks. Modern jemalloc releases continue to be integrated back into FreeBSD,
and therefore versatility remains critical. Ongoing development efforts trend
toward making jemalloc among the best allocators for a broad range of demanding
applications, and eliminating/mitigating weaknesses that have practical
repercussions for real world applications.
such as heap profiling and extensive monitoring/tuning hooks. Modern jemalloc
releases continue to be integrated back into FreeBSD, and therefore versatility
remains critical. Ongoing development efforts trend toward making jemalloc
among the best allocators for a broad range of demanding applications, and
eliminating/mitigating weaknesses that have practical repercussions for real
world applications.

The COPYING file contains copyright and licensing information.

@@ -17,4 +17,4 @@ jemalloc.

The ChangeLog file contains a brief summary of changes for each release.

URL: http://www.canonware.com/jemalloc/
URL: http://jemalloc.net/

363 deps/jemalloc/include/jemalloc/internal/atomic.h (vendored)
@@ -1,304 +1,77 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
#ifndef JEMALLOC_INTERNAL_ATOMIC_H
|
||||
#define JEMALLOC_INTERNAL_ATOMIC_H
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
#define ATOMIC_INLINE static inline
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
|
||||
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
|
||||
#define atomic_read_z(p) atomic_add_z(p, 0)
|
||||
#define atomic_read_u(p) atomic_add_u(p, 0)
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
|
||||
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
|
||||
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
|
||||
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
|
||||
size_t atomic_add_z(size_t *p, size_t x);
|
||||
size_t atomic_sub_z(size_t *p, size_t x);
|
||||
unsigned atomic_add_u(unsigned *p, unsigned x);
|
||||
unsigned atomic_sub_u(unsigned *p, unsigned x);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
|
||||
/******************************************************************************/
|
||||
/* 64-bit operations. */
|
||||
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
|
||||
# ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_add_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (__sync_add_and_fetch(p, x));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_sub_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (__sync_sub_and_fetch(p, x));
|
||||
}
|
||||
#elif (defined(_MSC_VER))
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_add_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (InterlockedExchangeAdd64(p, x));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_sub_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (InterlockedExchangeAdd64(p, -((int64_t)x)));
|
||||
}
|
||||
#elif (defined(JEMALLOC_OSATOMIC))
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_add_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_sub_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
|
||||
}
|
||||
# elif (defined(__amd64__) || defined(__x86_64__))
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_add_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
asm volatile (
|
||||
"lock; xaddq %0, %1;"
|
||||
: "+r" (x), "=m" (*p) /* Outputs. */
|
||||
: "m" (*p) /* Inputs. */
|
||||
);
|
||||
|
||||
return (x);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_sub_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
x = (uint64_t)(-(int64_t)x);
|
||||
asm volatile (
|
||||
"lock; xaddq %0, %1;"
|
||||
: "+r" (x), "=m" (*p) /* Outputs. */
|
||||
: "m" (*p) /* Inputs. */
|
||||
);
|
||||
|
||||
return (x);
|
||||
}
|
||||
# elif (defined(JEMALLOC_ATOMIC9))
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_add_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
/*
|
||||
* atomic_fetchadd_64() doesn't exist, but we only ever use this
|
||||
* function on LP64 systems, so atomic_fetchadd_long() will do.
|
||||
*/
|
||||
assert(sizeof(uint64_t) == sizeof(unsigned long));
|
||||
|
||||
return (atomic_fetchadd_long(p, (unsigned long)x) + x);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_sub_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
assert(sizeof(uint64_t) == sizeof(unsigned long));
|
||||
|
||||
return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
|
||||
}
|
||||
# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_add_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (__sync_add_and_fetch(p, x));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_sub_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (__sync_sub_and_fetch(p, x));
|
||||
}
|
||||
# else
|
||||
# error "Missing implementation for 64-bit atomic operations"
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/******************************************************************************/
|
||||
/* 32-bit operations. */
|
||||
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_add_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (__sync_add_and_fetch(p, x));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_sub_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (__sync_sub_and_fetch(p, x));
|
||||
}
|
||||
#elif (defined(_MSC_VER))
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_add_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (InterlockedExchangeAdd(p, x));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_sub_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (InterlockedExchangeAdd(p, -((int32_t)x)));
|
||||
}
|
||||
#elif (defined(JEMALLOC_OSATOMIC))
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_add_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_sub_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
|
||||
}
|
||||
#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_add_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
asm volatile (
|
||||
"lock; xaddl %0, %1;"
|
||||
: "+r" (x), "=m" (*p) /* Outputs. */
|
||||
: "m" (*p) /* Inputs. */
|
||||
);
|
||||
|
||||
return (x);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_sub_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
x = (uint32_t)(-(int32_t)x);
|
||||
asm volatile (
|
||||
"lock; xaddl %0, %1;"
|
||||
: "+r" (x), "=m" (*p) /* Outputs. */
|
||||
: "m" (*p) /* Inputs. */
|
||||
);
|
||||
|
||||
return (x);
|
||||
}
|
||||
#elif (defined(JEMALLOC_ATOMIC9))
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_add_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (atomic_fetchadd_32(p, x) + x);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_sub_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
|
||||
}
|
||||
#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_add_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (__sync_add_and_fetch(p, x));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_sub_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (__sync_sub_and_fetch(p, x));
|
||||
}
|
||||
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
|
||||
# include "jemalloc/internal/atomic_gcc_atomic.h"
|
||||
#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
|
||||
# include "jemalloc/internal/atomic_gcc_sync.h"
|
||||
#elif defined(_MSC_VER)
|
||||
# include "jemalloc/internal/atomic_msvc.h"
|
||||
#elif defined(JEMALLOC_C11_ATOMICS)
|
||||
# include "jemalloc/internal/atomic_c11.h"
|
||||
#else
|
||||
# error "Missing implementation for 32-bit atomic operations"
|
||||
# error "Don't have atomics implemented on this platform."
|
||||
#endif

/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t
atomic_add_z(size_t *p, size_t x)
{
/*
 * This header gives more or less a backport of C11 atomics. The user can write
 * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
 * counterparts of the C11 atomic functions for type, as so:
 *   JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
 * and then write things like:
 *   int *some_ptr;
 *   atomic_pi_t atomic_ptr_to_int;
 *   atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
 *   int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL);
 *   assert(some_ptr == prev_value);
 * and expect things to work in the obvious way.
 *
 * Also included (with naming differences to avoid conflicts with the standard
 * library):
 *   atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
 *   ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
 */

#if (LG_SIZEOF_PTR == 3)
	return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
	return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
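As a rough illustration of the usage pattern the backport comment above describes, here is a standalone C11 sketch using <stdatomic.h> directly; it only mirrors what the generated atomic_*_pi wrappers would do and is not jemalloc's own generated code.

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

static int value;

int
main(void) {
	int *some_ptr = &value;
	_Atomic(int *) atomic_ptr_to_int;

	/* Mirrors atomic_store_pi(..., ATOMIC_RELAXED) from the comment above. */
	atomic_store_explicit(&atomic_ptr_to_int, some_ptr, memory_order_relaxed);
	/* Mirrors atomic_exchange_pi(..., ATOMIC_ACQ_REL). */
	int *prev_value = atomic_exchange_explicit(&atomic_ptr_to_int, NULL,
	    memory_order_acq_rel);
	assert(some_ptr == prev_value);
	return 0;
}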
|
||||
/*
|
||||
* Pure convenience, so that we don't have to type "atomic_memory_order_"
|
||||
* quite so often.
|
||||
*/
|
||||
#define ATOMIC_RELAXED atomic_memory_order_relaxed
|
||||
#define ATOMIC_ACQUIRE atomic_memory_order_acquire
|
||||
#define ATOMIC_RELEASE atomic_memory_order_release
|
||||
#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
|
||||
#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
|
||||
|
||||
JEMALLOC_INLINE size_t
|
||||
atomic_sub_z(size_t *p, size_t x)
|
||||
{
|
||||
|
||||
#if (LG_SIZEOF_PTR == 3)
|
||||
return ((size_t)atomic_add_uint64((uint64_t *)p,
|
||||
(uint64_t)-((int64_t)x)));
|
||||
#elif (LG_SIZEOF_PTR == 2)
|
||||
return ((size_t)atomic_add_uint32((uint32_t *)p,
|
||||
(uint32_t)-((int32_t)x)));
|
||||
#endif
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
/* unsigned operations. */
|
||||
JEMALLOC_INLINE unsigned
|
||||
atomic_add_u(unsigned *p, unsigned x)
|
||||
{
|
||||
|
||||
#if (LG_SIZEOF_INT == 3)
|
||||
return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
|
||||
#elif (LG_SIZEOF_INT == 2)
|
||||
return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
|
||||
#endif
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE unsigned
|
||||
atomic_sub_u(unsigned *p, unsigned x)
|
||||
{
|
||||
|
||||
#if (LG_SIZEOF_INT == 3)
|
||||
return ((unsigned)atomic_add_uint64((uint64_t *)p,
|
||||
(uint64_t)-((int64_t)x)));
|
||||
#elif (LG_SIZEOF_INT == 2)
|
||||
return ((unsigned)atomic_add_uint32((uint32_t *)p,
|
||||
(uint32_t)-((int32_t)x)));
|
||||
#endif
|
||||
}
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* Not all platforms have 64-bit atomics. If we do, this #define exposes that
|
||||
* fact.
|
||||
*/
|
||||
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
|
||||
# define JEMALLOC_ATOMIC_U64
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
|
||||
|
||||
/*
|
||||
* There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
|
||||
* platform that actually needs to know the size, MSVC.
|
||||
*/
|
||||
JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
|
||||
|
||||
JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
|
||||
|
||||
JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
|
||||
|
||||
JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
|
||||
|
||||
JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
|
||||
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
|
||||
#endif
|
||||
|
||||
#undef ATOMIC_INLINE
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ATOMIC_H */
|
||||
|
||||
349
deps/jemalloc/include/jemalloc/internal/bitmap.h
vendored
@@ -1,37 +1,159 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
#ifndef JEMALLOC_INTERNAL_BITMAP_H
|
||||
#define JEMALLOC_INTERNAL_BITMAP_H
|
||||
|
||||
#include "jemalloc/internal/arena_types.h"
|
||||
#include "jemalloc/internal/bit_util.h"
|
||||
#include "jemalloc/internal/size_classes.h"
|
||||
|
||||
typedef unsigned long bitmap_t;
|
||||
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
|
||||
|
||||
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
|
||||
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
|
||||
|
||||
typedef struct bitmap_level_s bitmap_level_t;
|
||||
typedef struct bitmap_info_s bitmap_info_t;
|
||||
typedef unsigned long bitmap_t;
|
||||
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
|
||||
#if LG_SLAB_MAXREGS > LG_CEIL_NSIZES
|
||||
/* Maximum bitmap bit count is determined by maximum regions per slab. */
|
||||
# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
|
||||
#else
|
||||
/* Maximum bitmap bit count is determined by number of extent size classes. */
|
||||
# define LG_BITMAP_MAXBITS LG_CEIL_NSIZES
|
||||
#endif
|
||||
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
|
||||
|
||||
/* Number of bits per group. */
|
||||
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
|
||||
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
|
||||
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
|
||||
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
|
||||
#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
|
||||
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
|
||||
|
||||
/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
    (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
    + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
/*
 * Do some analysis on how big the bitmap is before we use a tree. For a brute
 * force linear search, if we would have to call ffs_lu() more than 2^3 times,
 * use a tree instead.
 */
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
# define BITMAP_USE_TREE
#endif
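As a worked check of that cutoff, assuming 64-bit groups (LG_SIZEOF_BITMAP == 3, hence LG_BITMAP_GROUP_NBITS == 6): the tree is enabled once LG_BITMAP_MAXBITS exceeds 6 + 3 = 9, i.e. for bitmaps of more than 512 bits. At or below that size a flat scan touches at most 2^3 = 8 groups, which matches the "more than 2^3 calls to ffs_lu()" threshold in the comment.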

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
	(((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
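For instance, with the same 64-bit groups, BITMAP_BITS2GROUPS(100) evaluates to (100 + 63) >> 6 = 2 and BITMAP_BITS2GROUPS(64) to 1, so the macro is simply a round-up division of nbits by the group width.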
|
||||
|
||||
struct bitmap_level_s {
|
||||
/*
|
||||
* Number of groups required at a particular level for a given number of bits.
|
||||
*/
|
||||
#define BITMAP_GROUPS_L0(nbits) \
|
||||
BITMAP_BITS2GROUPS(nbits)
|
||||
#define BITMAP_GROUPS_L1(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
|
||||
#define BITMAP_GROUPS_L2(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
|
||||
#define BITMAP_GROUPS_L3(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
|
||||
BITMAP_BITS2GROUPS((nbits)))))
|
||||
#define BITMAP_GROUPS_L4(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
|
||||
|
||||
/*
|
||||
* Assuming the number of levels, number of groups required for a given number
|
||||
* of bits.
|
||||
*/
|
||||
#define BITMAP_GROUPS_1_LEVEL(nbits) \
|
||||
BITMAP_GROUPS_L0(nbits)
|
||||
#define BITMAP_GROUPS_2_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
|
||||
#define BITMAP_GROUPS_3_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
|
||||
#define BITMAP_GROUPS_4_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
|
||||
#define BITMAP_GROUPS_5_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
|
||||
|
||||
/*
|
||||
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
|
||||
*/
|
||||
#ifdef BITMAP_USE_TREE
|
||||
|
||||
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
|
||||
#else
|
||||
# error "Unsupported bitmap size"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Maximum number of levels possible. This could be statically computed based
|
||||
* on LG_BITMAP_MAXBITS:
|
||||
*
|
||||
* #define BITMAP_MAX_LEVELS \
|
||||
* (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
|
||||
* + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
|
||||
*
|
||||
* However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
|
||||
* instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
|
||||
* various cascading macros. The only additional cost this incurs is some
|
||||
* unused trailing entries in bitmap_info_t structures; the bitmaps themselves
|
||||
* are not impacted.
|
||||
*/
|
||||
#define BITMAP_MAX_LEVELS 5
|
||||
|
||||
#define BITMAP_INFO_INITIALIZER(nbits) { \
|
||||
/* nbits. */ \
|
||||
nbits, \
|
||||
/* nlevels. */ \
|
||||
(BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
|
||||
(BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
|
||||
(BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
|
||||
(BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
|
||||
/* levels. */ \
|
||||
{ \
|
||||
{0}, \
|
||||
{BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
|
||||
BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
|
||||
BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
|
||||
BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
|
||||
+ BITMAP_GROUPS_L0(nbits)} \
|
||||
} \
|
||||
}
|
||||
|
||||
#else /* BITMAP_USE_TREE */
|
||||
|
||||
#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
|
||||
#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
|
||||
|
||||
#define BITMAP_INFO_INITIALIZER(nbits) { \
|
||||
/* nbits. */ \
|
||||
nbits, \
|
||||
/* ngroups. */ \
|
||||
BITMAP_BITS2GROUPS(nbits) \
|
||||
}
|
||||
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
|
||||
typedef struct bitmap_level_s {
|
||||
/* Offset of this level's groups within the array of groups. */
|
||||
size_t group_offset;
|
||||
};
|
||||
} bitmap_level_t;
|
||||
|
||||
struct bitmap_info_s {
|
||||
typedef struct bitmap_info_s {
|
||||
/* Logical number of bits in bitmap (stored at bottom level). */
|
||||
size_t nbits;
|
||||
|
||||
#ifdef BITMAP_USE_TREE
|
||||
/* Number of levels necessary for nbits. */
|
||||
unsigned nlevels;
|
||||
|
||||
@@ -40,67 +162,62 @@ struct bitmap_info_s {
|
||||
* bottom to top (e.g. the bottom level is stored in levels[0]).
|
||||
*/
|
||||
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
|
||||
};
|
||||
#else /* BITMAP_USE_TREE */
|
||||
/* Number of groups necessary for nbits. */
|
||||
size_t ngroups;
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
} bitmap_info_t;
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
|
||||
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
|
||||
size_t bitmap_size(const bitmap_info_t *binfo);
|
||||
|
||||
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
|
||||
size_t bitmap_info_ngroups(const bitmap_info_t *binfo);
|
||||
size_t bitmap_size(size_t nbits);
|
||||
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
|
||||
bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
|
||||
void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
|
||||
size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
|
||||
void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
|
||||
JEMALLOC_INLINE bool
|
||||
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
|
||||
{
|
||||
unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
|
||||
static inline bool
|
||||
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
|
||||
#ifdef BITMAP_USE_TREE
|
||||
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
|
||||
bitmap_t rg = bitmap[rgoff];
|
||||
/* The bitmap is full iff the root group is 0. */
|
||||
return (rg == 0);
|
||||
#else
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < binfo->ngroups; i++) {
|
||||
if (bitmap[i] != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE bool
|
||||
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
|
||||
{
|
||||
static inline bool
|
||||
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
|
||||
size_t goff;
|
||||
bitmap_t g;
|
||||
|
||||
assert(bit < binfo->nbits);
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
g = bitmap[goff];
|
||||
return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
|
||||
return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
|
||||
{
|
||||
static inline void
|
||||
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
|
||||
size_t goff;
|
||||
bitmap_t *gp;
|
||||
bitmap_t g;
|
||||
|
||||
assert(bit < binfo->nbits);
|
||||
assert(bitmap_get(bitmap, binfo, bit) == false);
|
||||
assert(!bitmap_get(bitmap, binfo, bit));
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
gp = &bitmap[goff];
|
||||
g = *gp;
|
||||
assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
assert(bitmap_get(bitmap, binfo, bit));
|
||||
#ifdef BITMAP_USE_TREE
|
||||
/* Propagate group state transitions up the tree. */
|
||||
if (g == 0) {
|
||||
unsigned i;
|
||||
@@ -109,45 +226,113 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
gp = &bitmap[binfo->levels[i].group_offset + goff];
|
||||
g = *gp;
|
||||
assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
if (g != 0)
|
||||
if (g != 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/* ffu: find first unset >= bit. */
|
||||
static inline size_t
|
||||
bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
|
||||
assert(min_bit < binfo->nbits);
|
||||
|
||||
#ifdef BITMAP_USE_TREE
|
||||
size_t bit = 0;
|
||||
for (unsigned level = binfo->nlevels; level--;) {
|
||||
size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level +
|
||||
1));
|
||||
bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
|
||||
>> lg_bits_per_group)];
|
||||
unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
|
||||
bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
|
||||
assert(group_nmask <= BITMAP_GROUP_NBITS);
|
||||
bitmap_t group_mask = ~((1LU << group_nmask) - 1);
|
||||
bitmap_t group_masked = group & group_mask;
|
||||
if (group_masked == 0LU) {
|
||||
if (group == 0LU) {
|
||||
return binfo->nbits;
|
||||
}
|
||||
/*
|
||||
* min_bit was preceded by one or more unset bits in
|
||||
* this group, but there are no other unset bits in this
|
||||
* group. Try again starting at the first bit of the
|
||||
* next sibling. This will recurse at most once per
|
||||
* non-root level.
|
||||
*/
|
||||
size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
|
||||
assert(sib_base > min_bit);
|
||||
assert(sib_base > bit);
|
||||
if (sib_base >= binfo->nbits) {
|
||||
return binfo->nbits;
|
||||
}
|
||||
return bitmap_ffu(bitmap, binfo, sib_base);
|
||||
}
|
||||
bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
|
||||
(lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
|
||||
}
|
||||
assert(bit >= min_bit);
|
||||
assert(bit < binfo->nbits);
|
||||
return bit;
|
||||
#else
|
||||
size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
|
||||
bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
|
||||
- 1);
|
||||
size_t bit;
|
||||
do {
|
||||
bit = ffs_lu(g);
|
||||
if (bit != 0) {
|
||||
return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
|
||||
}
|
||||
i++;
|
||||
g = bitmap[i];
|
||||
} while (i < binfo->ngroups);
|
||||
return binfo->nbits;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* sfu: set first unset. */
|
||||
JEMALLOC_INLINE size_t
|
||||
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
|
||||
{
|
||||
static inline size_t
|
||||
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
|
||||
size_t bit;
|
||||
bitmap_t g;
|
||||
unsigned i;
|
||||
|
||||
assert(bitmap_full(bitmap, binfo) == false);
|
||||
assert(!bitmap_full(bitmap, binfo));
|
||||
|
||||
#ifdef BITMAP_USE_TREE
|
||||
i = binfo->nlevels - 1;
|
||||
g = bitmap[binfo->levels[i].group_offset];
|
||||
bit = ffsl(g) - 1;
|
||||
bit = ffs_lu(g) - 1;
|
||||
while (i > 0) {
|
||||
i--;
|
||||
g = bitmap[binfo->levels[i].group_offset + bit];
|
||||
bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
|
||||
bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
|
||||
}
|
||||
|
||||
#else
|
||||
i = 0;
|
||||
g = bitmap[0];
|
||||
while ((bit = ffs_lu(g)) == 0) {
|
||||
i++;
|
||||
g = bitmap[i];
|
||||
}
|
||||
bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
|
||||
#endif
|
||||
bitmap_set(bitmap, binfo, bit);
|
||||
return (bit);
|
||||
return bit;
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
|
||||
{
|
||||
static inline void
|
||||
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
|
||||
size_t goff;
|
||||
bitmap_t *gp;
|
||||
bitmap_t g;
|
||||
bool propagate;
|
||||
UNUSED bool propagate;
|
||||
|
||||
assert(bit < binfo->nbits);
|
||||
assert(bitmap_get(bitmap, binfo, bit));
|
||||
@@ -155,10 +340,11 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
|
||||
gp = &bitmap[goff];
|
||||
g = *gp;
|
||||
propagate = (g == 0);
|
||||
assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
|
||||
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
assert(bitmap_get(bitmap, binfo, bit) == false);
|
||||
assert(!bitmap_get(bitmap, binfo, bit));
|
||||
#ifdef BITMAP_USE_TREE
|
||||
/* Propagate group state transitions up the tree. */
|
||||
if (propagate) {
|
||||
unsigned i;
|
||||
@@ -168,17 +354,16 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
|
||||
gp = &bitmap[binfo->levels[i].group_offset + goff];
|
||||
g = *gp;
|
||||
propagate = (g == 0);
|
||||
assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
|
||||
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
|
||||
== 0);
|
||||
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
if (propagate == false)
|
||||
if (!propagate) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
#endif /* JEMALLOC_INTERNAL_BITMAP_H */
|
||||
|
||||
117
deps/jemalloc/include/jemalloc/internal/ckh.h
vendored
@@ -1,88 +1,101 @@
|
||||
#ifndef JEMALLOC_INTERNAL_CKH_H
|
||||
#define JEMALLOC_INTERNAL_CKH_H
|
||||
|
||||
#include "jemalloc/internal/tsd.h"
|
||||
|
||||
/* Cuckoo hashing implementation. Skip to the end for the interface. */
|
||||
|
||||
/******************************************************************************/
|
||||
/* INTERNAL DEFINITIONS -- IGNORE */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
typedef struct ckh_s ckh_t;
|
||||
typedef struct ckhc_s ckhc_t;
|
||||
|
||||
/* Typedefs to allow easy function pointer passing. */
|
||||
typedef void ckh_hash_t (const void *, size_t[2]);
|
||||
typedef bool ckh_keycomp_t (const void *, const void *);
|
||||
|
||||
/* Maintain counters used to get an idea of performance. */
|
||||
/* #define CKH_COUNT */
|
||||
/* #define CKH_COUNT */
|
||||
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
|
||||
/* #define CKH_VERBOSE */
|
||||
/* #define CKH_VERBOSE */
|
||||
|
||||
/*
 * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
 * one bucket per L1 cache line.
 */
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
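As a worked example, assuming a 64-byte L1 cache line (LG_CACHELINE == 6) and 8-byte pointers (LG_SIZEOF_PTR == 3): LG_CKH_BUCKET_CELLS = 6 - 3 - 1 = 2, so a bucket holds 4 cells; since each ckhc_t cell is a key/data pointer pair (16 bytes), the 4 cells fill exactly one 64-byte line.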
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
/* Typedefs to allow easy function pointer passing. */
|
||||
typedef void ckh_hash_t (const void *, size_t[2]);
|
||||
typedef bool ckh_keycomp_t (const void *, const void *);
|
||||
|
||||
/* Hash table cell. */
|
||||
struct ckhc_s {
|
||||
const void *key;
|
||||
const void *data;
|
||||
};
|
||||
typedef struct {
|
||||
const void *key;
|
||||
const void *data;
|
||||
} ckhc_t;
|
||||
|
||||
struct ckh_s {
|
||||
/* The hash table itself. */
|
||||
typedef struct {
|
||||
#ifdef CKH_COUNT
|
||||
/* Counters used to get an idea of performance. */
|
||||
uint64_t ngrows;
|
||||
uint64_t nshrinks;
|
||||
uint64_t nshrinkfails;
|
||||
uint64_t ninserts;
|
||||
uint64_t nrelocs;
|
||||
uint64_t ngrows;
|
||||
uint64_t nshrinks;
|
||||
uint64_t nshrinkfails;
|
||||
uint64_t ninserts;
|
||||
uint64_t nrelocs;
|
||||
#endif
|
||||
|
||||
/* Used for pseudo-random number generation. */
|
||||
#define CKH_A 1103515241
|
||||
#define CKH_C 12347
|
||||
uint32_t prng_state;
|
||||
uint64_t prng_state;
|
||||
|
||||
/* Total number of items. */
|
||||
size_t count;
|
||||
size_t count;
|
||||
|
||||
/*
|
||||
* Minimum and current number of hash table buckets. There are
|
||||
* 2^LG_CKH_BUCKET_CELLS cells per bucket.
|
||||
*/
|
||||
unsigned lg_minbuckets;
|
||||
unsigned lg_curbuckets;
|
||||
unsigned lg_minbuckets;
|
||||
unsigned lg_curbuckets;
|
||||
|
||||
/* Hash and comparison functions. */
|
||||
ckh_hash_t *hash;
|
||||
ckh_keycomp_t *keycomp;
|
||||
ckh_hash_t *hash;
|
||||
ckh_keycomp_t *keycomp;
|
||||
|
||||
/* Hash table with 2^lg_curbuckets buckets. */
|
||||
ckhc_t *tab;
|
||||
};
|
||||
ckhc_t *tab;
|
||||
} ckh_t;
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
/* BEGIN PUBLIC API */
|
||||
/******************************************************************************/
|
||||
|
||||
bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
|
||||
/* Lifetime management. Minitems is the initial capacity. */
|
||||
bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
|
||||
ckh_keycomp_t *keycomp);
|
||||
void ckh_delete(ckh_t *ckh);
|
||||
size_t ckh_count(ckh_t *ckh);
|
||||
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
|
||||
bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
|
||||
bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
|
||||
void ckh_delete(tsd_t *tsd, ckh_t *ckh);
|
||||
|
||||
/* Get the number of elements in the set. */
|
||||
size_t ckh_count(ckh_t *ckh);
|
||||
|
||||
/*
 * To iterate over the elements in the table, initialize *tabind to 0 and call
 * this function until it returns true. Each call that returns false will
 * update *key and *data to the next element in the table, assuming the pointers
 * are non-NULL.
 */
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
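A minimal iteration sketch following that contract (assumes an already-initialized ckh_t named table; illustration only, not part of the header):

	size_t tabind = 0;
	void *key, *data;

	/* Each false return yields the next key/data pair; true means done. */
	while (!ckh_iter(&table, &tabind, &key, &data)) {
		/* ... use key and data ... */
	}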
|
||||
|
||||
/*
|
||||
* Basic hash table operations -- insert, removal, lookup. For ckh_remove and
|
||||
* ckh_search, key or data can be NULL. The hash-table only stores pointers to
|
||||
* the key and value, and doesn't do any lifetime management.
|
||||
*/
|
||||
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
|
||||
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
|
||||
void **data);
|
||||
bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
|
||||
void ckh_string_hash(const void *key, size_t r_hash[2]);
|
||||
bool ckh_string_keycomp(const void *k1, const void *k2);
|
||||
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
|
||||
bool ckh_pointer_keycomp(const void *k1, const void *k2);
|
||||
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
/* Some useful hash and comparison functions for strings and pointers. */
|
||||
void ckh_string_hash(const void *key, size_t r_hash[2]);
|
||||
bool ckh_string_keycomp(const void *k1, const void *k2);
|
||||
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
|
||||
bool ckh_pointer_keycomp(const void *k1, const void *k2);
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
#endif /* JEMALLOC_INTERNAL_CKH_H */
|
||||
|
||||
167
deps/jemalloc/include/jemalloc/internal/ctl.h
vendored
@@ -1,87 +1,106 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
#ifndef JEMALLOC_INTERNAL_CTL_H
|
||||
#define JEMALLOC_INTERNAL_CTL_H
|
||||
|
||||
typedef struct ctl_node_s ctl_node_t;
|
||||
typedef struct ctl_named_node_s ctl_named_node_t;
|
||||
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
|
||||
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
|
||||
typedef struct ctl_stats_s ctl_stats_t;
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/malloc_io.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/size_classes.h"
|
||||
#include "jemalloc/internal/stats.h"
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
/* Maximum ctl tree depth. */
|
||||
#define CTL_MAX_DEPTH 7
|
||||
|
||||
struct ctl_node_s {
|
||||
bool named;
|
||||
};
|
||||
typedef struct ctl_node_s {
|
||||
bool named;
|
||||
} ctl_node_t;
|
||||
|
||||
struct ctl_named_node_s {
|
||||
struct ctl_node_s node;
|
||||
const char *name;
|
||||
typedef struct ctl_named_node_s {
|
||||
ctl_node_t node;
|
||||
const char *name;
|
||||
/* If (nchildren == 0), this is a terminal node. */
|
||||
unsigned nchildren;
|
||||
const ctl_node_t *children;
|
||||
int (*ctl)(const size_t *, size_t, void *, size_t *,
|
||||
void *, size_t);
|
||||
};
|
||||
size_t nchildren;
|
||||
const ctl_node_t *children;
|
||||
int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
|
||||
size_t);
|
||||
} ctl_named_node_t;
|
||||
|
||||
struct ctl_indexed_node_s {
|
||||
struct ctl_node_s node;
|
||||
const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
|
||||
};
|
||||
typedef struct ctl_indexed_node_s {
|
||||
struct ctl_node_s node;
|
||||
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
|
||||
size_t);
|
||||
} ctl_indexed_node_t;
|
||||
|
||||
struct ctl_arena_stats_s {
|
||||
bool initialized;
|
||||
unsigned nthreads;
|
||||
const char *dss;
|
||||
size_t pactive;
|
||||
size_t pdirty;
|
||||
arena_stats_t astats;
|
||||
typedef struct ctl_arena_stats_s {
|
||||
arena_stats_t astats;
|
||||
|
||||
/* Aggregate stats for small size classes, based on bin stats. */
|
||||
size_t allocated_small;
|
||||
uint64_t nmalloc_small;
|
||||
uint64_t ndalloc_small;
|
||||
uint64_t nrequests_small;
|
||||
size_t allocated_small;
|
||||
uint64_t nmalloc_small;
|
||||
uint64_t ndalloc_small;
|
||||
uint64_t nrequests_small;
|
||||
|
||||
malloc_bin_stats_t bstats[NBINS];
|
||||
malloc_large_stats_t *lstats; /* nlclasses elements. */
|
||||
malloc_bin_stats_t bstats[NBINS];
|
||||
malloc_large_stats_t lstats[NSIZES - NBINS];
|
||||
} ctl_arena_stats_t;
|
||||
|
||||
typedef struct ctl_stats_s {
|
||||
size_t allocated;
|
||||
size_t active;
|
||||
size_t metadata;
|
||||
size_t resident;
|
||||
size_t mapped;
|
||||
size_t retained;
|
||||
|
||||
background_thread_stats_t background_thread;
|
||||
mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
|
||||
} ctl_stats_t;
|
||||
|
||||
typedef struct ctl_arena_s ctl_arena_t;
|
||||
struct ctl_arena_s {
|
||||
unsigned arena_ind;
|
||||
bool initialized;
|
||||
ql_elm(ctl_arena_t) destroyed_link;
|
||||
|
||||
/* Basic stats, supported even if !config_stats. */
|
||||
unsigned nthreads;
|
||||
const char *dss;
|
||||
ssize_t dirty_decay_ms;
|
||||
ssize_t muzzy_decay_ms;
|
||||
size_t pactive;
|
||||
size_t pdirty;
|
||||
size_t pmuzzy;
|
||||
|
||||
/* NULL if !config_stats. */
|
||||
ctl_arena_stats_t *astats;
|
||||
};
|
||||
|
||||
struct ctl_stats_s {
|
||||
size_t allocated;
|
||||
size_t active;
|
||||
size_t mapped;
|
||||
struct {
|
||||
size_t current; /* stats_chunks.curchunks */
|
||||
uint64_t total; /* stats_chunks.nchunks */
|
||||
size_t high; /* stats_chunks.highchunks */
|
||||
} chunks;
|
||||
struct {
|
||||
size_t allocated; /* huge_allocated */
|
||||
uint64_t nmalloc; /* huge_nmalloc */
|
||||
uint64_t ndalloc; /* huge_ndalloc */
|
||||
} huge;
|
||||
unsigned narenas;
|
||||
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
|
||||
};
|
||||
typedef struct ctl_arenas_s {
|
||||
uint64_t epoch;
|
||||
unsigned narenas;
|
||||
ql_head(ctl_arena_t) destroyed;
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
/*
|
||||
* Element 0 corresponds to merged stats for extant arenas (accessed via
|
||||
* MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
|
||||
* destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
|
||||
* remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
|
||||
*/
|
||||
ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
|
||||
} ctl_arenas_t;
|
||||
|
||||
int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
|
||||
size_t newlen);
|
||||
int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
|
||||
|
||||
int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
|
||||
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
|
||||
void *newp, size_t newlen);
|
||||
bool ctl_boot(void);
|
||||
void ctl_prefork(void);
|
||||
void ctl_postfork_parent(void);
|
||||
void ctl_postfork_child(void);
|
||||
int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
|
||||
|
||||
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
|
||||
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
||||
size_t *oldlenp, void *newp, size_t newlen);
|
||||
bool ctl_boot(void);
|
||||
void ctl_prefork(tsdn_t *tsdn);
|
||||
void ctl_postfork_parent(tsdn_t *tsdn);
|
||||
void ctl_postfork_child(tsdn_t *tsdn);
|
||||
|
||||
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
|
||||
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
|
||||
!= 0) { \
|
||||
malloc_printf( \
|
||||
@@ -91,7 +110,7 @@ void ctl_postfork_child(void);
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define xmallctlnametomib(name, mibp, miblenp) do { \
|
||||
#define xmallctlnametomib(name, mibp, miblenp) do { \
|
||||
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
|
||||
malloc_printf("<jemalloc>: Failure in " \
|
||||
"xmallctlnametomib(\"%s\", ...)\n", name); \
|
||||
@@ -99,7 +118,7 @@ void ctl_postfork_child(void);
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
|
||||
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
|
||||
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
|
||||
newlen) != 0) { \
|
||||
malloc_write( \
|
||||
@@ -108,10 +127,4 @@ void ctl_postfork_child(void);
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_CTL_H */
|
||||
|
||||
139
deps/jemalloc/include/jemalloc/internal/hash.h
vendored
@@ -1,92 +1,76 @@
|
||||
#ifndef JEMALLOC_INTERNAL_HASH_H
|
||||
#define JEMALLOC_INTERNAL_HASH_H
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
/*
 * The following hash function is based on MurmurHash3, placed into the public
 * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for
 * domain by Austin Appleby. See https://github.com/aappleby/smhasher for
 * details.
 */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
uint32_t	hash_x86_32(const void *key, int len, uint32_t seed);
void	hash_x86_128(const void *key, const int len, uint32_t seed,
    uint64_t r_out[2]);
void	hash_x64_128(const void *key, const int len, const uint32_t seed,
    uint64_t r_out[2]);
void	hash(const void *key, size_t len, const uint32_t seed,
    size_t r_hash[2]);
#endif
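A minimal usage sketch for the top-level hash() entry point; the key string and seed value here are made up for illustration:

	const char *key = "example key";
	size_t r_hash[2];

	/* Fills r_hash with the two halves of the 128-bit MurmurHash3 result. */
	hash(key, strlen(key), 0x2a6e3b9d, r_hash);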
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
|
||||
/******************************************************************************/
|
||||
/* Internal implementation. */
|
||||
JEMALLOC_INLINE uint32_t
|
||||
hash_rotl_32(uint32_t x, int8_t r)
|
||||
{
|
||||
|
||||
return (x << r) | (x >> (32 - r));
|
||||
static inline uint32_t
|
||||
hash_rotl_32(uint32_t x, int8_t r) {
|
||||
return ((x << r) | (x >> (32 - r)));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
hash_rotl_64(uint64_t x, int8_t r)
|
||||
{
|
||||
return (x << r) | (x >> (64 - r));
|
||||
static inline uint64_t
|
||||
hash_rotl_64(uint64_t x, int8_t r) {
|
||||
return ((x << r) | (x >> (64 - r)));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
hash_get_block_32(const uint32_t *p, int i)
|
||||
{
|
||||
static inline uint32_t
|
||||
hash_get_block_32(const uint32_t *p, int i) {
|
||||
/* Handle unaligned read. */
|
||||
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
|
||||
uint32_t ret;
|
||||
|
||||
return (p[i]);
|
||||
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
|
||||
return ret;
|
||||
}
|
||||
|
||||
return p[i];
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
hash_get_block_64(const uint64_t *p, int i)
|
||||
{
|
||||
static inline uint64_t
|
||||
hash_get_block_64(const uint64_t *p, int i) {
|
||||
/* Handle unaligned read. */
|
||||
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
|
||||
uint64_t ret;
|
||||
|
||||
return (p[i]);
|
||||
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
|
||||
return ret;
|
||||
}
|
||||
|
||||
return p[i];
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
hash_fmix_32(uint32_t h)
|
||||
{
|
||||
|
||||
static inline uint32_t
|
||||
hash_fmix_32(uint32_t h) {
|
||||
h ^= h >> 16;
|
||||
h *= 0x85ebca6b;
|
||||
h ^= h >> 13;
|
||||
h *= 0xc2b2ae35;
|
||||
h ^= h >> 16;
|
||||
|
||||
return (h);
|
||||
return h;
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
hash_fmix_64(uint64_t k)
|
||||
{
|
||||
|
||||
static inline uint64_t
|
||||
hash_fmix_64(uint64_t k) {
|
||||
k ^= k >> 33;
|
||||
k *= QU(0xff51afd7ed558ccdLLU);
|
||||
k *= KQU(0xff51afd7ed558ccd);
|
||||
k ^= k >> 33;
|
||||
k *= QU(0xc4ceb9fe1a85ec53LLU);
|
||||
k *= KQU(0xc4ceb9fe1a85ec53);
|
||||
k ^= k >> 33;
|
||||
|
||||
return (k);
|
||||
return k;
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
hash_x86_32(const void *key, int len, uint32_t seed)
|
||||
{
|
||||
static inline uint32_t
|
||||
hash_x86_32(const void *key, int len, uint32_t seed) {
|
||||
const uint8_t *data = (const uint8_t *) key;
|
||||
const int nblocks = len / 4;
|
||||
|
||||
@@ -132,13 +116,12 @@ hash_x86_32(const void *key, int len, uint32_t seed)
|
||||
|
||||
h1 = hash_fmix_32(h1);
|
||||
|
||||
return (h1);
|
||||
return h1;
|
||||
}
|
||||
|
||||
UNUSED JEMALLOC_INLINE void
|
||||
UNUSED static inline void
|
||||
hash_x86_128(const void *key, const int len, uint32_t seed,
|
||||
uint64_t r_out[2])
|
||||
{
|
||||
uint64_t r_out[2]) {
|
||||
const uint8_t * data = (const uint8_t *) key;
|
||||
const int nblocks = len / 16;
|
||||
|
||||
@@ -237,18 +220,17 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
|
||||
r_out[1] = (((uint64_t) h4) << 32) | h3;
|
||||
}
|
||||
|
||||
UNUSED JEMALLOC_INLINE void
|
||||
UNUSED static inline void
|
||||
hash_x64_128(const void *key, const int len, const uint32_t seed,
|
||||
uint64_t r_out[2])
|
||||
{
|
||||
uint64_t r_out[2]) {
|
||||
const uint8_t *data = (const uint8_t *) key;
|
||||
const int nblocks = len / 16;
|
||||
|
||||
uint64_t h1 = seed;
|
||||
uint64_t h2 = seed;
|
||||
|
||||
const uint64_t c1 = QU(0x87c37b91114253d5LLU);
|
||||
const uint64_t c2 = QU(0x4cf5ad432745937fLLU);
|
||||
const uint64_t c1 = KQU(0x87c37b91114253d5);
|
||||
const uint64_t c2 = KQU(0x4cf5ad432745937f);
|
||||
|
||||
/* body */
|
||||
{
|
||||
@@ -317,19 +299,20 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
|
||||
|
||||
/******************************************************************************/
|
||||
/* API. */
|
||||
JEMALLOC_INLINE void
|
||||
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
|
||||
{
|
||||
static inline void
|
||||
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
|
||||
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
|
||||
|
||||
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
|
||||
hash_x64_128(key, len, seed, (uint64_t *)r_hash);
|
||||
hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
|
||||
#else
|
||||
uint64_t hashes[2];
|
||||
hash_x86_128(key, len, seed, hashes);
|
||||
r_hash[0] = (size_t)hashes[0];
|
||||
r_hash[1] = (size_t)hashes[1];
|
||||
{
|
||||
uint64_t hashes[2];
|
||||
hash_x86_128(key, (int)len, seed, hashes);
|
||||
r_hash[0] = (size_t)hashes[0];
|
||||
r_hash[1] = (size_t)hashes[1];
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
#endif /* JEMALLOC_INTERNAL_HASH_H */
|
||||
|
||||
289
deps/jemalloc/include/jemalloc/internal/mutex.h
vendored
@@ -1,45 +1,123 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
#ifndef JEMALLOC_INTERNAL_MUTEX_H
|
||||
#define JEMALLOC_INTERNAL_MUTEX_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/tsd.h"
|
||||
#include "jemalloc/internal/witness.h"
|
||||
|
||||
typedef enum {
|
||||
/* Can only acquire one mutex of a given witness rank at a time. */
|
||||
malloc_mutex_rank_exclusive,
|
||||
/*
|
||||
* Can acquire multiple mutexes of the same witness rank, but in
|
||||
* address-ascending order only.
|
||||
*/
|
||||
malloc_mutex_address_ordered
|
||||
} malloc_mutex_lock_order_t;
|
||||
|
||||
typedef struct malloc_mutex_s malloc_mutex_t;
|
||||
|
||||
#ifdef _WIN32
|
||||
# define MALLOC_MUTEX_INITIALIZER
|
||||
#elif (defined(JEMALLOC_OSSPIN))
|
||||
# define MALLOC_MUTEX_INITIALIZER {0}
|
||||
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
||||
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
|
||||
#else
|
||||
# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
|
||||
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
|
||||
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
|
||||
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
|
||||
# else
|
||||
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
|
||||
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
struct malloc_mutex_s {
|
||||
union {
|
||||
struct {
|
||||
/*
|
||||
* prof_data is defined first to reduce cacheline
|
||||
* bouncing: the data is not touched by the mutex holder
|
||||
* during unlocking, while might be modified by
|
||||
* contenders. Having it before the mutex itself could
|
||||
* avoid prefetching a modified cacheline (for the
|
||||
* unlocking thread).
|
||||
*/
|
||||
mutex_prof_data_t prof_data;
|
||||
#ifdef _WIN32
|
||||
CRITICAL_SECTION lock;
|
||||
# if _WIN32_WINNT >= 0x0600
|
||||
SRWLOCK lock;
|
||||
# else
|
||||
CRITICAL_SECTION lock;
|
||||
# endif
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
os_unfair_lock lock;
|
||||
#elif (defined(JEMALLOC_OSSPIN))
|
||||
OSSpinLock lock;
|
||||
OSSpinLock lock;
|
||||
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
||||
pthread_mutex_t lock;
|
||||
malloc_mutex_t *postponed_next;
|
||||
pthread_mutex_t lock;
|
||||
malloc_mutex_t *postponed_next;
|
||||
#else
|
||||
pthread_mutex_t lock;
|
||||
pthread_mutex_t lock;
|
||||
#endif
|
||||
};
|
||||
/*
|
||||
* We only touch witness when configured w/ debug. However we
|
||||
* keep the field in a union when !debug so that we don't have
|
||||
* to pollute the code base with #ifdefs, while avoid paying the
|
||||
* memory cost.
|
||||
*/
|
||||
#if !defined(JEMALLOC_DEBUG)
|
||||
witness_t witness;
|
||||
malloc_mutex_lock_order_t lock_order;
|
||||
#endif
|
||||
};
|
||||
|
||||
#if defined(JEMALLOC_DEBUG)
|
||||
witness_t witness;
|
||||
malloc_mutex_lock_order_t lock_order;
|
||||
#endif
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
/*
|
||||
* Based on benchmark results, a fixed spin with this amount of retries works
|
||||
* well for our critical sections.
|
||||
*/
|
||||
#define MALLOC_MUTEX_MAX_SPIN 250
|
||||
|
||||
#ifdef _WIN32
|
||||
# if _WIN32_WINNT >= 0x0600
|
||||
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
|
||||
# else
|
||||
# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
|
||||
# endif
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
|
||||
#elif (defined(JEMALLOC_OSSPIN))
|
||||
# define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
|
||||
#else
|
||||
# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
|
||||
#endif
|
||||
|
||||
#define LOCK_PROF_DATA_INITIALIZER \
|
||||
{NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
|
||||
ATOMIC_INIT(0), 0, NULL, 0}
|
||||
|
||||
#ifdef _WIN32
|
||||
# define MALLOC_MUTEX_INITIALIZER
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
#elif (defined(JEMALLOC_OSSPIN))
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, 0}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
#else
|
||||
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_LAZY_LOCK
|
||||
extern bool isthreaded;
|
||||
@@ -48,52 +126,123 @@ extern bool isthreaded;
|
||||
# define isthreaded true
|
||||
#endif
|
||||
|
||||
bool malloc_mutex_init(malloc_mutex_t *mutex);
|
||||
void malloc_mutex_prefork(malloc_mutex_t *mutex);
|
||||
void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
|
||||
void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
|
||||
bool mutex_boot(void);
|
||||
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
|
||||
witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
|
||||
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
||||
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
||||
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
||||
bool malloc_mutex_boot(void);
|
||||
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
void malloc_mutex_lock(malloc_mutex_t *mutex);
|
||||
void malloc_mutex_unlock(malloc_mutex_t *mutex);
|
||||
#endif
|
||||
static inline void
|
||||
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
|
||||
MALLOC_MUTEX_LOCK(mutex);
|
||||
}
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
|
||||
JEMALLOC_INLINE void
|
||||
malloc_mutex_lock(malloc_mutex_t *mutex)
|
||||
{
|
||||
static inline bool
|
||||
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
|
||||
return MALLOC_MUTEX_TRYLOCK(mutex);
|
||||
}
|
||||
|
||||
if (isthreaded) {
|
||||
#ifdef _WIN32
|
||||
EnterCriticalSection(&mutex->lock);
|
||||
#elif (defined(JEMALLOC_OSSPIN))
|
||||
OSSpinLockLock(&mutex->lock);
|
||||
#else
|
||||
pthread_mutex_lock(&mutex->lock);
|
||||
#endif
|
||||
static inline void
|
||||
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
if (config_stats) {
|
||||
mutex_prof_data_t *data = &mutex->prof_data;
|
||||
data->n_lock_ops++;
|
||||
if (data->prev_owner != tsdn) {
|
||||
data->prev_owner = tsdn;
|
||||
data->n_owner_switches++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
malloc_mutex_unlock(malloc_mutex_t *mutex)
|
||||
{
|
||||
|
||||
/* Trylock: return false if the lock is successfully acquired. */
|
||||
static inline bool
|
||||
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
if (isthreaded) {
|
||||
#ifdef _WIN32
|
||||
LeaveCriticalSection(&mutex->lock);
|
||||
#elif (defined(JEMALLOC_OSSPIN))
|
||||
OSSpinLockUnlock(&mutex->lock);
|
||||
#else
|
||||
pthread_mutex_unlock(&mutex->lock);
|
||||
#endif
|
||||
if (malloc_mutex_trylock_final(mutex)) {
|
||||
return true;
|
||||
}
|
||||
mutex_owner_stats_update(tsdn, mutex);
|
||||
}
|
||||
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
|
||||
return false;
|
||||
}
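A small usage sketch for the trylock path (assumes a valid tsdn_t *tsdn and an initialized malloc_mutex_t named mutex; illustration only). Note the inverted convention: false means the lock was acquired.

	if (!malloc_mutex_trylock(tsdn, &mutex)) {
		/* Lock acquired without blocking; do the work, then release. */
		malloc_mutex_unlock(tsdn, &mutex);
	} else {
		/* Lock is busy; fall back to malloc_mutex_lock() or retry later. */
	}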
|
||||
|
||||
/* Aggregate lock prof data. */
|
||||
static inline void
|
||||
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
|
||||
nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
|
||||
if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
|
||||
nstime_copy(&sum->max_wait_time, &data->max_wait_time);
|
||||
}
|
||||
|
||||
sum->n_wait_times += data->n_wait_times;
|
||||
sum->n_spin_acquired += data->n_spin_acquired;
|
||||
|
||||
if (sum->max_n_thds < data->max_n_thds) {
|
||||
sum->max_n_thds = data->max_n_thds;
|
||||
}
|
||||
uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
|
||||
ATOMIC_RELAXED);
|
||||
uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
|
||||
&data->n_waiting_thds, ATOMIC_RELAXED);
|
||||
atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
|
||||
ATOMIC_RELAXED);
|
||||
sum->n_owner_switches += data->n_owner_switches;
|
||||
sum->n_lock_ops += data->n_lock_ops;
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
if (isthreaded) {
|
||||
if (malloc_mutex_trylock_final(mutex)) {
|
||||
malloc_mutex_lock_slow(mutex);
|
||||
}
|
||||
mutex_owner_stats_update(tsdn, mutex);
|
||||
}
|
||||
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
if (isthreaded) {
|
||||
MALLOC_MUTEX_UNLOCK(mutex);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
static inline void
|
||||
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
}
|
||||
|
||||
/* Copy the prof data from mutex for processing. */
|
||||
static inline void
|
||||
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
|
||||
malloc_mutex_t *mutex) {
|
||||
mutex_prof_data_t *source = &mutex->prof_data;
|
||||
/* Can only read holding the mutex. */
|
||||
malloc_mutex_assert_owner(tsdn, mutex);
|
||||
|
||||
/*
|
||||
* Not *really* allowed (we shouldn't be doing non-atomic loads of
|
||||
* atomic data), but the mutex protection makes this safe, and writing
|
||||
* a member-for-member copy is tedious for this situation.
|
||||
*/
|
||||
*data = *source;
|
||||
/* n_wait_thds is not reported (modified w/o locking). */
|
||||
atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_MUTEX_H */
|
||||
|
||||
@@ -1,147 +1,199 @@
|
||||
#define a0calloc JEMALLOC_N(a0calloc)
|
||||
#define a0free JEMALLOC_N(a0free)
|
||||
#define a0dalloc JEMALLOC_N(a0dalloc)
|
||||
#define a0get JEMALLOC_N(a0get)
|
||||
#define a0malloc JEMALLOC_N(a0malloc)
|
||||
#define arena_aalloc JEMALLOC_N(arena_aalloc)
|
||||
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
|
||||
#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
|
||||
#define arena_bin_index JEMALLOC_N(arena_bin_index)
|
||||
#define arena_bin_info JEMALLOC_N(arena_bin_info)
|
||||
#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const)
|
||||
#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable)
|
||||
#define arena_boot JEMALLOC_N(arena_boot)
|
||||
#define arena_choose JEMALLOC_N(arena_choose)
|
||||
#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
|
||||
#define arena_choose_impl JEMALLOC_N(arena_choose_impl)
|
||||
#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge)
|
||||
#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert)
|
||||
#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove)
|
||||
#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge)
|
||||
#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand)
|
||||
#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink)
|
||||
#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar)
|
||||
#define arena_cleanup JEMALLOC_N(arena_cleanup)
|
||||
#define arena_dalloc JEMALLOC_N(arena_dalloc)
|
||||
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
|
||||
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
|
||||
#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
|
||||
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
|
||||
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
|
||||
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
|
||||
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
|
||||
#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked)
|
||||
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
|
||||
#define arena_decay_tick JEMALLOC_N(arena_decay_tick)
|
||||
#define arena_decay_ticks JEMALLOC_N(arena_decay_ticks)
|
||||
#define arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get)
|
||||
#define arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set)
|
||||
#define arena_decay_time_get JEMALLOC_N(arena_decay_time_get)
|
||||
#define arena_decay_time_set JEMALLOC_N(arena_decay_time_set)
|
||||
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
|
||||
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
|
||||
#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
|
||||
#define arena_get JEMALLOC_N(arena_get)
|
||||
#define arena_ichoose JEMALLOC_N(arena_ichoose)
|
||||
#define arena_init JEMALLOC_N(arena_init)
|
||||
#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get)
|
||||
#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set)
|
||||
#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get)
|
||||
#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set)
|
||||
#define arena_malloc JEMALLOC_N(arena_malloc)
|
||||
#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard)
|
||||
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
|
||||
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
|
||||
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
|
||||
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
|
||||
#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get)
|
||||
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
|
||||
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
|
||||
#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set)
|
||||
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
|
||||
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
|
||||
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
|
||||
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
|
||||
#define arena_mapbits_size_decode JEMALLOC_N(arena_mapbits_size_decode)
|
||||
#define arena_mapbits_size_encode JEMALLOC_N(arena_mapbits_size_encode)
|
||||
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
|
||||
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
|
||||
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
|
||||
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
|
||||
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
|
||||
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
|
||||
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
|
||||
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
|
||||
#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const)
|
||||
#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable)
|
||||
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
|
||||
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
|
||||
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
|
||||
#define arena_maxclass JEMALLOC_N(arena_maxclass)
|
||||
#define arena_maxrun JEMALLOC_N(arena_maxrun)
|
||||
#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge)
|
||||
#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add)
|
||||
#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get)
|
||||
#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub)
|
||||
#define arena_migrate JEMALLOC_N(arena_migrate)
|
||||
#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const)
|
||||
#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable)
|
||||
#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind)
|
||||
#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages)
|
||||
#define arena_new JEMALLOC_N(arena_new)
|
||||
#define arena_node_alloc JEMALLOC_N(arena_node_alloc)
|
||||
#define arena_node_dalloc JEMALLOC_N(arena_node_dalloc)
|
||||
#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec)
|
||||
#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get)
|
||||
#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc)
|
||||
#define arena_palloc JEMALLOC_N(arena_palloc)
|
||||
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
|
||||
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
|
||||
#define arena_prefork JEMALLOC_N(arena_prefork)
|
||||
#define arena_prefork0 JEMALLOC_N(arena_prefork0)
|
||||
#define arena_prefork1 JEMALLOC_N(arena_prefork1)
|
||||
#define arena_prefork2 JEMALLOC_N(arena_prefork2)
|
||||
#define arena_prefork3 JEMALLOC_N(arena_prefork3)
|
||||
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
|
||||
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
|
||||
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
|
||||
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
|
||||
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
|
||||
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
|
||||
#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get)
|
||||
#define arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset)
|
||||
#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set)
|
||||
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
|
||||
#define arena_purge_all JEMALLOC_N(arena_purge_all)
|
||||
#define arena_purge JEMALLOC_N(arena_purge)
|
||||
#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
|
||||
#define arena_ralloc JEMALLOC_N(arena_ralloc)
|
||||
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
|
||||
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
|
||||
#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm)
|
||||
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
|
||||
#define arena_reset JEMALLOC_N(arena_reset)
|
||||
#define arena_run_regind JEMALLOC_N(arena_run_regind)
|
||||
#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm)
|
||||
#define arena_salloc JEMALLOC_N(arena_salloc)
|
||||
#define arena_sdalloc JEMALLOC_N(arena_sdalloc)
|
||||
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
|
||||
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
|
||||
#define arena_tdata_get JEMALLOC_N(arena_tdata_get)
|
||||
#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard)
|
||||
#define arenas JEMALLOC_N(arenas)
|
||||
#define arenas_booted JEMALLOC_N(arenas_booted)
|
||||
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
|
||||
#define arenas_extend JEMALLOC_N(arenas_extend)
|
||||
#define arenas_initialized JEMALLOC_N(arenas_initialized)
|
||||
#define arenas_lock JEMALLOC_N(arenas_lock)
|
||||
#define arenas_tls JEMALLOC_N(arenas_tls)
|
||||
#define arenas_tsd JEMALLOC_N(arenas_tsd)
|
||||
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
|
||||
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
|
||||
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
|
||||
#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper)
|
||||
#define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head)
|
||||
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
|
||||
#define arenas_tdata_bypass_cleanup JEMALLOC_N(arenas_tdata_bypass_cleanup)
|
||||
#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup)
|
||||
#define atomic_add_p JEMALLOC_N(atomic_add_p)
|
||||
#define atomic_add_u JEMALLOC_N(atomic_add_u)
|
||||
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
|
||||
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
|
||||
#define atomic_add_z JEMALLOC_N(atomic_add_z)
|
||||
#define atomic_cas_p JEMALLOC_N(atomic_cas_p)
|
||||
#define atomic_cas_u JEMALLOC_N(atomic_cas_u)
|
||||
#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32)
|
||||
#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64)
|
||||
#define atomic_cas_z JEMALLOC_N(atomic_cas_z)
|
||||
#define atomic_sub_p JEMALLOC_N(atomic_sub_p)
|
||||
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
|
||||
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
|
||||
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
|
||||
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
|
||||
#define atomic_write_p JEMALLOC_N(atomic_write_p)
|
||||
#define atomic_write_u JEMALLOC_N(atomic_write_u)
|
||||
#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32)
|
||||
#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64)
|
||||
#define atomic_write_z JEMALLOC_N(atomic_write_z)
|
||||
#define base_alloc JEMALLOC_N(base_alloc)
|
||||
#define base_boot JEMALLOC_N(base_boot)
|
||||
#define base_calloc JEMALLOC_N(base_calloc)
|
||||
#define base_node_alloc JEMALLOC_N(base_node_alloc)
|
||||
#define base_node_dealloc JEMALLOC_N(base_node_dealloc)
|
||||
#define base_postfork_child JEMALLOC_N(base_postfork_child)
|
||||
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
|
||||
#define base_prefork JEMALLOC_N(base_prefork)
|
||||
#define base_stats_get JEMALLOC_N(base_stats_get)
|
||||
#define bitmap_full JEMALLOC_N(bitmap_full)
|
||||
#define bitmap_get JEMALLOC_N(bitmap_get)
|
||||
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
|
||||
#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups)
|
||||
#define bitmap_init JEMALLOC_N(bitmap_init)
|
||||
#define bitmap_set JEMALLOC_N(bitmap_set)
|
||||
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
|
||||
#define bitmap_size JEMALLOC_N(bitmap_size)
|
||||
#define bitmap_unset JEMALLOC_N(bitmap_unset)
|
||||
#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
|
||||
#define bootstrap_free JEMALLOC_N(bootstrap_free)
|
||||
#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
|
||||
#define bt_init JEMALLOC_N(bt_init)
|
||||
#define buferror JEMALLOC_N(buferror)
|
||||
#define choose_arena JEMALLOC_N(choose_arena)
|
||||
#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
|
||||
#define chunk_alloc JEMALLOC_N(chunk_alloc)
|
||||
#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base)
|
||||
#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache)
|
||||
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
|
||||
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
|
||||
#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper)
|
||||
#define chunk_boot JEMALLOC_N(chunk_boot)
|
||||
#define chunk_dealloc JEMALLOC_N(chunk_dealloc)
|
||||
#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
|
||||
#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache)
|
||||
#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap)
|
||||
#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper)
|
||||
#define chunk_deregister JEMALLOC_N(chunk_deregister)
|
||||
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
|
||||
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
|
||||
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
|
||||
#define chunk_dss_mergeable JEMALLOC_N(chunk_dss_mergeable)
|
||||
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
|
||||
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
|
||||
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
|
||||
#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default)
|
||||
#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get)
|
||||
#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set)
|
||||
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
|
||||
#define chunk_lookup JEMALLOC_N(chunk_lookup)
|
||||
#define chunk_npages JEMALLOC_N(chunk_npages)
|
||||
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
|
||||
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
|
||||
#define chunk_prefork JEMALLOC_N(chunk_prefork)
|
||||
#define chunk_unmap JEMALLOC_N(chunk_unmap)
|
||||
#define chunks_mtx JEMALLOC_N(chunks_mtx)
|
||||
#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper)
|
||||
#define chunk_register JEMALLOC_N(chunk_register)
|
||||
#define chunks_rtree JEMALLOC_N(chunks_rtree)
|
||||
#define chunksize JEMALLOC_N(chunksize)
|
||||
#define chunksize_mask JEMALLOC_N(chunksize_mask)
|
||||
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
|
||||
#define ckh_count JEMALLOC_N(ckh_count)
|
||||
#define ckh_delete JEMALLOC_N(ckh_delete)
|
||||
#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
|
||||
#define ckh_insert JEMALLOC_N(ckh_insert)
|
||||
#define ckh_isearch JEMALLOC_N(ckh_isearch)
|
||||
#define ckh_iter JEMALLOC_N(ckh_iter)
|
||||
#define ckh_new JEMALLOC_N(ckh_new)
|
||||
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
|
||||
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
|
||||
#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
|
||||
#define ckh_remove JEMALLOC_N(ckh_remove)
|
||||
#define ckh_search JEMALLOC_N(ckh_search)
|
||||
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
|
||||
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
|
||||
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
|
||||
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
|
||||
#define ctl_boot JEMALLOC_N(ctl_boot)
|
||||
#define ctl_bymib JEMALLOC_N(ctl_bymib)
|
||||
#define ctl_byname JEMALLOC_N(ctl_byname)
|
||||
@@ -149,7 +201,33 @@
|
||||
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
|
||||
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
|
||||
#define ctl_prefork JEMALLOC_N(ctl_prefork)
|
||||
#define decay_ticker_get JEMALLOC_N(decay_ticker_get)
|
||||
#define dss_prec_names JEMALLOC_N(dss_prec_names)
|
||||
#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get)
|
||||
#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set)
|
||||
#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get)
|
||||
#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set)
|
||||
#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get)
|
||||
#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set)
|
||||
#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get)
|
||||
#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set)
|
||||
#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert)
|
||||
#define extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init)
|
||||
#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove)
|
||||
#define extent_node_init JEMALLOC_N(extent_node_init)
|
||||
#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get)
|
||||
#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set)
|
||||
#define extent_node_size_get JEMALLOC_N(extent_node_size_get)
|
||||
#define extent_node_size_set JEMALLOC_N(extent_node_size_set)
|
||||
#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get)
|
||||
#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set)
|
||||
#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get)
|
||||
#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set)
|
||||
#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil)
|
||||
#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor)
|
||||
#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy)
|
||||
#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse)
|
||||
#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty)
|
||||
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
|
||||
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
|
||||
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
|
||||
@@ -166,22 +244,31 @@
|
||||
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
|
||||
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
|
||||
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
|
||||
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
|
||||
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
|
||||
#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
|
||||
#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
|
||||
#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
|
||||
#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
|
||||
#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
|
||||
#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
|
||||
#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
|
||||
#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
|
||||
#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
|
||||
#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
|
||||
#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
|
||||
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
|
||||
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
|
||||
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
|
||||
#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy)
|
||||
#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse)
|
||||
#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty)
|
||||
#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first)
|
||||
#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert)
|
||||
#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter)
|
||||
#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse)
|
||||
#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start)
|
||||
#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last)
|
||||
#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new)
|
||||
#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next)
|
||||
#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch)
|
||||
#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev)
|
||||
#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch)
|
||||
#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove)
|
||||
#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter)
|
||||
#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse)
|
||||
#define extent_tree_szsnad_reverse_iter_start JEMALLOC_N(extent_tree_szsnad_reverse_iter_start)
|
||||
#define extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search)
|
||||
#define ffs_llu JEMALLOC_N(ffs_llu)
|
||||
#define ffs_lu JEMALLOC_N(ffs_lu)
|
||||
#define ffs_u JEMALLOC_N(ffs_u)
|
||||
#define ffs_u32 JEMALLOC_N(ffs_u32)
|
||||
#define ffs_u64 JEMALLOC_N(ffs_u64)
|
||||
#define ffs_zu JEMALLOC_N(ffs_zu)
|
||||
#define get_errno JEMALLOC_N(get_errno)
|
||||
#define hash JEMALLOC_N(hash)
|
||||
#define hash_fmix_32 JEMALLOC_N(hash_fmix_32)
|
||||
@@ -193,46 +280,51 @@
|
||||
#define hash_x64_128 JEMALLOC_N(hash_x64_128)
|
||||
#define hash_x86_128 JEMALLOC_N(hash_x86_128)
|
||||
#define hash_x86_32 JEMALLOC_N(hash_x86_32)
|
||||
#define huge_allocated JEMALLOC_N(huge_allocated)
|
||||
#define huge_boot JEMALLOC_N(huge_boot)
|
||||
#define huge_aalloc JEMALLOC_N(huge_aalloc)
|
||||
#define huge_dalloc JEMALLOC_N(huge_dalloc)
|
||||
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
|
||||
#define huge_dss_prec_get JEMALLOC_N(huge_dss_prec_get)
|
||||
#define huge_malloc JEMALLOC_N(huge_malloc)
|
||||
#define huge_mtx JEMALLOC_N(huge_mtx)
|
||||
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
|
||||
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
|
||||
#define huge_palloc JEMALLOC_N(huge_palloc)
|
||||
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
|
||||
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
|
||||
#define huge_prefork JEMALLOC_N(huge_prefork)
|
||||
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
|
||||
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
|
||||
#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get)
|
||||
#define huge_prof_tctx_reset JEMALLOC_N(huge_prof_tctx_reset)
|
||||
#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set)
|
||||
#define huge_ralloc JEMALLOC_N(huge_ralloc)
|
||||
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
|
||||
#define huge_salloc JEMALLOC_N(huge_salloc)
|
||||
#define iallocm JEMALLOC_N(iallocm)
|
||||
#define icalloc JEMALLOC_N(icalloc)
|
||||
#define icalloct JEMALLOC_N(icalloct)
|
||||
#define iaalloc JEMALLOC_N(iaalloc)
|
||||
#define ialloc JEMALLOC_N(ialloc)
|
||||
#define iallocztm JEMALLOC_N(iallocztm)
|
||||
#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
|
||||
#define idalloc JEMALLOC_N(idalloc)
|
||||
#define idalloct JEMALLOC_N(idalloct)
|
||||
#define imalloc JEMALLOC_N(imalloc)
|
||||
#define imalloct JEMALLOC_N(imalloct)
|
||||
#define idalloctm JEMALLOC_N(idalloctm)
|
||||
#define in_valgrind JEMALLOC_N(in_valgrind)
|
||||
#define index2size JEMALLOC_N(index2size)
|
||||
#define index2size_compute JEMALLOC_N(index2size_compute)
|
||||
#define index2size_lookup JEMALLOC_N(index2size_lookup)
|
||||
#define index2size_tab JEMALLOC_N(index2size_tab)
|
||||
#define ipalloc JEMALLOC_N(ipalloc)
|
||||
#define ipalloct JEMALLOC_N(ipalloct)
|
||||
#define ipallocztm JEMALLOC_N(ipallocztm)
|
||||
#define iqalloc JEMALLOC_N(iqalloc)
|
||||
#define iqalloct JEMALLOC_N(iqalloct)
|
||||
#define iralloc JEMALLOC_N(iralloc)
|
||||
#define iralloct JEMALLOC_N(iralloct)
|
||||
#define iralloct_realign JEMALLOC_N(iralloct_realign)
|
||||
#define isalloc JEMALLOC_N(isalloc)
|
||||
#define isdalloct JEMALLOC_N(isdalloct)
|
||||
#define isqalloc JEMALLOC_N(isqalloc)
|
||||
#define isthreaded JEMALLOC_N(isthreaded)
|
||||
#define ivsalloc JEMALLOC_N(ivsalloc)
|
||||
#define ixalloc JEMALLOC_N(ixalloc)
|
||||
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
|
||||
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
|
||||
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
|
||||
#define large_maxclass JEMALLOC_N(large_maxclass)
|
||||
#define lg_floor JEMALLOC_N(lg_floor)
|
||||
#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
|
||||
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
|
||||
#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner)
|
||||
#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner)
|
||||
#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
|
||||
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
|
||||
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
|
||||
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
|
||||
@@ -242,7 +334,8 @@
|
||||
#define malloc_printf JEMALLOC_N(malloc_printf)
|
||||
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
|
||||
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
|
||||
#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
|
||||
#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
|
||||
#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
|
||||
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
|
||||
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
|
||||
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
|
||||
@@ -251,16 +344,35 @@
|
||||
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
|
||||
#define malloc_write JEMALLOC_N(malloc_write)
|
||||
#define map_bias JEMALLOC_N(map_bias)
|
||||
#define map_misc_offset JEMALLOC_N(map_misc_offset)
|
||||
#define mb_write JEMALLOC_N(mb_write)
|
||||
#define mutex_boot JEMALLOC_N(mutex_boot)
|
||||
#define narenas_auto JEMALLOC_N(narenas_auto)
|
||||
#define narenas_total JEMALLOC_N(narenas_total)
|
||||
#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup)
|
||||
#define narenas_total_get JEMALLOC_N(narenas_total_get)
|
||||
#define ncpus JEMALLOC_N(ncpus)
|
||||
#define nhbins JEMALLOC_N(nhbins)
|
||||
#define nhclasses JEMALLOC_N(nhclasses)
|
||||
#define nlclasses JEMALLOC_N(nlclasses)
|
||||
#define nstime_add JEMALLOC_N(nstime_add)
|
||||
#define nstime_compare JEMALLOC_N(nstime_compare)
|
||||
#define nstime_copy JEMALLOC_N(nstime_copy)
|
||||
#define nstime_divide JEMALLOC_N(nstime_divide)
|
||||
#define nstime_idivide JEMALLOC_N(nstime_idivide)
|
||||
#define nstime_imultiply JEMALLOC_N(nstime_imultiply)
|
||||
#define nstime_init JEMALLOC_N(nstime_init)
|
||||
#define nstime_init2 JEMALLOC_N(nstime_init2)
|
||||
#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
|
||||
#define nstime_ns JEMALLOC_N(nstime_ns)
|
||||
#define nstime_nsec JEMALLOC_N(nstime_nsec)
|
||||
#define nstime_sec JEMALLOC_N(nstime_sec)
|
||||
#define nstime_subtract JEMALLOC_N(nstime_subtract)
|
||||
#define nstime_update JEMALLOC_N(nstime_update)
|
||||
#define opt_abort JEMALLOC_N(opt_abort)
|
||||
#define opt_decay_time JEMALLOC_N(opt_decay_time)
|
||||
#define opt_dss JEMALLOC_N(opt_dss)
|
||||
#define opt_junk JEMALLOC_N(opt_junk)
|
||||
#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
|
||||
#define opt_junk_free JEMALLOC_N(opt_junk_free)
|
||||
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
|
||||
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
|
||||
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
|
||||
@@ -274,140 +386,254 @@
|
||||
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
|
||||
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
|
||||
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
|
||||
#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
|
||||
#define opt_purge JEMALLOC_N(opt_purge)
|
||||
#define opt_quarantine JEMALLOC_N(opt_quarantine)
|
||||
#define opt_redzone JEMALLOC_N(opt_redzone)
|
||||
#define opt_stats_print JEMALLOC_N(opt_stats_print)
|
||||
#define opt_tcache JEMALLOC_N(opt_tcache)
|
||||
#define opt_thp JEMALLOC_N(opt_thp)
|
||||
#define opt_utrace JEMALLOC_N(opt_utrace)
|
||||
#define opt_valgrind JEMALLOC_N(opt_valgrind)
|
||||
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
|
||||
#define opt_zero JEMALLOC_N(opt_zero)
|
||||
#define p2rz JEMALLOC_N(p2rz)
|
||||
#define pages_boot JEMALLOC_N(pages_boot)
|
||||
#define pages_commit JEMALLOC_N(pages_commit)
|
||||
#define pages_decommit JEMALLOC_N(pages_decommit)
|
||||
#define pages_huge JEMALLOC_N(pages_huge)
|
||||
#define pages_map JEMALLOC_N(pages_map)
|
||||
#define pages_nohuge JEMALLOC_N(pages_nohuge)
|
||||
#define pages_purge JEMALLOC_N(pages_purge)
|
||||
#define pow2_ceil JEMALLOC_N(pow2_ceil)
|
||||
#define pages_trim JEMALLOC_N(pages_trim)
|
||||
#define pages_unmap JEMALLOC_N(pages_unmap)
|
||||
#define pind2sz JEMALLOC_N(pind2sz)
|
||||
#define pind2sz_compute JEMALLOC_N(pind2sz_compute)
|
||||
#define pind2sz_lookup JEMALLOC_N(pind2sz_lookup)
|
||||
#define pind2sz_tab JEMALLOC_N(pind2sz_tab)
|
||||
#define pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32)
|
||||
#define pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64)
|
||||
#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu)
|
||||
#define prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32)
|
||||
#define prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64)
|
||||
#define prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu)
|
||||
#define prng_range_u32 JEMALLOC_N(prng_range_u32)
|
||||
#define prng_range_u64 JEMALLOC_N(prng_range_u64)
|
||||
#define prng_range_zu JEMALLOC_N(prng_range_zu)
|
||||
#define prng_state_next_u32 JEMALLOC_N(prng_state_next_u32)
|
||||
#define prng_state_next_u64 JEMALLOC_N(prng_state_next_u64)
|
||||
#define prng_state_next_zu JEMALLOC_N(prng_state_next_zu)
|
||||
#define prof_active JEMALLOC_N(prof_active)
|
||||
#define prof_active_get JEMALLOC_N(prof_active_get)
|
||||
#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked)
|
||||
#define prof_active_set JEMALLOC_N(prof_active_set)
|
||||
#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep)
|
||||
#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
|
||||
#define prof_backtrace JEMALLOC_N(prof_backtrace)
|
||||
#define prof_boot0 JEMALLOC_N(prof_boot0)
|
||||
#define prof_boot1 JEMALLOC_N(prof_boot1)
|
||||
#define prof_boot2 JEMALLOC_N(prof_boot2)
|
||||
#define prof_bt_count JEMALLOC_N(prof_bt_count)
|
||||
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
|
||||
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
|
||||
#define prof_dump_header JEMALLOC_N(prof_dump_header)
|
||||
#define prof_dump_open JEMALLOC_N(prof_dump_open)
|
||||
#define prof_free JEMALLOC_N(prof_free)
|
||||
#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
|
||||
#define prof_gdump JEMALLOC_N(prof_gdump)
|
||||
#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
|
||||
#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked)
|
||||
#define prof_gdump_set JEMALLOC_N(prof_gdump_set)
|
||||
#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
|
||||
#define prof_idump JEMALLOC_N(prof_idump)
|
||||
#define prof_interval JEMALLOC_N(prof_interval)
|
||||
#define prof_lookup JEMALLOC_N(prof_lookup)
|
||||
#define prof_malloc JEMALLOC_N(prof_malloc)
|
||||
#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
|
||||
#define prof_mdump JEMALLOC_N(prof_mdump)
|
||||
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
|
||||
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
|
||||
#define prof_prefork JEMALLOC_N(prof_prefork)
|
||||
#define prof_promote JEMALLOC_N(prof_promote)
|
||||
#define prof_prefork0 JEMALLOC_N(prof_prefork0)
|
||||
#define prof_prefork1 JEMALLOC_N(prof_prefork1)
|
||||
#define prof_realloc JEMALLOC_N(prof_realloc)
|
||||
#define prof_reset JEMALLOC_N(prof_reset)
|
||||
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
|
||||
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
|
||||
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
|
||||
#define prof_tctx_get JEMALLOC_N(prof_tctx_get)
|
||||
#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset)
|
||||
#define prof_tctx_set JEMALLOC_N(prof_tctx_set)
|
||||
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
|
||||
#define prof_tdata_count JEMALLOC_N(prof_tdata_count)
|
||||
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
|
||||
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
|
||||
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
|
||||
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
|
||||
#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
|
||||
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
|
||||
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
|
||||
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
|
||||
#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
|
||||
#define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head)
|
||||
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
|
||||
#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
|
||||
#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
|
||||
#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
|
||||
#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
|
||||
#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
|
||||
#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
|
||||
#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
|
||||
#define psz2ind JEMALLOC_N(psz2ind)
|
||||
#define psz2u JEMALLOC_N(psz2u)
|
||||
#define purge_mode_names JEMALLOC_N(purge_mode_names)
|
||||
#define quarantine JEMALLOC_N(quarantine)
|
||||
#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
|
||||
#define quarantine_boot JEMALLOC_N(quarantine_boot)
|
||||
#define quarantine_booted JEMALLOC_N(quarantine_booted)
|
||||
#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work)
|
||||
#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
|
||||
#define quarantine_init JEMALLOC_N(quarantine_init)
|
||||
#define quarantine_tls JEMALLOC_N(quarantine_tls)
|
||||
#define quarantine_tsd JEMALLOC_N(quarantine_tsd)
|
||||
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
|
||||
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
|
||||
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
|
||||
#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper)
|
||||
#define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head)
|
||||
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
|
||||
#define register_zone JEMALLOC_N(register_zone)
|
||||
#define rtree_child_read JEMALLOC_N(rtree_child_read)
|
||||
#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard)
|
||||
#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread)
|
||||
#define rtree_delete JEMALLOC_N(rtree_delete)
|
||||
#define rtree_get JEMALLOC_N(rtree_get)
|
||||
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
|
||||
#define rtree_new JEMALLOC_N(rtree_new)
|
||||
#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
|
||||
#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
|
||||
#define rtree_prefork JEMALLOC_N(rtree_prefork)
|
||||
#define rtree_node_valid JEMALLOC_N(rtree_node_valid)
|
||||
#define rtree_set JEMALLOC_N(rtree_set)
|
||||
#define rtree_start_level JEMALLOC_N(rtree_start_level)
|
||||
#define rtree_subkey JEMALLOC_N(rtree_subkey)
|
||||
#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read)
|
||||
#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard)
|
||||
#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread)
|
||||
#define rtree_val_read JEMALLOC_N(rtree_val_read)
|
||||
#define rtree_val_write JEMALLOC_N(rtree_val_write)
|
||||
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
|
||||
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
|
||||
#define s2u JEMALLOC_N(s2u)
|
||||
#define s2u_compute JEMALLOC_N(s2u_compute)
|
||||
#define s2u_lookup JEMALLOC_N(s2u_lookup)
|
||||
#define sa2u JEMALLOC_N(sa2u)
|
||||
#define set_errno JEMALLOC_N(set_errno)
|
||||
#define small_size2bin JEMALLOC_N(small_size2bin)
|
||||
#define size2index JEMALLOC_N(size2index)
|
||||
#define size2index_compute JEMALLOC_N(size2index_compute)
|
||||
#define size2index_lookup JEMALLOC_N(size2index_lookup)
|
||||
#define size2index_tab JEMALLOC_N(size2index_tab)
|
||||
#define spin_adaptive JEMALLOC_N(spin_adaptive)
|
||||
#define spin_init JEMALLOC_N(spin_init)
|
||||
#define stats_cactive JEMALLOC_N(stats_cactive)
|
||||
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
|
||||
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
|
||||
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
|
||||
#define stats_chunks JEMALLOC_N(stats_chunks)
|
||||
#define stats_print JEMALLOC_N(stats_print)
|
||||
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
|
||||
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
|
||||
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
|
||||
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
|
||||
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
|
||||
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
|
||||
#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
|
||||
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
|
||||
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
|
||||
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
|
||||
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
|
||||
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
|
||||
#define tcache_booted JEMALLOC_N(tcache_booted)
|
||||
#define tcache_boot JEMALLOC_N(tcache_boot)
|
||||
#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
|
||||
#define tcache_create JEMALLOC_N(tcache_create)
|
||||
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
|
||||
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
|
||||
#define tcache_destroy JEMALLOC_N(tcache_destroy)
|
||||
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
|
||||
#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup)
|
||||
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
|
||||
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
|
||||
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
|
||||
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
|
||||
#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
|
||||
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
|
||||
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
|
||||
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
|
||||
#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper)
|
||||
#define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head)
|
||||
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
|
||||
#define tcache_event JEMALLOC_N(tcache_event)
|
||||
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
|
||||
#define tcache_flush JEMALLOC_N(tcache_flush)
|
||||
#define tcache_get JEMALLOC_N(tcache_get)
|
||||
#define tcache_initialized JEMALLOC_N(tcache_initialized)
|
||||
#define tcache_get_hard JEMALLOC_N(tcache_get_hard)
|
||||
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
|
||||
#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child)
|
||||
#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent)
|
||||
#define tcache_prefork JEMALLOC_N(tcache_prefork)
|
||||
#define tcache_salloc JEMALLOC_N(tcache_salloc)
|
||||
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
|
||||
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
|
||||
#define tcache_tls JEMALLOC_N(tcache_tls)
|
||||
#define tcache_tsd JEMALLOC_N(tcache_tsd)
|
||||
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
|
||||
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
|
||||
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
|
||||
#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper)
|
||||
#define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head)
|
||||
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
|
||||
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
|
||||
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
|
||||
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
|
||||
#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
|
||||
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
|
||||
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
|
||||
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
|
||||
#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper)
|
||||
#define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head)
|
||||
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
|
||||
#define tcaches JEMALLOC_N(tcaches)
|
||||
#define tcaches_create JEMALLOC_N(tcaches_create)
|
||||
#define tcaches_destroy JEMALLOC_N(tcaches_destroy)
|
||||
#define tcaches_flush JEMALLOC_N(tcaches_flush)
|
||||
#define tcaches_get JEMALLOC_N(tcaches_get)
|
||||
#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup)
|
||||
#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup)
|
||||
#define ticker_copy JEMALLOC_N(ticker_copy)
|
||||
#define ticker_init JEMALLOC_N(ticker_init)
|
||||
#define ticker_read JEMALLOC_N(ticker_read)
|
||||
#define ticker_tick JEMALLOC_N(ticker_tick)
|
||||
#define ticker_ticks JEMALLOC_N(ticker_ticks)
|
||||
#define tsd_arena_get JEMALLOC_N(tsd_arena_get)
|
||||
#define tsd_arena_set JEMALLOC_N(tsd_arena_set)
|
||||
#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get)
|
||||
#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get)
|
||||
#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set)
|
||||
#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get)
|
||||
#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get)
|
||||
#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set)
|
||||
#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get)
|
||||
#define tsd_boot JEMALLOC_N(tsd_boot)
|
||||
#define tsd_boot0 JEMALLOC_N(tsd_boot0)
|
||||
#define tsd_boot1 JEMALLOC_N(tsd_boot1)
|
||||
#define tsd_booted JEMALLOC_N(tsd_booted)
|
||||
#define tsd_booted_get JEMALLOC_N(tsd_booted_get)
|
||||
#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
|
||||
#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper)
|
||||
#define tsd_fetch JEMALLOC_N(tsd_fetch)
|
||||
#define tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl)
|
||||
#define tsd_get JEMALLOC_N(tsd_get)
|
||||
#define tsd_get_allocates JEMALLOC_N(tsd_get_allocates)
|
||||
#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get)
|
||||
#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set)
|
||||
#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get)
|
||||
#define tsd_initialized JEMALLOC_N(tsd_initialized)
|
||||
#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
|
||||
#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
|
||||
#define tsd_init_head JEMALLOC_N(tsd_init_head)
|
||||
#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get)
|
||||
#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set)
|
||||
#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get)
|
||||
#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
|
||||
#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
|
||||
#define tsd_nominal JEMALLOC_N(tsd_nominal)
|
||||
#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get)
|
||||
#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set)
|
||||
#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get)
|
||||
#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get)
|
||||
#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set)
|
||||
#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get)
|
||||
#define tsd_set JEMALLOC_N(tsd_set)
|
||||
#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get)
|
||||
#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set)
|
||||
#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get)
|
||||
#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get)
|
||||
#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set)
|
||||
#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get)
|
||||
#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get)
|
||||
#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set)
|
||||
#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get)
|
||||
#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get)
|
||||
#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set)
|
||||
#define tsd_thread_deallocatedp_get JEMALLOC_N(tsd_thread_deallocatedp_get)
|
||||
#define tsd_tls JEMALLOC_N(tsd_tls)
|
||||
#define tsd_tsd JEMALLOC_N(tsd_tsd)
|
||||
#define tsd_tsdn JEMALLOC_N(tsd_tsdn)
|
||||
#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get)
|
||||
#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set)
|
||||
#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get)
|
||||
#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get)
|
||||
#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set)
|
||||
#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get)
|
||||
#define tsdn_fetch JEMALLOC_N(tsdn_fetch)
|
||||
#define tsdn_null JEMALLOC_N(tsdn_null)
|
||||
#define tsdn_tsd JEMALLOC_N(tsdn_tsd)
|
||||
#define u2rz JEMALLOC_N(u2rz)
|
||||
#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)
|
||||
#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined)
|
||||
#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess)
|
||||
#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined)
|
||||
#define witness_assert_depth JEMALLOC_N(witness_assert_depth)
|
||||
#define witness_assert_depth_to_rank JEMALLOC_N(witness_assert_depth_to_rank)
|
||||
#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless)
|
||||
#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner)
|
||||
#define witness_assert_owner JEMALLOC_N(witness_assert_owner)
|
||||
#define witness_depth_error JEMALLOC_N(witness_depth_error)
|
||||
#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup)
|
||||
#define witness_init JEMALLOC_N(witness_init)
|
||||
#define witness_lock JEMALLOC_N(witness_lock)
|
||||
#define witness_lock_error JEMALLOC_N(witness_lock_error)
|
||||
#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
|
||||
#define witness_owner JEMALLOC_N(witness_owner)
|
||||
#define witness_owner_error JEMALLOC_N(witness_owner_error)
|
||||
#define witness_postfork_child JEMALLOC_N(witness_postfork_child)
|
||||
#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
|
||||
#define witness_prefork JEMALLOC_N(witness_prefork)
|
||||
#define witness_unlock JEMALLOC_N(witness_unlock)
|
||||
#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup)
|
||||
#define zone_register JEMALLOC_N(zone_register)
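The long block above is jemalloc's private-namespace table: every internal symbol is funneled through JEMALLOC_N so that a statically linked jemalloc does not export or collide on plain names. A minimal sketch of the mechanism follows; the je_ prefix shown is only the usual default, since the real prefix is produced by the configure step:

/* Illustrative only: private_namespace.h is generated, and the prefix is
 * configurable; "je_" is merely the common default. */
#define JEMALLOC_N(n) je_##n

#define arena_malloc JEMALLOC_N(arena_malloc)

/*
 * After preprocessing, every definition and call site written as
 *     arena_malloc(...)
 * actually refers to the mangled symbol je_arena_malloc, so an embedding
 * application's own arena_malloc() cannot clash with jemalloc's internals.
 */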

197
deps/jemalloc/include/jemalloc/internal/prng.h
vendored
@@ -1,5 +1,8 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"

/*
 * Simple linear congruential pseudo-random number generator:
@@ -15,46 +18,168 @@
 * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
 *
 * This choice of m has the disadvantage that the quality of the bits is
 * proportional to bit position. For example. the lowest bit has a cycle of 2,
 * proportional to bit position. For example, the lowest bit has a cycle of 2,
 * the next has a cycle of 4, etc. For this reason, we prefer to use the upper
 * bits.
 *
 * Macro parameters:
 *   uint32_t r : Result.
 *   unsigned lg_range : (0..32], number of least significant bits to return.
 *   uint32_t state : Seed value.
 *   const uint32_t a, c : See above discussion.
 */
#define prng32(r, lg_range, state, a, c) do {				\
	assert(lg_range > 0);						\
	assert(lg_range <= 32);						\
									\
	r = (state * (a)) + (c);					\
	state = r;							\
	r >>= (32 - lg_range);						\
} while (false)

/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do {				\
	assert(lg_range > 0);						\
	assert(lg_range <= 64);						\
									\
	r = (state * (a)) + (c);					\
	state = r;							\
	r >>= (64 - lg_range);						\
} while (false)
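Because the macros fold the multiply-add and the right shift together, a caller only ever sees the high-order bits of the LCG state, which is exactly where the quality lives. A small stand-alone usage sketch follows; the PRNG_A_32/PRNG_C_32 constants appear further down in this header and are redefined here only to keep the example self-contained:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)

#define prng32(r, lg_range, state, a, c) do {				\
	assert(lg_range > 0);						\
	assert(lg_range <= 32);						\
									\
	r = (state * (a)) + (c);					\
	state = r;							\
	r >>= (32 - lg_range);						\
} while (false)

int main(void) {
	uint32_t state = 42, r;
	for (int i = 0; i < 3; i++) {
		/* Draw 8 bits at a time: r is uniform in [0, 256). */
		prng32(r, 8, state, PRNG_A_32, PRNG_C_32);
		printf("%u\n", r);
	}
	return 0;
}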
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/* INTERNAL DEFINITIONS -- IGNORE */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
#define PRNG_A_32 UINT32_C(1103515241)
|
||||
#define PRNG_C_32 UINT32_C(12347)
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
#define PRNG_A_64 UINT64_C(6364136223846793005)
|
||||
#define PRNG_C_64 UINT64_C(1442695040888963407)
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint32_t
|
||||
prng_state_next_u32(uint32_t state) {
|
||||
return (state * PRNG_A_32) + PRNG_C_32;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint64_t
|
||||
prng_state_next_u64(uint64_t state) {
|
||||
return (state * PRNG_A_64) + PRNG_C_64;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
prng_state_next_zu(size_t state) {
|
||||
#if LG_SIZEOF_PTR == 2
|
||||
return (state * PRNG_A_32) + PRNG_C_32;
|
||||
#elif LG_SIZEOF_PTR == 3
|
||||
return (state * PRNG_A_64) + PRNG_C_64;
|
||||
#else
|
||||
#error Unsupported pointer size
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
/* BEGIN PUBLIC API */
|
||||
/******************************************************************************/
|
||||
|
||||
/*
|
||||
* The prng_lg_range functions give a uniform int in the half-open range [0,
|
||||
* 2**lg_range). If atomic is true, they do so safely from multiple threads.
|
||||
* Multithreaded 64-bit prngs aren't supported.
|
||||
*/
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint32_t
|
||||
prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
|
||||
uint32_t ret, state0, state1;
|
||||
|
||||
assert(lg_range > 0);
|
||||
assert(lg_range <= 32);
|
||||
|
||||
state0 = atomic_load_u32(state, ATOMIC_RELAXED);
|
||||
|
||||
if (atomic) {
|
||||
do {
|
||||
state1 = prng_state_next_u32(state0);
|
||||
} while (!atomic_compare_exchange_weak_u32(state, &state0,
|
||||
state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
|
||||
} else {
|
||||
state1 = prng_state_next_u32(state0);
|
||||
atomic_store_u32(state, state1, ATOMIC_RELAXED);
|
||||
}
|
||||
ret = state1 >> (32 - lg_range);
|
||||
|
||||
return ret;
|
||||
}
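The atomic branch above is a standard weak compare-and-swap retry loop: recompute the successor from the freshly observed state until the exchange actually lands, then use the value that was installed. The same pattern in plain C11, with illustrative names rather than jemalloc's atomic wrappers:

#include <stdatomic.h>
#include <stdint.h>

static inline uint32_t
lcg_next(uint32_t s) {
	return s * UINT32_C(1103515241) + UINT32_C(12347);
}

/* Advance a PRNG state shared between threads.  On CAS failure, `cur` is
 * reloaded with the value another thread installed, and we recompute. */
static uint32_t
shared_prng_step(_Atomic uint32_t *state) {
	uint32_t cur = atomic_load_explicit(state, memory_order_relaxed);
	uint32_t next;
	do {
		next = lcg_next(cur);
	} while (!atomic_compare_exchange_weak_explicit(state, &cur, next,
	    memory_order_relaxed, memory_order_relaxed));
	return next;
}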
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint64_t
|
||||
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
|
||||
uint64_t ret, state1;
|
||||
|
||||
assert(lg_range > 0);
|
||||
assert(lg_range <= 64);
|
||||
|
||||
state1 = prng_state_next_u64(*state);
|
||||
*state = state1;
|
||||
ret = state1 >> (64 - lg_range);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
|
||||
size_t ret, state0, state1;
|
||||
|
||||
assert(lg_range > 0);
|
||||
assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
|
||||
|
||||
state0 = atomic_load_zu(state, ATOMIC_RELAXED);
|
||||
|
||||
if (atomic) {
|
||||
do {
|
||||
state1 = prng_state_next_zu(state0);
|
||||
} while (atomic_compare_exchange_weak_zu(state, &state0,
|
||||
state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
|
||||
} else {
|
||||
state1 = prng_state_next_zu(state0);
|
||||
atomic_store_zu(state, state1, ATOMIC_RELAXED);
|
||||
}
|
||||
ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* The prng_range functions behave like the prng_lg_range, but return a result
|
||||
* in [0, range) instead of [0, 2**lg_range).
|
||||
*/
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint32_t
|
||||
prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
|
||||
uint32_t ret;
|
||||
unsigned lg_range;
|
||||
|
||||
assert(range > 1);
|
||||
|
||||
/* Compute the ceiling of lg(range). */
|
||||
lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
|
||||
|
||||
/* Generate a result in [0..range) via repeated trial. */
|
||||
do {
|
||||
ret = prng_lg_range_u32(state, lg_range, atomic);
|
||||
} while (ret >= range);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint64_t
|
||||
prng_range_u64(uint64_t *state, uint64_t range) {
|
||||
uint64_t ret;
|
||||
unsigned lg_range;
|
||||
|
||||
assert(range > 1);
|
||||
|
||||
/* Compute the ceiling of lg(range). */
|
||||
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
|
||||
|
||||
/* Generate a result in [0..range) via repeated trial. */
|
||||
do {
|
||||
ret = prng_lg_range_u64(state, lg_range);
|
||||
} while (ret >= range);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
|
||||
size_t ret;
|
||||
unsigned lg_range;
|
||||
|
||||
assert(range > 1);
|
||||
|
||||
/* Compute the ceiling of lg(range). */
|
||||
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
|
||||
|
||||
/* Generate a result in [0..range) via repeated trial. */
|
||||
do {
|
||||
ret = prng_lg_range_zu(state, lg_range, atomic);
|
||||
} while (ret >= range);
|
||||
|
||||
return ret;
|
||||
}
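The prng_range_* wrappers above are rejection samplers: round the bound up to a power of two, draw that many bits, and retry whenever the draw lands at or above the bound, which keeps the distribution unbiased at the cost of at most about half the draws being rejected. A stand-alone sketch of the same idea, with an assumed next_bits() callback standing in for the jemalloc PRNG:

#include <assert.h>
#include <stdint.h>

/* Unbiased draw in [0, range).  next_bits(lg) is assumed to return lg
 * uniformly distributed bits; range must be in (1, 2**63]. */
static uint64_t
bounded_draw(uint64_t range, uint64_t (*next_bits)(unsigned)) {
	assert(range > 1);

	/* Compute the ceiling of lg(range). */
	unsigned lg_range = 0;
	while ((UINT64_C(1) << lg_range) < range) {
		lg_range++;
	}

	/* Generate a result in [0, range) via repeated trial. */
	uint64_t r;
	do {
		r = next_bits(lg_range);
	} while (r >= range);
	return r;
}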
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PRNG_H */

47
deps/jemalloc/include/jemalloc/internal/ql.h
vendored
@@ -1,61 +1,64 @@
|
||||
/*
|
||||
* List definitions.
|
||||
*/
|
||||
#define ql_head(a_type) \
|
||||
#ifndef JEMALLOC_INTERNAL_QL_H
|
||||
#define JEMALLOC_INTERNAL_QL_H
|
||||
|
||||
#include "jemalloc/internal/qr.h"
|
||||
|
||||
/* List definitions. */
|
||||
#define ql_head(a_type) \
|
||||
struct { \
|
||||
a_type *qlh_first; \
|
||||
}
|
||||
|
||||
#define ql_head_initializer(a_head) {NULL}
|
||||
#define ql_head_initializer(a_head) {NULL}
|
||||
|
||||
#define ql_elm(a_type) qr(a_type)
|
||||
#define ql_elm(a_type) qr(a_type)
|
||||
|
||||
/* List functions. */
|
||||
#define ql_new(a_head) do { \
|
||||
#define ql_new(a_head) do { \
|
||||
(a_head)->qlh_first = NULL; \
|
||||
} while (0)
|
||||
|
||||
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
|
||||
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
|
||||
|
||||
#define ql_first(a_head) ((a_head)->qlh_first)
|
||||
#define ql_first(a_head) ((a_head)->qlh_first)
|
||||
|
||||
#define ql_last(a_head, a_field) \
|
||||
#define ql_last(a_head, a_field) \
|
||||
((ql_first(a_head) != NULL) \
|
||||
? qr_prev(ql_first(a_head), a_field) : NULL)
|
||||
|
||||
#define ql_next(a_head, a_elm, a_field) \
|
||||
#define ql_next(a_head, a_elm, a_field) \
|
||||
((ql_last(a_head, a_field) != (a_elm)) \
|
||||
? qr_next((a_elm), a_field) : NULL)
|
||||
|
||||
#define ql_prev(a_head, a_elm, a_field) \
|
||||
#define ql_prev(a_head, a_elm, a_field) \
|
||||
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
|
||||
: NULL)
|
||||
|
||||
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
|
||||
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
|
||||
qr_before_insert((a_qlelm), (a_elm), a_field); \
|
||||
if (ql_first(a_head) == (a_qlelm)) { \
|
||||
ql_first(a_head) = (a_elm); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define ql_after_insert(a_qlelm, a_elm, a_field) \
|
||||
#define ql_after_insert(a_qlelm, a_elm, a_field) \
|
||||
qr_after_insert((a_qlelm), (a_elm), a_field)
|
||||
|
||||
#define ql_head_insert(a_head, a_elm, a_field) do { \
|
||||
#define ql_head_insert(a_head, a_elm, a_field) do { \
|
||||
if (ql_first(a_head) != NULL) { \
|
||||
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
|
||||
} \
|
||||
ql_first(a_head) = (a_elm); \
|
||||
} while (0)
|
||||
|
||||
#define ql_tail_insert(a_head, a_elm, a_field) do { \
|
||||
#define ql_tail_insert(a_head, a_elm, a_field) do { \
|
||||
if (ql_first(a_head) != NULL) { \
|
||||
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
|
||||
} \
|
||||
ql_first(a_head) = qr_next((a_elm), a_field); \
|
||||
} while (0)
|
||||
|
||||
#define ql_remove(a_head, a_elm, a_field) do { \
|
||||
#define ql_remove(a_head, a_elm, a_field) do { \
|
||||
if (ql_first(a_head) == (a_elm)) { \
|
||||
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
|
||||
} \
|
||||
@@ -66,18 +69,20 @@ struct { \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define ql_head_remove(a_head, a_type, a_field) do { \
|
||||
#define ql_head_remove(a_head, a_type, a_field) do { \
|
||||
a_type *t = ql_first(a_head); \
|
||||
ql_remove((a_head), t, a_field); \
|
||||
} while (0)
|
||||
|
||||
#define ql_tail_remove(a_head, a_type, a_field) do { \
|
||||
#define ql_tail_remove(a_head, a_type, a_field) do { \
|
||||
a_type *t = ql_last(a_head, a_field); \
|
||||
ql_remove((a_head), t, a_field); \
|
||||
} while (0)
|
||||
|
||||
#define ql_foreach(a_var, a_head, a_field) \
|
||||
#define ql_foreach(a_var, a_head, a_field) \
|
||||
qr_foreach((a_var), ql_first(a_head), a_field)
|
||||
|
||||
#define ql_reverse_foreach(a_var, a_head, a_field) \
|
||||
#define ql_reverse_foreach(a_var, a_head, a_field) \
|
||||
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_QL_H */
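ql is an intrusive, circular doubly linked list layered on the qr ring macros: the link field lives inside the caller's struct and the head records only the first element. A hedged usage sketch, assuming the header is reachable under the include path used in this tree:

#include <stdio.h>

#include "jemalloc/internal/ql.h"   /* assumed include path */

typedef struct widget_s widget_t;
struct widget_s {
	int id;
	ql_elm(widget_t) link;   /* intrusive link field */
};

int main(void) {
	ql_head(widget_t) widgets;
	ql_new(&widgets);

	widget_t a = {1, {NULL, NULL}};
	widget_t b = {2, {NULL, NULL}};
	ql_elm_new(&a, link);
	ql_elm_new(&b, link);

	ql_tail_insert(&widgets, &a, link);
	ql_tail_insert(&widgets, &b, link);

	widget_t *w;
	ql_foreach(w, &widgets, link) {
		printf("widget %d\n", w->id);   /* prints 1 then 2 */
	}
	return 0;
}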

41
deps/jemalloc/include/jemalloc/internal/qr.h
vendored
@@ -1,38 +1,39 @@
#ifndef JEMALLOC_INTERNAL_QR_H
#define JEMALLOC_INTERNAL_QR_H

/* Ring definitions. */
#define qr(a_type) \
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}

/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)

#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)

#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_after_insert(a_qrelm, a_qr, a_field) \
do \
{ \
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
} while (0)

#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
a_type *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
@@ -40,12 +41,14 @@ struct { \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)

/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
 * have two copies of the code. */
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
/*
 * qr_meld() and qr_split() are functionally equivalent, so there's no need to
 * have two copies of the code.
 */
#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_type, a_field)

#define qr_remove(a_qr, a_field) do { \
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
@@ -54,14 +57,16 @@ struct { \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_foreach(var, a_qr, a_field) \
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))

#define qr_reverse_foreach(var, a_qr, a_field) \
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))

#endif /* JEMALLOC_INTERNAL_QR_H */
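One behavioral note on this hunk: qr_meld() and qr_split() now take the element type as an extra argument so the temporary can be declared as a_type * rather than void *. A hedged sketch of the updated call shape follows; the widget type and link field are invented for illustration only.

/* Illustrative sketch only, not part of this commit. */
typedef struct widget_s widget_t;
struct widget_s {
	int id;
	qr(widget_t) link;	/* Embedded ring linkage. */
};

static void
ring_demo(widget_t *a, widget_t *b, widget_t *c) {
	widget_t *w;

	qr_new(a, link);
	qr_new(b, link);
	qr_new(c, link);
	qr_after_insert(a, b, link);		/* Ring {a, b}. */
	qr_meld(a, c, widget_t, link);		/* Merge with singleton ring {c}. */
	qr_foreach(w, a, link) {
		/* Visits a, b, then c. */
	}
	qr_split(a, c, widget_t, link);		/* Back to {a, b} and {c}. */
}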
351
deps/jemalloc/include/jemalloc/internal/rb.h
vendored
@@ -20,17 +20,21 @@
 */

#ifndef RB_H_
#define RB_H_
#define RB_H_

#ifndef __PGI
#define RB_COMPACT
#endif

#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right_red; \
}
#else
#define rb_node(a_type) \
#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right; \
@@ -39,111 +43,116 @@ struct { \
#endif

/* Root structure. */
#define rb_tree(a_type) \
#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
a_type rbt_nil; \
}

/* Left accessors. */
|
||||
#define rbtn_left_get(a_type, a_field, a_node) \
|
||||
#define rbtn_left_get(a_type, a_field, a_node) \
|
||||
((a_node)->a_field.rbn_left)
|
||||
#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
|
||||
#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
|
||||
(a_node)->a_field.rbn_left = a_left; \
|
||||
} while (0)
|
||||
|
||||
#ifdef RB_COMPACT
|
||||
/* Right accessors. */
|
||||
#define rbtn_right_get(a_type, a_field, a_node) \
|
||||
#define rbtn_right_get(a_type, a_field, a_node) \
|
||||
((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
|
||||
& ((ssize_t)-2)))
|
||||
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
|
||||
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
|
||||
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
|
||||
| (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
|
||||
} while (0)
|
||||
|
||||
/* Color accessors. */
|
||||
#define rbtn_red_get(a_type, a_field, a_node) \
|
||||
#define rbtn_red_get(a_type, a_field, a_node) \
|
||||
((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
|
||||
& ((size_t)1)))
|
||||
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
|
||||
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
|
||||
(a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
|
||||
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
|
||||
| ((ssize_t)a_red)); \
|
||||
} while (0)
|
||||
#define rbtn_red_set(a_type, a_field, a_node) do { \
|
||||
#define rbtn_red_set(a_type, a_field, a_node) do { \
|
||||
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
|
||||
(a_node)->a_field.rbn_right_red) | ((size_t)1)); \
|
||||
} while (0)
|
||||
#define rbtn_black_set(a_type, a_field, a_node) do { \
|
||||
#define rbtn_black_set(a_type, a_field, a_node) do { \
|
||||
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
|
||||
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
|
||||
} while (0)
|
||||
|
||||
/* Node initializer. */
|
||||
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
|
||||
/* Bookkeeping bit cannot be used by node pointer. */ \
|
||||
assert(((uintptr_t)(a_node) & 0x1) == 0); \
|
||||
rbtn_left_set(a_type, a_field, (a_node), NULL); \
|
||||
rbtn_right_set(a_type, a_field, (a_node), NULL); \
|
||||
rbtn_red_set(a_type, a_field, (a_node)); \
|
||||
} while (0)
|
||||
#else
|
||||
/* Right accessors. */
|
||||
#define rbtn_right_get(a_type, a_field, a_node) \
|
||||
#define rbtn_right_get(a_type, a_field, a_node) \
|
||||
((a_node)->a_field.rbn_right)
|
||||
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
|
||||
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
|
||||
(a_node)->a_field.rbn_right = a_right; \
|
||||
} while (0)
|
||||
|
||||
/* Color accessors. */
|
||||
#define rbtn_red_get(a_type, a_field, a_node) \
|
||||
#define rbtn_red_get(a_type, a_field, a_node) \
|
||||
((a_node)->a_field.rbn_red)
|
||||
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
|
||||
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
|
||||
(a_node)->a_field.rbn_red = (a_red); \
|
||||
} while (0)
|
||||
#define rbtn_red_set(a_type, a_field, a_node) do { \
|
||||
#define rbtn_red_set(a_type, a_field, a_node) do { \
|
||||
(a_node)->a_field.rbn_red = true; \
|
||||
} while (0)
|
||||
#define rbtn_black_set(a_type, a_field, a_node) do { \
|
||||
#define rbtn_black_set(a_type, a_field, a_node) do { \
|
||||
(a_node)->a_field.rbn_red = false; \
|
||||
} while (0)
|
||||
|
||||
/* Node initializer. */
|
||||
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
|
||||
rbtn_left_set(a_type, a_field, (a_node), NULL); \
|
||||
rbtn_right_set(a_type, a_field, (a_node), NULL); \
|
||||
rbtn_red_set(a_type, a_field, (a_node)); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
/* Node initializer. */
|
||||
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
|
||||
rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
|
||||
rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
|
||||
rbtn_red_set(a_type, a_field, (a_node)); \
|
||||
} while (0)
|
||||
|
||||
/* Tree initializer. */
|
||||
#define rb_new(a_type, a_field, a_rbt) do { \
|
||||
(a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \
|
||||
rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \
|
||||
rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \
|
||||
#define rb_new(a_type, a_field, a_rbt) do { \
|
||||
(a_rbt)->rbt_root = NULL; \
|
||||
} while (0)
|
||||
|
||||
/* Internal utility macros. */
|
||||
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
|
||||
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
|
||||
(r_node) = (a_root); \
|
||||
if ((r_node) != &(a_rbt)->rbt_nil) { \
|
||||
if ((r_node) != NULL) { \
|
||||
for (; \
|
||||
rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\
|
||||
rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
|
||||
(r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
|
||||
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
|
||||
(r_node) = (a_root); \
|
||||
if ((r_node) != &(a_rbt)->rbt_nil) { \
|
||||
for (; rbtn_right_get(a_type, a_field, (r_node)) != \
|
||||
&(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
|
||||
(r_node))) { \
|
||||
if ((r_node) != NULL) { \
|
||||
for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
|
||||
(r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
|
||||
#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
|
||||
(r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
|
||||
rbtn_right_set(a_type, a_field, (a_node), \
|
||||
rbtn_left_get(a_type, a_field, (r_node))); \
|
||||
rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
|
||||
} while (0)
|
||||
|
||||
#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
|
||||
#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
|
||||
(r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
|
||||
rbtn_left_set(a_type, a_field, (a_node), \
|
||||
rbtn_right_get(a_type, a_field, (r_node))); \
|
||||
@@ -155,9 +164,11 @@ struct { \
|
||||
* functions generated by an equivalently parameterized call to rb_gen().
|
||||
*/
|
||||
|
||||
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
|
||||
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
|
||||
a_attr void \
|
||||
a_prefix##new(a_rbt_type *rbtree); \
|
||||
a_attr bool \
|
||||
a_prefix##empty(a_rbt_type *rbtree); \
|
||||
a_attr a_type * \
|
||||
a_prefix##first(a_rbt_type *rbtree); \
|
||||
a_attr a_type * \
|
||||
@@ -167,11 +178,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \
|
||||
a_attr a_type * \
|
||||
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
|
||||
a_attr a_type * \
|
||||
a_prefix##search(a_rbt_type *rbtree, a_type *key); \
|
||||
a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
|
||||
a_attr a_type * \
|
||||
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \
|
||||
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
|
||||
a_attr a_type * \
|
||||
a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \
|
||||
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
|
||||
a_attr void \
|
||||
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
|
||||
a_attr void \
|
||||
@@ -181,7 +192,10 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
|
||||
a_rbt_type *, a_type *, void *), void *arg); \
|
||||
a_attr a_type * \
|
||||
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
|
||||
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
|
||||
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
|
||||
a_attr void \
|
||||
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
|
||||
void *arg);
|
||||
|
||||
/*
 * The rb_gen() macro generates a type-specific red-black tree implementation,
@@ -198,7 +212,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 * int (a_cmp *)(a_type *a_node, a_type *a_other);
 * ^^^^^^
 * or a_key
 * Interpretation of comparision function return values:
 * Interpretation of comparison function return values:
 *   -1 : a_node < a_other
 *    0 : a_node == a_other
 *    1 : a_node > a_other
@@ -224,6 +238,13 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 * Args:
 *   tree: Pointer to an uninitialized red-black tree object.
 *
 * static bool
 * ex_empty(ex_t *tree);
 * Description: Determine whether tree is empty.
 * Args:
 *   tree: Pointer to an initialized red-black tree object.
 * Ret: True if tree is empty, false otherwise.
 *
 * static ex_node_t *
 * ex_first(ex_t *tree);
 * static ex_node_t *
@@ -245,7 +266,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 * last/first.
 *
 * static ex_node_t *
 * ex_search(ex_t *tree, ex_node_t *key);
 * ex_search(ex_t *tree, const ex_node_t *key);
 * Description: Search for node that matches key.
 * Args:
 *   tree: Pointer to an initialized red-black tree object.
@@ -253,9 +274,9 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 * Ret: Node in tree that matches key, or NULL if no match.
 *
 * static ex_node_t *
 * ex_nsearch(ex_t *tree, ex_node_t *key);
 * ex_nsearch(ex_t *tree, const ex_node_t *key);
 * static ex_node_t *
 * ex_psearch(ex_t *tree, ex_node_t *key);
 * ex_psearch(ex_t *tree, const ex_node_t *key);
 * Description: Search for node that matches key. If no match is found,
 *              return what would be key's successor/predecessor, were
 *              key in tree.
@@ -303,40 +324,52 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 *   arg : Opaque pointer passed to cb().
 * Ret: NULL if iteration completed, or the non-NULL callback return value
 *      that caused termination of the iteration.
 *
 * static void
 * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
 * Description: Iterate over the tree with post-order traversal, remove
 *              each node, and run the callback if non-null. This is
 *              used for destroying a tree without paying the cost to
 *              rebalance it. The tree must not be otherwise altered
 *              during traversal.
 * Args:
 *   tree: Pointer to an initialized red-black tree object.
 *   cb : Callback function, which, if non-null, is called for each node
 *        during iteration. There is no way to stop iteration once it
 *        has begun.
 *   arg : Opaque pointer passed to cb().
 */
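To make the comment above concrete, here is a hedged instantiation sketch in the spirit of the ex_ prefix it uses; the node type, key field, and comparison function are invented for illustration and are not part of this commit.

/* Illustrative sketch only, not part of this commit. */
typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	int key;
	rb_node(ex_node_t) link;
};
typedef rb_tree(ex_node_t) ex_t;

static int
ex_cmp(const ex_node_t *a, const ex_node_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

rb_gen(static, ex_, ex_t, ex_node_t, link, ex_cmp)

static void
ex_demo(ex_node_t *n) {
	ex_t tree;

	ex_new(&tree);
	ex_insert(&tree, n);
	if (ex_search(&tree, n) == n) {
		/* Found the node we just inserted. */
	}
	/* New in this revision: tear the tree down without rebalancing,
	 * optionally running a callback per node (NULL here). */
	ex_destroy(&tree, NULL, NULL);
}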
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
|
||||
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
|
||||
a_attr void \
|
||||
a_prefix##new(a_rbt_type *rbtree) { \
|
||||
rb_new(a_type, a_field, rbtree); \
|
||||
} \
|
||||
a_attr bool \
|
||||
a_prefix##empty(a_rbt_type *rbtree) { \
|
||||
return (rbtree->rbt_root == NULL); \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##first(a_rbt_type *rbtree) { \
|
||||
a_type *ret; \
|
||||
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = NULL; \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##last(a_rbt_type *rbtree) { \
|
||||
a_type *ret; \
|
||||
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = NULL; \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
|
||||
a_type *ret; \
|
||||
if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
|
||||
if (rbtn_right_get(a_type, a_field, node) != NULL) { \
|
||||
rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), ret); \
|
||||
} else { \
|
||||
a_type *tnode = rbtree->rbt_root; \
|
||||
assert(tnode != &rbtree->rbt_nil); \
|
||||
ret = &rbtree->rbt_nil; \
|
||||
assert(tnode != NULL); \
|
||||
ret = NULL; \
|
||||
while (true) { \
|
||||
int cmp = (a_cmp)(node, tnode); \
|
||||
if (cmp < 0) { \
|
||||
@@ -347,24 +380,21 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
|
||||
} else { \
|
||||
break; \
|
||||
} \
|
||||
assert(tnode != &rbtree->rbt_nil); \
|
||||
assert(tnode != NULL); \
|
||||
} \
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = (NULL); \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
|
||||
a_type *ret; \
|
||||
if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
|
||||
if (rbtn_left_get(a_type, a_field, node) != NULL) { \
|
||||
rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
|
||||
a_field, node), ret); \
|
||||
} else { \
|
||||
a_type *tnode = rbtree->rbt_root; \
|
||||
assert(tnode != &rbtree->rbt_nil); \
|
||||
ret = &rbtree->rbt_nil; \
|
||||
assert(tnode != NULL); \
|
||||
ret = NULL; \
|
||||
while (true) { \
|
||||
int cmp = (a_cmp)(node, tnode); \
|
||||
if (cmp < 0) { \
|
||||
@@ -375,20 +405,17 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
|
||||
} else { \
|
||||
break; \
|
||||
} \
|
||||
assert(tnode != &rbtree->rbt_nil); \
|
||||
assert(tnode != NULL); \
|
||||
} \
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = (NULL); \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
|
||||
a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
|
||||
a_type *ret; \
|
||||
int cmp; \
|
||||
ret = rbtree->rbt_root; \
|
||||
while (ret != &rbtree->rbt_nil \
|
||||
while (ret != NULL \
|
||||
&& (cmp = (a_cmp)(key, ret)) != 0) { \
|
||||
if (cmp < 0) { \
|
||||
ret = rbtn_left_get(a_type, a_field, ret); \
|
||||
@@ -396,17 +423,14 @@ a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
|
||||
ret = rbtn_right_get(a_type, a_field, ret); \
|
||||
} \
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = (NULL); \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
|
||||
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
|
||||
a_type *ret; \
|
||||
a_type *tnode = rbtree->rbt_root; \
|
||||
ret = &rbtree->rbt_nil; \
|
||||
while (tnode != &rbtree->rbt_nil) { \
|
||||
ret = NULL; \
|
||||
while (tnode != NULL) { \
|
||||
int cmp = (a_cmp)(key, tnode); \
|
||||
if (cmp < 0) { \
|
||||
ret = tnode; \
|
||||
@@ -418,17 +442,14 @@ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = (NULL); \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
|
||||
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
|
||||
a_type *ret; \
|
||||
a_type *tnode = rbtree->rbt_root; \
|
||||
ret = &rbtree->rbt_nil; \
|
||||
while (tnode != &rbtree->rbt_nil) { \
|
||||
ret = NULL; \
|
||||
while (tnode != NULL) { \
|
||||
int cmp = (a_cmp)(key, tnode); \
|
||||
if (cmp < 0) { \
|
||||
tnode = rbtn_left_get(a_type, a_field, tnode); \
|
||||
@@ -440,10 +461,7 @@ a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = (NULL); \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr void \
|
||||
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
|
||||
@@ -454,7 +472,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
|
||||
rbt_node_new(a_type, a_field, rbtree, node); \
|
||||
/* Wind. */ \
|
||||
path->node = rbtree->rbt_root; \
|
||||
for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
|
||||
for (pathp = path; pathp->node != NULL; pathp++) { \
|
||||
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
|
||||
assert(cmp != 0); \
|
||||
if (cmp < 0) { \
|
||||
@@ -474,7 +492,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
|
||||
rbtn_left_set(a_type, a_field, cnode, left); \
|
||||
if (rbtn_red_get(a_type, a_field, left)) { \
|
||||
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
|
||||
if (rbtn_red_get(a_type, a_field, leftleft)) { \
|
||||
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
|
||||
leftleft)) { \
|
||||
/* Fix up 4-node. */ \
|
||||
a_type *tnode; \
|
||||
rbtn_black_set(a_type, a_field, leftleft); \
|
||||
@@ -489,7 +508,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
|
||||
rbtn_right_set(a_type, a_field, cnode, right); \
|
||||
if (rbtn_red_get(a_type, a_field, right)) { \
|
||||
a_type *left = rbtn_left_get(a_type, a_field, cnode); \
|
||||
if (rbtn_red_get(a_type, a_field, left)) { \
|
||||
if (left != NULL && rbtn_red_get(a_type, a_field, \
|
||||
left)) { \
|
||||
/* Split 4-node. */ \
|
||||
rbtn_black_set(a_type, a_field, left); \
|
||||
rbtn_black_set(a_type, a_field, right); \
|
||||
@@ -522,7 +542,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
/* Wind. */ \
|
||||
nodep = NULL; /* Silence compiler warning. */ \
|
||||
path->node = rbtree->rbt_root; \
|
||||
for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
|
||||
for (pathp = path; pathp->node != NULL; pathp++) { \
|
||||
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
|
||||
if (cmp < 0) { \
|
||||
pathp[1].node = rbtn_left_get(a_type, a_field, \
|
||||
@@ -534,8 +554,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
/* Find node's successor, in preparation for swap. */ \
|
||||
pathp->cmp = 1; \
|
||||
nodep = pathp; \
|
||||
for (pathp++; pathp->node != &rbtree->rbt_nil; \
|
||||
pathp++) { \
|
||||
for (pathp++; pathp->node != NULL; pathp++) { \
|
||||
pathp->cmp = -1; \
|
||||
pathp[1].node = rbtn_left_get(a_type, a_field, \
|
||||
pathp->node); \
|
||||
@@ -577,10 +596,10 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
} \
|
||||
} else { \
|
||||
a_type *left = rbtn_left_get(a_type, a_field, node); \
|
||||
if (left != &rbtree->rbt_nil) { \
|
||||
if (left != NULL) { \
|
||||
/* node has no successor, but it has a left child. */\
|
||||
/* Splice node out, without losing the left child. */\
|
||||
assert(rbtn_red_get(a_type, a_field, node) == false); \
|
||||
assert(!rbtn_red_get(a_type, a_field, node)); \
|
||||
assert(rbtn_red_get(a_type, a_field, left)); \
|
||||
rbtn_black_set(a_type, a_field, left); \
|
||||
if (pathp == path) { \
|
||||
@@ -597,34 +616,32 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
return; \
|
||||
} else if (pathp == path) { \
|
||||
/* The tree only contained one node. */ \
|
||||
rbtree->rbt_root = &rbtree->rbt_nil; \
|
||||
rbtree->rbt_root = NULL; \
|
||||
return; \
|
||||
} \
|
||||
} \
|
||||
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
|
||||
/* Prune red node, which requires no fixup. */ \
|
||||
assert(pathp[-1].cmp < 0); \
|
||||
rbtn_left_set(a_type, a_field, pathp[-1].node, \
|
||||
&rbtree->rbt_nil); \
|
||||
rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
|
||||
return; \
|
||||
} \
|
||||
/* The node to be pruned is black, so unwind until balance is */\
|
||||
/* restored. */\
|
||||
pathp->node = &rbtree->rbt_nil; \
|
||||
pathp->node = NULL; \
|
||||
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
|
||||
assert(pathp->cmp != 0); \
|
||||
if (pathp->cmp < 0) { \
|
||||
rbtn_left_set(a_type, a_field, pathp->node, \
|
||||
pathp[1].node); \
|
||||
assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
|
||||
== false); \
|
||||
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
|
||||
a_type *right = rbtn_right_get(a_type, a_field, \
|
||||
pathp->node); \
|
||||
a_type *rightleft = rbtn_left_get(a_type, a_field, \
|
||||
right); \
|
||||
a_type *tnode; \
|
||||
if (rbtn_red_get(a_type, a_field, rightleft)) { \
|
||||
if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
|
||||
rightleft)) { \
|
||||
/* In the following diagrams, ||, //, and \\ */\
|
||||
/* indicate the path to the removed node. */\
|
||||
/* */\
|
||||
@@ -667,7 +684,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
pathp->node); \
|
||||
a_type *rightleft = rbtn_left_get(a_type, a_field, \
|
||||
right); \
|
||||
if (rbtn_red_get(a_type, a_field, rightleft)) { \
|
||||
if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
|
||||
rightleft)) { \
|
||||
/* || */\
|
||||
/* pathp(b) */\
|
||||
/* // \ */\
|
||||
@@ -681,7 +699,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
rbtn_rotate_left(a_type, a_field, pathp->node, \
|
||||
tnode); \
|
||||
/* Balance restored, but rotation modified */\
|
||||
/* subree root, which may actually be the tree */\
|
||||
/* subtree root, which may actually be the tree */\
|
||||
/* root. */\
|
||||
if (pathp == path) { \
|
||||
/* Set root. */ \
|
||||
@@ -721,7 +739,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
left); \
|
||||
a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
|
||||
leftright); \
|
||||
if (rbtn_red_get(a_type, a_field, leftrightleft)) { \
|
||||
if (leftrightleft != NULL && rbtn_red_get(a_type, \
|
||||
a_field, leftrightleft)) { \
|
||||
/* || */\
|
||||
/* pathp(b) */\
|
||||
/* / \\ */\
|
||||
@@ -747,7 +766,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
/* (b) */\
|
||||
/* / */\
|
||||
/* (b) */\
|
||||
assert(leftright != &rbtree->rbt_nil); \
|
||||
assert(leftright != NULL); \
|
||||
rbtn_red_set(a_type, a_field, leftright); \
|
||||
rbtn_rotate_right(a_type, a_field, pathp->node, \
|
||||
tnode); \
|
||||
@@ -770,7 +789,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
return; \
|
||||
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
|
||||
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
|
||||
if (rbtn_red_get(a_type, a_field, leftleft)) { \
|
||||
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
|
||||
leftleft)) { \
|
||||
/* || */\
|
||||
/* pathp(r) */\
|
||||
/* / \\ */\
|
||||
@@ -808,7 +828,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
} \
|
||||
} else { \
|
||||
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
|
||||
if (rbtn_red_get(a_type, a_field, leftleft)) { \
|
||||
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
|
||||
leftleft)) { \
|
||||
/* || */\
|
||||
/* pathp(b) */\
|
||||
/* / \\ */\
|
||||
@@ -849,22 +870,22 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
} \
|
||||
/* Set root. */ \
|
||||
rbtree->rbt_root = path->node; \
|
||||
assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \
|
||||
assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
|
||||
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
|
||||
if (node == &rbtree->rbt_nil) { \
|
||||
return (&rbtree->rbt_nil); \
|
||||
if (node == NULL) { \
|
||||
return NULL; \
|
||||
} else { \
|
||||
a_type *ret; \
|
||||
if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
|
||||
a_field, node), cb, arg)) != &rbtree->rbt_nil \
|
||||
|| (ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return (ret); \
|
||||
a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
|
||||
arg)) != NULL) { \
|
||||
return ret; \
|
||||
} \
|
||||
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), cb, arg)); \
|
||||
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), cb, arg); \
|
||||
} \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
@@ -874,22 +895,22 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
|
||||
if (cmp < 0) { \
|
||||
a_type *ret; \
|
||||
if ((ret = a_prefix##iter_start(rbtree, start, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg)) != \
|
||||
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return (ret); \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
|
||||
(ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return ret; \
|
||||
} \
|
||||
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), cb, arg)); \
|
||||
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), cb, arg); \
|
||||
} else if (cmp > 0) { \
|
||||
return (a_prefix##iter_start(rbtree, start, \
|
||||
rbtn_right_get(a_type, a_field, node), cb, arg)); \
|
||||
return a_prefix##iter_start(rbtree, start, \
|
||||
rbtn_right_get(a_type, a_field, node), cb, arg); \
|
||||
} else { \
|
||||
a_type *ret; \
|
||||
if ((ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), cb, arg)); \
|
||||
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), cb, arg); \
|
||||
} \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
@@ -902,25 +923,22 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
|
||||
} else { \
|
||||
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = NULL; \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
|
||||
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
|
||||
if (node == &rbtree->rbt_nil) { \
|
||||
return (&rbtree->rbt_nil); \
|
||||
if (node == NULL) { \
|
||||
return NULL; \
|
||||
} else { \
|
||||
a_type *ret; \
|
||||
if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_right_get(a_type, a_field, node), cb, arg)) != \
|
||||
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return (ret); \
|
||||
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
|
||||
(ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return ret; \
|
||||
} \
|
||||
return (a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg)); \
|
||||
return a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg); \
|
||||
} \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
@@ -931,22 +949,22 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
|
||||
if (cmp > 0) { \
|
||||
a_type *ret; \
|
||||
if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
|
||||
rbtn_right_get(a_type, a_field, node), cb, arg)) != \
|
||||
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return (ret); \
|
||||
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
|
||||
(ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return ret; \
|
||||
} \
|
||||
return (a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg)); \
|
||||
return a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg); \
|
||||
} else if (cmp < 0) { \
|
||||
return (a_prefix##reverse_iter_start(rbtree, start, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg)); \
|
||||
return a_prefix##reverse_iter_start(rbtree, start, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg); \
|
||||
} else { \
|
||||
a_type *ret; \
|
||||
if ((ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
return (a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg)); \
|
||||
return a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg); \
|
||||
} \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
@@ -960,10 +978,29 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
|
||||
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
|
||||
cb, arg); \
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = NULL; \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr void \
|
||||
a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
|
||||
a_type *, void *), void *arg) { \
|
||||
if (node == NULL) { \
|
||||
return; \
|
||||
} \
|
||||
return (ret); \
|
||||
a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
|
||||
node), cb, arg); \
|
||||
rbtn_left_set(a_type, a_field, (node), NULL); \
|
||||
a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
|
||||
node), cb, arg); \
|
||||
rbtn_right_set(a_type, a_field, (node), NULL); \
|
||||
if (cb) { \
|
||||
cb(node, arg); \
|
||||
} \
|
||||
} \
|
||||
a_attr void \
|
||||
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
|
||||
void *arg) { \
|
||||
a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
|
||||
rbtree->rbt_root = NULL; \
|
||||
}
|
||||
|
||||
#endif /* RB_H_ */
|
||||
|
||||
610
deps/jemalloc/include/jemalloc/internal/rtree.h
vendored
@@ -1,172 +1,474 @@
#ifndef JEMALLOC_INTERNAL_RTREE_H
#define JEMALLOC_INTERNAL_RTREE_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/tsd.h"

/*
 * This radix tree implementation is tailored to the singular purpose of
 * tracking which chunks are currently owned by jemalloc. This functionality
 * is mandatory for OS X, where jemalloc must be able to respond to object
 * ownership queries.
 * associating metadata with extents that are currently owned by jemalloc.
 *
 *******************************************************************************
 */
#ifdef JEMALLOC_H_TYPES

typedef struct rtree_s rtree_t;
/* Number of high insignificant bits. */
#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
/* Number of low insigificant bits. */
#define RTREE_NLIB LG_PAGE
/* Number of significant bits. */
#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
/* Number of levels in radix tree. */
#if RTREE_NSB <= 10
# define RTREE_HEIGHT 1
#elif RTREE_NSB <= 36
# define RTREE_HEIGHT 2
#elif RTREE_NSB <= 52
# define RTREE_HEIGHT 3
#else
# error Unsupported number of significant virtual address bits
#endif
/* Use compact leaf representation if virtual address encoding allows. */
#if RTREE_NHIB >= LG_CEIL_NSIZES
# define RTREE_LEAF_COMPACT
#endif

/*
 * Size of each radix tree node (must be a power of 2). This impacts tree
 * depth.
 */
#define RTREE_NODESIZE (1U << 16)
/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
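As a quick sanity check of the constants above, here is the arithmetic under one common configuration; the concrete values (64-bit pointers, 48-bit virtual addresses, 4 KiB pages) are assumptions for illustration, not something this diff fixes.

/* Illustrative sketch only, not part of this commit. */
#define EX_LG_SIZEOF_PTR 3	/* 64-bit pointers. */
#define EX_LG_VADDR 48		/* 48 significant virtual address bits. */
#define EX_LG_PAGE 12		/* 4 KiB pages. */

#define EX_RTREE_NHIB ((1U << (EX_LG_SIZEOF_PTR+3)) - EX_LG_VADDR)	/* 64 - 48 = 16 */
#define EX_RTREE_NLIB EX_LG_PAGE					/* 12 */
#define EX_RTREE_NSB (EX_LG_VADDR - EX_RTREE_NLIB)			/* 48 - 12 = 36 */
/* EX_RTREE_NSB == 36 takes the "<= 36" branch above, so RTREE_HEIGHT would be
 * 2, and rtree_levels[] later splits the 36 key bits 18/18 across the levels. */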
typedef void *(rtree_alloc_t)(size_t);
|
||||
typedef void (rtree_dalloc_t)(void *);
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
struct rtree_s {
|
||||
rtree_alloc_t *alloc;
|
||||
rtree_dalloc_t *dalloc;
|
||||
malloc_mutex_t mutex;
|
||||
void **root;
|
||||
unsigned height;
|
||||
unsigned level2bits[1]; /* Dynamically sized. */
|
||||
typedef struct rtree_node_elm_s rtree_node_elm_t;
|
||||
struct rtree_node_elm_s {
|
||||
atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
|
||||
void rtree_delete(rtree_t *rtree);
|
||||
void rtree_prefork(rtree_t *rtree);
|
||||
void rtree_postfork_parent(rtree_t *rtree);
|
||||
void rtree_postfork_child(rtree_t *rtree);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
|
||||
#endif
|
||||
uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
|
||||
bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
|
||||
#define RTREE_GET_GENERATE(f) \
|
||||
/* The least significant bits of the key are ignored. */ \
|
||||
JEMALLOC_INLINE uint8_t \
|
||||
f(rtree_t *rtree, uintptr_t key) \
|
||||
{ \
|
||||
uint8_t ret; \
|
||||
uintptr_t subkey; \
|
||||
unsigned i, lshift, height, bits; \
|
||||
void **node, **child; \
|
||||
\
|
||||
RTREE_LOCK(&rtree->mutex); \
|
||||
for (i = lshift = 0, height = rtree->height, node = rtree->root;\
|
||||
i < height - 1; \
|
||||
i++, lshift += bits, node = child) { \
|
||||
bits = rtree->level2bits[i]; \
|
||||
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
|
||||
3)) - bits); \
|
||||
child = (void**)node[subkey]; \
|
||||
if (child == NULL) { \
|
||||
RTREE_UNLOCK(&rtree->mutex); \
|
||||
return (0); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
/* \
|
||||
* node is a leaf, so it contains values rather than node \
|
||||
* pointers. \
|
||||
*/ \
|
||||
bits = rtree->level2bits[i]; \
|
||||
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
|
||||
bits); \
|
||||
{ \
|
||||
uint8_t *leaf = (uint8_t *)node; \
|
||||
ret = leaf[subkey]; \
|
||||
} \
|
||||
RTREE_UNLOCK(&rtree->mutex); \
|
||||
\
|
||||
RTREE_GET_VALIDATE \
|
||||
return (ret); \
|
||||
}
|
||||
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
# define RTREE_LOCK(l) malloc_mutex_lock(l)
|
||||
# define RTREE_UNLOCK(l) malloc_mutex_unlock(l)
|
||||
# define RTREE_GET_VALIDATE
|
||||
RTREE_GET_GENERATE(rtree_get_locked)
|
||||
# undef RTREE_LOCK
|
||||
# undef RTREE_UNLOCK
|
||||
# undef RTREE_GET_VALIDATE
|
||||
#endif
|
||||
|
||||
#define RTREE_LOCK(l)
|
||||
#define RTREE_UNLOCK(l)
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
/*
|
||||
* Suppose that it were possible for a jemalloc-allocated chunk to be
|
||||
* munmap()ped, followed by a different allocator in another thread re-using
|
||||
* overlapping virtual memory, all without invalidating the cached rtree
|
||||
* value. The result would be a false positive (the rtree would claim that
|
||||
* jemalloc owns memory that it had actually discarded). This scenario
|
||||
* seems impossible, but the following assertion is a prudent sanity check.
|
||||
*/
|
||||
# define RTREE_GET_VALIDATE \
|
||||
assert(rtree_get_locked(rtree, key) == ret);
|
||||
struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
/*
 * Single pointer-width field containing all three leaf element fields.
 * For example, on a 64-bit x64 system with 48 significant virtual
 * memory address bits, the index, extent, and slab fields are packed as
 * such:
 *
 * x: index
 * e: extent
 * b: slab
 *
 * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
 */
atomic_p_t le_bits;
#else
# define RTREE_GET_VALIDATE
atomic_p_t le_extent; /* (extent_t *) */
atomic_u_t le_szind; /* (szind_t) */
atomic_b_t le_slab; /* (bool) */
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE
};
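A worked packing example may help when reading the compact layout described above; every concrete value below (LG_VADDR == 48, the size-class index, the extent address) is assumed for illustration only.

/* Illustrative sketch only, not part of this commit.
 * With LG_VADDR == 48, szind == 5, slab == true and an extent at
 * 0x00007f1234567000, the packed word is:
 *
 *   bits = ((uintptr_t)5 << 48)                        (index above bit 48)
 *        | (0x00007f1234567000 & ((1ULL << 48) - 1))   (low 48 address bits)
 *        | (uintptr_t)1                                (slab flag in bit 0)
 *        = 0x00057f1234567001
 *
 * Bit 0 is free for the slab flag because extents are at least pointer
 * aligned, so the low address bits are otherwise always zero.
 */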
JEMALLOC_INLINE bool
|
||||
rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
|
||||
{
|
||||
uintptr_t subkey;
|
||||
unsigned i, lshift, height, bits;
|
||||
void **node, **child;
|
||||
typedef struct rtree_level_s rtree_level_t;
|
||||
struct rtree_level_s {
|
||||
/* Number of key bits distinguished by this level. */
|
||||
unsigned bits;
|
||||
/*
|
||||
* Cumulative number of key bits distinguished by traversing to
|
||||
* corresponding tree level.
|
||||
*/
|
||||
unsigned cumbits;
|
||||
};
|
||||
|
||||
malloc_mutex_lock(&rtree->mutex);
|
||||
for (i = lshift = 0, height = rtree->height, node = rtree->root;
|
||||
i < height - 1;
|
||||
i++, lshift += bits, node = child) {
|
||||
bits = rtree->level2bits[i];
|
||||
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
|
||||
bits);
|
||||
child = (void**)node[subkey];
|
||||
if (child == NULL) {
|
||||
size_t size = ((i + 1 < height - 1) ? sizeof(void *)
|
||||
: (sizeof(uint8_t))) << rtree->level2bits[i+1];
|
||||
child = (void**)rtree->alloc(size);
|
||||
if (child == NULL) {
|
||||
malloc_mutex_unlock(&rtree->mutex);
|
||||
return (true);
|
||||
}
|
||||
memset(child, 0, size);
|
||||
node[subkey] = child;
|
||||
}
|
||||
}
|
||||
typedef struct rtree_s rtree_t;
|
||||
struct rtree_s {
|
||||
malloc_mutex_t init_lock;
|
||||
/* Number of elements based on rtree_levels[0].bits. */
|
||||
#if RTREE_HEIGHT > 1
|
||||
rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
|
||||
#else
|
||||
rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
|
||||
#endif
|
||||
};
|
||||
|
||||
/* node is a leaf, so it contains values rather than node pointers. */
|
||||
bits = rtree->level2bits[i];
|
||||
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
|
||||
{
|
||||
uint8_t *leaf = (uint8_t *)node;
|
||||
leaf[subkey] = val;
|
||||
}
|
||||
malloc_mutex_unlock(&rtree->mutex);
|
||||
/*
|
||||
* Split the bits into one to three partitions depending on number of
|
||||
* significant bits. It the number of bits does not divide evenly into the
|
||||
* number of levels, place one remainder bit per level starting at the leaf
|
||||
* level.
|
||||
*/
|
||||
static const rtree_level_t rtree_levels[] = {
|
||||
#if RTREE_HEIGHT == 1
|
||||
{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
|
||||
#elif RTREE_HEIGHT == 2
|
||||
{RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
|
||||
{RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
|
||||
#elif RTREE_HEIGHT == 3
|
||||
{RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
|
||||
{RTREE_NSB/3 + RTREE_NSB%3/2,
|
||||
RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
|
||||
{RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
|
||||
#else
|
||||
# error Unsupported rtree height
|
||||
#endif
|
||||
};
|
||||
|
||||
return (false);
|
||||
bool rtree_new(rtree_t *rtree, bool zeroed);
|
||||
|
||||
typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
|
||||
extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
|
||||
|
||||
typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
|
||||
extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
|
||||
|
||||
typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
|
||||
extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
|
||||
|
||||
typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
|
||||
extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
|
||||
#ifdef JEMALLOC_JET
|
||||
void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
|
||||
#endif
|
||||
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uintptr_t
|
||||
rtree_leafkey(uintptr_t key) {
|
||||
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
|
||||
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
|
||||
rtree_levels[RTREE_HEIGHT-1].bits);
|
||||
unsigned maskbits = ptrbits - cumbits;
|
||||
uintptr_t mask = ~((ZU(1) << maskbits) - 1);
|
||||
return (key & mask);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
rtree_cache_direct_map(uintptr_t key) {
|
||||
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
|
||||
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
|
||||
rtree_levels[RTREE_HEIGHT-1].bits);
|
||||
unsigned maskbits = ptrbits - cumbits;
|
||||
return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uintptr_t
|
||||
rtree_subkey(uintptr_t key, unsigned level) {
|
||||
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
|
||||
unsigned cumbits = rtree_levels[level].cumbits;
|
||||
unsigned shiftbits = ptrbits - cumbits;
|
||||
unsigned maskbits = rtree_levels[level].bits;
|
||||
uintptr_t mask = (ZU(1) << maskbits) - 1;
|
||||
return ((key >> shiftbits) & mask);
|
||||
}
|
||||
|
||||
/*
|
||||
* Atomic getters.
|
||||
*
|
||||
* dependent: Reading a value on behalf of a pointer to a valid allocation
|
||||
* is guaranteed to be a clean read even without synchronization,
|
||||
* because the rtree update became visible in memory before the
|
||||
* pointer came into existence.
|
||||
* !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
|
||||
* dependent on a previous rtree write, which means a stale read
|
||||
* could result if synchronization were omitted here.
|
||||
*/
|
||||
# ifdef RTREE_LEAF_COMPACT
|
||||
JEMALLOC_ALWAYS_INLINE uintptr_t
|
||||
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool dependent) {
|
||||
return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
|
||||
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE extent_t *
|
||||
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
|
||||
/* Restore sign-extended high bits, mask slab bit. */
|
||||
return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
|
||||
RTREE_NHIB) & ~((uintptr_t)0x1));
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE szind_t
|
||||
rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
|
||||
return (szind_t)(bits >> LG_VADDR);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
|
||||
return (bool)(bits & (uintptr_t)0x1);
|
||||
}
|
||||
|
||||
# endif
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE extent_t *
|
||||
rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool dependent) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||
return rtree_leaf_elm_bits_extent_get(bits);
|
||||
#else
|
||||
extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
|
||||
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
|
||||
return extent;
|
||||
#endif
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE szind_t
|
||||
rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool dependent) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||
return rtree_leaf_elm_bits_szind_get(bits);
|
||||
#else
|
||||
return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
|
||||
: ATOMIC_ACQUIRE);
|
||||
#endif
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool dependent) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||
return rtree_leaf_elm_bits_slab_get(bits);
|
||||
#else
|
||||
return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
|
||||
ATOMIC_ACQUIRE);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
extent_t *extent) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
|
||||
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
|
||||
LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
|
||||
| ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
|
||||
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||
#else
|
||||
atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
szind_t szind) {
|
||||
assert(szind <= NSIZES);
|
||||
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
|
||||
true);
|
||||
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
|
||||
((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
|
||||
(((uintptr_t)0x1 << LG_VADDR) - 1)) |
|
||||
((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
|
||||
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||
#else
|
||||
atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool slab) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
|
||||
true);
|
||||
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
|
||||
LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
|
||||
(((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
|
||||
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||
#else
|
||||
atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
extent_t *extent, szind_t szind, bool slab) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
|
||||
((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
|
||||
((uintptr_t)slab);
|
||||
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||
#else
|
||||
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
|
||||
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
|
||||
/*
|
||||
* Write extent last, since the element is atomically considered valid
|
||||
* as soon as the extent field is non-NULL.
|
||||
*/
|
||||
rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
|
||||
assert(!slab || szind < NBINS);
|
||||
|
||||
/*
|
||||
* The caller implicitly assures that it is the only writer to the szind
|
||||
* and slab fields, and that the extent field cannot currently change.
|
||||
*/
|
||||
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
|
||||
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
|
||||
rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key, bool dependent, bool init_missing) {
|
||||
assert(key != 0);
|
||||
assert(!dependent || !init_missing);
|
||||
|
||||
size_t slot = rtree_cache_direct_map(key);
|
||||
uintptr_t leafkey = rtree_leafkey(key);
|
||||
assert(leafkey != RTREE_LEAFKEY_INVALID);
|
||||
|
||||
/* Fast path: L1 direct mapped cache. */
|
||||
if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
|
||||
rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
|
||||
assert(leaf != NULL);
|
||||
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
|
||||
return &leaf[subkey];
|
||||
}
|
||||
/*
|
||||
* Search the L2 LRU cache. On hit, swap the matching element into the
|
||||
* slot in L1 cache, and move the position in L2 up by 1.
|
||||
*/
|
||||
#define RTREE_CACHE_CHECK_L2(i) do { \
|
||||
if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \
|
||||
rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \
|
||||
assert(leaf != NULL); \
|
||||
if (i > 0) { \
|
||||
/* Bubble up by one. */ \
|
||||
rtree_ctx->l2_cache[i].leafkey = \
|
||||
rtree_ctx->l2_cache[i - 1].leafkey; \
|
||||
rtree_ctx->l2_cache[i].leaf = \
|
||||
rtree_ctx->l2_cache[i - 1].leaf; \
|
||||
rtree_ctx->l2_cache[i - 1].leafkey = \
|
||||
rtree_ctx->cache[slot].leafkey; \
|
||||
rtree_ctx->l2_cache[i - 1].leaf = \
|
||||
rtree_ctx->cache[slot].leaf; \
|
||||
} else { \
|
||||
rtree_ctx->l2_cache[0].leafkey = \
|
||||
rtree_ctx->cache[slot].leafkey; \
|
||||
rtree_ctx->l2_cache[0].leaf = \
|
||||
rtree_ctx->cache[slot].leaf; \
|
||||
} \
|
||||
rtree_ctx->cache[slot].leafkey = leafkey; \
|
||||
rtree_ctx->cache[slot].leaf = leaf; \
|
||||
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \
|
||||
return &leaf[subkey]; \
|
||||
} \
|
||||
} while (0)
|
||||
/* Check the first cache entry. */
|
||||
RTREE_CACHE_CHECK_L2(0);
|
||||
/* Search the remaining cache elements. */
for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
RTREE_CACHE_CHECK_L2(i);
}
#undef RTREE_CACHE_CHECK_L2

return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
dependent, init_missing);
}

static inline bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
extent_t *extent, szind_t szind, bool slab) {
/* Use rtree_clear() to set the extent to NULL. */
assert(extent != NULL);

rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, false, true);
if (elm == NULL) {
return true;
}

assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);

return false;
}

JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
bool dependent) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, dependent, false);
if (!dependent && elm == NULL) {
return NULL;
}
assert(elm != NULL);
return elm;
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return NULL;
}
return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
}

JEMALLOC_ALWAYS_INLINE szind_t
rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return NSIZES;
}
return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
}

/*
* rtree_slab_read() is intentionally omitted because slab is always read in
* conjunction with szind, which makes rtree_szind_slab_read() a better choice.
*/

JEMALLOC_ALWAYS_INLINE bool
rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return true;
}
*r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
return false;
}

JEMALLOC_ALWAYS_INLINE bool
rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return true;
}
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
return false;
}

static inline void
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, szind_t szind, bool slab) {
assert(!slab || szind < NBINS);

rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
}

static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
NULL);
rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false);
}

#endif /* JEMALLOC_INTERNAL_RTREE_H */
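To make the dependent/init_missing contract above concrete, here is a minimal caller-side sketch of a deallocation-style lookup. It is an illustration only; the wrapper function and variable names are not part of this header.

/* Sketch: exercises rtree_szind_slab_read() as declared above. */
static void
example_dalloc_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    void *ptr) {
	szind_t szind;
	bool slab;

	/* dependent == true: the mapping for ptr is known to exist, so the
	 * lookup cannot fail and the return value may be ignored. */
	rtree_szind_slab_read(tsdn, rtree, rtree_ctx, (uintptr_t)ptr, true,
	    &szind, &slab);
	if (slab) {
		/* Small allocation: szind selects a bin size class. */
	} else {
		/* Large allocation backed by its own extent. */
	}
}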
2091
deps/jemalloc/include/jemalloc/internal/size_classes.h
vendored
File diff suppressed because it is too large
231
deps/jemalloc/include/jemalloc/internal/stats.h
vendored
@@ -1,31 +1,51 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_STATS_H
#define JEMALLOC_INTERNAL_STATS_H

typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats_tsd.h"

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* OPTION(opt, var_name, default, set_value_to) */
#define STATS_PRINT_OPTIONS \
OPTION('J', json, false, true) \
OPTION('g', general, true, false) \
OPTION('m', merged, config_stats, false) \
OPTION('d', destroyed, config_stats, false) \
OPTION('a', unmerged, config_stats, false) \
OPTION('b', bins, true, false) \
OPTION('l', large, true, false) \
OPTION('x', mutex, true, false)

struct tcache_bin_stats_s {
/*
* Number of allocation requests that corresponded to the size of this
* bin.
*/
uint64_t nrequests;
enum {
#define OPTION(o, v, d, s) stats_print_option_num_##v,
STATS_PRINT_OPTIONS
#undef OPTION
stats_print_tot_num_options
};

struct malloc_bin_stats_s {
/*
* Current number of bytes allocated, including objects currently
* cached by tcache.
*/
size_t allocated;
/* Options for stats_print. */
extern bool opt_stats_print;
extern char opt_stats_print_opts[stats_print_tot_num_options+1];

/* Implements je_malloc_stats_print. */
void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts);

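The OPTION table above is what gives the opts argument of malloc_stats_print() its meaning: each character present in the string flips the named flag from its default to the set_value_to column ('J' switches to JSON output, 'g' drops the general section, 'b'/'l' drop the per-size-class bin/large sections, and so on). A minimal caller-side sketch using the public, unmangled symbol; the callback name is illustrative:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Forward jemalloc's stats text to stderr instead of the default writer. */
static void
write_to_stderr(void *cbopaque, const char *s) {
	(void)cbopaque;
	fputs(s, stderr);
}

int
main(void) {
	/* "J" requests JSON output; "bl" would instead omit the bin and
	 * large sections from the human-readable report. */
	malloc_stats_print(write_to_stderr, NULL, "J");
	return 0;
}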
/*
* In those architectures that support 64-bit atomics, we use atomic updates for
* our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
* externally.
*/
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Must hold the arena stats mutex while reading atomically. */
typedef uint64_t arena_stats_u64_t;
#endif

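A rough sketch of what that distinction means for code reading these counters. The arena stats mutex and the atomic_load_u64()/malloc_mutex_lock() helpers named here are internal jemalloc APIs, so treat this as an assumption-laden illustration, not copy-paste code.

/* Illustration only: reading one arena_stats_u64_t counter. */
static uint64_t
example_read_counter(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	/* 64-bit atomics available: a relaxed atomic load is enough. */
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	/* No 64-bit atomics: synchronize externally via the stats mutex. */
	malloc_mutex_lock(tsdn, &arena_stats->mtx);
	uint64_t r = *p;
	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
	return r;
#endif
}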
typedef struct malloc_bin_stats_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the bin. Note that tcache may allocate an object, then recycle it
|
||||
@@ -42,132 +62,103 @@ struct malloc_bin_stats_s {
|
||||
*/
|
||||
uint64_t nrequests;
|
||||
|
||||
/*
|
||||
* Current number of regions of this size class, including regions
|
||||
* currently cached by tcache.
|
||||
*/
|
||||
size_t curregs;
|
||||
|
||||
/* Number of tcache fills from this bin. */
|
||||
uint64_t nfills;
|
||||
|
||||
/* Number of tcache flushes to this bin. */
|
||||
uint64_t nflushes;
|
||||
|
||||
/* Total number of runs created for this bin's size class. */
|
||||
uint64_t nruns;
|
||||
/* Total number of slabs created for this bin's size class. */
|
||||
uint64_t nslabs;
|
||||
|
||||
/*
|
||||
* Total number of runs reused by extracting them from the runs tree for
|
||||
* this bin's size class.
|
||||
* Total number of slabs reused by extracting them from the slabs heap
|
||||
* for this bin's size class.
|
||||
*/
|
||||
uint64_t reruns;
|
||||
uint64_t reslabs;
|
||||
|
||||
/* Current number of runs in this bin. */
|
||||
size_t curruns;
|
||||
};
|
||||
/* Current number of slabs in this bin. */
|
||||
size_t curslabs;
|
||||
|
||||
struct malloc_large_stats_s {
|
||||
mutex_prof_data_t mutex_data;
|
||||
} malloc_bin_stats_t;
|
||||
|
||||
typedef struct malloc_large_stats_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the arena. Note that tcache may allocate an object, then recycle it
|
||||
* many times, resulting many increments to nrequests, but only one
|
||||
* each to nmalloc and ndalloc.
|
||||
* the arena.
|
||||
*/
|
||||
uint64_t nmalloc;
|
||||
uint64_t ndalloc;
|
||||
arena_stats_u64_t nmalloc;
|
||||
arena_stats_u64_t ndalloc;
|
||||
|
||||
/*
|
||||
* Number of allocation requests that correspond to this size class.
|
||||
* This includes requests served by tcache, though tcache only
|
||||
* periodically merges into this counter.
|
||||
*/
|
||||
uint64_t nrequests;
|
||||
arena_stats_u64_t nrequests; /* Partially derived. */
|
||||
|
||||
/* Current number of runs of this size class. */
|
||||
size_t curruns;
|
||||
};
|
||||
/* Current number of allocations of this size class. */
|
||||
size_t curlextents; /* Derived. */
|
||||
} malloc_large_stats_t;
|
||||
|
||||
struct arena_stats_s {
|
||||
/* Number of bytes currently mapped. */
|
||||
size_t mapped;
|
||||
typedef struct decay_stats_s {
|
||||
/* Total number of purge sweeps. */
|
||||
arena_stats_u64_t npurge;
|
||||
/* Total number of madvise calls made. */
|
||||
arena_stats_u64_t nmadvise;
|
||||
/* Total number of pages purged. */
|
||||
arena_stats_u64_t purged;
|
||||
} decay_stats_t;
|
||||
|
||||
/*
|
||||
* Total number of purge sweeps, total number of madvise calls made,
|
||||
* and total pages purged in order to keep dirty unused memory under
|
||||
* control.
|
||||
*/
|
||||
uint64_t npurge;
|
||||
uint64_t nmadvise;
|
||||
uint64_t purged;
|
||||
|
||||
/* Per-size-category statistics. */
|
||||
size_t allocated_large;
|
||||
uint64_t nmalloc_large;
|
||||
uint64_t ndalloc_large;
|
||||
uint64_t nrequests_large;
|
||||
|
||||
/*
|
||||
* One element for each possible size class, including sizes that
|
||||
* overlap with bin size classes. This is necessary because ipalloc()
|
||||
* sometimes has to use such large objects in order to assure proper
|
||||
* alignment.
|
||||
*/
|
||||
malloc_large_stats_t *lstats;
|
||||
};
|
||||
|
||||
struct chunk_stats_s {
|
||||
/* Number of chunks that were allocated. */
|
||||
uint64_t nchunks;
|
||||
|
||||
/* High-water mark for number of chunks allocated. */
|
||||
size_t highchunks;
|
||||
|
||||
/*
|
||||
* Current number of chunks allocated. This value isn't maintained for
|
||||
* any other purpose, so keep track of it in order to be able to set
|
||||
* highchunks.
|
||||
*/
|
||||
size_t curchunks;
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
extern bool opt_stats_print;
|
||||
|
||||
extern size_t stats_cactive;
|
||||
|
||||
void stats_print(void (*write)(void *, const char *), void *cbopaque,
|
||||
const char *opts);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
size_t stats_cactive_get(void);
|
||||
void stats_cactive_add(size_t size);
|
||||
void stats_cactive_sub(size_t size);
|
||||
/*
|
||||
* Arena stats. Note that fields marked "derived" are not directly maintained
|
||||
* within the arena code; rather their values are derived during stats merge
|
||||
* requests.
|
||||
*/
|
||||
typedef struct arena_stats_s {
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
malloc_mutex_t mtx;
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
|
||||
JEMALLOC_INLINE size_t
|
||||
stats_cactive_get(void)
|
||||
{
|
||||
/* Number of bytes currently mapped, excluding retained memory. */
|
||||
atomic_zu_t mapped; /* Partially derived. */
|
||||
|
||||
return (atomic_read_z(&stats_cactive));
|
||||
}
|
||||
/*
|
||||
* Number of unused virtual memory bytes currently retained. Retained
|
||||
* bytes are technically mapped (though always decommitted or purged),
|
||||
* but they are excluded from the mapped statistic (above).
|
||||
*/
|
||||
atomic_zu_t retained; /* Derived. */
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
stats_cactive_add(size_t size)
|
||||
{
|
||||
decay_stats_t decay_dirty;
|
||||
decay_stats_t decay_muzzy;
|
||||
|
||||
atomic_add_z(&stats_cactive, size);
|
||||
}
|
||||
atomic_zu_t base; /* Derived. */
|
||||
atomic_zu_t internal;
|
||||
atomic_zu_t resident; /* Derived. */
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
stats_cactive_sub(size_t size)
|
||||
{
|
||||
atomic_zu_t allocated_large; /* Derived. */
|
||||
arena_stats_u64_t nmalloc_large; /* Derived. */
|
||||
arena_stats_u64_t ndalloc_large; /* Derived. */
|
||||
arena_stats_u64_t nrequests_large; /* Derived. */
|
||||
|
||||
atomic_sub_z(&stats_cactive, size);
|
||||
}
|
||||
#endif
|
||||
/* Number of bytes cached in tcache associated with this arena. */
|
||||
atomic_zu_t tcache_bytes; /* Derived. */
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
|
||||
|
||||
/* One element for each large size class. */
|
||||
malloc_large_stats_t lstats[NSIZES - NBINS];
|
||||
|
||||
/* Arena uptime. */
|
||||
nstime_t uptime;
|
||||
} arena_stats_t;
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_STATS_H */
|
||||
|
||||
722
deps/jemalloc/include/jemalloc/internal/tsd.h
vendored
@@ -1,434 +1,324 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_TSD_H
#define JEMALLOC_INTERNAL_TSD_H

/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 8
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/prof_types.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/witness.h"

typedef bool (*malloc_tsd_cleanup_t)(void);
/*
* Thread-Specific-Data layout
* --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
* s: state
* e: tcache_enabled
* m: thread_allocated (config_stats)
* f: thread_deallocated (config_stats)
* p: prof_tdata (config_prof)
* c: rtree_ctx (rtree cache accessed on deallocation)
* t: tcache
* --- data not accessed on tcache fast path: arena-related fields ---
* d: arenas_tdata_bypass
* r: reentrancy_level
* x: narenas_tdata
* i: iarena
* a: arena
* o: arenas_tdata
* Loading TSD data is on the critical path of basically all malloc operations.
* In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
* Use a compact layout to reduce cache footprint.
* +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
* |---------------------------- 1st cacheline ----------------------------|
* | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
* |---------------------------- 2nd cacheline ----------------------------|
* | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
* |---------------------------- 3rd cacheline ----------------------------|
* | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
* +-------------------------------------------------------------------------+
* Note: the entire tcache is embedded into TSD and spans multiple cachelines.
*
* The last 3 members (i, a and o) before tcache aren't really needed on the
* tcache fast path. However we have a number of unused tcache bins and
* witnesses (never touched unless config_debug) at the end of tcache, so we
* place them there to avoid breaking the cachelines and possibly paging in an
* extra page.
*/
#ifdef JEMALLOC_JET
typedef void (*test_callback_t)(int *);
# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
# define MALLOC_TEST_TSD \
O(test_data, int, int) \
O(test_callback, test_callback_t, int)
# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
#else
# define MALLOC_TEST_TSD
# define MALLOC_TEST_TSD_INITIALIZER
#endif

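The MALLOC_TSD table that follows is an X-macro: each O(name, type, nullable-type) row is expanded several times further down in this header to stamp out per-field accessors. Roughly, a row such as O(thread_allocated, uint64_t, uint64_t) produces accessors of this shape (a simplified sketch that folds the *_unsafe variant and the debug assertions into one function):

/* Approximate expansion for O(thread_allocated, uint64_t, uint64_t). */
JEMALLOC_ALWAYS_INLINE uint64_t *
tsd_thread_allocatedp_get(tsd_t *tsd) {
	return &tsd->use_a_getter_or_setter_instead_thread_allocated;
}
JEMALLOC_ALWAYS_INLINE uint64_t
tsd_thread_allocated_get(tsd_t *tsd) {
	return *tsd_thread_allocatedp_get(tsd);
}
JEMALLOC_ALWAYS_INLINE void
tsd_thread_allocated_set(tsd_t *tsd, uint64_t val) {
	*tsd_thread_allocatedp_get(tsd) = val;
}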
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
|
||||
!defined(_WIN32))
|
||||
typedef struct tsd_init_block_s tsd_init_block_t;
|
||||
typedef struct tsd_init_head_s tsd_init_head_t;
|
||||
/* O(name, type, nullable type */
|
||||
#define MALLOC_TSD \
|
||||
O(tcache_enabled, bool, bool) \
|
||||
O(arenas_tdata_bypass, bool, bool) \
|
||||
O(reentrancy_level, int8_t, int8_t) \
|
||||
O(narenas_tdata, uint32_t, uint32_t) \
|
||||
O(thread_allocated, uint64_t, uint64_t) \
|
||||
O(thread_deallocated, uint64_t, uint64_t) \
|
||||
O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
|
||||
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
|
||||
O(iarena, arena_t *, arena_t *) \
|
||||
O(arena, arena_t *, arena_t *) \
|
||||
O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
|
||||
O(tcache, tcache_t, tcache_t) \
|
||||
O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
|
||||
MALLOC_TEST_TSD
|
||||
|
||||
#define TSD_INITIALIZER { \
|
||||
tsd_state_uninitialized, \
|
||||
TCACHE_ENABLED_ZERO_INITIALIZER, \
|
||||
false, \
|
||||
0, \
|
||||
0, \
|
||||
0, \
|
||||
0, \
|
||||
NULL, \
|
||||
RTREE_CTX_ZERO_INITIALIZER, \
|
||||
NULL, \
|
||||
NULL, \
|
||||
NULL, \
|
||||
TCACHE_ZERO_INITIALIZER, \
|
||||
WITNESS_TSD_INITIALIZER \
|
||||
MALLOC_TEST_TSD_INITIALIZER \
|
||||
}
|
||||
|
||||
enum {
|
||||
tsd_state_nominal = 0, /* Common case --> jnz. */
|
||||
tsd_state_nominal_slow = 1, /* Initialized but on slow path. */
|
||||
/* the above 2 nominal states should be lower values. */
|
||||
tsd_state_nominal_max = 1, /* used for comparison only. */
|
||||
tsd_state_minimal_initialized = 2,
|
||||
tsd_state_purgatory = 3,
|
||||
tsd_state_reincarnated = 4,
|
||||
tsd_state_uninitialized = 5
|
||||
};
|
||||
|
||||
/* Manually limit tsd_state_t to a single byte. */
|
||||
typedef uint8_t tsd_state_t;
|
||||
|
||||
/* The actual tsd. */
|
||||
struct tsd_s {
|
||||
/*
|
||||
* The contents should be treated as totally opaque outside the tsd
|
||||
* module. Access any thread-local state through the getters and
|
||||
* setters below.
|
||||
*/
|
||||
tsd_state_t state;
|
||||
#define O(n, t, nt) \
|
||||
t use_a_getter_or_setter_instead_##n;
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
};
|
||||
|
||||
/*
|
||||
* Wrapper around tsd_t that makes it possible to avoid implicit conversion
|
||||
* between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
|
||||
* explicitly converted to tsd_t, which is non-nullable.
|
||||
*/
|
||||
struct tsdn_s {
|
||||
tsd_t tsd;
|
||||
};
|
||||
#define TSDN_NULL ((tsdn_t *)0)
|
||||
JEMALLOC_ALWAYS_INLINE tsdn_t *
|
||||
tsd_tsdn(tsd_t *tsd) {
|
||||
return (tsdn_t *)tsd;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
tsdn_null(const tsdn_t *tsdn) {
|
||||
return tsdn == NULL;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsdn_tsd(tsdn_t *tsdn) {
|
||||
assert(!tsdn_null(tsdn));
|
||||
|
||||
return &tsdn->tsd;
|
||||
}
|
||||
|
||||
void *malloc_tsd_malloc(size_t size);
|
||||
void malloc_tsd_dalloc(void *wrapper);
|
||||
void malloc_tsd_cleanup_register(bool (*f)(void));
|
||||
tsd_t *malloc_tsd_boot0(void);
|
||||
void malloc_tsd_boot1(void);
|
||||
void tsd_cleanup(void *arg);
|
||||
tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
|
||||
void tsd_slow_update(tsd_t *tsd);
|
||||
|
||||
/*
|
||||
* We put the platform-specific data declarations and inlines into their own
|
||||
* header files to avoid cluttering this file. They define tsd_boot0,
|
||||
* tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set.
|
||||
*/
|
||||
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
|
||||
#include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
|
||||
#elif (defined(JEMALLOC_TLS))
|
||||
#include "jemalloc/internal/tsd_tls.h"
|
||||
#elif (defined(_WIN32))
|
||||
#include "jemalloc/internal/tsd_win.h"
|
||||
#else
|
||||
#include "jemalloc/internal/tsd_generic.h"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
|
||||
* are four macros that support (at least) three use cases: file-private,
|
||||
* library-private, and library-private inlined. Following is an example
|
||||
* library-private tsd variable:
|
||||
*
|
||||
* In example.h:
|
||||
* typedef struct {
|
||||
* int x;
|
||||
* int y;
|
||||
* } example_t;
|
||||
* #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
|
||||
* malloc_tsd_protos(, example, example_t *)
|
||||
* malloc_tsd_externs(example, example_t *)
|
||||
* In example.c:
|
||||
* malloc_tsd_data(, example, example_t *, EX_INITIALIZER)
|
||||
* malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER,
|
||||
* example_tsd_cleanup)
|
||||
*
|
||||
* The result is a set of generated functions, e.g.:
|
||||
*
|
||||
* bool example_tsd_boot(void) {...}
|
||||
* example_t **example_tsd_get() {...}
|
||||
* void example_tsd_set(example_t **val) {...}
|
||||
*
|
||||
* Note that all of the functions deal in terms of (a_type *) rather than
|
||||
* (a_type) so that it is possible to support non-pointer types (unlike
|
||||
* pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
|
||||
* cast to (void *). This means that the cleanup function needs to cast *and*
|
||||
* dereference the function argument, e.g.:
|
||||
*
|
||||
* void
|
||||
* example_tsd_cleanup(void *arg)
|
||||
* {
|
||||
* example_t *example = *(example_t **)arg;
|
||||
*
|
||||
* [...]
|
||||
* if ([want the cleanup function to be called again]) {
|
||||
* example_tsd_set(&example);
|
||||
* }
|
||||
* }
|
||||
*
|
||||
* If example_tsd_set() is called within example_tsd_cleanup(), it will be
|
||||
* called again. This is similar to how pthreads TSD destruction works, except
|
||||
* that pthreads only calls the cleanup function again if the value was set to
|
||||
* non-NULL.
|
||||
* tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
|
||||
* foo. This omits some safety checks, and so can be used during tsd
|
||||
* initialization and cleanup.
|
||||
*/
|
||||
|
||||
/* malloc_tsd_protos(). */
|
||||
#define malloc_tsd_protos(a_attr, a_name, a_type) \
|
||||
a_attr bool \
|
||||
a_name##_tsd_boot(void); \
|
||||
a_attr a_type * \
|
||||
a_name##_tsd_get(void); \
|
||||
a_attr void \
|
||||
a_name##_tsd_set(a_type *val);
|
||||
|
||||
/* malloc_tsd_externs(). */
|
||||
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
|
||||
#define malloc_tsd_externs(a_name, a_type) \
|
||||
extern __thread a_type a_name##_tls; \
|
||||
extern __thread bool a_name##_initialized; \
|
||||
extern bool a_name##_booted;
|
||||
#elif (defined(JEMALLOC_TLS))
|
||||
#define malloc_tsd_externs(a_name, a_type) \
|
||||
extern __thread a_type a_name##_tls; \
|
||||
extern pthread_key_t a_name##_tsd; \
|
||||
extern bool a_name##_booted;
|
||||
#elif (defined(_WIN32))
|
||||
#define malloc_tsd_externs(a_name, a_type) \
|
||||
extern DWORD a_name##_tsd; \
|
||||
extern bool a_name##_booted;
|
||||
#else
|
||||
#define malloc_tsd_externs(a_name, a_type) \
|
||||
extern pthread_key_t a_name##_tsd; \
|
||||
extern tsd_init_head_t a_name##_tsd_init_head; \
|
||||
extern bool a_name##_booted;
|
||||
#endif
|
||||
|
||||
/* malloc_tsd_data(). */
|
||||
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
|
||||
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
|
||||
a_attr __thread a_type JEMALLOC_TLS_MODEL \
|
||||
a_name##_tls = a_initializer; \
|
||||
a_attr __thread bool JEMALLOC_TLS_MODEL \
|
||||
a_name##_initialized = false; \
|
||||
a_attr bool a_name##_booted = false;
|
||||
#elif (defined(JEMALLOC_TLS))
|
||||
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
|
||||
a_attr __thread a_type JEMALLOC_TLS_MODEL \
|
||||
a_name##_tls = a_initializer; \
|
||||
a_attr pthread_key_t a_name##_tsd; \
|
||||
a_attr bool a_name##_booted = false;
|
||||
#elif (defined(_WIN32))
|
||||
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
|
||||
a_attr DWORD a_name##_tsd; \
|
||||
a_attr bool a_name##_booted = false;
|
||||
#else
|
||||
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
|
||||
a_attr pthread_key_t a_name##_tsd; \
|
||||
a_attr tsd_init_head_t a_name##_tsd_init_head = { \
|
||||
ql_head_initializer(blocks), \
|
||||
MALLOC_MUTEX_INITIALIZER \
|
||||
}; \
|
||||
a_attr bool a_name##_booted = false;
|
||||
#endif
|
||||
|
||||
/* malloc_tsd_funcs(). */
|
||||
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
|
||||
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
|
||||
a_cleanup) \
|
||||
/* Initialization/cleanup. */ \
|
||||
a_attr bool \
|
||||
a_name##_tsd_cleanup_wrapper(void) \
|
||||
{ \
|
||||
\
|
||||
if (a_name##_initialized) { \
|
||||
a_name##_initialized = false; \
|
||||
a_cleanup(&a_name##_tls); \
|
||||
} \
|
||||
return (a_name##_initialized); \
|
||||
} \
|
||||
a_attr bool \
|
||||
a_name##_tsd_boot(void) \
|
||||
{ \
|
||||
\
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) { \
|
||||
malloc_tsd_cleanup_register( \
|
||||
&a_name##_tsd_cleanup_wrapper); \
|
||||
} \
|
||||
a_name##_booted = true; \
|
||||
return (false); \
|
||||
} \
|
||||
/* Get/set. */ \
|
||||
a_attr a_type * \
|
||||
a_name##_tsd_get(void) \
|
||||
{ \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
return (&a_name##_tls); \
|
||||
} \
|
||||
a_attr void \
|
||||
a_name##_tsd_set(a_type *val) \
|
||||
{ \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
a_name##_tls = (*val); \
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) \
|
||||
a_name##_initialized = true; \
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE t * \
|
||||
tsd_##n##p_get_unsafe(tsd_t *tsd) { \
|
||||
return &tsd->use_a_getter_or_setter_instead_##n; \
|
||||
}
|
||||
#elif (defined(JEMALLOC_TLS))
|
||||
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
|
||||
a_cleanup) \
|
||||
/* Initialization/cleanup. */ \
|
||||
a_attr bool \
|
||||
a_name##_tsd_boot(void) \
|
||||
{ \
|
||||
\
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) { \
|
||||
if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \
|
||||
return (true); \
|
||||
} \
|
||||
a_name##_booted = true; \
|
||||
return (false); \
|
||||
} \
|
||||
/* Get/set. */ \
|
||||
a_attr a_type * \
|
||||
a_name##_tsd_get(void) \
|
||||
{ \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
return (&a_name##_tls); \
|
||||
} \
|
||||
a_attr void \
|
||||
a_name##_tsd_set(a_type *val) \
|
||||
{ \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
a_name##_tls = (*val); \
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) { \
|
||||
if (pthread_setspecific(a_name##_tsd, \
|
||||
(void *)(&a_name##_tls))) { \
|
||||
malloc_write("<jemalloc>: Error" \
|
||||
" setting TSD for "#a_name"\n"); \
|
||||
if (opt_abort) \
|
||||
abort(); \
|
||||
} \
|
||||
} \
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE t * \
|
||||
tsd_##n##p_get(tsd_t *tsd) { \
|
||||
assert(tsd->state == tsd_state_nominal || \
|
||||
tsd->state == tsd_state_nominal_slow || \
|
||||
tsd->state == tsd_state_reincarnated || \
|
||||
tsd->state == tsd_state_minimal_initialized); \
|
||||
return tsd_##n##p_get_unsafe(tsd); \
|
||||
}
|
||||
#elif (defined(_WIN32))
|
||||
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
|
||||
a_cleanup) \
|
||||
/* Data structure. */ \
|
||||
typedef struct { \
|
||||
bool initialized; \
|
||||
a_type val; \
|
||||
} a_name##_tsd_wrapper_t; \
|
||||
/* Initialization/cleanup. */ \
|
||||
a_attr bool \
|
||||
a_name##_tsd_cleanup_wrapper(void) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper; \
|
||||
\
|
||||
wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \
|
||||
if (wrapper == NULL) \
|
||||
return (false); \
|
||||
if (a_cleanup != malloc_tsd_no_cleanup && \
|
||||
wrapper->initialized) { \
|
||||
a_type val = wrapper->val; \
|
||||
a_type tsd_static_data = a_initializer; \
|
||||
wrapper->initialized = false; \
|
||||
wrapper->val = tsd_static_data; \
|
||||
a_cleanup(&val); \
|
||||
if (wrapper->initialized) { \
|
||||
/* Trigger another cleanup round. */ \
|
||||
return (true); \
|
||||
} \
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
/*
|
||||
* tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
|
||||
* isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
|
||||
*/
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE nt * \
|
||||
tsdn_##n##p_get(tsdn_t *tsdn) { \
|
||||
if (tsdn_null(tsdn)) { \
|
||||
return NULL; \
|
||||
} \
|
||||
malloc_tsd_dalloc(wrapper); \
|
||||
return (false); \
|
||||
} \
|
||||
a_attr bool \
|
||||
a_name##_tsd_boot(void) \
|
||||
{ \
|
||||
\
|
||||
a_name##_tsd = TlsAlloc(); \
|
||||
if (a_name##_tsd == TLS_OUT_OF_INDEXES) \
|
||||
return (true); \
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) { \
|
||||
malloc_tsd_cleanup_register( \
|
||||
&a_name##_tsd_cleanup_wrapper); \
|
||||
} \
|
||||
a_name##_booted = true; \
|
||||
return (false); \
|
||||
} \
|
||||
/* Get/set. */ \
|
||||
a_attr a_name##_tsd_wrapper_t * \
|
||||
a_name##_tsd_get_wrapper(void) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
|
||||
TlsGetValue(a_name##_tsd); \
|
||||
\
|
||||
if (wrapper == NULL) { \
|
||||
wrapper = (a_name##_tsd_wrapper_t *) \
|
||||
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
|
||||
if (wrapper == NULL) { \
|
||||
malloc_write("<jemalloc>: Error allocating" \
|
||||
" TSD for "#a_name"\n"); \
|
||||
abort(); \
|
||||
} else { \
|
||||
static a_type tsd_static_data = a_initializer; \
|
||||
wrapper->initialized = false; \
|
||||
wrapper->val = tsd_static_data; \
|
||||
} \
|
||||
if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \
|
||||
malloc_write("<jemalloc>: Error setting" \
|
||||
" TSD for "#a_name"\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
} \
|
||||
return (wrapper); \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_name##_tsd_get(void) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper; \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
wrapper = a_name##_tsd_get_wrapper(); \
|
||||
return (&wrapper->val); \
|
||||
} \
|
||||
a_attr void \
|
||||
a_name##_tsd_set(a_type *val) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper; \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
wrapper = a_name##_tsd_get_wrapper(); \
|
||||
wrapper->val = *(val); \
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) \
|
||||
wrapper->initialized = true; \
|
||||
tsd_t *tsd = tsdn_tsd(tsdn); \
|
||||
return (nt *)tsd_##n##p_get(tsd); \
|
||||
}
|
||||
#else
|
||||
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
|
||||
a_cleanup) \
|
||||
/* Data structure. */ \
|
||||
typedef struct { \
|
||||
bool initialized; \
|
||||
a_type val; \
|
||||
} a_name##_tsd_wrapper_t; \
|
||||
/* Initialization/cleanup. */ \
|
||||
a_attr void \
|
||||
a_name##_tsd_cleanup_wrapper(void *arg) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\
|
||||
\
|
||||
if (a_cleanup != malloc_tsd_no_cleanup && \
|
||||
wrapper->initialized) { \
|
||||
wrapper->initialized = false; \
|
||||
a_cleanup(&wrapper->val); \
|
||||
if (wrapper->initialized) { \
|
||||
/* Trigger another cleanup round. */ \
|
||||
if (pthread_setspecific(a_name##_tsd, \
|
||||
(void *)wrapper)) { \
|
||||
malloc_write("<jemalloc>: Error" \
|
||||
" setting TSD for "#a_name"\n"); \
|
||||
if (opt_abort) \
|
||||
abort(); \
|
||||
} \
|
||||
return; \
|
||||
} \
|
||||
} \
|
||||
malloc_tsd_dalloc(wrapper); \
|
||||
} \
|
||||
a_attr bool \
|
||||
a_name##_tsd_boot(void) \
|
||||
{ \
|
||||
\
|
||||
if (pthread_key_create(&a_name##_tsd, \
|
||||
a_name##_tsd_cleanup_wrapper) != 0) \
|
||||
return (true); \
|
||||
a_name##_booted = true; \
|
||||
return (false); \
|
||||
} \
|
||||
/* Get/set. */ \
|
||||
a_attr a_name##_tsd_wrapper_t * \
|
||||
a_name##_tsd_get_wrapper(void) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
|
||||
pthread_getspecific(a_name##_tsd); \
|
||||
\
|
||||
if (wrapper == NULL) { \
|
||||
tsd_init_block_t block; \
|
||||
wrapper = tsd_init_check_recursion( \
|
||||
&a_name##_tsd_init_head, &block); \
|
||||
if (wrapper) \
|
||||
return (wrapper); \
|
||||
wrapper = (a_name##_tsd_wrapper_t *) \
|
||||
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
|
||||
block.data = wrapper; \
|
||||
if (wrapper == NULL) { \
|
||||
malloc_write("<jemalloc>: Error allocating" \
|
||||
" TSD for "#a_name"\n"); \
|
||||
abort(); \
|
||||
} else { \
|
||||
static a_type tsd_static_data = a_initializer; \
|
||||
wrapper->initialized = false; \
|
||||
wrapper->val = tsd_static_data; \
|
||||
} \
|
||||
if (pthread_setspecific(a_name##_tsd, \
|
||||
(void *)wrapper)) { \
|
||||
malloc_write("<jemalloc>: Error setting" \
|
||||
" TSD for "#a_name"\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
tsd_init_finish(&a_name##_tsd_init_head, &block); \
|
||||
} \
|
||||
return (wrapper); \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_name##_tsd_get(void) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper; \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
wrapper = a_name##_tsd_get_wrapper(); \
|
||||
return (&wrapper->val); \
|
||||
} \
|
||||
a_attr void \
|
||||
a_name##_tsd_set(a_type *val) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper; \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
wrapper = a_name##_tsd_get_wrapper(); \
|
||||
wrapper->val = *(val); \
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) \
|
||||
wrapper->initialized = true; \
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE t \
|
||||
tsd_##n##_get(tsd_t *tsd) { \
|
||||
return *tsd_##n##p_get(tsd); \
|
||||
}
|
||||
#endif
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE void \
|
||||
tsd_##n##_set(tsd_t *tsd, t val) { \
|
||||
assert(tsd->state != tsd_state_reincarnated && \
|
||||
tsd->state != tsd_state_minimal_initialized); \
|
||||
*tsd_##n##p_get(tsd) = val; \
|
||||
}
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
|
||||
!defined(_WIN32))
|
||||
struct tsd_init_block_s {
|
||||
ql_elm(tsd_init_block_t) link;
|
||||
pthread_t thread;
|
||||
void *data;
|
||||
};
|
||||
struct tsd_init_head_s {
|
||||
ql_head(tsd_init_block_t) blocks;
|
||||
malloc_mutex_t lock;
|
||||
};
|
||||
#endif
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
tsd_assert_fast(tsd_t *tsd) {
|
||||
assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
|
||||
tsd_reentrancy_level_get(tsd) == 0);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
tsd_fast(tsd_t *tsd) {
|
||||
bool fast = (tsd->state == tsd_state_nominal);
|
||||
if (fast) {
|
||||
tsd_assert_fast(tsd);
|
||||
}
|
||||
|
||||
void *malloc_tsd_malloc(size_t size);
|
||||
void malloc_tsd_dalloc(void *wrapper);
|
||||
void malloc_tsd_no_cleanup(void *);
|
||||
void malloc_tsd_cleanup_register(bool (*f)(void));
|
||||
void malloc_tsd_boot(void);
|
||||
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
|
||||
!defined(_WIN32))
|
||||
void *tsd_init_check_recursion(tsd_init_head_t *head,
|
||||
tsd_init_block_t *block);
|
||||
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
|
||||
#endif
|
||||
return fast;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsd_fetch_impl(bool init, bool minimal) {
|
||||
tsd_t *tsd = tsd_get(init);
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
if (!init && tsd_get_allocates() && tsd == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
assert(tsd != NULL);
|
||||
|
||||
if (unlikely(tsd->state != tsd_state_nominal)) {
|
||||
return tsd_fetch_slow(tsd, minimal);
|
||||
}
|
||||
assert(tsd_fast(tsd));
|
||||
tsd_assert_fast(tsd);
|
||||
|
||||
return tsd;
|
||||
}
|
||||
|
||||
/* Get a minimal TSD that requires no cleanup. See comments in free(). */
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsd_fetch_min(void) {
|
||||
return tsd_fetch_impl(true, true);
|
||||
}
|
||||
|
||||
/* For internal background threads use only. */
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsd_internal_fetch(void) {
|
||||
tsd_t *tsd = tsd_fetch_min();
|
||||
/* Use reincarnated state to prevent full initialization. */
|
||||
tsd->state = tsd_state_reincarnated;
|
||||
|
||||
return tsd;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsd_fetch(void) {
|
||||
return tsd_fetch_impl(true, false);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
tsd_nominal(tsd_t *tsd) {
|
||||
return (tsd->state <= tsd_state_nominal_max);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tsdn_t *
|
||||
tsdn_fetch(void) {
|
||||
if (!tsd_booted_get()) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return tsd_tsdn(tsd_fetch_impl(false, false));
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsd_rtree_ctx(tsd_t *tsd) {
return tsd_rtree_ctxp_get(tsd);
}

JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
/*
* If tsd cannot be accessed, initialize the fallback rtree_ctx and
* return a pointer to it.
*/
if (unlikely(tsdn_null(tsdn))) {
rtree_ctx_data_init(fallback);
return fallback;
}
return tsd_rtree_ctx(tsdn_tsd(tsdn));
}

#endif /* JEMALLOC_INTERNAL_TSD_H */
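The fallback pattern in tsdn_rtree_ctx() above is how call sites that only hold a tsdn_t obtain a usable rtree cache. A typical caller-side shape, shown as an illustrative sketch rather than code from this header:

/* Sketch: obtain an rtree_ctx whether or not TSD is available. */
static void
example_use_rtree_ctx(tsdn_t *tsdn) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	/* rtree_ctx now points either at the thread's cached context or at
	 * the freshly initialized stack fallback; use it for rtree lookups. */
	(void)rtree_ctx;
}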
167
deps/jemalloc/include/jemalloc/internal/util.h
vendored
@@ -1,143 +1,50 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
#ifndef JEMALLOC_INTERNAL_UTIL_H
|
||||
#define JEMALLOC_INTERNAL_UTIL_H
|
||||
|
||||
/* Size of stack-allocated buffer passed to buferror(). */
|
||||
#define BUFERROR_BUF 64
|
||||
#define UTIL_INLINE static inline
|
||||
|
||||
/*
|
||||
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
|
||||
* large enough for all possible uses within jemalloc.
|
||||
*/
|
||||
#define MALLOC_PRINTF_BUFSIZE 4096
|
||||
/* Junk fill patterns. */
|
||||
#ifndef JEMALLOC_ALLOC_JUNK
|
||||
# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
|
||||
#endif
|
||||
#ifndef JEMALLOC_FREE_JUNK
|
||||
# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Wrap a cpp argument that contains commas such that it isn't broken up into
|
||||
* multiple arguments.
|
||||
*/
|
||||
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
|
||||
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
|
||||
|
||||
/* cpp macro definition stringification. */
|
||||
#define STRINGIFY_HELPER(x) #x
|
||||
#define STRINGIFY(x) STRINGIFY_HELPER(x)
|
||||
|
||||
/*
|
||||
* Silence compiler warnings due to uninitialized values. This is used
|
||||
* wherever the compiler fails to recognize that the variable is never used
|
||||
* uninitialized.
|
||||
*/
|
||||
#ifdef JEMALLOC_CC_SILENCE
|
||||
# define JEMALLOC_CC_SILENCE_INIT(v) = v
|
||||
#define JEMALLOC_CC_SILENCE_INIT(v) = v
|
||||
|
||||
#ifdef __GNUC__
|
||||
# define likely(x) __builtin_expect(!!(x), 1)
|
||||
# define unlikely(x) __builtin_expect(!!(x), 0)
|
||||
#else
|
||||
# define JEMALLOC_CC_SILENCE_INIT(v)
|
||||
# define likely(x) !!(x)
|
||||
# define unlikely(x) !!(x)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Define a custom assert() in order to reduce the chances of deadlock during
|
||||
* assertion failure.
|
||||
*/
|
||||
#ifndef assert
|
||||
#define assert(e) do { \
|
||||
if (config_debug && !(e)) { \
|
||||
malloc_printf( \
|
||||
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
|
||||
__FILE__, __LINE__, #e); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
|
||||
# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
|
||||
#endif
|
||||
|
||||
#ifndef not_reached
|
||||
#define not_reached() do { \
|
||||
if (config_debug) { \
|
||||
malloc_printf( \
|
||||
"<jemalloc>: %s:%d: Unreachable code reached\n", \
|
||||
__FILE__, __LINE__); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#ifndef not_implemented
|
||||
#define not_implemented() do { \
|
||||
if (config_debug) { \
|
||||
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
|
||||
__FILE__, __LINE__); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#ifndef assert_not_implemented
|
||||
#define assert_not_implemented(e) do { \
|
||||
if (config_debug && !(e)) \
|
||||
not_implemented(); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
|
||||
#define cassert(c) do { \
|
||||
if ((c) == false) \
|
||||
not_reached(); \
|
||||
} while (0)
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
int buferror(int err, char *buf, size_t buflen);
|
||||
uintmax_t malloc_strtoumax(const char *restrict nptr,
|
||||
char **restrict endptr, int base);
|
||||
void malloc_write(const char *s);
|
||||
|
||||
/*
|
||||
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
|
||||
* point math.
|
||||
*/
|
||||
int malloc_vsnprintf(char *str, size_t size, const char *format,
|
||||
va_list ap);
|
||||
int malloc_snprintf(char *str, size_t size, const char *format, ...)
|
||||
JEMALLOC_ATTR(format(printf, 3, 4));
|
||||
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
|
||||
const char *format, va_list ap);
|
||||
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
|
||||
const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
|
||||
void malloc_printf(const char *format, ...)
|
||||
JEMALLOC_ATTR(format(printf, 1, 2));
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
size_t pow2_ceil(size_t x);
|
||||
void set_errno(int errnum);
|
||||
int get_errno(void);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
|
||||
/* Compute the smallest power of 2 that is >= x. */
|
||||
JEMALLOC_INLINE size_t
|
||||
pow2_ceil(size_t x)
|
||||
{
|
||||
|
||||
x--;
|
||||
x |= x >> 1;
|
||||
x |= x >> 2;
|
||||
x |= x >> 4;
|
||||
x |= x >> 8;
|
||||
x |= x >> 16;
|
||||
#if (LG_SIZEOF_PTR == 3)
|
||||
x |= x >> 32;
|
||||
#endif
|
||||
x++;
|
||||
return (x);
|
||||
}
|
||||
|
||||
/* Sets error code */
|
||||
JEMALLOC_INLINE void
|
||||
set_errno(int errnum)
|
||||
{
|
||||
#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
|
||||
|
||||
/* Set error code. */
|
||||
UTIL_INLINE void
|
||||
set_errno(int errnum) {
|
||||
#ifdef _WIN32
|
||||
SetLastError(errnum);
|
||||
#else
|
||||
@@ -145,18 +52,16 @@ set_errno(int errnum)
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Get last error code */
|
||||
JEMALLOC_INLINE int
|
||||
get_errno(void)
|
||||
{
|
||||
|
||||
/* Get last error code. */
|
||||
UTIL_INLINE int
|
||||
get_errno(void) {
|
||||
#ifdef _WIN32
|
||||
return (GetLastError());
|
||||
return GetLastError();
|
||||
#else
|
||||
return (errno);
|
||||
return errno;
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
#undef UTIL_INLINE
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_UTIL_H */
|
||||
|
||||
456
deps/jemalloc/include/jemalloc/jemalloc.h
vendored
@@ -1,45 +1,203 @@
|
||||
#ifndef JEMALLOC_H_
|
||||
#define JEMALLOC_H_
|
||||
#define JEMALLOC_H_
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Defined if __attribute__((...)) syntax is supported. */
|
||||
#define JEMALLOC_HAVE_ATTR
|
||||
|
||||
/* Defined if alloc_size attribute is supported. */
|
||||
#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
|
||||
|
||||
/* Defined if format(gnu_printf, ...) attribute is supported. */
|
||||
#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
|
||||
|
||||
/* Defined if format(printf, ...) attribute is supported. */
|
||||
#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
|
||||
|
||||
/*
|
||||
* Define overrides for non-standard allocator-related functions if they are
|
||||
* present on the system.
|
||||
*/
|
||||
#define JEMALLOC_OVERRIDE_MEMALIGN
|
||||
#define JEMALLOC_OVERRIDE_VALLOC
|
||||
|
||||
/*
|
||||
* At least Linux omits the "const" in:
|
||||
*
|
||||
* size_t malloc_usable_size(const void *ptr);
|
||||
*
|
||||
* Match the operating system's prototype.
|
||||
*/
|
||||
#define JEMALLOC_USABLE_SIZE_CONST
|
||||
|
||||
/*
|
||||
* If defined, specify throw() for the public function prototypes when compiling
|
||||
* with C++. The only justification for this is to match the prototypes that
|
||||
* glibc defines.
|
||||
*/
|
||||
#define JEMALLOC_USE_CXX_THROW
|
||||
|
||||
#ifdef _MSC_VER
|
||||
# ifdef _WIN64
|
||||
# define LG_SIZEOF_PTR_WIN 3
|
||||
# else
|
||||
# define LG_SIZEOF_PTR_WIN 2
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Name mangling for public symbols is controlled by --with-mangling and
|
||||
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
|
||||
* these macro definitions.
|
||||
*/
|
||||
#ifndef JEMALLOC_NO_RENAME
|
||||
# define je_aligned_alloc aligned_alloc
|
||||
# define je_calloc calloc
|
||||
# define je_dallocx dallocx
|
||||
# define je_free free
|
||||
# define je_mallctl mallctl
|
||||
# define je_mallctlbymib mallctlbymib
|
||||
# define je_mallctlnametomib mallctlnametomib
|
||||
# define je_malloc malloc
|
||||
# define je_malloc_conf malloc_conf
|
||||
# define je_malloc_message malloc_message
|
||||
# define je_malloc_stats_print malloc_stats_print
|
||||
# define je_malloc_usable_size malloc_usable_size
|
||||
# define je_mallocx mallocx
|
||||
# define je_nallocx nallocx
|
||||
# define je_posix_memalign posix_memalign
|
||||
# define je_rallocx rallocx
|
||||
# define je_realloc realloc
|
||||
# define je_sallocx sallocx
|
||||
# define je_sdallocx sdallocx
|
||||
# define je_xallocx xallocx
|
||||
# define je_memalign memalign
|
||||
# define je_valloc valloc
|
||||
#endif
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include <limits.h>
|
||||
#include <strings.h>
|
||||
|
||||
#define JEMALLOC_VERSION "3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340"
#define JEMALLOC_VERSION_MAJOR 3
#define JEMALLOC_VERSION_MINOR 6
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 0
#define JEMALLOC_VERSION_GID "46c0af68bd248b04df75e4f92d5fb804c3d75340"
#define JEMALLOC_VERSION "5.0.1-0-g896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb"
#define JEMALLOC_VERSION_MAJOR 5
#define JEMALLOC_VERSION_MINOR 0
#define JEMALLOC_VERSION_BUGFIX 1
#define JEMALLOC_VERSION_NREV 0
#define JEMALLOC_VERSION_GID "896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb"

# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
#define MALLOCX_LG_ALIGN(la) ((int)(la))
#if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
#else
# define MALLOCX_ALIGN(a) \
((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
ffs((int)(((size_t)(a))>>32))+31))
#endif
#define MALLOCX_ZERO ((int)0x40)
/*
* Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
* encodes MALLOCX_TCACHE_NONE.
*/
#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
/*
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
*/
#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)

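Taken together, the MALLOCX_* encodings compose into the flags argument of the *x API declared later in this header. A small usage sketch against the public API (nothing here is specific to this repository):

#include <jemalloc/jemalloc.h>

int
main(void) {
	/* 4 KiB, zero-filled, 64-byte aligned, bypassing the thread cache. */
	void *p = mallocx(4096,
	    MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
	if (p != NULL) {
		dallocx(p, 0);
	}
	return 0;
}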
#ifdef JEMALLOC_EXPERIMENTAL
|
||||
# define ALLOCM_LG_ALIGN(la) (la)
|
||||
# if LG_SIZEOF_PTR == 2
|
||||
# define ALLOCM_ALIGN(a) (ffs(a)-1)
|
||||
# else
|
||||
# define ALLOCM_ALIGN(a) \
|
||||
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
|
||||
/*
* Use as arena index in "arena.<i>.{purge,decay,dss}" and
* "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
* definition is intentionally specified in raw decimal format to support
* cpp-based string concatenation, e.g.
*
* #define STRINGIFY_HELPER(x) #x
* #define STRINGIFY(x) STRINGIFY_HELPER(x)
*
* mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
* 0);
*/
#define MALLCTL_ARENAS_ALL 4096
/*
* Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
* destroyed arenas.
*/
#define MALLCTL_ARENAS_DESTROYED 4097

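As a companion to the purge call shown in the comment above, the other common mallctl pattern is reading a statistic; refreshing the epoch first makes the stats snapshot current. A caller-side sketch using only documented mallctl names:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	size_t allocated, asz = sizeof(allocated);

	/* Advance the epoch so subsequent stats reads are up to date. */
	mallctl("epoch", &epoch, &esz, &epoch, esz);
	if (mallctl("stats.allocated", &allocated, &asz, NULL, 0) == 0) {
		printf("allocated: %zu bytes\n", allocated);
	}
	return 0;
}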
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
|
||||
# define JEMALLOC_CXX_THROW throw()
|
||||
#else
|
||||
# define JEMALLOC_CXX_THROW
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
# define JEMALLOC_ATTR(s)
|
||||
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
|
||||
# define JEMALLOC_ALLOC_SIZE(s)
|
||||
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
|
||||
# ifndef JEMALLOC_EXPORT
|
||||
# ifdef DLLEXPORT
|
||||
# define JEMALLOC_EXPORT __declspec(dllexport)
|
||||
# else
|
||||
# define JEMALLOC_EXPORT __declspec(dllimport)
|
||||
# endif
|
||||
# endif
|
||||
# define ALLOCM_ZERO ((int)0x40)
|
||||
# define ALLOCM_NO_MOVE ((int)0x80)
|
||||
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
|
||||
# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
|
||||
# define ALLOCM_SUCCESS 0
|
||||
# define ALLOCM_ERR_OOM 1
|
||||
# define ALLOCM_ERR_NOT_MOVED 2
|
||||
# define JEMALLOC_FORMAT_PRINTF(s, i)
|
||||
# define JEMALLOC_NOINLINE __declspec(noinline)
|
||||
# ifdef __cplusplus
|
||||
# define JEMALLOC_NOTHROW __declspec(nothrow)
|
||||
# else
|
||||
# define JEMALLOC_NOTHROW
|
||||
# endif
|
||||
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
|
||||
# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
|
||||
# if _MSC_VER >= 1900 && !defined(__EDG__)
|
||||
# define JEMALLOC_ALLOCATOR __declspec(allocator)
|
||||
# else
|
||||
# define JEMALLOC_ALLOCATOR
|
||||
# endif
|
||||
#elif defined(JEMALLOC_HAVE_ATTR)
|
||||
# define JEMALLOC_ATTR(s) __attribute__((s))
|
||||
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
|
||||
# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
|
||||
# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
|
||||
# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
|
||||
# else
|
||||
# define JEMALLOC_ALLOC_SIZE(s)
|
||||
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
|
||||
# endif
|
||||
# ifndef JEMALLOC_EXPORT
|
||||
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
|
||||
# endif
|
||||
# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
|
||||
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
|
||||
# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
|
||||
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
|
||||
# else
|
||||
# define JEMALLOC_FORMAT_PRINTF(s, i)
|
||||
# endif
|
||||
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
|
||||
# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
|
||||
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
|
||||
# define JEMALLOC_RESTRICT_RETURN
|
||||
# define JEMALLOC_ALLOCATOR
|
||||
#else
|
||||
# define JEMALLOC_ATTR(s)
|
||||
# define JEMALLOC_ALIGNED(s)
|
||||
# define JEMALLOC_ALLOC_SIZE(s)
|
||||
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
|
||||
# define JEMALLOC_EXPORT
|
||||
# define JEMALLOC_FORMAT_PRINTF(s, i)
|
||||
# define JEMALLOC_NOINLINE
|
||||
# define JEMALLOC_NOTHROW
|
||||
# define JEMALLOC_SECTION(s)
|
||||
# define JEMALLOC_RESTRICT_RETURN
|
||||
# define JEMALLOC_ALLOCATOR
|
||||
#endif
|
||||
|
||||
/*
|
||||
@@ -51,55 +209,141 @@ extern JEMALLOC_EXPORT const char *je_malloc_conf;
|
||||
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
|
||||
const char *s);
|
||||
|
||||
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
|
||||
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
|
||||
JEMALLOC_ATTR(malloc);
|
||||
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
|
||||
size_t size) JEMALLOC_ATTR(nonnull(1));
|
||||
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
|
||||
JEMALLOC_ATTR(malloc);
|
||||
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
|
||||
JEMALLOC_EXPORT void je_free(void *ptr);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_malloc(size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
|
||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
|
||||
size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
|
||||
size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
|
||||
JEMALLOC_ALLOC_SIZE(2);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
|
||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
|
||||
JEMALLOC_CXX_THROW;
|
||||
|
||||
JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
|
||||
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
|
||||
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
|
||||
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
|
||||
int flags) JEMALLOC_ALLOC_SIZE(2);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
|
||||
size_t extra, int flags);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
|
||||
int flags) JEMALLOC_ATTR(pure);
|
||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
|
||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
|
||||
int flags);
|
||||
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
|
||||
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
|
||||
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
|
||||
JEMALLOC_ATTR(pure);
|
||||
|
||||
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
|
||||
size_t *oldlenp, void *newp, size_t newlen);
|
||||
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
|
||||
size_t *miblenp);
|
||||
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
|
||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
|
||||
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
|
||||
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
|
||||
const char *), void *je_cbopaque, const char *opts);
|
||||
JEMALLOC_EXPORT size_t je_malloc_usable_size(
|
||||
JEMALLOC_USABLE_SIZE_CONST void *ptr);
|
||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
|
||||
size_t *mibp, size_t *miblenp);
|
||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
|
||||
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
|
||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
|
||||
void (*write_cb)(void *, const char *), void *je_cbopaque,
|
||||
const char *opts);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
|
||||
JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
|
||||
|
||||
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
|
||||
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
|
||||
JEMALLOC_ATTR(malloc);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_OVERRIDE_VALLOC
|
||||
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
|
||||
JEMALLOC_ATTR(malloc);
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_EXPERIMENTAL
|
||||
JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size,
|
||||
int flags) JEMALLOC_ATTR(nonnull(1));
|
||||
JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size,
|
||||
size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
|
||||
JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags)
|
||||
JEMALLOC_ATTR(nonnull(1));
|
||||
JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags)
|
||||
JEMALLOC_ATTR(nonnull(1));
|
||||
JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
|
||||
#endif
|
||||
typedef struct extent_hooks_s extent_hooks_t;
|
||||
|
||||
/*
|
||||
* void *
|
||||
* extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
|
||||
* size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
|
||||
*/
|
||||
typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
|
||||
bool *, unsigned);
|
||||
|
||||
/*
|
||||
* bool
|
||||
* extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
|
||||
* bool committed, unsigned arena_ind);
|
||||
*/
|
||||
typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
|
||||
unsigned);
|
||||
|
||||
/*
|
||||
* void
|
||||
* extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
|
||||
* bool committed, unsigned arena_ind);
|
||||
*/
|
||||
typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
|
||||
unsigned);
|
||||
|
||||
/*
|
||||
* bool
|
||||
* extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
|
||||
* size_t offset, size_t length, unsigned arena_ind);
|
||||
*/
|
||||
typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
|
||||
unsigned);
|
||||
|
||||
/*
|
||||
* bool
|
||||
* extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
|
||||
* size_t offset, size_t length, unsigned arena_ind);
|
||||
*/
|
||||
typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
|
||||
size_t, unsigned);
|
||||
|
||||
/*
|
||||
* bool
|
||||
* extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
|
||||
* size_t offset, size_t length, unsigned arena_ind);
|
||||
*/
|
||||
typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
|
||||
unsigned);
|
||||
|
||||
/*
|
||||
* bool
|
||||
* extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
|
||||
* size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
|
||||
*/
|
||||
typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
|
||||
bool, unsigned);
|
||||
|
||||
/*
|
||||
* bool
|
||||
* extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
|
||||
* void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
|
||||
*/
|
||||
typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
|
||||
bool, unsigned);
|
||||
|
||||
struct extent_hooks_s {
|
||||
extent_alloc_t *alloc;
|
||||
extent_dalloc_t *dalloc;
|
||||
extent_destroy_t *destroy;
|
||||
extent_commit_t *commit;
|
||||
extent_decommit_t *decommit;
|
||||
extent_purge_t *purge_lazy;
|
||||
extent_purge_t *purge_forced;
|
||||
extent_split_t *split;
|
||||
extent_merge_t *merge;
|
||||
};
|
||||
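
struct extent_hooks_s above is the table an application hands to jemalloc to take over extent management; each member matches one of the function typedefs documented before it. The following is a hedged sketch, not part of the diff, of installing such a table: it assumes the jemalloc 5 "arena.0.extent_hooks" mallctl name and the unprefixed mallctl symbol, wraps only the alloc hook with a logging shim, and delegates everything else to the hooks it replaces.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static extent_hooks_t original_hooks;   /* copy of the hooks being wrapped */
static extent_hooks_t logging_hooks;

/* Same shape as extent_alloc_t above: log, then delegate to the saved hooks. */
static void *
logging_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	(void)extent_hooks;
	fprintf(stderr, "extent_alloc: %zu bytes for arena %u\n", size, arena_ind);
	return original_hooks.alloc(&original_hooks, new_addr, size, alignment,
	    zero, commit, arena_ind);
}

/* Read arena 0's current hooks, then install a wrapper whose alloc logs. */
static int
install_logging_hooks(void) {
	extent_hooks_t *old;
	size_t sz = sizeof(old);
	if (mallctl("arena.0.extent_hooks", (void *)&old, &sz, NULL, 0) != 0) {
		return -1;
	}
	original_hooks = *old;
	logging_hooks = original_hooks;
	logging_hooks.alloc = logging_alloc;

	extent_hooks_t *new_hooks = &logging_hooks;
	return mallctl("arena.0.extent_hooks", NULL, NULL, (void *)&new_hooks,
	    sizeof(new_hooks));
}
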
|
||||
/*
|
||||
* By default application code must explicitly refer to mangled symbol names,
|
||||
@@ -112,32 +356,28 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
|
||||
# ifndef JEMALLOC_NO_DEMANGLE
|
||||
# define JEMALLOC_NO_DEMANGLE
|
||||
# endif
|
||||
# define aligned_alloc je_aligned_alloc
|
||||
# define calloc je_calloc
|
||||
# define dallocx je_dallocx
|
||||
# define free je_free
|
||||
# define mallctl je_mallctl
|
||||
# define mallctlbymib je_mallctlbymib
|
||||
# define mallctlnametomib je_mallctlnametomib
|
||||
# define malloc je_malloc
|
||||
# define malloc_conf je_malloc_conf
|
||||
# define malloc_message je_malloc_message
|
||||
# define malloc je_malloc
|
||||
# define calloc je_calloc
|
||||
# define posix_memalign je_posix_memalign
|
||||
# define aligned_alloc je_aligned_alloc
|
||||
# define realloc je_realloc
|
||||
# define free je_free
|
||||
# define mallocx je_mallocx
|
||||
# define rallocx je_rallocx
|
||||
# define xallocx je_xallocx
|
||||
# define sallocx je_sallocx
|
||||
# define dallocx je_dallocx
|
||||
# define nallocx je_nallocx
|
||||
# define mallctl je_mallctl
|
||||
# define mallctlnametomib je_mallctlnametomib
|
||||
# define mallctlbymib je_mallctlbymib
|
||||
# define malloc_stats_print je_malloc_stats_print
|
||||
# define malloc_usable_size je_malloc_usable_size
|
||||
# define mallocx je_mallocx
|
||||
# define nallocx je_nallocx
|
||||
# define posix_memalign je_posix_memalign
|
||||
# define rallocx je_rallocx
|
||||
# define realloc je_realloc
|
||||
# define sallocx je_sallocx
|
||||
# define sdallocx je_sdallocx
|
||||
# define xallocx je_xallocx
|
||||
# define memalign je_memalign
|
||||
# define valloc je_valloc
|
||||
# define allocm je_allocm
|
||||
# define dallocm je_dallocm
|
||||
# define nallocm je_nallocm
|
||||
# define rallocm je_rallocm
|
||||
# define sallocm je_sallocm
|
||||
#endif
|
||||
|
||||
/*
|
||||
@@ -148,35 +388,31 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
|
||||
* and/or --with-jemalloc-prefix.
|
||||
*/
|
||||
#ifndef JEMALLOC_NO_DEMANGLE
|
||||
# undef je_aligned_alloc
|
||||
# undef je_calloc
|
||||
# undef je_dallocx
|
||||
# undef je_free
|
||||
# undef je_mallctl
|
||||
# undef je_mallctlbymib
|
||||
# undef je_mallctlnametomib
|
||||
# undef je_malloc
|
||||
# undef je_malloc_conf
|
||||
# undef je_malloc_message
|
||||
# undef je_malloc
|
||||
# undef je_calloc
|
||||
# undef je_posix_memalign
|
||||
# undef je_aligned_alloc
|
||||
# undef je_realloc
|
||||
# undef je_free
|
||||
# undef je_mallocx
|
||||
# undef je_rallocx
|
||||
# undef je_xallocx
|
||||
# undef je_sallocx
|
||||
# undef je_dallocx
|
||||
# undef je_nallocx
|
||||
# undef je_mallctl
|
||||
# undef je_mallctlnametomib
|
||||
# undef je_mallctlbymib
|
||||
# undef je_malloc_stats_print
|
||||
# undef je_malloc_usable_size
|
||||
# undef je_mallocx
|
||||
# undef je_nallocx
|
||||
# undef je_posix_memalign
|
||||
# undef je_rallocx
|
||||
# undef je_realloc
|
||||
# undef je_sallocx
|
||||
# undef je_sdallocx
|
||||
# undef je_xallocx
|
||||
# undef je_memalign
|
||||
# undef je_valloc
|
||||
# undef je_allocm
|
||||
# undef je_dallocm
|
||||
# undef je_nallocm
|
||||
# undef je_rallocm
|
||||
# undef je_sallocm
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
};
|
||||
}
|
||||
#endif
|
||||
#endif /* JEMALLOC_H_ */
|
||||
|
||||
57 deps/jemalloc/include/msvc_compat/strings.h vendored
@@ -3,21 +3,56 @@
|
||||
|
||||
/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
|
||||
* for both */
|
||||
#include <intrin.h>
|
||||
#pragma intrinsic(_BitScanForward)
|
||||
static __forceinline int ffsl(long x)
|
||||
{
|
||||
#ifdef _MSC_VER
|
||||
# include <intrin.h>
|
||||
# pragma intrinsic(_BitScanForward)
|
||||
static __forceinline int ffsl(long x) {
|
||||
unsigned long i;
|
||||
|
||||
if (_BitScanForward(&i, x))
|
||||
return (i + 1);
|
||||
return (0);
|
||||
if (_BitScanForward(&i, x)) {
|
||||
return i + 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __forceinline int ffs(int x)
|
||||
{
|
||||
|
||||
return (ffsl(x));
|
||||
static __forceinline int ffs(int x) {
|
||||
return ffsl(x);
|
||||
}
|
||||
|
||||
# ifdef _M_X64
|
||||
# pragma intrinsic(_BitScanForward64)
|
||||
# endif
|
||||
|
||||
static __forceinline int ffsll(unsigned __int64 x) {
|
||||
unsigned long i;
|
||||
#ifdef _M_X64
|
||||
if (_BitScanForward64(&i, x)) {
|
||||
return i + 1;
|
||||
}
|
||||
return 0;
|
||||
#else
|
||||
// Fallback for 32-bit build where 64-bit version not available
|
||||
// assuming little endian
|
||||
union {
|
||||
unsigned __int64 ll;
|
||||
unsigned long l[2];
|
||||
} s;
|
||||
|
||||
s.ll = x;
|
||||
|
||||
if (_BitScanForward(&i, s.l[0])) {
|
||||
return i + 1;
|
||||
} else if(_BitScanForward(&i, s.l[1])) {
|
||||
return i + 33;
|
||||
}
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
#else
|
||||
# define ffsll(x) __builtin_ffsll(x)
|
||||
# define ffsl(x) __builtin_ffsl(x)
|
||||
# define ffs(x) __builtin_ffs(x)
|
||||
#endif
|
||||
|
||||
#endif /* strings_h */
|
||||
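
The shim above gives MSVC the POSIX ffs family: the return value is the 1-based index of the least-significant set bit, or 0 for a zero argument. A small self-check of that contract follows (a sketch, not part of the diff; it assumes the build places this msvc_compat directory on the include path so that <strings.h> resolves to the shim under MSVC).

#include <assert.h>
#include <strings.h>  /* resolves to this shim when building with MSVC */

int main(void) {
	assert(ffs(0) == 0);        /* no bit set */
	assert(ffs(1) == 1);        /* bit 0 -> index 1 */
	assert(ffs(0x8) == 4);      /* bit 3 -> index 4 */
	assert(ffsl(0x100L) == 9);  /* bit 8 -> index 9 */
	return 0;
}
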
|
||||
311 deps/jemalloc/jemalloc_defs.h.in.cmake vendored
@@ -1,4 +1,6 @@
|
||||
/* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */
|
||||
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
|
||||
#ifndef JEMALLOC_INTERNAL_DEFS_H_
|
||||
#define JEMALLOC_INTERNAL_DEFS_H_
|
||||
/*
|
||||
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
|
||||
* public APIs to be prefixed. This makes it possible, with some care, to use
|
||||
@@ -8,30 +10,16 @@
|
||||
/* #undef JEMALLOC_CPREFIX */
|
||||
|
||||
/*
|
||||
* Name mangling for public symbols is controlled by --with-mangling and
|
||||
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
|
||||
* these macro definitions.
|
||||
* Define overrides for non-standard allocator-related functions if they are
|
||||
* present on the system.
|
||||
*/
|
||||
#define je_malloc_conf malloc_conf
|
||||
#define je_malloc_message malloc_message
|
||||
#define je_malloc malloc
|
||||
#define je_calloc calloc
|
||||
#define je_posix_memalign posix_memalign
|
||||
#define je_aligned_alloc aligned_alloc
|
||||
#define je_realloc realloc
|
||||
#define je_free free
|
||||
#define je_malloc_usable_size malloc_usable_size
|
||||
#define je_malloc_stats_print malloc_stats_print
|
||||
#define je_mallctl mallctl
|
||||
#define je_mallctlnametomib mallctlnametomib
|
||||
#define je_mallctlbymib mallctlbymib
|
||||
#define je_memalign memalign
|
||||
#define je_valloc valloc
|
||||
#define je_allocm allocm
|
||||
#define je_rallocm rallocm
|
||||
#define je_sallocm sallocm
|
||||
#define je_dallocm dallocm
|
||||
#define je_nallocm nallocm
|
||||
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
|
||||
#define JEMALLOC_OVERRIDE___LIBC_FREE
|
||||
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
|
||||
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
|
||||
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
|
||||
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
|
||||
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
|
||||
|
||||
/*
|
||||
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
|
||||
@@ -39,8 +27,7 @@
|
||||
* from being exported, but for static libraries, naming collisions are a real
|
||||
* possibility.
|
||||
*/
|
||||
#define JEMALLOC_PRIVATE_NAMESPACE ""
|
||||
#define JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix) string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix
|
||||
#define JEMALLOC_PRIVATE_NAMESPACE je_
|
||||
|
||||
/*
|
||||
* Hyper-threaded CPUs may need a special instruction inside spin loops in
|
||||
@@ -48,20 +35,27 @@
|
||||
*/
|
||||
#define CPU_SPINWAIT __asm__ volatile("pause")
|
||||
|
||||
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
|
||||
/* #undef JEMALLOC_ATOMIC9 */
|
||||
|
||||
/*
|
||||
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
|
||||
* documented in the atomic(3) manual page.
|
||||
* Number of significant bits in virtual addresses. This may be less than the
|
||||
* total number of bits in a pointer, e.g. on x64, for which the uppermost 16
|
||||
* bits are the same as bit 47.
|
||||
*/
|
||||
/* #undef JEMALLOC_OSATOMIC */
|
||||
#define LG_VADDR @JEM_VADDRBITS@
|
||||
|
||||
/* Defined if C11 atomics are available. */
|
||||
#define JEMALLOC_C11_ATOMICS 1
|
||||
|
||||
/* Defined if GCC __atomic atomics are available. */
|
||||
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
|
||||
|
||||
/* Defined if GCC __sync atomics are available. */
|
||||
#define JEMALLOC_GCC_SYNC_ATOMICS 1
|
||||
|
||||
/*
|
||||
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
|
||||
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
|
||||
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
|
||||
* functions are defined in libgcc instead of being inlines)
|
||||
* functions are defined in libgcc instead of being inlines).
|
||||
*/
|
||||
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */
|
||||
|
||||
@@ -69,16 +63,60 @@
|
||||
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
|
||||
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
|
||||
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
|
||||
* functions are defined in libgcc instead of being inlines)
|
||||
* functions are defined in libgcc instead of being inlines).
|
||||
*/
|
||||
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
|
||||
|
||||
/*
|
||||
* Defined if __builtin_clz() and __builtin_clzl() are available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_BUILTIN_CLZ
|
||||
|
||||
/*
|
||||
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
|
||||
*/
|
||||
/* #undef JEMALLOC_OS_UNFAIR_LOCK */
|
||||
|
||||
/*
|
||||
* Defined if OSSpin*() functions are available, as provided by Darwin, and
|
||||
* documented in the spinlock(3) manual page.
|
||||
*/
|
||||
/* #undef JEMALLOC_OSSPIN */
|
||||
|
||||
/* Defined if syscall(2) is usable. */
|
||||
#define JEMALLOC_USE_SYSCALL
|
||||
|
||||
/*
|
||||
* Defined if secure_getenv(3) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_SECURE_GETENV
|
||||
|
||||
/*
|
||||
* Defined if issetugid(2) is available.
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_ISSETUGID */
|
||||
|
||||
/* Defined if pthread_atfork(3) is available. */
|
||||
#define JEMALLOC_HAVE_PTHREAD_ATFORK
|
||||
|
||||
/* Defined if pthread_setname_np(3) is available. */
|
||||
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
|
||||
|
||||
/*
|
||||
* Defined if mach_absolute_time() is available.
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
|
||||
|
||||
/*
|
||||
* Defined if _malloc_thread_cleanup() exists. At least in the case of
|
||||
* FreeBSD, pthread_key_create() allocates, which if used during malloc
|
||||
@@ -102,41 +140,9 @@
|
||||
*/
|
||||
/* #undef JEMALLOC_MUTEX_INIT_CB */
|
||||
|
||||
/* Defined if __attribute__((...)) syntax is supported. */
|
||||
#define JEMALLOC_HAVE_ATTR
|
||||
#ifdef JEMALLOC_HAVE_ATTR
|
||||
# define JEMALLOC_ATTR(s) __attribute__((s))
|
||||
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
|
||||
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
|
||||
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
|
||||
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
|
||||
#elif _MSC_VER
|
||||
# define JEMALLOC_ATTR(s)
|
||||
# ifdef DLLEXPORT
|
||||
# define JEMALLOC_EXPORT __declspec(dllexport)
|
||||
# else
|
||||
# define JEMALLOC_EXPORT __declspec(dllimport)
|
||||
# endif
|
||||
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
|
||||
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
|
||||
# define JEMALLOC_NOINLINE __declspec(noinline)
|
||||
#else
|
||||
# define JEMALLOC_ATTR(s)
|
||||
# define JEMALLOC_EXPORT
|
||||
# define JEMALLOC_ALIGNED(s)
|
||||
# define JEMALLOC_SECTION(s)
|
||||
# define JEMALLOC_NOINLINE
|
||||
#endif
|
||||
|
||||
/* Defined if sbrk() is supported. */
|
||||
#define JEMALLOC_HAVE_SBRK
|
||||
|
||||
/* Non-empty if the tls_model attribute is supported. */
|
||||
#define JEMALLOC_TLS_MODEL @JEM_TLSMODEL@
|
||||
|
||||
/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
|
||||
/* #undef JEMALLOC_CC_SILENCE */
|
||||
|
||||
/*
|
||||
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
|
||||
* inline functions.
|
||||
@@ -144,7 +150,7 @@
|
||||
/* #undef JEMALLOC_DEBUG */
|
||||
|
||||
/* JEMALLOC_STATS enables statistics calculation. */
|
||||
#define JEMALLOC_STATS
|
||||
/* #undef JEMALLOC_STATS */
|
||||
|
||||
/* JEMALLOC_PROF enables allocation profiling. */
|
||||
/* #undef JEMALLOC_PROF */
|
||||
@@ -159,104 +165,127 @@
|
||||
/* #undef JEMALLOC_PROF_GCC */
|
||||
|
||||
/*
|
||||
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
|
||||
* This makes it possible to allocate/deallocate objects without any locking
|
||||
* when the cache is in the steady state.
|
||||
*/
|
||||
#define JEMALLOC_TCACHE
|
||||
|
||||
/*
|
||||
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
|
||||
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
|
||||
* segment (DSS).
|
||||
*/
|
||||
/* #undef JEMALLOC_DSS */
|
||||
#define JEMALLOC_DSS
|
||||
|
||||
/* Support memory filling (junk/zero/quarantine/redzone). */
|
||||
/* Support memory filling (junk/zero). */
|
||||
#define JEMALLOC_FILL
|
||||
|
||||
/* Support the experimental API. */
|
||||
#define JEMALLOC_EXPERIMENTAL
|
||||
|
||||
/* Support utrace(2)-based tracing. */
|
||||
/* #undef JEMALLOC_UTRACE */
|
||||
|
||||
/* Support Valgrind. */
|
||||
/* #undef JEMALLOC_VALGRIND */
|
||||
|
||||
/* Support optional abort() on OOM. */
|
||||
/* #undef JEMALLOC_XMALLOC */
|
||||
|
||||
/* Support lazy locking (avoid locking unless a second thread is launched). */
|
||||
/* #undef JEMALLOC_LAZY_LOCK */
|
||||
|
||||
/* One page is 2^STATIC_PAGE_SHIFT bytes. */
|
||||
#define STATIC_PAGE_SHIFT 12
|
||||
/*
|
||||
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
|
||||
* classes).
|
||||
*/
|
||||
/* #undef LG_QUANTUM */
|
||||
|
||||
/* One page is 2^LG_PAGE bytes. */
|
||||
#define LG_PAGE 12
|
||||
|
||||
/*
|
||||
* If defined, use munmap() to unmap freed chunks, rather than storing them for
|
||||
* later reuse. This is disabled by default on Linux because common sequences
|
||||
* of mmap()/munmap() calls will cause virtual memory map holes.
|
||||
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
|
||||
* system does not explicitly support huge pages; system calls that require
|
||||
* explicit huge page support are separately configured.
|
||||
*/
|
||||
/* #undef JEMALLOC_MUNMAP */
|
||||
#define LG_HUGEPAGE 21
|
||||
|
||||
/*
|
||||
* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
|
||||
* disabled by default because it is Linux-specific and it will cause virtual
|
||||
* memory map holes, much like munmap(2) does.
|
||||
* If defined, adjacent virtual memory mappings with identical attributes
|
||||
* automatically coalesce, and they fragment when changes are made to subranges.
|
||||
* This is the normal order of things for mmap()/munmap(), but on Windows
|
||||
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
|
||||
* mappings do *not* coalesce/fragment.
|
||||
*/
|
||||
/* #undef JEMALLOC_MREMAP */
|
||||
#define JEMALLOC_MAPS_COALESCE
|
||||
|
||||
/*
|
||||
* If defined, retain memory for later reuse by default rather than using e.g.
|
||||
* munmap() to unmap freed extents. This is enabled on 64-bit Linux because
|
||||
* common sequences of mmap()/munmap() calls will cause virtual memory map
|
||||
* holes.
|
||||
*/
|
||||
#define JEMALLOC_RETAIN
|
||||
|
||||
/* TLS is used to map arenas and magazine caches to threads. */
|
||||
#define JEMALLOC_TLS
|
||||
|
||||
/*
|
||||
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
|
||||
* within jemalloc-owned chunks before dereferencing them.
|
||||
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
|
||||
* Don't use this directly; instead use unreachable() from util.h
|
||||
*/
|
||||
/* #undef JEMALLOC_IVSALLOC */
|
||||
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
|
||||
|
||||
/*
|
||||
* Define overrides for non-standard allocator-related functions if they
|
||||
* are present on the system.
|
||||
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
|
||||
* use ffs_*() from util.h.
|
||||
*/
|
||||
#define JEMALLOC_OVERRIDE_MEMALIGN
|
||||
#define JEMALLOC_OVERRIDE_VALLOC
|
||||
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
|
||||
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
|
||||
#define JEMALLOC_INTERNAL_FFS __builtin_ffs
|
||||
|
||||
/*
|
||||
* At least Linux omits the "const" in:
|
||||
*
|
||||
* size_t malloc_usable_size(const void *ptr);
|
||||
*
|
||||
* Match the operating system's prototype.
|
||||
* If defined, explicitly attempt to more uniformly distribute large allocation
|
||||
* pointer alignments across all cache indices.
|
||||
*/
|
||||
#define JEMALLOC_USABLE_SIZE_CONST
|
||||
#define JEMALLOC_CACHE_OBLIVIOUS
|
||||
|
||||
/*
|
||||
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
|
||||
*/
|
||||
/* #undef JEMALLOC_ZONE */
|
||||
/* #undef JEMALLOC_ZONE_VERSION */
|
||||
|
||||
/*
|
||||
* Methods for determining whether the OS overcommits.
|
||||
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
|
||||
* /proc/sys/vm.overcommit_memory file.
|
||||
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
|
||||
*/
|
||||
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
|
||||
#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
|
||||
|
||||
/* Defined if madvise(2) is available. */
|
||||
#define JEMALLOC_HAVE_MADVISE
|
||||
|
||||
/*
|
||||
* Methods for purging unused pages differ between operating systems.
|
||||
*
|
||||
* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
|
||||
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
|
||||
* will be discarded rather than swapped out.
|
||||
* madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
|
||||
* defined, this immediately discards pages,
|
||||
* such that new pages will be demand-zeroed if
|
||||
* the address region is later touched.
|
||||
* madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
|
||||
* unused, such that they will be discarded rather
|
||||
* than swapped out.
|
||||
* the address region is later touched;
|
||||
* otherwise this behaves similarly to
|
||||
* MADV_FREE, though typically with higher
|
||||
* system overhead.
|
||||
*/
|
||||
@JEM_MADFREE_DEF@ JEMALLOC_PURGE_MADVISE_FREE
|
||||
#define JEMALLOC_PURGE_MADVISE_DONTNEED
|
||||
/* #undef JEMALLOC_PURGE_MADVISE_FREE */
|
||||
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS 1
|
||||
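
The purge macros above choose between the two madvise(2) strategies described in the comment: MADV_FREE leaves pages mapped and lets the kernel reclaim them lazily, while MADV_DONTNEED discards them immediately so the next touch is demand-zeroed. A minimal sketch of the underlying call, with a hypothetical helper name, follows.

#include <stddef.h>
#include <sys/mman.h>

/* purge_pages() is a hypothetical helper; addr and length must be page-aligned. */
static int purge_pages(void *addr, size_t length) {
#ifdef MADV_FREE
	/* Lazy purge: pages stay mapped and are reclaimed under memory pressure. */
	return madvise(addr, length, MADV_FREE);
#else
	/* Forced purge: pages are dropped now and demand-zeroed on the next touch. */
	return madvise(addr, length, MADV_DONTNEED);
#endif
}
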
|
||||
/*
|
||||
* Define if operating system has alloca.h header.
|
||||
* Defined if transparent huge pages (THPs) are supported via the
|
||||
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
|
||||
*/
|
||||
#define JEMALLOC_HAS_ALLOCA_H
|
||||
#define JEMALLOC_THP
|
||||
|
||||
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
|
||||
#define LG_SIZEOF_PTR @JEM_SIZEDEF@
|
||||
/* Define if operating system has alloca.h header. */
|
||||
#define JEMALLOC_HAS_ALLOCA_H 1
|
||||
|
||||
/* C99 restrict keyword supported. */
|
||||
#define JEMALLOC_HAS_RESTRICT 1
|
||||
|
||||
/* For use by hash code. */
|
||||
/* #undef JEMALLOC_BIG_ENDIAN */
|
||||
|
||||
/* sizeof(int) == 2^LG_SIZEOF_INT. */
|
||||
#define LG_SIZEOF_INT 2
|
||||
@@ -264,11 +293,51 @@
|
||||
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
|
||||
#define LG_SIZEOF_LONG @JEM_SIZEDEF@
|
||||
|
||||
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
|
||||
#define LG_SIZEOF_LONG_LONG 3
|
||||
|
||||
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
|
||||
#define LG_SIZEOF_INTMAX_T 3
|
||||
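
The LG_SIZEOF_* values encode type sizes as powers of two, so sizeof(T) == 1 << LG_SIZEOF_T must hold on the configured platform. A quick compile-time check of that relationship (a sketch assuming the values fixed above, with JEM_SIZEDEF expanding to 2 on 32-bit and 3 on 64-bit builds):

#include <assert.h>
#include <stdint.h>

static_assert(sizeof(int) == (1 << 2), "matches LG_SIZEOF_INT");
static_assert(sizeof(long long) == (1 << 3), "matches LG_SIZEOF_LONG_LONG");
static_assert(sizeof(intmax_t) == (1 << 3), "matches LG_SIZEOF_INTMAX_T");
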
|
||||
/* C99 restrict keyword supported. */
|
||||
/*#define JEMALLOC_HAS_RESTRICT*/
|
||||
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
|
||||
#define JEMALLOC_GLIBC_MALLOC_HOOK
|
||||
|
||||
/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
|
||||
#undef JEMALLOC_CODE_COVERAGE
|
||||
/* glibc memalign hook. */
|
||||
#define JEMALLOC_GLIBC_MEMALIGN_HOOK
|
||||
|
||||
/* pthread support */
|
||||
#define JEMALLOC_HAVE_PTHREAD
|
||||
|
||||
/* dlsym() support */
|
||||
#define JEMALLOC_HAVE_DLSYM
|
||||
|
||||
/* Adaptive mutex support in pthreads. */
|
||||
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
|
||||
|
||||
/* GNU specific sched_getcpu support */
|
||||
#define JEMALLOC_HAVE_SCHED_GETCPU
|
||||
|
||||
/* GNU specific sched_setaffinity support */
|
||||
#define JEMALLOC_HAVE_SCHED_SETAFFINITY
|
||||
|
||||
/*
|
||||
* If defined, all the features necessary for background threads are present.
|
||||
*/
|
||||
#define JEMALLOC_BACKGROUND_THREAD 1
|
||||
|
||||
/*
|
||||
* If defined, jemalloc symbols are not exported (doesn't work when
|
||||
* JEMALLOC_PREFIX is not defined).
|
||||
*/
|
||||
/* #undef JEMALLOC_EXPORT */
|
||||
|
||||
/* config.malloc_conf options string. */
|
||||
#define JEMALLOC_CONFIG_MALLOC_CONF ""
|
||||
|
||||
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
|
||||
#define JEMALLOC_IS_MALLOC 1
|
||||
|
||||
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
|
||||
#define LG_SIZEOF_PTR @JEM_SIZEDEF@
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
|
||||
4280 deps/jemalloc/src/arena.c vendored
File diff suppressed because it is too large

496 deps/jemalloc/src/base.c vendored
@@ -1,142 +1,402 @@
|
||||
#define JEMALLOC_BASE_C_
|
||||
#include "jemalloc/internal/jemalloc_internal.h"
|
||||
#define JEMALLOC_BASE_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/extent_mmap.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/sz.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
static malloc_mutex_t base_mtx;
|
||||
static base_t *b0;
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static void *
|
||||
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
|
||||
void *addr;
|
||||
bool zero = true;
|
||||
bool commit = true;
|
||||
|
||||
assert(size == HUGEPAGE_CEILING(size));
|
||||
|
||||
if (extent_hooks == &extent_hooks_default) {
|
||||
addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
|
||||
} else {
|
||||
/* No arena context as we are creating new arenas. */
|
||||
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
|
||||
pre_reentrancy(tsd, NULL);
|
||||
addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
|
||||
&zero, &commit, ind);
|
||||
post_reentrancy(tsd);
|
||||
}
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
static void
|
||||
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
|
||||
size_t size) {
|
||||
/*
|
||||
* Cascade through dalloc, decommit, purge_forced, and purge_lazy,
|
||||
* stopping at first success. This cascade is performed for consistency
|
||||
* with the cascade in extent_dalloc_wrapper() because an application's
|
||||
* custom hooks may not support e.g. dalloc. This function is only ever
|
||||
* called as a side effect of arena destruction, so although it might
|
||||
* seem pointless to do anything besides dalloc here, the application
|
||||
* may in fact want the end state of all associated virtual memory to be
|
||||
* in some consistent-but-allocated state.
|
||||
*/
|
||||
if (extent_hooks == &extent_hooks_default) {
|
||||
if (!extent_dalloc_mmap(addr, size)) {
|
||||
return;
|
||||
}
|
||||
if (!pages_decommit(addr, size)) {
|
||||
return;
|
||||
}
|
||||
if (!pages_purge_forced(addr, size)) {
|
||||
return;
|
||||
}
|
||||
if (!pages_purge_lazy(addr, size)) {
|
||||
return;
|
||||
}
|
||||
/* Nothing worked. This should never happen. */
|
||||
not_reached();
|
||||
} else {
|
||||
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
|
||||
pre_reentrancy(tsd, NULL);
|
||||
if (extent_hooks->dalloc != NULL &&
|
||||
!extent_hooks->dalloc(extent_hooks, addr, size, true,
|
||||
ind)) {
|
||||
goto label_done;
|
||||
}
|
||||
if (extent_hooks->decommit != NULL &&
|
||||
!extent_hooks->decommit(extent_hooks, addr, size, 0, size,
|
||||
ind)) {
|
||||
goto label_done;
|
||||
}
|
||||
if (extent_hooks->purge_forced != NULL &&
|
||||
!extent_hooks->purge_forced(extent_hooks, addr, size, 0,
|
||||
size, ind)) {
|
||||
goto label_done;
|
||||
}
|
||||
if (extent_hooks->purge_lazy != NULL &&
|
||||
!extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
|
||||
ind)) {
|
||||
goto label_done;
|
||||
}
|
||||
/* Nothing worked. That's the application's problem. */
|
||||
label_done:
|
||||
post_reentrancy(tsd);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
|
||||
size_t size) {
|
||||
size_t sn;
|
||||
|
||||
sn = *extent_sn_next;
|
||||
(*extent_sn_next)++;
|
||||
|
||||
extent_binit(extent, addr, size, sn);
|
||||
}
|
||||
|
||||
static void *
|
||||
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
|
||||
size_t alignment) {
|
||||
void *ret;
|
||||
|
||||
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
|
||||
assert(size == ALIGNMENT_CEILING(size, alignment));
|
||||
|
||||
*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
|
||||
alignment) - (uintptr_t)extent_addr_get(extent);
|
||||
ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
|
||||
assert(extent_bsize_get(extent) >= *gap_size + size);
|
||||
extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
|
||||
*gap_size + size), extent_bsize_get(extent) - *gap_size - size,
|
||||
extent_sn_get(extent));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
|
||||
size_t gap_size, void *addr, size_t size) {
|
||||
if (extent_bsize_get(extent) > 0) {
|
||||
/*
|
||||
* Compute the index for the largest size class that does not
|
||||
* exceed extent's size.
|
||||
*/
|
||||
szind_t index_floor =
|
||||
sz_size2index(extent_bsize_get(extent) + 1) - 1;
|
||||
extent_heap_insert(&base->avail[index_floor], extent);
|
||||
}
|
||||
|
||||
if (config_stats) {
|
||||
base->allocated += size;
|
||||
/*
|
||||
* Add one PAGE to base_resident for every page boundary that is
|
||||
* crossed by the new allocation.
|
||||
*/
|
||||
base->resident += PAGE_CEILING((uintptr_t)addr + size) -
|
||||
PAGE_CEILING((uintptr_t)addr - gap_size);
|
||||
assert(base->allocated <= base->resident);
|
||||
assert(base->resident <= base->mapped);
|
||||
}
|
||||
}
|
||||
|
||||
static void *
|
||||
base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
|
||||
size_t size, size_t alignment) {
|
||||
void *ret;
|
||||
size_t gap_size;
|
||||
|
||||
ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
|
||||
base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Current pages that are being used for internal memory allocations. These
|
||||
* pages are carved up in cacheline-size quanta, so that there is no chance of
|
||||
* false cache line sharing.
|
||||
* Allocate a block of virtual memory that is large enough to start with a
|
||||
* base_block_t header, followed by an object of specified size and alignment.
|
||||
* On success a pointer to the initialized base_block_t header is returned.
|
||||
*/
|
||||
static void *base_pages;
|
||||
static void *base_next_addr;
|
||||
static void *base_past_addr; /* Addr immediately past base_pages. */
|
||||
static extent_node_t *base_nodes;
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static bool base_pages_alloc(size_t minsize);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static bool
|
||||
base_pages_alloc(size_t minsize)
|
||||
{
|
||||
size_t csize;
|
||||
bool zero;
|
||||
|
||||
assert(minsize != 0);
|
||||
csize = CHUNK_CEILING(minsize);
|
||||
zero = false;
|
||||
base_pages = chunk_alloc(csize, chunksize, true, &zero,
|
||||
chunk_dss_prec_get());
|
||||
if (base_pages == NULL)
|
||||
return (true);
|
||||
base_next_addr = base_pages;
|
||||
base_past_addr = (void *)((uintptr_t)base_pages + csize);
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
||||
void *
|
||||
base_alloc(size_t size)
|
||||
{
|
||||
void *ret;
|
||||
size_t csize;
|
||||
|
||||
/* Round size up to nearest multiple of the cacheline size. */
|
||||
csize = CACHELINE_CEILING(size);
|
||||
|
||||
malloc_mutex_lock(&base_mtx);
|
||||
/* Make sure there's enough space for the allocation. */
|
||||
if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
|
||||
if (base_pages_alloc(csize)) {
|
||||
malloc_mutex_unlock(&base_mtx);
|
||||
return (NULL);
|
||||
}
|
||||
static base_block_t *
|
||||
base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind,
|
||||
pszind_t *pind_last, size_t *extent_sn_next, size_t size,
|
||||
size_t alignment) {
|
||||
alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
|
||||
size_t usize = ALIGNMENT_CEILING(size, alignment);
|
||||
size_t header_size = sizeof(base_block_t);
|
||||
size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
|
||||
header_size;
|
||||
/*
|
||||
* Create increasingly larger blocks in order to limit the total number
|
||||
* of disjoint virtual memory ranges. Choose the next size in the page
|
||||
* size class series (skipping size classes that are not a multiple of
|
||||
* HUGEPAGE), or a size large enough to satisfy the requested size and
|
||||
* alignment, whichever is larger.
|
||||
*/
|
||||
size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
|
||||
+ usize));
|
||||
pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
|
||||
*pind_last;
|
||||
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
|
||||
size_t block_size = (min_block_size > next_block_size) ? min_block_size
|
||||
: next_block_size;
|
||||
base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
|
||||
block_size);
|
||||
if (block == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
/* Allocate. */
|
||||
ret = base_next_addr;
|
||||
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
|
||||
malloc_mutex_unlock(&base_mtx);
|
||||
VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
|
||||
|
||||
return (ret);
|
||||
*pind_last = sz_psz2ind(block_size);
|
||||
block->size = block_size;
|
||||
block->next = NULL;
|
||||
assert(block_size >= header_size);
|
||||
base_extent_init(extent_sn_next, &block->extent,
|
||||
(void *)((uintptr_t)block + header_size), block_size - header_size);
|
||||
return block;
|
||||
}
|
||||
|
||||
void *
|
||||
base_calloc(size_t number, size_t size)
|
||||
{
|
||||
void *ret = base_alloc(number * size);
|
||||
/*
|
||||
* Allocate an extent that is at least as large as specified size, with
|
||||
* specified alignment.
|
||||
*/
|
||||
static extent_t *
|
||||
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
|
||||
malloc_mutex_assert_owner(tsdn, &base->mtx);
|
||||
|
||||
if (ret != NULL)
|
||||
memset(ret, 0, number * size);
|
||||
|
||||
return (ret);
|
||||
extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
|
||||
/*
|
||||
* Drop mutex during base_block_alloc(), because an extent hook will be
|
||||
* called.
|
||||
*/
|
||||
malloc_mutex_unlock(tsdn, &base->mtx);
|
||||
base_block_t *block = base_block_alloc(tsdn, extent_hooks,
|
||||
base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
|
||||
alignment);
|
||||
malloc_mutex_lock(tsdn, &base->mtx);
|
||||
if (block == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
block->next = base->blocks;
|
||||
base->blocks = block;
|
||||
if (config_stats) {
|
||||
base->allocated += sizeof(base_block_t);
|
||||
base->resident += PAGE_CEILING(sizeof(base_block_t));
|
||||
base->mapped += block->size;
|
||||
assert(base->allocated <= base->resident);
|
||||
assert(base->resident <= base->mapped);
|
||||
}
|
||||
return &block->extent;
|
||||
}
|
||||
|
||||
extent_node_t *
|
||||
base_node_alloc(void)
|
||||
{
|
||||
extent_node_t *ret;
|
||||
base_t *
|
||||
b0get(void) {
|
||||
return b0;
|
||||
}
|
||||
|
||||
malloc_mutex_lock(&base_mtx);
|
||||
if (base_nodes != NULL) {
|
||||
ret = base_nodes;
|
||||
base_nodes = *(extent_node_t **)ret;
|
||||
malloc_mutex_unlock(&base_mtx);
|
||||
VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
|
||||
} else {
|
||||
malloc_mutex_unlock(&base_mtx);
|
||||
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
|
||||
base_t *
|
||||
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
|
||||
pszind_t pind_last = 0;
|
||||
size_t extent_sn_next = 0;
|
||||
base_block_t *block = base_block_alloc(tsdn, extent_hooks, ind,
|
||||
&pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
|
||||
if (block == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return (ret);
|
||||
size_t gap_size;
|
||||
size_t base_alignment = CACHELINE;
|
||||
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
|
||||
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
|
||||
&gap_size, base_size, base_alignment);
|
||||
base->ind = ind;
|
||||
atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
|
||||
if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
|
||||
malloc_mutex_rank_exclusive)) {
|
||||
base_unmap(tsdn, extent_hooks, ind, block, block->size);
|
||||
return NULL;
|
||||
}
|
||||
base->pind_last = pind_last;
|
||||
base->extent_sn_next = extent_sn_next;
|
||||
base->blocks = block;
|
||||
for (szind_t i = 0; i < NSIZES; i++) {
|
||||
extent_heap_new(&base->avail[i]);
|
||||
}
|
||||
if (config_stats) {
|
||||
base->allocated = sizeof(base_block_t);
|
||||
base->resident = PAGE_CEILING(sizeof(base_block_t));
|
||||
base->mapped = block->size;
|
||||
assert(base->allocated <= base->resident);
|
||||
assert(base->resident <= base->mapped);
|
||||
}
|
||||
base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
|
||||
base_size);
|
||||
|
||||
return base;
|
||||
}
|
||||
|
||||
void
|
||||
base_node_dealloc(extent_node_t *node)
|
||||
{
|
||||
base_delete(tsdn_t *tsdn, base_t *base) {
|
||||
extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
|
||||
base_block_t *next = base->blocks;
|
||||
do {
|
||||
base_block_t *block = next;
|
||||
next = block->next;
|
||||
base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
|
||||
block->size);
|
||||
} while (next != NULL);
|
||||
}
|
||||
|
||||
VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
|
||||
malloc_mutex_lock(&base_mtx);
|
||||
*(extent_node_t **)node = base_nodes;
|
||||
base_nodes = node;
|
||||
malloc_mutex_unlock(&base_mtx);
|
||||
extent_hooks_t *
|
||||
base_extent_hooks_get(base_t *base) {
|
||||
return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
|
||||
ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
extent_hooks_t *
|
||||
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
|
||||
extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
|
||||
atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
|
||||
return old_extent_hooks;
|
||||
}
|
||||
|
||||
static void *
|
||||
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
|
||||
size_t *esn) {
|
||||
alignment = QUANTUM_CEILING(alignment);
|
||||
size_t usize = ALIGNMENT_CEILING(size, alignment);
|
||||
size_t asize = usize + alignment - QUANTUM;
|
||||
|
||||
extent_t *extent = NULL;
|
||||
malloc_mutex_lock(tsdn, &base->mtx);
|
||||
for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
|
||||
extent = extent_heap_remove_first(&base->avail[i]);
|
||||
if (extent != NULL) {
|
||||
/* Use existing space. */
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (extent == NULL) {
|
||||
/* Try to allocate more space. */
|
||||
extent = base_extent_alloc(tsdn, base, usize, alignment);
|
||||
}
|
||||
void *ret;
|
||||
if (extent == NULL) {
|
||||
ret = NULL;
|
||||
goto label_return;
|
||||
}
|
||||
|
||||
ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
|
||||
if (esn != NULL) {
|
||||
*esn = extent_sn_get(extent);
|
||||
}
|
||||
label_return:
|
||||
malloc_mutex_unlock(tsdn, &base->mtx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* base_alloc() returns zeroed memory, which is always demand-zeroed for the
|
||||
* auto arenas, in order to make multi-page sparse data structures such as radix
|
||||
* tree nodes efficient with respect to physical memory usage. Upon success a
|
||||
* pointer to at least size bytes with specified alignment is returned. Note
|
||||
* that size is rounded up to the nearest multiple of alignment to avoid false
|
||||
* sharing.
|
||||
*/
|
||||
void *
|
||||
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
|
||||
return base_alloc_impl(tsdn, base, size, alignment, NULL);
|
||||
}
|
||||
|
||||
extent_t *
|
||||
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
|
||||
size_t esn;
|
||||
extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
|
||||
CACHELINE, &esn);
|
||||
if (extent == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
extent_esn_set(extent, esn);
|
||||
return extent;
|
||||
}
|
||||
|
||||
void
|
||||
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
|
||||
size_t *mapped) {
|
||||
cassert(config_stats);
|
||||
|
||||
malloc_mutex_lock(tsdn, &base->mtx);
|
||||
assert(base->allocated <= base->resident);
|
||||
assert(base->resident <= base->mapped);
|
||||
*allocated = base->allocated;
|
||||
*resident = base->resident;
|
||||
*mapped = base->mapped;
|
||||
malloc_mutex_unlock(tsdn, &base->mtx);
|
||||
}
|
||||
|
||||
void
|
||||
base_prefork(tsdn_t *tsdn, base_t *base) {
|
||||
malloc_mutex_prefork(tsdn, &base->mtx);
|
||||
}
|
||||
|
||||
void
|
||||
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
|
||||
malloc_mutex_postfork_parent(tsdn, &base->mtx);
|
||||
}
|
||||
|
||||
void
|
||||
base_postfork_child(tsdn_t *tsdn, base_t *base) {
|
||||
malloc_mutex_postfork_child(tsdn, &base->mtx);
|
||||
}
|
||||
|
||||
bool
|
||||
base_boot(void)
|
||||
{
|
||||
|
||||
base_nodes = NULL;
|
||||
if (malloc_mutex_init(&base_mtx))
|
||||
return (true);
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
||||
void
|
||||
base_prefork(void)
|
||||
{
|
||||
|
||||
malloc_mutex_prefork(&base_mtx);
|
||||
}
|
||||
|
||||
void
|
||||
base_postfork_parent(void)
|
||||
{
|
||||
|
||||
malloc_mutex_postfork_parent(&base_mtx);
|
||||
}
|
||||
|
||||
void
|
||||
base_postfork_child(void)
|
||||
{
|
||||
|
||||
malloc_mutex_postfork_child(&base_mtx);
|
||||
base_boot(tsdn_t *tsdn) {
|
||||
b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
|
||||
return (b0 == NULL);
|
||||
}
|
||||
|
||||
113 deps/jemalloc/src/bitmap.c vendored
@@ -1,24 +1,15 @@
|
||||
#define JEMALLOC_BITMAP_C_
|
||||
#include "jemalloc/internal/jemalloc_internal.h"
|
||||
#define JEMALLOC_BITMAP_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static size_t bits2groups(size_t nbits);
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static size_t
|
||||
bits2groups(size_t nbits)
|
||||
{
|
||||
|
||||
return ((nbits >> LG_BITMAP_GROUP_NBITS) +
|
||||
!!(nbits & BITMAP_GROUP_NBITS_MASK));
|
||||
}
|
||||
#ifdef BITMAP_USE_TREE
|
||||
|
||||
void
|
||||
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
|
||||
{
|
||||
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
|
||||
unsigned i;
|
||||
size_t group_count;
|
||||
|
||||
@@ -31,60 +22,100 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
|
||||
* that requires only one group.
|
||||
*/
|
||||
binfo->levels[0].group_offset = 0;
|
||||
group_count = bits2groups(nbits);
|
||||
group_count = BITMAP_BITS2GROUPS(nbits);
|
||||
for (i = 1; group_count > 1; i++) {
|
||||
assert(i < BITMAP_MAX_LEVELS);
|
||||
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
|
||||
+ group_count;
|
||||
group_count = bits2groups(group_count);
|
||||
group_count = BITMAP_BITS2GROUPS(group_count);
|
||||
}
|
||||
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
|
||||
+ group_count;
|
||||
assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
|
||||
binfo->nlevels = i;
|
||||
binfo->nbits = nbits;
|
||||
}
|
||||
|
||||
size_t
|
||||
bitmap_info_ngroups(const bitmap_info_t *binfo)
|
||||
{
|
||||
|
||||
return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
|
||||
}
|
||||
|
||||
size_t
|
||||
bitmap_size(size_t nbits)
|
||||
{
|
||||
bitmap_info_t binfo;
|
||||
|
||||
bitmap_info_init(&binfo, nbits);
|
||||
return (bitmap_info_ngroups(&binfo));
|
||||
static size_t
|
||||
bitmap_info_ngroups(const bitmap_info_t *binfo) {
|
||||
return binfo->levels[binfo->nlevels].group_offset;
|
||||
}
|
||||
|
||||
void
|
||||
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
|
||||
{
|
||||
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
|
||||
size_t extra;
|
||||
unsigned i;
|
||||
|
||||
/*
|
||||
* Bits are actually inverted with regard to the external bitmap
|
||||
* interface, so the bitmap starts out with all 1 bits, except for
|
||||
* trailing unused bits (if any). Note that each group uses bit 0 to
|
||||
* correspond to the first logical bit in the group, so extra bits
|
||||
* are the most significant bits of the last group.
|
||||
* interface.
|
||||
*/
|
||||
memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
|
||||
LG_SIZEOF_BITMAP);
|
||||
|
||||
if (fill) {
|
||||
/* The "filled" bitmap starts out with all 0 bits. */
|
||||
memset(bitmap, 0, bitmap_size(binfo));
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* The "empty" bitmap starts out with all 1 bits, except for trailing
|
||||
* unused bits (if any). Note that each group uses bit 0 to correspond
|
||||
* to the first logical bit in the group, so extra bits are the most
|
||||
* significant bits of the last group.
|
||||
*/
|
||||
memset(bitmap, 0xffU, bitmap_size(binfo));
|
||||
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
|
||||
& BITMAP_GROUP_NBITS_MASK;
|
||||
if (extra != 0)
|
||||
if (extra != 0) {
|
||||
bitmap[binfo->levels[1].group_offset - 1] >>= extra;
|
||||
}
|
||||
for (i = 1; i < binfo->nlevels; i++) {
|
||||
size_t group_count = binfo->levels[i].group_offset -
|
||||
binfo->levels[i-1].group_offset;
|
||||
extra = (BITMAP_GROUP_NBITS - (group_count &
|
||||
BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
|
||||
if (extra != 0)
|
||||
if (extra != 0) {
|
||||
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#else /* BITMAP_USE_TREE */
|
||||
|
||||
void
|
||||
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
|
||||
assert(nbits > 0);
|
||||
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
|
||||
|
||||
binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
|
||||
binfo->nbits = nbits;
|
||||
}
|
||||
|
||||
static size_t
|
||||
bitmap_info_ngroups(const bitmap_info_t *binfo) {
|
||||
return binfo->ngroups;
|
||||
}
|
||||
|
||||
void
|
||||
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
|
||||
size_t extra;
|
||||
|
||||
if (fill) {
|
||||
memset(bitmap, 0, bitmap_size(binfo));
|
||||
return;
|
||||
}
|
||||
|
||||
memset(bitmap, 0xffU, bitmap_size(binfo));
|
||||
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
|
||||
& BITMAP_GROUP_NBITS_MASK;
|
||||
if (extra != 0) {
|
||||
bitmap[binfo->ngroups - 1] >>= extra;
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
|
||||
size_t
|
||||
bitmap_size(const bitmap_info_t *binfo) {
|
||||
return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
|
||||
}
|
||||
|
||||
254 deps/jemalloc/src/ckh.c vendored
@@ -34,14 +34,24 @@
|
||||
* respectively.
|
||||
*
|
||||
******************************************************************************/
|
||||
#define JEMALLOC_CKH_C_
|
||||
#include "jemalloc/internal/jemalloc_internal.h"
|
||||
#define JEMALLOC_CKH_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
|
||||
#include "jemalloc/internal/ckh.h"
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/hash.h"
|
||||
#include "jemalloc/internal/malloc_io.h"
|
||||
#include "jemalloc/internal/prng.h"
|
||||
#include "jemalloc/internal/util.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static bool ckh_grow(ckh_t *ckh);
|
||||
static void ckh_shrink(ckh_t *ckh);
|
||||
static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
|
||||
static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
@@ -49,27 +59,26 @@ static void ckh_shrink(ckh_t *ckh);
|
||||
* Search bucket for key and return the cell number if found; SIZE_T_MAX
|
||||
* otherwise.
|
||||
*/
|
||||
JEMALLOC_INLINE_C size_t
|
||||
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
|
||||
{
|
||||
static size_t
|
||||
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
|
||||
ckhc_t *cell;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
|
||||
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
|
||||
if (cell->key != NULL && ckh->keycomp(key, cell->key))
|
||||
return ((bucket << LG_CKH_BUCKET_CELLS) + i);
|
||||
if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
|
||||
return (bucket << LG_CKH_BUCKET_CELLS) + i;
|
||||
}
|
||||
}
|
||||
|
||||
return (SIZE_T_MAX);
|
||||
return SIZE_T_MAX;
|
||||
}
|
||||
|
||||
/*
|
||||
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
|
||||
*/
|
||||
JEMALLOC_INLINE_C size_t
|
||||
ckh_isearch(ckh_t *ckh, const void *key)
|
||||
{
|
||||
static size_t
|
||||
ckh_isearch(ckh_t *ckh, const void *key) {
|
||||
size_t hashes[2], bucket, cell;
|
||||
|
||||
assert(ckh != NULL);
|
||||
@@ -79,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key)
|
||||
/* Search primary bucket. */
|
||||
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
cell = ckh_bucket_search(ckh, bucket, key);
|
||||
if (cell != SIZE_T_MAX)
|
||||
return (cell);
|
||||
if (cell != SIZE_T_MAX) {
|
||||
return cell;
|
||||
}
|
||||
|
||||
/* Search secondary bucket. */
|
||||
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
cell = ckh_bucket_search(ckh, bucket, key);
|
||||
return (cell);
|
||||
return cell;
|
||||
}
|
||||
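
ckh_isearch() above captures the core cuckoo-hashing lookup: an item can only live in one of the two buckets named by its two hashes, so a miss in both buckets means the key is absent. Below is a standalone sketch of the same idea with hypothetical names, not jemalloc internals, simplified to one cell per bucket rather than the (1 << LG_CKH_BUCKET_CELLS) cells used here.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define NBUCKETS 64  /* power of two, analogous to (1 << lg_curbuckets) */

typedef struct { const char *key; int value; } toy_cell_t;

/* FNV-1a style toy hash, seeded so the two bucket choices differ. */
static size_t toy_hash(const char *key, uint64_t seed) {
	uint64_t h = seed;
	for (; *key != '\0'; key++) {
		h = (h ^ (uint8_t)*key) * 0x100000001b3ULL;
	}
	return (size_t)h & (NBUCKETS - 1);
}

/* Check the primary bucket, then the secondary one; NULL means not found. */
static toy_cell_t *toy_lookup(toy_cell_t table[NBUCKETS], const char *key) {
	size_t b1 = toy_hash(key, 0xcbf29ce484222325ULL);
	size_t b2 = toy_hash(key, 0x9e3779b97f4a7c15ULL);
	if (table[b1].key != NULL && strcmp(table[b1].key, key) == 0) {
		return &table[b1];
	}
	if (table[b2].key != NULL && strcmp(table[b2].key, key) == 0) {
		return &table[b2];
	}
	return NULL;
}
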
|
||||
JEMALLOC_INLINE_C bool
|
||||
static bool
|
||||
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
|
||||
const void *data)
|
||||
{
|
||||
const void *data) {
|
||||
ckhc_t *cell;
|
||||
unsigned offset, i;
|
||||
|
||||
@@ -99,7 +108,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
|
||||
* Cycle through the cells in the bucket, starting at a random position.
|
||||
* The randomness avoids worst-case search overhead as buckets fill up.
|
||||
*/
|
||||
prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
|
||||
offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
|
||||
LG_CKH_BUCKET_CELLS);
|
||||
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
|
||||
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
|
||||
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
|
||||
@@ -107,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
|
||||
cell->key = key;
|
||||
cell->data = data;
|
||||
ckh->count++;
|
||||
return (false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return (true);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -120,10 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
|
||||
* eviction/relocation procedure until either success or detection of an
|
||||
* eviction/relocation bucket cycle.
|
||||
*/
|
||||
JEMALLOC_INLINE_C bool
|
||||
static bool
|
||||
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
|
||||
void const **argdata)
|
||||
{
|
||||
void const **argdata) {
|
||||
const void *key, *data, *tkey, *tdata;
|
||||
ckhc_t *cell;
|
||||
size_t hashes[2], bucket, tbucket;
|
||||
@@ -141,7 +150,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
|
||||
* were an item for which both hashes indicated the same
|
||||
* bucket.
|
||||
*/
|
||||
prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
|
||||
i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
|
||||
LG_CKH_BUCKET_CELLS);
|
||||
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
|
||||
assert(cell->key != NULL);
|
||||
|
||||
@@ -181,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
|
||||
if (tbucket == argbucket) {
|
||||
*argkey = key;
|
||||
*argdata = data;
|
||||
return (true);
|
||||
return true;
|
||||
}
|
||||
|
||||
bucket = tbucket;
|
||||
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
|
||||
return (false);
|
||||
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE_C bool
|
||||
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
|
||||
{
|
||||
static bool
|
||||
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
|
||||
size_t hashes[2], bucket;
|
||||
const void *key = *argkey;
|
||||
const void *data = *argdata;
|
||||
@@ -201,27 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
|
||||
|
||||
/* Try to insert in primary bucket. */
|
||||
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
|
||||
return (false);
|
||||
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Try to insert in secondary bucket. */
|
||||
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
|
||||
return (false);
|
||||
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to find a place for this item via iterative eviction/relocation.
|
||||
*/
|
||||
return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
|
||||
return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to rebuild the hash table from scratch by inserting all items from the
|
||||
* old table into the new.
|
||||
*/
|
||||
JEMALLOC_INLINE_C bool
|
||||
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
|
||||
{
|
||||
static bool
|
||||
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
|
||||
size_t count, i, nins;
|
||||
const void *key, *data;
|
||||
|
||||
@@ -233,22 +244,20 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
|
||||
data = aTab[i].data;
|
||||
if (ckh_try_insert(ckh, &key, &data)) {
|
||||
ckh->count = count;
|
||||
return (true);
|
||||
return true;
|
||||
}
|
||||
nins++;
|
||||
}
|
||||
}
|
||||
|
||||
return (false);
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool
|
||||
ckh_grow(ckh_t *ckh)
|
||||
{
|
||||
ckh_grow(tsd_t *tsd, ckh_t *ckh) {
|
||||
bool ret;
|
||||
ckhc_t *tab, *ttab;
|
||||
size_t lg_curcells;
|
||||
unsigned lg_prevbuckets;
|
||||
unsigned lg_prevbuckets, lg_curcells;
|
||||
|
||||
#ifdef CKH_COUNT
|
||||
ckh->ngrows++;
|
||||
@@ -265,12 +274,13 @@ ckh_grow(ckh_t *ckh)
|
||||
size_t usize;
|
||||
|
||||
lg_curcells++;
|
||||
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
|
||||
if (usize == 0) {
|
||||
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
|
||||
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
}
|
||||
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
|
||||
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
|
||||
true, NULL, true, arena_ichoose(tsd, NULL));
|
||||
if (tab == NULL) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
@@ -281,28 +291,27 @@ ckh_grow(ckh_t *ckh)
|
||||
tab = ttab;
|
||||
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
|
||||
|
||||
if (ckh_rebuild(ckh, tab) == false) {
|
||||
idalloc(tab);
|
||||
if (!ckh_rebuild(ckh, tab)) {
|
||||
idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
|
||||
break;
|
||||
}
|
||||
|
||||
/* Rebuilding failed, so back out partially rebuilt table. */
|
||||
idalloc(ckh->tab);
|
||||
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
|
||||
ckh->tab = tab;
|
||||
ckh->lg_curbuckets = lg_prevbuckets;
|
||||
}
|
||||
|
||||
ret = false;
|
||||
label_return:
|
||||
return (ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
ckh_shrink(ckh_t *ckh)
|
||||
{
|
||||
ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
|
||||
ckhc_t *tab, *ttab;
|
||||
size_t lg_curcells, usize;
|
||||
unsigned lg_prevbuckets;
|
||||
size_t usize;
|
||||
unsigned lg_prevbuckets, lg_curcells;
|
||||
|
||||
/*
|
||||
* It is possible (though unlikely, given well behaved hashes) that the
|
||||
@@ -310,10 +319,12 @@ ckh_shrink(ckh_t *ckh)
|
||||
*/
|
||||
lg_prevbuckets = ckh->lg_curbuckets;
|
||||
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
|
||||
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
|
||||
if (usize == 0)
|
||||
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
|
||||
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
|
||||
return;
|
||||
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
|
||||
}
|
||||
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
|
||||
true, arena_ichoose(tsd, NULL));
|
||||
if (tab == NULL) {
|
||||
/*
|
||||
* An OOM error isn't worth propagating, since it doesn't
|
||||
@@ -327,8 +338,8 @@ ckh_shrink(ckh_t *ckh)
|
||||
tab = ttab;
|
||||
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
|
||||
|
||||
if (ckh_rebuild(ckh, tab) == false) {
|
||||
idalloc(tab);
|
||||
if (!ckh_rebuild(ckh, tab)) {
|
||||
idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
|
||||
#ifdef CKH_COUNT
|
||||
ckh->nshrinks++;
|
||||
#endif
|
||||
@@ -336,7 +347,7 @@ ckh_shrink(ckh_t *ckh)
|
||||
}
|
||||
|
||||
/* Rebuilding failed, so back out partially rebuilt table. */
|
||||
idalloc(ckh->tab);
|
||||
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
|
||||
ckh->tab = tab;
|
||||
ckh->lg_curbuckets = lg_prevbuckets;
|
||||
#ifdef CKH_COUNT
|
||||
@@ -345,8 +356,8 @@ ckh_shrink(ckh_t *ckh)
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
|
||||
{
|
||||
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
|
||||
ckh_keycomp_t *keycomp) {
|
||||
bool ret;
|
||||
size_t mincells, usize;
|
||||
unsigned lg_mincells;
|
||||
@@ -366,29 +377,31 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
|
||||
ckh->count = 0;
|
||||
|
||||
/*
|
||||
* Find the minimum power of 2 that is large enough to fit aBaseCount
|
||||
* Find the minimum power of 2 that is large enough to fit minitems
|
||||
* entries. We are using (2+,2) cuckoo hashing, which has an expected
|
||||
* maximum load factor of at least ~0.86, so 0.75 is a conservative load
|
||||
* factor that will typically allow 2^aLgMinItems to fit without ever
|
||||
* factor that will typically allow mincells items to fit without ever
|
||||
* growing the table.
|
||||
*/
|
||||
assert(LG_CKH_BUCKET_CELLS > 0);
|
||||
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
|
||||
for (lg_mincells = LG_CKH_BUCKET_CELLS;
|
||||
(ZU(1) << lg_mincells) < mincells;
|
||||
lg_mincells++)
|
||||
; /* Do nothing. */
|
||||
lg_mincells++) {
|
||||
/* Do nothing. */
|
||||
}
|
||||
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
|
||||
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
|
||||
ckh->hash = hash;
|
||||
ckh->keycomp = keycomp;
|
||||
|
||||
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
|
||||
if (usize == 0) {
|
||||
usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
|
||||
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
}
|
||||
ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
|
||||
ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
|
||||
NULL, true, arena_ichoose(tsd, NULL));
|
||||
if (ckh->tab == NULL) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
@@ -396,20 +409,18 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
|
||||
|
||||
ret = false;
|
||||
label_return:
|
||||
return (ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void
|
||||
ckh_delete(ckh_t *ckh)
|
||||
{
|
||||
|
||||
ckh_delete(tsd_t *tsd, ckh_t *ckh) {
|
||||
assert(ckh != NULL);
|
||||
|
||||
#ifdef CKH_VERBOSE
|
||||
malloc_printf(
|
||||
"%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
|
||||
" nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
|
||||
" nrelocs: %"PRIu64"\n", __func__, ckh,
|
||||
"%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
|
||||
" nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
|
||||
" nrelocs: %"FMTu64"\n", __func__, ckh,
|
||||
(unsigned long long)ckh->ngrows,
|
||||
(unsigned long long)ckh->nshrinks,
|
||||
(unsigned long long)ckh->nshrinkfails,
|
||||
@@ -417,43 +428,42 @@ ckh_delete(ckh_t *ckh)
|
||||
(unsigned long long)ckh->nrelocs);
|
||||
#endif
|
||||
|
||||
idalloc(ckh->tab);
|
||||
if (config_debug)
|
||||
memset(ckh, 0x5a, sizeof(ckh_t));
|
||||
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
|
||||
if (config_debug) {
|
||||
memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
|
||||
}
|
||||
}
|
||||
|
||||
size_t
|
||||
ckh_count(ckh_t *ckh)
|
||||
{
|
||||
|
||||
ckh_count(ckh_t *ckh) {
|
||||
assert(ckh != NULL);
|
||||
|
||||
return (ckh->count);
|
||||
return ckh->count;
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
|
||||
{
|
||||
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
|
||||
size_t i, ncells;
|
||||
|
||||
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
|
||||
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
|
||||
if (ckh->tab[i].key != NULL) {
|
||||
if (key != NULL)
|
||||
if (key != NULL) {
|
||||
*key = (void *)ckh->tab[i].key;
|
||||
if (data != NULL)
|
||||
}
|
||||
if (data != NULL) {
|
||||
*data = (void *)ckh->tab[i].data;
|
||||
}
|
||||
*tabind = i + 1;
|
||||
return (false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return (true);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_insert(ckh_t *ckh, const void *key, const void *data)
|
||||
{
|
||||
ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
|
||||
bool ret;
|
||||
|
||||
assert(ckh != NULL);
|
||||
@@ -464,7 +474,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
|
||||
#endif
|
||||
|
||||
while (ckh_try_insert(ckh, &key, &data)) {
|
||||
if (ckh_grow(ckh)) {
|
||||
if (ckh_grow(tsd, ckh)) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
}
|
||||
@@ -472,22 +482,24 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
|
||||
|
||||
ret = false;
|
||||
label_return:
|
||||
return (ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
|
||||
{
|
||||
ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
|
||||
void **data) {
|
||||
size_t cell;
|
||||
|
||||
assert(ckh != NULL);
|
||||
|
||||
cell = ckh_isearch(ckh, searchkey);
|
||||
if (cell != SIZE_T_MAX) {
|
||||
if (key != NULL)
|
||||
if (key != NULL) {
|
||||
*key = (void *)ckh->tab[cell].key;
|
||||
if (data != NULL)
|
||||
}
|
||||
if (data != NULL) {
|
||||
*data = (void *)ckh->tab[cell].data;
|
||||
}
|
||||
ckh->tab[cell].key = NULL;
|
||||
ckh->tab[cell].data = NULL; /* Not necessary. */
|
||||
|
||||
@@ -497,54 +509,50 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
|
||||
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
|
||||
> ckh->lg_minbuckets) {
|
||||
/* Ignore error due to OOM. */
|
||||
ckh_shrink(ckh);
|
||||
ckh_shrink(tsd, ckh);
|
||||
}
|
||||
|
||||
return (false);
|
||||
return false;
|
||||
}
|
||||
|
||||
return (true);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
|
||||
{
|
||||
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
|
||||
size_t cell;
|
||||
|
||||
assert(ckh != NULL);
|
||||
|
||||
cell = ckh_isearch(ckh, searchkey);
|
||||
if (cell != SIZE_T_MAX) {
|
||||
if (key != NULL)
|
||||
if (key != NULL) {
|
||||
*key = (void *)ckh->tab[cell].key;
|
||||
if (data != NULL)
|
||||
}
|
||||
if (data != NULL) {
|
||||
*data = (void *)ckh->tab[cell].data;
|
||||
return (false);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
return (true);
|
||||
return true;
|
||||
}
|
||||
|
||||
void
|
||||
ckh_string_hash(const void *key, size_t r_hash[2])
|
||||
{
|
||||
|
||||
ckh_string_hash(const void *key, size_t r_hash[2]) {
|
||||
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_string_keycomp(const void *k1, const void *k2)
|
||||
{
|
||||
ckh_string_keycomp(const void *k1, const void *k2) {
|
||||
assert(k1 != NULL);
|
||||
assert(k2 != NULL);
|
||||
|
||||
assert(k1 != NULL);
|
||||
assert(k2 != NULL);
|
||||
|
||||
return (strcmp((char *)k1, (char *)k2) ? false : true);
|
||||
return !strcmp((char *)k1, (char *)k2);
|
||||
}
|
||||
|
||||
void
|
||||
ckh_pointer_hash(const void *key, size_t r_hash[2])
|
||||
{
|
||||
ckh_pointer_hash(const void *key, size_t r_hash[2]) {
|
||||
union {
|
||||
const void *v;
|
||||
size_t i;
|
||||
@@ -556,8 +564,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2])
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_pointer_keycomp(const void *k1, const void *k2)
|
||||
{
|
||||
|
||||
return ((k1 == k2) ? true : false);
|
||||
ckh_pointer_keycomp(const void *k1, const void *k2) {
|
||||
return (k1 == k2);
|
||||
}
|
||||
|
||||
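The ckh_new() comment in the hunk above sizes the table for a 0.75 load factor under (2+,2) cuckoo hashing. Below is a minimal standalone sketch of that sizing arithmetic, assuming LG_CKH_BUCKET_CELLS is 2 as on common 64-bit builds; the helper name and the demo main() are illustrative and not jemalloc API.

/*
 * Illustrative sketch only (not jemalloc code): reproduce the table-sizing
 * logic described in the ckh_new() comment.  Reserve roughly 4/3 as many
 * cells as items (target load factor 0.75), then round up to a power of
 * two that is at least one bucket.
 */
#include <stdio.h>
#include <stddef.h>

#define LG_CKH_BUCKET_CELLS 2  /* Assumed value (4 cells per bucket). */

static unsigned
ckh_lg_mincells(size_t minitems) {
	/* Same arithmetic as ckh_new(): round up past a multiple of 3, then *4/3. */
	size_t mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
	unsigned lg_mincells = LG_CKH_BUCKET_CELLS;
	while (((size_t)1 << lg_mincells) < mincells) {
		lg_mincells++;
	}
	return lg_mincells;
}

int
main(void) {
	for (size_t n = 1; n <= 64; n *= 2) {
		printf("minitems=%zu -> lg_mincells=%u\n", n, ckh_lg_mincells(n));
	}
	return 0;
}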
2770
deps/jemalloc/src/ctl.c
vendored
File diff suppressed because it is too large
2004
deps/jemalloc/src/extent.c
vendored
File diff suppressed because it is too large
5
deps/jemalloc/src/hash.c
vendored
@@ -1,2 +1,3 @@
#define JEMALLOC_HASH_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define JEMALLOC_HASH_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

3649
deps/jemalloc/src/jemalloc.c
vendored
File diff suppressed because it is too large
206
deps/jemalloc/src/mutex.c
vendored
@@ -1,12 +1,12 @@
#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
#include <dlfcn.h>
#endif
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"

#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#define _CRT_SPINCOUNT 4000
#endif

/******************************************************************************/
@@ -20,10 +20,6 @@ static bool postpone_init = true;
|
||||
static malloc_mutex_t *postponed_mutexes = NULL;
|
||||
#endif
|
||||
|
||||
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
|
||||
static void pthread_create_once(void);
|
||||
#endif
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* We intercept pthread_create() calls in order to toggle isthreaded if the
|
||||
@@ -31,33 +27,11 @@ static void pthread_create_once(void);
|
||||
*/
|
||||
|
||||
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
|
||||
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
|
||||
void *(*)(void *), void *__restrict);
|
||||
|
||||
static void
|
||||
pthread_create_once(void)
|
||||
{
|
||||
|
||||
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
|
||||
if (pthread_create_fptr == NULL) {
|
||||
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
|
||||
"\"pthread_create\")\n");
|
||||
abort();
|
||||
}
|
||||
|
||||
isthreaded = true;
|
||||
}
|
||||
|
||||
JEMALLOC_EXPORT int
|
||||
pthread_create(pthread_t *__restrict thread,
|
||||
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
|
||||
void *__restrict arg)
|
||||
{
|
||||
static pthread_once_t once_control = PTHREAD_ONCE_INIT;
|
||||
|
||||
pthread_once(&once_control, pthread_create_once);
|
||||
|
||||
return (pthread_create_fptr(thread, attr, start_routine, arg));
|
||||
void *__restrict arg) {
|
||||
return pthread_create_wrapper(thread, attr, start_routine, arg);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -68,14 +42,108 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
|
||||
void *(calloc_cb)(size_t, size_t));
|
||||
#endif
|
||||
|
||||
bool
|
||||
malloc_mutex_init(malloc_mutex_t *mutex)
|
||||
{
|
||||
void
|
||||
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
|
||||
mutex_prof_data_t *data = &mutex->prof_data;
|
||||
UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER;
|
||||
|
||||
if (ncpus == 1) {
|
||||
goto label_spin_done;
|
||||
}
|
||||
|
||||
int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
|
||||
do {
|
||||
CPU_SPINWAIT;
|
||||
if (!malloc_mutex_trylock_final(mutex)) {
|
||||
data->n_spin_acquired++;
|
||||
return;
|
||||
}
|
||||
} while (cnt++ < max_cnt);
|
||||
|
||||
if (!config_stats) {
|
||||
/* Only spin is useful when stats is off. */
|
||||
malloc_mutex_lock_final(mutex);
|
||||
return;
|
||||
}
|
||||
label_spin_done:
|
||||
nstime_update(&before);
|
||||
/* Copy before to after to avoid clock skews. */
|
||||
nstime_t after;
|
||||
nstime_copy(&after, &before);
|
||||
uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
|
||||
ATOMIC_RELAXED) + 1;
|
||||
/* One last try as above two calls may take quite some cycles. */
|
||||
if (!malloc_mutex_trylock_final(mutex)) {
|
||||
atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
|
||||
data->n_spin_acquired++;
|
||||
return;
|
||||
}
|
||||
|
||||
/* True slow path. */
|
||||
malloc_mutex_lock_final(mutex);
|
||||
/* Update more slow-path only counters. */
|
||||
atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
|
||||
nstime_update(&after);
|
||||
|
||||
nstime_t delta;
|
||||
nstime_copy(&delta, &after);
|
||||
nstime_subtract(&delta, &before);
|
||||
|
||||
data->n_wait_times++;
|
||||
nstime_add(&data->tot_wait_time, &delta);
|
||||
if (nstime_compare(&data->max_wait_time, &delta) < 0) {
|
||||
nstime_copy(&data->max_wait_time, &delta);
|
||||
}
|
||||
if (n_thds > data->max_n_thds) {
|
||||
data->max_n_thds = n_thds;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
mutex_prof_data_init(mutex_prof_data_t *data) {
|
||||
memset(data, 0, sizeof(mutex_prof_data_t));
|
||||
nstime_init(&data->max_wait_time, 0);
|
||||
nstime_init(&data->tot_wait_time, 0);
|
||||
data->prev_owner = NULL;
|
||||
}
|
||||
|
||||
void
|
||||
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
malloc_mutex_assert_owner(tsdn, mutex);
|
||||
mutex_prof_data_init(&mutex->prof_data);
|
||||
}
|
||||
|
||||
static int
|
||||
mutex_addr_comp(const witness_t *witness1, void *mutex1,
|
||||
const witness_t *witness2, void *mutex2) {
|
||||
assert(mutex1 != NULL);
|
||||
assert(mutex2 != NULL);
|
||||
uintptr_t mu1int = (uintptr_t)mutex1;
|
||||
uintptr_t mu2int = (uintptr_t)mutex2;
|
||||
if (mu1int < mu2int) {
|
||||
return -1;
|
||||
} else if (mu1int == mu2int) {
|
||||
return 0;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
|
||||
witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
|
||||
mutex_prof_data_init(&mutex->prof_data);
|
||||
#ifdef _WIN32
|
||||
# if _WIN32_WINNT >= 0x0600
|
||||
InitializeSRWLock(&mutex->lock);
|
||||
# else
|
||||
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
|
||||
_CRT_SPINCOUNT))
|
||||
return (true);
|
||||
_CRT_SPINCOUNT)) {
|
||||
return true;
|
||||
}
|
||||
# endif
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
mutex->lock = OS_UNFAIR_LOCK_INIT;
|
||||
#elif (defined(JEMALLOC_OSSPIN))
|
||||
mutex->lock = 0;
|
||||
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
||||
@@ -83,67 +151,73 @@ malloc_mutex_init(malloc_mutex_t *mutex)
|
||||
mutex->postponed_next = postponed_mutexes;
|
||||
postponed_mutexes = mutex;
|
||||
} else {
|
||||
if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
|
||||
0)
|
||||
return (true);
|
||||
if (_pthread_mutex_init_calloc_cb(&mutex->lock,
|
||||
bootstrap_calloc) != 0) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
#else
|
||||
pthread_mutexattr_t attr;
|
||||
|
||||
if (pthread_mutexattr_init(&attr) != 0)
|
||||
return (true);
|
||||
if (pthread_mutexattr_init(&attr) != 0) {
|
||||
return true;
|
||||
}
|
||||
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
|
||||
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
|
||||
pthread_mutexattr_destroy(&attr);
|
||||
return (true);
|
||||
return true;
|
||||
}
|
||||
pthread_mutexattr_destroy(&attr);
|
||||
#endif
|
||||
return (false);
|
||||
if (config_debug) {
|
||||
mutex->lock_order = lock_order;
|
||||
if (lock_order == malloc_mutex_address_ordered) {
|
||||
witness_init(&mutex->witness, name, rank,
|
||||
mutex_addr_comp, &mutex);
|
||||
} else {
|
||||
witness_init(&mutex->witness, name, rank, NULL, NULL);
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void
|
||||
malloc_mutex_prefork(malloc_mutex_t *mutex)
|
||||
{
|
||||
|
||||
malloc_mutex_lock(mutex);
|
||||
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
malloc_mutex_lock(tsdn, mutex);
|
||||
}
|
||||
|
||||
void
|
||||
malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
|
||||
{
|
||||
|
||||
malloc_mutex_unlock(mutex);
|
||||
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
malloc_mutex_unlock(tsdn, mutex);
|
||||
}
|
||||
|
||||
void
|
||||
malloc_mutex_postfork_child(malloc_mutex_t *mutex)
|
||||
{
|
||||
|
||||
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
#ifdef JEMALLOC_MUTEX_INIT_CB
|
||||
malloc_mutex_unlock(mutex);
|
||||
malloc_mutex_unlock(tsdn, mutex);
|
||||
#else
|
||||
if (malloc_mutex_init(mutex)) {
|
||||
if (malloc_mutex_init(mutex, mutex->witness.name,
|
||||
mutex->witness.rank, mutex->lock_order)) {
|
||||
malloc_printf("<jemalloc>: Error re-initializing mutex in "
|
||||
"child\n");
|
||||
if (opt_abort)
|
||||
if (opt_abort) {
|
||||
abort();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
bool
|
||||
mutex_boot(void)
|
||||
{
|
||||
|
||||
malloc_mutex_boot(void) {
|
||||
#ifdef JEMALLOC_MUTEX_INIT_CB
|
||||
postpone_init = false;
|
||||
while (postponed_mutexes != NULL) {
|
||||
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
|
||||
base_calloc) != 0)
|
||||
return (true);
|
||||
bootstrap_calloc) != 0) {
|
||||
return true;
|
||||
}
|
||||
postponed_mutexes = postponed_mutexes->postponed_next;
|
||||
}
|
||||
#endif
|
||||
return (false);
|
||||
return false;
|
||||
}
|
||||
|
||||
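The new malloc_mutex_lock_slow() above spins on trylock a bounded number of times before falling back to a blocking acquire and recording wait statistics. Below is a minimal pthread-based sketch of that spin-then-block pattern, with made-up counter names and spin limit; it is not the jemalloc implementation.

/*
 * Illustrative sketch only (not jemalloc code): spin on trylock a bounded
 * number of times, then fall back to a blocking lock and count the
 * acquisition as contended.
 */
#include <pthread.h>
#include <stdint.h>

#define MAX_SPIN 250  /* Invented bound; jemalloc uses MALLOC_MUTEX_MAX_SPIN. */

typedef struct {
	pthread_mutex_t lock;
	uint64_t n_spin_acquired;  /* Lock grabbed while still spinning. */
	uint64_t n_blocked;        /* Had to fall back to a blocking lock. */
} spin_mutex_t;

#define SPIN_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, 0, 0 }

static void
spin_mutex_lock(spin_mutex_t *m) {
	for (int cnt = 0; cnt < MAX_SPIN; cnt++) {
		if (pthread_mutex_trylock(&m->lock) == 0) {
			m->n_spin_acquired++;  /* Counters protected by the lock itself. */
			return;
		}
	}
	/* Spinning did not help; take the slow, blocking path. */
	pthread_mutex_lock(&m->lock);
	m->n_blocked++;
}

static void
spin_mutex_unlock(spin_mutex_t *m) {
	pthread_mutex_unlock(&m->lock);
}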
2488
deps/jemalloc/src/prof.c
vendored
File diff suppressed because it is too large
391
deps/jemalloc/src/rtree.c
vendored
@@ -1,105 +1,320 @@
#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

rtree_t *
rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc)
{
rtree_t *ret;
unsigned bits_per_level, bits_in_leaf, height, i;
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"

assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
|
||||
|
||||
bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
|
||||
bits_in_leaf = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1;
|
||||
if (bits > bits_in_leaf) {
|
||||
height = 1 + (bits - bits_in_leaf) / bits_per_level;
|
||||
if ((height-1) * bits_per_level + bits_in_leaf != bits)
|
||||
height++;
|
||||
} else {
|
||||
height = 1;
|
||||
/*
|
||||
* Only the most significant bits of keys passed to rtree_{read,write}() are
|
||||
* used.
|
||||
*/
|
||||
bool
|
||||
rtree_new(rtree_t *rtree, bool zeroed) {
|
||||
#ifdef JEMALLOC_JET
|
||||
if (!zeroed) {
|
||||
memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
|
||||
}
|
||||
assert((height-1) * bits_per_level + bits_in_leaf >= bits);
|
||||
#else
|
||||
assert(zeroed);
|
||||
#endif
|
||||
|
||||
ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) +
|
||||
(sizeof(unsigned) * height));
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
|
||||
height));
|
||||
|
||||
ret->alloc = alloc;
|
||||
ret->dalloc = dalloc;
|
||||
if (malloc_mutex_init(&ret->mutex)) {
|
||||
if (dalloc != NULL)
|
||||
dalloc(ret);
|
||||
return (NULL);
|
||||
if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
|
||||
malloc_mutex_rank_exclusive)) {
|
||||
return true;
|
||||
}
|
||||
ret->height = height;
|
||||
if (height > 1) {
|
||||
if ((height-1) * bits_per_level + bits_in_leaf > bits) {
|
||||
ret->level2bits[0] = (bits - bits_in_leaf) %
|
||||
bits_per_level;
|
||||
} else
|
||||
ret->level2bits[0] = bits_per_level;
|
||||
for (i = 1; i < height-1; i++)
|
||||
ret->level2bits[i] = bits_per_level;
|
||||
ret->level2bits[height-1] = bits_in_leaf;
|
||||
} else
|
||||
ret->level2bits[0] = bits;
|
||||
|
||||
ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]);
|
||||
if (ret->root == NULL) {
|
||||
if (dalloc != NULL)
|
||||
dalloc(ret);
|
||||
return (NULL);
|
||||
}
|
||||
memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
|
||||
|
||||
return (ret);
|
||||
return false;
|
||||
}
|
||||
|
||||
static rtree_node_elm_t *
|
||||
rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
|
||||
return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms *
|
||||
sizeof(rtree_node_elm_t), CACHELINE);
|
||||
}
|
||||
rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl;
|
||||
|
||||
static void
|
||||
rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
|
||||
{
|
||||
rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
|
||||
/* Nodes are never deleted during normal operation. */
|
||||
not_reached();
|
||||
}
|
||||
UNUSED rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
|
||||
rtree_node_dalloc_impl;
|
||||
|
||||
if (level < rtree->height - 1) {
|
||||
size_t nchildren, i;
|
||||
static rtree_leaf_elm_t *
|
||||
rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
|
||||
return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
|
||||
sizeof(rtree_leaf_elm_t), CACHELINE);
|
||||
}
|
||||
rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl;
|
||||
|
||||
nchildren = ZU(1) << rtree->level2bits[level];
|
||||
for (i = 0; i < nchildren; i++) {
|
||||
void **child = (void **)node[i];
|
||||
if (child != NULL)
|
||||
rtree_delete_subtree(rtree, child, level + 1);
|
||||
static void
|
||||
rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
|
||||
/* Leaves are never deleted during normal operation. */
|
||||
not_reached();
|
||||
}
|
||||
UNUSED rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
|
||||
rtree_leaf_dalloc_impl;
|
||||
|
||||
#ifdef JEMALLOC_JET
|
||||
# if RTREE_HEIGHT > 1
|
||||
static void
|
||||
rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree,
|
||||
unsigned level) {
|
||||
size_t nchildren = ZU(1) << rtree_levels[level].bits;
|
||||
if (level + 2 < RTREE_HEIGHT) {
|
||||
for (size_t i = 0; i < nchildren; i++) {
|
||||
rtree_node_elm_t *node =
|
||||
(rtree_node_elm_t *)atomic_load_p(&subtree[i].child,
|
||||
ATOMIC_RELAXED);
|
||||
if (node != NULL) {
|
||||
rtree_delete_subtree(tsdn, rtree, node, level +
|
||||
1);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (size_t i = 0; i < nchildren; i++) {
|
||||
rtree_leaf_elm_t *leaf =
|
||||
(rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child,
|
||||
ATOMIC_RELAXED);
|
||||
if (leaf != NULL) {
|
||||
rtree_leaf_dalloc(tsdn, rtree, leaf);
|
||||
}
|
||||
}
|
||||
}
|
||||
rtree->dalloc(node);
|
||||
|
||||
if (subtree != rtree->root) {
|
||||
rtree_node_dalloc(tsdn, rtree, subtree);
|
||||
}
|
||||
}
|
||||
# endif
|
||||
|
||||
void
|
||||
rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
|
||||
# if RTREE_HEIGHT > 1
|
||||
rtree_delete_subtree(tsdn, rtree, rtree->root, 0);
|
||||
# endif
|
||||
}
|
||||
#endif
|
||||
|
||||
static rtree_node_elm_t *
|
||||
rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
|
||||
atomic_p_t *elmp) {
|
||||
malloc_mutex_lock(tsdn, &rtree->init_lock);
|
||||
/*
|
||||
* If *elmp is non-null, then it was initialized with the init lock
|
||||
* held, so we can get by with 'relaxed' here.
|
||||
*/
|
||||
rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED);
|
||||
if (node == NULL) {
|
||||
node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
|
||||
rtree_levels[level].bits);
|
||||
if (node == NULL) {
|
||||
malloc_mutex_unlock(tsdn, &rtree->init_lock);
|
||||
return NULL;
|
||||
}
|
||||
/*
|
||||
* Even though we hold the lock, a later reader might not; we
|
||||
* need release semantics.
|
||||
*/
|
||||
atomic_store_p(elmp, node, ATOMIC_RELEASE);
|
||||
}
|
||||
malloc_mutex_unlock(tsdn, &rtree->init_lock);
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
static rtree_leaf_elm_t *
|
||||
rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) {
|
||||
malloc_mutex_lock(tsdn, &rtree->init_lock);
|
||||
/*
|
||||
* If *elmp is non-null, then it was initialized with the init lock
|
||||
* held, so we can get by with 'relaxed' here.
|
||||
*/
|
||||
rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED);
|
||||
if (leaf == NULL) {
|
||||
leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) <<
|
||||
rtree_levels[RTREE_HEIGHT-1].bits);
|
||||
if (leaf == NULL) {
|
||||
malloc_mutex_unlock(tsdn, &rtree->init_lock);
|
||||
return NULL;
|
||||
}
|
||||
/*
|
||||
* Even though we hold the lock, a later reader might not; we
|
||||
* need release semantics.
|
||||
*/
|
||||
atomic_store_p(elmp, leaf, ATOMIC_RELEASE);
|
||||
}
|
||||
malloc_mutex_unlock(tsdn, &rtree->init_lock);
|
||||
|
||||
return leaf;
|
||||
}
|
||||
|
||||
static bool
|
||||
rtree_node_valid(rtree_node_elm_t *node) {
|
||||
return ((uintptr_t)node != (uintptr_t)0);
|
||||
}
|
||||
|
||||
static bool
|
||||
rtree_leaf_valid(rtree_leaf_elm_t *leaf) {
|
||||
return ((uintptr_t)leaf != (uintptr_t)0);
|
||||
}
|
||||
|
||||
static rtree_node_elm_t *
|
||||
rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) {
|
||||
rtree_node_elm_t *node;
|
||||
|
||||
if (dependent) {
|
||||
node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
|
||||
ATOMIC_RELAXED);
|
||||
} else {
|
||||
node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
|
||||
ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
assert(!dependent || node != NULL);
|
||||
return node;
|
||||
}
|
||||
|
||||
static rtree_node_elm_t *
|
||||
rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
|
||||
unsigned level, bool dependent) {
|
||||
rtree_node_elm_t *node;
|
||||
|
||||
node = rtree_child_node_tryread(elm, dependent);
|
||||
if (!dependent && unlikely(!rtree_node_valid(node))) {
|
||||
node = rtree_node_init(tsdn, rtree, level + 1, &elm->child);
|
||||
}
|
||||
assert(!dependent || node != NULL);
|
||||
return node;
|
||||
}
|
||||
|
||||
static rtree_leaf_elm_t *
|
||||
rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) {
|
||||
rtree_leaf_elm_t *leaf;
|
||||
|
||||
if (dependent) {
|
||||
leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
|
||||
ATOMIC_RELAXED);
|
||||
} else {
|
||||
leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
|
||||
ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
assert(!dependent || leaf != NULL);
|
||||
return leaf;
|
||||
}
|
||||
|
||||
static rtree_leaf_elm_t *
|
||||
rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
|
||||
unsigned level, bool dependent) {
|
||||
rtree_leaf_elm_t *leaf;
|
||||
|
||||
leaf = rtree_child_leaf_tryread(elm, dependent);
|
||||
if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {
|
||||
leaf = rtree_leaf_init(tsdn, rtree, &elm->child);
|
||||
}
|
||||
assert(!dependent || leaf != NULL);
|
||||
return leaf;
|
||||
}
|
||||
|
||||
rtree_leaf_elm_t *
|
||||
rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key, bool dependent, bool init_missing) {
|
||||
rtree_node_elm_t *node;
|
||||
rtree_leaf_elm_t *leaf;
|
||||
#if RTREE_HEIGHT > 1
|
||||
node = rtree->root;
|
||||
#else
|
||||
leaf = rtree->root;
|
||||
#endif
|
||||
|
||||
if (config_debug) {
|
||||
uintptr_t leafkey = rtree_leafkey(key);
|
||||
for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
|
||||
assert(rtree_ctx->cache[i].leafkey != leafkey);
|
||||
}
|
||||
for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
|
||||
assert(rtree_ctx->l2_cache[i].leafkey != leafkey);
|
||||
}
|
||||
}
|
||||
|
||||
#define RTREE_GET_CHILD(level) { \
|
||||
assert(level < RTREE_HEIGHT-1); \
|
||||
if (level != 0 && !dependent && \
|
||||
unlikely(!rtree_node_valid(node))) { \
|
||||
return NULL; \
|
||||
} \
|
||||
uintptr_t subkey = rtree_subkey(key, level); \
|
||||
if (level + 2 < RTREE_HEIGHT) { \
|
||||
node = init_missing ? \
|
||||
rtree_child_node_read(tsdn, rtree, \
|
||||
&node[subkey], level, dependent) : \
|
||||
rtree_child_node_tryread(&node[subkey], \
|
||||
dependent); \
|
||||
} else { \
|
||||
leaf = init_missing ? \
|
||||
rtree_child_leaf_read(tsdn, rtree, \
|
||||
&node[subkey], level, dependent) : \
|
||||
rtree_child_leaf_tryread(&node[subkey], \
|
||||
dependent); \
|
||||
} \
|
||||
}
|
||||
/*
|
||||
* Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss):
|
||||
* (1) evict last entry in L2 cache; (2) move the collision slot from L1
|
||||
* cache down to L2; and 3) fill L1.
|
||||
*/
|
||||
#define RTREE_GET_LEAF(level) { \
|
||||
assert(level == RTREE_HEIGHT-1); \
|
||||
if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \
|
||||
return NULL; \
|
||||
} \
|
||||
if (RTREE_CTX_NCACHE_L2 > 1) { \
|
||||
memmove(&rtree_ctx->l2_cache[1], \
|
||||
&rtree_ctx->l2_cache[0], \
|
||||
sizeof(rtree_ctx_cache_elm_t) * \
|
||||
(RTREE_CTX_NCACHE_L2 - 1)); \
|
||||
} \
|
||||
size_t slot = rtree_cache_direct_map(key); \
|
||||
rtree_ctx->l2_cache[0].leafkey = \
|
||||
rtree_ctx->cache[slot].leafkey; \
|
||||
rtree_ctx->l2_cache[0].leaf = \
|
||||
rtree_ctx->cache[slot].leaf; \
|
||||
uintptr_t leafkey = rtree_leafkey(key); \
|
||||
rtree_ctx->cache[slot].leafkey = leafkey; \
|
||||
rtree_ctx->cache[slot].leaf = leaf; \
|
||||
uintptr_t subkey = rtree_subkey(key, level); \
|
||||
return &leaf[subkey]; \
|
||||
}
|
||||
if (RTREE_HEIGHT > 1) {
|
||||
RTREE_GET_CHILD(0)
|
||||
}
|
||||
if (RTREE_HEIGHT > 2) {
|
||||
RTREE_GET_CHILD(1)
|
||||
}
|
||||
if (RTREE_HEIGHT > 3) {
|
||||
for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) {
|
||||
RTREE_GET_CHILD(i)
|
||||
}
|
||||
}
|
||||
RTREE_GET_LEAF(RTREE_HEIGHT-1)
|
||||
#undef RTREE_GET_CHILD
|
||||
#undef RTREE_GET_LEAF
|
||||
not_reached();
|
||||
}
|
||||
|
||||
void
|
||||
rtree_delete(rtree_t *rtree)
|
||||
{
|
||||
|
||||
rtree_delete_subtree(rtree, rtree->root, 0);
|
||||
rtree->dalloc(rtree);
|
||||
}
|
||||
|
||||
void
|
||||
rtree_prefork(rtree_t *rtree)
|
||||
{
|
||||
|
||||
malloc_mutex_prefork(&rtree->mutex);
|
||||
}
|
||||
|
||||
void
|
||||
rtree_postfork_parent(rtree_t *rtree)
|
||||
{
|
||||
|
||||
malloc_mutex_postfork_parent(&rtree->mutex);
|
||||
}
|
||||
|
||||
void
|
||||
rtree_postfork_child(rtree_t *rtree)
|
||||
{
|
||||
|
||||
malloc_mutex_postfork_child(&rtree->mutex);
|
||||
rtree_ctx_data_init(rtree_ctx_t *ctx) {
|
||||
for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
|
||||
rtree_ctx_cache_elm_t *cache = &ctx->cache[i];
|
||||
cache->leafkey = RTREE_LEAFKEY_INVALID;
|
||||
cache->leaf = NULL;
|
||||
}
|
||||
for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
|
||||
rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i];
|
||||
cache->leafkey = RTREE_LEAFKEY_INVALID;
|
||||
cache->leaf = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
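The rtree lookup path above refills its two-level cache on a hard miss: evict the last L2 entry, demote the colliding direct-mapped L1 slot into L2, then fill L1 with the freshly looked-up leaf. Below is a self-contained sketch of that replacement policy, with invented sizes, struct names, and hash function; jemalloc's real cache lives in rtree_ctx_t.

/*
 * Illustrative sketch only (not jemalloc code): two-level lookup-cache
 * refill as described in the RTREE_GET_LEAF comment above.
 */
#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define L1_SLOTS 16  /* Direct mapped. */
#define L2_SLOTS 8   /* Ordered, last entry is evicted first. */

typedef struct {
	uintptr_t leafkey;  /* Key identifying the cached subtree leaf. */
	void *leaf;         /* Cached pointer, NULL if the slot is empty. */
} cache_elm_t;

typedef struct {
	cache_elm_t l1[L1_SLOTS];
	cache_elm_t l2[L2_SLOTS];
} lookup_cache_t;

/* Direct-map hash for L1; jemalloc derives the slot differently. */
static size_t
l1_slot(uintptr_t leafkey) {
	return (leafkey >> 4) & (L1_SLOTS - 1);
}

/* Called after a hard lookup found 'leaf' for 'leafkey'. */
static void
cache_refill(lookup_cache_t *cache, uintptr_t leafkey, void *leaf) {
	/* (1) Evict the oldest L2 entry by shifting everything down one slot. */
	memmove(&cache->l2[1], &cache->l2[0],
	    sizeof(cache_elm_t) * (L2_SLOTS - 1));

	/* (2) Demote the colliding L1 slot into the head of L2. */
	size_t slot = l1_slot(leafkey);
	cache->l2[0] = cache->l1[slot];

	/* (3) Fill L1 with the new entry. */
	cache->l1[slot].leafkey = leafkey;
	cache->l1[slot].leaf = leaf;
}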
1622
deps/jemalloc/src/stats.c
vendored
File diff suppressed because it is too large
785
deps/jemalloc/src/tcache.c
vendored
@@ -1,131 +1,153 @@
#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, tcache, tcache_t *, NULL)
|
||||
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
|
||||
|
||||
bool opt_tcache = true;
|
||||
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
|
||||
|
||||
tcache_bin_info_t *tcache_bin_info;
|
||||
static unsigned stack_nelms; /* Total stack elms per tcache. */
|
||||
|
||||
size_t nhbins;
|
||||
unsigned nhbins;
|
||||
size_t tcache_maxclass;
|
||||
|
||||
tcaches_t *tcaches;
|
||||
|
||||
/* Index of first element within tcaches that has never been used. */
|
||||
static unsigned tcaches_past;
|
||||
|
||||
/* Head of singly linked list tracking available tcaches elements. */
|
||||
static tcaches_t *tcaches_avail;
|
||||
|
||||
/* Protects tcaches{,_past,_avail}. */
|
||||
static malloc_mutex_t tcaches_mtx;
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
size_t tcache_salloc(const void *ptr)
|
||||
{
|
||||
|
||||
return (arena_salloc(ptr, false));
|
||||
size_t
|
||||
tcache_salloc(tsdn_t *tsdn, const void *ptr) {
|
||||
return arena_salloc(tsdn, ptr);
|
||||
}
|
||||
|
||||
void
|
||||
tcache_event_hard(tcache_t *tcache)
|
||||
{
|
||||
size_t binind = tcache->next_gc_bin;
|
||||
tcache_bin_t *tbin = &tcache->tbins[binind];
|
||||
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
|
||||
tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
|
||||
szind_t binind = tcache->next_gc_bin;
|
||||
|
||||
tcache_bin_t *tbin;
|
||||
if (binind < NBINS) {
|
||||
tbin = tcache_small_bin_get(tcache, binind);
|
||||
} else {
|
||||
tbin = tcache_large_bin_get(tcache, binind);
|
||||
}
|
||||
if (tbin->low_water > 0) {
|
||||
/*
|
||||
* Flush (ceiling) 3/4 of the objects below the low water mark.
|
||||
*/
|
||||
if (binind < NBINS) {
|
||||
tcache_bin_flush_small(tbin, binind, tbin->ncached -
|
||||
tbin->low_water + (tbin->low_water >> 2), tcache);
|
||||
tcache_bin_flush_small(tsd, tcache, tbin, binind,
|
||||
tbin->ncached - tbin->low_water + (tbin->low_water
|
||||
>> 2));
|
||||
/*
|
||||
* Reduce fill count by 2X. Limit lg_fill_div such that
|
||||
* the fill count is always at least 1.
|
||||
*/
|
||||
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
|
||||
if ((tbin_info->ncached_max >>
|
||||
(tcache->lg_fill_div[binind] + 1)) >= 1) {
|
||||
tcache->lg_fill_div[binind]++;
|
||||
}
|
||||
} else {
|
||||
tcache_bin_flush_large(tbin, binind, tbin->ncached -
|
||||
tbin->low_water + (tbin->low_water >> 2), tcache);
|
||||
tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
|
||||
- tbin->low_water + (tbin->low_water >> 2), tcache);
|
||||
}
|
||||
/*
|
||||
* Reduce fill count by 2X. Limit lg_fill_div such that the
|
||||
* fill count is always at least 1.
|
||||
*/
|
||||
if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
|
||||
tbin->lg_fill_div++;
|
||||
} else if (tbin->low_water < 0) {
|
||||
/*
|
||||
* Increase fill count by 2X. Make sure lg_fill_div stays
|
||||
* greater than 0.
|
||||
* Increase fill count by 2X for small bins. Make sure
|
||||
* lg_fill_div stays greater than 0.
|
||||
*/
|
||||
if (tbin->lg_fill_div > 1)
|
||||
tbin->lg_fill_div--;
|
||||
if (binind < NBINS && tcache->lg_fill_div[binind] > 1) {
|
||||
tcache->lg_fill_div[binind]--;
|
||||
}
|
||||
}
|
||||
tbin->low_water = tbin->ncached;
|
||||
|
||||
tcache->next_gc_bin++;
|
||||
if (tcache->next_gc_bin == nhbins)
|
||||
if (tcache->next_gc_bin == nhbins) {
|
||||
tcache->next_gc_bin = 0;
|
||||
tcache->ev_cnt = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void *
|
||||
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
|
||||
{
|
||||
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
|
||||
tcache_bin_t *tbin, szind_t binind, bool *tcache_success) {
|
||||
void *ret;
|
||||
|
||||
arena_tcache_fill_small(tcache->arena, tbin, binind,
|
||||
assert(tcache->arena != NULL);
|
||||
arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
|
||||
config_prof ? tcache->prof_accumbytes : 0);
|
||||
if (config_prof)
|
||||
if (config_prof) {
|
||||
tcache->prof_accumbytes = 0;
|
||||
ret = tcache_alloc_easy(tbin);
|
||||
}
|
||||
ret = tcache_alloc_easy(tbin, tcache_success);
|
||||
|
||||
return (ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void
|
||||
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
||||
tcache_t *tcache)
|
||||
{
|
||||
void *ptr;
|
||||
unsigned i, nflush, ndeferred;
|
||||
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
|
||||
szind_t binind, unsigned rem) {
|
||||
bool merged_stats = false;
|
||||
|
||||
assert(binind < NBINS);
|
||||
assert(rem <= tbin->ncached);
|
||||
|
||||
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
|
||||
/* Lock the arena bin associated with the first object. */
|
||||
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
|
||||
tbin->avail[0]);
|
||||
arena_t *arena = chunk->arena;
|
||||
arena_bin_t *bin = &arena->bins[binind];
|
||||
arena_t *arena = tcache->arena;
|
||||
assert(arena != NULL);
|
||||
unsigned nflush = tbin->ncached - rem;
|
||||
VARIABLE_ARRAY(extent_t *, item_extent, nflush);
|
||||
/* Look up extent once per item. */
|
||||
for (unsigned i = 0 ; i < nflush; i++) {
|
||||
item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
|
||||
}
|
||||
|
||||
if (config_prof && arena == tcache->arena) {
|
||||
if (arena_prof_accum(arena, tcache->prof_accumbytes))
|
||||
prof_idump();
|
||||
while (nflush > 0) {
|
||||
/* Lock the arena bin associated with the first object. */
|
||||
extent_t *extent = item_extent[0];
|
||||
arena_t *bin_arena = extent_arena_get(extent);
|
||||
arena_bin_t *bin = &bin_arena->bins[binind];
|
||||
|
||||
if (config_prof && bin_arena == arena) {
|
||||
if (arena_prof_accum(tsd_tsdn(tsd), arena,
|
||||
tcache->prof_accumbytes)) {
|
||||
prof_idump(tsd_tsdn(tsd));
|
||||
}
|
||||
tcache->prof_accumbytes = 0;
|
||||
}
|
||||
|
||||
malloc_mutex_lock(&bin->lock);
|
||||
if (config_stats && arena == tcache->arena) {
|
||||
assert(merged_stats == false);
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
|
||||
if (config_stats && bin_arena == arena) {
|
||||
assert(!merged_stats);
|
||||
merged_stats = true;
|
||||
bin->stats.nflushes++;
|
||||
bin->stats.nrequests += tbin->tstats.nrequests;
|
||||
tbin->tstats.nrequests = 0;
|
||||
}
|
||||
ndeferred = 0;
|
||||
for (i = 0; i < nflush; i++) {
|
||||
ptr = tbin->avail[i];
|
||||
assert(ptr != NULL);
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
||||
if (chunk->arena == arena) {
|
||||
size_t pageind = ((uintptr_t)ptr -
|
||||
(uintptr_t)chunk) >> LG_PAGE;
|
||||
arena_chunk_map_t *mapelm =
|
||||
arena_mapp_get(chunk, pageind);
|
||||
if (config_fill && opt_junk) {
|
||||
arena_alloc_junk_small(ptr,
|
||||
&arena_bin_info[binind], true);
|
||||
}
|
||||
arena_dalloc_bin_locked(arena, chunk, ptr,
|
||||
mapelm);
|
||||
unsigned ndeferred = 0;
|
||||
for (unsigned i = 0; i < nflush; i++) {
|
||||
void *ptr = *(tbin->avail - 1 - i);
|
||||
extent = item_extent[i];
|
||||
assert(ptr != NULL && extent != NULL);
|
||||
|
||||
if (extent_arena_get(extent) == bin_arena) {
|
||||
arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
|
||||
bin_arena, extent, ptr);
|
||||
} else {
|
||||
/*
|
||||
* This object was allocated via a different
|
||||
@@ -133,276 +155,369 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
||||
* locked. Stash the object, so that it can be
|
||||
* handled in a future pass.
|
||||
*/
|
||||
tbin->avail[ndeferred] = ptr;
|
||||
*(tbin->avail - 1 - ndeferred) = ptr;
|
||||
item_extent[ndeferred] = extent;
|
||||
ndeferred++;
|
||||
}
|
||||
}
|
||||
malloc_mutex_unlock(&bin->lock);
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
|
||||
arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
|
||||
nflush = ndeferred;
|
||||
}
|
||||
if (config_stats && merged_stats == false) {
|
||||
if (config_stats && !merged_stats) {
|
||||
/*
|
||||
* The flush loop didn't happen to flush to this thread's
|
||||
* arena, so the stats didn't get merged. Manually do so now.
|
||||
*/
|
||||
arena_bin_t *bin = &tcache->arena->bins[binind];
|
||||
malloc_mutex_lock(&bin->lock);
|
||||
arena_bin_t *bin = &arena->bins[binind];
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
|
||||
bin->stats.nflushes++;
|
||||
bin->stats.nrequests += tbin->tstats.nrequests;
|
||||
tbin->tstats.nrequests = 0;
|
||||
malloc_mutex_unlock(&bin->lock);
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
|
||||
}
|
||||
|
||||
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
|
||||
rem * sizeof(void *));
|
||||
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
|
||||
sizeof(void *));
|
||||
tbin->ncached = rem;
|
||||
if ((int)tbin->ncached < tbin->low_water)
|
||||
if ((low_water_t)tbin->ncached < tbin->low_water) {
|
||||
tbin->low_water = tbin->ncached;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
||||
tcache_t *tcache)
|
||||
{
|
||||
void *ptr;
|
||||
unsigned i, nflush, ndeferred;
|
||||
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
|
||||
unsigned rem, tcache_t *tcache) {
|
||||
bool merged_stats = false;
|
||||
|
||||
assert(binind < nhbins);
|
||||
assert(rem <= tbin->ncached);
|
||||
|
||||
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
|
||||
arena_t *arena = tcache->arena;
|
||||
assert(arena != NULL);
|
||||
unsigned nflush = tbin->ncached - rem;
|
||||
VARIABLE_ARRAY(extent_t *, item_extent, nflush);
|
||||
/* Look up extent once per item. */
|
||||
for (unsigned i = 0 ; i < nflush; i++) {
|
||||
item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
|
||||
}
|
||||
|
||||
while (nflush > 0) {
|
||||
/* Lock the arena associated with the first object. */
|
||||
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
|
||||
tbin->avail[0]);
|
||||
arena_t *arena = chunk->arena;
|
||||
extent_t *extent = item_extent[0];
|
||||
arena_t *locked_arena = extent_arena_get(extent);
|
||||
UNUSED bool idump;
|
||||
|
||||
if (config_prof)
|
||||
if (config_prof) {
|
||||
idump = false;
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
if ((config_prof || config_stats) && arena == tcache->arena) {
|
||||
}
|
||||
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
|
||||
for (unsigned i = 0; i < nflush; i++) {
|
||||
void *ptr = *(tbin->avail - 1 - i);
|
||||
assert(ptr != NULL);
|
||||
extent = item_extent[i];
|
||||
if (extent_arena_get(extent) == locked_arena) {
|
||||
large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
|
||||
extent);
|
||||
}
|
||||
}
|
||||
if ((config_prof || config_stats) && locked_arena == arena) {
|
||||
if (config_prof) {
|
||||
idump = arena_prof_accum_locked(arena,
|
||||
idump = arena_prof_accum(tsd_tsdn(tsd), arena,
|
||||
tcache->prof_accumbytes);
|
||||
tcache->prof_accumbytes = 0;
|
||||
}
|
||||
if (config_stats) {
|
||||
merged_stats = true;
|
||||
arena->stats.nrequests_large +=
|
||||
tbin->tstats.nrequests;
|
||||
arena->stats.lstats[binind - NBINS].nrequests +=
|
||||
tbin->tstats.nrequests;
|
||||
arena_stats_large_nrequests_add(tsd_tsdn(tsd),
|
||||
&arena->stats, binind,
|
||||
tbin->tstats.nrequests);
|
||||
tbin->tstats.nrequests = 0;
|
||||
}
|
||||
}
|
||||
ndeferred = 0;
|
||||
for (i = 0; i < nflush; i++) {
|
||||
ptr = tbin->avail[i];
|
||||
assert(ptr != NULL);
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
||||
if (chunk->arena == arena)
|
||||
arena_dalloc_large_locked(arena, chunk, ptr);
|
||||
else {
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
|
||||
|
||||
unsigned ndeferred = 0;
|
||||
for (unsigned i = 0; i < nflush; i++) {
|
||||
void *ptr = *(tbin->avail - 1 - i);
|
||||
extent = item_extent[i];
|
||||
assert(ptr != NULL && extent != NULL);
|
||||
|
||||
if (extent_arena_get(extent) == locked_arena) {
|
||||
large_dalloc_finish(tsd_tsdn(tsd), extent);
|
||||
} else {
|
||||
/*
|
||||
* This object was allocated via a different
|
||||
* arena than the one that is currently locked.
|
||||
* Stash the object, so that it can be handled
|
||||
* in a future pass.
|
||||
*/
|
||||
tbin->avail[ndeferred] = ptr;
|
||||
*(tbin->avail - 1 - ndeferred) = ptr;
|
||||
item_extent[ndeferred] = extent;
|
||||
ndeferred++;
|
||||
}
|
||||
}
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
if (config_prof && idump)
|
||||
prof_idump();
|
||||
if (config_prof && idump) {
|
||||
prof_idump(tsd_tsdn(tsd));
|
||||
}
|
||||
arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
|
||||
ndeferred);
|
||||
nflush = ndeferred;
|
||||
}
|
||||
if (config_stats && merged_stats == false) {
|
||||
if (config_stats && !merged_stats) {
|
||||
/*
|
||||
* The flush loop didn't happen to flush to this thread's
|
||||
* arena, so the stats didn't get merged. Manually do so now.
|
||||
*/
|
||||
arena_t *arena = tcache->arena;
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
arena->stats.nrequests_large += tbin->tstats.nrequests;
|
||||
arena->stats.lstats[binind - NBINS].nrequests +=
|
||||
tbin->tstats.nrequests;
|
||||
arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats,
|
||||
binind, tbin->tstats.nrequests);
|
||||
tbin->tstats.nrequests = 0;
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
}
|
||||
|
||||
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
|
||||
rem * sizeof(void *));
|
||||
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
|
||||
sizeof(void *));
|
||||
tbin->ncached = rem;
|
||||
if ((int)tbin->ncached < tbin->low_water)
|
||||
if ((low_water_t)tbin->ncached < tbin->low_water) {
|
||||
tbin->low_water = tbin->ncached;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
|
||||
{
|
||||
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
|
||||
assert(tcache->arena == NULL);
|
||||
tcache->arena = arena;
|
||||
|
||||
if (config_stats) {
|
||||
/* Link into list of extant tcaches. */
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
|
||||
ql_elm_new(tcache, link);
|
||||
ql_tail_insert(&arena->tcache_ql, tcache, link);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
|
||||
}
|
||||
tcache->arena = arena;
|
||||
}
|
||||
|
||||
static void
|
||||
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
|
||||
arena_t *arena = tcache->arena;
|
||||
assert(arena != NULL);
|
||||
if (config_stats) {
|
||||
/* Unlink from list of extant tcaches. */
|
||||
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
|
||||
if (config_debug) {
|
||||
bool in_ql = false;
|
||||
tcache_t *iter;
|
||||
ql_foreach(iter, &arena->tcache_ql, link) {
|
||||
if (iter == tcache) {
|
||||
in_ql = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert(in_ql);
|
||||
}
|
||||
ql_remove(&arena->tcache_ql, tcache, link);
|
||||
tcache_stats_merge(tsdn, tcache, arena);
|
||||
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
|
||||
}
|
||||
tcache->arena = NULL;
|
||||
}
|
||||
|
||||
void
|
||||
tcache_arena_dissociate(tcache_t *tcache)
|
||||
{
|
||||
|
||||
if (config_stats) {
|
||||
/* Unlink from list of extant tcaches. */
|
||||
malloc_mutex_lock(&tcache->arena->lock);
|
||||
ql_remove(&tcache->arena->tcache_ql, tcache, link);
|
||||
tcache_stats_merge(tcache, tcache->arena);
|
||||
malloc_mutex_unlock(&tcache->arena->lock);
|
||||
}
|
||||
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
|
||||
tcache_arena_dissociate(tsdn, tcache);
|
||||
tcache_arena_associate(tsdn, tcache, arena);
|
||||
}
|
||||
|
||||
bool
|
||||
tsd_tcache_enabled_data_init(tsd_t *tsd) {
|
||||
/* Called upon tsd initialization. */
|
||||
tsd_tcache_enabled_set(tsd, opt_tcache);
|
||||
tsd_slow_update(tsd);
|
||||
|
||||
if (opt_tcache) {
|
||||
/* Trigger tcache init. */
|
||||
tsd_tcache_data_init(tsd);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Initialize auto tcache (embedded in TSD). */
|
||||
static void
|
||||
tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
|
||||
memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
|
||||
tcache->prof_accumbytes = 0;
|
||||
tcache->next_gc_bin = 0;
|
||||
tcache->arena = NULL;
|
||||
|
||||
ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
|
||||
|
||||
size_t stack_offset = 0;
|
||||
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
|
||||
memset(tcache->tbins_small, 0, sizeof(tcache_bin_t) * NBINS);
|
||||
memset(tcache->tbins_large, 0, sizeof(tcache_bin_t) * (nhbins - NBINS));
|
||||
unsigned i = 0;
|
||||
for (; i < NBINS; i++) {
|
||||
tcache->lg_fill_div[i] = 1;
|
||||
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
|
||||
/*
|
||||
* avail points past the available space. Allocations will
|
||||
* access the slots toward higher addresses (for the benefit of
|
||||
* prefetch).
|
||||
*/
|
||||
tcache_small_bin_get(tcache, i)->avail =
|
||||
(void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
|
||||
}
|
||||
for (; i < nhbins; i++) {
|
||||
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
|
||||
tcache_large_bin_get(tcache, i)->avail =
|
||||
(void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
|
||||
}
|
||||
assert(stack_offset == stack_nelms * sizeof(void *));
|
||||
}
|
||||
|
||||
/* Initialize auto tcache (embedded in TSD). */
|
||||
bool
|
||||
tsd_tcache_data_init(tsd_t *tsd) {
|
||||
tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
|
||||
assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
|
||||
size_t size = stack_nelms * sizeof(void *);
|
||||
/* Avoid false cacheline sharing. */
|
||||
size = sz_sa2u(size, CACHELINE);
|
||||
|
||||
void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
|
||||
NULL, true, arena_get(TSDN_NULL, 0, true));
|
||||
if (avail_array == NULL) {
|
||||
return true;
|
||||
}
|
||||
|
||||
tcache_init(tsd, tcache, avail_array);
|
||||
/*
|
||||
* Initialization is a bit tricky here. After malloc init is done, all
|
||||
* threads can rely on arena_choose and associate tcache accordingly.
|
||||
* However, the thread that does actual malloc bootstrapping relies on
|
||||
* functional tsd, and it can only rely on a0. In that case, we
|
||||
* associate its tcache to a0 temporarily, and later on
|
||||
* arena_choose_hard() will re-associate properly.
|
||||
*/
|
||||
tcache->arena = NULL;
|
||||
arena_t *arena;
|
||||
if (!malloc_initialized()) {
|
||||
/* If in initialization, assign to a0. */
|
||||
arena = arena_get(tsd_tsdn(tsd), 0, false);
|
||||
tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
|
||||
} else {
|
||||
arena = arena_choose(tsd, NULL);
|
||||
/* This may happen if thread.tcache.enabled is used. */
|
||||
if (tcache->arena == NULL) {
|
||||
tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
|
||||
}
|
||||
}
|
||||
assert(arena == tcache->arena);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Created manual tcache for tcache.create mallctl. */
|
||||
tcache_t *
|
||||
tcache_create(arena_t *arena)
|
||||
{
|
||||
tcache_create_explicit(tsd_t *tsd) {
|
||||
tcache_t *tcache;
|
||||
size_t size, stack_offset;
|
||||
unsigned i;
|
||||
|
||||
size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
|
||||
size = sizeof(tcache_t);
|
||||
/* Naturally align the pointer stacks. */
|
||||
size = PTR_CEILING(size);
|
||||
stack_offset = size;
|
||||
size += stack_nelms * sizeof(void *);
|
||||
/*
|
||||
* Round up to the nearest multiple of the cacheline size, in order to
|
||||
* avoid the possibility of false cacheline sharing.
|
||||
*
|
||||
* That this works relies on the same logic as in ipalloc(), but we
|
||||
* cannot directly call ipalloc() here due to tcache bootstrapping
|
||||
* issues.
|
||||
*/
|
||||
size = (size + CACHELINE_MASK) & (-CACHELINE);
|
||||
/* Avoid false cacheline sharing. */
|
||||
size = sz_sa2u(size, CACHELINE);
|
||||
|
||||
if (size <= SMALL_MAXCLASS)
|
||||
tcache = (tcache_t *)arena_malloc_small(arena, size, true);
|
||||
else if (size <= tcache_maxclass)
|
||||
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
|
||||
else
|
||||
tcache = (tcache_t *)icalloct(size, false, arena);
|
||||
|
||||
if (tcache == NULL)
|
||||
return (NULL);
|
||||
|
||||
tcache_arena_associate(tcache, arena);
|
||||
|
||||
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
|
||||
for (i = 0; i < nhbins; i++) {
|
||||
tcache->tbins[i].lg_fill_div = 1;
|
||||
tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
|
||||
(uintptr_t)stack_offset);
|
||||
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
|
||||
tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
|
||||
arena_get(TSDN_NULL, 0, true));
|
||||
if (tcache == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tcache_tsd_set(&tcache);
|
||||
tcache_init(tsd, tcache,
|
||||
(void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
|
||||
tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));
|
||||
|
||||
return (tcache);
|
||||
return tcache;
|
||||
}
|
||||
|
||||
void
|
||||
tcache_destroy(tcache_t *tcache)
|
||||
{
|
||||
unsigned i;
|
||||
size_t tcache_size;
|
||||
static void
|
||||
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
|
||||
assert(tcache->arena != NULL);
|
||||
|
||||
tcache_arena_dissociate(tcache);
|
||||
for (unsigned i = 0; i < NBINS; i++) {
|
||||
tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
|
||||
tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
|
||||
|
||||
for (i = 0; i < NBINS; i++) {
|
||||
tcache_bin_t *tbin = &tcache->tbins[i];
|
||||
tcache_bin_flush_small(tbin, i, 0, tcache);
|
||||
|
||||
if (config_stats && tbin->tstats.nrequests != 0) {
|
||||
arena_t *arena = tcache->arena;
|
||||
arena_bin_t *bin = &arena->bins[i];
|
||||
malloc_mutex_lock(&bin->lock);
|
||||
bin->stats.nrequests += tbin->tstats.nrequests;
|
||||
malloc_mutex_unlock(&bin->lock);
|
||||
if (config_stats) {
|
||||
assert(tbin->tstats.nrequests == 0);
|
||||
}
|
||||
}
|
||||
for (unsigned i = NBINS; i < nhbins; i++) {
|
||||
tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
|
||||
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
|
||||
|
||||
for (; i < nhbins; i++) {
|
||||
tcache_bin_t *tbin = &tcache->tbins[i];
|
||||
tcache_bin_flush_large(tbin, i, 0, tcache);
|
||||
|
||||
if (config_stats && tbin->tstats.nrequests != 0) {
|
||||
arena_t *arena = tcache->arena;
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
arena->stats.nrequests_large += tbin->tstats.nrequests;
|
||||
arena->stats.lstats[i - NBINS].nrequests +=
|
||||
tbin->tstats.nrequests;
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
if (config_stats) {
|
||||
assert(tbin->tstats.nrequests == 0);
|
||||
}
|
||||
}
|
||||
|
||||
if (config_prof && tcache->prof_accumbytes > 0 &&
|
||||
arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
|
||||
prof_idump();
|
||||
|
||||
tcache_size = arena_salloc(tcache, false);
|
||||
if (tcache_size <= SMALL_MAXCLASS) {
|
||||
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
|
||||
arena_t *arena = chunk->arena;
|
||||
size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
|
||||
LG_PAGE;
|
||||
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
|
||||
|
||||
arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
|
||||
} else if (tcache_size <= tcache_maxclass) {
|
||||
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
|
||||
arena_t *arena = chunk->arena;
|
||||
|
||||
arena_dalloc_large(arena, chunk, tcache);
|
||||
} else
|
||||
idalloct(tcache, false);
|
||||
}
|
||||
|
||||
void
|
||||
tcache_thread_cleanup(void *arg)
|
||||
{
|
||||
tcache_t *tcache = *(tcache_t **)arg;
|
||||
|
||||
if (tcache == TCACHE_STATE_DISABLED) {
|
||||
/* Do nothing. */
|
||||
} else if (tcache == TCACHE_STATE_REINCARNATED) {
|
||||
/*
|
||||
* Another destructor called an allocator function after this
|
||||
* destructor was called. Reset tcache to
|
||||
* TCACHE_STATE_PURGATORY in order to receive another callback.
|
||||
*/
|
||||
tcache = TCACHE_STATE_PURGATORY;
|
||||
tcache_tsd_set(&tcache);
|
||||
} else if (tcache == TCACHE_STATE_PURGATORY) {
|
||||
/*
|
||||
* The previous time this destructor was called, we set the key
|
||||
* to TCACHE_STATE_PURGATORY so that other destructors wouldn't
|
||||
* cause re-creation of the tcache. This time, do nothing, so
|
||||
* that the destructor will not be called again.
|
||||
*/
|
||||
} else if (tcache != NULL) {
|
||||
assert(tcache != TCACHE_STATE_PURGATORY);
|
||||
tcache_destroy(tcache);
|
||||
tcache = TCACHE_STATE_PURGATORY;
|
||||
tcache_tsd_set(&tcache);
|
||||
arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
|
||||
tcache->prof_accumbytes)) {
|
||||
prof_idump(tsd_tsdn(tsd));
|
||||
}
|
||||
}
|
||||
|
||||
/* Caller must own arena->lock. */
|
||||
void
|
||||
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
|
||||
{
|
||||
tcache_flush(tsd_t *tsd) {
|
||||
assert(tcache_available(tsd));
|
||||
tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
|
||||
}
|
||||
|
||||
static void
|
||||
tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
|
||||
tcache_flush_cache(tsd, tcache);
|
||||
tcache_arena_dissociate(tsd_tsdn(tsd), tcache);
|
||||
|
||||
if (tsd_tcache) {
|
||||
/* Release the avail array for the TSD embedded auto tcache. */
|
||||
void *avail_array =
|
||||
(void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
|
||||
(uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
|
||||
idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
|
||||
} else {
|
||||
/* Release both the tcache struct and avail array. */
|
||||
idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
|
||||
}
|
||||
}
|
||||
|
||||
/* For auto tcache (embedded in TSD) only. */
|
||||
void
|
||||
tcache_cleanup(tsd_t *tsd) {
|
||||
tcache_t *tcache = tsd_tcachep_get(tsd);
|
||||
if (!tcache_available(tsd)) {
|
||||
assert(tsd_tcache_enabled_get(tsd) == false);
|
||||
if (config_debug) {
|
||||
assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
|
||||
}
|
||||
return;
|
||||
}
|
||||
assert(tsd_tcache_enabled_get(tsd));
|
||||
assert(tcache_small_bin_get(tcache, 0)->avail != NULL);
|
||||
|
||||
tcache_destroy(tsd, tcache, true);
|
||||
if (config_debug) {
|
||||
tcache_small_bin_get(tcache, 0)->avail = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
|
||||
unsigned i;
|
||||
|
||||
cassert(config_stats);
|
||||
@@ -410,48 +525,151 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
|
||||
/* Merge and reset tcache stats. */
|
||||
for (i = 0; i < NBINS; i++) {
|
||||
arena_bin_t *bin = &arena->bins[i];
|
||||
tcache_bin_t *tbin = &tcache->tbins[i];
|
||||
malloc_mutex_lock(&bin->lock);
|
||||
tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
|
||||
malloc_mutex_lock(tsdn, &bin->lock);
|
||||
bin->stats.nrequests += tbin->tstats.nrequests;
|
||||
malloc_mutex_unlock(&bin->lock);
|
||||
malloc_mutex_unlock(tsdn, &bin->lock);
|
||||
tbin->tstats.nrequests = 0;
|
||||
}
|
||||
|
||||
for (; i < nhbins; i++) {
|
||||
malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
|
||||
tcache_bin_t *tbin = &tcache->tbins[i];
|
||||
arena->stats.nrequests_large += tbin->tstats.nrequests;
|
||||
lstats->nrequests += tbin->tstats.nrequests;
|
||||
tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
|
||||
arena_stats_large_nrequests_add(tsdn, &arena->stats, i,
|
||||
tbin->tstats.nrequests);
|
||||
tbin->tstats.nrequests = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static bool
tcaches_create_prep(tsd_t *tsd) {
bool err;

malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);

if (tcaches == NULL) {
tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
* (MALLOCX_TCACHE_MAX+1), CACHELINE);
if (tcaches == NULL) {
err = true;
goto label_return;
}
}

if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
err = true;
goto label_return;
}

err = false;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
return err;
}

bool
|
||||
tcache_boot0(void)
|
||||
{
|
||||
unsigned i;
|
||||
tcaches_create(tsd_t *tsd, unsigned *r_ind) {
|
||||
witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
|
||||
|
||||
/*
|
||||
* If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
|
||||
* known.
|
||||
*/
|
||||
if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
|
||||
bool err;
|
||||
|
||||
if (tcaches_create_prep(tsd)) {
|
||||
err = true;
|
||||
goto label_return;
|
||||
}
|
||||
|
||||
tcache_t *tcache = tcache_create_explicit(tsd);
|
||||
if (tcache == NULL) {
|
||||
err = true;
|
||||
goto label_return;
|
||||
}
|
||||
|
||||
tcaches_t *elm;
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
if (tcaches_avail != NULL) {
|
||||
elm = tcaches_avail;
|
||||
tcaches_avail = tcaches_avail->next;
|
||||
elm->tcache = tcache;
|
||||
*r_ind = (unsigned)(elm - tcaches);
|
||||
} else {
|
||||
elm = &tcaches[tcaches_past];
|
||||
elm->tcache = tcache;
|
||||
*r_ind = tcaches_past;
|
||||
tcaches_past++;
|
||||
}
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
|
||||
err = false;
|
||||
label_return:
|
||||
witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
|
||||
return err;
|
||||
}
|
||||
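tcaches_create() above is, as far as I can tell, the backend for jemalloc's public "tcache.create" mallctl: it hands back an index into the tcaches array, which callers later pass to mallocx()/dallocx() via MALLOCX_TCACHE(). A hedged usage sketch against the public API (names taken from jemalloc's manual rather than from this diff; verify against the installed jemalloc.h):

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void) {
    unsigned tci;
    size_t sz = sizeof(tci);

    /* Ask jemalloc for an explicit (manually managed) tcache index. */
    if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0) {
        fprintf(stderr, "tcache.create failed\n");
        return 1;
    }

    /* Route an allocation/deallocation pair through that tcache. */
    void *p = mallocx(4096, MALLOCX_TCACHE(tci));
    if (p != NULL) {
        dallocx(p, MALLOCX_TCACHE(tci));
    }

    /* Flush and release the explicit tcache when done. */
    mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
    mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
    return 0;
}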
|
||||
static tcache_t *
|
||||
tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) {
|
||||
malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
|
||||
if (elm->tcache == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
tcache_t *tcache = elm->tcache;
|
||||
elm->tcache = NULL;
|
||||
return tcache;
|
||||
}
|
||||
|
||||
void
|
||||
tcaches_flush(tsd_t *tsd, unsigned ind) {
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]);
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
if (tcache != NULL) {
|
||||
tcache_destroy(tsd, tcache, false);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcaches_destroy(tsd_t *tsd, unsigned ind) {
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
tcaches_t *elm = &tcaches[ind];
|
||||
tcache_t *tcache = tcaches_elm_remove(tsd, elm);
|
||||
elm->next = tcaches_avail;
|
||||
tcaches_avail = elm;
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
if (tcache != NULL) {
|
||||
tcache_destroy(tsd, tcache, false);
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
tcache_boot(tsdn_t *tsdn) {
|
||||
/* If necessary, clamp opt_lg_tcache_max. */
|
||||
if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
|
||||
SMALL_MAXCLASS) {
|
||||
tcache_maxclass = SMALL_MAXCLASS;
|
||||
else if ((1U << opt_lg_tcache_max) > arena_maxclass)
|
||||
tcache_maxclass = arena_maxclass;
|
||||
else
|
||||
tcache_maxclass = (1U << opt_lg_tcache_max);
|
||||
} else {
|
||||
tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
|
||||
}
|
||||
|
||||
nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
|
||||
if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
|
||||
malloc_mutex_rank_exclusive)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
nhbins = sz_size2index(tcache_maxclass) + 1;
|
||||
|
||||
/* Initialize tcache_bin_info. */
|
||||
tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
|
||||
sizeof(tcache_bin_info_t));
|
||||
if (tcache_bin_info == NULL)
|
||||
return (true);
|
||||
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
|
||||
* sizeof(tcache_bin_info_t), CACHELINE);
|
||||
if (tcache_bin_info == NULL) {
|
||||
return true;
|
||||
}
|
||||
stack_nelms = 0;
|
||||
unsigned i;
|
||||
for (i = 0; i < NBINS; i++) {
|
||||
if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
|
||||
if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
|
||||
tcache_bin_info[i].ncached_max =
|
||||
TCACHE_NSLOTS_SMALL_MIN;
|
||||
} else if ((arena_bin_info[i].nregs << 1) <=
|
||||
TCACHE_NSLOTS_SMALL_MAX) {
|
||||
tcache_bin_info[i].ncached_max =
|
||||
(arena_bin_info[i].nregs << 1);
|
||||
} else {
|
||||
@@ -465,15 +683,26 @@ tcache_boot0(void)
|
||||
stack_nelms += tcache_bin_info[i].ncached_max;
|
||||
}
|
||||
|
||||
return (false);
|
||||
return false;
|
||||
}
|
||||
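The sizing loop above caps each small bin's cache at twice the bin's regions-per-slab count, clamped to the TCACHE_NSLOTS_SMALL_MIN/MAX bounds. A tiny sketch of that clamp, with illustrative constants standing in for jemalloc's:

#include <stdio.h>

/* Illustrative bounds; jemalloc defines its own values for these. */
#define TCACHE_NSLOTS_SMALL_MIN 20
#define TCACHE_NSLOTS_SMALL_MAX 200

/* Cache up to 2*nregs objects per bin, clamped to [MIN, MAX]. */
static unsigned
ncached_max_for(unsigned nregs) {
    unsigned want = nregs << 1;
    if (want <= TCACHE_NSLOTS_SMALL_MIN) {
        return TCACHE_NSLOTS_SMALL_MIN;
    }
    if (want <= TCACHE_NSLOTS_SMALL_MAX) {
        return want;
    }
    return TCACHE_NSLOTS_SMALL_MAX;
}

int
main(void) {
    unsigned nregs_examples[] = {1, 16, 64, 512};
    for (unsigned i = 0; i < 4; i++) {
        printf("nregs=%u -> ncached_max=%u\n",
            nregs_examples[i], ncached_max_for(nregs_examples[i]));
    }
    return 0;
}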
|
||||
bool
|
||||
tcache_boot1(void)
|
||||
{
|
||||
|
||||
if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
|
||||
return (true);
|
||||
|
||||
return (false);
|
||||
void
|
||||
tcache_prefork(tsdn_t *tsdn) {
|
||||
if (!config_prof && opt_tcache) {
|
||||
malloc_mutex_prefork(tsdn, &tcaches_mtx);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_postfork_parent(tsdn_t *tsdn) {
|
||||
if (!config_prof && opt_tcache) {
|
||||
malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_postfork_child(tsdn_t *tsdn) {
|
||||
if (!config_prof && opt_tcache) {
|
||||
malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
|
||||
}
|
||||
}
|
||||
|
||||
288
deps/jemalloc/src/tsd.c
vendored
@@ -1,5 +1,10 @@
#define JEMALLOC_TSD_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define JEMALLOC_TSD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"

/******************************************************************************/
/* Data. */
@@ -7,28 +12,148 @@
static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];

#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER;
__thread bool JEMALLOC_TLS_MODEL tsd_initialized = false;
bool tsd_booted = false;
#elif (defined(JEMALLOC_TLS))
__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER;
pthread_key_t tsd_tsd;
bool tsd_booted = false;
#elif (defined(_WIN32))
DWORD tsd_tsd;
tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER};
bool tsd_booted = false;
#else

/*
 * This contains a mutex, but it's pretty convenient to allow the mutex code to
 * have a dependency on tsd. So we define the struct here, and only refer to it
 * by pointer in the header.
 */
struct tsd_init_head_s {
ql_head(tsd_init_block_t) blocks;
malloc_mutex_t lock;
};

pthread_key_t tsd_tsd;
tsd_init_head_t tsd_init_head = {
ql_head_initializer(blocks),
MALLOC_MUTEX_INITIALIZER
};
tsd_wrapper_t tsd_boot_wrapper = {
false,
TSD_INITIALIZER
};
bool tsd_booted = false;
#endif

/******************************************************************************/
|
||||
|
||||
void
|
||||
tsd_slow_update(tsd_t *tsd) {
|
||||
if (tsd_nominal(tsd)) {
|
||||
if (malloc_slow || !tsd_tcache_enabled_get(tsd) ||
|
||||
tsd_reentrancy_level_get(tsd) > 0) {
|
||||
tsd->state = tsd_state_nominal_slow;
|
||||
} else {
|
||||
tsd->state = tsd_state_nominal;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool
|
||||
tsd_data_init(tsd_t *tsd) {
|
||||
/*
|
||||
* We initialize the rtree context first (before the tcache), since the
|
||||
* tcache initialization depends on it.
|
||||
*/
|
||||
rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
|
||||
|
||||
return tsd_tcache_enabled_data_init(tsd);
|
||||
}
|
||||
|
||||
static void
|
||||
assert_tsd_data_cleanup_done(tsd_t *tsd) {
|
||||
assert(!tsd_nominal(tsd));
|
||||
assert(*tsd_arenap_get_unsafe(tsd) == NULL);
|
||||
assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
|
||||
assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true);
|
||||
assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL);
|
||||
assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
|
||||
assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
|
||||
}
|
||||
|
||||
static bool
|
||||
tsd_data_init_nocleanup(tsd_t *tsd) {
|
||||
assert(tsd->state == tsd_state_reincarnated ||
|
||||
tsd->state == tsd_state_minimal_initialized);
|
||||
/*
|
||||
* During reincarnation, there is no guarantee that the cleanup function
|
||||
* will be called (deallocation may happen after all tsd destructors).
|
||||
* We set up tsd in a way that no cleanup is needed.
|
||||
*/
|
||||
rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
|
||||
*tsd_arenas_tdata_bypassp_get(tsd) = true;
|
||||
*tsd_tcache_enabledp_get_unsafe(tsd) = false;
|
||||
*tsd_reentrancy_levelp_get(tsd) = 1;
|
||||
assert_tsd_data_cleanup_done(tsd);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
tsd_t *
|
||||
tsd_fetch_slow(tsd_t *tsd, bool minimal) {
|
||||
assert(!tsd_fast(tsd));
|
||||
|
||||
if (tsd->state == tsd_state_nominal_slow) {
|
||||
/* On slow path but no work needed. */
|
||||
assert(malloc_slow || !tsd_tcache_enabled_get(tsd) ||
|
||||
tsd_reentrancy_level_get(tsd) > 0 ||
|
||||
*tsd_arenas_tdata_bypassp_get(tsd));
|
||||
} else if (tsd->state == tsd_state_uninitialized) {
|
||||
if (!minimal) {
|
||||
tsd->state = tsd_state_nominal;
|
||||
tsd_slow_update(tsd);
|
||||
/* Trigger cleanup handler registration. */
|
||||
tsd_set(tsd);
|
||||
tsd_data_init(tsd);
|
||||
} else {
|
||||
tsd->state = tsd_state_minimal_initialized;
|
||||
tsd_set(tsd);
|
||||
tsd_data_init_nocleanup(tsd);
|
||||
}
|
||||
} else if (tsd->state == tsd_state_minimal_initialized) {
|
||||
if (!minimal) {
|
||||
/* Switch to fully initialized. */
|
||||
tsd->state = tsd_state_nominal;
|
||||
assert(*tsd_reentrancy_levelp_get(tsd) >= 1);
|
||||
(*tsd_reentrancy_levelp_get(tsd))--;
|
||||
tsd_slow_update(tsd);
|
||||
tsd_data_init(tsd);
|
||||
} else {
|
||||
assert_tsd_data_cleanup_done(tsd);
|
||||
}
|
||||
} else if (tsd->state == tsd_state_purgatory) {
|
||||
tsd->state = tsd_state_reincarnated;
|
||||
tsd_set(tsd);
|
||||
tsd_data_init_nocleanup(tsd);
|
||||
} else {
|
||||
assert(tsd->state == tsd_state_reincarnated);
|
||||
}
|
||||
|
||||
return tsd;
|
||||
}
|
||||
|
||||
void *
|
||||
malloc_tsd_malloc(size_t size)
|
||||
{
|
||||
|
||||
/* Avoid choose_arena() in order to dodge bootstrapping issues. */
|
||||
return (arena_malloc(arenas[0], size, false, false));
|
||||
malloc_tsd_malloc(size_t size) {
|
||||
return a0malloc(CACHELINE_CEILING(size));
|
||||
}
|
||||
|
||||
void
|
||||
malloc_tsd_dalloc(void *wrapper)
|
||||
{
|
||||
|
||||
idalloct(wrapper, false);
|
||||
}
|
||||
|
||||
void
|
||||
malloc_tsd_no_cleanup(void *arg)
|
||||
{
|
||||
|
||||
not_reached();
|
||||
malloc_tsd_dalloc(void *wrapper) {
|
||||
a0dalloc(wrapper);
|
||||
}
|
||||
|
||||
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
|
||||
@@ -36,21 +161,22 @@ malloc_tsd_no_cleanup(void *arg)
|
||||
JEMALLOC_EXPORT
|
||||
#endif
|
||||
void
|
||||
_malloc_thread_cleanup(void)
|
||||
{
|
||||
_malloc_thread_cleanup(void) {
|
||||
bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < ncleanups; i++)
|
||||
for (i = 0; i < ncleanups; i++) {
|
||||
pending[i] = true;
|
||||
}
|
||||
|
||||
do {
|
||||
again = false;
|
||||
for (i = 0; i < ncleanups; i++) {
|
||||
if (pending[i]) {
|
||||
pending[i] = cleanups[i]();
|
||||
if (pending[i])
|
||||
if (pending[i]) {
|
||||
again = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
} while (again);
|
||||
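The loop in _malloc_thread_cleanup() above keeps re-running the registered cleanups until none of them reports pending work, since one cleanup can re-create state that another one owns. A standalone sketch of that fixpoint pattern, with hypothetical cleanup functions standing in for jemalloc's:

#include <stdbool.h>
#include <stdio.h>

#define CLEANUPS_MAX 8

/* Each cleanup returns true if it still has (or re-created) work to do. */
typedef bool (*cleanup_t)(void);

static cleanup_t cleanups[CLEANUPS_MAX];
static unsigned ncleanups = 0;

static void
cleanup_register(cleanup_t f) {
    if (ncleanups < CLEANUPS_MAX) {
        cleanups[ncleanups++] = f;
    }
}

static void
run_cleanups(void) {
    bool pending[CLEANUPS_MAX], again;

    for (unsigned i = 0; i < ncleanups; i++) {
        pending[i] = true;
    }
    do {
        again = false;
        for (unsigned i = 0; i < ncleanups; i++) {
            if (pending[i]) {
                pending[i] = cleanups[i]();
                if (pending[i]) {
                    again = true;
                }
            }
        }
    } while (again);
}

/* Hypothetical cleanups: the first needs two passes, the second one pass. */
static int calls_a = 0;
static bool cleanup_a(void) { return ++calls_a < 2; }
static bool cleanup_b(void) { return false; }

int
main(void) {
    cleanup_register(cleanup_a);
    cleanup_register(cleanup_b);
    run_cleanups();
    printf("cleanup_a ran %d times\n", calls_a);
    return 0;
}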
@@ -58,26 +184,92 @@ _malloc_thread_cleanup(void)
|
||||
#endif
|
||||
|
||||
void
|
||||
malloc_tsd_cleanup_register(bool (*f)(void))
|
||||
{
|
||||
|
||||
malloc_tsd_cleanup_register(bool (*f)(void)) {
|
||||
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
|
||||
cleanups[ncleanups] = f;
|
||||
ncleanups++;
|
||||
}
|
||||
|
||||
static void
|
||||
tsd_do_data_cleanup(tsd_t *tsd) {
|
||||
prof_tdata_cleanup(tsd);
|
||||
iarena_cleanup(tsd);
|
||||
arena_cleanup(tsd);
|
||||
arenas_tdata_cleanup(tsd);
|
||||
tcache_cleanup(tsd);
|
||||
witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd));
|
||||
}
|
||||
|
||||
void
|
||||
malloc_tsd_boot(void)
|
||||
{
|
||||
tsd_cleanup(void *arg) {
|
||||
tsd_t *tsd = (tsd_t *)arg;
|
||||
|
||||
switch (tsd->state) {
|
||||
case tsd_state_uninitialized:
|
||||
/* Do nothing. */
|
||||
break;
|
||||
case tsd_state_minimal_initialized:
|
||||
/* This implies the thread only did free() in its life time. */
|
||||
/* Fall through. */
|
||||
case tsd_state_reincarnated:
|
||||
/*
|
||||
* Reincarnated means another destructor deallocated memory
|
||||
* after the destructor was called. Cleanup isn't required but
|
||||
* is still called for testing and completeness.
|
||||
*/
|
||||
assert_tsd_data_cleanup_done(tsd);
|
||||
/* Fall through. */
|
||||
case tsd_state_nominal:
|
||||
case tsd_state_nominal_slow:
|
||||
tsd_do_data_cleanup(tsd);
|
||||
tsd->state = tsd_state_purgatory;
|
||||
tsd_set(tsd);
|
||||
break;
|
||||
case tsd_state_purgatory:
|
||||
/*
|
||||
* The previous time this destructor was called, we set the
|
||||
* state to tsd_state_purgatory so that other destructors
|
||||
* wouldn't cause re-creation of the tsd. This time, do
|
||||
* nothing, and do not request another callback.
|
||||
*/
|
||||
break;
|
||||
default:
|
||||
not_reached();
|
||||
}
|
||||
#ifdef JEMALLOC_JET
|
||||
test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd);
|
||||
int *data = tsd_test_datap_get_unsafe(tsd);
|
||||
if (test_callback != NULL) {
|
||||
test_callback(data);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
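The purgatory/reincarnated handling above exists because a pthreads TSD destructor is only invoked again if the destructor itself stores a fresh non-NULL value, and other destructors may still touch our state after ours has run. A minimal sketch of the same pattern with a plain pthread_key_t (the sentinel value is illustrative, not jemalloc's):

#include <pthread.h>
#include <stdio.h>

static pthread_key_t key;

#define STATE_PURGATORY ((void *)1) /* destructor already ran once */

static void
destructor(void *arg) {
    if (arg == STATE_PURGATORY) {
        /* Second pass: really done; do not re-arm the key. */
        return;
    }
    /* First pass: tear down per-thread state... */
    fprintf(stderr, "tearing down thread state %p\n", arg);
    /*
     * Re-set the key so the destructor fires once more; if another
     * destructor touches our state in between, we still get a chance
     * to clean up after it.
     */
    pthread_setspecific(key, STATE_PURGATORY);
}

static void *
thread_main(void *unused) {
    (void)unused;
    pthread_setspecific(key, (void *)0x1234); /* stand-in per-thread state */
    return NULL;
}

int
main(void) {
    pthread_t thd;
    pthread_key_create(&key, destructor);
    pthread_create(&thd, NULL, thread_main, NULL);
    pthread_join(thd, NULL);
    pthread_key_delete(key);
    return 0;
}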
|
||||
tsd_t *
|
||||
malloc_tsd_boot0(void) {
|
||||
tsd_t *tsd;
|
||||
|
||||
ncleanups = 0;
|
||||
if (tsd_boot0()) {
|
||||
return NULL;
|
||||
}
|
||||
tsd = tsd_fetch();
|
||||
*tsd_arenas_tdata_bypassp_get(tsd) = true;
|
||||
return tsd;
|
||||
}
|
||||
|
||||
void
|
||||
malloc_tsd_boot1(void) {
|
||||
tsd_boot1();
|
||||
tsd_t *tsd = tsd_fetch();
|
||||
/* malloc_slow has been set properly. Update tsd_slow. */
|
||||
tsd_slow_update(tsd);
|
||||
*tsd_arenas_tdata_bypassp_get(tsd) = false;
|
||||
}
|
||||
|
||||
#ifdef _WIN32
|
||||
static BOOL WINAPI
|
||||
_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
|
||||
{
|
||||
|
||||
_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
|
||||
switch (fdwReason) {
|
||||
#ifdef JEMALLOC_LAZY_LOCK
|
||||
case DLL_THREAD_ATTACH:
|
||||
@@ -90,52 +282,60 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return (true);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* We need to be able to say "read" here (in the "pragma section"), but have
|
||||
* hooked "read". We won't read for the rest of the file, so we can get away
|
||||
* with unhooking.
|
||||
*/
|
||||
#ifdef read
|
||||
# undef read
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
# ifdef _M_IX86
|
||||
# pragma comment(linker, "/INCLUDE:__tls_used")
|
||||
# pragma comment(linker, "/INCLUDE:_tls_callback")
|
||||
# else
|
||||
# pragma comment(linker, "/INCLUDE:_tls_used")
|
||||
# pragma comment(linker, "/INCLUDE:tls_callback")
|
||||
# endif
|
||||
# pragma section(".CRT$XLY",long,read)
|
||||
#endif
|
||||
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
|
||||
static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
|
||||
BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
|
||||
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
|
||||
#endif
|
||||
|
||||
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
|
||||
!defined(_WIN32))
|
||||
void *
|
||||
tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
|
||||
{
|
||||
tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) {
|
||||
pthread_t self = pthread_self();
|
||||
tsd_init_block_t *iter;
|
||||
|
||||
/* Check whether this thread has already inserted into the list. */
|
||||
malloc_mutex_lock(&head->lock);
|
||||
malloc_mutex_lock(TSDN_NULL, &head->lock);
|
||||
ql_foreach(iter, &head->blocks, link) {
|
||||
if (iter->thread == self) {
|
||||
malloc_mutex_unlock(&head->lock);
|
||||
return (iter->data);
|
||||
malloc_mutex_unlock(TSDN_NULL, &head->lock);
|
||||
return iter->data;
|
||||
}
|
||||
}
|
||||
/* Insert block into list. */
|
||||
ql_elm_new(block, link);
|
||||
block->thread = self;
|
||||
ql_tail_insert(&head->blocks, block, link);
|
||||
malloc_mutex_unlock(&head->lock);
|
||||
return (NULL);
|
||||
malloc_mutex_unlock(TSDN_NULL, &head->lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void
|
||||
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
|
||||
{
|
||||
|
||||
malloc_mutex_lock(&head->lock);
|
||||
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) {
|
||||
malloc_mutex_lock(TSDN_NULL, &head->lock);
|
||||
ql_remove(&head->blocks, block, link);
|
||||
malloc_mutex_unlock(&head->lock);
|
||||
malloc_mutex_unlock(TSDN_NULL, &head->lock);
|
||||
}
|
||||
#endif
|
||||
|
||||
453
deps/jemalloc/src/zone.c
vendored
@@ -1,10 +1,83 @@
#include "jemalloc/internal/jemalloc_internal.h"
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"

#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif

/* Definitions of the following structs in malloc/malloc.h might be too old
 * for the built binary to run on newer versions of OSX. So use the newest
 * possible version of those structs.
 */
typedef struct _malloc_zone_t {
void *reserved1;
void *reserved2;
size_t (*size)(struct _malloc_zone_t *, const void *);
void *(*malloc)(struct _malloc_zone_t *, size_t);
void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
void *(*valloc)(struct _malloc_zone_t *, size_t);
void (*free)(struct _malloc_zone_t *, void *);
void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
void (*destroy)(struct _malloc_zone_t *);
const char *zone_name;
unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
struct malloc_introspection_t *introspect;
unsigned version;
void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
} malloc_zone_t;

typedef struct {
|
||||
vm_address_t address;
|
||||
vm_size_t size;
|
||||
} vm_range_t;
|
||||
|
||||
typedef struct malloc_statistics_t {
|
||||
unsigned blocks_in_use;
|
||||
size_t size_in_use;
|
||||
size_t max_size_in_use;
|
||||
size_t size_allocated;
|
||||
} malloc_statistics_t;
|
||||
|
||||
typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
|
||||
|
||||
typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
|
||||
|
||||
typedef struct malloc_introspection_t {
|
||||
kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
|
||||
size_t (*good_size)(malloc_zone_t *, size_t);
|
||||
boolean_t (*check)(malloc_zone_t *);
|
||||
void (*print)(malloc_zone_t *, boolean_t);
|
||||
void (*log)(malloc_zone_t *, void *);
|
||||
void (*force_lock)(malloc_zone_t *);
|
||||
void (*force_unlock)(malloc_zone_t *);
|
||||
void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
|
||||
boolean_t (*zone_locked)(malloc_zone_t *);
|
||||
boolean_t (*enable_discharge_checking)(malloc_zone_t *);
|
||||
boolean_t (*disable_discharge_checking)(malloc_zone_t *);
|
||||
void (*discharge)(malloc_zone_t *, void *);
|
||||
#ifdef __BLOCKS__
|
||||
void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
|
||||
#else
|
||||
void *enumerate_unavailable_without_blocks;
|
||||
#endif
|
||||
void (*reinit_lock)(malloc_zone_t *);
|
||||
} malloc_introspection_t;
|
||||
|
||||
extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
|
||||
|
||||
extern malloc_zone_t *malloc_default_zone(void);
|
||||
|
||||
extern void malloc_zone_register(malloc_zone_t *zone);
|
||||
|
||||
extern void malloc_zone_unregister(malloc_zone_t *zone);
|
||||
|
||||
/*
|
||||
* The malloc_default_purgeable_zone function is only available on >= 10.6.
|
||||
* The malloc_default_purgeable_zone() function is only available on >= 10.6.
|
||||
* We need to check whether it is present at runtime, thus the weak_import.
|
||||
*/
|
||||
extern malloc_zone_t *malloc_default_purgeable_zone(void)
|
||||
@@ -13,30 +86,42 @@ JEMALLOC_ATTR(weak_import);
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
static malloc_zone_t zone;
|
||||
static struct malloc_introspection_t zone_introspect;
|
||||
static malloc_zone_t *default_zone, *purgeable_zone;
|
||||
static malloc_zone_t jemalloc_zone;
|
||||
static struct malloc_introspection_t jemalloc_zone_introspect;
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static size_t zone_size(malloc_zone_t *zone, void *ptr);
|
||||
static size_t zone_size(malloc_zone_t *zone, const void *ptr);
|
||||
static void *zone_malloc(malloc_zone_t *zone, size_t size);
|
||||
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
|
||||
static void *zone_valloc(malloc_zone_t *zone, size_t size);
|
||||
static void zone_free(malloc_zone_t *zone, void *ptr);
|
||||
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
|
||||
#if (JEMALLOC_ZONE_VERSION >= 5)
|
||||
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
|
||||
#endif
|
||||
#if (JEMALLOC_ZONE_VERSION >= 6)
|
||||
size_t size);
|
||||
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
|
||||
size_t size);
|
||||
#endif
|
||||
static void *zone_destroy(malloc_zone_t *zone);
|
||||
static void zone_destroy(malloc_zone_t *zone);
|
||||
static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
|
||||
void **results, unsigned num_requested);
|
||||
static void zone_batch_free(struct _malloc_zone_t *zone,
|
||||
void **to_be_freed, unsigned num_to_be_freed);
|
||||
static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
|
||||
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
|
||||
static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask,
|
||||
vm_address_t zone_address, memory_reader_t reader,
|
||||
vm_range_recorder_t recorder);
|
||||
static boolean_t zone_check(malloc_zone_t *zone);
|
||||
static void zone_print(malloc_zone_t *zone, boolean_t verbose);
|
||||
static void zone_log(malloc_zone_t *zone, void *address);
|
||||
static void zone_force_lock(malloc_zone_t *zone);
|
||||
static void zone_force_unlock(malloc_zone_t *zone);
|
||||
static void zone_statistics(malloc_zone_t *zone,
|
||||
malloc_statistics_t *stats);
|
||||
static boolean_t zone_locked(malloc_zone_t *zone);
|
||||
static void zone_reinit_lock(malloc_zone_t *zone);
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
@@ -44,9 +129,7 @@ static void zone_force_unlock(malloc_zone_t *zone);
|
||||
*/
|
||||
|
||||
static size_t
|
||||
zone_size(malloc_zone_t *zone, void *ptr)
|
||||
{
|
||||
|
||||
zone_size(malloc_zone_t *zone, const void *ptr) {
|
||||
/*
|
||||
* There appear to be places within Darwin (such as setenv(3)) that
|
||||
* cause calls to this function with pointers that *no* zone owns. If
|
||||
@@ -54,40 +137,33 @@ zone_size(malloc_zone_t *zone, void *ptr)
|
||||
* our zone into two parts, and use one as the default allocator and
|
||||
* the other as the default deallocator/reallocator. Since that will
|
||||
* not work in practice, we must check all pointers to assure that they
|
||||
* reside within a mapped chunk before determining size.
|
||||
* reside within a mapped extent before determining size.
|
||||
*/
|
||||
return (ivsalloc(ptr, config_prof));
|
||||
return ivsalloc(tsdn_fetch(), ptr);
|
||||
}
|
||||
|
||||
static void *
|
||||
zone_malloc(malloc_zone_t *zone, size_t size)
|
||||
{
|
||||
|
||||
return (je_malloc(size));
|
||||
zone_malloc(malloc_zone_t *zone, size_t size) {
|
||||
return je_malloc(size);
|
||||
}
|
||||
|
||||
static void *
|
||||
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
|
||||
{
|
||||
|
||||
return (je_calloc(num, size));
|
||||
zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
|
||||
return je_calloc(num, size);
|
||||
}
|
||||
|
||||
static void *
|
||||
zone_valloc(malloc_zone_t *zone, size_t size)
|
||||
{
|
||||
zone_valloc(malloc_zone_t *zone, size_t size) {
|
||||
void *ret = NULL; /* Assignment avoids useless compiler warning. */
|
||||
|
||||
je_posix_memalign(&ret, PAGE, size);
|
||||
|
||||
return (ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
zone_free(malloc_zone_t *zone, void *ptr)
|
||||
{
|
||||
|
||||
if (ivsalloc(ptr, config_prof) != 0) {
|
||||
zone_free(malloc_zone_t *zone, void *ptr) {
|
||||
if (ivsalloc(tsdn_fetch(), ptr) != 0) {
|
||||
je_free(ptr);
|
||||
return;
|
||||
}
|
||||
@@ -96,163 +172,280 @@ zone_free(malloc_zone_t *zone, void *ptr)
|
||||
}
|
||||
|
||||
static void *
|
||||
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
|
||||
{
|
||||
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
|
||||
if (ivsalloc(tsdn_fetch(), ptr) != 0) {
|
||||
return je_realloc(ptr, size);
|
||||
}
|
||||
|
||||
if (ivsalloc(ptr, config_prof) != 0)
|
||||
return (je_realloc(ptr, size));
|
||||
|
||||
return (realloc(ptr, size));
|
||||
return realloc(ptr, size);
|
||||
}
|
||||
|
||||
#if (JEMALLOC_ZONE_VERSION >= 5)
|
||||
static void *
|
||||
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
|
||||
{
|
||||
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
|
||||
void *ret = NULL; /* Assignment avoids useless compiler warning. */
|
||||
|
||||
je_posix_memalign(&ret, alignment, size);
|
||||
|
||||
return (ret);
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if (JEMALLOC_ZONE_VERSION >= 6)
|
||||
static void
|
||||
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
|
||||
{
|
||||
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
|
||||
size_t alloc_size;
|
||||
|
||||
if (ivsalloc(ptr, config_prof) != 0) {
|
||||
assert(ivsalloc(ptr, config_prof) == size);
|
||||
alloc_size = ivsalloc(tsdn_fetch(), ptr);
|
||||
if (alloc_size != 0) {
|
||||
assert(alloc_size == size);
|
||||
je_free(ptr);
|
||||
return;
|
||||
}
|
||||
|
||||
free(ptr);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void *
|
||||
zone_destroy(malloc_zone_t *zone)
|
||||
{
|
||||
|
||||
static void
|
||||
zone_destroy(malloc_zone_t *zone) {
|
||||
/* This function should never be called. */
|
||||
not_reached();
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
static unsigned
|
||||
zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
|
||||
unsigned num_requested) {
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < num_requested; i++) {
|
||||
results[i] = je_malloc(size);
|
||||
if (!results[i])
|
||||
break;
|
||||
}
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
static void
|
||||
zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
|
||||
unsigned num_to_be_freed) {
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < num_to_be_freed; i++) {
|
||||
zone_free(zone, to_be_freed[i]);
|
||||
to_be_freed[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static size_t
|
||||
zone_good_size(malloc_zone_t *zone, size_t size)
|
||||
{
|
||||
zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (size == 0)
|
||||
static size_t
|
||||
zone_good_size(malloc_zone_t *zone, size_t size) {
|
||||
if (size == 0) {
|
||||
size = 1;
|
||||
return (s2u(size));
|
||||
}
|
||||
return sz_s2u(size);
|
||||
}
|
||||
|
||||
static kern_return_t
|
||||
zone_enumerator(task_t task, void *data, unsigned type_mask,
|
||||
vm_address_t zone_address, memory_reader_t reader,
|
||||
vm_range_recorder_t recorder) {
|
||||
return KERN_SUCCESS;
|
||||
}
|
||||
|
||||
static boolean_t
|
||||
zone_check(malloc_zone_t *zone) {
|
||||
return true;
|
||||
}
|
||||
|
||||
static void
|
||||
zone_force_lock(malloc_zone_t *zone)
|
||||
{
|
||||
zone_print(malloc_zone_t *zone, boolean_t verbose) {
|
||||
}
|
||||
|
||||
if (isthreaded)
|
||||
static void
|
||||
zone_log(malloc_zone_t *zone, void *address) {
|
||||
}
|
||||
|
||||
static void
|
||||
zone_force_lock(malloc_zone_t *zone) {
|
||||
if (isthreaded) {
|
||||
jemalloc_prefork();
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
zone_force_unlock(malloc_zone_t *zone)
|
||||
{
|
||||
zone_force_unlock(malloc_zone_t *zone) {
|
||||
/*
|
||||
* Call jemalloc_postfork_child() rather than
|
||||
* jemalloc_postfork_parent(), because this function is executed by both
|
||||
* parent and child. The parent can tolerate having state
|
||||
* reinitialized, but the child cannot unlock mutexes that were locked
|
||||
* by the parent.
|
||||
*/
|
||||
if (isthreaded) {
|
||||
jemalloc_postfork_child();
|
||||
}
|
||||
}
|
||||
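zone_force_lock()/zone_force_unlock() reuse jemalloc's prefork/postfork hooks, and the comment above explains why the child-side hook is the safe one for unlocking. A generic sketch of how such hooks are typically wired up with pthread_atfork() (the handler bodies are placeholders, not jemalloc's implementation):

#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Acquire the allocator lock before fork() so the child's copy is consistent. */
static void prefork(void)         { pthread_mutex_lock(&big_lock); }
/* The parent still owns the lock and can simply release it. */
static void postfork_parent(void) { pthread_mutex_unlock(&big_lock); }
/* The child re-initializes: it cannot unlock a mutex the parent locked. */
static void postfork_child(void)  { pthread_mutex_init(&big_lock, NULL); }

int
main(void) {
    pthread_atfork(prefork, postfork_parent, postfork_child);

    pid_t pid = fork();
    if (pid == 0) {
        /* Child: the lock was rebuilt, so it is safe to use. */
        pthread_mutex_lock(&big_lock);
        pthread_mutex_unlock(&big_lock);
        _exit(0);
    }
    waitpid(pid, NULL, 0);
    puts("fork handlers ran");
    return 0;
}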
|
||||
if (isthreaded)
|
||||
jemalloc_postfork_parent();
|
||||
static void
|
||||
zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
|
||||
/* We make no effort to actually fill the values */
|
||||
stats->blocks_in_use = 0;
|
||||
stats->size_in_use = 0;
|
||||
stats->max_size_in_use = 0;
|
||||
stats->size_allocated = 0;
|
||||
}
|
||||
|
||||
static boolean_t
|
||||
zone_locked(malloc_zone_t *zone) {
|
||||
/* Pretend no lock is being held */
|
||||
return false;
|
||||
}
|
||||
|
||||
static void
|
||||
zone_reinit_lock(malloc_zone_t *zone) {
|
||||
/* As of OSX 10.12, this function is only used when force_unlock would
|
||||
* be used if the zone version were < 9. So just use force_unlock. */
|
||||
zone_force_unlock(zone);
|
||||
}
|
||||
|
||||
static void
|
||||
zone_init(void) {
|
||||
jemalloc_zone.size = zone_size;
|
||||
jemalloc_zone.malloc = zone_malloc;
|
||||
jemalloc_zone.calloc = zone_calloc;
|
||||
jemalloc_zone.valloc = zone_valloc;
|
||||
jemalloc_zone.free = zone_free;
|
||||
jemalloc_zone.realloc = zone_realloc;
|
||||
jemalloc_zone.destroy = zone_destroy;
|
||||
jemalloc_zone.zone_name = "jemalloc_zone";
|
||||
jemalloc_zone.batch_malloc = zone_batch_malloc;
|
||||
jemalloc_zone.batch_free = zone_batch_free;
|
||||
jemalloc_zone.introspect = &jemalloc_zone_introspect;
|
||||
jemalloc_zone.version = 9;
|
||||
jemalloc_zone.memalign = zone_memalign;
|
||||
jemalloc_zone.free_definite_size = zone_free_definite_size;
|
||||
jemalloc_zone.pressure_relief = zone_pressure_relief;
|
||||
|
||||
jemalloc_zone_introspect.enumerator = zone_enumerator;
|
||||
jemalloc_zone_introspect.good_size = zone_good_size;
|
||||
jemalloc_zone_introspect.check = zone_check;
|
||||
jemalloc_zone_introspect.print = zone_print;
|
||||
jemalloc_zone_introspect.log = zone_log;
|
||||
jemalloc_zone_introspect.force_lock = zone_force_lock;
|
||||
jemalloc_zone_introspect.force_unlock = zone_force_unlock;
|
||||
jemalloc_zone_introspect.statistics = zone_statistics;
|
||||
jemalloc_zone_introspect.zone_locked = zone_locked;
|
||||
jemalloc_zone_introspect.enable_discharge_checking = NULL;
|
||||
jemalloc_zone_introspect.disable_discharge_checking = NULL;
|
||||
jemalloc_zone_introspect.discharge = NULL;
|
||||
#ifdef __BLOCKS__
|
||||
jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
|
||||
#else
|
||||
jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
|
||||
#endif
|
||||
jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
|
||||
}
|
||||
|
||||
static malloc_zone_t *
|
||||
zone_default_get(void) {
|
||||
malloc_zone_t **zones = NULL;
|
||||
unsigned int num_zones = 0;
|
||||
|
||||
/*
|
||||
* On OSX 10.12, malloc_default_zone returns a special zone that is not
|
||||
* present in the list of registered zones. That zone uses a "lite zone"
|
||||
* if one is present (apparently enabled when malloc stack logging is
|
||||
* enabled), or the first registered zone otherwise. In practice this
|
||||
* means unless malloc stack logging is enabled, the first registered
|
||||
* zone is the default. So get the list of zones to get the first one,
|
||||
* instead of relying on malloc_default_zone.
|
||||
*/
|
||||
if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
|
||||
(vm_address_t**)&zones, &num_zones)) {
|
||||
/*
|
||||
* Reset the value in case the failure happened after it was
|
||||
* set.
|
||||
*/
|
||||
num_zones = 0;
|
||||
}
|
||||
|
||||
if (num_zones) {
|
||||
return zones[0];
|
||||
}
|
||||
|
||||
return malloc_default_zone();
|
||||
}
|
||||
|
||||
/* As written, this function can only promote jemalloc_zone. */
|
||||
static void
|
||||
zone_promote(void) {
|
||||
malloc_zone_t *zone;
|
||||
|
||||
do {
|
||||
/*
|
||||
* Unregister and reregister the default zone. On OSX >= 10.6,
|
||||
* unregistering takes the last registered zone and places it
|
||||
* at the location of the specified zone. Unregistering the
|
||||
* default zone thus makes the last registered one the default.
|
||||
* On OSX < 10.6, unregistering shifts all registered zones.
|
||||
* The first registered zone then becomes the default.
|
||||
*/
|
||||
malloc_zone_unregister(default_zone);
|
||||
malloc_zone_register(default_zone);
|
||||
|
||||
/*
|
||||
* On OSX 10.6, having the default purgeable zone appear before
|
||||
* the default zone makes some things crash because it thinks it
|
||||
* owns the default zone allocated pointers. We thus
|
||||
* unregister/re-register it in order to ensure it's always
|
||||
* after the default zone. On OSX < 10.6, there is no purgeable
|
||||
* zone, so this does nothing. On OSX >= 10.6, unregistering
|
||||
* replaces the purgeable zone with the last registered zone
|
||||
* above, i.e. the default zone. Registering it again then puts
|
||||
* it at the end, obviously after the default zone.
|
||||
*/
|
||||
if (purgeable_zone != NULL) {
|
||||
malloc_zone_unregister(purgeable_zone);
|
||||
malloc_zone_register(purgeable_zone);
|
||||
}
|
||||
|
||||
zone = zone_default_get();
|
||||
} while (zone != &jemalloc_zone);
|
||||
}
|
||||
|
||||
JEMALLOC_ATTR(constructor)
|
||||
void
|
||||
register_zone(void)
|
||||
{
|
||||
|
||||
zone_register(void) {
|
||||
/*
|
||||
* If something else replaced the system default zone allocator, don't
|
||||
* register jemalloc's.
|
||||
*/
|
||||
malloc_zone_t *default_zone = malloc_default_zone();
|
||||
if (!default_zone->zone_name ||
|
||||
strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
|
||||
default_zone = zone_default_get();
|
||||
if (!default_zone->zone_name || strcmp(default_zone->zone_name,
|
||||
"DefaultMallocZone") != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
zone.size = (void *)zone_size;
|
||||
zone.malloc = (void *)zone_malloc;
|
||||
zone.calloc = (void *)zone_calloc;
|
||||
zone.valloc = (void *)zone_valloc;
|
||||
zone.free = (void *)zone_free;
|
||||
zone.realloc = (void *)zone_realloc;
|
||||
zone.destroy = (void *)zone_destroy;
|
||||
zone.zone_name = "jemalloc_zone";
|
||||
zone.batch_malloc = NULL;
|
||||
zone.batch_free = NULL;
|
||||
zone.introspect = &zone_introspect;
|
||||
zone.version = JEMALLOC_ZONE_VERSION;
|
||||
#if (JEMALLOC_ZONE_VERSION >= 5)
|
||||
zone.memalign = zone_memalign;
|
||||
#endif
|
||||
#if (JEMALLOC_ZONE_VERSION >= 6)
|
||||
zone.free_definite_size = zone_free_definite_size;
|
||||
#endif
|
||||
#if (JEMALLOC_ZONE_VERSION >= 8)
|
||||
zone.pressure_relief = NULL;
|
||||
#endif
|
||||
|
||||
zone_introspect.enumerator = NULL;
|
||||
zone_introspect.good_size = (void *)zone_good_size;
|
||||
zone_introspect.check = NULL;
|
||||
zone_introspect.print = NULL;
|
||||
zone_introspect.log = NULL;
|
||||
zone_introspect.force_lock = (void *)zone_force_lock;
|
||||
zone_introspect.force_unlock = (void *)zone_force_unlock;
|
||||
zone_introspect.statistics = NULL;
|
||||
#if (JEMALLOC_ZONE_VERSION >= 6)
|
||||
zone_introspect.zone_locked = NULL;
|
||||
#endif
|
||||
#if (JEMALLOC_ZONE_VERSION >= 7)
|
||||
zone_introspect.enable_discharge_checking = NULL;
|
||||
zone_introspect.disable_discharge_checking = NULL;
|
||||
zone_introspect.discharge = NULL;
|
||||
#ifdef __BLOCKS__
|
||||
zone_introspect.enumerate_discharged_pointers = NULL;
|
||||
#else
|
||||
zone_introspect.enumerate_unavailable_without_blocks = NULL;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The default purgeable zone is created lazily by OSX's libc. It uses
|
||||
* the default zone when it is created for "small" allocations
|
||||
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
|
||||
* obviously fails when the default zone is the jemalloc zone, so
|
||||
* malloc_default_purgeable_zone is called beforehand so that the
|
||||
* malloc_default_purgeable_zone() is called beforehand so that the
|
||||
* default purgeable zone is created when the default zone is still
|
||||
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
|
||||
* to check for the existence of malloc_default_purgeable_zone() at
|
||||
* run time.
|
||||
*/
|
||||
if (malloc_default_purgeable_zone != NULL)
|
||||
malloc_default_purgeable_zone();
|
||||
purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
|
||||
malloc_default_purgeable_zone();
|
||||
|
||||
/* Register the custom zone. At this point it won't be the default. */
|
||||
malloc_zone_register(&zone);
|
||||
zone_init();
|
||||
malloc_zone_register(&jemalloc_zone);
|
||||
|
||||
/*
|
||||
* Unregister and reregister the default zone. On OSX >= 10.6,
|
||||
* unregistering takes the last registered zone and places it at the
|
||||
* location of the specified zone. Unregistering the default zone thus
|
||||
* makes the last registered one the default. On OSX < 10.6,
|
||||
* unregistering shifts all registered zones. The first registered zone
|
||||
* then becomes the default.
|
||||
*/
|
||||
do {
|
||||
default_zone = malloc_default_zone();
|
||||
malloc_zone_unregister(default_zone);
|
||||
malloc_zone_register(default_zone);
|
||||
} while (malloc_default_zone() != &zone);
|
||||
/* Promote the custom zone to be default. */
|
||||
zone_promote();
|
||||
}
|
||||
|
||||