Library Jemalloc updated to 5.0.1 (#721)

sucofog
2017-11-26 17:46:07 +01:00
committed by Yehonal
parent 5ef601b7cf
commit d87134b8ea
121 changed files with 29264 additions and 9335 deletions

View File

@@ -14,7 +14,7 @@ G3D (a commercial-grade C++ 3D engine available as Open Source (BSD License)
jemalloc (a general-purpose scalable concurrent malloc-implementation)
http://www.canonware.com/jemalloc/
Version: 3.5.1
Version: 5.0.1
libMPQ (a library for reading MPQ files)
https://libmpq.org/

View File

@@ -1,4 +1,4 @@
# Copyright (C)
# Copyright (C) 2008-2017 TrinityCore <http://www.trinitycore.org/>
#
# This file is free software; as a special exception the author gives
# unlimited permission to copy and/or distribute it, with or without
@@ -8,47 +8,56 @@
# WITHOUT ANY WARRANTY, to the extent permitted by law; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# We need to generate the jemalloc_def.h header based on platform-specific settings
if (PLATFORM EQUAL 32)
  set(JEM_SIZEDEF 2)
  set(JEM_TLSMODEL)
else()
  set(JEM_SIZEDEF 3)
  set(JEM_TLSMODEL "__attribute__\(\(tls_model\(\"initial-exec\"\)\)\)")
endif()
# We need to generate the jemalloc_def.h header based on platform-specific settings
if (PLATFORM EQUAL 32)
  set(JEM_VADDRBITS 32)
else()
  set(JEM_VADDRBITS 48)
endif()
# Create the header, so we can use it
configure_file(
  "${CMAKE_SOURCE_DIR}/modules/worldengine/deps/jemalloc/jemalloc_defs.h.in.cmake"
  "${BUILDDIR}/jemalloc_defs.h"
  @ONLY
)
# Create the header, so we can use it
configure_file(
  "${CMAKE_SOURCE_DIR}/modules/worldengine/deps/jemalloc/jemalloc_internal_defs.h.in.cmake"
  "${BUILDDIR}/jemalloc_internal_defs.h"
  @ONLY
)
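
For context: configure_file() with @ONLY substitutes only @VAR@ tokens in the
template, leaving regular CMake ${} references untouched. A hypothetical
template line (the macro name is an assumption, not quoted from the actual
.h.in.cmake files), assuming JEM_VADDRBITS feeds jemalloc's virtual-address
sizing:

    /* Template line (hypothetical): */
    #define LG_VADDR @JEM_VADDRBITS@
    /* Emitted into ${BUILDDIR}/jemalloc_internal_defs.h on a 64-bit build: */
    #define LG_VADDR 48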
# Done, let's continue
set(jemalloc_STAT_SRC
# Done, let's continue
set(jemalloc_STAT_SRC
${CMAKE_CURRENT_SOURCE_DIR}/src/arena.c
${CMAKE_CURRENT_SOURCE_DIR}/src/atomic.c
${CMAKE_CURRENT_SOURCE_DIR}/src/background_thread.c
${CMAKE_CURRENT_SOURCE_DIR}/src/base.c
${CMAKE_CURRENT_SOURCE_DIR}/src/bitmap.c
${CMAKE_CURRENT_SOURCE_DIR}/src/chunk.c
${CMAKE_CURRENT_SOURCE_DIR}/src/chunk_dss.c
${CMAKE_CURRENT_SOURCE_DIR}/src/chunk_mmap.c
${CMAKE_CURRENT_SOURCE_DIR}/src/ckh.c
${CMAKE_CURRENT_SOURCE_DIR}/src/ctl.c
${CMAKE_CURRENT_SOURCE_DIR}/src/extent.c
${CMAKE_CURRENT_SOURCE_DIR}/src/extent_dss.c
${CMAKE_CURRENT_SOURCE_DIR}/src/extent_mmap.c
${CMAKE_CURRENT_SOURCE_DIR}/src/hash.c
${CMAKE_CURRENT_SOURCE_DIR}/src/huge.c
${CMAKE_CURRENT_SOURCE_DIR}/src/hooks.c
${CMAKE_CURRENT_SOURCE_DIR}/src/jemalloc.c
${CMAKE_CURRENT_SOURCE_DIR}/src/mb.c
${CMAKE_CURRENT_SOURCE_DIR}/src/jemalloc_cpp.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/large.c
${CMAKE_CURRENT_SOURCE_DIR}/src/malloc_io.c
${CMAKE_CURRENT_SOURCE_DIR}/src/mutex.c
${CMAKE_CURRENT_SOURCE_DIR}/src/mutex_pool.c
${CMAKE_CURRENT_SOURCE_DIR}/src/nstime.c
${CMAKE_CURRENT_SOURCE_DIR}/src/pages.c
${CMAKE_CURRENT_SOURCE_DIR}/src/prng.c
${CMAKE_CURRENT_SOURCE_DIR}/src/prof.c
${CMAKE_CURRENT_SOURCE_DIR}/src/quarantine.c
${CMAKE_CURRENT_SOURCE_DIR}/src/rtree.c
${CMAKE_CURRENT_SOURCE_DIR}/src/spin.c
${CMAKE_CURRENT_SOURCE_DIR}/src/stats.c
${CMAKE_CURRENT_SOURCE_DIR}/src/sz.c
${CMAKE_CURRENT_SOURCE_DIR}/src/tcache.c
${CMAKE_CURRENT_SOURCE_DIR}/src/ticker.c
${CMAKE_CURRENT_SOURCE_DIR}/src/tsd.c
${CMAKE_CURRENT_SOURCE_DIR}/src/util.c
)
${CMAKE_CURRENT_SOURCE_DIR}/src/witness.c
)
include_directories(
${BUILDDIR}/
@@ -58,3 +67,12 @@ include_directories(
add_definitions(-D_GNU_SOURCE -D_REENTRANT)
add_library(jemalloc STATIC ${jemalloc_STAT_SRC})
target_link_libraries(jemalloc
PUBLIC
${CMAKE_DL_LIBS})
set_target_properties(jemalloc
PROPERTIES
FOLDER
"deps")

View File

@@ -1,10 +1,10 @@
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
Copyright (C) 2002-2014 Jason Evans <jasone@canonware.com>.
Copyright (C) 2002-2017 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
Copyright (C) 2009-2014 Facebook, Inc. All rights reserved.
Copyright (C) 2009-2017 Facebook, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

View File

@@ -1,10 +1,727 @@
Following are change highlights associated with official releases. Important
bug fixes are all mentioned, but internal enhancements are omitted here for
brevity (even though they are more fun to write about). Much more detail can be
found in the git revision history:
bug fixes are all mentioned, but some internal enhancements are omitted here for
brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
* 5.0.1 (July 1, 2017)
This bugfix release fixes several issues, most of which are obscure enough
that typical applications are not impacted.
Bug fixes:
- Update decay->nunpurged before purging, in order to avoid potential update
races and subsequent incorrect purging volume. (@interwq)
- Only abort on dlsym(3) error if the failure impacts an enabled feature (lazy
locking and/or background threads). This mitigates an initialization
failure bug for which we still do not have a clear reproduction test case.
(@interwq)
- Modify tsd management so that it neither crashes nor leaks if a thread's
only allocation activity is to call free() after TLS destructors have been
executed. This behavior was observed when operating with GNU libc, and is
unlikely to be an issue with other libc implementations. (@interwq)
- Mask signals during background thread creation. This prevents signals from
being inadvertently delivered to background threads. (@jasone,
@davidtgoldblatt, @interwq)
- Avoid inactivity checks within background threads, in order to prevent
recursive mutex acquisition. (@interwq)
- Fix extent_grow_retained() to use the specified hooks when the
arena.<i>.extent_hooks mallctl is used to override the default hooks; a
usage sketch follows this list. (@interwq)
- Add missing reentrancy support for custom extent hooks which allocate.
(@interwq)
- Post-fork(2), re-initialize the list of tcaches associated with each arena
to contain no tcaches except the forking thread's. (@interwq)
- Add missing post-fork(2) mutex reinitialization for extent_grow_mtx. This
fixes potential deadlocks after fork(2). (@interwq)
- Enforce minimum autoconf version (currently 2.68), since 2.63 is known to
generate corrupt configure scripts. (@jasone)
- Ensure that the configured page size (--with-lg-page) is no larger than the
configured huge page size (--with-lg-hugepage). (@jasone)
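
A usage sketch for the extent-hooks fix above (a minimal sketch assuming
jemalloc's standard unprefixed C API; error handling trimmed):

    #include <jemalloc/jemalloc.h>

    /* Read arena 0's current extent hooks. Writing a custom extent_hooks_t
     * through newp/newlen instead installs the per-arena override that the
     * fixed extent_grow_retained() now honors. */
    static extent_hooks_t *
    get_arena0_extent_hooks(void) {
        extent_hooks_t *hooks = NULL;
        size_t sz = sizeof(hooks);
        if (mallctl("arena.0.extent_hooks", (void *)&hooks, &sz, NULL, 0) != 0) {
            return NULL; /* mallctl failed */
        }
        return hooks;
    }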
* 5.0.0 (June 13, 2017)
Unlike all previous jemalloc releases, this release does not use naturally
aligned "chunks" for virtual memory management, and instead uses page-aligned
"extents". This change has few externally visible effects, but the internal
impacts are... extensive. Many other internal changes combine to make this
the most cohesively designed version of jemalloc so far, with ample
opportunity for further enhancements.
Continuous integration is now an integral aspect of development thanks to the
efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably
stable on the tested platforms (Linux, FreeBSD, macOS, and Windows). As a
side effect the official release frequency may decrease over time.
New features:
- Implement optional per-CPU arena support; threads choose which arena to use
based on current CPU rather than on fixed thread-->arena associations.
(@interwq)
- Implement two-phase decay of unused dirty pages. Pages transition from
dirty-->muzzy-->clean, where the first phase transition relies on
madvise(... MADV_FREE) semantics, and the second phase transition discards
pages such that they are replaced with demand-zeroed pages on next access.
(@jasone)
- Increase decay time resolution from seconds to milliseconds. (@jasone)
- Implement opt-in per CPU background threads, and use them for asynchronous
decay-driven unused dirty page purging. (@interwq)
- Add mutex profiling, which collects a variety of statistics useful for
diagnosing overhead/contention issues. (@interwq)
- Add C++ new/delete operator bindings. (@djwatson)
- Support manually created arena destruction, such that all data and metadata
are discarded. Add MALLCTL_ARENAS_DESTROYED for accessing merged stats
associated with destroyed arenas. (@jasone)
- Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing
merged/destroyed arena statistics via mallctl. (@jasone)
- Add opt.abort_conf to optionally abort if invalid configuration options are
detected during initialization. (@interwq)
- Add opt.stats_print_opts, so that e.g. JSON output can be selected for the
stats dumped during exit if opt.stats_print is true. (@jasone)
- Add --with-version=VERSION for use when embedding jemalloc into another
project's git repository. (@jasone)
- Add --disable-thp to support cross compiling. (@jasone)
- Add --with-lg-hugepage to support cross compiling. (@jasone)
- Add mallctl interfaces (various authors; a usage sketch follows this list):
+ background_thread
+ opt.abort_conf
+ opt.retain
+ opt.percpu_arena
+ opt.background_thread
+ opt.{dirty,muzzy}_decay_ms
+ opt.stats_print_opts
+ arena.<i>.initialized
+ arena.<i>.destroy
+ arena.<i>.{dirty,muzzy}_decay_ms
+ arena.<i>.extent_hooks
+ arenas.{dirty,muzzy}_decay_ms
+ arenas.bin.<i>.slab_size
+ arenas.nlextents
+ arenas.lextent.<i>.size
+ arenas.create
+ stats.background_thread.{num_threads,num_runs,run_interval}
+ stats.mutexes.{ctl,background_thread,prof,reset}.
{num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
num_owner_switch}
+ stats.arenas.<i>.{dirty,muzzy}_decay_ms
+ stats.arenas.<i>.uptime
+ stats.arenas.<i>.{pmuzzy,base,internal,resident}
+ stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
+ stats.arenas.<i>.bins.<j>.{nslabs,reslabs,curslabs}
+ stats.arenas.<i>.bins.<j>.mutex.
{num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
num_owner_switch}
+ stats.arenas.<i>.lextents.<j>.{nmalloc,ndalloc,nrequests,curlextents}
+ stats.arenas.<i>.mutexes.{large,extent_avail,extents_dirty,extents_muzzy,
extents_retained,decay_dirty,decay_muzzy,base,tcache_list}.
{num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
num_owner_switch}
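
A usage sketch for two of the mallctls above (assuming jemalloc's standard
unprefixed C API; return codes ignored for brevity):

    #include <stdbool.h>
    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    static void
    tune_allocator(void) {
        /* Spawn jemalloc's background purging threads. */
        bool enable = true;
        mallctl("background_thread", NULL, NULL, (void *)&enable,
            sizeof(enable));

        /* Let arena 0's dirty pages decay over roughly ten seconds. */
        ssize_t decay_ms = 10000;
        mallctl("arena.0.dirty_decay_ms", NULL, NULL, (void *)&decay_ms,
            sizeof(decay_ms));
    }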
Portability improvements:
- Improve reentrant allocation support, such that deadlock is less likely if
e.g. a system library call in turn allocates memory. (@davidtgoldblatt,
@interwq)
- Support static linking of jemalloc with glibc. (@djwatson)
Optimizations and refactors:
- Organize virtual memory as "extents" of virtual memory pages, rather than as
naturally aligned "chunks", and store all metadata in arbitrarily distant
locations. This reduces virtual memory external fragmentation, and will
interact better with huge pages (not yet explicitly supported). (@jasone)
- Fold large and huge size classes together; only small and large size classes
remain. (@jasone)
- Unify the allocation paths, and merge most fast-path branching decisions.
(@davidtgoldblatt, @interwq)
- Embed per thread automatic tcache into thread-specific data, which reduces
conditional branches and dereferences. Also reorganize tcache to increase
fast-path data locality. (@interwq)
- Rewrite atomics to closely model the C11 API, convert various
synchronization from mutex-based to atomic, and use the explicit memory
ordering control to resolve various hypothetical races without increasing
synchronization overhead. (@davidtgoldblatt)
- Extensively optimize rtree via various methods:
+ Add multiple layers of rtree lookup caching, since rtree lookups are now
part of fast-path deallocation. (@interwq)
+ Determine rtree layout at compile time. (@jasone)
+ Make the tree shallower for common configurations. (@jasone)
+ Embed the root node in the top-level rtree data structure, thus avoiding
one level of indirection. (@jasone)
+ Further specialize leaf elements as compared to internal node elements,
and directly embed extent metadata needed for fast-path deallocation.
(@jasone)
+ Ignore leading always-zero address bits (architecture-specific).
(@jasone)
- Reorganize headers (ongoing work) to make them hermetic, and disentangle
various module dependencies. (@davidtgoldblatt)
- Convert various internal data structures such as size class metadata from
boot-time-initialized to compile-time-initialized. Propagate resulting data
structure simplifications, such as making arena metadata fixed-size.
(@jasone)
- Simplify size class lookups when constrained to size classes that are
multiples of the page size. This speeds lookups, but the primary benefit is
complexity reduction in code that was the source of numerous regressions.
(@jasone)
- Lock individual extents when possible for localized extent operations,
rather than relying on a top-level arena lock. (@davidtgoldblatt, @jasone)
- Use first fit layout policy instead of best fit, in order to improve
packing. (@jasone)
- If munmap(2) is not in use, use an exponential series to grow each arena's
virtual memory, so that the number of disjoint virtual memory mappings
remains low. (@jasone)
- Implement per arena base allocators, so that arenas never share any virtual
memory pages. (@jasone)
- Automatically generate private symbol name mangling macros. (@jasone)
Incompatible changes:
- Replace chunk hooks with an expanded/normalized set of extent hooks.
(@jasone)
- Remove ratio-based purging. (@jasone)
- Remove --disable-tcache. (@jasone)
- Remove --disable-tls. (@jasone)
- Remove --enable-ivsalloc. (@jasone)
- Remove --with-lg-size-class-group. (@jasone)
- Remove --with-lg-tiny-min. (@jasone)
- Remove --disable-cc-silence. (@jasone)
- Remove --enable-code-coverage. (@jasone)
- Remove --disable-munmap (replaced by opt.retain). (@jasone)
- Remove Valgrind support. (@jasone)
- Remove quarantine support. (@jasone)
- Remove redzone support. (@jasone)
- Remove mallctl interfaces (various authors):
+ config.munmap
+ config.tcache
+ config.tls
+ config.valgrind
+ opt.lg_chunk
+ opt.purge
+ opt.lg_dirty_mult
+ opt.decay_time
+ opt.quarantine
+ opt.redzone
+ opt.thp
+ arena.<i>.lg_dirty_mult
+ arena.<i>.decay_time
+ arena.<i>.chunk_hooks
+ arenas.initialized
+ arenas.lg_dirty_mult
+ arenas.decay_time
+ arenas.bin.<i>.run_size
+ arenas.nlruns
+ arenas.lrun.<i>.size
+ arenas.nhchunks
+ arenas.hchunk.<i>.size
+ arenas.extend
+ stats.cactive
+ stats.arenas.<i>.lg_dirty_mult
+ stats.arenas.<i>.decay_time
+ stats.arenas.<i>.metadata.{mapped,allocated}
+ stats.arenas.<i>.{npurge,nmadvise,purged}
+ stats.arenas.<i>.huge.{allocated,nmalloc,ndalloc,nrequests}
+ stats.arenas.<i>.bins.<j>.{nruns,reruns,curruns}
+ stats.arenas.<i>.lruns.<j>.{nmalloc,ndalloc,nrequests,curruns}
+ stats.arenas.<i>.hchunks.<j>.{nmalloc,ndalloc,nrequests,curhchunks}
Bug fixes:
- Improve interval-based profile dump triggering to dump only one profile when
a single allocation's size exceeds the interval. (@jasone)
- Use prefixed function names (as controlled by --with-jemalloc-prefix) when
pruning backtrace frames in jeprof. (@jasone)
* 4.5.0 (February 28, 2017)
This is the first release to benefit from much broader continuous integration
testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure
in place for prior releases, it would have caught all of the most serious
regressions fixed by this release.
New features:
- Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for
transparent huge page integration. (@jasone)
- Update zone allocator integration to work with macOS 10.12. (@glandium)
- Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and
EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not
during configuration. (@jasone, @ronawho)
Bug fixes:
- Fix DSS (sbrk(2)-based) allocation. This regression was first released in
4.3.0. (@jasone)
- Handle race in per size class utilization computation. This functionality
was first released in 4.0.0. (@interwq)
- Fix lock order reversal during gdump. (@jasone)
- Fix/refactor tcache synchronization. This regression was first released in
4.0.0. (@jasone)
- Fix various JSON-formatted malloc_stats_print() bugs. This functionality
was first released in 4.3.0. (@jasone)
- Fix huge-aligned allocation. This regression was first released in 4.4.0.
(@jasone)
- When transparent huge page integration is enabled, detect what state pages
start in according to the kernel's current operating mode, and only convert
arena chunks to non-huge during purging if that is not their initial state.
This functionality was first released in 4.4.0. (@jasone)
- Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case.
This regression was first released in 4.0.0. (@jasone, @428desmo)
- Properly detect sparc64 when building for Linux. (@glaubitz)
* 4.4.0 (December 3, 2016)
New features:
- Add configure support for *-*-linux-android. (@cferris1000, @jasone)
- Add the --disable-syscall configure option, for use on systems that place
security-motivated limitations on syscall(2). (@jasone)
- Add support for Debian GNU/kFreeBSD. (@thesam)
Optimizations:
- Add extent serial numbers and use them where appropriate as a sort key that
is higher priority than address, so that the allocation policy prefers older
extents. This tends to improve locality (decrease fragmentation) when
memory grows downward. (@jasone)
- Refactor madvise(2) configuration so that MADV_FREE is detected and utilized
on Linux 4.5 and newer. (@jasone)
- Mark partially purged arena chunks as non-huge-page. This improves
interaction with Linux's transparent huge page functionality. (@jasone)
Bug fixes:
- Fix size class computations for edge conditions involving extremely large
allocations. This regression was first released in 4.0.0. (@jasone,
@ingvarha)
- Remove overly restrictive assertions related to the cactive statistic. This
regression was first released in 4.1.0. (@jasone)
- Implement a more reliable detection scheme for os_unfair_lock on macOS.
(@jszakmeister)
* 4.3.1 (November 7, 2016)
Bug fixes:
- Fix a severe virtual memory leak. This regression was first released in
4.3.0. (@interwq, @jasone)
- Refactor atomic and prng APIs to restore support for 32-bit platforms that
use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone)
* 4.3.0 (November 4, 2016)
This is the first release that passes the test suite for multiple Windows
configurations, thanks in large part to @glandium setting up continuous
integration via AppVeyor (and Travis CI for Linux and OS X).
New features:
- Add "J" (JSON) support to malloc_stats_print(); a usage sketch follows
this list. (@jasone)
- Add Cray compiler support. (@ronawho)
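
A usage sketch for the new "J" option (assuming the standard unprefixed API):

    #include <jemalloc/jemalloc.h>

    static void
    dump_stats_json(void) {
        /* NULL write callback => default output via malloc_message. */
        malloc_stats_print(NULL, NULL, "J");
    }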
Optimizations:
- Add/use adaptive spinning for bootstrapping and radix tree node
initialization. (@jasone)
Bug fixes:
- Fix large allocation to search starting in the optimal size class heap,
which can substantially reduce virtual memory churn and fragmentation. This
regression was first released in 4.0.0. (@mjp41, @jasone)
- Fix stats.arenas.<i>.nthreads accounting. (@interwq)
- Fix and simplify decay-based purging. (@jasone)
- Make DSS (sbrk(2)-related) operations lockless, which resolves potential
deadlocks during thread exit. (@jasone)
- Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun,
@jasone)
- Fix over-sized allocation of arena_t (plus associated stats) data
structures. (@jasone, @interwq)
- Fix EXTRA_CFLAGS to not affect configuration. (@jasone)
- Fix a Valgrind integration bug. (@ronawho)
- Disallow 0x5a junk filling when running in Valgrind. (@jasone)
- Fix a file descriptor leak on Linux. This regression was first released in
4.2.0. (@vsarunas, @jasone)
- Fix static linking of jemalloc with glibc. (@djwatson)
- Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This
works around other libraries' system call wrappers performing reentrant
allocation. (@kspinka, @Whissi, @jasone)
- Fix OS X default zone replacement to work with OS X 10.12. (@glandium,
@jasone)
- Fix cached memory management to avoid needless commit/decommit operations
during purging, which resolves permanent virtual memory map fragmentation
issues on Windows. (@mjp41, @jasone)
- Fix TSD fetches to avoid (recursive) allocation. This is relevant to
non-TLS and Windows configurations. (@jasone)
- Fix malloc_conf overriding to work on Windows. (@jasone)
- Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone)
* 4.2.1 (June 8, 2016)
Bug fixes:
- Fix bootstrapping issues for configurations that require allocation during
tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone)
- Fix gettimeofday() version of nstime_update(). (@ronawho)
- Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho)
- Fix potential VM map fragmentation regression. (@jasone)
- Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone)
- Fix heap profiling context leaks in reallocation edge cases. (@jasone)
* 4.2.0 (May 12, 2016)
New features:
- Add the arena.<i>.reset mallctl, which makes it possible to discard all of
an arena's allocations in a single operation; a sketch follows this list.
(@jasone)
- Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone)
- Add the --with-version configure option. (@jasone)
- Support --with-lg-page values larger than actual page size. (@jasone)
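
A sketch of the new arena.<i>.reset mallctl (a minimal sketch, assuming `ind`
names an arena whose allocations the application knows are no longer in use):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static void
    reset_arena(unsigned ind) {
        char name[32];
        snprintf(name, sizeof(name), "arena.%u.reset", ind);
        mallctl(name, NULL, NULL, NULL, 0); /* discard extant allocations */
    }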
Optimizations:
- Use pairing heaps rather than red-black trees for various hot data
structures. (@djwatson, @jasone)
- Streamline fast paths of rtree operations. (@jasone)
- Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone)
- Decommit unused virtual memory if the OS does not overcommit. (@jasone)
- Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order
to avoid unfortunate interactions during fork(2). (@jasone)
Bug fixes:
- Fix chunk accounting related to triggering gdump profiles. (@jasone)
- Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone)
- Scale leak report summary according to sampling probability. (@jasone)
* 4.1.1 (May 3, 2016)
This bugfix release resolves a variety of mostly minor issues, though the
bitmap fix is critical for 64-bit Windows.
Bug fixes:
- Fix the linear scan version of bitmap_sfu() to shift by the proper amount
even when sizeof(long) is not the same as sizeof(void *), as on 64-bit
Windows. (@jasone)
- Fix hashing functions to avoid unaligned memory accesses (and resulting
crashes). This is relevant at least to some ARM-based platforms.
(@rkmisra)
- Fix fork()-related lock rank ordering reversals. These reversals were
unlikely to cause deadlocks in practice except when heap profiling was
enabled and active. (@jasone)
- Fix various chunk leaks in OOM code paths. (@jasone)
- Fix malloc_stats_print() to print opt.narenas correctly. (@jasone)
- Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin)
- Fix a variety of test failures that were due to test fragility rather than
core bugs. (@jasone)
* 4.1.0 (February 28, 2016)
This release is primarily about optimizations, but it also incorporates a lot
of portability-motivated refactoring and enhancements. Many people worked on
this release, to an extent that even with the omission here of minor changes
(see git revision history), and of the people who reported and diagnosed
issues, so much of the work was contributed that starting with this release,
changes are annotated with author credits to help reflect the collaborative
effort involved.
New features:
- Implement decay-based unused dirty page purging, a major optimization with
mallctl API impact. This is an alternative to the existing ratio-based
unused dirty page purging, and is intended to eventually become the sole
purging mechanism. New mallctls:
+ opt.purge
+ opt.decay_time
+ arena.<i>.decay
+ arena.<i>.decay_time
+ arenas.decay_time
+ stats.arenas.<i>.decay_time
(@jasone, @cevans87)
- Add --with-malloc-conf, which makes it possible to embed a default
options string during configuration (a sketch follows this list). This was
motivated by the desire to specify --with-malloc-conf=purge:decay , since
the default must remain purge:ratio until the 5.0.0 release. (@jasone)
- Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin)
- Make *allocx() size class overflow behavior defined. The maximum
size class is now less than PTRDIFF_MAX to protect applications against
numerical overflow, and all allocation functions are guaranteed to indicate
errors rather than potentially crashing if the request size exceeds the
maximum size class. (@jasone)
- jeprof:
+ Add raw heap profile support. (@jasone)
+ Add --retain and --exclude for backtrace symbol filtering. (@jasone)
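
A sketch of opting into decay-based purging from the application side, via
the weak malloc_conf symbol that jemalloc lets applications override
(decay_time is in seconds in this release):

    /* Compiled into the application; jemalloc reads it at startup. */
    const char *malloc_conf = "purge:decay,decay_time:10";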
Optimizations:
- Optimize the fast path to combine various bootstrapping and configuration
checks and execute more streamlined code in the common case. (@interwq)
- Use linear scan for small bitmaps (used for small object tracking). In
addition to speeding up bitmap operations on 64-bit systems, this reduces
allocator metadata overhead by approximately 0.2%. (@djwatson)
- Separate arena_avail trees, which substantially speeds up run tree
operations. (@djwatson)
- Use memoization (boot-time-computed table) for run quantization. Separate
arena_avail trees reduced the importance of this optimization. (@jasone)
- Attempt mmap-based in-place huge reallocation. This can dramatically speed
up incremental huge reallocation. (@jasone)
Incompatible changes:
- Make opt.narenas unsigned rather than size_t. (@jasone)
Bug fixes:
- Fix stats.cactive accounting regression. (@rustyx, @jasone)
- Handle unaligned keys in hash(). This caused problems for some ARM systems.
(@jasone, @cferris1000)
- Refactor arenas array. In addition to fixing a fork-related deadlock, this
makes arena lookups faster and simpler. (@jasone)
- Move retained memory allocation out of the default chunk allocation
function, to a location that gets executed even if the application installs
a custom chunk allocation function. This resolves a virtual memory leak.
(@buchgr)
- Fix a potential tsd cleanup leak. (@cferris1000, @jasone)
- Fix run quantization. In practice this bug had no impact unless
applications requested memory with alignment exceeding one page.
(@jasone, @djwatson)
- Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv)
- jeprof:
+ Don't discard curl options if timeout is not defined. (@djwatson)
+ Detect failed profile fetches. (@djwatson)
- Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for
--disable-stats case. (@jasone)
* 4.0.4 (October 24, 2015)
This bugfix release fixes another xallocx() regression. No other regressions
have come to light in over a month, so this is likely a good starting point
for people who prefer to wait for "dot one" releases with all the major issues
shaken out.
Bug fixes:
- Fix xallocx(..., MALLOCX_ZERO) to zero the last full trailing page of large
allocations that have been randomly assigned an offset of 0 when
--enable-cache-oblivious configure option is enabled.
* 4.0.3 (September 24, 2015)
This bugfix release continues the trend of xallocx() and heap profiling fixes.
Bug fixes:
- Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large
allocations when --enable-cache-oblivious configure option is enabled.
- Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations
when resizing from/to a size class that is not a multiple of the chunk size.
- Fix prof_tctx_dump_iter() to filter out nodes that were created after heap
profile dumping started.
- Work around a potentially bad thread-specific data initialization
interaction with NPTL (glibc's pthreads implementation).
* 4.0.2 (September 21, 2015)
This bugfix release addresses a few bugs specific to heap profiling.
Bug fixes:
- Fix ixallocx_prof_sample() to never modify nor create sampled small
allocations. xallocx() is in general incapable of moving small allocations,
so this fix removes buggy code without loss of generality.
- Fix irallocx_prof_sample() to always allocate large regions, even when
alignment is non-zero.
- Fix prof_alloc_rollback() to read tdata from thread-specific data rather
than dereferencing a potentially invalid tctx.
* 4.0.1 (September 15, 2015)
This is a bugfix release that is somewhat high risk due to the amount of
refactoring required to address deep xallocx() problems. As a side effect of
these fixes, xallocx() now tries harder to partially fulfill requests for
optional extra space. Note that a couple of minor heap profiling
optimizations are included, but these are better thought of as performance
fixes that were integral to discovering most of the other bugs.
Optimizations:
- Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
fast path when heap profiling is enabled. Additionally, split a special
case out into arena_prof_tctx_reset(), which also avoids chunk metadata
reads.
- Optimize irallocx_prof() to optimistically update the sampler state. The
prior implementation appears to have been a holdover from when
rallocx()/xallocx() functionality was combined as rallocm().
Bug fixes:
- Fix TLS configuration such that it is enabled by default for platforms on
which it works correctly.
- Fix arenas_cache_cleanup() and arena_get_hard() to handle
allocation/deallocation within the application's thread-specific data
cleanup functions even after arenas_cache is torn down.
- Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS.
- Fix chunk purge hook calls for in-place huge shrinking reallocation to
specify the old chunk size rather than the new chunk size. This bug caused
no correctness issues for the default chunk purge function, but was
visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
- Fix heap profiling bugs:
+ Fix heap profiling to distinguish among otherwise identical sample sites
with interposed resets (triggered via the "prof.reset" mallctl). This bug
could cause data structure corruption that would most likely result in a
segfault.
+ Fix irealloc_prof() to prof_alloc_rollback() on OOM.
+ Make one call to prof_active_get_unlocked() per allocation event, and use
the result throughout the relevant functions that handle an allocation
event. Also add a missing check in prof_realloc(). These fixes protect
allocation events against concurrent prof_active changes.
+ Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample()
in the correct order.
+ Fix prof_realloc() to call prof_free_sampled_object() after calling
prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx were
the same, the tctx could have been prematurely destroyed.
- Fix portability bugs:
+ Don't bitshift by negative amounts when encoding/decoding run sizes in
chunk header maps. This affected systems with page sizes greater than 8
KiB.
+ Rename index_t to szind_t to avoid an existing type on Solaris.
+ Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
match glibc and avoid compilation errors when including both
jemalloc/jemalloc.h and malloc.h in C++ code.
+ Don't assume that /bin/sh is appropriate when running size_classes.sh
during configuration.
+ Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM.
+ Link tests to librt if it contains clock_gettime(2).
* 4.0.0 (August 17, 2015)
This version contains many speed and space optimizations, both minor and
major. The major themes are generalization, unification, and simplification.
Although many of these optimizations cause no visible behavior change, their
cumulative effect is substantial.
New features:
- Normalize size class spacing to be consistent across the complete size
range. By default there are four size classes per size doubling, but this
is now configurable via the --with-lg-size-class-group option. Also add the
--with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and
--with-lg-tiny-min options, which can be used to tweak page and size class
settings. Impacts:
+ Worst case performance for incrementally growing/shrinking reallocation
is improved because there are far fewer size classes, and therefore
copying happens less often.
+ Internal fragmentation is limited to 20% for all but the smallest size
classes (those less than four times the quantum). (1B + 4 KiB)
and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation.
+ Chunk fragmentation tends to be lower because there are fewer distinct run
sizes to pack.
- Add support for explicit tcaches. The "tcache.create", "tcache.flush", and
"tcache.destroy" mallctls control tcache lifetime and flushing, and the
MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API
control which tcache is used for each operation; a sketch follows this list.
- Implement per thread heap profiling, as well as the ability to
enable/disable heap profiling on a per thread basis. Add the "prof.reset",
"prof.lg_sample", "thread.prof.name", "thread.prof.active",
"opt.prof_thread_active_init", "prof.thread_active_init", and
"thread.prof.active" mallctls.
- Add support for per arena application-specified chunk allocators, configured
via the "arena.<i>.chunk_hooks" mallctl.
- Refactor huge allocation to be managed by arenas, so that arenas now
function as general purpose independent allocators. This is important in
the context of user-specified chunk allocators, aside from the scalability
benefits. Related new statistics:
+ The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
"stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
mallctls provide high level per arena huge allocation statistics.
+ The "arenas.nhchunks", "arenas.hchunk.<i>.size",
"stats.arenas.<i>.hchunks.<j>.nmalloc",
"stats.arenas.<i>.hchunks.<j>.ndalloc",
"stats.arenas.<i>.hchunks.<j>.nrequests", and
"stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class
statistics.
- Add the 'util' column to malloc_stats_print() output, which reports the
proportion of available regions that are currently in use for each small
size class.
- Add "alloc" and "free" modes for junk filling (see the "opt.junk"
mallctl), so that it is possible to separately enable junk filling for
allocation versus deallocation.
- Add the jemalloc-config script, which provides information about how
jemalloc was configured, and how to integrate it into application builds.
- Add metadata statistics, which are accessible via the "stats.metadata",
"stats.arenas.<i>.metadata.mapped", and
"stats.arenas.<i>.metadata.allocated" mallctls.
- Add the "stats.resident" mallctl, which reports the upper limit of
physically resident memory mapped by the allocator.
- Add per arena control over unused dirty page purging, via the
"arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
"stats.arenas.<i>.lg_dirty_mult" mallctls.
- Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
feature on/off during program execution.
- Add sdallocx(), which implements sized deallocation. The primary
optimization over dallocx() is the removal of a metadata read, which often
suffers an L1 cache miss.
- Add missing header includes in jemalloc/jemalloc.h, so that applications
only have to #include <jemalloc/jemalloc.h>.
- Add support for additional platforms:
+ Bitrig
+ Cygwin
+ DragonFlyBSD
+ iOS
+ OpenBSD
+ OpenRISC/or1k
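
A sketch of the explicit-tcache flags described above, together with sized
deallocation via sdallocx() (standard unprefixed API; error handling
trimmed):

    #include <jemalloc/jemalloc.h>

    static void
    tcache_demo(void) {
        unsigned tc;
        size_t sz = sizeof(tc);
        if (mallctl("tcache.create", (void *)&tc, &sz, NULL, 0) != 0) {
            return;
        }
        void *p = mallocx(4096, MALLOCX_TCACHE(tc)); /* cache into tc */
        dallocx(p, MALLOCX_TCACHE(tc));

        void *q = mallocx(64, MALLOCX_TCACHE_NONE);  /* bypass all tcaches */
        sdallocx(q, 64, MALLOCX_TCACHE_NONE);        /* sized deallocation */

        mallctl("tcache.destroy", NULL, NULL, (void *)&tc, sizeof(tc));
    }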
Optimizations:
- Maintain dirty runs in per arena LRUs rather than in per arena trees of
dirty-run-containing chunks. In practice this change significantly reduces
dirty page purging volume.
- Integrate whole chunks into the unused dirty page purging machinery. This
reduces the cost of repeated huge allocation/deallocation, because it
effectively introduces a cache of chunks.
- Split the arena chunk map into two separate arrays, in order to increase
cache locality for the frequently accessed bits.
- Move small run metadata out of runs, into arena chunk headers. This reduces
run fragmentation, smaller runs reduce external fragmentation for small size
classes, and packed (less uniformly aligned) metadata layout improves CPU
cache set distribution.
- Randomly distribute large allocation base pointer alignment relative to page
boundaries in order to more uniformly utilize CPU cache sets. This can be
disabled via the --disable-cache-oblivious configure option, and queried via
the "config.cache_oblivious" mallctl.
- Micro-optimize the fast paths for the public API functions.
- Refactor thread-specific data to reside in a single structure. This assures
that only a single TLS read is necessary per call into the public API.
- Implement in-place huge allocation growing and shrinking.
- Refactor rtree (radix tree for chunk lookups) to be lock-free, and make
additional optimizations that reduce maximum lookup depth to one or two
levels. This resolves what was a concurrency bottleneck for per arena huge
allocation, because a global data structure is critical for determining
which arenas own which huge allocations.
Incompatible changes:
- Replace --enable-cc-silence with --disable-cc-silence to suppress spurious
warnings by default.
- Assure that the constness of malloc_usable_size()'s return type matches that
of the system implementation.
- Change the heap profile dump format to support per thread heap profiling,
rename pprof to jeprof, and enhance it with the --thread=<n> option. As a
result, the bundled jeprof must now be used rather than the upstream
(gperftools) pprof.
- Disable "opt.prof_final" by default, in order to avoid atexit(3), which can
internally deadlock on some platforms.
- Change the "arenas.nlruns" mallctl type from size_t to unsigned.
- Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with
"stats.arenas.<i>.bins.<j>.curregs".
- Ignore MALLOC_CONF in set{uid,gid,cap} binaries.
- Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the
MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage.
Removed features:
- Remove the *allocm() API, which is superseded by the *allocx() API.
- Remove the --enable-dss options, and make dss non-optional on all platforms
which support sbrk(2).
- Remove the "arenas.purge" mallctl, which was obsoleted by the
"arena.<i>.purge" mallctl in 3.1.0.
- Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically
detects whether it is running inside Valgrind.
- Remove the "stats.huge.allocated", "stats.huge.nmalloc", and
"stats.huge.ndalloc" mallctls.
- Remove the --enable-mremap option.
- Remove the "stats.chunks.current", "stats.chunks.total", and
"stats.chunks.high" mallctls.
Bug fixes:
- Fix the cactive statistic to decrease (rather than increase) when active
memory decreases. This regression was first released in 3.5.0.
- Fix OOM handling in memalign() and valloc(). A variant of this bug existed
in all releases since 2.0.0, which introduced these functions.
- Fix an OOM-related regression in arena_tcache_fill_small(), which could
cause cache corruption on OOM. This regression was present in all releases
from 2.2.0 through 3.6.0.
- Fix size class overflow handling for malloc(), posix_memalign(), memalign(),
calloc(), and realloc() when profiling is enabled.
- Fix the "arena.<i>.dss" mallctl to return an error if "primary" or
"secondary" precedence is specified, but sbrk(2) is not supported.
- Fix fallback lg_floor() implementations to handle extremely large inputs.
- Ensure the default purgeable zone is after the default zone on OS X.
- Fix latent bugs in atomic_*().
- Fix the "arena.<i>.dss" mallctl to handle read-only calls.
- Fix tls_model configuration to enable the initial-exec model when possible.
- Mark malloc_conf as a weak symbol so that the application can override it.
- Correctly detect glibc's adaptive pthread mutexes.
- Fix the --without-export configure option.
* 3.6.0 (March 31, 2014)
This version contains a critical bug fix for a regression present in 3.5.0 and
@@ -21,7 +738,7 @@ found in the git revision history:
backtracing to be reliable.
- Use dss allocation precedence for huge allocations as well as small/large
allocations.
- Fix test assertion failure message formatting. This bug did not manifect on
- Fix test assertion failure message formatting. This bug did not manifest on
x86_64 systems because of implementation subtleties in va_list.
- Fix inconsequential test failures for hash and SFMT code.
@@ -516,7 +1233,7 @@ found in the git revision history:
- Make it possible for the application to manually flush a thread's cache, via
the "tcache.flush" mallctl.
- Base maximum dirty page count on proportion of active memory.
- Compute various addtional run-time statistics, including per size class
- Compute various additional run-time statistics, including per size class
statistics for large objects.
- Expose malloc_stats_print(), which can be called repeatedly by the
application.

View File

@@ -3,12 +3,12 @@ fragmentation avoidance and scalable concurrency support. jemalloc first came
into use as the FreeBSD libc allocator in 2005, and since then it has found its
way into numerous applications that rely on its predictable behavior. In 2010
jemalloc development efforts broadened to include developer support features
such as heap profiling, Valgrind integration, and extensive monitoring/tuning
hooks. Modern jemalloc releases continue to be integrated back into FreeBSD,
and therefore versatility remains critical. Ongoing development efforts trend
toward making jemalloc among the best allocators for a broad range of demanding
applications, and eliminating/mitigating weaknesses that have practical
repercussions for real world applications.
such as heap profiling and extensive monitoring/tuning hooks. Modern jemalloc
releases continue to be integrated back into FreeBSD, and therefore versatility
remains critical. Ongoing development efforts trend toward making jemalloc
among the best allocators for a broad range of demanding applications, and
eliminating/mitigating weaknesses that have practical repercussions for real
world applications.
The COPYING file contains copyright and licensing information.
@@ -17,4 +17,4 @@ jemalloc.
The ChangeLog file contains a brief summary of changes for each release.
URL: http://www.canonware.com/jemalloc/
URL: http://jemalloc.net/

View File

@@ -0,0 +1,97 @@
#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;
extern const arena_bin_info_t arena_bin_info[NBINS];
extern percpu_arena_mode_t opt_percpu_arena;
extern const char *percpu_arena_mode_names[];
extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
extern malloc_mutex_t arenas_lock;
void arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
szind_t szind, uint64_t nrequests);
void arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
size_t size);
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats);
void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
#ifdef JEMALLOC_JET
size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
#endif
extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool *zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
extent_t *extent);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
ssize_t arena_dirty_decay_ms_get(arena_t *arena);
bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,
bool zero);
typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *);
extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
bool slow_path);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, void *ptr);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_default_get(void);
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
size_t arena_extent_sn_next(arena_t *arena);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void arena_boot(void);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */

View File

@@ -0,0 +1,57 @@
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H
static inline unsigned
arena_ind_get(const arena_t *arena) {
return base_ind_get(arena->base);
}
static inline void
arena_internal_add(arena_t *arena, size_t size) {
atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}
static inline void
arena_internal_sub(arena_t *arena, size_t size) {
atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}
static inline size_t
arena_internal_get(arena_t *arena) {
return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
}
static inline bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
cassert(config_prof);
if (likely(prof_interval == 0)) {
return false;
}
return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
}
static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
assert(have_percpu_arena);
arena_t *oldarena = tsd_arena_get(tsd);
assert(oldarena != NULL);
unsigned oldind = arena_ind_get(oldarena);
if (oldind != cpu) {
unsigned newind = cpu;
arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
assert(newarena != NULL);
/* Set new arena/tcache associations. */
arena_migrate(tsd, oldind, newind);
tcache_t *tcache = tcache_get(tsd);
if (tcache != NULL) {
tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
newarena);
}
}
}
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */

View File

@@ -0,0 +1,361 @@
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
static inline szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin) {
szind_t binind = (szind_t)(bin - arena->bins);
assert(binind < NBINS);
return binind;
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
cassert(config_prof);
assert(ptr != NULL);
/* Static check. */
if (alloc_ctx == NULL) {
const extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(!extent_slab_get(extent))) {
return large_prof_tctx_get(tsdn, extent);
}
} else {
if (unlikely(!alloc_ctx->slab)) {
return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
}
}
return (prof_tctx_t *)(uintptr_t)1U;
}
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
/* Static check. */
if (alloc_ctx == NULL) {
extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(!extent_slab_get(extent))) {
large_prof_tctx_set(tsdn, extent, tctx);
}
} else {
if (unlikely(!alloc_ctx->slab)) {
large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
}
}
}
static inline void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
extent_t *extent = iealloc(tsdn, ptr);
assert(!extent_slab_get(extent));
large_prof_tctx_reset(tsdn, extent);
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
tsd_t *tsd;
ticker_t *decay_ticker;
if (unlikely(tsdn_null(tsdn))) {
return;
}
tsd = tsdn_tsd(tsdn);
decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
if (unlikely(decay_ticker == NULL)) {
return;
}
if (unlikely(ticker_ticks(decay_ticker, nticks))) {
arena_decay(tsdn, arena, false, false);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);
arena_decay_ticks(tsdn, arena, 1);
}
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(size != 0);
if (likely(tcache != NULL)) {
if (likely(size <= SMALL_MAXCLASS)) {
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
}
if (likely(size <= tcache_maxclass)) {
return tcache_alloc_large(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
}
/* (size > tcache_maxclass) case falls through. */
assert(size > tcache_maxclass);
}
return arena_malloc_hard(tsdn, arena, size, ind, zero);
}
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
return extent_arena_get(iealloc(tsdn, ptr));
}
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
assert(ptr != NULL);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
assert(szind != NSIZES);
return sz_index2size(szind);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
/*
* Return 0 if ptr is not within an extent managed by jemalloc. This
* function has two extra costs relative to isalloc():
* - The rtree calls cannot claim to be dependent lookups, which induces
* rtree lookup load dependencies.
* - The lookup may fail, so there is an extra branch to check for
* failure.
*/
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
extent_t *extent;
szind_t szind;
if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, false, &extent, &szind)) {
return 0;
}
if (extent == NULL) {
return 0;
}
assert(extent_state_get(extent) == extent_state_active);
/* Only slab members should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
assert(szind != NSIZES);
return sz_index2size(szind);
}
static inline void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
assert(ptr != NULL);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind;
bool slab;
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
true, &szind, &slab);
if (config_debug) {
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(szind < NSIZES);
assert(slab == extent_slab_get(extent));
}
if (likely(slab)) {
/* Small allocation. */
arena_dalloc_small(tsdn, ptr);
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
alloc_ctx_t *alloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
if (unlikely(tcache == NULL)) {
arena_dalloc_no_tcache(tsdn, ptr);
return;
}
szind_t szind;
bool slab;
rtree_ctx_t *rtree_ctx;
if (alloc_ctx != NULL) {
szind = alloc_ctx->szind;
slab = alloc_ctx->slab;
assert(szind != NSIZES);
} else {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
}
if (config_debug) {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(szind < NSIZES);
assert(slab == extent_slab_get(extent));
}
if (likely(slab)) {
/* Small allocation. */
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
slow_path);
} else {
if (szind < nhbins) {
if (config_prof && unlikely(szind < NBINS)) {
arena_dalloc_promoted(tsdn, ptr, tcache,
slow_path);
} else {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
szind, slow_path);
}
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
}
}
}
static inline void
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
assert(ptr != NULL);
assert(size <= LARGE_MAXCLASS);
szind_t szind;
bool slab;
if (!config_prof || !opt_prof) {
/*
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
*/
szind = sz_size2index(size);
slab = (szind < NBINS);
}
if ((config_prof && opt_prof) || config_debug) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
assert(szind == sz_size2index(size));
assert((config_prof && opt_prof) || slab == (szind < NBINS));
if (config_debug) {
extent_t *extent = rtree_extent_read(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(slab == extent_slab_get(extent));
}
}
if (likely(slab)) {
/* Small allocation. */
arena_dalloc_small(tsdn, ptr);
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
alloc_ctx_t *alloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
assert(size <= LARGE_MAXCLASS);
if (unlikely(tcache == NULL)) {
arena_sdalloc_no_tcache(tsdn, ptr, size);
return;
}
szind_t szind;
bool slab;
UNUSED alloc_ctx_t local_ctx;
if (config_prof && opt_prof) {
if (alloc_ctx == NULL) {
/* Uncommon case and should be a static check. */
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &local_ctx.szind,
&local_ctx.slab);
assert(local_ctx.szind == sz_size2index(size));
alloc_ctx = &local_ctx;
}
slab = alloc_ctx->slab;
szind = alloc_ctx->szind;
} else {
/*
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
*/
szind = sz_size2index(size);
slab = (szind < NBINS);
}
if (config_debug) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
extent_t *extent = rtree_extent_read(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(slab == extent_slab_get(extent));
}
if (likely(slab)) {
/* Small allocation. */
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
slow_path);
} else {
if (szind < nhbins) {
if (config_prof && unlikely(szind < NBINS)) {
arena_dalloc_promoted(tsdn, ptr, tcache,
slow_path);
} else {
tcache_dalloc_large(tsdn_tsd(tsdn),
tcache, ptr, szind, slow_path);
}
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
}
}
}
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */

View File

@@ -0,0 +1,11 @@
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#include "jemalloc/internal/bitmap.h"
struct arena_slab_data_s {
/* Per region allocated/deallocated bitmap. */
bitmap_t bitmap[BITMAP_GROUPS_MAX];
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */

View File

@@ -0,0 +1,284 @@
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ticker.h"
/*
* Read-only information associated with each element of arena_t's bins array
* is stored separately, partly to reduce memory usage (only one copy, rather
* than one per arena), but mainly to avoid false cacheline sharing.
*
* Each slab has the following layout:
*
* /--------------------\
* | region 0 |
* |--------------------|
* | region 1 |
* |--------------------|
* | ... |
* | ... |
* | ... |
* |--------------------|
* | region nregs-1 |
* \--------------------/
*/
struct arena_bin_info_s {
/* Size of regions in a slab for this bin's size class. */
size_t reg_size;
/* Total size of a slab for this bin's size class. */
size_t slab_size;
/* Total number of regions in a slab for this bin's size class. */
uint32_t nregs;
/*
* Metadata used to manipulate bitmaps for slabs associated with this
* bin.
*/
bitmap_info_t bitmap_info;
};
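/*
 * Illustrative sketch (not part of jemalloc): the fields above satisfy
 * nregs * reg_size <= slab_size, and region i of a slab starting at `base`
 * lives at base + i * reg_size. A hypothetical helper making that concrete:
 */
static inline void *
example_region_addr(void *slab_base, const arena_bin_info_t *bin_info,
    uint32_t regind) {
/* Hypothetical helper; assumes regind < bin_info->nregs. */
return (void *)((uintptr_t)slab_base +
    (uintptr_t)regind * bin_info->reg_size);
}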
struct arena_decay_s {
/* Synchronizes all non-atomic fields. */
malloc_mutex_t mtx;
/*
* True if a thread is currently purging the extents associated with
* this decay structure.
*/
bool purging;
/*
* Approximate time in milliseconds from the creation of a set of unused
* dirty pages until an equivalent set of unused dirty pages is purged
* and/or reused.
*/
atomic_zd_t time_ms;
/* time / SMOOTHSTEP_NSTEPS. */
nstime_t interval;
/*
* Time at which the current decay interval logically started. We do
* not actually advance to a new epoch until sometime after it starts
* because of scheduling and computation delays, and it is even possible
* to completely skip epochs. In all cases, during epoch advancement we
* merge all relevant activity into the most recently recorded epoch.
*/
nstime_t epoch;
/* Deadline randomness generator. */
uint64_t jitter_state;
/*
* Deadline for current epoch. This is the sum of interval and per
* epoch jitter which is a uniform random variable in [0..interval).
* Epochs always advance by precise multiples of interval, but we
* randomize the deadline to reduce the likelihood of arenas purging in
* lockstep.
*/
nstime_t deadline;
/*
* Number of unpurged pages at beginning of current epoch. During epoch
* advancement we use the delta between arena->decay_*.nunpurged and
* extents_npages_get(&arena->extents_*) to determine how many dirty
* pages, if any, were generated.
*/
size_t nunpurged;
/*
* Trailing log of how many unused dirty pages were generated during
* each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
* element is the most recent epoch. Corresponding epoch times are
* relative to epoch.
*/
size_t backlog[SMOOTHSTEP_NSTEPS];
/*
* Pointer to associated stats. These stats are embedded directly in
* the arena's stats due to how stats structures are shared between the
* arena and ctl code.
*
* Synchronization: Same as associated arena's stats field.
*/
decay_stats_t *stats;
/* Peak number of pages in associated extents. Used for debug only. */
uint64_t ceil_npages;
};
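/*
 * Illustrative sketch (not part of jemalloc): per the field comments above,
 * epoch advancement recomputes the deadline roughly as
 *
 *   deadline = epoch + interval + jitter,  jitter uniform in [0..interval)
 *
 * e.g., in pseudo-C with jemalloc-style nstime/prng helpers (signatures
 * approximate):
 *
 *   nstime_copy(&decay->deadline, &decay->epoch);
 *   nstime_add(&decay->deadline, &decay->interval);
 *   uint64_t jitter = prng_range_u64(&decay->jitter_state,
 *       nstime_ns(&decay->interval));
 *   nstime_iadd(&decay->deadline, jitter);
 */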
struct arena_bin_s {
/* All operations on arena_bin_t fields require lock ownership. */
malloc_mutex_t lock;
/*
* Current slab being used to service allocations of this bin's size
* class. slabcur is independent of slabs_{nonfull,full}; whenever
* slabcur is reassigned, the previous slab must be deallocated or
* inserted into slabs_{nonfull,full}.
*/
extent_t *slabcur;
/*
* Heap of non-full slabs. This heap is used to assure that new
* allocations come from the non-full slab that is oldest/lowest in
* memory.
*/
extent_heap_t slabs_nonfull;
/* List used to track full slabs. */
extent_list_t slabs_full;
/* Bin statistics. */
malloc_bin_stats_t stats;
};
struct arena_s {
/*
* Number of threads currently assigned to this arena. Each thread has
* two distinct assignments, one for application-serving allocation, and
* the other for internal metadata allocation. Internal metadata must
* not be allocated from arenas explicitly created via the arenas.create
* mallctl, because the arena.<i>.reset mallctl indiscriminately
* discards all allocations for the affected arena.
*
* 0: Application allocation.
* 1: Internal metadata allocation.
*
* Synchronization: atomic.
*/
atomic_u_t nthreads[2];
/*
* When percpu_arena is enabled, to amortize the cost of reading /
* updating the current CPU id, track the most recent thread accessing
* this arena, and only read CPU if there is a mismatch.
*/
tsdn_t *last_thd;
/* Synchronization: internal. */
arena_stats_t stats;
/*
* List of tcaches for extant threads associated with this arena.
* Stats from these are merged incrementally, and at exit if
* opt_stats_print is enabled.
*
* Synchronization: tcache_ql_mtx.
*/
ql_head(tcache_t) tcache_ql;
malloc_mutex_t tcache_ql_mtx;
/* Synchronization: internal. */
prof_accum_t prof_accum;
uint64_t prof_accumbytes;
/*
* PRNG state for cache index randomization of large allocation base
* pointers.
*
* Synchronization: atomic.
*/
atomic_zu_t offset_state;
/*
* Extent serial number generator state.
*
* Synchronization: atomic.
*/
atomic_zu_t extent_sn_next;
/*
* Represents a dss_prec_t, but atomically.
*
* Synchronization: atomic.
*/
atomic_u_t dss_prec;
/*
* Number of pages in active extents.
*
* Synchronization: atomic.
*/
atomic_zu_t nactive;
/*
* Extant large allocations.
*
* Synchronization: large_mtx.
*/
extent_list_t large;
/* Synchronizes all large allocation/update/deallocation. */
malloc_mutex_t large_mtx;
/*
* Collections of extents that were previously allocated. These are
* used when allocating extents, in an attempt to re-use address space.
*
* Synchronization: internal.
*/
extents_t extents_dirty;
extents_t extents_muzzy;
extents_t extents_retained;
/*
* Decay-based purging state, responsible for scheduling extent state
* transitions.
*
* Synchronization: internal.
*/
arena_decay_t decay_dirty; /* dirty --> muzzy */
arena_decay_t decay_muzzy; /* muzzy --> retained */
/*
* Next extent size class in a growing series to use when satisfying a
* request via the extent hooks (only if opt_retain). This limits the
* number of disjoint virtual memory ranges so that extent merging can
* be effective even if multiple arenas' extent allocation requests are
* highly interleaved.
*
* Synchronization: extent_grow_mtx
*/
pszind_t extent_grow_next;
malloc_mutex_t extent_grow_mtx;
/*
* Available extent structures that were allocated via
* base_alloc_extent().
*
* Synchronization: extent_avail_mtx.
*/
extent_tree_t extent_avail;
malloc_mutex_t extent_avail_mtx;
/*
* bins is used to store heaps of free regions.
*
* Synchronization: internal.
*/
arena_bin_t bins[NBINS];
/*
* Base allocator, from which arena metadata are allocated.
*
* Synchronization: internal.
*/
base_t *base;
/* Used to determine uptime. Read-only after initialization. */
nstime_t create_time;
};
/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
ticker_t decay_ticker;
};
/* Used to pass rtree lookup context down the path. */
struct alloc_ctx_s {
szind_t szind;
bool slab;
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */

View File

@@ -0,0 +1,45 @@
#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
#define JEMALLOC_INTERNAL_ARENA_TYPES_H
/* Maximum number of regions in one slab. */
#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
/* Default decay times in milliseconds. */
#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT ZD(10 * 1000)
/* Number of event ticks between time checks. */
#define DECAY_NTICKS_PER_UPDATE 1000
typedef struct arena_slab_data_s arena_slab_data_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;
typedef struct alloc_ctx_s alloc_ctx_t;
typedef enum {
percpu_arena_mode_names_base = 0, /* Used for options processing. */
/*
* *_uninit are used only during bootstrapping; each initialized variant
* equals its *_uninit counterpart plus percpu_arena_mode_enabled_base.
*/
percpu_arena_uninit = 0,
per_phycpu_arena_uninit = 1,
/* All non-disabled modes must come after percpu_arena_disabled. */
percpu_arena_disabled = 2,
percpu_arena_mode_names_limit = 3, /* Used for options processing. */
percpu_arena_mode_enabled_base = 3,
percpu_arena = 3,
per_phycpu_arena = 4 /* Hyper threads share arena. */
} percpu_arena_mode_t;
#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base)
#define PERCPU_ARENA_DEFAULT percpu_arena_disabled
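/*
 * Illustrative examples (not part of jemalloc), from the enum values above:
 *
 *   PERCPU_ARENA_ENABLED(percpu_arena_disabled) -> false (2 <  3)
 *   PERCPU_ARENA_ENABLED(percpu_arena)          -> true  (3 >= 3)
 *   PERCPU_ARENA_ENABLED(per_phycpu_arena)      -> true  (4 >= 3)
 */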
#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */

View File

@@ -0,0 +1,56 @@
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/util.h"
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
#define assert(e) do { \
if (unlikely(config_debug && !(e))) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#endif
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} \
unreachable(); \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (unlikely(config_debug && !(e))) { \
not_implemented(); \
} \
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#ifndef cassert
#define cassert(c) do { \
if (unlikely(!(c))) { \
not_reached(); \
} \
} while (0)
#endif
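/*
 * Illustrative usage (not part of jemalloc): assert() only fires when
 * config_debug is true, whereas cassert() always evaluates its condition and
 * funnels failures into not_reached(), so it is the one to use for "this
 * build must have feature X" checks. A hypothetical caller:
 *
 *   void prof_only_path(void) {
 *       cassert(config_prof);  // build-configuration check
 *       assert(opt_prof);      // debug-build-only sanity check
 *   }
 */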

View File

@@ -1,304 +1,77 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_ATOMIC_H
#define JEMALLOC_INTERNAL_ATOMIC_H
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#define ATOMIC_INLINE static inline
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
#define atomic_read_z(p) atomic_add_z(p, 0)
#define atomic_read_u(p) atomic_add_u(p, 0)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
size_t atomic_add_z(size_t *p, size_t x);
size_t atomic_sub_z(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (__sync_sub_and_fetch(p, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, -((int64_t)x)));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
# elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
asm volatile (
"lock; xaddq %0, %1;"
: "+r" (x), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
x = (uint64_t)(-(int64_t)x);
asm volatile (
"lock; xaddq %0, %1;"
: "+r" (x), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
/*
* atomic_fetchadd_64() doesn't exist, but we only ever use this
* function on LP64 systems, so atomic_fetchadd_long() will do.
*/
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}
# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (__sync_sub_and_fetch(p, x));
}
# else
# error "Missing implementation for 64-bit atomic operations"
# endif
#endif
/******************************************************************************/
/* 32-bit operations. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (__sync_sub_and_fetch(p, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, -((int32_t)x)));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
asm volatile (
"lock; xaddl %0, %1;"
: "+r" (x), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
x = (uint32_t)(-(int32_t)x);
asm volatile (
"lock; xaddl %0, %1;"
: "+r" (x), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (atomic_fetchadd_32(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}
#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (__sync_sub_and_fetch(p, x));
}
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
# include "jemalloc/internal/atomic_gcc_atomic.h"
#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
# include "jemalloc/internal/atomic_gcc_sync.h"
#elif defined(_MSC_VER)
# include "jemalloc/internal/atomic_msvc.h"
#elif defined(JEMALLOC_C11_ATOMICS)
# include "jemalloc/internal/atomic_c11.h"
#else
# error "Missing implementation for 32-bit atomic operations"
# error "Don't have atomics implemented on this platform."
#endif
/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t
atomic_add_z(size_t *p, size_t x)
{
/*
* This header gives more or less a backport of C11 atomics. The user can write
* JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
* counterparts of the C11 atomic functions for type, as so:
* JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
* and then write things like:
* int *some_ptr;
* atomic_pi_t atomic_ptr_to_int;
* atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
* int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL);
* assert(some_ptr == prev_value);
* and expect things to work in the obvious way.
*
* Also included (with naming differences to avoid conflicts with the standard
* library):
* atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
* ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
*/
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
/*
* Pure convenience, so that we don't have to type "atomic_memory_order_"
* quite so often.
*/
#define ATOMIC_RELAXED atomic_memory_order_relaxed
#define ATOMIC_ACQUIRE atomic_memory_order_acquire
#define ATOMIC_RELEASE atomic_memory_order_release
#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
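/*
 * Illustrative usage (not part of jemalloc): with the u32 atomics generated
 * below, a relaxed reference counter looks like
 *
 *   atomic_u32_t refs = ATOMIC_INIT(0);
 *   atomic_fetch_add_u32(&refs, 1, ATOMIC_RELAXED);
 *   uint32_t n = atomic_load_u32(&refs, ATOMIC_ACQUIRE);
 */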
JEMALLOC_INLINE size_t
atomic_sub_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
return ((size_t)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
/******************************************************************************/
/* unsigned operations. */
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
return ((unsigned)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
/******************************************************************************/
/*
* Not all platforms have 64-bit atomics. If we do, this #define exposes that
* fact.
*/
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# define JEMALLOC_ATOMIC_U64
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
/*
* There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
* platform that actually needs to know the size, MSVC.
*/
JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
#ifdef JEMALLOC_ATOMIC_U64
JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
#endif
#undef ATOMIC_INLINE
#endif /* JEMALLOC_INTERNAL_ATOMIC_H */

View File

@@ -0,0 +1,97 @@
#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H
#define JEMALLOC_INTERNAL_ATOMIC_C11_H
#include <stdatomic.h>
#define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__)
#define atomic_memory_order_t memory_order
#define atomic_memory_order_relaxed memory_order_relaxed
#define atomic_memory_order_acquire memory_order_acquire
#define atomic_memory_order_release memory_order_release
#define atomic_memory_order_acq_rel memory_order_acq_rel
#define atomic_memory_order_seq_cst memory_order_seq_cst
#define atomic_fence atomic_thread_fence
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
/* unused */ lg_size) \
typedef _Atomic(type) atomic_##short_type##_t; \
\
ATOMIC_INLINE type \
atomic_load_##short_type(const atomic_##short_type##_t *a, \
atomic_memory_order_t mo) { \
/* \
* A strict interpretation of the C standard prevents \
* atomic_load from taking a const argument, but it's \
* convenient for our purposes. This cast is a workaround. \
*/ \
atomic_##short_type##_t* a_nonconst = \
(atomic_##short_type##_t*)a; \
return atomic_load_explicit(a_nonconst, mo); \
} \
\
ATOMIC_INLINE void \
atomic_store_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
atomic_store_explicit(a, val, mo); \
} \
\
ATOMIC_INLINE type \
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return atomic_exchange_explicit(a, val, mo); \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
return atomic_compare_exchange_weak_explicit(a, expected, \
desired, success_mo, failure_mo); \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
return atomic_compare_exchange_strong_explicit(a, expected, \
desired, success_mo, failure_mo); \
}
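/*
 * Illustrative expansion (not part of jemalloc): for
 * JEMALLOC_GENERATE_ATOMICS(bool, b, 0), the macro above emits
 * "typedef _Atomic(bool) atomic_b_t;" plus thin wrappers such as
 * atomic_load_b()/atomic_store_b() that forward to C11's
 * atomic_load_explicit()/atomic_store_explicit() with the given order.
 */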
/*
* Integral types have some special operations available that non-integral ones
* lack.
*/
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
/* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_add_explicit(a, val, mo); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_sub_explicit(a, val, mo); \
} \
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_and_explicit(a, val, mo); \
} \
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_or_explicit(a, val, mo); \
} \
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_xor_explicit(a, val, mo); \
}
#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */

View File

@@ -0,0 +1,127 @@
#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
#include "jemalloc/internal/assert.h"
#define ATOMIC_INIT(...) {__VA_ARGS__}
typedef enum {
atomic_memory_order_relaxed,
atomic_memory_order_acquire,
atomic_memory_order_release,
atomic_memory_order_acq_rel,
atomic_memory_order_seq_cst
} atomic_memory_order_t;
ATOMIC_INLINE int
atomic_enum_to_builtin(atomic_memory_order_t mo) {
switch (mo) {
case atomic_memory_order_relaxed:
return __ATOMIC_RELAXED;
case atomic_memory_order_acquire:
return __ATOMIC_ACQUIRE;
case atomic_memory_order_release:
return __ATOMIC_RELEASE;
case atomic_memory_order_acq_rel:
return __ATOMIC_ACQ_REL;
case atomic_memory_order_seq_cst:
return __ATOMIC_SEQ_CST;
}
/* Can't happen; the switch is exhaustive. */
not_reached();
}
ATOMIC_INLINE void
atomic_fence(atomic_memory_order_t mo) {
__atomic_thread_fence(atomic_enum_to_builtin(mo));
}
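/*
 * Illustrative note (not part of jemalloc): the generated types below wrap
 * the value in a one-field struct (`repr`), so accidental direct access does
 * not type-check:
 *
 *   atomic_zu_t n;                           // struct { size_t repr; }
 *   atomic_store_zu(&n, 0, ATOMIC_RELAXED);  // OK
 *   n = 0;                                   // compile error, by design
 */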
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
/* unused */ lg_size) \
typedef struct { \
type repr; \
} atomic_##short_type##_t; \
\
ATOMIC_INLINE type \
atomic_load_##short_type(const atomic_##short_type##_t *a, \
atomic_memory_order_t mo) { \
type result; \
__atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \
return result; \
} \
\
ATOMIC_INLINE void \
atomic_store_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
__atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
type result; \
__atomic_exchange(&a->repr, &val, &result, \
atomic_enum_to_builtin(mo)); \
return result; \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
return __atomic_compare_exchange(&a->repr, expected, &desired, \
true, atomic_enum_to_builtin(success_mo), \
atomic_enum_to_builtin(failure_mo)); \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
return __atomic_compare_exchange(&a->repr, expected, &desired, \
false, \
atomic_enum_to_builtin(success_mo), \
atomic_enum_to_builtin(failure_mo)); \
}
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
/* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_add(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_sub(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_and(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_or(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_xor(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
}
#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */

View File

@@ -0,0 +1,191 @@
#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
#define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
#define ATOMIC_INIT(...) {__VA_ARGS__}
typedef enum {
atomic_memory_order_relaxed,
atomic_memory_order_acquire,
atomic_memory_order_release,
atomic_memory_order_acq_rel,
atomic_memory_order_seq_cst
} atomic_memory_order_t;
ATOMIC_INLINE void
atomic_fence(atomic_memory_order_t mo) {
/* Easy cases first: no barrier, and full barrier. */
if (mo == atomic_memory_order_relaxed) {
asm volatile("" ::: "memory");
return;
}
if (mo == atomic_memory_order_seq_cst) {
asm volatile("" ::: "memory");
__sync_synchronize();
asm volatile("" ::: "memory");
return;
}
asm volatile("" ::: "memory");
# if defined(__i386__) || defined(__x86_64__)
/* This is implicit on x86. */
# elif defined(__ppc__)
asm volatile("lwsync");
# elif defined(__sparc__) && defined(__arch64__)
if (mo == atomic_memory_order_acquire) {
asm volatile("membar #LoadLoad | #LoadStore");
} else if (mo == atomic_memory_order_release) {
asm volatile("membar #LoadStore | #StoreStore");
} else {
asm volatile("membar #LoadLoad | #LoadStore | #StoreStore");
}
# else
__sync_synchronize();
# endif
asm volatile("" ::: "memory");
}
/*
* A correct implementation of seq_cst loads and stores on weakly ordered
* architectures could do either of the following:
* 1. store() is weak-fence -> store -> strong fence, load() is load ->
* strong-fence.
* 2. store() is strong-fence -> store, load() is strong-fence -> load ->
* weak-fence.
* The tricky thing is, load() and store() above can be the load or store
* portions of a gcc __sync builtin, so we have to follow GCC's lead, which
* means going with strategy 2.
* On strongly ordered architectures, the natural strategy is to stick a strong
* fence after seq_cst stores, and have naked loads. So we want the strong
* fences in different places on different architectures.
* atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to
* accomplish this.
*/
ATOMIC_INLINE void
atomic_pre_sc_load_fence() {
# if defined(__i386__) || defined(__x86_64__) || \
(defined(__sparc__) && defined(__arch64__))
atomic_fence(atomic_memory_order_relaxed);
# else
atomic_fence(atomic_memory_order_seq_cst);
# endif
}
ATOMIC_INLINE void
atomic_post_sc_store_fence() {
# if defined(__i386__) || defined(__x86_64__) || \
(defined(__sparc__) && defined(__arch64__))
atomic_fence(atomic_memory_order_seq_cst);
# else
atomic_fence(atomic_memory_order_relaxed);
# endif
}
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
/* unused */ lg_size) \
typedef struct { \
type volatile repr; \
} atomic_##short_type##_t; \
\
ATOMIC_INLINE type \
atomic_load_##short_type(const atomic_##short_type##_t *a, \
atomic_memory_order_t mo) { \
if (mo == atomic_memory_order_seq_cst) { \
atomic_pre_sc_load_fence(); \
} \
type result = a->repr; \
if (mo != atomic_memory_order_relaxed) { \
atomic_fence(atomic_memory_order_acquire); \
} \
return result; \
} \
\
ATOMIC_INLINE void \
atomic_store_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
if (mo != atomic_memory_order_relaxed) { \
atomic_fence(atomic_memory_order_release); \
} \
a->repr = val; \
if (mo == atomic_memory_order_seq_cst) { \
atomic_post_sc_store_fence(); \
} \
} \
\
ATOMIC_INLINE type \
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
/* \
* Because of FreeBSD, we care about gcc 4.2, which doesn't have\
* an atomic exchange builtin. We fake it with a CAS loop. \
*/ \
while (true) { \
type old = a->repr; \
if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \
return old; \
} \
} \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
desired); \
if (prev == *expected) { \
return true; \
} else { \
*expected = prev; \
return false; \
} \
} \
ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
desired); \
if (prev == *expected) { \
return true; \
} else { \
*expected = prev; \
return false; \
} \
}
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
/* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_add(&a->repr, val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_sub(&a->repr, val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_and(&a->repr, val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_or(&a->repr, val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_xor(&a->repr, val); \
}
#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */

View File

@@ -0,0 +1,158 @@
#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H
#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H
#define ATOMIC_INIT(...) {__VA_ARGS__}
typedef enum {
atomic_memory_order_relaxed,
atomic_memory_order_acquire,
atomic_memory_order_release,
atomic_memory_order_acq_rel,
atomic_memory_order_seq_cst
} atomic_memory_order_t;
typedef char atomic_repr_0_t;
typedef short atomic_repr_1_t;
typedef long atomic_repr_2_t;
typedef __int64 atomic_repr_3_t;
ATOMIC_INLINE void
atomic_fence(atomic_memory_order_t mo) {
_ReadWriteBarrier();
# if defined(_M_ARM) || defined(_M_ARM64)
/* ARM needs a barrier for everything but relaxed. */
if (mo != atomic_memory_order_relaxed) {
MemoryBarrier();
}
# elif defined(_M_IX86) || defined (_M_X64)
/* x86 needs a barrier only for seq_cst. */
if (mo == atomic_memory_order_seq_cst) {
MemoryBarrier();
}
# else
# error "Don't know how to create atomics for this platform for MSVC."
# endif
_ReadWriteBarrier();
}
#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t
#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)
#define ATOMIC_RAW_CONCAT(a, b) a ## b
#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \
base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))
#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \
ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)
#define ATOMIC_INTERLOCKED_SUFFIX_0 8
#define ATOMIC_INTERLOCKED_SUFFIX_1 16
#define ATOMIC_INTERLOCKED_SUFFIX_2
#define ATOMIC_INTERLOCKED_SUFFIX_3 64
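/*
 * Illustrative expansion (not part of jemalloc): with lg_size == 3,
 * ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 3) pastes suffix "64" to
 * yield _InterlockedExchange64; with lg_size == 2 the suffix is empty,
 * matching MSVC's unsuffixed _InterlockedExchange on long.
 */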
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
typedef struct { \
ATOMIC_INTERLOCKED_REPR(lg_size) repr; \
} atomic_##short_type##_t; \
\
ATOMIC_INLINE type \
atomic_load_##short_type(const atomic_##short_type##_t *a, \
atomic_memory_order_t mo) { \
ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \
if (mo != atomic_memory_order_relaxed) { \
atomic_fence(atomic_memory_order_acquire); \
} \
return (type) ret; \
} \
\
ATOMIC_INLINE void \
atomic_store_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
if (mo != atomic_memory_order_relaxed) { \
atomic_fence(atomic_memory_order_release); \
} \
a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \
if (mo == atomic_memory_order_seq_cst) { \
atomic_fence(atomic_memory_order_seq_cst); \
} \
} \
\
ATOMIC_INLINE type \
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \
lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
ATOMIC_INTERLOCKED_REPR(lg_size) e = \
(ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \
ATOMIC_INTERLOCKED_REPR(lg_size) d = \
(ATOMIC_INTERLOCKED_REPR(lg_size))desired; \
ATOMIC_INTERLOCKED_REPR(lg_size) old = \
ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \
lg_size)(&a->repr, d, e); \
if (old == e) { \
return true; \
} else { \
*expected = (type)old; \
return false; \
} \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
/* We implement the weak version with strong semantics. */ \
return atomic_compare_exchange_weak_##short_type(a, expected, \
desired, success_mo, failure_mo); \
}
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
\
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \
lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
/* \
* MSVC warns on negation of unsigned operands, but for us it \
* gives exactly the right semantics (MAX_TYPE + 1 - operand). \
*/ \
__pragma(warning(push)) \
__pragma(warning(disable: 4146)) \
return atomic_fetch_add_##short_type(a, -val, mo); \
__pragma(warning(pop)) \
} \
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \
&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \
&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \
&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
}
#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */

View File

@@ -0,0 +1,31 @@
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
extern bool opt_background_thread;
extern malloc_mutex_t background_thread_lock;
extern atomic_b_t background_thread_enabled_state;
extern size_t n_background_threads;
extern background_thread_info_t *background_thread_info;
extern bool can_enable_background_thread;
bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, size_t npages_new);
void background_thread_prefork0(tsdn_t *tsdn);
void background_thread_prefork1(tsdn_t *tsdn);
void background_thread_postfork_parent(tsdn_t *tsdn);
void background_thread_postfork_child(tsdn_t *tsdn);
bool background_thread_stats_read(tsdn_t *tsdn,
background_thread_stats_t *stats);
void background_thread_ctl_init(tsdn_t *tsdn);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
#endif
bool background_thread_boot0(void);
bool background_thread_boot1(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */

View File

@@ -0,0 +1,57 @@
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
JEMALLOC_ALWAYS_INLINE bool
background_thread_enabled(void) {
return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED);
}
JEMALLOC_ALWAYS_INLINE void
background_thread_enabled_set(tsdn_t *tsdn, bool state) {
malloc_mutex_assert_owner(tsdn, &background_thread_lock);
atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED);
}
JEMALLOC_ALWAYS_INLINE background_thread_info_t *
arena_background_thread_info_get(arena_t *arena) {
unsigned arena_ind = arena_ind_get(arena);
return &background_thread_info[arena_ind % ncpus];
}
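/*
 * Illustrative note (not part of jemalloc): background thread slots are
 * indexed modulo ncpus, so arenas share them round-robin; with ncpus == 4,
 * arenas 0 and 4 both map to background_thread_info[0].
 */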
JEMALLOC_ALWAYS_INLINE uint64_t
background_thread_wakeup_time_get(background_thread_info_t *info) {
uint64_t next_wakeup = nstime_ns(&info->next_wakeup);
assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) ==
(next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
return next_wakeup;
}
JEMALLOC_ALWAYS_INLINE void
background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info,
uint64_t wakeup_time) {
malloc_mutex_assert_owner(tsdn, &info->mtx);
atomic_store_b(&info->indefinite_sleep,
wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE);
nstime_init(&info->next_wakeup, wakeup_time);
}
JEMALLOC_ALWAYS_INLINE bool
background_thread_indefinite_sleep(background_thread_info_t *info) {
return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
}
JEMALLOC_ALWAYS_INLINE void
arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread) {
if (!background_thread_enabled() || is_background_thread) {
return;
}
background_thread_info_t *info =
arena_background_thread_info_get(arena);
if (background_thread_indefinite_sleep(info)) {
background_thread_interval_check(tsdn, arena,
&arena->decay_dirty, 0);
}
}
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */

View File

@@ -0,0 +1,52 @@
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
/* This file really combines "structs" and "types", but only transitionally. */
#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
# define JEMALLOC_PTHREAD_CREATE_WRAPPER
#endif
#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
typedef enum {
background_thread_stopped,
background_thread_started,
/* Thread waits on the global lock when paused (for arena_reset). */
background_thread_paused,
} background_thread_state_t;
struct background_thread_info_s {
#ifdef JEMALLOC_BACKGROUND_THREAD
/* Background thread is pthread specific. */
pthread_t thread;
pthread_cond_t cond;
#endif
malloc_mutex_t mtx;
background_thread_state_t state;
/* When true, it means no wakeup scheduled. */
atomic_b_t indefinite_sleep;
/* Next scheduled wakeup time (absolute time in ns). */
nstime_t next_wakeup;
/*
* Since the last background thread run, newly added number of pages
* that need to be purged by the next wakeup. This is adjusted on
* epoch advance, and is used to determine whether we should signal the
* background thread to wake up earlier.
*/
size_t npages_to_purge_new;
/* Stats: total number of runs since started. */
uint64_t tot_n_runs;
/* Stats: total sleep time since started. */
nstime_t tot_sleep_time;
};
typedef struct background_thread_info_s background_thread_info_t;
struct background_thread_stats_s {
size_t num_threads;
uint64_t num_runs;
nstime_t run_interval;
};
typedef struct background_thread_stats_s background_thread_stats_t;
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */

View File

@@ -0,0 +1,19 @@
#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H
base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
extent_hooks_t *base_extent_hooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *resident, size_t *mapped);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */

View File

@@ -0,0 +1,9 @@
#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
#define JEMALLOC_INTERNAL_BASE_INLINES_H
static inline unsigned
base_ind_get(const base_t *base) {
return base->ind;
}
#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */

View File

@@ -0,0 +1,55 @@
#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
#define JEMALLOC_INTERNAL_BASE_STRUCTS_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"
/* Embedded at the beginning of every block of base-managed virtual memory. */
struct base_block_s {
/* Total size of block's virtual memory mapping. */
size_t size;
/* Next block in list of base's blocks. */
base_block_t *next;
/* Tracks unused trailing space. */
extent_t extent;
};
struct base_s {
/* Associated arena's index within the arenas array. */
unsigned ind;
/*
* User-configurable extent hook functions. Points to an
* extent_hooks_t.
*/
atomic_p_t extent_hooks;
/* Protects base_alloc() and base_stats_get() operations. */
malloc_mutex_t mtx;
/*
* Most recent size class in the series of increasingly large base
* extents. Logarithmic spacing between subsequent allocations ensures
* that the total number of distinct mappings remains small.
*/
pszind_t pind_last;
/* Serial number generation state. */
size_t extent_sn_next;
/* Chain of all blocks associated with base. */
base_block_t *blocks;
/* Heap of extents that track unused trailing space within blocks. */
extent_heap_t avail[NSIZES];
/* Stats, only maintained if config_stats. */
size_t allocated;
size_t resident;
size_t mapped;
};
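/*
 * Illustrative note (not part of jemalloc): pind_last makes each new base
 * block at least one size class larger than the previous one, so total
 * capacity grows geometrically while the number of distinct mappings stays
 * logarithmic in the total metadata footprint.
 */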
#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */

View File

@@ -0,0 +1,7 @@
#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
#define JEMALLOC_INTERNAL_BASE_TYPES_H
typedef struct base_block_s base_block_t;
typedef struct base_s base_t;
#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */

View File

@@ -0,0 +1,165 @@
#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H
#define JEMALLOC_INTERNAL_BIT_UTIL_H
#include "jemalloc/internal/assert.h"
#define BIT_UTIL_INLINE static inline
/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
|| !defined(JEMALLOC_INTERNAL_FFS)
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif
BIT_UTIL_INLINE unsigned
ffs_llu(unsigned long long bitmap) {
return JEMALLOC_INTERNAL_FFSLL(bitmap);
}
BIT_UTIL_INLINE unsigned
ffs_lu(unsigned long bitmap) {
return JEMALLOC_INTERNAL_FFSL(bitmap);
}
BIT_UTIL_INLINE unsigned
ffs_u(unsigned bitmap) {
return JEMALLOC_INTERNAL_FFS(bitmap);
}
BIT_UTIL_INLINE unsigned
ffs_zu(size_t bitmap) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
return ffs_u(bitmap);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
return ffs_lu(bitmap);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
return ffs_llu(bitmap);
#else
#error No implementation for size_t ffs()
#endif
}
BIT_UTIL_INLINE unsigned
ffs_u64(uint64_t bitmap) {
#if LG_SIZEOF_LONG == 3
return ffs_lu(bitmap);
#elif LG_SIZEOF_LONG_LONG == 3
return ffs_llu(bitmap);
#else
#error No implementation for 64-bit ffs()
#endif
}
BIT_UTIL_INLINE unsigned
ffs_u32(uint32_t bitmap) {
#if LG_SIZEOF_INT == 2
return ffs_u(bitmap);
#else
#error No implementation for 32-bit ffs()
#endif
return ffs_u(bitmap);
}
BIT_UTIL_INLINE uint64_t
pow2_ceil_u64(uint64_t x) {
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x |= x >> 32;
x++;
return x;
}
BIT_UTIL_INLINE uint32_t
pow2_ceil_u32(uint32_t x) {
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x++;
return x;
}
/* Compute the smallest power of 2 that is >= x. */
BIT_UTIL_INLINE size_t
pow2_ceil_zu(size_t x) {
#if (LG_SIZEOF_PTR == 3)
return pow2_ceil_u64(x);
#else
return pow2_ceil_u32(x);
#endif
}
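/*
 * Illustrative walk-through (not part of jemalloc): pow2_ceil_u64(37) first
 * decrements to 36 (0b100100), the shifted ORs smear that to 0b111111 (63),
 * and the final increment yields 64. Powers of two map to themselves, since
 * the initial decrement drops below the boundary first:
 * pow2_ceil_u64(64) == 64.
 */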
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
size_t ret;
assert(x != 0);
asm ("bsr %1, %0"
: "=r"(ret) // Outputs.
: "r"(x) // Inputs.
);
assert(ret < UINT_MAX);
return (unsigned)ret;
}
#elif (defined(_MSC_VER))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
unsigned long ret;
assert(x != 0);
#if (LG_SIZEOF_PTR == 3)
_BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
_BitScanReverse(&ret, x);
#else
# error "Unsupported type size for lg_floor()"
#endif
assert(ret < UINT_MAX);
return (unsigned)ret;
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
assert(x != 0);
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
#else
# error "Unsupported type size for lg_floor()"
#endif
}
#else
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
assert(x != 0);
x |= (x >> 1);
x |= (x >> 2);
x |= (x >> 4);
x |= (x >> 8);
x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3)
x |= (x >> 32);
#endif
if (x == SIZE_T_MAX) {
return (8 << LG_SIZEOF_PTR) - 1;
}
x++;
return ffs_zu(x) - 2;
}
#endif
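/*
 * Illustrative examples (not part of jemalloc): every implementation above
 * returns the index of the highest set bit, e.g. lg_floor(1) == 0,
 * lg_floor(10) == 3, lg_floor(1024) == 10.
 */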
#undef BIT_UTIL_INLINE
#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */

View File

@@ -1,37 +1,159 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_BITMAP_H
#define JEMALLOC_INTERNAL_BITMAP_H
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/size_classes.h"
typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#if LG_SLAB_MAXREGS > LG_CEIL_NSIZES
/* Maximum bitmap bit count is determined by maximum regions per slab. */
# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
#else
/* Maximum bitmap bit count is determined by number of extent size classes. */
# define LG_BITMAP_MAXBITS LG_CEIL_NSIZES
#endif
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
/* Number of bits per group. */
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
/*
* Decide whether the bitmap is big enough to justify a tree: if a brute-force
* linear search would have to call ffs_lu() more than 2^3 times, use a tree
* instead.
*/
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
# define BITMAP_USE_TREE
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
(((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
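/*
 * Illustrative examples (not part of jemalloc): with 64-bit groups
 * (LG_BITMAP_GROUP_NBITS == 6), BITMAP_BITS2GROUPS(1) == 1,
 * BITMAP_BITS2GROUPS(64) == 1, and BITMAP_BITS2GROUPS(65) == 2.
 */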
struct bitmap_level_s {
/*
* Number of groups required at a particular level for a given number of bits.
*/
#define BITMAP_GROUPS_L0(nbits) \
BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_L1(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
#define BITMAP_GROUPS_L2(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
#define BITMAP_GROUPS_L3(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS((nbits)))))
#define BITMAP_GROUPS_L4(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
/*
* Assuming the number of levels, number of groups required for a given number
* of bits.
*/
#define BITMAP_GROUPS_1_LEVEL(nbits) \
BITMAP_GROUPS_L0(nbits)
#define BITMAP_GROUPS_2_LEVEL(nbits) \
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
#define BITMAP_GROUPS_3_LEVEL(nbits) \
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
#define BITMAP_GROUPS_4_LEVEL(nbits) \
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
#define BITMAP_GROUPS_5_LEVEL(nbits) \
(BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
/*
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
*/
#ifdef BITMAP_USE_TREE
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
#else
# error "Unsupported bitmap size"
#endif
/*
* Maximum number of levels possible. This could be statically computed based
* on LG_BITMAP_MAXBITS:
*
* #define BITMAP_MAX_LEVELS \
* (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
* + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
*
* However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
* instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
* various cascading macros. The only additional cost this incurs is some
* unused trailing entries in bitmap_info_t structures; the bitmaps themselves
* are not impacted.
*/
#define BITMAP_MAX_LEVELS 5
#define BITMAP_INFO_INITIALIZER(nbits) { \
/* nbits. */ \
nbits, \
/* nlevels. */ \
(BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
(BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
(BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
(BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
/* levels. */ \
{ \
{0}, \
{BITMAP_GROUPS_L0(nbits)}, \
{BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
{BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
BITMAP_GROUPS_L0(nbits)}, \
{BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
{BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
+ BITMAP_GROUPS_L0(nbits)} \
} \
}
#else /* BITMAP_USE_TREE */
#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
#define BITMAP_INFO_INITIALIZER(nbits) { \
/* nbits. */ \
nbits, \
/* ngroups. */ \
BITMAP_BITS2GROUPS(nbits) \
}
#endif /* BITMAP_USE_TREE */
typedef struct bitmap_level_s {
/* Offset of this level's groups within the array of groups. */
size_t group_offset;
};
} bitmap_level_t;
struct bitmap_info_s {
typedef struct bitmap_info_s {
/* Logical number of bits in bitmap (stored at bottom level). */
size_t nbits;
#ifdef BITMAP_USE_TREE
/* Number of levels necessary for nbits. */
unsigned nlevels;
@@ -40,67 +162,62 @@ struct bitmap_info_s {
* bottom to top (e.g. the bottom level is stored in levels[0]).
*/
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#else /* BITMAP_USE_TREE */
/* Number of groups necessary for nbits. */
size_t ngroups;
#endif /* BITMAP_USE_TREE */
} bitmap_info_t;
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
size_t bitmap_info_ngroups(const bitmap_info_t *binfo);
size_t bitmap_size(size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
size_t bitmap_size(const bitmap_info_t *binfo);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
static inline bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
#ifdef BITMAP_USE_TREE
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
bitmap_t rg = bitmap[rgoff];
/* The bitmap is full iff the root group is 0. */
return (rg == 0);
#else
size_t i;
for (i = 0; i < binfo->ngroups; i++) {
if (bitmap[i] != 0) {
return false;
}
}
return true;
#endif
}
JEMALLOC_INLINE bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
static inline bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t g;
assert(bit < binfo->nbits);
goff = bit >> LG_BITMAP_GROUP_NBITS;
g = bitmap[goff];
return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
}
JEMALLOC_INLINE void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
static inline void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t *gp;
bitmap_t g;
assert(bit < binfo->nbits);
assert(!bitmap_get(bitmap, binfo, bit));
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(bitmap_get(bitmap, binfo, bit));
#ifdef BITMAP_USE_TREE
/* Propagate group state transitions up the tree. */
if (g == 0) {
unsigned i;
@@ -109,45 +226,113 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (g != 0) {
break;
}
}
}
#endif
}
/* ffu: find first unset >= bit. */
static inline size_t
bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
assert(min_bit < binfo->nbits);
#ifdef BITMAP_USE_TREE
size_t bit = 0;
for (unsigned level = binfo->nlevels; level--;) {
size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level +
1));
bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
>> lg_bits_per_group)];
unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
assert(group_nmask <= BITMAP_GROUP_NBITS);
bitmap_t group_mask = ~((1LU << group_nmask) - 1);
bitmap_t group_masked = group & group_mask;
if (group_masked == 0LU) {
if (group == 0LU) {
return binfo->nbits;
}
/*
* min_bit was preceded by one or more unset bits in
* this group, but there are no other unset bits in this
* group. Try again starting at the first bit of the
* next sibling. This will recurse at most once per
* non-root level.
*/
size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
assert(sib_base > min_bit);
assert(sib_base > bit);
if (sib_base >= binfo->nbits) {
return binfo->nbits;
}
return bitmap_ffu(bitmap, binfo, sib_base);
}
bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
(lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
}
assert(bit >= min_bit);
assert(bit < binfo->nbits);
return bit;
#else
size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
- 1);
size_t bit;
do {
bit = ffs_lu(g);
if (bit != 0) {
return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
}
i++;
g = bitmap[i];
} while (i < binfo->ngroups);
return binfo->nbits;
#endif
}
/* sfu: set first unset. */
static inline size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
size_t bit;
bitmap_t g;
unsigned i;
assert(!bitmap_full(bitmap, binfo));
#ifdef BITMAP_USE_TREE
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
bit = ffs_lu(g) - 1;
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
}
#else
i = 0;
g = bitmap[0];
while ((bit = ffs_lu(g)) == 0) {
i++;
g = bitmap[i];
}
bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
#endif
bitmap_set(bitmap, binfo, bit);
return bit;
}
static inline void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t *gp;
bitmap_t g;
UNUSED bool propagate;
assert(bit < binfo->nbits);
assert(bitmap_get(bitmap, binfo, bit));
@@ -155,10 +340,11 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
gp = &bitmap[goff];
g = *gp;
propagate = (g == 0);
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(!bitmap_get(bitmap, binfo, bit));
#ifdef BITMAP_USE_TREE
/* Propagate group state transitions up the tree. */
if (propagate) {
unsigned i;
@@ -168,17 +354,16 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
propagate = (g == 0);
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
== 0);
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (!propagate) {
break;
}
}
}
#endif /* BITMAP_USE_TREE */
}
#endif /* JEMALLOC_INTERNAL_BITMAP_H */
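
The group words store bits inverted relative to the API: a 1 bit means the logical bit is still "unset" (free), which is why bitmap_full() can simply compare groups against 0. A hedged in-tree usage sketch of the interface above (assumes jemalloc's internal headers; treating fill=false as "start with every bit unset" is an assumption about the flag's polarity):

/* In-tree sketch only; these types and calls are jemalloc-internal. */
bitmap_info_t binfo;
bitmap_info_init(&binfo, 512);            /* logical capacity: 512 bits */
bitmap_t groups[BITMAP_GROUPS_MAX];       /* worst-case backing storage */
bitmap_init(groups, &binfo, false);       /* assumed: starts all-unset */

size_t bit = bitmap_sfu(groups, &binfo);  /* set-first-unset claims bit 0 */
assert(bit == 0);
assert(bitmap_get(groups, &binfo, bit));
bitmap_unset(groups, &binfo, bit);        /* release it again */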

View File

@@ -1,12 +1,13 @@
#ifndef JEMALLOC_INTERNAL_CKH_H
#define JEMALLOC_INTERNAL_CKH_H
#include "jemalloc/internal/tsd.h"
/* Cuckoo hashing implementation. Skip to the end for the interface. */
/******************************************************************************/
/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/
/* Maintain counters used to get an idea of performance. */
/* #define CKH_COUNT */
@@ -19,17 +20,18 @@ typedef bool ckh_keycomp_t (const void *, const void *);
*/
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);
/* Hash table cell. */
typedef struct {
const void *key;
const void *data;
} ckhc_t;
/* The hash table itself. */
typedef struct {
#ifdef CKH_COUNT
/* Counters used to get an idea of performance. */
uint64_t ngrows;
@@ -40,9 +42,7 @@ struct ckh_s {
#endif
/* Used for pseudo-random number generation. */
uint64_t prng_state;
/* Total number of items. */
size_t count;
@@ -60,29 +60,42 @@ struct ckh_s {
/* Hash table with 2^lg_curbuckets buckets. */
ckhc_t *tab;
} ckh_t;
/******************************************************************************/
/* BEGIN PUBLIC API */
/******************************************************************************/
/* Lifetime management. Minitems is the initial capacity. */
bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp);
void ckh_delete(tsd_t *tsd, ckh_t *ckh);
/* Get the number of elements in the set. */
size_t ckh_count(ckh_t *ckh);
/*
* To iterate over the elements in the table, initialize *tabind to 0 and call
* this function until it returns true. Each call that returns false will
* update *key and *data to the next element in the table, assuming the pointers
* are non-NULL.
*/
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
/*
* Basic hash table operations -- insert, removal, lookup. For ckh_remove and
* ckh_search, key or data can be NULL. The hash-table only stores pointers to
* the key and value, and doesn't do any lifetime management.
*/
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
/* Some useful hash and comparison functions for strings and pointers. */
void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool ckh_pointer_keycomp(const void *k1, const void *k2);
#endif /* JEMALLOC_INTERNAL_CKH_H */
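
A hedged usage sketch of the public API above, assuming compilation inside the jemalloc tree (tsd_fetch() and the string helpers are internal symbols; error handling is abbreviated):

/* In-tree sketch only. The table stores the key/value pointers verbatim. */
tsd_t *tsd = tsd_fetch();
ckh_t ckh;
if (!ckh_new(tsd, &ckh, 16, ckh_string_hash, ckh_string_keycomp)) {
	ckh_insert(tsd, &ckh, "key", "value");

	void *k, *v;
	if (!ckh_search(&ckh, "key", &k, &v)) {
		/* Found: k == "key", v == "value". */
	}

	/* Iterate: tabind starts at 0; false means *k/*v were updated. */
	size_t tabind = 0;
	while (!ckh_iter(&ckh, &tabind, &k, &v)) {
		/* Visit (k, v). */
	}
	ckh_delete(tsd, &ckh);
}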

View File

@@ -1,41 +1,37 @@
#ifndef JEMALLOC_INTERNAL_CTL_H
#define JEMALLOC_INTERNAL_CTL_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
/* Maximum ctl tree depth. */
#define CTL_MAX_DEPTH 7
typedef struct ctl_node_s {
bool named;
} ctl_node_t;
typedef struct ctl_named_node_s {
ctl_node_t node;
const char *name;
/* If (nchildren == 0), this is a terminal node. */
size_t nchildren;
const ctl_node_t *children;
int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
size_t);
} ctl_named_node_t;
typedef struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
size_t);
} ctl_indexed_node_t;
typedef struct ctl_arena_stats_s {
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
@@ -45,41 +41,64 @@ struct ctl_arena_stats_s {
uint64_t nrequests_small;
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t lstats[NSIZES - NBINS];
} ctl_arena_stats_t;
typedef struct ctl_stats_s {
size_t allocated;
size_t active;
size_t metadata;
size_t resident;
size_t mapped;
size_t retained;
background_thread_stats_t background_thread;
mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
} ctl_stats_t;
typedef struct ctl_arena_s ctl_arena_t;
struct ctl_arena_s {
unsigned arena_ind;
bool initialized;
ql_elm(ctl_arena_t) destroyed_link;
/* Basic stats, supported even if !config_stats. */
unsigned nthreads;
const char *dss;
ssize_t dirty_decay_ms;
ssize_t muzzy_decay_ms;
size_t pactive;
size_t pdirty;
size_t pmuzzy;
/* NULL if !config_stats. */
ctl_arena_stats_t *astats;
};
typedef struct ctl_arenas_s {
uint64_t epoch;
unsigned narenas;
ql_head(ctl_arena_t) destroyed;
/*
* Element 0 corresponds to merged stats for extant arenas (accessed via
* MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
* destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
* remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
*/
ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
} ctl_arenas_t;
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
@@ -108,10 +127,4 @@ void ctl_postfork_child(void);
} \
} while (0)
#endif /* JEMALLOC_INTERNAL_CTL_H */
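
The ctl tree is what backs the public mallctl() namespace (the xmallctl() wrapper above simply aborts on failure). A minimal, standard example against the public API, not the internal functions:

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	/* Refresh the cached statistics before reading them. */
	mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

	size_t allocated;
	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
		printf("allocated: %zu bytes\n", allocated);
	}
	return 0;
}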

View File

@@ -0,0 +1,26 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
#define JEMALLOC_INTERNAL_EXTENT_DSS_H
typedef enum {
dss_prec_disabled = 0,
dss_prec_primary = 1,
dss_prec_secondary = 2,
dss_prec_limit = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
extern const char *dss_prec_names[];
extern const char *opt_dss;
dss_prec_t extent_dss_prec_get(void);
bool extent_dss_prec_set(dss_prec_t dss_prec);
void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit);
bool extent_in_dss(void *addr);
bool extent_dss_mergeable(void *addr_a, void *addr_b);
void extent_dss_boot(void);
#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
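
Since dss_prec_names[] is indexed by dss_prec_t, mapping an option string back to the enum is a linear scan. A hedged sketch (the helper name is hypothetical, and it assumes dss_prec_names[] has at least dss_prec_limit valid entries):

#include <string.h>

/* Hypothetical helper; returns false on success, jemalloc-style. */
static bool
dss_prec_from_str(const char *str, dss_prec_t *prec) {
	for (unsigned i = 0; i < (unsigned)dss_prec_limit; i++) {
		if (strcmp(str, dss_prec_names[i]) == 0) {
			*prec = (dss_prec_t)i;
			return false;
		}
	}
	return true; /* unknown name */
}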

View File

@@ -0,0 +1,72 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/rtree.h"
extern rtree_t extents_rtree;
extern const extent_hooks_t extent_hooks_default;
extern mutex_pool_t extent_mutex_pool;
extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
extent_hooks_t *extent_hooks_get(arena_t *arena);
extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
extent_hooks_t *extent_hooks);
#ifdef JEMALLOC_JET
size_t extent_size_quantize_floor(size_t size);
size_t extent_size_quantize_ceil(size_t size);
#endif
rb_proto(, extent_avail_, extent_tree_t, extent_t)
ph_proto(, extent_heap_, extent_heap_t, extent_t)
bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
bool delay_coalesce);
extent_state_t extents_state_get(const extents_t *extents);
size_t extents_npages_get(extents_t *extents);
extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
bool *zero, bool *commit);
void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
void extents_prefork(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);
bool extent_boot(void);
#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */

View File

@@ -0,0 +1,407 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sz.h"
static inline void
extent_lock(tsdn_t *tsdn, extent_t *extent) {
assert(extent != NULL);
mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}
static inline void
extent_unlock(tsdn_t *tsdn, extent_t *extent) {
assert(extent != NULL);
mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}
static inline void
extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
assert(extent1 != NULL && extent2 != NULL);
mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
(uintptr_t)extent2);
}
static inline void
extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
assert(extent1 != NULL && extent2 != NULL);
mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
(uintptr_t)extent2);
}
static inline arena_t *
extent_arena_get(const extent_t *extent) {
unsigned arena_ind = (unsigned)((extent->e_bits &
EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
/*
* The following check is omitted because we should never actually read
* a NULL arena pointer.
*/
if (false && arena_ind >= MALLOCX_ARENA_LIMIT) {
return NULL;
}
assert(arena_ind < MALLOCX_ARENA_LIMIT);
return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
}
static inline szind_t
extent_szind_get_maybe_invalid(const extent_t *extent) {
szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
EXTENT_BITS_SZIND_SHIFT);
assert(szind <= NSIZES);
return szind;
}
static inline szind_t
extent_szind_get(const extent_t *extent) {
szind_t szind = extent_szind_get_maybe_invalid(extent);
assert(szind < NSIZES); /* Never call when "invalid". */
return szind;
}
static inline size_t
extent_usize_get(const extent_t *extent) {
return sz_index2size(extent_szind_get(extent));
}
static inline size_t
extent_sn_get(const extent_t *extent) {
return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
EXTENT_BITS_SN_SHIFT);
}
static inline extent_state_t
extent_state_get(const extent_t *extent) {
return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
EXTENT_BITS_STATE_SHIFT);
}
static inline bool
extent_zeroed_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
EXTENT_BITS_ZEROED_SHIFT);
}
static inline bool
extent_committed_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
EXTENT_BITS_COMMITTED_SHIFT);
}
static inline bool
extent_slab_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
EXTENT_BITS_SLAB_SHIFT);
}
static inline unsigned
extent_nfree_get(const extent_t *extent) {
assert(extent_slab_get(extent));
return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
EXTENT_BITS_NFREE_SHIFT);
}
static inline void *
extent_base_get(const extent_t *extent) {
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent_slab_get(extent));
return PAGE_ADDR2BASE(extent->e_addr);
}
static inline void *
extent_addr_get(const extent_t *extent) {
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent_slab_get(extent));
return extent->e_addr;
}
static inline size_t
extent_size_get(const extent_t *extent) {
return (extent->e_size_esn & EXTENT_SIZE_MASK);
}
static inline size_t
extent_esn_get(const extent_t *extent) {
return (extent->e_size_esn & EXTENT_ESN_MASK);
}
static inline size_t
extent_bsize_get(const extent_t *extent) {
return extent->e_bsize;
}
static inline void *
extent_before_get(const extent_t *extent) {
return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
}
static inline void *
extent_last_get(const extent_t *extent) {
return (void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent) - PAGE);
}
static inline void *
extent_past_get(const extent_t *extent) {
return (void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent));
}
static inline arena_slab_data_t *
extent_slab_data_get(extent_t *extent) {
assert(extent_slab_get(extent));
return &extent->e_slab_data;
}
static inline const arena_slab_data_t *
extent_slab_data_get_const(const extent_t *extent) {
assert(extent_slab_get(extent));
return &extent->e_slab_data;
}
static inline prof_tctx_t *
extent_prof_tctx_get(const extent_t *extent) {
return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
ATOMIC_ACQUIRE);
}
static inline void
extent_arena_set(extent_t *extent, arena_t *arena) {
unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
MALLOCX_ARENA_BITS) - 1);
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
}
static inline void
extent_addr_set(extent_t *extent, void *addr) {
extent->e_addr = addr;
}
static inline void
extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
assert(extent_base_get(extent) == extent_addr_get(extent));
if (alignment < PAGE) {
unsigned lg_range = LG_PAGE -
lg_floor(CACHELINE_CEILING(alignment));
size_t r =
prng_lg_range_zu(&extent_arena_get(extent)->offset_state,
lg_range, true);
uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
lg_range);
extent->e_addr = (void *)((uintptr_t)extent->e_addr +
random_offset);
assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
extent->e_addr);
}
}
static inline void
extent_size_set(extent_t *extent, size_t size) {
assert((size & ~EXTENT_SIZE_MASK) == 0);
extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
}
static inline void
extent_esn_set(extent_t *extent, size_t esn) {
extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
EXTENT_ESN_MASK);
}
static inline void
extent_bsize_set(extent_t *extent, size_t bsize) {
extent->e_bsize = bsize;
}
static inline void
extent_szind_set(extent_t *extent, szind_t szind) {
assert(szind <= NSIZES); /* NSIZES means "invalid". */
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
}
static inline void
extent_nfree_set(extent_t *extent, unsigned nfree) {
assert(extent_slab_get(extent));
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
}
static inline void
extent_nfree_inc(extent_t *extent) {
assert(extent_slab_get(extent));
extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
}
static inline void
extent_nfree_dec(extent_t *extent) {
assert(extent_slab_get(extent));
extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
}
static inline void
extent_sn_set(extent_t *extent, size_t sn) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
}
static inline void
extent_state_set(extent_t *extent, extent_state_t state) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
}
static inline void
extent_zeroed_set(extent_t *extent, bool zeroed) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
}
static inline void
extent_committed_set(extent_t *extent, bool committed) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
}
static inline void
extent_slab_set(extent_t *extent, bool slab) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
}
static inline void
extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
}
static inline void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
bool committed) {
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
extent_arena_set(extent, arena);
extent_addr_set(extent, addr);
extent_size_set(extent, size);
extent_slab_set(extent, slab);
extent_szind_set(extent, szind);
extent_sn_set(extent, sn);
extent_state_set(extent, state);
extent_zeroed_set(extent, zeroed);
extent_committed_set(extent, committed);
ql_elm_new(extent, ql_link);
if (config_prof) {
extent_prof_tctx_set(extent, NULL);
}
}
static inline void
extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
extent_arena_set(extent, NULL);
extent_addr_set(extent, addr);
extent_bsize_set(extent, bsize);
extent_slab_set(extent, false);
extent_szind_set(extent, NSIZES);
extent_sn_set(extent, sn);
extent_state_set(extent, extent_state_active);
extent_zeroed_set(extent, true);
extent_committed_set(extent, true);
}
static inline void
extent_list_init(extent_list_t *list) {
ql_new(list);
}
static inline extent_t *
extent_list_first(const extent_list_t *list) {
return ql_first(list);
}
static inline extent_t *
extent_list_last(const extent_list_t *list) {
return ql_last(list, ql_link);
}
static inline void
extent_list_append(extent_list_t *list, extent_t *extent) {
ql_tail_insert(list, extent, ql_link);
}
static inline void
extent_list_replace(extent_list_t *list, extent_t *to_remove,
extent_t *to_insert) {
ql_after_insert(to_remove, to_insert, ql_link);
ql_remove(list, to_remove, ql_link);
}
static inline void
extent_list_remove(extent_list_t *list, extent_t *extent) {
ql_remove(list, extent, ql_link);
}
static inline int
extent_sn_comp(const extent_t *a, const extent_t *b) {
size_t a_sn = extent_sn_get(a);
size_t b_sn = extent_sn_get(b);
return (a_sn > b_sn) - (a_sn < b_sn);
}
static inline int
extent_esn_comp(const extent_t *a, const extent_t *b) {
size_t a_esn = extent_esn_get(a);
size_t b_esn = extent_esn_get(b);
return (a_esn > b_esn) - (a_esn < b_esn);
}
static inline int
extent_ad_comp(const extent_t *a, const extent_t *b) {
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
return (a_addr > b_addr) - (a_addr < b_addr);
}
static inline int
extent_ead_comp(const extent_t *a, const extent_t *b) {
uintptr_t a_eaddr = (uintptr_t)a;
uintptr_t b_eaddr = (uintptr_t)b;
return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
}
static inline int
extent_snad_comp(const extent_t *a, const extent_t *b) {
int ret;
ret = extent_sn_comp(a, b);
if (ret != 0) {
return ret;
}
ret = extent_ad_comp(a, b);
return ret;
}
static inline int
extent_esnead_comp(const extent_t *a, const extent_t *b) {
int ret;
ret = extent_esn_comp(a, b);
if (ret != 0) {
return ret;
}
ret = extent_ead_comp(a, b);
return ret;
}
#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
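
As a worked example of extent_addr_randomize(): with 4 KiB pages (LG_PAGE = 12) and alignment = 64, CACHELINE_CEILING(64) = 64 and lg_floor(64) = 6, so lg_range = 12 - 6 = 6. prng_lg_range_zu() then yields r in [0, 64), and random_offset = r << (12 - 6) is a multiple of 64 below the page size, so the extent's address lands on one of 64 cacheline-aligned slots within its first page, which the final assert re-checks.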

View File

@@ -0,0 +1,10 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
extern bool opt_retain;
void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit);
bool extent_dalloc_mmap(void *addr, size_t size);
#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */

View File

@@ -0,0 +1,199 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/size_classes.h"
typedef enum {
extent_state_active = 0,
extent_state_dirty = 1,
extent_state_muzzy = 2,
extent_state_retained = 3
} extent_state_t;
/* Extent (span of pages). Use accessor functions for e_* fields. */
struct extent_s {
/*
* Bitfield containing several fields:
*
* a: arena_ind
* b: slab
* c: committed
* z: zeroed
* t: state
* i: szind
* f: nfree
* n: sn
*
* nnnnnnnn ... nnnnnfff fffffffi iiiiiiit tzcbaaaa aaaaaaaa
*
* arena_ind: Arena from which this extent came, or all 1 bits if
* unassociated.
*
* slab: The slab flag indicates whether the extent is used for a slab
* of small regions. This helps differentiate small size classes,
* and it indicates whether interior pointers can be looked up via
* iealloc().
*
* committed: The committed flag indicates whether physical memory is
* committed to the extent, whether explicitly or implicitly
* as on a system that overcommits and satisfies physical
* memory needs on demand via soft page faults.
*
* zeroed: The zeroed flag is used by extent recycling code to track
* whether memory is zero-filled.
*
* state: The state flag is an extent_state_t.
*
* szind: The szind flag indicates usable size class index for
* allocations residing in this extent, regardless of whether the
* extent is a slab. Extent size and usable size often differ
* even for non-slabs, either due to sz_large_pad or promotion of
* sampled small regions.
*
* nfree: Number of free regions in slab.
*
* sn: Serial number (potentially non-unique).
*
* Serial numbers may wrap around if !opt_retain, but as long as
* comparison functions fall back on address comparison for equal
* serial numbers, stable (if imperfect) ordering is maintained.
*
* Serial numbers may not be unique even in the absence of
* wrap-around, e.g. when splitting an extent and assigning the same
* serial number to both resulting adjacent extents.
*/
uint64_t e_bits;
#define EXTENT_BITS_ARENA_SHIFT 0
#define EXTENT_BITS_ARENA_MASK \
(((uint64_t)(1U << MALLOCX_ARENA_BITS) - 1) << EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_SHIFT MALLOCX_ARENA_BITS
#define EXTENT_BITS_SLAB_MASK \
((uint64_t)0x1U << EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_SHIFT (MALLOCX_ARENA_BITS + 1)
#define EXTENT_BITS_COMMITTED_MASK \
((uint64_t)0x1U << EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_ZEROED_SHIFT (MALLOCX_ARENA_BITS + 2)
#define EXTENT_BITS_ZEROED_MASK \
((uint64_t)0x1U << EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_SHIFT (MALLOCX_ARENA_BITS + 3)
#define EXTENT_BITS_STATE_MASK \
((uint64_t)0x3U << EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_SHIFT (MALLOCX_ARENA_BITS + 5)
#define EXTENT_BITS_SZIND_MASK \
(((uint64_t)(1U << LG_CEIL_NSIZES) - 1) << EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_SHIFT \
(MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES)
#define EXTENT_BITS_NFREE_MASK \
((uint64_t)((1U << (LG_SLAB_MAXREGS + 1)) - 1) << EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_SN_SHIFT \
(MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1))
#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
/* Pointer to the extent that this structure is responsible for. */
void *e_addr;
union {
/*
* Extent size and serial number associated with the extent
* structure (different than the serial number for the extent at
* e_addr).
*
* ssssssss [...] ssssssss ssssnnnn nnnnnnnn
*/
size_t e_size_esn;
#define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
#define EXTENT_ESN_MASK ((size_t)PAGE-1)
/* Base extent size, which may not be a multiple of PAGE. */
size_t e_bsize;
};
union {
/*
* List linkage, used by a variety of lists:
* - arena_bin_t's slabs_full
* - extents_t's LRU
* - stashed dirty extents
* - arena's large allocations
*/
ql_elm(extent_t) ql_link;
/* Red-black tree linkage, used by arena's extent_avail. */
rb_node(extent_t) rb_link;
};
/* Linkage for per size class sn/address-ordered heaps. */
phn(extent_t) ph_link;
union {
/* Small region slab metadata. */
arena_slab_data_t e_slab_data;
/*
* Profile counters, used for large objects. Points to a
* prof_tctx_t.
*/
atomic_p_t e_prof_tctx;
};
};
typedef ql_head(extent_t) extent_list_t;
typedef rb_tree(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;
/* Quantized collection of extents, with built-in LRU queue. */
struct extents_s {
malloc_mutex_t mtx;
/*
* Quantized per size class heaps of extents.
*
* Synchronization: mtx.
*/
extent_heap_t heaps[NPSIZES+1];
/*
* Bitmap for which set bits correspond to non-empty heaps.
*
* Synchronization: mtx.
*/
bitmap_t bitmap[BITMAP_GROUPS(NPSIZES+1)];
/*
* LRU of all extents in heaps.
*
* Synchronization: mtx.
*/
extent_list_t lru;
/*
* Page sum for all extents in heaps.
*
* The synchronization here is a little tricky. Modifications to npages
* must hold mtx, but reads need not (though, a reader who sees npages
* without holding the mutex can't assume anything about the rest of the
* state of the extents_t).
*/
atomic_zu_t npages;
/* All stored extents must be in the same state. */
extent_state_t state;
/*
* If true, delay coalescing until eviction; otherwise coalesce during
* deallocation.
*/
bool delay_coalesce;
};
#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
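
The accessors in extent_inlines.h manipulate e_bits with the same clear-then-or pattern throughout. A standalone demo of that pattern with an illustrative two-bit field (the real shifts and widths depend on MALLOCX_ARENA_BITS and the size-class configuration, so these constants are not jemalloc's):

#include <assert.h>
#include <stdint.h>

/* Illustrative layout only; not the actual extent bitfield. */
#define DEMO_STATE_SHIFT 14
#define DEMO_STATE_MASK ((uint64_t)0x3U << DEMO_STATE_SHIFT)

static void
demo_state_set(uint64_t *bits, unsigned state) {
	/* Clear the field, then or in the new value. */
	*bits = (*bits & ~DEMO_STATE_MASK) |
	    ((uint64_t)state << DEMO_STATE_SHIFT);
}

static unsigned
demo_state_get(uint64_t bits) {
	return (unsigned)((bits & DEMO_STATE_MASK) >> DEMO_STATE_SHIFT);
}

int
main(void) {
	uint64_t bits = 0;
	demo_state_set(&bits, 2);          /* e.g. extent_state_muzzy */
	assert(demo_state_get(bits) == 2);
	demo_state_set(&bits, 1);          /* overwrites in place */
	assert(demo_state_get(bits) == 1);
	return 0;
}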

View File

@@ -0,0 +1,9 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H
typedef struct extent_s extent_t;
typedef struct extents_s extents_t;
#define EXTENT_HOOKS_INITIALIZER NULL
#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */

View File

@@ -1,92 +1,76 @@
#ifndef JEMALLOC_INTERNAL_HASH_H
#define JEMALLOC_INTERNAL_HASH_H
#include "jemalloc/internal/assert.h"
/*
* The following hash function is based on MurmurHash3, placed into the public
* domain by Austin Appleby. See http://code.google.com/p/smhasher/ for
* domain by Austin Appleby. See https://github.com/aappleby/smhasher for
* details.
*/
/******************************************************************************/
/* Internal implementation. */
static inline uint32_t
hash_rotl_32(uint32_t x, int8_t r) {
return ((x << r) | (x >> (32 - r)));
}
static inline uint64_t
hash_rotl_64(uint64_t x, int8_t r) {
return ((x << r) | (x >> (64 - r)));
}
static inline uint32_t
hash_get_block_32(const uint32_t *p, int i) {
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
uint32_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
return ret;
}
return p[i];
}
static inline uint64_t
hash_get_block_64(const uint64_t *p, int i) {
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
uint64_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
return ret;
}
return p[i];
}
static inline uint32_t
hash_fmix_32(uint32_t h) {
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
static inline uint64_t
hash_fmix_64(uint64_t k) {
k ^= k >> 33;
k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return k;
}
static inline uint32_t
hash_x86_32(const void *key, int len, uint32_t seed) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
@@ -132,13 +116,12 @@ hash_x86_32(const void *key, int len, uint32_t seed)
h1 = hash_fmix_32(h1);
return h1;
}
UNUSED static inline void
hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2]) {
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
@@ -237,18 +220,17 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
r_out[1] = (((uint64_t) h4) << 32) | h3;
}
UNUSED static inline void
hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2]) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
uint64_t h1 = seed;
uint64_t h2 = seed;
const uint64_t c1 = KQU(0x87c37b91114253d5);
const uint64_t c2 = KQU(0x4cf5ad432745937f);
/* body */
{
@@ -317,19 +299,20 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
/******************************************************************************/
/* API. */
static inline void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
#else
{
uint64_t hashes[2];
hash_x86_128(key, (int)len, seed, hashes);
r_hash[0] = (size_t)hashes[0];
r_hash[1] = (size_t)hashes[1];
}
#endif
}
#endif /* JEMALLOC_INTERNAL_HASH_H */
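
A hedged in-tree sketch of the hash() entry point above (the seed is arbitrary; internally the call dispatches to hash_x64_128() or hash_x86_128() depending on pointer width and endianness):

/* In-tree sketch; hash() is internal, not part of the public API. */
const char *key = "example";
size_t r_hash[2];
hash(key, strlen(key), 0x12345678U, r_hash);
/* r_hash[0] is the primary hash; r_hash[1] is a second independent word. */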

View File

@@ -0,0 +1,19 @@
#ifndef JEMALLOC_INTERNAL_HOOKS_H
#define JEMALLOC_INTERNAL_HOOKS_H
extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)();
extern JEMALLOC_EXPORT void (*hooks_libc_hook)();
#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
#define open JEMALLOC_HOOK(open, hooks_libc_hook)
#define read JEMALLOC_HOOK(read, hooks_libc_hook)
#define write JEMALLOC_HOOK(write, hooks_libc_hook)
#define readlink JEMALLOC_HOOK(readlink, hooks_libc_hook)
#define close JEMALLOC_HOOK(close, hooks_libc_hook)
#define creat JEMALLOC_HOOK(creat, hooks_libc_hook)
#define secure_getenv JEMALLOC_HOOK(secure_getenv, hooks_libc_hook)
/* Note that this is undef'd and re-define'd in src/prof.c. */
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)
#endif /* JEMALLOC_INTERNAL_HOOKS_H */
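
JEMALLOC_HOOK() leans on two C idioms: "hook != NULL && (hook(), 0)" calls the hook only when it is set and always evaluates to false, and the outer comma operator discards that result so the whole expression reduces to fn. A standalone demo with hypothetical names:

#include <stdio.h>

#define DEMO_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

static void (*demo_hook)(void) = NULL;

static void
fired(void) {
	puts("hook fired");
}

static int
real_open(const char *path) {
	printf("open(%s)\n", path);
	return 0;
}

int
main(void) {
	DEMO_HOOK(real_open, demo_hook)("a.txt"); /* hook unset: plain call */
	demo_hook = fired;
	DEMO_HOOK(real_open, demo_hook)("b.txt"); /* fires hook, then calls */
	return 0;
}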

View File

@@ -0,0 +1,82 @@
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
#else
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# if defined(SYS_open) && defined(__aarch64__)
/* Android headers may define SYS_open to __NR_open even though
* __NR_open may not exist on AArch64 (superseded by __NR_openat). */
# undef SYS_open
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# include <signal.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
# include <os/lock.h>
# endif
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
# include <sched.h>
# endif
# include <errno.h>
# include <sys/time.h>
# include <time.h>
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# include <mach/mach_time.h>
# endif
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#ifndef SSIZE_MAX
# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
/* Disable warnings about deprecated system functions. */
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c) {
return (c == '\t' || c == ' ');
}
#endif
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#endif /* JEMALLOC_INTERNAL_DECLS_H */

View File

@@ -0,0 +1,53 @@
#ifndef JEMALLOC_INTERNAL_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTERNS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/tsd_types.h"
/* TSD checks this to set thread local slow state accordingly. */
extern bool malloc_slow;
/* Run-time options. */
extern bool opt_abort;
extern bool opt_abort_conf;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern unsigned opt_narenas;
/* Number of CPUs. */
extern unsigned ncpus;
/* Number of arenas used for automatic multiplexing of threads and arenas. */
extern unsigned narenas_auto;
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
*/
extern atomic_p_t arenas[];
void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
void arena_set(unsigned ind, arena_t *arena);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
bool malloc_initialized(void);
#endif /* JEMALLOC_INTERNAL_EXTERNS_H */

View File

@@ -0,0 +1,94 @@
#ifndef JEMALLOC_INTERNAL_INCLUDES_H
#define JEMALLOC_INTERNAL_INCLUDES_H
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
* but there are circular dependencies that cannot be broken without
* substantial performance degradation.
*
 * Historically, we dealt with this by splitting each header into four sections (types,
* structs, externs, and inlines), and included each header file multiple times
* in this file, picking out the portion we want on each pass using the
* following #defines:
 * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
* types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
* JEMALLOC_H_INLINES : Inline functions.
*
* We're moving toward a world in which the dependencies are explicit; each file
* will #include the headers it depends on (rather than relying on them being
* implicitly available via this file including every header file in the
* project).
*
* We're now in an intermediate state: we've broken up the header files to avoid
* having to include each one multiple times, but have not yet moved the
* dependency information into the header files (i.e. we still rely on the
* ordering in this file to ensure all a header's dependencies are available in
* its translation unit). Each component is now broken up into multiple header
* files, corresponding to the sections above (e.g. instead of "foo.h", we now
* have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
*
* Those files which have been converted to explicitly include their
* inter-component dependencies are now in the initial HERMETIC HEADERS
* section. All headers may still rely on jemalloc_preamble.h (which, by fiat,
* must be included first in every translation unit) for system headers and
* global jemalloc definitions, however.
*/
/******************************************************************************/
/* TYPES */
/******************************************************************************/
#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/base_types.h"
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/prof_types.h"
/******************************************************************************/
/* STRUCTS */
/******************************************************************************/
#include "jemalloc/internal/arena_structs_a.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/base_structs.h"
#include "jemalloc/internal/prof_structs.h"
#include "jemalloc/internal/arena_structs_b.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/background_thread_structs.h"
/******************************************************************************/
/* EXTERNS */
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/extent_externs.h"
#include "jemalloc/internal/base_externs.h"
#include "jemalloc/internal/arena_externs.h"
#include "jemalloc/internal/large_externs.h"
#include "jemalloc/internal/tcache_externs.h"
#include "jemalloc/internal/prof_externs.h"
#include "jemalloc/internal/background_thread_externs.h"
/******************************************************************************/
/* INLINES */
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
#include "jemalloc/internal/base_inlines.h"
/*
* Include portions of arena code interleaved with tcache code in order to
* resolve circular dependencies.
*/
#include "jemalloc/internal/prof_inlines_a.h"
#include "jemalloc/internal/arena_inlines_a.h"
#include "jemalloc/internal/extent_inlines.h"
#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
#include "jemalloc/internal/tcache_inlines.h"
#include "jemalloc/internal/arena_inlines_b.h"
#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
#include "jemalloc/internal/prof_inlines_b.h"
#include "jemalloc/internal/background_thread_inlines.h"
#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
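
For readers unfamiliar with the historical scheme described above, the old umbrella header selected one section of each component header per pass, roughly like this sketch (toy names, not actual jemalloc files):

/* toy_foo.h -- one header carrying all four sections. */
#ifdef JEMALLOC_H_TYPES
typedef struct foo_s foo_t;
#endif
#ifdef JEMALLOC_H_STRUCTS
struct foo_s { int x; };
#endif
#ifdef JEMALLOC_H_EXTERNS
void foo_boot(void);
#endif
#ifdef JEMALLOC_H_INLINES
static inline int foo_get(foo_t *foo) { return foo->x; }
#endif

/* The umbrella header then made four passes over every such header: */
#define JEMALLOC_H_TYPES
#include "toy_foo.h"
#undef JEMALLOC_H_TYPES
#define JEMALLOC_H_STRUCTS
#include "toy_foo.h"
#undef JEMALLOC_H_STRUCTS
/* ...and likewise for JEMALLOC_H_EXTERNS and JEMALLOC_H_INLINES. */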

View File

@@ -0,0 +1,171 @@
#ifndef JEMALLOC_INTERNAL_INLINES_A_H
#define JEMALLOC_INTERNAL_INLINES_A_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/ticker.h"
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
malloc_getcpu(void) {
assert(have_percpu_arena);
#if defined(JEMALLOC_HAVE_SCHED_GETCPU)
return (malloc_cpuid_t)sched_getcpu();
#else
not_reached();
return -1;
#endif
}
/* Return the chosen arena index based on current cpu. */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_choose(void) {
assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));
malloc_cpuid_t cpuid = malloc_getcpu();
assert(cpuid >= 0);
unsigned arena_ind;
if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
2)) {
arena_ind = cpuid;
} else {
assert(opt_percpu_arena == per_phycpu_arena);
/* Hyper threads on the same physical CPU share arena. */
arena_ind = cpuid - ncpus / 2;
}
return arena_ind;
}
/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
if (mode == per_phycpu_arena && ncpus > 1) {
if (ncpus % 2) {
/* This likely means a misconfig. */
return ncpus / 2 + 1;
}
return ncpus / 2;
} else {
return ncpus;
}
}
static inline arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
arena_tdata_t *tdata;
arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
if (unlikely(arenas_tdata == NULL)) {
/* arenas_tdata hasn't been initialized yet. */
return arena_tdata_get_hard(tsd, ind);
}
if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
/*
* ind is invalid, cache is old (too small), or tdata to be
* initialized.
*/
return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
NULL);
}
tdata = &arenas_tdata[ind];
if (likely(tdata != NULL) || !refresh_if_missing) {
return tdata;
}
return arena_tdata_get_hard(tsd, ind);
}
static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
arena_t *ret;
assert(ind < MALLOCX_ARENA_LIMIT);
ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
if (unlikely(ret == NULL)) {
if (init_if_missing) {
ret = arena_init(tsdn, ind,
(extent_hooks_t *)&extent_hooks_default);
}
}
return ret;
}
static inline ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind) {
arena_tdata_t *tdata;
tdata = arena_tdata_get(tsd, ind, true);
if (unlikely(tdata == NULL)) {
return NULL;
}
return &tdata->decay_ticker;
}
JEMALLOC_ALWAYS_INLINE tcache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
assert(binind < NBINS);
return &tcache->tbins_small[binind];
}
JEMALLOC_ALWAYS_INLINE tcache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
assert(binind >= NBINS && binind < nhbins);
return &tcache->tbins_large[binind - NBINS];
}
JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
/*
* Thread specific auto tcache might be unavailable if: 1) during tcache
* initialization, or 2) disabled through thread.tcache.enabled mallctl
* or config options. This check covers all cases.
*/
if (likely(tsd_tcache_enabled_get(tsd))) {
/* Associated arena == NULL implies tcache init in progress. */
assert(tsd_tcachep_get(tsd)->arena == NULL ||
tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
NULL);
return true;
}
return false;
}
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd) {
if (!tcache_available(tsd)) {
return NULL;
}
return tsd_tcachep_get(tsd);
}
static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
/* arena is the current context. Reentry from a0 is not allowed. */
assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
bool fast = tsd_fast(tsd);
++*tsd_reentrancy_levelp_get(tsd);
if (fast) {
/* Prepare slow path for reentrancy. */
tsd_slow_update(tsd);
assert(tsd->state == tsd_state_nominal_slow);
}
}
static inline void
post_reentrancy(tsd_t *tsd) {
int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
assert(*reentrancy_level > 0);
if (--*reentrancy_level == 0) {
tsd_slow_update(tsd);
}
}
#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
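
As a worked example of the per-CPU mapping above: with ncpus = 8 in per_phycpu_arena mode, CPUs 0-3 map to arenas 0-3 directly, while their hyperthread siblings 4-7 fold back onto the same arenas via cpuid - ncpus / 2 (CPU 6 shares arena 6 - 4 = 2), and percpu_arena_ind_limit() reports 4. In plain percpu_arena mode every CPU keeps its own arena and the limit is 8.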

View File

@@ -0,0 +1,86 @@
#ifndef JEMALLOC_INTERNAL_INLINES_B_H
#define JEMALLOC_INTERNAL_INLINES_B_H
#include "jemalloc/internal/rtree.h"
/* Choose an arena based on a per-thread value. */
static inline arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
arena_t *ret;
if (arena != NULL) {
return arena;
}
/* During reentrancy, arena 0 is the safest bet. */
if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
return arena_get(tsd_tsdn(tsd), 0, true);
}
ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
if (unlikely(ret == NULL)) {
ret = arena_choose_hard(tsd, internal);
assert(ret);
if (tcache_available(tsd)) {
tcache_t *tcache = tcache_get(tsd);
if (tcache->arena != NULL) {
/* See comments in tcache_data_init(). */
assert(tcache->arena ==
arena_get(tsd_tsdn(tsd), 0, false));
if (tcache->arena != ret) {
tcache_arena_reassociate(tsd_tsdn(tsd),
tcache, ret);
}
} else {
tcache_arena_associate(tsd_tsdn(tsd), tcache,
ret);
}
}
}
/*
* Note that for percpu arena, if the current arena is outside of the
* auto percpu arena range, (i.e. thread is assigned to a manually
* managed arena), then percpu arena is skipped.
*/
if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
!internal && (arena_ind_get(ret) <
percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
tsd_tsdn(tsd))) {
unsigned ind = percpu_arena_choose();
if (arena_ind_get(ret) != ind) {
percpu_arena_update(tsd, ind);
ret = tsd_arena_get(tsd);
}
ret->last_thd = tsd_tsdn(tsd);
}
return ret;
}
static inline arena_t *
arena_choose(tsd_t *tsd, arena_t *arena) {
return arena_choose_impl(tsd, arena, false);
}
static inline arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena) {
return arena_choose_impl(tsd, arena, true);
}
static inline bool
arena_is_auto(arena_t *arena) {
assert(narenas_auto > 0);
return (arena_ind_get(arena) < narenas_auto);
}
JEMALLOC_ALWAYS_INLINE extent_t *
iealloc(tsdn_t *tsdn, const void *ptr) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
}
#endif /* JEMALLOC_INTERNAL_INLINES_B_H */

View File

@@ -0,0 +1,197 @@
#ifndef JEMALLOC_INTERNAL_INLINES_C_H
#define JEMALLOC_INTERNAL_INLINES_C_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/witness.h"
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(tsdn_t *tsdn, const void *ptr) {
assert(ptr != NULL);
return arena_aalloc(tsdn, ptr);
}
JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const void *ptr) {
assert(ptr != NULL);
return arena_salloc(tsdn, ptr);
}
JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
bool is_internal, arena_t *arena, bool slow_path) {
void *ret;
assert(size != 0);
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena == NULL || arena_is_auto(arena));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
if (config_stats && is_internal && likely(ret != NULL)) {
arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
}
return ret;
}
JEMALLOC_ALWAYS_INLINE void *
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
NULL, slow_path);
}
JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_internal, arena_t *arena) {
void *ret;
assert(usize != 0);
assert(usize == sz_sa2u(usize, alignment));
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena == NULL || arena_is_auto(arena));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_internal && likely(ret != NULL)) {
arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
}
return ret;
}
JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena) {
return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
tcache_get(tsd), false, NULL);
}
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr) {
return arena_vsalloc(tsdn, ptr);
}
JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
bool is_internal, bool slow_path) {
assert(ptr != NULL);
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (config_stats && is_internal) {
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
}
if (!is_internal && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
assert(tcache == NULL);
}
arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr) {
idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
}
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
alloc_ctx_t *alloc_ctx, bool slow_path) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
void *p;
size_t usize, copysize;
usize = sz_sa2u(size + extra, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return NULL;
}
p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
if (p == NULL) {
if (extra == 0) {
return NULL;
}
/* Try again, without extra this time. */
usize = sz_sa2u(size, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return NULL;
}
p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
if (p == NULL) {
return NULL;
}
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
return p;
}
JEMALLOC_ALWAYS_INLINE void *
iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
bool zero, tcache_t *tcache, arena_t *arena) {
assert(ptr != NULL);
assert(size != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
return iralloct_realign(tsdn, ptr, oldsize, size, 0, alignment,
zero, tcache, arena);
}
return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
tcache);
}
JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
bool zero) {
return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
tcache_get(tsd), NULL);
}
JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero) {
assert(ptr != NULL);
assert(size != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/* Existing object alignment is inadequate. */
return true;
}
return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero);
}
#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
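
The try-with-extra fallback in iralloct_realign() above is a recurring jemalloc pattern: request size + extra first, retry with just size on failure, and copy at most size bytes because callers get no guarantee about the extra region. A minimal standalone sketch of the same pattern in terms of plain malloc/free (realloc_with_extra is illustrative, not a jemalloc symbol, and overflow of size + extra is assumed to have been checked by the caller):

#include <stdlib.h>
#include <string.h>

static void *
realloc_with_extra(void *ptr, size_t oldsize, size_t size, size_t extra) {
	void *p = malloc(size + extra);
	if (p == NULL) {
		if (extra == 0) {
			return NULL;
		}
		/* Try again, without extra this time. */
		p = malloc(size);
		if (p == NULL) {
			return NULL;
		}
	}
	/* Copy at most size bytes; the extra bytes are best-effort only. */
	memcpy(p, ptr, size < oldsize ? size : oldsize);
	free(ptr);
	return p;
}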

View File

@@ -0,0 +1,40 @@
#ifndef JEMALLOC_INTERNAL_MACROS_H
#define JEMALLOC_INTERNAL_MACROS_H
#ifdef JEMALLOC_DEBUG
# define JEMALLOC_ALWAYS_INLINE static inline
#else
# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
#endif
#ifdef _MSC_VER
# define inline _inline
#endif
#define UNUSED JEMALLOC_ATTR(unused)
#define ZU(z) ((size_t)z)
#define ZD(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QD(q) ((int64_t)q)
#define KZU(z) ZU(z##ULL)
#define KZD(z) ZD(z##LL)
#define KQU(q) QU(q##ULL)
#define KQD(q) QD(q##LL)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
# define restrict
#endif
/* Various function pointers are static and immutable except during testing. */
#ifdef JEMALLOC_JET
# define JET_MUTABLE
#else
# define JET_MUTABLE const
#endif
#endif /* JEMALLOC_INTERNAL_MACROS_H */
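
A note on JET_MUTABLE, since it recurs throughout the new headers (e.g. large_dalloc_junk below): production builds declare hooked function pointers const so the compiler can constant-fold calls through them, while test builds (-DJEMALLOC_JET) drop the const so unit tests can substitute instrumented implementations. A minimal sketch, assuming this header is included (junk_hook/junk_default are illustrative names, not jemalloc symbols):

#include <string.h>

typedef void (junk_hook_t)(void *, size_t);

static void
junk_default(void *ptr, size_t size) {
	memset(ptr, 0x5a, size);
}

/* const (and thus foldable) in production; reassignable under JEMALLOC_JET. */
junk_hook_t *JET_MUTABLE junk_hook = junk_default;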

View File

@@ -0,0 +1,178 @@
#ifndef JEMALLOC_INTERNAL_TYPES_H
#define JEMALLOC_INTERNAL_TYPES_H
/* Page size index type. */
typedef unsigned pszind_t;
/* Size class index type. */
typedef unsigned szind_t;
/* Processor / core id type. */
typedef int malloc_cpuid_t;
/*
* Flags bits:
*
* a: arena
* t: tcache
* 0: unused
* z: zero
* n: alignment
*
* aaaaaaaa aaaatttt tttttttt 0znnnnnn
*/
#define MALLOCX_ARENA_BITS 12
#define MALLOCX_TCACHE_BITS 12
#define MALLOCX_LG_ALIGN_BITS 6
#define MALLOCX_ARENA_SHIFT 20
#define MALLOCX_TCACHE_SHIFT 8
#define MALLOCX_ARENA_MASK \
(((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
/* NB: Arena index bias decreases the maximum number of arenas by 1. */
#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
#define MALLOCX_TCACHE_MASK \
(((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
((bool)(flags & MALLOCX_ZERO))
#define MALLOCX_TCACHE_GET(flags) \
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
#define MALLOCX_ARENA_GET(flags) \
(((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
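/*
 * Worked example: flags == MALLOCX_ARENA(3) | MALLOCX_LG_ALIGN(4) (using the
 * public encoders from jemalloc.h) stores (3 + 1) << MALLOCX_ARENA_SHIFT in
 * the arena bits and 4 in the alignment bits, so MALLOCX_ARENA_GET(flags)
 * recovers 3 and MALLOCX_ALIGN_GET(flags) recovers 16.  The +1/+2 biases on
 * the arena/tcache encodings let all-zero bits mean "unspecified".
 */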
/* Smallest size class to support. */
#define TINY_MIN (1U << LG_TINY_MIN)
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
# endif
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# ifdef __aarch64__
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
# ifdef __or1k__
# define LG_QUANTUM 3
# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# ifdef __riscv__
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# ifdef __SH4__
# define LG_QUANTUM 4
# endif
# ifdef __tile__
# define LG_QUANTUM 4
# endif
# ifdef __le32__
# define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
# error "Unknown minimum alignment for architecture; specify via "
"--with-lg-quantum"
# endif
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
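/*
 * For example, with LG_QUANTUM == 4 (QUANTUM == 16, QUANTUM_MASK == 0xf):
 * QUANTUM_CEILING(1) == 16, QUANTUM_CEILING(16) == 16, and
 * QUANTUM_CEILING(17) == 32.
 */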
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
* In addition, this controls the spacing of cacheline-spaced size classes.
*
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
* only handle raw constants.
*/
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & (alignment - 1)))
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & ((~(alignment)) + 1))
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
# include <malloc.h>
# define alloca _alloca
# else
# ifdef JEMALLOC_HAS_ALLOCA_H
# include <alloca.h>
# else
# include <stdlib.h>
# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
type *name = alloca(sizeof(type) * (count))
#else
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
#endif /* JEMALLOC_INTERNAL_TYPES_H */
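
A usage note on VARIABLE_ARRAY: it yields a stack-allocated array whose storage disappears when the enclosing function returns, via a C99 VLA where available and alloca() otherwise, so it must never be handed out. A sketch, assuming this header is included (sum_keys is illustrative):

#include <stddef.h>
#include <stdint.h>

static uint64_t
sum_keys(const uint64_t *keys, size_t nkeys) {
	/* C99 VLA, or alloca() on pre-C99 toolchains such as MSVC. */
	VARIABLE_ARRAY(uint64_t, copy, nkeys);
	uint64_t sum = 0;
	for (size_t i = 0; i < nkeys; i++) {
		copy[i] = keys[i];
		sum += copy[i];
	}
	return sum;
}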

View File

@@ -0,0 +1,179 @@
#ifndef JEMALLOC_PREAMBLE_H
#define JEMALLOC_PREAMBLE_H
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# undef JEMALLOC_IS_MALLOC
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "../jemalloc.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) je_##n
# include "../jemalloc.h"
#endif
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#endif
#include "jemalloc/internal/jemalloc_internal_macros.h"
/*
* Note that the ordering matters here; the hook itself is name-mangled. We
* want the inclusion of hooks to happen early, so that we hook as much as
* possible.
*/
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
# ifndef JEMALLOC_JET
# include "jemalloc/internal/private_namespace.h"
# else
# include "jemalloc/internal/private_namespace_jet.h"
# endif
#endif
#include "jemalloc/internal/hooks.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
static const bool have_dss =
#ifdef JEMALLOC_DSS
true
#else
false
#endif
;
static const bool config_fill =
#ifdef JEMALLOC_FILL
true
#else
false
#endif
;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
true
#else
false
#endif
;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
#else
false
#endif
;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
true
#else
false
#endif
;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
true
#else
false
#endif
;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
true
#else
false
#endif
;
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
static const bool config_thp =
#ifdef JEMALLOC_THP
true
#else
false
#endif
;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
#else
false
#endif
;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
true
#else
false
#endif
;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
true
#else
false
#endif
;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
true
#else
false
#endif
;
#ifdef JEMALLOC_HAVE_SCHED_GETCPU
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
#endif
static const bool have_percpu_arena =
#ifdef JEMALLOC_PERCPU_ARENA
true
#else
false
#endif
;
/*
* Undocumented, and not recommended; the application should take full
* responsibility for tracking provenance.
*/
static const bool force_ivsalloc =
#ifdef JEMALLOC_FORCE_IVSALLOC
true
#else
false
#endif
;
static const bool have_background_thread =
#ifdef JEMALLOC_BACKGROUND_THREAD
true
#else
false
#endif
;
#endif /* JEMALLOC_PREAMBLE_H */
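
The config_* constants above are jemalloc's alternative to scattering #ifdefs: each build option becomes a compile-time-constant bool, so option-dependent logic is written as an ordinary branch that the optimizer deletes in disabled configurations while still being parsed and type-checked in all of them. A reduced sketch, assuming this header is included (stats_record is a hypothetical hook):

#include <stddef.h>

void stats_record(size_t size);	/* hypothetical stats hook */

static void
on_alloc(size_t size) {
	/* Dead-code-eliminated without --enable-stats, but never bit-rots,
	 * because it compiles in every configuration. */
	if (config_stats) {
		stats_record(size);
	}
}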

View File

@@ -0,0 +1,179 @@
#ifndef JEMALLOC_PREAMBLE_H
#define JEMALLOC_PREAMBLE_H
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# undef JEMALLOC_IS_MALLOC
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "../jemalloc@install_suffix@.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) @private_namespace@##n
# include "../jemalloc@install_suffix@.h"
#endif
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#endif
#include "jemalloc/internal/jemalloc_internal_macros.h"
/*
* Note that the ordering matters here; the hook itself is name-mangled. We
* want the inclusion of hooks to happen early, so that we hook as much as
* possible.
*/
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
# ifndef JEMALLOC_JET
# include "jemalloc/internal/private_namespace.h"
# else
# include "jemalloc/internal/private_namespace_jet.h"
# endif
#endif
#include "jemalloc/internal/hooks.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
static const bool have_dss =
#ifdef JEMALLOC_DSS
true
#else
false
#endif
;
static const bool config_fill =
#ifdef JEMALLOC_FILL
true
#else
false
#endif
;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
true
#else
false
#endif
;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
#else
false
#endif
;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
true
#else
false
#endif
;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
true
#else
false
#endif
;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
true
#else
false
#endif
;
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
static const bool config_thp =
#ifdef JEMALLOC_THP
true
#else
false
#endif
;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
#else
false
#endif
;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
true
#else
false
#endif
;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
true
#else
false
#endif
;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
true
#else
false
#endif
;
#ifdef JEMALLOC_HAVE_SCHED_GETCPU
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
#endif
static const bool have_percpu_arena =
#ifdef JEMALLOC_PERCPU_ARENA
true
#else
false
#endif
;
/*
* Undocumented, and not recommended; the application should take full
* responsibility for tracking provenance.
*/
static const bool force_ivsalloc =
#ifdef JEMALLOC_FORCE_IVSALLOC
true
#else
false
#endif
;
static const bool have_background_thread =
#ifdef JEMALLOC_BACKGROUND_THREAD
true
#else
false
#endif
;
#endif /* JEMALLOC_PREAMBLE_H */

View File

@@ -0,0 +1,26 @@
#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H
void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero);
bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
size_t usize_max, bool zero);
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
typedef void (large_dalloc_junk_t)(void *, size_t);
extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;
typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;
void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
void large_dalloc(tsdn_t *tsdn, extent_t *extent);
size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */

View File

@@ -0,0 +1,62 @@
#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
#define JEMALLOC_INTERNAL_MALLOC_IO_H
#ifdef _WIN32
# ifdef _WIN64
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX "ll"
# else
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX ""
# endif
# define FMTd32 "d"
# define FMTu32 "u"
# define FMTx32 "x"
# define FMTd64 FMT64_PREFIX "d"
# define FMTu64 FMT64_PREFIX "u"
# define FMTx64 FMT64_PREFIX "x"
# define FMTdPTR FMTPTR_PREFIX "d"
# define FMTuPTR FMTPTR_PREFIX "u"
# define FMTxPTR FMTPTR_PREFIX "x"
#else
# include <inttypes.h>
# define FMTd32 PRId32
# define FMTu32 PRIu32
# define FMTx32 PRIx32
# define FMTd64 PRId64
# define FMTu64 PRIu64
# define FMTx64 PRIx64
# define FMTdPTR PRIdPTR
# define FMTuPTR PRIuPTR
# define FMTxPTR PRIxPTR
#endif
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
/*
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
* large enough for all possible uses within jemalloc.
*/
#define MALLOC_PRINTF_BUFSIZE 4096
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
int base);
void malloc_write(const char *s);
/*
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);
void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
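
The FMT* macros exist because MSVC historically lacked <inttypes.h>, and they let a single format string work on both Windows and POSIX. A sketch combining them with malloc_printf(), assuming this header is included (report_mapping and its arguments are illustrative):

#include <stdint.h>

static void
report_mapping(void *addr, uint64_t len) {
	/* FMTu64/FMTxPTR expand to the correct length modifiers on MSVC
	 * ("ll") and on POSIX (PRIu64/PRIxPTR). */
	malloc_printf("mapped %"FMTu64" bytes at 0x%"FMTxPTR"\n",
	    len, (uintptr_t)addr);
}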

View File

@@ -1,32 +1,42 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_MUTEX_H
#define JEMALLOC_INTERNAL_MUTEX_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"
typedef enum {
/* Can only acquire one mutex of a given witness rank at a time. */
malloc_mutex_rank_exclusive,
/*
* Can acquire multiple mutexes of the same witness rank, but in
* address-ascending order only.
*/
malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;
typedef struct malloc_mutex_s malloc_mutex_t;
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else
# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
# endif
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct malloc_mutex_s {
union {
struct {
/*
* prof_data is defined first to reduce cacheline
* bouncing: the data is not touched by the mutex holder
 * during unlocking, while it might be modified by
* contenders. Having it before the mutex itself could
* avoid prefetching a modified cacheline (for the
* unlocking thread).
*/
mutex_prof_data_t prof_data;
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
SRWLOCK lock;
# else
CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
@@ -34,12 +44,80 @@ struct malloc_mutex_s {
malloc_mutex_t *postponed_next;
#else
pthread_mutex_t lock;
#endif
};
/*
* We only touch witness when configured w/ debug. However we
* keep the field in a union when !debug so that we don't have
 * to pollute the code base with #ifdefs, while avoiding the
* memory cost.
*/
#if !defined(JEMALLOC_DEBUG)
witness_t witness;
malloc_mutex_lock_order_t lock_order;
#endif
};
#if defined(JEMALLOC_DEBUG)
witness_t witness;
malloc_mutex_lock_order_t lock_order;
#endif
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/*
 * Based on benchmark results, a fixed spin with this number of retries works
* well for our critical sections.
*/
#define MALLOC_MUTEX_MAX_SPIN 250
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
# else
# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
#else
# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif
#define LOCK_PROF_DATA_INITIALIZER \
{NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
ATOMIC_INIT(0), 0, NULL, 0}
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, 0}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#endif
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
@@ -48,52 +126,123 @@ extern bool isthreaded;
# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex);
void malloc_mutex_prefork(malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
bool mutex_boot(void);
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_boot(void);
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(malloc_mutex_t *mutex);
void malloc_mutex_unlock(malloc_mutex_t *mutex);
#endif
static inline void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
MALLOC_MUTEX_LOCK(mutex);
}
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
{
static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
return MALLOC_MUTEX_TRYLOCK(mutex);
}
if (isthreaded) {
#ifdef _WIN32
EnterCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock);
#else
pthread_mutex_lock(&mutex->lock);
#endif
static inline void
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
if (config_stats) {
mutex_prof_data_t *data = &mutex->prof_data;
data->n_lock_ops++;
if (data->prev_owner != tsdn) {
data->prev_owner = tsdn;
data->n_owner_switches++;
}
}
}
JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{
/* Trylock: return false if the lock is successfully acquired. */
static inline bool
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
#ifdef _WIN32
LeaveCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock);
#else
pthread_mutex_unlock(&mutex->lock);
#endif
if (malloc_mutex_trylock_final(mutex)) {
return true;
}
mutex_owner_stats_update(tsdn, mutex);
}
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
return false;
}
/* Aggregate lock prof data. */
static inline void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
nstime_copy(&sum->max_wait_time, &data->max_wait_time);
}
sum->n_wait_times += data->n_wait_times;
sum->n_spin_acquired += data->n_spin_acquired;
if (sum->max_n_thds < data->max_n_thds) {
sum->max_n_thds = data->max_n_thds;
}
uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
ATOMIC_RELAXED);
uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
&data->n_waiting_thds, ATOMIC_RELAXED);
atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
ATOMIC_RELAXED);
sum->n_owner_switches += data->n_owner_switches;
sum->n_lock_ops += data->n_lock_ops;
}
static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
if (malloc_mutex_trylock_final(mutex)) {
malloc_mutex_lock_slow(mutex);
}
mutex_owner_stats_update(tsdn, mutex);
}
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
static inline void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
MALLOC_MUTEX_UNLOCK(mutex);
}
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
static inline void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
static inline void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
malloc_mutex_t *mutex) {
mutex_prof_data_t *source = &mutex->prof_data;
/* Can only read holding the mutex. */
malloc_mutex_assert_owner(tsdn, mutex);
/*
* Not *really* allowed (we shouldn't be doing non-atomic loads of
* atomic data), but the mutex protection makes this safe, and writing
* a member-for-member copy is tedious for this situation.
*/
*data = *source;
/* n_waiting_thds is not reported (modified w/o locking). */
atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}
#endif /* JEMALLOC_INTERNAL_MUTEX_H */
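
A sketch of the reworked API's lifecycle, assuming this header is included (my_mtx/my_op are illustrative; WITNESS_RANK_OMIT simply exempts the example from lock-order checking):

static malloc_mutex_t my_mtx;

static bool
my_module_boot(void) {
	/* The name and witness rank feed the profiling and lock-order
	 * machinery introduced in this version. */
	return malloc_mutex_init(&my_mtx, "my_mtx", WITNESS_RANK_OMIT,
	    malloc_mutex_rank_exclusive);
}

static void
my_op(tsdn_t *tsdn) {
	malloc_mutex_lock(tsdn, &my_mtx);	/* spins, then blocks; updates prof_data */
	/* ... critical section ... */
	malloc_mutex_unlock(tsdn, &my_mtx);
}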

View File

@@ -0,0 +1,94 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
#define JEMALLOC_INTERNAL_MUTEX_POOL_H
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/witness.h"
/* We do mod reductions by this value, so it should be kept a power of 2. */
#define MUTEX_POOL_SIZE 256
typedef struct mutex_pool_s mutex_pool_t;
struct mutex_pool_s {
malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
};
bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);
/* Internal helper - not meant to be called outside this module. */
static inline malloc_mutex_t *
mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
size_t hash_result[2];
hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
}
static inline void
mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
}
}
/*
 * Note that a mutex pool doesn't work exactly the way an embedded mutex would.
* You're not allowed to acquire mutexes in the pool one at a time. You have to
* acquire all the mutexes you'll need in a single function call, and then
* release them all in a single function call.
*/
static inline void
mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
mutex_pool_assert_not_held(tsdn, pool);
malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
malloc_mutex_lock(tsdn, mutex);
}
static inline void
mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
malloc_mutex_unlock(tsdn, mutex);
mutex_pool_assert_not_held(tsdn, pool);
}
static inline void
mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
uintptr_t key2) {
mutex_pool_assert_not_held(tsdn, pool);
malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
malloc_mutex_lock(tsdn, mutex1);
malloc_mutex_lock(tsdn, mutex2);
} else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
malloc_mutex_lock(tsdn, mutex1);
} else {
malloc_mutex_lock(tsdn, mutex2);
malloc_mutex_lock(tsdn, mutex1);
}
}
static inline void
mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
uintptr_t key2) {
malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
if (mutex1 == mutex2) {
malloc_mutex_unlock(tsdn, mutex1);
} else {
malloc_mutex_unlock(tsdn, mutex1);
malloc_mutex_unlock(tsdn, mutex2);
}
mutex_pool_assert_not_held(tsdn, pool);
}
static inline void
mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
}
#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */
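
The address-ordered acquisition in mutex_pool_lock2() is the classic deadlock-avoidance discipline: any two threads locking the same pair of keys take the underlying mutexes in the same order, and a hash collision (both keys mapping to one mutex) degenerates to a single lock. A sketch of the intended calling pattern, assuming this header is included (move_between and its arguments are illustrative):

#include <stdint.h>

static void
move_between(tsdn_t *tsdn, mutex_pool_t *pool, void *a, void *b) {
	/* Acquire everything needed in one call... */
	mutex_pool_lock2(tsdn, pool, (uintptr_t)a, (uintptr_t)b);
	/* ... mutate both objects ... */
	/* ... and release in one call, per the rule documented above. */
	mutex_pool_unlock2(tsdn, pool, (uintptr_t)a, (uintptr_t)b);
}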

View File

@@ -0,0 +1,86 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
#define JEMALLOC_INTERNAL_MUTEX_PROF_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/tsd_types.h"
#define MUTEX_PROF_GLOBAL_MUTEXES \
OP(background_thread) \
OP(ctl) \
OP(prof)
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;
#define MUTEX_PROF_ARENA_MUTEXES \
OP(large) \
OP(extent_avail) \
OP(extents_dirty) \
OP(extents_muzzy) \
OP(extents_retained) \
OP(decay_dirty) \
OP(decay_muzzy) \
OP(base) \
OP(tcache_list)
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
MUTEX_PROF_ARENA_MUTEXES
#undef OP
mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;
#define MUTEX_PROF_COUNTERS \
OP(num_ops, uint64_t) \
OP(num_wait, uint64_t) \
OP(num_spin_acq, uint64_t) \
OP(num_owner_switch, uint64_t) \
OP(total_wait_time, uint64_t) \
OP(max_wait_time, uint64_t) \
OP(max_num_thds, uint32_t)
typedef enum {
#define OP(counter, type) mutex_counter_##counter,
MUTEX_PROF_COUNTERS
#undef OP
mutex_prof_num_counters
} mutex_prof_counter_ind_t;
typedef struct {
/*
* Counters touched on the slow path, i.e. when there is lock
* contention. We update them once we have the lock.
*/
/* Total time (in nanoseconds) spent waiting on this mutex. */
nstime_t tot_wait_time;
/* Max time (in nanoseconds) spent on a single lock operation. */
nstime_t max_wait_time;
/* # of times have to wait for this mutex (after spinning). */
uint64_t n_wait_times;
/* # of times acquired the mutex through local spinning. */
uint64_t n_spin_acquired;
/* Max # of threads waiting for the mutex at the same time. */
uint32_t max_n_thds;
/* Current # of threads waiting on the lock. Atomic synced. */
atomic_u32_t n_waiting_thds;
/*
* Data touched on the fast path. These are modified right after we
* grab the lock, so it's placed closest to the end (i.e. right before
* the lock) so that we have a higher chance of them being on the same
* cacheline.
*/
/* # of times the mutex holder is different than the previous one. */
uint64_t n_owner_switches;
/* Previous mutex holder, to facilitate n_owner_switches. */
tsdn_t *prev_owner;
/* # of lock() operations in total. */
uint64_t n_lock_ops;
} mutex_prof_data_t;
#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
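
The OP lists above are X-macros: each consumer re-#defines OP to stamp a parallel structure out of the same master list, keeping the enums and any derived tables in sync by construction. For instance, a counter-name table can be generated like this, assuming this header is included (mutex_counter_names is illustrative; the stats code uses the same trick):

static const char *mutex_counter_names[mutex_prof_num_counters] = {
#define OP(counter, type) #counter,
	MUTEX_PROF_COUNTERS
#undef OP
};
/* Yields {"num_ops", "num_wait", "num_spin_acq", ...}, always matching
 * mutex_prof_counter_ind_t above. */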

View File

@@ -0,0 +1,34 @@
#ifndef JEMALLOC_INTERNAL_NSTIME_H
#define JEMALLOC_INTERNAL_NSTIME_H
/* Maximum supported number of seconds (~584 years). */
#define NSTIME_SEC_MAX KQU(18446744072)
#define NSTIME_ZERO_INITIALIZER {0}
typedef struct {
uint64_t ns;
} nstime_t;
void nstime_init(nstime_t *time, uint64_t ns);
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t nstime_ns(const nstime_t *time);
uint64_t nstime_sec(const nstime_t *time);
uint64_t nstime_msec(const nstime_t *time);
uint64_t nstime_nsec(const nstime_t *time);
void nstime_copy(nstime_t *time, const nstime_t *source);
int nstime_compare(const nstime_t *a, const nstime_t *b);
void nstime_add(nstime_t *time, const nstime_t *addend);
void nstime_iadd(nstime_t *time, uint64_t addend);
void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
void nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;
typedef bool (nstime_update_t)(nstime_t *);
extern nstime_update_t *JET_MUTABLE nstime_update;
#endif /* JEMALLOC_INTERNAL_NSTIME_H */
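
nstime_t stores a single uint64_t nanosecond count, so the helpers are thin arithmetic wrappers. A quick worked use, assuming this header is included (values chosen for illustration):

#include <assert.h>

static void
nstime_demo(void) {
	nstime_t t;
	nstime_init2(&t, 1, 500000000);	/* 1.5 s */
	nstime_imultiply(&t, 2);	/* 3.0 s */
	assert(nstime_sec(&t) == 3);
	assert(nstime_nsec(&t) == 0);	/* fractional part, in ns */
}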

View File

@@ -0,0 +1,71 @@
#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
/* Page size. LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~PAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
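/*
 * E.g. with 4 KiB pages (LG_PAGE == 12): PAGE_ADDR2BASE(0x12345) == 0x12000
 * and PAGE_CEILING(5000) == 8192.
 */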
/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
/* Return the huge page base address for the huge page containing address a. */
#define HUGEPAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
/* Return the smallest huge page multiple that is >= s. */
#define HUGEPAGE_CEILING(s) \
(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
# define PAGES_CAN_PURGE_LAZY
#endif
/*
* PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
*
* The only supported way to hard-purge on Windows is to decommit and then
* re-commit, but doing so is racy, and if re-commit fails it's a pain to
* propagate the "poisoned" memory state. Since we typically decommit as the
* next step after purging on Windows anyway, there's no point in adding such
* complexity.
*/
#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
defined(JEMALLOC_MAPS_COALESCE))
# define PAGES_CAN_PURGE_FORCED
#endif
static const bool pages_can_purge_lazy =
#ifdef PAGES_CAN_PURGE_LAZY
true
#else
false
#endif
;
static const bool pages_can_purge_forced =
#ifdef PAGES_CAN_PURGE_FORCED
true
#else
false
#endif
;
void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
void pages_unmap(void *addr, size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge_lazy(void *addr, size_t size);
bool pages_purge_forced(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
bool pages_boot(void);
#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */

View File

@@ -0,0 +1,391 @@
/*
* A Pairing Heap implementation.
*
* "The Pairing Heap: A New Form of Self-Adjusting Heap"
* https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
*
 * With an auxiliary twopass list, as described in a follow-on paper.
*
* "Pairing Heaps: Experiments and Analysis"
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*******************************************************************************
*/
#ifndef PH_H_
#define PH_H_
/* Node structure. */
#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
a_type *phn_lchild; \
}
/* Root structure. */
#define ph(a_type) \
struct { \
a_type *ph_root; \
}
/* Internal utility macros. */
#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)
#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)
#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
assert(a_phn1 != NULL); \
assert(a_cmp(a_phn0, a_phn1) <= 0); \
\
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
if (phn0child != NULL) { \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
} \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) { \
r_phn = a_phn1; \
} else if (a_phn1 == NULL) { \
r_phn = a_phn0; \
} else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
} else { \
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
a_cmp); \
r_phn = a_phn1; \
} \
} while (0)
#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
\
/* \
* Multipass merge, wherein the first two elements of a FIFO \
* are repeatedly merged, and each result is appended to the \
* singly linked FIFO, until the FIFO contains only a single \
* element. We start with a sibling list but no reference to \
* its tail, so we do a single pass over the sibling list to \
* populate the FIFO. \
*/ \
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
phn_next_set(a_type, a_field, phn1, NULL); \
phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
head = tail = phn0; \
phn0 = phnrest; \
while (phn0 != NULL) { \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
phnrest = phn_next_get(a_type, a_field, \
phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, \
phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, \
NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
phn_prev_set(a_type, a_field, phn1, \
NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = phnrest; \
} else { \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = NULL; \
} \
} \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
while (true) { \
head = phn_next_get(a_type, a_field, \
phn1); \
assert(phn_prev_get(a_type, a_field, \
phn0) == NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
assert(phn_prev_get(a_type, a_field, \
phn1) == NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
if (head == NULL) { \
break; \
} \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, \
phn0); \
} \
} \
} \
r_phn = phn0; \
} while (0)
#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_prev_set(a_type, a_field, phn, NULL); \
ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
assert(phn_next_get(a_type, a_field, phn) == NULL); \
phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
a_ph->ph_root); \
} \
} while (0)
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
if (lchild == NULL) { \
r_phn = NULL; \
} else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
} while (0)
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
a_attr a_type *a_prefix##any(a_ph_type *ph); \
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
/*
* The ph_gen() macro generates a type-specific pairing heap implementation,
* based on the above cpp macros.
*/
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) { \
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) { \
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) { \
if (ph->ph_root == NULL) { \
return NULL; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return ph->ph_root; \
} \
a_attr a_type * \
a_prefix##any(a_ph_type *ph) { \
if (ph->ph_root == NULL) { \
return NULL; \
} \
a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \
if (aux != NULL) { \
return aux; \
} \
return ph->ph_root; \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) { \
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
* Treat the root as an aux list during insertion, and lazily \
* merge during a_prefix##remove_first(). For elements that \
* are inserted, then removed via a_prefix##remove() before the \
* aux list is ever processed, this makes insert/remove \
* constant-time, whereas eager merging would make insert \
* O(log n). \
*/ \
if (ph->ph_root == NULL) { \
ph->ph_root = phn; \
} else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, ph->ph_root), \
phn); \
} \
phn_prev_set(a_type, a_field, phn, ph->ph_root); \
phn_next_set(a_type, a_field, ph->ph_root, phn); \
} \
} \
a_attr a_type * \
a_prefix##remove_first(a_ph_type *ph) { \
a_type *ret; \
\
if (ph->ph_root == NULL) { \
return NULL; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
\
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
return ret; \
} \
a_attr a_type * \
a_prefix##remove_any(a_ph_type *ph) { \
/* \
* Remove the most recently inserted aux list element, or the \
* root if the aux list is empty. This has the effect of \
* behaving as a LIFO (and insertion/removal is therefore \
* constant-time) if a_prefix##[remove_]first() are never \
* called. \
*/ \
if (ph->ph_root == NULL) { \
return NULL; \
} \
a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \
if (ret != NULL) { \
a_type *aux = phn_next_get(a_type, a_field, ret); \
phn_next_set(a_type, a_field, ph->ph_root, aux); \
if (aux != NULL) { \
phn_prev_set(a_type, a_field, aux, \
ph->ph_root); \
} \
return ret; \
} \
ret = ph->ph_root; \
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
return ret; \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) { \
a_type *replace, *parent; \
\
if (ph->ph_root == phn) { \
/* \
* We can delete from aux list without merging it, but \
* we need to merge if we are dealing with the root \
* node and it has children. \
*/ \
if (phn_lchild_get(a_type, a_field, phn) == NULL) { \
ph->ph_root = phn_next_get(a_type, a_field, \
phn); \
if (ph->ph_root != NULL) { \
phn_prev_set(a_type, a_field, \
ph->ph_root, NULL); \
} \
return; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
if (ph->ph_root == phn) { \
ph_merge_children(a_type, a_field, ph->ph_root, \
a_cmp, ph->ph_root); \
return; \
} \
} \
\
/* Get parent (if phn is leftmost child) before mutating. */ \
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
if (phn_lchild_get(a_type, a_field, parent) != phn) { \
parent = NULL; \
} \
} \
/* Find a possible replacement node, and link to parent. */ \
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
/* Set next/prev for sibling linked list. */ \
if (replace != NULL) { \
if (parent != NULL) { \
phn_prev_set(a_type, a_field, replace, parent); \
phn_lchild_set(a_type, a_field, parent, \
replace); \
} else { \
phn_prev_set(a_type, a_field, replace, \
phn_prev_get(a_type, a_field, phn)); \
if (phn_prev_get(a_type, a_field, phn) != \
NULL) { \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
replace); \
} \
} \
phn_next_set(a_type, a_field, replace, \
phn_next_get(a_type, a_field, phn)); \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
replace); \
} \
} else { \
if (parent != NULL) { \
a_type *next = phn_next_get(a_type, a_field, \
phn); \
phn_lchild_set(a_type, a_field, parent, next); \
if (next != NULL) { \
phn_prev_set(a_type, a_field, next, \
parent); \
} \
} else { \
assert(phn_prev_get(a_type, a_field, phn) != \
NULL); \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
phn_next_get(a_type, a_field, phn)); \
} \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
phn_prev_get(a_type, a_field, phn)); \
} \
} \
}
#endif /* PH_H_ */
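
To make the parameterization concrete, here is a minimal instantiation sketch, assuming this header plus <stdint.h>, <string.h>, and <assert.h> are included (node_t and the node_heap_ prefix are illustrative; jemalloc itself instantiates this for extent heaps):

typedef struct node_s node_t;
struct node_s {
	uint64_t key;
	phn(node_t) link;	/* intrusive heap linkage */
};

static int
node_cmp(const node_t *a, const node_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

typedef ph(node_t) node_heap_t;
/* Generates node_heap_new(), node_heap_insert(), node_heap_remove_first(),
 * etc., all operating on the link field under node_cmp ordering. */
ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)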

View File

@@ -1,147 +1,199 @@
#define a0calloc JEMALLOC_N(a0calloc)
#define a0free JEMALLOC_N(a0free)
#define a0dalloc JEMALLOC_N(a0dalloc)
#define a0get JEMALLOC_N(a0get)
#define a0malloc JEMALLOC_N(a0malloc)
#define arena_aalloc JEMALLOC_N(arena_aalloc)
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const)
#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_choose JEMALLOC_N(arena_choose)
#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
#define arena_choose_impl JEMALLOC_N(arena_choose_impl)
#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge)
#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert)
#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove)
#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge)
#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand)
#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink)
#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar)
#define arena_cleanup JEMALLOC_N(arena_cleanup)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_decay_tick JEMALLOC_N(arena_decay_tick)
#define arena_decay_ticks JEMALLOC_N(arena_decay_ticks)
#define arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get)
#define arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set)
#define arena_decay_time_get JEMALLOC_N(arena_decay_time_get)
#define arena_decay_time_set JEMALLOC_N(arena_decay_time_set)
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
#define arena_get JEMALLOC_N(arena_get)
#define arena_ichoose JEMALLOC_N(arena_ichoose)
#define arena_init JEMALLOC_N(arena_init)
#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get)
#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set)
#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get)
#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbits_size_decode JEMALLOC_N(arena_mapbits_size_decode)
#define arena_mapbits_size_encode JEMALLOC_N(arena_mapbits_size_encode)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const)
#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable)
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_maxrun JEMALLOC_N(arena_maxrun)
#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge)
#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add)
#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get)
#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub)
#define arena_migrate JEMALLOC_N(arena_migrate)
#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const)
#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable)
#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind)
#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages)
#define arena_new JEMALLOC_N(arena_new)
#define arena_node_alloc JEMALLOC_N(arena_node_alloc)
#define arena_node_dalloc JEMALLOC_N(arena_node_dalloc)
#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec)
#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get)
#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
#define arena_prefork JEMALLOC_N(arena_prefork)
#define arena_prefork0 JEMALLOC_N(arena_prefork0)
#define arena_prefork1 JEMALLOC_N(arena_prefork1)
#define arena_prefork2 JEMALLOC_N(arena_prefork2)
#define arena_prefork3 JEMALLOC_N(arena_prefork3)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get)
#define arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset)
#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
#define arena_purge JEMALLOC_N(arena_purge)
#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm)
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
#define arena_reset JEMALLOC_N(arena_reset)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_sdalloc JEMALLOC_N(arena_sdalloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arena_tdata_get JEMALLOC_N(arena_tdata_get)
#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard)
#define arenas JEMALLOC_N(arenas)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
#define arenas_tls JEMALLOC_N(arenas_tls)
#define arenas_tsd JEMALLOC_N(arenas_tsd)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper)
#define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head)
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
#define arenas_tdata_bypass_cleanup JEMALLOC_N(arenas_tdata_bypass_cleanup)
#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup)
#define atomic_add_p JEMALLOC_N(atomic_add_p)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
#define atomic_add_z JEMALLOC_N(atomic_add_z)
#define atomic_cas_p JEMALLOC_N(atomic_cas_p)
#define atomic_cas_u JEMALLOC_N(atomic_cas_u)
#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32)
#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64)
#define atomic_cas_z JEMALLOC_N(atomic_cas_z)
#define atomic_sub_p JEMALLOC_N(atomic_sub_p)
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
#define atomic_write_p JEMALLOC_N(atomic_write_p)
#define atomic_write_u JEMALLOC_N(atomic_write_u)
#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32)
#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64)
#define atomic_write_z JEMALLOC_N(atomic_write_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_calloc JEMALLOC_N(base_calloc)
#define base_node_alloc JEMALLOC_N(base_node_alloc)
#define base_node_dealloc JEMALLOC_N(base_node_dealloc)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
#define base_prefork JEMALLOC_N(base_prefork)
#define base_stats_get JEMALLOC_N(base_stats_get)
#define bitmap_full JEMALLOC_N(bitmap_full)
#define bitmap_get JEMALLOC_N(bitmap_get)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups)
#define bitmap_init JEMALLOC_N(bitmap_init)
#define bitmap_set JEMALLOC_N(bitmap_set)
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
#define bitmap_size JEMALLOC_N(bitmap_size)
#define bitmap_unset JEMALLOC_N(bitmap_unset)
#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
#define bootstrap_free JEMALLOC_N(bootstrap_free)
#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
#define bt_init JEMALLOC_N(bt_init)
#define buferror JEMALLOC_N(buferror)
#define choose_arena JEMALLOC_N(choose_arena)
#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
#define chunk_alloc JEMALLOC_N(chunk_alloc)
#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base)
#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache)
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dealloc JEMALLOC_N(chunk_dealloc)
#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache)
#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap)
#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper)
#define chunk_deregister JEMALLOC_N(chunk_deregister)
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
#define chunk_dss_mergeable JEMALLOC_N(chunk_dss_mergeable)
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default)
#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get)
#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_lookup JEMALLOC_N(chunk_lookup)
#define chunk_npages JEMALLOC_N(chunk_npages)
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
#define chunk_prefork JEMALLOC_N(chunk_prefork)
#define chunk_unmap JEMALLOC_N(chunk_unmap)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper)
#define chunk_register JEMALLOC_N(chunk_register)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
#define chunksize_mask JEMALLOC_N(chunksize_mask)
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
#define ckh_insert JEMALLOC_N(ckh_insert)
#define ckh_isearch JEMALLOC_N(ckh_isearch)
#define ckh_iter JEMALLOC_N(ckh_iter)
#define ckh_new JEMALLOC_N(ckh_new)
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
#define ckh_remove JEMALLOC_N(ckh_remove)
#define ckh_search JEMALLOC_N(ckh_search)
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
#define ctl_boot JEMALLOC_N(ctl_boot)
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
@@ -149,7 +201,33 @@
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
#define ctl_prefork JEMALLOC_N(ctl_prefork)
#define decay_ticker_get JEMALLOC_N(decay_ticker_get)
#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get)
#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set)
#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get)
#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set)
#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get)
#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set)
#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get)
#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set)
#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert)
#define extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init)
#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove)
#define extent_node_init JEMALLOC_N(extent_node_init)
#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get)
#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set)
#define extent_node_size_get JEMALLOC_N(extent_node_size_get)
#define extent_node_size_set JEMALLOC_N(extent_node_size_set)
#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get)
#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set)
#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get)
#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set)
#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil)
#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor)
#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy)
#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse)
#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
@@ -166,22 +244,31 @@
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy)
#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse)
#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty)
#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first)
#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert)
#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter)
#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse)
#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start)
#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last)
#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new)
#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next)
#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch)
#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev)
#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch)
#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove)
#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter)
#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse)
#define extent_tree_szsnad_reverse_iter_start JEMALLOC_N(extent_tree_szsnad_reverse_iter_start)
#define extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search)
#define ffs_llu JEMALLOC_N(ffs_llu)
#define ffs_lu JEMALLOC_N(ffs_lu)
#define ffs_u JEMALLOC_N(ffs_u)
#define ffs_u32 JEMALLOC_N(ffs_u32)
#define ffs_u64 JEMALLOC_N(ffs_u64)
#define ffs_zu JEMALLOC_N(ffs_zu)
#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash)
#define hash_fmix_32 JEMALLOC_N(hash_fmix_32)
@@ -193,46 +280,51 @@
#define hash_x64_128 JEMALLOC_N(hash_x64_128)
#define hash_x86_128 JEMALLOC_N(hash_x86_128)
#define hash_x86_32 JEMALLOC_N(hash_x86_32)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_aalloc JEMALLOC_N(huge_aalloc)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
#define huge_dss_prec_get JEMALLOC_N(huge_dss_prec_get)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_mtx JEMALLOC_N(huge_mtx)
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
#define huge_palloc JEMALLOC_N(huge_palloc)
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
#define huge_prefork JEMALLOC_N(huge_prefork)
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get)
#define huge_prof_tctx_reset JEMALLOC_N(huge_prof_tctx_reset)
#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set)
#define huge_ralloc JEMALLOC_N(huge_ralloc)
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iallocm JEMALLOC_N(iallocm)
#define icalloc JEMALLOC_N(icalloc)
#define icalloct JEMALLOC_N(icalloct)
#define iaalloc JEMALLOC_N(iaalloc)
#define ialloc JEMALLOC_N(ialloc)
#define iallocztm JEMALLOC_N(iallocztm)
#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
#define idalloc JEMALLOC_N(idalloc)
#define idalloct JEMALLOC_N(idalloct)
#define imalloc JEMALLOC_N(imalloc)
#define imalloct JEMALLOC_N(imalloct)
#define idalloctm JEMALLOC_N(idalloctm)
#define in_valgrind JEMALLOC_N(in_valgrind)
#define index2size JEMALLOC_N(index2size)
#define index2size_compute JEMALLOC_N(index2size_compute)
#define index2size_lookup JEMALLOC_N(index2size_lookup)
#define index2size_tab JEMALLOC_N(index2size_tab)
#define ipalloc JEMALLOC_N(ipalloc)
#define ipalloct JEMALLOC_N(ipalloct)
#define ipallocztm JEMALLOC_N(ipallocztm)
#define iqalloc JEMALLOC_N(iqalloc)
#define iqalloct JEMALLOC_N(iqalloct)
#define iralloc JEMALLOC_N(iralloc)
#define iralloct JEMALLOC_N(iralloct)
#define iralloct_realign JEMALLOC_N(iralloct_realign)
#define isalloc JEMALLOC_N(isalloc)
#define isdalloct JEMALLOC_N(isdalloct)
#define isqalloc JEMALLOC_N(isqalloc)
#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define ixalloc JEMALLOC_N(ixalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define large_maxclass JEMALLOC_N(large_maxclass)
#define lg_floor JEMALLOC_N(lg_floor)
#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner)
#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner)
#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
@@ -242,7 +334,8 @@
#define malloc_printf JEMALLOC_N(malloc_printf)
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
@@ -251,16 +344,35 @@
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
#define malloc_write JEMALLOC_N(malloc_write)
#define map_bias JEMALLOC_N(map_bias)
#define map_misc_offset JEMALLOC_N(map_misc_offset)
#define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot)
#define narenas_auto JEMALLOC_N(narenas_auto)
#define narenas_total JEMALLOC_N(narenas_total)
#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup)
#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define nhclasses JEMALLOC_N(nhclasses)
#define nlclasses JEMALLOC_N(nlclasses)
#define nstime_add JEMALLOC_N(nstime_add)
#define nstime_compare JEMALLOC_N(nstime_compare)
#define nstime_copy JEMALLOC_N(nstime_copy)
#define nstime_divide JEMALLOC_N(nstime_divide)
#define nstime_idivide JEMALLOC_N(nstime_idivide)
#define nstime_imultiply JEMALLOC_N(nstime_imultiply)
#define nstime_init JEMALLOC_N(nstime_init)
#define nstime_init2 JEMALLOC_N(nstime_init2)
#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
#define nstime_ns JEMALLOC_N(nstime_ns)
#define nstime_nsec JEMALLOC_N(nstime_nsec)
#define nstime_sec JEMALLOC_N(nstime_sec)
#define nstime_subtract JEMALLOC_N(nstime_subtract)
#define nstime_update JEMALLOC_N(nstime_update)
#define opt_abort JEMALLOC_N(opt_abort)
#define opt_decay_time JEMALLOC_N(opt_decay_time)
#define opt_dss JEMALLOC_N(opt_dss)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
#define opt_junk_free JEMALLOC_N(opt_junk_free)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
@@ -274,140 +386,254 @@
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
#define opt_purge JEMALLOC_N(opt_purge)
#define opt_quarantine JEMALLOC_N(opt_quarantine)
#define opt_redzone JEMALLOC_N(opt_redzone)
#define opt_stats_print JEMALLOC_N(opt_stats_print)
#define opt_tcache JEMALLOC_N(opt_tcache)
#define opt_thp JEMALLOC_N(opt_thp)
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_valgrind JEMALLOC_N(opt_valgrind)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
#define pages_boot JEMALLOC_N(pages_boot)
#define pages_commit JEMALLOC_N(pages_commit)
#define pages_decommit JEMALLOC_N(pages_decommit)
#define pages_huge JEMALLOC_N(pages_huge)
#define pages_map JEMALLOC_N(pages_map)
#define pages_nohuge JEMALLOC_N(pages_nohuge)
#define pages_purge JEMALLOC_N(pages_purge)
#define pow2_ceil JEMALLOC_N(pow2_ceil)
#define pages_trim JEMALLOC_N(pages_trim)
#define pages_unmap JEMALLOC_N(pages_unmap)
#define pind2sz JEMALLOC_N(pind2sz)
#define pind2sz_compute JEMALLOC_N(pind2sz_compute)
#define pind2sz_lookup JEMALLOC_N(pind2sz_lookup)
#define pind2sz_tab JEMALLOC_N(pind2sz_tab)
#define pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32)
#define pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64)
#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu)
#define prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32)
#define prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64)
#define prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu)
#define prng_range_u32 JEMALLOC_N(prng_range_u32)
#define prng_range_u64 JEMALLOC_N(prng_range_u64)
#define prng_range_zu JEMALLOC_N(prng_range_zu)
#define prng_state_next_u32 JEMALLOC_N(prng_state_next_u32)
#define prng_state_next_u64 JEMALLOC_N(prng_state_next_u64)
#define prng_state_next_zu JEMALLOC_N(prng_state_next_zu)
#define prof_active JEMALLOC_N(prof_active)
#define prof_active_get JEMALLOC_N(prof_active_get)
#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked)
#define prof_active_set JEMALLOC_N(prof_active_set)
#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep)
#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
#define prof_bt_count JEMALLOC_N(prof_bt_count)
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
#define prof_dump_header JEMALLOC_N(prof_dump_header)
#define prof_dump_open JEMALLOC_N(prof_dump_open)
#define prof_free JEMALLOC_N(prof_free)
#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked)
#define prof_gdump_set JEMALLOC_N(prof_gdump_set)
#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_interval JEMALLOC_N(prof_interval)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
#define prof_prefork JEMALLOC_N(prof_prefork)
#define prof_promote JEMALLOC_N(prof_promote)
#define prof_prefork0 JEMALLOC_N(prof_prefork0)
#define prof_prefork1 JEMALLOC_N(prof_prefork1)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_reset JEMALLOC_N(prof_reset)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
#define prof_tctx_get JEMALLOC_N(prof_tctx_get)
#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset)
#define prof_tctx_set JEMALLOC_N(prof_tctx_set)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_count JEMALLOC_N(prof_tdata_count)
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
#define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
#define psz2ind JEMALLOC_N(psz2ind)
#define psz2u JEMALLOC_N(psz2u)
#define purge_mode_names JEMALLOC_N(purge_mode_names)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
#define quarantine_boot JEMALLOC_N(quarantine_boot)
#define quarantine_booted JEMALLOC_N(quarantine_booted)
#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work)
#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
#define quarantine_init JEMALLOC_N(quarantine_init)
#define quarantine_tls JEMALLOC_N(quarantine_tls)
#define quarantine_tsd JEMALLOC_N(quarantine_tsd)
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper)
#define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head)
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
#define register_zone JEMALLOC_N(register_zone)
#define rtree_child_read JEMALLOC_N(rtree_child_read)
#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard)
#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread)
#define rtree_delete JEMALLOC_N(rtree_delete)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
#define rtree_prefork JEMALLOC_N(rtree_prefork)
#define rtree_node_valid JEMALLOC_N(rtree_node_valid)
#define rtree_set JEMALLOC_N(rtree_set)
#define rtree_start_level JEMALLOC_N(rtree_start_level)
#define rtree_subkey JEMALLOC_N(rtree_subkey)
#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read)
#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard)
#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread)
#define rtree_val_read JEMALLOC_N(rtree_val_read)
#define rtree_val_write JEMALLOC_N(rtree_val_write)
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
#define s2u JEMALLOC_N(s2u)
#define s2u_compute JEMALLOC_N(s2u_compute)
#define s2u_lookup JEMALLOC_N(s2u_lookup)
#define sa2u JEMALLOC_N(sa2u)
#define set_errno JEMALLOC_N(set_errno)
#define small_size2bin JEMALLOC_N(small_size2bin)
#define size2index JEMALLOC_N(size2index)
#define size2index_compute JEMALLOC_N(size2index_compute)
#define size2index_lookup JEMALLOC_N(size2index_lookup)
#define size2index_tab JEMALLOC_N(size2index_tab)
#define spin_adaptive JEMALLOC_N(spin_adaptive)
#define spin_init JEMALLOC_N(spin_init)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
#define stats_chunks JEMALLOC_N(stats_chunks)
#define stats_print JEMALLOC_N(stats_print)
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
#define tcache_booted JEMALLOC_N(tcache_booted)
#define tcache_boot JEMALLOC_N(tcache_boot)
#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
#define tcache_create JEMALLOC_N(tcache_create)
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
#define tcache_destroy JEMALLOC_N(tcache_destroy)
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup)
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper)
#define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_initialized JEMALLOC_N(tcache_initialized)
#define tcache_get_hard JEMALLOC_N(tcache_get_hard)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child)
#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent)
#define tcache_prefork JEMALLOC_N(tcache_prefork)
#define tcache_salloc JEMALLOC_N(tcache_salloc)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
#define tcache_tsd JEMALLOC_N(tcache_tsd)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper)
#define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper)
#define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head)
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
#define tcaches JEMALLOC_N(tcaches)
#define tcaches_create JEMALLOC_N(tcaches_create)
#define tcaches_destroy JEMALLOC_N(tcaches_destroy)
#define tcaches_flush JEMALLOC_N(tcaches_flush)
#define tcaches_get JEMALLOC_N(tcaches_get)
#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup)
#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup)
#define ticker_copy JEMALLOC_N(ticker_copy)
#define ticker_init JEMALLOC_N(ticker_init)
#define ticker_read JEMALLOC_N(ticker_read)
#define ticker_tick JEMALLOC_N(ticker_tick)
#define ticker_ticks JEMALLOC_N(ticker_ticks)
#define tsd_arena_get JEMALLOC_N(tsd_arena_get)
#define tsd_arena_set JEMALLOC_N(tsd_arena_set)
#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get)
#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get)
#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set)
#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get)
#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get)
#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set)
#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get)
#define tsd_boot JEMALLOC_N(tsd_boot)
#define tsd_boot0 JEMALLOC_N(tsd_boot0)
#define tsd_boot1 JEMALLOC_N(tsd_boot1)
#define tsd_booted JEMALLOC_N(tsd_booted)
#define tsd_booted_get JEMALLOC_N(tsd_booted_get)
#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper)
#define tsd_fetch JEMALLOC_N(tsd_fetch)
#define tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl)
#define tsd_get JEMALLOC_N(tsd_get)
#define tsd_get_allocates JEMALLOC_N(tsd_get_allocates)
#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get)
#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set)
#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get)
#define tsd_initialized JEMALLOC_N(tsd_initialized)
#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
#define tsd_init_head JEMALLOC_N(tsd_init_head)
#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get)
#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set)
#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get)
#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
#define tsd_nominal JEMALLOC_N(tsd_nominal)
#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get)
#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set)
#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get)
#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get)
#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set)
#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get)
#define tsd_set JEMALLOC_N(tsd_set)
#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get)
#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set)
#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get)
#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get)
#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set)
#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get)
#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get)
#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set)
#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get)
#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get)
#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set)
#define tsd_thread_deallocatedp_get JEMALLOC_N(tsd_thread_deallocatedp_get)
#define tsd_tls JEMALLOC_N(tsd_tls)
#define tsd_tsd JEMALLOC_N(tsd_tsd)
#define tsd_tsdn JEMALLOC_N(tsd_tsdn)
#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get)
#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set)
#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get)
#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get)
#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set)
#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get)
#define tsdn_fetch JEMALLOC_N(tsdn_fetch)
#define tsdn_null JEMALLOC_N(tsdn_null)
#define tsdn_tsd JEMALLOC_N(tsdn_tsd)
#define u2rz JEMALLOC_N(u2rz)
#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)
#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined)
#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess)
#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined)
#define witness_assert_depth JEMALLOC_N(witness_assert_depth)
#define witness_assert_depth_to_rank JEMALLOC_N(witness_assert_depth_to_rank)
#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless)
#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner)
#define witness_assert_owner JEMALLOC_N(witness_assert_owner)
#define witness_depth_error JEMALLOC_N(witness_depth_error)
#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup)
#define witness_init JEMALLOC_N(witness_init)
#define witness_lock JEMALLOC_N(witness_lock)
#define witness_lock_error JEMALLOC_N(witness_lock_error)
#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
#define witness_owner JEMALLOC_N(witness_owner)
#define witness_owner_error JEMALLOC_N(witness_owner_error)
#define witness_postfork_child JEMALLOC_N(witness_postfork_child)
#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
#define witness_prefork JEMALLOC_N(witness_prefork)
#define witness_unlock JEMALLOC_N(witness_unlock)
#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup)
#define zone_register JEMALLOC_N(zone_register)
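/*
 * Editor's note (illustrative, not part of the diff): each line above maps an
 * internal symbol through JEMALLOC_N(), which prepends the configure-time
 * private namespace prefix -- "je_" is the common default -- so a statically
 * linked jemalloc cannot collide with the host binary's symbols. For example,
 * assuming the default prefix:
 *
 *   #define arena_new JEMALLOC_N(arena_new)   // expands to je_arena_new
 */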


@@ -1,5 +1,8 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
/*
* Simple linear congruential pseudo-random number generator:
@@ -15,46 +18,168 @@
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
* proportional to bit position. For example. the lowest bit has a cycle of 2,
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*
* Macro parameters:
* uint32_t r : Result.
* unsigned lg_range : (0..32], number of least significant bits to return.
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/
#define prng32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 32); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (32 - lg_range); \
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (64 - lg_range); \
} while (false)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state) {
return (state * PRNG_A_32) + PRNG_C_32;
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state) {
return (state * PRNG_A_64) + PRNG_C_64;
}
JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
return (state * PRNG_A_32) + PRNG_C_32;
#elif LG_SIZEOF_PTR == 3
return (state * PRNG_A_64) + PRNG_C_64;
#else
#error Unsupported pointer size
#endif
}
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
/* BEGIN PUBLIC API */
/******************************************************************************/
/*
* The prng_lg_range functions give a uniform int in the half-open range [0,
* 2**lg_range). If atomic is true, they do so safely from multiple threads.
* Multithreaded 64-bit prngs aren't supported.
*/
JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
uint32_t ret, state0, state1;
assert(lg_range > 0);
assert(lg_range <= 32);
state0 = atomic_load_u32(state, ATOMIC_RELAXED);
if (atomic) {
do {
state1 = prng_state_next_u32(state0);
} while (!atomic_compare_exchange_weak_u32(state, &state0,
state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
} else {
state1 = prng_state_next_u32(state0);
atomic_store_u32(state, state1, ATOMIC_RELAXED);
}
ret = state1 >> (32 - lg_range);
return ret;
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
uint64_t ret, state1;
assert(lg_range > 0);
assert(lg_range <= 64);
state1 = prng_state_next_u64(*state);
*state = state1;
ret = state1 >> (64 - lg_range);
return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
size_t ret, state0, state1;
assert(lg_range > 0);
assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
state0 = atomic_load_zu(state, ATOMIC_RELAXED);
if (atomic) {
do {
state1 = prng_state_next_zu(state0);
} while (!atomic_compare_exchange_weak_zu(state, &state0,
state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
} else {
state1 = prng_state_next_zu(state0);
atomic_store_zu(state, state1, ATOMIC_RELAXED);
}
ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
return ret;
}
/*
* The prng_range functions behave like the prng_lg_range, but return a result
* in [0, range) instead of [0, 2**lg_range).
*/
JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
uint32_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_u32(state, lg_range, atomic);
} while (ret >= range);
return ret;
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
uint64_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_u64(state, lg_range);
} while (ret >= range);
return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
size_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_zu(state, lg_range, atomic);
} while (ret >= range);
return ret;
}
#endif /* JEMALLOC_INTERNAL_PRNG_H */
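/*
 * Editor's sketch (standalone code, not jemalloc API): the essence of the
 * functions above is (1) advance an LCG and keep only the *upper* lg_range
 * bits, and (2) rejection-sample to shrink [0, 2**lg_range) down to
 * [0, range). A minimal 64-bit version using the same a/c constants:
 */
#include <assert.h>
#include <stdint.h>

static uint64_t
demo_prng_lg_range(uint64_t *state, unsigned lg_range) {
	assert(lg_range > 0 && lg_range <= 64);
	*state = (*state * UINT64_C(6364136223846793005)) +
	    UINT64_C(1442695040888963407);
	return *state >> (64 - lg_range);	/* upper bits: best quality */
}

static uint64_t
demo_prng_range(uint64_t *state, uint64_t range) {
	assert(range > 1);
	/* Ceiling of lg(range); gcc/clang builtin used for brevity. */
	unsigned lg_range = 64 - (unsigned)__builtin_clzll(range - 1);
	uint64_t r;
	do {
		r = demo_prng_lg_range(state, lg_range);
	} while (r >= range);	/* reject out-of-range draws; <2 tries avg */
	return r;
}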


@@ -0,0 +1,92 @@
#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
#include "jemalloc/internal/mutex.h"
extern malloc_mutex_t bt2gctx_mtx;
extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
extern char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PATH_MAX +
#endif
1];
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;
/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
* profile dump when it reaches this threshold. The effect is that the
* interval between profile dumps averages prof_interval, though the actual
* interval between dumps will tend to be sporadic, and the interval will be a
* maximum of approximately (prof_interval * narenas).
*/
extern uint64_t prof_interval;
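/*
 * Worked example (editor's illustration): with opt_lg_prof_interval = 30,
 * prof_interval is 2^30 bytes (1 GiB). With narenas = 4, each arena dumps
 * once per GiB it allocates itself, so dumps average one per GiB of total
 * allocation, but if the arenas' counters happen to be in phase the gap can
 * approach 4 GiB, as the comment above describes.
 */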
/*
* Initialized as opt_lg_prof_sample, and potentially modified during profiling
* resets.
*/
extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
#endif
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *JET_MUTABLE prof_dump_open;
typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *JET_MUTABLE prof_dump_header;
#ifdef JEMALLOC_JET
void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
uint64_t *accumbytes);
#endif
bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */


@@ -0,0 +1,72 @@
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H
#include "jemalloc/internal/mutex.h"
static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
cassert(config_prof);
bool overflow;
uint64_t a0, a1;
/*
* If the application allocates fast enough (and/or if idump is slow
* enough), extreme overflow here (a1 >= prof_interval * 2) can cause
* idump trigger coalescing. This is an intentional mechanism that
* avoids rate-limiting allocation.
*/
#ifdef JEMALLOC_ATOMIC_U64
a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
do {
a1 = a0 + accumbytes;
assert(a1 >= a0);
overflow = (a1 >= prof_interval);
if (overflow) {
a1 %= prof_interval;
}
} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
malloc_mutex_lock(tsdn, &prof_accum->mtx);
a0 = prof_accum->accumbytes;
a1 = a0 + accumbytes;
overflow = (a1 >= prof_interval);
if (overflow) {
a1 %= prof_interval;
}
prof_accum->accumbytes = a1;
malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
return overflow;
}
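/*
 * Editor's sketch of the intended caller pattern (an inference; the real
 * call sites live in the arena code, which is not shown in this hunk): the
 * boolean result is the interval-dump trigger.
 *
 *   if (prof_accum_add(tsdn, &prof_accum, accumbytes)) {
 *       prof_idump(tsdn);
 *   }
 */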
static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
cassert(config_prof);
/*
* Cancel out as much of the excessive prof_accumbytes increase as
* possible without underflowing. Interval-triggered dumps occur
* slightly more often than intended as a result of incomplete
* canceling.
*/
uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
do {
a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS -
usize) : 0;
} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
malloc_mutex_lock(tsdn, &prof_accum->mtx);
a0 = prof_accum->accumbytes;
a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) :
0;
prof_accum->accumbytes = a1;
malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}
#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */


@@ -0,0 +1,217 @@
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
#include "jemalloc/internal/sz.h"
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
* prof_active in the fast path, so there are no guarantees regarding
* how long it will take for all threads to notice state changes.
*/
return prof_active;
}
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
/*
* No locking is used when reading prof_gdump_val in the fast path, so
* there are no guarantees regarding how long it will take for all
* threads to notice state changes.
*/
return prof_gdump_val;
}
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create) {
prof_tdata_t *tdata;
cassert(config_prof);
tdata = tsd_prof_tdata_get(tsd);
if (create) {
if (unlikely(tdata == NULL)) {
if (tsd_nominal(tsd)) {
tdata = prof_tdata_init(tsd);
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
tdata = prof_tdata_reinit(tsd, tdata);
tsd_prof_tdata_set(tsd, tdata);
}
assert(tdata == NULL || tdata->attached);
}
return tdata;
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
cassert(config_prof);
assert(ptr != NULL);
return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_reset(tsdn, ptr, tctx);
}
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
prof_tdata_t **tdata_out) {
prof_tdata_t *tdata;
cassert(config_prof);
tdata = prof_tdata_get(tsd, true);
if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
tdata = NULL;
}
if (tdata_out != NULL) {
*tdata_out = tdata;
}
if (unlikely(tdata == NULL)) {
return true;
}
if (likely(tdata->bytes_until_sample >= usize)) {
if (update) {
tdata->bytes_until_sample -= usize;
}
return true;
} else {
if (tsd_reentrancy_level_get(tsd) > 0) {
return true;
}
/* Compute new sample threshold. */
if (update) {
prof_sample_threshold_update(tdata);
}
return !tdata->active;
}
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
prof_tctx_t *ret;
prof_tdata_t *tdata;
prof_bt_t bt;
assert(usize == sz_s2u(usize));
if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
&tdata))) {
ret = (prof_tctx_t *)(uintptr_t)1U;
} else {
bt_init(&bt, tdata->vec);
prof_backtrace(&bt);
ret = prof_lookup(tsd, &bt);
}
return ret;
}
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
assert(usize == isalloc(tsdn, ptr));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
prof_malloc_sample_object(tsdn, ptr, usize, tctx);
} else {
prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
(prof_tctx_t *)(uintptr_t)1U);
}
}
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
prof_tctx_t *old_tctx) {
bool sampled, old_sampled, moved;
cassert(config_prof);
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
assert(usize == isalloc(tsd_tsdn(tsd), ptr));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
* was larger than what actually got allocated, so a
* backtrace was captured for this allocation, even
* though its actual usize was insufficient to cross the
* sample threshold.
*/
prof_alloc_rollback(tsd, tctx, true);
tctx = (prof_tctx_t *)(uintptr_t)1U;
}
}
sampled = ((uintptr_t)tctx > (uintptr_t)1U);
old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
moved = (ptr != old_ptr);
if (unlikely(sampled)) {
prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
} else if (moved) {
prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
(prof_tctx_t *)(uintptr_t)1U);
} else if (unlikely(old_sampled)) {
/*
* prof_tctx_set() would work for the !moved case as well, but
* prof_tctx_reset() is slightly cheaper, and the proper thing
* to do here in the presence of explicit knowledge re: moved
* state.
*/
prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
} else {
assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
(uintptr_t)1U);
}
/*
* The prof_free_sampled_object() call must come after the
* prof_malloc_sample_object() call, because tctx and old_tctx may be
* the same, in which case reversing the call order could cause the tctx
* to be prematurely destroyed as a side effect of momentarily zeroed
* counters.
*/
if (unlikely(old_sampled)) {
prof_free_sampled_object(tsd, old_usize, old_tctx);
}
}
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
cassert(config_prof);
assert(usize == isalloc(tsd_tsdn(tsd), ptr));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
prof_free_sampled_object(tsd, usize, tctx);
}
}
#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
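/*
 * Editor's note on the (prof_tctx_t *)(uintptr_t)1U sentinel used throughout
 * this file (inferred from the code above): a tctx "pointer" of 1U marks an
 * unsampled allocation, so only genuinely sampled objects (tctx > 1U) pay
 * for bookkeeping. A typical allocation path would look like:
 *
 *   prof_tctx_t *tctx = prof_alloc_prep(tsd, usize,
 *       prof_active_get_unlocked(), true);
 *   void *ptr = ...;   // perform the allocation
 *   prof_malloc(tsd_tsdn(tsd), ptr, usize, alloc_ctx, tctx);
 */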


@@ -0,0 +1,201 @@
#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/rb.h"
struct prof_bt_s {
/* Backtrace, stored as len program counters. */
void **vec;
unsigned len;
};
#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
prof_bt_t *bt;
unsigned max;
} prof_unwind_data_t;
#endif
struct prof_accum_s {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_t mtx;
uint64_t accumbytes;
#else
atomic_u64_t accumbytes;
#endif
};
struct prof_cnt_s {
/* Profiling counters. */
uint64_t curobjs;
uint64_t curbytes;
uint64_t accumobjs;
uint64_t accumbytes;
};
typedef enum {
prof_tctx_state_initializing,
prof_tctx_state_nominal,
prof_tctx_state_dumping,
prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;
struct prof_tctx_s {
/* Thread data for thread that performed the allocation. */
prof_tdata_t *tdata;
/*
* Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
* defunct during teardown.
*/
uint64_t thr_uid;
uint64_t thr_discrim;
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t cnts;
/* Associated global context. */
prof_gctx_t *gctx;
/*
* UID that distinguishes multiple tctx's created by the same thread,
* but coexisting in gctx->tctxs. There are two ways that such
* coexistence can occur:
* - A dumper thread can cause a tctx to be retained in the purgatory
* state.
* - Although a single "producer" thread must create all tctx's which
* share the same thr_uid, multiple "consumers" can each concurrently
* execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
* gets called once each time cnts.cur{objs,bytes} drop to 0, but this
* threshold can be hit again before the first consumer finishes
* executing prof_tctx_destroy().
*/
uint64_t tctx_uid;
/* Linkage into gctx's tctxs. */
rb_node(prof_tctx_t) tctx_link;
/*
* True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
* sample vs destroy race.
*/
bool prepared;
/* Current dump-related state, protected by gctx->lock. */
prof_tctx_state_t state;
/*
* Copy of cnts snapshotted during early dump phase, protected by
* dump_mtx.
*/
prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
struct prof_gctx_s {
/* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t *lock;
/*
* Number of threads that currently cause this gctx to be in a state of
* limbo due to one of:
* - Initializing this gctx.
* - Initializing per thread counters associated with this gctx.
* - Preparing to destroy this gctx.
* - Dumping a heap profile that includes this gctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* gctx.
*/
unsigned nlimbo;
/*
* Tree of profile counters, one for each thread that has allocated in
* this context.
*/
prof_tctx_tree_t tctxs;
/* Linkage for tree of contexts to be dumped. */
rb_node(prof_gctx_t) dump_link;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Associated backtrace. */
prof_bt_t bt;
/* Backtrace vector, variable size, referred to by bt. */
void *vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
struct prof_tdata_s {
malloc_mutex_t *lock;
/* Monotonically increasing unique thread identifier. */
uint64_t thr_uid;
/*
* Monotonically increasing discriminator among tdata structures
* associated with the same thr_uid.
*/
uint64_t thr_discrim;
/* Included in heap profile dumps if non-NULL. */
char *thread_name;
bool attached;
bool expired;
rb_node(prof_tdata_t) tdata_link;
/*
* Counter used to initialize prof_tctx_t's tctx_uid. No locking is
* necessary when incrementing this field, because only one thread ever
* does so.
*/
uint64_t tctx_uid_next;
/*
* Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
* backtraces for which it has non-zero allocation/deallocation counters
* associated with thread-specific prof_tctx_t objects. Other threads
* may write to prof_tctx_t contents when freeing associated objects.
*/
ckh_t bt2tctx;
/* Sampling state. */
uint64_t prng_state;
uint64_t bytes_until_sample;
/* State used to avoid dumping while operating on prof internals. */
bool enq;
bool enq_idump;
bool enq_gdump;
/*
* Set to true during an early dump phase for tdata's which are
* currently being dumped. New threads' tdata's have this initialized
* to false so that they aren't accidentally included in later dump
* phases.
*/
bool dumping;
/*
* True if profiling is active for this tdata's thread
* (thread.prof.active mallctl).
*/
bool active;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Backtrace vector, used for calls to prof_backtrace(). */
void *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */


@@ -0,0 +1,56 @@
#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
#define JEMALLOC_INTERNAL_PROF_TYPES_H
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_accum_s prof_accum_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
* is based on __builtin_return_address() necessarily has a hard-coded number
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#define PROF_BT_MAX 128
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
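/*
 * Editor's sketch of how these near-NULL sentinels are consumed (cf.
 * prof_sample_accum_update() earlier in this diff): real tdata pointers are
 * always greater than PROF_TDATA_STATE_MAX, so a single comparison filters
 * out every shutdown state:
 *
 *   if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
 *       tdata = NULL;   // reincarnated/purgatory: no usable thread data
 *   }
 */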

View File

@@ -1,6 +1,9 @@
/*
* List definitions.
*/
#ifndef JEMALLOC_INTERNAL_QL_H
#define JEMALLOC_INTERNAL_QL_H
#include "jemalloc/internal/qr.h"
/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
@@ -81,3 +84,5 @@ struct { \
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
#endif /* JEMALLOC_INTERNAL_QL_H */
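
For context, a typical use of these list macros looks like the following sketch; it assumes the companion macros (ql_elm, ql_new, ql_elm_new, ql_tail_insert, ql_foreach) that the full header defines:

typedef struct widget_s widget_t;
struct widget_s {
	int              id;
	ql_elm(widget_t) link;	/* embedded list linkage */
};

typedef ql_head(widget_t) widget_list_t;

static void
widget_list_demo(widget_t *a, widget_t *b) {
	widget_list_t list;
	ql_new(&list);
	ql_elm_new(a, link);
	ql_elm_new(b, link);
	ql_tail_insert(&list, a, link);
	ql_tail_insert(&list, b, link);
	widget_t *w;
	ql_foreach(w, &list, link) {
		/* visits a, then b */
	}
}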

View File

@@ -1,3 +1,6 @@
#ifndef JEMALLOC_INTERNAL_QR_H
#define JEMALLOC_INTERNAL_QR_H
/* Ring definitions. */
#define qr(a_type) \
struct { \
@@ -22,17 +25,15 @@ struct { \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_after_insert(a_qrelm, a_qr, a_field) \
do \
{ \
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
a_type *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
@@ -40,10 +41,12 @@ struct { \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
/*
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
@@ -65,3 +68,5 @@ struct { \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))
#endif /* JEMALLOC_INTERNAL_QR_H */
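
The hunk above changes qr_meld()/qr_split() to take the element type so the temporary is properly typed instead of void *. A small sketch of the updated call form, assuming the qr_new/qr_foreach macros from this header:

typedef struct node_s node_t;
struct node_s {
	int        val;
	qr(node_t) link;
};

static void
ring_demo(node_t *x, node_t *y) {
	qr_new(x, link);		/* two singleton rings */
	qr_new(y, link);
	qr_meld(x, y, node_t, link);	/* typed temporary, per the new form */
	node_t *n;
	qr_foreach(n, x, link) {
		/* visits x, then y */
	}
}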

View File

@@ -22,6 +22,10 @@
#ifndef RB_H_
#define RB_H_
#ifndef __PGI
#define RB_COMPACT
#endif
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
@@ -42,7 +46,6 @@ struct { \
#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
a_type rbt_nil; \
}
/* Left accessors. */
@@ -79,6 +82,15 @@ struct { \
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)
/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
/* Bookkeeping bit cannot be used by node pointer. */ \
assert(((uintptr_t)(a_node) & 0x1) == 0); \
rbtn_left_set(a_type, a_field, (a_node), NULL); \
rbtn_right_set(a_type, a_field, (a_node), NULL); \
rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
#else
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
@@ -99,28 +111,26 @@ struct { \
#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = false; \
} while (0)
#endif
/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
rbtn_left_set(a_type, a_field, (a_node), NULL); \
rbtn_right_set(a_type, a_field, (a_node), NULL); \
rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
#endif
/* Tree initializer. */
#define rb_new(a_type, a_field, a_rbt) do { \
(a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \
rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \
rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \
(a_rbt)->rbt_root = NULL; \
} while (0)
/* Internal utility macros. */
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != &(a_rbt)->rbt_nil) { \
if ((r_node) != NULL) { \
for (; \
rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\
rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
(r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
} \
} \
@@ -128,10 +138,9 @@ struct { \
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != &(a_rbt)->rbt_nil) { \
for (; rbtn_right_get(a_type, a_field, (r_node)) != \
&(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
(r_node))) { \
if ((r_node) != NULL) { \
for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
(r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
} \
} \
} while (0)
@@ -158,6 +167,8 @@ struct { \
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree); \
a_attr a_type * \
@@ -167,11 +178,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, a_type *key); \
a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
a_attr void \
@@ -181,7 +192,10 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg); \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg);
/*
* The rb_gen() macro generates a type-specific red-black tree implementation,
@@ -198,7 +212,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
* int (a_cmp *)(a_type *a_node, a_type *a_other);
* ^^^^^^
* or a_key
* Interpretation of comparision function return values:
* Interpretation of comparison function return values:
* -1 : a_node < a_other
* 0 : a_node == a_other
* 1 : a_node > a_other
@@ -224,6 +238,13 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
* Args:
* tree: Pointer to an uninitialized red-black tree object.
*
* static bool
* ex_empty(ex_t *tree);
* Description: Determine whether tree is empty.
* Args:
* tree: Pointer to an initialized red-black tree object.
* Ret: True if tree is empty, false otherwise.
*
* static ex_node_t *
* ex_first(ex_t *tree);
* static ex_node_t *
@@ -245,7 +266,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
* last/first.
*
* static ex_node_t *
* ex_search(ex_t *tree, ex_node_t *key);
* ex_search(ex_t *tree, const ex_node_t *key);
* Description: Search for node that matches key.
* Args:
* tree: Pointer to an initialized red-black tree object.
@@ -253,9 +274,9 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
* Ret: Node in tree that matches key, or NULL if no match.
*
* static ex_node_t *
* ex_nsearch(ex_t *tree, ex_node_t *key);
* ex_nsearch(ex_t *tree, const ex_node_t *key);
* static ex_node_t *
* ex_psearch(ex_t *tree, ex_node_t *key);
* ex_psearch(ex_t *tree, const ex_node_t *key);
* Description: Search for node that matches key. If no match is found,
* return what would be key's successor/predecessor, were
* key in tree.
@@ -303,40 +324,52 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
* arg : Opaque pointer passed to cb().
* Ret: NULL if iteration completed, or the non-NULL callback return value
* that caused termination of the iteration.
*
* static void
* ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
* Description: Iterate over the tree with post-order traversal, remove
* each node, and run the callback if non-null. This is
* used for destroying a tree without paying the cost to
* rebalance it. The tree must not be otherwise altered
* during traversal.
* Args:
* tree: Pointer to an initialized red-black tree object.
* cb : Callback function, which, if non-null, is called for each node
* during iteration. There is no way to stop iteration once it
* has begun.
* arg : Opaque pointer passed to cb().
*/
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
rb_new(a_type, a_field, rbtree); \
} \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree) { \
return (rbtree->rbt_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
} \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
} \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
if (rbtn_right_get(a_type, a_field, node) != NULL) { \
rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
assert(tnode != &rbtree->rbt_nil); \
ret = &rbtree->rbt_nil; \
assert(tnode != NULL); \
ret = NULL; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
@@ -347,24 +380,21 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
} else { \
break; \
} \
assert(tnode != &rbtree->rbt_nil); \
assert(tnode != NULL); \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
if (rbtn_left_get(a_type, a_field, node) != NULL) { \
rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
assert(tnode != &rbtree->rbt_nil); \
ret = &rbtree->rbt_nil; \
assert(tnode != NULL); \
ret = NULL; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
@@ -375,20 +405,17 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
} else { \
break; \
} \
assert(tnode != &rbtree->rbt_nil); \
assert(tnode != NULL); \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
a_type *ret; \
int cmp; \
ret = rbtree->rbt_root; \
while (ret != &rbtree->rbt_nil \
while (ret != NULL \
&& (cmp = (a_cmp)(key, ret)) != 0) { \
if (cmp < 0) { \
ret = rbtn_left_get(a_type, a_field, ret); \
@@ -396,17 +423,14 @@ a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
ret = rbtn_right_get(a_type, a_field, ret); \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
ret = &rbtree->rbt_nil; \
while (tnode != &rbtree->rbt_nil) { \
ret = NULL; \
while (tnode != NULL) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
ret = tnode; \
@@ -418,17 +442,14 @@ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
break; \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
ret = &rbtree->rbt_nil; \
while (tnode != &rbtree->rbt_nil) { \
ret = NULL; \
while (tnode != NULL) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
@@ -440,10 +461,7 @@ a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
break; \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
return ret; \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
@@ -454,7 +472,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbt_node_new(a_type, a_field, rbtree, node); \
/* Wind. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
for (pathp = path; pathp->node != NULL; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
assert(cmp != 0); \
if (cmp < 0) { \
@@ -474,7 +492,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_left_set(a_type, a_field, cnode, left); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (rbtn_red_get(a_type, a_field, leftleft)) { \
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
leftleft)) { \
/* Fix up 4-node. */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
@@ -489,7 +508,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, cnode, right); \
if (rbtn_red_get(a_type, a_field, right)) { \
a_type *left = rbtn_left_get(a_type, a_field, cnode); \
if (rbtn_red_get(a_type, a_field, left)) { \
if (left != NULL && rbtn_red_get(a_type, a_field, \
left)) { \
/* Split 4-node. */ \
rbtn_black_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, right); \
@@ -522,7 +542,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* Wind. */ \
nodep = NULL; /* Silence compiler warning. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
for (pathp = path; pathp->node != NULL; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
@@ -534,8 +554,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* Find node's successor, in preparation for swap. */ \
pathp->cmp = 1; \
nodep = pathp; \
for (pathp++; pathp->node != &rbtree->rbt_nil; \
pathp++) { \
for (pathp++; pathp->node != NULL; pathp++) { \
pathp->cmp = -1; \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
@@ -577,10 +596,10 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
} else { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
if (left != &rbtree->rbt_nil) { \
if (left != NULL) { \
/* node has no successor, but it has a left child. */\
/* Splice node out, without losing the left child. */\
assert(rbtn_red_get(a_type, a_field, node) == false); \
assert(!rbtn_red_get(a_type, a_field, node)); \
assert(rbtn_red_get(a_type, a_field, left)); \
rbtn_black_set(a_type, a_field, left); \
if (pathp == path) { \
@@ -597,34 +616,32 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} else if (pathp == path) { \
/* The tree only contained one node. */ \
rbtree->rbt_root = &rbtree->rbt_nil; \
rbtree->rbt_root = NULL; \
return; \
} \
} \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
/* Prune red node, which requires no fixup. */ \
assert(pathp[-1].cmp < 0); \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
&rbtree->rbt_nil); \
rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
return; \
} \
/* The node to be pruned is black, so unwind until balance is */\
/* restored. */\
pathp->node = &rbtree->rbt_nil; \
pathp->node = NULL; \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
assert(pathp->cmp != 0); \
if (pathp->cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp->node, \
pathp[1].node); \
assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
== false); \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
a_type *tnode; \
if (rbtn_red_get(a_type, a_field, rightleft)) { \
if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
rightleft)) { \
/* In the following diagrams, ||, //, and \\ */\
/* indicate the path to the removed node. */\
/* */\
@@ -667,7 +684,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
if (rbtn_red_get(a_type, a_field, rightleft)) { \
if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
rightleft)) { \
/* || */\
/* pathp(b) */\
/* // \ */\
@@ -681,7 +699,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */\
/* subree root, which may actually be the tree */\
/* subtree root, which may actually be the tree */\
/* root. */\
if (pathp == path) { \
/* Set root. */ \
@@ -721,7 +739,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
left); \
a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
leftright); \
if (rbtn_red_get(a_type, a_field, leftrightleft)) { \
if (leftrightleft != NULL && rbtn_red_get(a_type, \
a_field, leftrightleft)) { \
/* || */\
/* pathp(b) */\
/* / \\ */\
@@ -747,7 +766,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* (b) */\
/* / */\
/* (b) */\
assert(leftright != &rbtree->rbt_nil); \
assert(leftright != NULL); \
rbtn_red_set(a_type, a_field, leftright); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
@@ -770,7 +789,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (rbtn_red_get(a_type, a_field, leftleft)) { \
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
leftleft)) { \
/* || */\
/* pathp(r) */\
/* / \\ */\
@@ -808,7 +828,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
} else { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (rbtn_red_get(a_type, a_field, leftleft)) { \
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
leftleft)) { \
/* || */\
/* pathp(b) */\
/* / \\ */\
@@ -849,22 +870,22 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
/* Set root. */ \
rbtree->rbt_root = path->node; \
assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \
assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
} \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == &rbtree->rbt_nil) { \
return (&rbtree->rbt_nil); \
if (node == NULL) { \
return NULL; \
} else { \
a_type *ret; \
if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
a_field, node), cb, arg)) != &rbtree->rbt_nil \
|| (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
arg)) != NULL) { \
return ret; \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg)); \
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@@ -874,22 +895,22 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
if (cmp < 0) { \
a_type *ret; \
if ((ret = a_prefix##iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg)) != \
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg)); \
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg); \
} else if (cmp > 0) { \
return (a_prefix##iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg)); \
return a_prefix##iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
return ret; \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg)); \
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@@ -902,25 +923,22 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
} else { \
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
} \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
} \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == &rbtree->rbt_nil) { \
return (&rbtree->rbt_nil); \
if (node == NULL) { \
return NULL; \
} else { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != \
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
return a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@@ -931,22 +949,22 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
if (cmp > 0) { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != \
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
return a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} else if (cmp < 0) { \
return (a_prefix##reverse_iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
return a_prefix##reverse_iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
return ret; \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
return a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@@ -960,10 +978,29 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
cb, arg); \
} \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
return ret; \
} \
a_attr void \
a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
a_type *, void *), void *arg) { \
if (node == NULL) { \
return; \
} \
return (ret); \
a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
node), cb, arg); \
rbtn_left_set(a_type, a_field, (node), NULL); \
a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
node), cb, arg); \
rbtn_right_set(a_type, a_field, (node), NULL); \
if (cb) { \
cb(node, arg); \
} \
} \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg) { \
a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
rbtree->rbt_root = NULL; \
}
#endif /* RB_H_ */
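
Tying the documented ex_* interface together, a minimal instantiation sketch; the node type and comparator here are illustrative:

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	int                key;
	rb_node(ex_node_t) link;
};

static int
ex_cmp(const ex_node_t *a, const ex_node_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

typedef rb_tree(ex_node_t) ex_t;
rb_gen(static, ex_, ex_t, ex_node_t, link, ex_cmp)

static void
ex_demo(ex_node_t *n) {
	ex_t tree;
	ex_new(&tree);
	ex_insert(&tree, n);
	ex_node_t key;
	key.key = n->key;		/* only the key field is compared */
	ex_node_t *found = ex_search(&tree, &key);
	(void)found;
	ex_remove(&tree, n);
}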

View File

@@ -1,172 +1,474 @@
#ifndef JEMALLOC_INTERNAL_RTREE_H
#define JEMALLOC_INTERNAL_RTREE_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/tsd.h"
/*
* This radix tree implementation is tailored to the singular purpose of
* tracking which chunks are currently owned by jemalloc. This functionality
* is mandatory for OS X, where jemalloc must be able to respond to object
* ownership queries.
* associating metadata with extents that are currently owned by jemalloc.
*
*******************************************************************************
*/
#ifdef JEMALLOC_H_TYPES
typedef struct rtree_s rtree_t;
/* Number of high insignificant bits. */
#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
/* Number of low insignificant bits. */
#define RTREE_NLIB LG_PAGE
/* Number of significant bits. */
#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
/* Number of levels in radix tree. */
#if RTREE_NSB <= 10
# define RTREE_HEIGHT 1
#elif RTREE_NSB <= 36
# define RTREE_HEIGHT 2
#elif RTREE_NSB <= 52
# define RTREE_HEIGHT 3
#else
# error Unsupported number of significant virtual address bits
#endif
/* Use compact leaf representation if virtual address encoding allows. */
#if RTREE_NHIB >= LG_CEIL_NSIZES
# define RTREE_LEAF_COMPACT
#endif
/*
* Size of each radix tree node (must be a power of 2). This impacts tree
* depth.
*/
#define RTREE_NODESIZE (1U << 16)
/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
typedef void *(rtree_alloc_t)(size_t);
typedef void (rtree_dalloc_t)(void *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct rtree_s {
rtree_alloc_t *alloc;
rtree_dalloc_t *dalloc;
malloc_mutex_t mutex;
void **root;
unsigned height;
unsigned level2bits[1]; /* Dynamically sized. */
typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
void rtree_delete(rtree_t *rtree);
void rtree_prefork(rtree_t *rtree);
void rtree_postfork_parent(rtree_t *rtree);
void rtree_postfork_child(rtree_t *rtree);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
#ifdef JEMALLOC_DEBUG
uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
JEMALLOC_INLINE uint8_t \
f(rtree_t *rtree, uintptr_t key) \
{ \
uint8_t ret; \
uintptr_t subkey; \
unsigned i, lshift, height, bits; \
void **node, **child; \
\
RTREE_LOCK(&rtree->mutex); \
for (i = lshift = 0, height = rtree->height, node = rtree->root;\
i < height - 1; \
i++, lshift += bits, node = child) { \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
3)) - bits); \
child = (void**)node[subkey]; \
if (child == NULL) { \
RTREE_UNLOCK(&rtree->mutex); \
return (0); \
} \
} \
\
/* \
* node is a leaf, so it contains values rather than node \
* pointers. \
*/ \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
bits); \
{ \
uint8_t *leaf = (uint8_t *)node; \
ret = leaf[subkey]; \
} \
RTREE_UNLOCK(&rtree->mutex); \
\
RTREE_GET_VALIDATE \
return (ret); \
}
#ifdef JEMALLOC_DEBUG
# define RTREE_LOCK(l) malloc_mutex_lock(l)
# define RTREE_UNLOCK(l) malloc_mutex_unlock(l)
# define RTREE_GET_VALIDATE
RTREE_GET_GENERATE(rtree_get_locked)
# undef RTREE_LOCK
# undef RTREE_UNLOCK
# undef RTREE_GET_VALIDATE
#endif
#define RTREE_LOCK(l)
#define RTREE_UNLOCK(l)
#ifdef JEMALLOC_DEBUG
struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
/*
* Suppose that it were possible for a jemalloc-allocated chunk to be
* munmap()ped, followed by a different allocator in another thread re-using
* overlapping virtual memory, all without invalidating the cached rtree
* value. The result would be a false positive (the rtree would claim that
* jemalloc owns memory that it had actually discarded). This scenario
* seems impossible, but the following assertion is a prudent sanity check.
* Single pointer-width field containing all three leaf element fields.
* For example, on a 64-bit x64 system with 48 significant virtual
* memory address bits, the index, extent, and slab fields are packed as
* such:
*
* x: index
* e: extent
* b: slab
*
* 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
*/
# define RTREE_GET_VALIDATE \
assert(rtree_get_locked(rtree, key) == ret);
atomic_p_t le_bits;
#else
# define RTREE_GET_VALIDATE
atomic_p_t le_extent; /* (extent_t *) */
atomic_u_t le_szind; /* (szind_t) */
atomic_b_t le_slab; /* (bool) */
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE
};
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
{
uintptr_t subkey;
unsigned i, lshift, height, bits;
void **node, **child;
typedef struct rtree_level_s rtree_level_t;
struct rtree_level_s {
/* Number of key bits distinguished by this level. */
unsigned bits;
/*
* Cumulative number of key bits distinguished by traversing to
* corresponding tree level.
*/
unsigned cumbits;
};
malloc_mutex_lock(&rtree->mutex);
for (i = lshift = 0, height = rtree->height, node = rtree->root;
i < height - 1;
i++, lshift += bits, node = child) {
bits = rtree->level2bits[i];
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
bits);
child = (void**)node[subkey];
if (child == NULL) {
size_t size = ((i + 1 < height - 1) ? sizeof(void *)
: (sizeof(uint8_t))) << rtree->level2bits[i+1];
child = (void**)rtree->alloc(size);
if (child == NULL) {
malloc_mutex_unlock(&rtree->mutex);
return (true);
}
memset(child, 0, size);
node[subkey] = child;
}
}
typedef struct rtree_s rtree_t;
struct rtree_s {
malloc_mutex_t init_lock;
/* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#else
rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#endif
};
/* node is a leaf, so it contains values rather than node pointers. */
bits = rtree->level2bits[i];
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
{
uint8_t *leaf = (uint8_t *)node;
leaf[subkey] = val;
}
malloc_mutex_unlock(&rtree->mutex);
/*
 * Split the bits into one to three partitions depending on the number of
 * significant bits. If the number of bits does not divide evenly into the
* number of levels, place one remainder bit per level starting at the leaf
* level.
*/
static const rtree_level_t rtree_levels[] = {
#if RTREE_HEIGHT == 1
{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 2
{RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
{RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 3
{RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
{RTREE_NSB/3 + RTREE_NSB%3/2,
RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
{RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
#else
# error Unsupported rtree height
#endif
};
return (false);
bool rtree_new(rtree_t *rtree, bool zeroed);
typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
#ifdef JEMALLOC_JET
void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
#endif
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
rtree_levels[RTREE_HEIGHT-1].bits);
unsigned maskbits = ptrbits - cumbits;
uintptr_t mask = ~((ZU(1) << maskbits) - 1);
return (key & mask);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
rtree_levels[RTREE_HEIGHT-1].bits);
unsigned maskbits = ptrbits - cumbits;
return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
}
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(uintptr_t key, unsigned level) {
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
unsigned cumbits = rtree_levels[level].cumbits;
unsigned shiftbits = ptrbits - cumbits;
unsigned maskbits = rtree_levels[level].bits;
uintptr_t mask = (ZU(1) << maskbits) - 1;
return ((key >> shiftbits) & mask);
}
/*
* Atomic getters.
*
* dependent: Reading a value on behalf of a pointer to a valid allocation
* is guaranteed to be a clean read even without synchronization,
* because the rtree update became visible in memory before the
* pointer came into existence.
* !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
* dependent on a previous rtree write, which means a stale read
* could result if synchronization were omitted here.
*/
# ifdef RTREE_LEAF_COMPACT
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool dependent) {
return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
/* Restore sign-extended high bits, mask slab bit. */
return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
RTREE_NHIB) & ~((uintptr_t)0x1));
}
JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
return (szind_t)(bits >> LG_VADDR);
}
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
return (bool)(bits & (uintptr_t)0x1);
}
# endif
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
return rtree_leaf_elm_bits_extent_get(bits);
#else
extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
return extent;
#endif
}
JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
return rtree_leaf_elm_bits_szind_get(bits);
#else
return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
: ATOMIC_ACQUIRE);
#endif
}
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
return rtree_leaf_elm_bits_slab_get(bits);
#else
return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
ATOMIC_ACQUIRE);
#endif
}
static inline void
rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
extent_t *extent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
| ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
#endif
}
static inline void
rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
szind_t szind) {
assert(szind <= NSIZES);
#ifdef RTREE_LEAF_COMPACT
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
true);
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
(((uintptr_t)0x1 << LG_VADDR) - 1)) |
((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
#endif
}
static inline void
rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool slab) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
true);
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
(((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
#endif
}
static inline void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
extent_t *extent, szind_t szind, bool slab) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
((uintptr_t)slab);
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
/*
* Write extent last, since the element is atomically considered valid
* as soon as the extent field is non-NULL.
*/
rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
#endif
}
static inline void
rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
assert(!slab || szind < NBINS);
/*
* The caller implicitly assures that it is the only writer to the szind
* and slab fields, and that the extent field cannot currently change.
*/
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
}
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, bool init_missing) {
assert(key != 0);
assert(!dependent || !init_missing);
size_t slot = rtree_cache_direct_map(key);
uintptr_t leafkey = rtree_leafkey(key);
assert(leafkey != RTREE_LEAFKEY_INVALID);
/* Fast path: L1 direct mapped cache. */
if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
assert(leaf != NULL);
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
return &leaf[subkey];
}
/*
* Search the L2 LRU cache. On hit, swap the matching element into the
* slot in L1 cache, and move the position in L2 up by 1.
*/
#define RTREE_CACHE_CHECK_L2(i) do { \
if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \
rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \
assert(leaf != NULL); \
if (i > 0) { \
/* Bubble up by one. */ \
rtree_ctx->l2_cache[i].leafkey = \
rtree_ctx->l2_cache[i - 1].leafkey; \
rtree_ctx->l2_cache[i].leaf = \
rtree_ctx->l2_cache[i - 1].leaf; \
rtree_ctx->l2_cache[i - 1].leafkey = \
rtree_ctx->cache[slot].leafkey; \
rtree_ctx->l2_cache[i - 1].leaf = \
rtree_ctx->cache[slot].leaf; \
} else { \
rtree_ctx->l2_cache[0].leafkey = \
rtree_ctx->cache[slot].leafkey; \
rtree_ctx->l2_cache[0].leaf = \
rtree_ctx->cache[slot].leaf; \
} \
rtree_ctx->cache[slot].leafkey = leafkey; \
rtree_ctx->cache[slot].leaf = leaf; \
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \
return &leaf[subkey]; \
} \
} while (0)
/* Check the first cache entry. */
RTREE_CACHE_CHECK_L2(0);
/* Search the remaining cache elements. */
for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
RTREE_CACHE_CHECK_L2(i);
}
#undef RTREE_CACHE_CHECK_L2
return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
dependent, init_missing);
}
static inline bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
extent_t *extent, szind_t szind, bool slab) {
/* Use rtree_clear() to set the extent to NULL. */
assert(extent != NULL);
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, false, true);
if (elm == NULL) {
return true;
}
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
return false;
}
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
bool dependent) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, dependent, false);
if (!dependent && elm == NULL) {
return NULL;
}
assert(elm != NULL);
return elm;
}
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return NULL;
}
return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
}
JEMALLOC_ALWAYS_INLINE szind_t
rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return NSIZES;
}
return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
}
/*
* rtree_slab_read() is intentionally omitted because slab is always read in
* conjunction with szind, which makes rtree_szind_slab_read() a better choice.
*/
JEMALLOC_ALWAYS_INLINE bool
rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return true;
}
*r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
return false;
}
JEMALLOC_ALWAYS_INLINE bool
rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return true;
}
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
return false;
}
static inline void
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, szind_t szind, bool slab) {
assert(!slab || szind < NBINS);
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
}
static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
NULL);
rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false);
}
#endif /* JEMALLOC_INTERNAL_RTREE_H */
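
A standalone sketch of the compact-leaf bit packing described above, assuming LG_VADDR == 48 and RTREE_NHIB == 16 (64-bit pointers). It mirrors the arithmetic in rtree_leaf_elm_write() and the rtree_leaf_elm_bits_*_get() helpers; the DEMO_* names are illustrative, not jemalloc's:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_LG_VADDR	48
#define DEMO_NHIB	(64 - DEMO_LG_VADDR)

static uintptr_t
demo_pack(uintptr_t extent, unsigned szind, bool slab) {
	return ((uintptr_t)szind << DEMO_LG_VADDR)
	    | (extent & (((uintptr_t)1 << DEMO_LG_VADDR) - 1))
	    | (uintptr_t)slab;
}

static uintptr_t
demo_extent(uintptr_t bits) {
	/* Restore sign-extended high bits, mask the slab bit. */
	return ((uintptr_t)((intptr_t)(bits << DEMO_NHIB) >> DEMO_NHIB))
	    & ~(uintptr_t)0x1;
}

static unsigned
demo_szind(uintptr_t bits) {
	return (unsigned)(bits >> DEMO_LG_VADDR);
}

static bool
demo_slab(uintptr_t bits) {
	return (bool)(bits & 0x1);
}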

View File

@@ -0,0 +1,50 @@
#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
#define JEMALLOC_INTERNAL_RTREE_CTX_H
/*
 * Number of leafkey/leaf pairs to cache in the L1 and L2 levels, respectively. Each
* entry supports an entire leaf, so the cache hit rate is typically high even
* with a small number of entries. In rare cases extent activity will straddle
* the boundary between two leaf nodes. Furthermore, an arena may use a
* combination of dss and mmap. Note that as memory usage grows past the amount
* that this cache can directly cover, the cache will become less effective if
* locality of reference is low, but the consequence is merely cache misses
* while traversing the tree nodes.
*
 * The L1 direct-mapped cache offers consistent, low cost on a cache hit, but
 * collisions can hurt its hit rate. This is resolved by combining it with an
 * L2 LRU cache, which requires linear search and re-ordering on access but
 * suffers no collisions. Note that the cache will itself suffer cache misses
 * if made overly large, plus the cost of linear search in the LRU cache.
*/
#define RTREE_CTX_LG_NCACHE 4
#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
#define RTREE_CTX_NCACHE_L2 8
/*
* Zero initializer required for tsd initialization only. Proper initialization
* done via rtree_ctx_data_init().
*/
#define RTREE_CTX_ZERO_INITIALIZER {{{0}}}
typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
struct rtree_ctx_cache_elm_s {
uintptr_t leafkey;
rtree_leaf_elm_t *leaf;
};
typedef struct rtree_ctx_s rtree_ctx_t;
struct rtree_ctx_s {
/* Direct mapped cache. */
rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE];
/* L2 LRU cache. */
rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2];
};
void rtree_ctx_data_init(rtree_ctx_t *ctx);
#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */
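
To make the direct mapping concrete, a sketch of how two pointers select an L1 slot, assuming a 64-bit platform with 4 KiB pages and a two-level tree (leaf level covering 18 bits, so 30 low key bits are masked off); the constants are illustrative:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MASKBITS	(12 + 18)	/* LG_PAGE + leaf-level bits */
#define DEMO_NCACHE	16		/* RTREE_CTX_NCACHE */

static unsigned
demo_slot(uintptr_t key) {
	return (unsigned)((key >> DEMO_MASKBITS) & (DEMO_NCACHE - 1));
}

int
main(void) {
	uintptr_t a = 0x7f0000001000;
	uintptr_t b = 0x7f0000002000;	/* same leaf, hence the same slot */
	printf("%u %u\n", demo_slot(a), demo_slot(b));
	return 0;
}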

View File

@@ -0,0 +1,232 @@
#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
/*
* This file was generated by the following command:
* sh smoothstep.sh smoother 200 24 3 15
*/
/******************************************************************************/
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
/* STEP(step, h, x, y) */ \
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
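The table's encoding can be checked from the rows themselves: column 2 is column 4 scaled into 24-bit fixed point (1.0 == 1 << 24) and truncated, and column 4 matches the smootherstep polynomial y = 6x^5 - 15x^4 + 10x^3 at the listed x. A stand-alone sketch verifying two rows (the polynomial and the truncation behavior are inferred from the listed values, not from the generator script):

#include <assert.h>
#include <math.h>
#include <stdint.h>

/* y = 6x^5 - 15x^4 + 10x^3, via Horner's scheme. */
static uint64_t
smootherstep_fixed(double x) {
    double y = ((6.0 * x - 15.0) * x + 10.0) * x * x * x;
    return (uint64_t)floor(y * (double)(1 << 24)); /* truncate, per the table */
}

int main(void) {
    assert(smootherstep_fixed(0.600) == UINT64_C(0x0000000000aebc40)); /* STEP 120 */
    assert(smootherstep_fixed(1.000) == UINT64_C(0x0000000001000000)); /* STEP 200 */
    return 0;
}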


@@ -0,0 +1,36 @@
#ifndef JEMALLOC_INTERNAL_SPIN_H
#define JEMALLOC_INTERNAL_SPIN_H
#ifdef JEMALLOC_SPIN_C_
# define SPIN_INLINE extern inline
#else
# define SPIN_INLINE inline
#endif
#define SPIN_INITIALIZER {0U}
typedef struct {
unsigned iteration;
} spin_t;
SPIN_INLINE void
spin_adaptive(spin_t *spin) {
volatile uint32_t i;
if (spin->iteration < 5) {
for (i = 0; i < (1U << spin->iteration); i++) {
CPU_SPINWAIT;
}
spin->iteration++;
} else {
#ifdef _WIN32
SwitchToThread();
#else
sched_yield();
#endif
}
}
#undef SPIN_INLINE
#endif /* JEMALLOC_INTERNAL_SPIN_H */
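For context, a stand-alone sketch of how an adaptive spin like this is used: poll briefly with a doubling pause count, then fall back to yielding. C11 atomics and sched_yield() stand in for jemalloc's internal wrappers and CPU_SPINWAIT, so the demo_* names are illustrative assumptions:

#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

typedef struct { unsigned iteration; } demo_spin_t;

static void
demo_spin_adaptive(demo_spin_t *spin) {
    if (spin->iteration < 5) {
        /* 1, 2, 4, 8, then 16 pause iterations before escalating. */
        for (volatile uint32_t i = 0; i < (1U << spin->iteration); i++) {
            /* CPU_SPINWAIT would expand to a pause instruction here. */
        }
        spin->iteration++;
    } else {
        sched_yield(); /* Still contended after ~31 pauses; let others run. */
    }
}

static void
demo_wait_until_set(atomic_bool *flag) {
    demo_spin_t s = {0};
    while (!atomic_load_explicit(flag, memory_order_acquire)) {
        demo_spin_adaptive(&s);
    }
}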


@@ -1,31 +1,51 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_STATS_H
#define JEMALLOC_INTERNAL_STATS_H
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats_tsd.h"
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* OPTION(opt, var_name, default, set_value_to) */
#define STATS_PRINT_OPTIONS \
OPTION('J', json, false, true) \
OPTION('g', general, true, false) \
OPTION('m', merged, config_stats, false) \
OPTION('d', destroyed, config_stats, false) \
OPTION('a', unmerged, config_stats, false) \
OPTION('b', bins, true, false) \
OPTION('l', large, true, false) \
OPTION('x', mutex, true, false)
struct tcache_bin_stats_s {
/*
* Number of allocation requests that corresponded to the size of this
* bin.
*/
uint64_t nrequests;
};
enum {
#define OPTION(o, v, d, s) stats_print_option_num_##v,
STATS_PRINT_OPTIONS
#undef OPTION
stats_print_tot_num_options
};
struct malloc_bin_stats_s {
/*
* Current number of bytes allocated, including objects currently
* cached by tcache.
*/
size_t allocated;
/* Options for stats_print. */
extern bool opt_stats_print;
extern char opt_stats_print_opts[stats_print_tot_num_options+1];
/* Implements je_malloc_stats_print. */
void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts);
/*
* On architectures that support 64-bit atomics, we use atomic updates for
* our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
* externally.
*/
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Must hold the arena stats mutex while reading atomically. */
typedef uint64_t arena_stats_u64_t;
#endif
typedef struct malloc_bin_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
* the bin. Note that tcache may allocate an object, then recycle it
@@ -42,132 +62,103 @@ struct malloc_bin_stats_s {
*/
uint64_t nrequests;
/*
* Current number of regions of this size class, including regions
* currently cached by tcache.
*/
size_t curregs;
/* Number of tcache fills from this bin. */
uint64_t nfills;
/* Number of tcache flushes to this bin. */
uint64_t nflushes;
/* Total number of runs created for this bin's size class. */
uint64_t nruns;
/* Total number of slabs created for this bin's size class. */
uint64_t nslabs;
/*
* Total number of runs reused by extracting them from the runs tree for
* this bin's size class.
* Total number of slabs reused by extracting them from the slabs heap
* for this bin's size class.
*/
uint64_t reruns;
uint64_t reslabs;
/* Current number of runs in this bin. */
size_t curruns;
};
/* Current number of slabs in this bin. */
size_t curslabs;
struct malloc_large_stats_s {
mutex_prof_data_t mutex_data;
} malloc_bin_stats_t;
typedef struct malloc_large_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena. Note that tcache may allocate an object, then recycle it
* many times, resulting many increments to nrequests, but only one
* each to nmalloc and ndalloc.
* the arena.
*/
uint64_t nmalloc;
uint64_t ndalloc;
arena_stats_u64_t nmalloc;
arena_stats_u64_t ndalloc;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t nrequests;
arena_stats_u64_t nrequests; /* Partially derived. */
/* Current number of runs of this size class. */
size_t curruns;
};
/* Current number of allocations of this size class. */
size_t curlextents; /* Derived. */
} malloc_large_stats_t;
struct arena_stats_s {
/* Number of bytes currently mapped. */
size_t mapped;
typedef struct decay_stats_s {
/* Total number of purge sweeps. */
arena_stats_u64_t npurge;
/* Total number of madvise calls made. */
arena_stats_u64_t nmadvise;
/* Total number of pages purged. */
arena_stats_u64_t purged;
} decay_stats_t;
/*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under
* control.
*/
/*
* Arena stats. Note that fields marked "derived" are not directly maintained
* within the arena code; rather their values are derived during stats merge
* requests.
*/
uint64_t npurge;
uint64_t nmadvise;
uint64_t purged;
/* Per-size-category statistics. */
size_t allocated_large;
uint64_t nmalloc_large;
uint64_t ndalloc_large;
uint64_t nrequests_large;
/*
* One element for each possible size class, including sizes that
* overlap with bin size classes. This is necessary because ipalloc()
* sometimes has to use such large objects in order to assure proper
* alignment.
*/
malloc_large_stats_t *lstats;
};
struct chunk_stats_s {
/* Number of chunks that were allocated. */
uint64_t nchunks;
/* High-water mark for number of chunks allocated. */
size_t highchunks;
/*
* Current number of chunks allocated. This value isn't maintained for
* any other purpose, so keep track of it in order to be able to set
* highchunks.
*/
size_t curchunks;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_stats_print;
extern size_t stats_cactive;
void stats_print(void (*write)(void *, const char *), void *cbopaque,
const char *opts);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
size_t stats_cactive_get(void);
void stats_cactive_add(size_t size);
void stats_cactive_sub(size_t size);
typedef struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_t mtx;
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
JEMALLOC_INLINE size_t
stats_cactive_get(void)
{
/* Number of bytes currently mapped, excluding retained memory. */
atomic_zu_t mapped; /* Partially derived. */
return (atomic_read_z(&stats_cactive));
}
/*
* Number of unused virtual memory bytes currently retained. Retained
* bytes are technically mapped (though always decommitted or purged),
* but they are excluded from the mapped statistic (above).
*/
atomic_zu_t retained; /* Derived. */
JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{
decay_stats_t decay_dirty;
decay_stats_t decay_muzzy;
atomic_add_z(&stats_cactive, size);
}
atomic_zu_t base; /* Derived. */
atomic_zu_t internal;
atomic_zu_t resident; /* Derived. */
JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{
atomic_zu_t allocated_large; /* Derived. */
arena_stats_u64_t nmalloc_large; /* Derived. */
arena_stats_u64_t ndalloc_large; /* Derived. */
arena_stats_u64_t nrequests_large; /* Derived. */
atomic_sub_z(&stats_cactive, size);
}
#endif
/* Number of bytes cached in tcache associated with this arena. */
atomic_zu_t tcache_bytes; /* Derived. */
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
/* One element for each large size class. */
malloc_large_stats_t lstats[NSIZES - NBINS];
/* Arena uptime. */
nstime_t uptime;
} arena_stats_t;
#endif /* JEMALLOC_INTERNAL_STATS_H */
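A usage sketch of the options table through the public API: each character in the opts string handed to malloc_stats_print() flips the matching OPTION entry from its default to its set_value_to value:

#include <jemalloc/jemalloc.h>

int main(void) {
    /* 'J' turns JSON output on; 'a' turns per-arena (unmerged) stats off. */
    malloc_stats_print(NULL, NULL, "Ja");
    return 0;
}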


@@ -0,0 +1,12 @@
#ifndef JEMALLOC_INTERNAL_STATS_TSD_H
#define JEMALLOC_INTERNAL_STATS_TSD_H
typedef struct tcache_bin_stats_s {
/*
* Number of allocation requests that corresponded to the size of this
* bin.
*/
uint64_t nrequests;
} tcache_bin_stats_t;
#endif /* JEMALLOC_INTERNAL_STATS_TSD_H */


@@ -0,0 +1,317 @@
#ifndef JEMALLOC_INTERNAL_SIZE_H
#define JEMALLOC_INTERNAL_SIZE_H
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"
/*
* sz module: Size computations.
*
* Some abbreviations used here:
* p: Page
* ind: Index
* s, sz: Size
* u: Usable size
* a: Aligned
*
* These are not always used completely consistently, but should be enough to
* interpret function names. E.g. sz_psz2ind converts page size to page size
* index; sz_sa2u converts a (size, alignment) allocation request to the usable
* size that would result from such an allocation.
*/
/*
* sz_pind2sz_tab encodes the same information as could be computed by
* sz_pind2sz_compute().
*/
extern size_t const sz_pind2sz_tab[NPSIZES+1];
/*
* sz_index2size_tab encodes the same information as could be computed (at
* unacceptable cost in some code paths) by sz_index2size_compute().
*/
extern size_t const sz_index2size_tab[NSIZES];
/*
* sz_size2index_tab is a compact lookup table that rounds request sizes up to
* size classes. In order to reduce cache footprint, the table is compressed,
* and all accesses are via sz_size2index().
*/
extern uint8_t const sz_size2index_tab[];
static const size_t sz_large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
PAGE
#else
0
#endif
;
JEMALLOC_ALWAYS_INLINE pszind_t
sz_psz2ind(size_t psz) {
if (unlikely(psz > LARGE_MAXCLASS)) {
return NPSIZES;
}
{
pszind_t x = lg_floor((psz<<1)-1);
pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
(LG_SIZE_CLASS_GROUP + LG_PAGE);
pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta_inverse_mask = ZD(-1) << lg_delta;
pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
pszind_t ind = grp + mod;
return ind;
}
}
static inline size_t
sz_pind2sz_compute(pszind_t pind) {
if (unlikely(pind == NPSIZES)) {
return LARGE_MAXCLASS + PAGE;
}
{
size_t grp = pind >> LG_SIZE_CLASS_GROUP;
size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_PAGE +
(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_PAGE-1);
size_t mod_size = (mod+1) << lg_delta;
size_t sz = grp_size + mod_size;
return sz;
}
}
static inline size_t
sz_pind2sz_lookup(pszind_t pind) {
size_t ret = (size_t)sz_pind2sz_tab[pind];
assert(ret == sz_pind2sz_compute(pind));
return ret;
}
static inline size_t
sz_pind2sz(pszind_t pind) {
assert(pind < NPSIZES+1);
return sz_pind2sz_lookup(pind);
}
static inline size_t
sz_psz2u(size_t psz) {
if (unlikely(psz > LARGE_MAXCLASS)) {
return LARGE_MAXCLASS + PAGE;
}
{
size_t x = lg_floor((psz<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (psz + delta_mask) & ~delta_mask;
return usize;
}
}
static inline szind_t
sz_size2index_compute(size_t size) {
if (unlikely(size > LARGE_MAXCLASS)) {
return NSIZES;
}
#if (NTBINS != 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
}
#endif
{
szind_t x = lg_floor((size<<1)-1);
szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
szind_t grp = shift << LG_SIZE_CLASS_GROUP;
szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta_inverse_mask = ZD(-1) << lg_delta;
szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
szind_t index = NTBINS + grp + mod;
return index;
}
}
JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index_lookup(size_t size) {
assert(size <= LOOKUP_MAXCLASS);
{
szind_t ret = (sz_size2index_tab[(size-1) >> LG_TINY_MIN]);
assert(ret == sz_size2index_compute(size));
return ret;
}
}
JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index(size_t size) {
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS)) {
return sz_size2index_lookup(size);
}
return sz_size2index_compute(size);
}
static inline size_t
sz_index2size_compute(szind_t index) {
#if (NTBINS > 0)
if (index < NTBINS) {
return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
}
#endif
{
size_t reduced_index = index - NTBINS;
size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
1);
size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_QUANTUM +
(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_QUANTUM-1);
size_t mod_size = (mod+1) << lg_delta;
size_t usize = grp_size + mod_size;
return usize;
}
}
JEMALLOC_ALWAYS_INLINE size_t
sz_index2size_lookup(szind_t index) {
size_t ret = (size_t)sz_index2size_tab[index];
assert(ret == sz_index2size_compute(index));
return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
sz_index2size(szind_t index) {
assert(index < NSIZES);
return sz_index2size_lookup(index);
}
JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_compute(size_t size) {
if (unlikely(size > LARGE_MAXCLASS)) {
return 0;
}
#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
(ZU(1) << lg_ceil));
}
#endif
{
size_t x = lg_floor((size<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (size + delta_mask) & ~delta_mask;
return usize;
}
}
JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_lookup(size_t size) {
size_t ret = sz_index2size_lookup(sz_size2index_lookup(size));
assert(ret == sz_s2u_compute(size));
return ret;
}
/*
* Compute usable size that would result from allocating an object with the
* specified size.
*/
JEMALLOC_ALWAYS_INLINE size_t
sz_s2u(size_t size) {
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS)) {
return sz_s2u_lookup(size);
}
return sz_s2u_compute(size);
}
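A worked example of the rounding above, as a stand-alone sketch assuming the common x86-64 configuration (LG_QUANTUM == 4, LG_SIZE_CLASS_GROUP == 2) and ignoring tiny size classes and the lookup-table fast path; demo_s2u and the GCC/Clang builtin stand in for sz_s2u_compute() and lg_floor():

#include <assert.h>
#include <stddef.h>

static size_t
demo_s2u(size_t size) {
    size_t x = 63 - (size_t)__builtin_clzll((size << 1) - 1); /* lg_floor */
    size_t lg_delta = (x < 4 + 2 + 1) ? 4 : x - 2 - 1;
    size_t delta_mask = ((size_t)1 << lg_delta) - 1;
    return (size + delta_mask) & ~delta_mask;
}

int main(void) {
    assert(demo_s2u(100) == 112); /* 16-byte class spacing in the 64..128 group */
    assert(demo_s2u(129) == 160); /* spacing doubles to 32 past 128 */
    return 0;
}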
/*
* Compute usable size that would result from allocating an object with the
* specified size and alignment.
*/
JEMALLOC_ALWAYS_INLINE size_t
sz_sa2u(size_t size, size_t alignment) {
size_t usize;
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
/* Try for a small size class. */
if (size <= SMALL_MAXCLASS && alignment < PAGE) {
/*
* Round size up to the nearest multiple of alignment.
*
* This done, we can take advantage of the fact that for each
* small size class, every object is aligned at the smallest
* power of two that is non-zero in the base two representation
* of the size. For example:
*
* Size | Base 2 | Minimum alignment
* -----+----------+------------------
* 96 | 1100000 | 32
* 144 | 10100000 | 32
* 192 | 11000000 | 64
*/
usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
if (usize < LARGE_MINCLASS) {
return usize;
}
}
/* Large size class. Beware of overflow. */
if (unlikely(alignment > LARGE_MAXCLASS)) {
return 0;
}
/* Make sure result is a large size class. */
if (size <= LARGE_MINCLASS) {
usize = LARGE_MINCLASS;
} else {
usize = sz_s2u(size);
if (usize < size) {
/* size_t overflow. */
return 0;
}
}
/*
* Calculate the multi-page mapping that large_palloc() would need in
* order to guarantee the alignment.
*/
if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
/* size_t overflow. */
return 0;
}
return usize;
}
#endif /* JEMALLOC_INTERNAL_SIZE_H */
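Worked through under the same assumptions, sz_sa2u(100, 64) rounds the size up to ALIGNMENT_CEILING(100, 64) = 128, and sz_s2u(128) = 128; since every object in the 128-byte class sits at a multiple of 128 (the lowest non-zero bit of the size, per the table above), the 64-byte alignment request is satisfied on the small-class path and 128 is returned.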


@@ -0,0 +1,55 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
#include "jemalloc/internal/size_classes.h"
extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;
extern tcache_bin_info_t *tcache_bin_info;
/*
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
* large-object bins.
*/
extern unsigned nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
/*
* Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
* usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
* completely disjoint from this data structure. tcaches starts off as a sparse
* array, so it has no physical memory footprint until individual pages are
* touched. This allows the entire array to be allocated the first time an
* explicit tcache is created without a disproportionate impact on memory usage.
*/
extern tcaches_t *tcaches;
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);
void tcache_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn);
void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
void tcache_flush(tsd_t *tsd);
bool tsd_tcache_data_init(tsd_t *tsd);
bool tsd_tcache_enabled_data_init(tsd_t *tsd);
#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
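The explicit-tcache lifecycle described above is driven through the public interface named in the comment (the tcache.{create,flush,destroy} mallctls and MALLOCX_TCACHE()); a minimal sketch with most error handling elided:

#include <jemalloc/jemalloc.h>

int main(void) {
    unsigned tci;
    size_t sz = sizeof(tci);
    if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0) {
        return 1;
    }
    void *p = mallocx(64, MALLOCX_TCACHE(tci)); /* served via tcaches[tci] */
    dallocx(p, MALLOCX_TCACHE(tci));
    mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
    mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
    return 0;
}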


@@ -0,0 +1,250 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"
static inline bool
tcache_enabled_get(tsd_t *tsd) {
return tsd_tcache_enabled_get(tsd);
}
static inline void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
bool was_enabled = tsd_tcache_enabled_get(tsd);
if (!was_enabled && enabled) {
tsd_tcache_data_init(tsd);
} else if (was_enabled && !enabled) {
tcache_cleanup(tsd);
}
/* Commit the state last. Above calls check current state. */
tsd_tcache_enabled_set(tsd, enabled);
tsd_slow_update(tsd);
}
JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache) {
if (TCACHE_GC_INCR == 0) {
return;
}
if (unlikely(ticker_tick(&tcache->gc_ticker))) {
tcache_event_hard(tsd, tcache);
}
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) {
void *ret;
if (unlikely(tbin->ncached == 0)) {
tbin->low_water = -1;
*tcache_success = false;
return NULL;
}
/*
* tcache_success (instead of ret) should be checked upon the return of
* this function. We avoid checking (ret == NULL) because there is
* never a null stored on the avail stack (which is unknown to the
* compiler), and eagerly checking ret would cause a pipeline stall
* (waiting for the cacheline).
*/
*tcache_success = true;
ret = *(tbin->avail - tbin->ncached);
tbin->ncached--;
if (unlikely((low_water_t)tbin->ncached < tbin->low_water)) {
tbin->low_water = tbin->ncached;
}
return ret;
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
szind_t binind, bool zero, bool slow_path) {
void *ret;
tcache_bin_t *tbin;
bool tcache_success;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
assert(binind < NBINS);
tbin = tcache_small_bin_get(tcache, binind);
ret = tcache_alloc_easy(tbin, &tcache_success);
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
bool tcache_hard_success;
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL)) {
return NULL;
}
ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
tbin, binind, &tcache_hard_success);
if (tcache_hard_success == false) {
return NULL;
}
}
assert(ret);
/*
* Only compute usize if required. The checks in the following if
* statement are all static.
*/
if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
usize = sz_index2size(binind);
assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
}
if (likely(!zero)) {
if (slow_path && config_fill) {
if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
} else if (unlikely(opt_zero)) {
memset(ret, 0, usize);
}
}
} else {
if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
memset(ret, 0, usize);
}
if (config_stats) {
tbin->tstats.nrequests++;
}
if (config_prof) {
tcache->prof_accumbytes += usize;
}
tcache_event(tsd, tcache);
return ret;
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
szind_t binind, bool zero, bool slow_path) {
void *ret;
tcache_bin_t *tbin;
bool tcache_success;
assert(binind >= NBINS && binind < nhbins);
tbin = tcache_large_bin_get(tcache, binind);
ret = tcache_alloc_easy(tbin, &tcache_success);
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
/*
* Only allocate one large object at a time, because it's quite
* expensive to create one and not use it.
*/
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL)) {
return NULL;
}
ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
if (ret == NULL) {
return NULL;
}
} else {
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
/* Only compute usize on demand */
if (config_prof || (slow_path && config_fill) ||
unlikely(zero)) {
usize = sz_index2size(binind);
assert(usize <= tcache_maxclass);
}
if (likely(!zero)) {
if (slow_path && config_fill) {
if (unlikely(opt_junk_alloc)) {
memset(ret, JEMALLOC_ALLOC_JUNK,
usize);
} else if (unlikely(opt_zero)) {
memset(ret, 0, usize);
}
}
} else {
memset(ret, 0, usize);
}
if (config_stats) {
tbin->tstats.nrequests++;
}
if (config_prof) {
tcache->prof_accumbytes += usize;
}
}
tcache_event(tsd, tcache);
return ret;
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
bool slow_path) {
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
if (slow_path && config_fill && unlikely(opt_junk_free)) {
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
}
tbin = tcache_small_bin_get(tcache, binind);
tbin_info = &tcache_bin_info[binind];
if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
tcache_bin_flush_small(tsd, tcache, tbin, binind,
(tbin_info->ncached_max >> 1));
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->ncached++;
*(tbin->avail - tbin->ncached) = ptr;
tcache_event(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
bool slow_path) {
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
if (slow_path && config_fill && unlikely(opt_junk_free)) {
large_dalloc_junk(ptr, sz_index2size(binind));
}
tbin = tcache_large_bin_get(tcache, binind);
tbin_info = &tcache_bin_info[binind];
if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
tcache_bin_flush_large(tsd, tbin, binind,
(tbin_info->ncached_max >> 1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->ncached++;
*(tbin->avail - tbin->ncached) = ptr;
tcache_event(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
tcaches_t *elm = &tcaches[ind];
if (unlikely(elm->tcache == NULL)) {
elm->tcache = tcache_create_explicit(tsd);
}
return elm->tcache;
}
#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */


@@ -0,0 +1,64 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats_tsd.h"
#include "jemalloc/internal/ticker.h"
/*
* Read-only information associated with each element of tcache_t's tbins array
* is stored separately, mainly to reduce memory usage.
*/
struct tcache_bin_info_s {
unsigned ncached_max; /* Upper limit on ncached. */
};
struct tcache_bin_s {
low_water_t low_water; /* Min # cached since last GC. */
uint32_t ncached; /* # of cached objects. */
/*
* ncached and stats are both modified frequently. Let's keep them
* close so that they have a higher chance of being on the same
* cacheline, thus fewer write-backs.
*/
tcache_bin_stats_t tstats;
/*
* To make use of adjacent cacheline prefetch, the items in the avail
* stack go to higher addresses for newer allocations. avail points
* just above the available space, which means that
* avail[-ncached, ... -1] are available items and the lowest item will
* be allocated first.
*/
void **avail; /* Stack of available objects. */
};
struct tcache_s {
/* Data accessed frequently first: prof, ticker and small bins. */
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
ticker_t gc_ticker; /* Drives incremental GC. */
/*
* The pointer stacks associated with tbins follow as a contiguous
* array. During tcache initialization, the avail pointer in each
* element of tbins is initialized to point to the proper offset within
* this array.
*/
tcache_bin_t tbins_small[NBINS];
/* Data accessed less often below. */
ql_elm(tcache_t) link; /* Used for aggregating stats. */
arena_t *arena; /* Associated arena. */
szind_t next_gc_bin; /* Next bin to GC. */
/* For small bins, fill (ncached_max >> lg_fill_div). */
uint8_t lg_fill_div[NBINS];
tcache_bin_t tbins_large[NSIZES-NBINS];
};
/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
union {
tcache_t *tcache;
tcaches_t *next;
};
};
#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
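A stand-alone sketch of the avail-stack convention described above: avail points one past the slot array, cached items occupy avail[-ncached .. -1], and the pop side reads the lowest-addressed (most recently pushed) slot, so successive pops move toward higher addresses and benefit from adjacent-cacheline prefetch:

#include <stdint.h>

static void *demo_slots[8];
static void **demo_avail = demo_slots + 8; /* just above the slot region */
static uint32_t demo_ncached = 0;

static void
demo_push(void *ptr) { /* deallocation path, no bounds checks */
    demo_ncached++;
    *(demo_avail - demo_ncached) = ptr; /* newest item, lowest address */
}

static void *
demo_pop(void) { /* allocation path */
    void *ret = *(demo_avail - demo_ncached);
    demo_ncached--;
    return ret;
}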


@@ -0,0 +1,61 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
#define JEMALLOC_INTERNAL_TCACHE_TYPES_H
#include "jemalloc/internal/size_classes.h"
typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;
/* ncached is cast to this type for comparison. */
typedef int32_t low_water_t;
/*
* tcache pointers close to NULL are used to encode state information that is
* used for two purposes: preventing thread caching on a per thread basis and
* cleaning up during thread shutdown.
*/
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
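A sketch of what this near-NULL encoding buys (demo_tcache_available is a hypothetical helper): a single unsigned comparison rejects NULL and all three sentinel states at once:

JEMALLOC_ALWAYS_INLINE bool
demo_tcache_available(tcache_t *tcache) {
	/* NULL (0), DISABLED (1), REINCARNATED (2) and PURGATORY (3) all fail. */
	return (uintptr_t)tcache > (uintptr_t)TCACHE_STATE_MAX;
}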
/*
* Absolute minimum number of cache slots for each small bin.
*/
#define TCACHE_NSLOTS_SMALL_MIN 20
/*
* Absolute maximum number of cache slots for each small bin in the thread
* cache. This is an additional constraint beyond the implicit limit of twice
* the number of regions per slab for this size class.
*
* This constant must be an even number.
*/
#define TCACHE_NSLOTS_SMALL_MAX 200
/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
/*
* TCACHE_GC_SWEEP is the approximate number of allocation events between
* full GC sweeps. Integer rounding may cause the actual number to be
* slightly higher, since GC is performed incrementally.
*/
#define TCACHE_GC_SWEEP 8192
/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
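For example, with NBINS = 36 (a plausible value; the real count comes from size_classes.h), TCACHE_GC_INCR works out to 8192/36 + 1 = 228, so an incremental GC pass runs every 228 events and sweeping all 36 small bins one at a time takes roughly 228 * 36 = 8208 events, slightly above TCACHE_GC_SWEEP as the comment predicts.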
/* Used in TSD static initializer only. Real init in tcache_data_init(). */
#define TCACHE_ZERO_INITIALIZER {0}
/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
#define TCACHE_ENABLED_ZERO_INITIALIZER false
#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */


@@ -0,0 +1,50 @@
#ifndef JEMALLOC_INTERNAL_TICKER_H
#define JEMALLOC_INTERNAL_TICKER_H
#include "jemalloc/internal/util.h"
/**
* A ticker makes it easy to count down events until some limit. You
* ticker_init the ticker to trigger every nticks events. You then notify it
* that an event has occurred with calls to ticker_tick (or that nticks events
* have occurred with a call to ticker_ticks), which will return true (and reset
* the counter) if the countdown hit zero.
*/
typedef struct {
int32_t tick;
int32_t nticks;
} ticker_t;
static inline void
ticker_init(ticker_t *ticker, int32_t nticks) {
ticker->tick = nticks;
ticker->nticks = nticks;
}
static inline void
ticker_copy(ticker_t *ticker, const ticker_t *other) {
*ticker = *other;
}
static inline int32_t
ticker_read(const ticker_t *ticker) {
return ticker->tick;
}
static inline bool
ticker_ticks(ticker_t *ticker, int32_t nticks) {
if (unlikely(ticker->tick < nticks)) {
ticker->tick = ticker->nticks;
return true;
}
ticker->tick -= nticks;
return false;
}
static inline bool
ticker_tick(ticker_t *ticker) {
return ticker_ticks(ticker, 1);
}
#endif /* JEMALLOC_INTERNAL_TICKER_H */
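A stand-alone usage sketch (the counts are arbitrary): a ticker initialized to 64 fires on every 65th call, because the trigger happens only once the 64 ticks are exhausted:

#include <stdio.h>

/* Assumes ticker_t, ticker_init and ticker_tick as defined above. */
int main(void) {
    ticker_t t;
    int fired = 0;
    ticker_init(&t, 64);
    for (int ev = 0; ev < 1000; ev++) {
        if (ticker_tick(&t)) {
            /* Countdown exhausted; ticker_tick already reset it to 64. */
            fired++;
        }
    }
    printf("fired %d times\n", fired); /* 1000 / 65 == 15 */
    return 0;
}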


@@ -1,434 +1,324 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_TSD_H
#define JEMALLOC_INTERNAL_TSD_H
/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 8
typedef bool (*malloc_tsd_cleanup_t)(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
typedef struct tsd_init_block_s tsd_init_block_t;
typedef struct tsd_init_head_s tsd_init_head_t;
#endif
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/prof_types.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/witness.h"
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
* are four macros that support (at least) three use cases: file-private,
* library-private, and library-private inlined. Following is an example
* library-private tsd variable:
* Thread-Specific-Data layout
* --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
* s: state
* e: tcache_enabled
* m: thread_allocated (config_stats)
* f: thread_deallocated (config_stats)
* p: prof_tdata (config_prof)
* c: rtree_ctx (rtree cache accessed on deallocation)
* t: tcache
* --- data not accessed on tcache fast path: arena-related fields ---
* d: arenas_tdata_bypass
* r: reentrancy_level
* x: narenas_tdata
* i: iarena
* a: arena
* o: arenas_tdata
* Loading TSD data is on the critical path of basically all malloc operations.
* In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
* Use a compact layout to reduce cache footprint.
* +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
* |---------------------------- 1st cacheline ----------------------------|
* | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
* |---------------------------- 2nd cacheline ----------------------------|
* | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
* |---------------------------- 3rd cacheline ----------------------------|
* | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
* +-------------------------------------------------------------------------+
* Note: the entire tcache is embedded into TSD and spans multiple cachelines.
*
* In example.h:
* typedef struct {
* int x;
* int y;
* } example_t;
* #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
* malloc_tsd_protos(, example, example_t *)
* malloc_tsd_externs(example, example_t *)
* In example.c:
* malloc_tsd_data(, example, example_t *, EX_INITIALIZER)
* malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER,
* example_tsd_cleanup)
*
* The result is a set of generated functions, e.g.:
*
* bool example_tsd_boot(void) {...}
* example_t **example_tsd_get() {...}
* void example_tsd_set(example_t **val) {...}
*
* Note that all of the functions deal in terms of (a_type *) rather than
* (a_type) so that it is possible to support non-pointer types (unlike
* pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
* cast to (void *). This means that the cleanup function needs to cast *and*
* dereference the function argument, e.g.:
*
* void
* example_tsd_cleanup(void *arg)
* {
* example_t *example = *(example_t **)arg;
*
* [...]
* if ([want the cleanup function to be called again]) {
* example_tsd_set(&example);
* }
* }
*
* If example_tsd_set() is called within example_tsd_cleanup(), it will be
* called again. This is similar to how pthreads TSD destruction works, except
* that pthreads only calls the cleanup function again if the value was set to
* non-NULL.
* The last 3 members (i, a and o) before tcache aren't really needed on the
* tcache fast path. However, we have a number of unused tcache bins and
* witnesses (never touched unless config_debug) at the end of tcache, so we
* place them there to avoid breaking the cachelines and possibly paging in an
* extra page.
*/
/* malloc_tsd_protos(). */
#define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \
a_name##_tsd_boot(void); \
a_attr a_type * \
a_name##_tsd_get(void); \
a_attr void \
a_name##_tsd_set(a_type *val);
/* malloc_tsd_externs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##_tls; \
extern __thread bool a_name##_initialized; \
extern bool a_name##_booted;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##_tls; \
extern pthread_key_t a_name##_tsd; \
extern bool a_name##_booted;
#elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##_tsd; \
extern bool a_name##_booted;
#ifdef JEMALLOC_JET
typedef void (*test_callback_t)(int *);
# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
# define MALLOC_TEST_TSD \
O(test_data, int, int) \
O(test_callback, test_callback_t, int)
# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##_tsd; \
extern tsd_init_head_t a_name##_tsd_init_head; \
extern bool a_name##_booted;
# define MALLOC_TEST_TSD
# define MALLOC_TEST_TSD_INITIALIZER
#endif
/* malloc_tsd_data(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \
a_name##_initialized = false; \
a_attr bool a_name##_booted = false;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##_tls = a_initializer; \
a_attr pthread_key_t a_name##_tsd; \
a_attr bool a_name##_booted = false;
#elif (defined(_WIN32))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr DWORD a_name##_tsd; \
a_attr bool a_name##_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##_tsd; \
a_attr tsd_init_head_t a_name##_tsd_init_head = { \
ql_head_initializer(blocks), \
MALLOC_MUTEX_INITIALIZER \
}; \
a_attr bool a_name##_booted = false;
#endif
/* O(name, type, nullable type) */
#define MALLOC_TSD \
O(tcache_enabled, bool, bool) \
O(arenas_tdata_bypass, bool, bool) \
O(reentrancy_level, int8_t, int8_t) \
O(narenas_tdata, uint32_t, uint32_t) \
O(thread_allocated, uint64_t, uint64_t) \
O(thread_deallocated, uint64_t, uint64_t) \
O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
O(iarena, arena_t *, arena_t *) \
O(arena, arena_t *, arena_t *) \
O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
O(tcache, tcache_t, tcache_t) \
O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
MALLOC_TEST_TSD
/* malloc_tsd_funcs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
{ \
\
if (a_name##_initialized) { \
a_name##_initialized = false; \
a_cleanup(&a_name##_tls); \
} \
return (a_name##_initialized); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##_tsd_cleanup_wrapper); \
} \
a_name##_booted = true; \
return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
\
assert(a_name##_booted); \
return (&a_name##_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
\
assert(a_name##_booted); \
a_name##_tls = (*val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
a_name##_initialized = true; \
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
TCACHE_ENABLED_ZERO_INITIALIZER, \
false, \
0, \
0, \
0, \
0, \
NULL, \
RTREE_CTX_ZERO_INITIALIZER, \
NULL, \
NULL, \
NULL, \
TCACHE_ZERO_INITIALIZER, \
WITNESS_TSD_INITIALIZER \
MALLOC_TEST_TSD_INITIALIZER \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \
return (true); \
} \
a_name##_booted = true; \
return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
\
assert(a_name##_booted); \
return (&a_name##_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
\
assert(a_name##_booted); \
a_name##_tls = (*val); \
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_setspecific(a_name##_tsd, \
(void *)(&a_name##_tls))) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
if (opt_abort) \
abort(); \
} \
} \
}
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Data structure. */ \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
{ \
a_name##_tsd_wrapper_t *wrapper; \
\
wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \
if (wrapper == NULL) \
return (false); \
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
a_type val = wrapper->val; \
a_type tsd_static_data = a_initializer; \
wrapper->initialized = false; \
wrapper->val = tsd_static_data; \
a_cleanup(&val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */ \
return (true); \
} \
} \
malloc_tsd_dalloc(wrapper); \
return (false); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
a_name##_tsd = TlsAlloc(); \
if (a_name##_tsd == TLS_OUT_OF_INDEXES) \
return (true); \
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##_tsd_cleanup_wrapper); \
} \
a_name##_booted = true; \
return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
{ \
a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
TlsGetValue(a_name##_tsd); \
\
if (wrapper == NULL) { \
wrapper = (a_name##_tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} else { \
static a_type tsd_static_data = a_initializer; \
wrapper->initialized = false; \
wrapper->val = tsd_static_data; \
} \
if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
abort(); \
} \
} \
return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
a_name##_tsd_wrapper_t *wrapper; \
\
assert(a_name##_booted); \
wrapper = a_name##_tsd_get_wrapper(); \
return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
a_name##_tsd_wrapper_t *wrapper; \
\
assert(a_name##_booted); \
wrapper = a_name##_tsd_get_wrapper(); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
}
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Data structure. */ \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr void \
a_name##_tsd_cleanup_wrapper(void *arg) \
{ \
a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\
\
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
wrapper->initialized = false; \
a_cleanup(&wrapper->val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */ \
if (pthread_setspecific(a_name##_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
if (opt_abort) \
abort(); \
} \
return; \
} \
} \
malloc_tsd_dalloc(wrapper); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
if (pthread_key_create(&a_name##_tsd, \
a_name##_tsd_cleanup_wrapper) != 0) \
return (true); \
a_name##_booted = true; \
return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
{ \
a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
pthread_getspecific(a_name##_tsd); \
\
if (wrapper == NULL) { \
tsd_init_block_t block; \
wrapper = tsd_init_check_recursion( \
&a_name##_tsd_init_head, &block); \
if (wrapper) \
return (wrapper); \
wrapper = (a_name##_tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
block.data = wrapper; \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} else { \
static a_type tsd_static_data = a_initializer; \
wrapper->initialized = false; \
wrapper->val = tsd_static_data; \
} \
if (pthread_setspecific(a_name##_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
abort(); \
} \
tsd_init_finish(&a_name##_tsd_init_head, &block); \
} \
return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
a_name##_tsd_wrapper_t *wrapper; \
\
assert(a_name##_booted); \
wrapper = a_name##_tsd_get_wrapper(); \
return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
a_name##_tsd_wrapper_t *wrapper; \
\
assert(a_name##_booted); \
wrapper = a_name##_tsd_get_wrapper(); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
}
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
struct tsd_init_block_s {
ql_elm(tsd_init_block_t) link;
pthread_t thread;
void *data;
};
enum {
tsd_state_nominal = 0, /* Common case --> jnz. */
tsd_state_nominal_slow = 1, /* Initialized but on slow path. */
/* The above 2 nominal states should have lower values than the rest. */
tsd_state_nominal_max = 1, /* used for comparison only. */
tsd_state_minimal_initialized = 2,
tsd_state_purgatory = 3,
tsd_state_reincarnated = 4,
tsd_state_uninitialized = 5
};
struct tsd_init_head_s {
ql_head(tsd_init_block_t) blocks;
malloc_mutex_t lock;
};
#endif
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/* Manually limit tsd_state_t to a single byte. */
typedef uint8_t tsd_state_t;
/* The actual tsd. */
struct tsd_s {
/*
* The contents should be treated as totally opaque outside the tsd
* module. Access any thread-local state through the getters and
* setters below.
*/
tsd_state_t state;
#define O(n, t, nt) \
t use_a_getter_or_setter_instead_##n;
MALLOC_TSD
#undef O
};
/*
* Wrapper around tsd_t that makes it possible to avoid implicit conversion
* between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
* explicitly converted to tsd_t, which is non-nullable.
*/
struct tsdn_s {
tsd_t tsd;
};
#define TSDN_NULL ((tsdn_t *)0)
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd) {
return (tsdn_t *)tsd;
}
JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn) {
return tsdn == NULL;
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn) {
assert(!tsdn_null(tsdn));
return &tsdn->tsd;
}
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *);
void malloc_tsd_cleanup_register(bool (*f)(void));
void malloc_tsd_boot(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *tsd_init_check_recursion(tsd_init_head_t *head,
tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
void tsd_cleanup(void *arg);
tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
void tsd_slow_update(tsd_t *tsd);
/*
* We put the platform-specific data declarations and inlines into their own
* header files to avoid cluttering this file. They define tsd_boot0,
* tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set.
*/
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
#elif (defined(JEMALLOC_TLS))
#include "jemalloc/internal/tsd_tls.h"
#elif (defined(_WIN32))
#include "jemalloc/internal/tsd_win.h"
#else
#include "jemalloc/internal/tsd_generic.h"
#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
/*
* tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
* foo. This omits some safety checks, and so can be used during tsd
* initialization and cleanup.
*/
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get_unsafe(tsd_t *tsd) { \
return &tsd->use_a_getter_or_setter_instead_##n; \
}
MALLOC_TSD
#undef O
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) { \
assert(tsd->state == tsd_state_nominal || \
tsd->state == tsd_state_nominal_slow || \
tsd->state == tsd_state_reincarnated || \
tsd->state == tsd_state_minimal_initialized); \
return tsd_##n##p_get_unsafe(tsd); \
}
MALLOC_TSD
#undef O
/*
* tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
* isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
*/
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE nt * \
tsdn_##n##p_get(tsdn_t *tsdn) { \
if (tsdn_null(tsdn)) { \
return NULL; \
} \
tsd_t *tsd = tsdn_tsd(tsdn); \
return (nt *)tsd_##n##p_get(tsd); \
}
MALLOC_TSD
#undef O
/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) { \
return *tsd_##n##p_get(tsd); \
}
MALLOC_TSD
#undef O
/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t val) { \
assert(tsd->state != tsd_state_reincarnated && \
tsd->state != tsd_state_minimal_initialized); \
*tsd_##n##p_get(tsd) = val; \
}
MALLOC_TSD
#undef O
JEMALLOC_ALWAYS_INLINE void
tsd_assert_fast(tsd_t *tsd) {
assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
tsd_reentrancy_level_get(tsd) == 0);
}
JEMALLOC_ALWAYS_INLINE bool
tsd_fast(tsd_t *tsd) {
bool fast = (tsd->state == tsd_state_nominal);
if (fast) {
tsd_assert_fast(tsd);
}
return fast;
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_impl(bool init, bool minimal) {
tsd_t *tsd = tsd_get(init);
if (!init && tsd_get_allocates() && tsd == NULL) {
return NULL;
}
assert(tsd != NULL);
if (unlikely(tsd->state != tsd_state_nominal)) {
return tsd_fetch_slow(tsd, minimal);
}
assert(tsd_fast(tsd));
tsd_assert_fast(tsd);
return tsd;
}
/* Get a minimal TSD that requires no cleanup. See comments in free(). */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_min(void) {
return tsd_fetch_impl(true, true);
}
/* For internal background threads use only. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_internal_fetch(void) {
tsd_t *tsd = tsd_fetch_min();
/* Use reincarnated state to prevent full initialization. */
tsd->state = tsd_state_reincarnated;
return tsd;
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void) {
return tsd_fetch_impl(true, false);
}
static inline bool
tsd_nominal(tsd_t *tsd) {
return (tsd->state <= tsd_state_nominal_max);
}
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void) {
if (!tsd_booted_get()) {
return NULL;
}
return tsd_tsdn(tsd_fetch_impl(false, false));
}
JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsd_rtree_ctx(tsd_t *tsd) {
return tsd_rtree_ctxp_get(tsd);
}
JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
/*
* If tsd cannot be accessed, initialize the fallback rtree_ctx and
* return a pointer to it.
*/
if (unlikely(tsdn_null(tsdn))) {
rtree_ctx_data_init(fallback);
return fallback;
}
return tsd_rtree_ctx(tsdn_tsd(tsdn));
}
#endif /* JEMALLOC_INTERNAL_TSD_H */
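Putting the generated accessors together, a hedged sketch of how allocator code would read and update one of the MALLOC_TSD fields listed above (thread_allocated is a real entry; demo_account_alloc and its logic are illustrative):

static void
demo_account_alloc(size_t usize) {
	tsd_t *tsd = tsd_fetch(); /* fast path when state is tsd_state_nominal */
	uint64_t allocated = tsd_thread_allocated_get(tsd);
	tsd_thread_allocated_set(tsd, allocated + usize);
}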


@@ -0,0 +1,157 @@
#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_GENERIC_H
typedef struct tsd_init_block_s tsd_init_block_t;
struct tsd_init_block_s {
ql_elm(tsd_init_block_t) link;
pthread_t thread;
void *data;
};
/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */
typedef struct tsd_init_head_s tsd_init_head_t;
typedef struct {
bool initialized;
tsd_t val;
} tsd_wrapper_t;
void *tsd_init_check_recursion(tsd_init_head_t *head,
tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
extern pthread_key_t tsd_tsd;
extern tsd_init_head_t tsd_init_head;
extern tsd_wrapper_t tsd_boot_wrapper;
extern bool tsd_booted;
/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE void
tsd_cleanup_wrapper(void *arg) {
tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg;
if (wrapper->initialized) {
wrapper->initialized = false;
tsd_cleanup(&wrapper->val);
if (wrapper->initialized) {
/* Trigger another cleanup round. */
if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0)
{
malloc_write("<jemalloc>: Error setting TSD\n");
if (opt_abort) {
abort();
}
}
return;
}
}
malloc_tsd_dalloc(wrapper);
}
JEMALLOC_ALWAYS_INLINE void
tsd_wrapper_set(tsd_wrapper_t *wrapper) {
if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
malloc_write("<jemalloc>: Error setting TSD\n");
abort();
}
}
JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
tsd_wrapper_get(bool init) {
tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);
if (init && unlikely(wrapper == NULL)) {
tsd_init_block_t block;
wrapper = (tsd_wrapper_t *)
tsd_init_check_recursion(&tsd_init_head, &block);
if (wrapper) {
return wrapper;
}
wrapper = (tsd_wrapper_t *)
malloc_tsd_malloc(sizeof(tsd_wrapper_t));
block.data = (void *)wrapper;
if (wrapper == NULL) {
malloc_write("<jemalloc>: Error allocating TSD\n");
abort();
} else {
wrapper->initialized = false;
tsd_t initializer = TSD_INITIALIZER;
wrapper->val = initializer;
}
tsd_wrapper_set(wrapper);
tsd_init_finish(&tsd_init_head, &block);
}
return wrapper;
}
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
return true;
}
tsd_wrapper_set(&tsd_boot_wrapper);
tsd_booted = true;
return false;
}
JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
tsd_wrapper_t *wrapper;
wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t));
if (wrapper == NULL) {
malloc_write("<jemalloc>: Error allocating TSD\n");
abort();
}
tsd_boot_wrapper.initialized = false;
tsd_cleanup(&tsd_boot_wrapper.val);
wrapper->initialized = false;
tsd_t initializer = TSD_INITIALIZER;
wrapper->val = initializer;
tsd_wrapper_set(wrapper);
}
JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
if (tsd_boot0()) {
return true;
}
tsd_boot1();
return false;
}
JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
return tsd_booted;
}
JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
return true;
}
/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
tsd_wrapper_t *wrapper;
assert(tsd_booted);
wrapper = tsd_wrapper_get(init);
if (tsd_get_allocates() && !init && wrapper == NULL) {
return NULL;
}
return &wrapper->val;
}
JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
tsd_wrapper_t *wrapper;
assert(tsd_booted);
wrapper = tsd_wrapper_get(true);
if (likely(&wrapper->val != val)) {
wrapper->val = *(val);
}
wrapper->initialized = true;
}


@@ -0,0 +1,60 @@
#ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H
extern __thread tsd_t tsd_tls;
extern __thread bool tsd_initialized;
extern bool tsd_booted;
/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE bool
tsd_cleanup_wrapper(void) {
if (tsd_initialized) {
tsd_initialized = false;
tsd_cleanup(&tsd_tls);
}
return tsd_initialized;
}
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
tsd_booted = true;
return false;
}
JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
/* Do nothing. */
}
JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
return tsd_boot0();
}
JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
return tsd_booted;
}
JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
return false;
}
/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
assert(tsd_booted);
return &tsd_tls;
}
JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
assert(tsd_booted);
if (likely(&tsd_tls != val)) {
tsd_tls = (*val);
}
tsd_initialized = true;
}

View File

@@ -0,0 +1,59 @@
#ifdef JEMALLOC_INTERNAL_TSD_TLS_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_TLS_H
extern __thread tsd_t tsd_tls;
extern pthread_key_t tsd_tsd;
extern bool tsd_booted;
/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) {
return true;
}
tsd_booted = true;
return false;
}
JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
/* Do nothing. */
}
JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
return tsd_boot0();
}
JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
return tsd_booted;
}
JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
return false;
}
/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
assert(tsd_booted);
return &tsd_tls;
}
JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
assert(tsd_booted);
if (likely(&tsd_tls != val)) {
tsd_tls = (*val);
}
if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) {
malloc_write("<jemalloc>: Error setting tsd.\n");
if (opt_abort) {
abort();
}
}
}

View File

@@ -0,0 +1,10 @@
#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H
#define JEMALLOC_INTERNAL_TSD_TYPES_H
#define MALLOC_TSD_CLEANUPS_MAX 2
typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;
typedef bool (*malloc_tsd_cleanup_t)(void);
#endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */

View File

@@ -0,0 +1,139 @@
#ifdef JEMALLOC_INTERNAL_TSD_WIN_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_WIN_H
typedef struct {
bool initialized;
tsd_t val;
} tsd_wrapper_t;
extern DWORD tsd_tsd;
extern tsd_wrapper_t tsd_boot_wrapper;
extern bool tsd_booted;
/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE bool
tsd_cleanup_wrapper(void) {
DWORD error = GetLastError();
tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd);
SetLastError(error);
if (wrapper == NULL) {
return false;
}
if (wrapper->initialized) {
wrapper->initialized = false;
tsd_cleanup(&wrapper->val);
if (wrapper->initialized) {
/* Trigger another cleanup round. */
return true;
}
}
malloc_tsd_dalloc(wrapper);
return false;
}
JEMALLOC_ALWAYS_INLINE void
tsd_wrapper_set(tsd_wrapper_t *wrapper) {
if (!TlsSetValue(tsd_tsd, (void *)wrapper)) {
malloc_write("<jemalloc>: Error setting TSD\n");
abort();
}
}
JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
tsd_wrapper_get(bool init) {
DWORD error = GetLastError();
tsd_wrapper_t *wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd);
SetLastError(error);
if (init && unlikely(wrapper == NULL)) {
wrapper = (tsd_wrapper_t *)
malloc_tsd_malloc(sizeof(tsd_wrapper_t));
if (wrapper == NULL) {
malloc_write("<jemalloc>: Error allocating TSD\n");
abort();
} else {
wrapper->initialized = false;
/* MSVC is finicky about aggregate initialization. */
tsd_t tsd_initializer = TSD_INITIALIZER;
wrapper->val = tsd_initializer;
}
tsd_wrapper_set(wrapper);
}
return wrapper;
}
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
tsd_tsd = TlsAlloc();
if (tsd_tsd == TLS_OUT_OF_INDEXES) {
return true;
}
malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
tsd_wrapper_set(&tsd_boot_wrapper);
tsd_booted = true;
return false;
}
JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
tsd_wrapper_t *wrapper;
wrapper = (tsd_wrapper_t *)
malloc_tsd_malloc(sizeof(tsd_wrapper_t));
if (wrapper == NULL) {
malloc_write("<jemalloc>: Error allocating TSD\n");
abort();
}
tsd_boot_wrapper.initialized = false;
tsd_cleanup(&tsd_boot_wrapper.val);
wrapper->initialized = false;
tsd_t initializer = TSD_INITIALIZER;
wrapper->val = initializer;
tsd_wrapper_set(wrapper);
}
JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
if (tsd_boot0()) {
return true;
}
tsd_boot1();
return false;
}
JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
return tsd_booted;
}
JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
return true;
}
/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
tsd_wrapper_t *wrapper;
assert(tsd_booted);
wrapper = tsd_wrapper_get(init);
if (tsd_get_allocates() && !init && wrapper == NULL) {
return NULL;
}
return &wrapper->val;
}
JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
tsd_wrapper_t *wrapper;
assert(tsd_booted);
wrapper = tsd_wrapper_get(true);
if (likely(&wrapper->val != val)) {
wrapper->val = *(val);
}
wrapper->initialized = true;
}

View File

@@ -1,14 +1,15 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_UTIL_H
#define JEMALLOC_INTERNAL_UTIL_H
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
#define UTIL_INLINE static inline
/*
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
* large enough for all possible uses within jemalloc.
*/
#define MALLOC_PRINTF_BUFSIZE 4096
/* Junk fill patterns. */
#ifndef JEMALLOC_ALLOC_JUNK
# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
#endif
#ifndef JEMALLOC_FREE_JUNK
# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
#endif
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
@@ -16,128 +17,34 @@
*/
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
/* cpp macro definition stringification. */
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
/*
* Silence compiler warnings due to uninitialized values. This is used
* wherever the compiler fails to recognize that the variable is never used
* uninitialized.
*/
#ifdef JEMALLOC_CC_SILENCE
# define JEMALLOC_CC_SILENCE_INIT(v) = v
#define JEMALLOC_CC_SILENCE_INIT(v) = v
#ifdef __GNUC__
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#else
# define JEMALLOC_CC_SILENCE_INIT(v)
# define likely(x) !!(x)
# define unlikely(x) !!(x)
#endif
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
#endif
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (config_debug && !(e)) \
not_implemented(); \
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
if ((c) == false) \
not_reached(); \
} while (0)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr,
char **restrict endptr, int base);
void malloc_write(const char *s);
/*
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
int malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
int malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_printf(const char *format, ...)
JEMALLOC_ATTR(format(printf, 1, 2));
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
size_t pow2_ceil(size_t x);
void set_errno(int errnum);
int get_errno(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
x |= x >> 32;
#endif
x++;
return (x);
}
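/*
 * Worked example (illustrative, not part of the header): pow2_ceil(37).
 *
 *   x = 37 - 1 = 36   -> 0b100100
 *   x |= x >> 1       -> 0b110110
 *   x |= x >> 2       -> 0b111111  (further shifts change nothing here)
 *   x + 1 = 64, the smallest power of two >= 37.
 *
 * Each shift-or propagates the highest set bit downward until every lower
 * bit is set; the final increment then carries into a single higher bit.
 */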
/* Sets error code */
JEMALLOC_INLINE void
set_errno(int errnum)
{
#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
/* Set error code. */
UTIL_INLINE void
set_errno(int errnum) {
#ifdef _WIN32
SetLastError(errnum);
#else
@@ -145,18 +52,16 @@ set_errno(int errnum)
#endif
}
/* Get last error code */
JEMALLOC_INLINE int
get_errno(void)
{
/* Get last error code. */
UTIL_INLINE int
get_errno(void) {
#ifdef _WIN32
return (GetLastError());
return GetLastError();
#else
return (errno);
return errno;
#endif
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#undef UTIL_INLINE
#endif /* JEMALLOC_INTERNAL_UTIL_H */

View File

@@ -0,0 +1,346 @@
#ifndef JEMALLOC_INTERNAL_WITNESS_H
#define JEMALLOC_INTERNAL_WITNESS_H
#include "jemalloc/internal/ql.h"
/******************************************************************************/
/* LOCK RANKS */
/******************************************************************************/
/*
* Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness
* machinery.
*/
#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_MIN 1U
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_TCACHES 2U
#define WITNESS_RANK_ARENAS 3U
#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U
#define WITNESS_RANK_PROF_DUMP 5U
#define WITNESS_RANK_PROF_BT2GCTX 6U
#define WITNESS_RANK_PROF_TDATAS 7U
#define WITNESS_RANK_PROF_TDATA 8U
#define WITNESS_RANK_PROF_GCTX 9U
#define WITNESS_RANK_BACKGROUND_THREAD 10U
/*
* Used as an argument to witness_assert_depth_to_rank() in order to validate
* depth excluding non-core locks with lower ranks. Since the rank argument to
* witness_assert_depth_to_rank() is inclusive rather than exclusive, this
* definition can have the same value as the minimally ranked core lock.
*/
#define WITNESS_RANK_CORE 11U
#define WITNESS_RANK_DECAY 11U
#define WITNESS_RANK_TCACHE_QL 12U
#define WITNESS_RANK_EXTENT_GROW 13U
#define WITNESS_RANK_EXTENTS 14U
#define WITNESS_RANK_EXTENT_AVAIL 15U
#define WITNESS_RANK_EXTENT_POOL 16U
#define WITNESS_RANK_RTREE 17U
#define WITNESS_RANK_BASE 18U
#define WITNESS_RANK_ARENA_LARGE 19U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
/******************************************************************************/
/* PER-WITNESS DATA */
/******************************************************************************/
#if defined(JEMALLOC_DEBUG)
# define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}
#else
# define WITNESS_INITIALIZER(name, rank)
#endif
typedef struct witness_s witness_t;
typedef unsigned witness_rank_t;
typedef ql_head(witness_t) witness_list_t;
typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
void *);
struct witness_s {
/* Name, used for printing lock order reversal messages. */
const char *name;
/*
* Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
* must be acquired in order of increasing rank.
*/
witness_rank_t rank;
/*
* If two witnesses are of equal rank and they have the same comp
* function pointer, it is called as a last attempt to differentiate
* between witnesses of equal rank.
*/
witness_comp_t *comp;
/* Opaque data, passed to comp(). */
void *opaque;
/* Linkage for thread's currently owned locks. */
ql_elm(witness_t) link;
};
/******************************************************************************/
/* PER-THREAD DATA */
/******************************************************************************/
typedef struct witness_tsd_s witness_tsd_t;
struct witness_tsd_s {
witness_list_t witnesses;
bool forking;
};
#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false }
#define WITNESS_TSDN_NULL ((witness_tsdn_t *)0)
/******************************************************************************/
/* (PER-THREAD) NULLABILITY HELPERS */
/******************************************************************************/
typedef struct witness_tsdn_s witness_tsdn_t;
struct witness_tsdn_s {
witness_tsd_t witness_tsd;
};
JEMALLOC_ALWAYS_INLINE witness_tsdn_t *
witness_tsd_tsdn(witness_tsd_t *witness_tsd) {
return (witness_tsdn_t *)witness_tsd;
}
JEMALLOC_ALWAYS_INLINE bool
witness_tsdn_null(witness_tsdn_t *witness_tsdn) {
return witness_tsdn == NULL;
}
JEMALLOC_ALWAYS_INLINE witness_tsd_t *
witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) {
assert(!witness_tsdn_null(witness_tsdn));
return &witness_tsdn->witness_tsd;
}
/******************************************************************************/
/* API */
/******************************************************************************/
void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
witness_comp_t *comp, void *opaque);
typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
extern witness_lock_error_t *JET_MUTABLE witness_lock_error;
typedef void (witness_owner_error_t)(const witness_t *);
extern witness_owner_error_t *JET_MUTABLE witness_owner_error;
typedef void (witness_not_owner_error_t)(const witness_t *);
extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error;
typedef void (witness_depth_error_t)(const witness_list_t *,
witness_rank_t rank_inclusive, unsigned depth);
extern witness_depth_error_t *JET_MUTABLE witness_depth_error;
void witnesses_cleanup(witness_tsd_t *witness_tsd);
void witness_prefork(witness_tsd_t *witness_tsd);
void witness_postfork_parent(witness_tsd_t *witness_tsd);
void witness_postfork_child(witness_tsd_t *witness_tsd);
/* Helper, not intended for direct use. */
static inline bool
witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) {
witness_list_t *witnesses;
witness_t *w;
cassert(config_debug);
witnesses = &witness_tsd->witnesses;
ql_foreach(w, witnesses, link) {
if (w == witness) {
return true;
}
}
return false;
}
static inline void
witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) {
witness_tsd_t *witness_tsd;
if (!config_debug) {
return;
}
if (witness_tsdn_null(witness_tsdn)) {
return;
}
witness_tsd = witness_tsdn_tsd(witness_tsdn);
if (witness->rank == WITNESS_RANK_OMIT) {
return;
}
if (witness_owner(witness_tsd, witness)) {
return;
}
witness_owner_error(witness);
}
static inline void
witness_assert_not_owner(witness_tsdn_t *witness_tsdn,
const witness_t *witness) {
witness_tsd_t *witness_tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug) {
return;
}
if (witness_tsdn_null(witness_tsdn)) {
return;
}
witness_tsd = witness_tsdn_tsd(witness_tsdn);
if (witness->rank == WITNESS_RANK_OMIT) {
return;
}
witnesses = &witness_tsd->witnesses;
ql_foreach(w, witnesses, link) {
if (w == witness) {
witness_not_owner_error(witness);
}
}
}
static inline void
witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
witness_rank_t rank_inclusive, unsigned depth) {
witness_tsd_t *witness_tsd;
unsigned d;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug) {
return;
}
if (witness_tsdn_null(witness_tsdn)) {
return;
}
witness_tsd = witness_tsdn_tsd(witness_tsdn);
d = 0;
witnesses = &witness_tsd->witnesses;
w = ql_last(witnesses, link);
if (w != NULL) {
ql_reverse_foreach(w, witnesses, link) {
if (w->rank < rank_inclusive) {
break;
}
d++;
}
}
if (d != depth) {
witness_depth_error(witnesses, rank_inclusive, depth);
}
}
static inline void
witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) {
witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth);
}
static inline void
witness_assert_lockless(witness_tsdn_t *witness_tsdn) {
witness_assert_depth(witness_tsdn, 0);
}
static inline void
witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
witness_tsd_t *witness_tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug) {
return;
}
if (witness_tsdn_null(witness_tsdn)) {
return;
}
witness_tsd = witness_tsdn_tsd(witness_tsdn);
if (witness->rank == WITNESS_RANK_OMIT) {
return;
}
witness_assert_not_owner(witness_tsdn, witness);
witnesses = &witness_tsd->witnesses;
w = ql_last(witnesses, link);
if (w == NULL) {
/* No other locks; do nothing. */
} else if (witness_tsd->forking && w->rank <= witness->rank) {
/* Forking, and relaxed ranking satisfied. */
} else if (w->rank > witness->rank) {
/* Not forking, rank order reversal. */
witness_lock_error(witnesses, witness);
} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
witness->comp || w->comp(w, w->opaque, witness, witness->opaque) >
0)) {
/*
* Missing/incompatible comparison function, or comparison
* function indicates rank order reversal.
*/
witness_lock_error(witnesses, witness);
}
ql_elm_new(witness, link);
ql_tail_insert(witnesses, witness, link);
}
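/*
 * Illustrative sketch (hypothetical, not jemalloc code) of how the
 * machinery above enforces lock ordering in debug builds: acquiring in
 * increasing rank order is silent, while the reversed order at the end
 * would fire witness_lock_error().
 */
static witness_t w_low, w_high;

static void
witness_example_sketch(witness_tsdn_t *tsdn) {
    witness_init(&w_low, "low", (witness_rank_t)5U, NULL, NULL);
    witness_init(&w_high, "high", (witness_rank_t)9U, NULL, NULL);

    witness_lock(tsdn, &w_low);   /* Rank 5 first: OK. */
    witness_lock(tsdn, &w_high);  /* Rank 9 after 5: OK. */
    witness_unlock(tsdn, &w_high);
    witness_unlock(tsdn, &w_low);

    witness_lock(tsdn, &w_high);
    /* witness_lock(tsdn, &w_low); would report a rank order reversal. */
    witness_unlock(tsdn, &w_high);
}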
static inline void
witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
witness_tsd_t *witness_tsd;
witness_list_t *witnesses;
if (!config_debug) {
return;
}
if (witness_tsdn_null(witness_tsdn)) {
return;
}
witness_tsd = witness_tsdn_tsd(witness_tsdn);
if (witness->rank == WITNESS_RANK_OMIT) {
return;
}
/*
* Check whether owner before removal, rather than relying on
* witness_assert_owner() to abort, so that unit tests can test this
* function's failure mode without causing undefined behavior.
*/
if (witness_owner(witness_tsd, witness)) {
witnesses = &witness_tsd->witnesses;
ql_remove(witnesses, witness, link);
} else {
witness_assert_owner(witness_tsdn, witness);
}
}
#endif /* JEMALLOC_INTERNAL_WITNESS_H */

View File

@@ -4,42 +4,200 @@
extern "C" {
#endif
/* Defined if __attribute__((...)) syntax is supported. */
#define JEMALLOC_HAVE_ATTR
/* Defined if alloc_size attribute is supported. */
#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
/* Defined if format(gnu_printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
/* Defined if format(printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#define JEMALLOC_OVERRIDE_MEMALIGN
#define JEMALLOC_OVERRIDE_VALLOC
/*
* At least Linux omits the "const" in:
*
* size_t malloc_usable_size(const void *ptr);
*
* Match the operating system's prototype.
*/
#define JEMALLOC_USABLE_SIZE_CONST
/*
* If defined, specify throw() for the public function prototypes when compiling
* with C++. The only justification for this is to match the prototypes that
* glibc defines.
*/
#define JEMALLOC_USE_CXX_THROW
#ifdef _MSC_VER
# ifdef _WIN64
# define LG_SIZEOF_PTR_WIN 3
# else
# define LG_SIZEOF_PTR_WIN 2
# endif
#endif
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
# define je_aligned_alloc aligned_alloc
# define je_calloc calloc
# define je_dallocx dallocx
# define je_free free
# define je_mallctl mallctl
# define je_mallctlbymib mallctlbymib
# define je_mallctlnametomib mallctlnametomib
# define je_malloc malloc
# define je_malloc_conf malloc_conf
# define je_malloc_message malloc_message
# define je_malloc_stats_print malloc_stats_print
# define je_malloc_usable_size malloc_usable_size
# define je_mallocx mallocx
# define je_nallocx nallocx
# define je_posix_memalign posix_memalign
# define je_rallocx rallocx
# define je_realloc realloc
# define je_sallocx sallocx
# define je_sdallocx sdallocx
# define je_xallocx xallocx
# define je_memalign memalign
# define je_valloc valloc
#endif
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340"
#define JEMALLOC_VERSION_MAJOR 3
#define JEMALLOC_VERSION_MINOR 6
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION "5.0.1-0-g896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb"
#define JEMALLOC_VERSION_MAJOR 5
#define JEMALLOC_VERSION_MINOR 0
#define JEMALLOC_VERSION_BUGFIX 1
#define JEMALLOC_VERSION_NREV 0
#define JEMALLOC_VERSION_GID "46c0af68bd248b04df75e4f92d5fb804c3d75340"
#define JEMALLOC_VERSION_GID "896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb"
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
#define MALLOCX_LG_ALIGN(la) ((int)(la))
#if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
#else
# define MALLOCX_ALIGN(a) \
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
ffs((int)(((size_t)(a))>>32))+31))
#endif
#define MALLOCX_ZERO ((int)0x40)
/*
* Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
* encodes MALLOCX_TCACHE_NONE.
*/
#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
/*
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
*/
#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
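/*
 * Worked example (illustrative only) of composing mallocx() flags from the
 * encodings above: request 64-byte alignment (lg 6), zeroed memory, no
 * thread cache, and explicit arena index 3.
 *
 *   MALLOCX_LG_ALIGN(6) = 6
 *   MALLOCX_ZERO        = 0x40
 *   MALLOCX_TCACHE_NONE = (-1 + 2) << 8  = 0x100
 *   MALLOCX_ARENA(3)    = (3 + 1) << 20  = 0x400000
 *
 *   void *p = mallocx(4096, MALLOCX_LG_ALIGN(6) | MALLOCX_ZERO |
 *       MALLOCX_TCACHE_NONE | MALLOCX_ARENA(3));
 *   ...
 *   dallocx(p, MALLOCX_TCACHE_NONE);
 */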
#ifdef JEMALLOC_EXPERIMENTAL
# define ALLOCM_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define ALLOCM_ALIGN(a) (ffs(a)-1)
/*
* Use as arena index in "arena.<i>.{purge,decay,dss}" and
* "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
* definition is intentionally specified in raw decimal format to support
* cpp-based string concatenation, e.g.
*
* #define STRINGIFY_HELPER(x) #x
* #define STRINGIFY(x) STRINGIFY_HELPER(x)
*
* mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
* 0);
*/
#define MALLCTL_ARENAS_ALL 4096
/*
* Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
* destroyed arenas.
*/
#define MALLCTL_ARENAS_DESTROYED 4097
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()
#else
# define JEMALLOC_CXX_THROW
#endif
#if defined(_MSC_VER)
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# ifndef JEMALLOC_EXPORT
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define ALLOCM_ALIGN(a) \
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# define ALLOCM_ZERO ((int)0x40)
# define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
# define ALLOCM_SUCCESS 0
# define ALLOCM_ERR_OOM 1
# define ALLOCM_ERR_NOT_MOVED 2
# endif
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_NOINLINE __declspec(noinline)
# ifdef __cplusplus
# define JEMALLOC_NOTHROW __declspec(nothrow)
# else
# define JEMALLOC_NOTHROW
# endif
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
# if _MSC_VER >= 1900 && !defined(__EDG__)
# define JEMALLOC_ALLOCATOR __declspec(allocator)
# else
# define JEMALLOC_ALLOCATOR
# endif
#elif defined(JEMALLOC_HAVE_ATTR)
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
# else
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# endif
# ifndef JEMALLOC_EXPORT
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# endif
# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
# else
# define JEMALLOC_FORMAT_PRINTF(s, i)
# endif
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# define JEMALLOC_EXPORT
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_NOINLINE
# define JEMALLOC_NOTHROW
# define JEMALLOC_SECTION(s)
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#endif
/*
@@ -51,55 +209,141 @@ extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_malloc(size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
JEMALLOC_CXX_THROW;
JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
int flags) JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
size_t extra, int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
int flags) JEMALLOC_ATTR(pure);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
int flags);
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
JEMALLOC_ATTR(pure);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
size_t *mibp, size_t *miblenp);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
void (*write_cb)(void *, const char *), void *je_cbopaque,
const char *opts);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size,
int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size,
size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
#endif
typedef struct extent_hooks_s extent_hooks_t;
/*
* void *
* extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
* size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
*/
typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
bool *, unsigned);
/*
* bool
* extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
* bool committed, unsigned arena_ind);
*/
typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
unsigned);
/*
* void
* extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
* bool committed, unsigned arena_ind);
*/
typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
unsigned);
/*
* bool
* extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
* size_t offset, size_t length, unsigned arena_ind);
*/
typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
unsigned);
/*
* bool
* extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
* size_t offset, size_t length, unsigned arena_ind);
*/
typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
size_t, unsigned);
/*
* bool
* extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
* size_t offset, size_t length, unsigned arena_ind);
*/
typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
unsigned);
/*
* bool
* extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
* size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
*/
typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
bool, unsigned);
/*
* bool
* extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
* void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
*/
typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
bool, unsigned);
struct extent_hooks_s {
extent_alloc_t *alloc;
extent_dalloc_t *dalloc;
extent_destroy_t *destroy;
extent_commit_t *commit;
extent_decommit_t *decommit;
extent_purge_t *purge_lazy;
extent_purge_t *purge_forced;
extent_split_t *split;
extent_merge_t *merge;
};
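/*
 * Minimal sketch (hypothetical, not part of this header) of a custom
 * extent_hooks_t built on the typedefs above. Alignment fix-up, the
 * new_addr hint, and error handling are omitted; whether a NULL entry is a
 * permitted opt-out for a given hook should be checked against the
 * jemalloc manual.
 */
#include <stdbool.h>
#include <sys/mman.h>

static void *
sketch_extent_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        return NULL; /* NULL reports failure to jemalloc. */
    }
    *zero = true;    /* Fresh anonymous pages are demand-zeroed. */
    *commit = true;
    return p;
}

static bool
sketch_extent_dalloc(extent_hooks_t *hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
    /* Returning true means "not deallocated"; jemalloc then retains it. */
    return munmap(addr, size) != 0;
}

static extent_hooks_t sketch_hooks = {
    sketch_extent_alloc,
    sketch_extent_dalloc,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL
};
/*
 * Hooks like these are installed per arena through the
 * "arena.<i>.extent_hooks" mallctl.
 */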
/*
* By default application code must explicitly refer to mangled symbol names,
@@ -112,32 +356,28 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define aligned_alloc je_aligned_alloc
# define calloc je_calloc
# define dallocx je_dallocx
# define free je_free
# define mallctl je_mallctl
# define mallctlbymib je_mallctlbymib
# define mallctlnametomib je_mallctlnametomib
# define malloc je_malloc
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
# define mallocx je_mallocx
# define nallocx je_nallocx
# define posix_memalign je_posix_memalign
# define rallocx je_rallocx
# define realloc je_realloc
# define sallocx je_sallocx
# define sdallocx je_sdallocx
# define xallocx je_xallocx
# define memalign je_memalign
# define valloc je_valloc
# define allocm je_allocm
# define dallocm je_dallocm
# define nallocm je_nallocm
# define rallocm je_rallocm
# define sallocm je_sallocm
#endif
/*
@@ -148,35 +388,31 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_aligned_alloc
# undef je_calloc
# undef je_dallocx
# undef je_free
# undef je_mallctl
# undef je_mallctlbymib
# undef je_mallctlnametomib
# undef je_malloc
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_malloc_stats_print
# undef je_malloc_usable_size
# undef je_mallocx
# undef je_nallocx
# undef je_posix_memalign
# undef je_rallocx
# undef je_realloc
# undef je_sallocx
# undef je_sdallocx
# undef je_xallocx
# undef je_memalign
# undef je_valloc
# undef je_allocm
# undef je_dallocm
# undef je_nallocm
# undef je_rallocm
# undef je_sallocm
#endif
#ifdef __cplusplus
};
}
#endif
#endif /* JEMALLOC_H_ */

View File

@@ -0,0 +1,20 @@
#ifndef stdbool_h
#define stdbool_h
#include <wtypes.h>
/* MSVC doesn't define _Bool or bool in C, but does have BOOL */
/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as
* a built-in type. */
#ifndef __clang__
typedef BOOL _Bool;
#endif
#define bool _Bool
#define true 1
#define false 0
#define __bool_true_false_are_defined 1
#endif /* stdbool_h */

View File

@@ -0,0 +1,247 @@
// ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include <limits.h>
// For Visual Studio 6 in C++ mode, and for many Visual Studio versions when
// compiling for ARM, we should wrap the <wchar.h> include with
// 'extern "C++" {}', or the compiler emits many errors like this:
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
# include <wchar.h>
#ifdef __cplusplus
}
#endif
// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
# define _W64 __w64
# else
# define _W64
# endif
#endif
// 7.18.1 Integer types
// 7.18.1.1 Exact-width integer types
// Visual Studio 6 and Embedded Visual C++ 4 don't
// realize that, e.g., char has the same size as __int8,
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#else
typedef signed __int8 int8_t;
typedef signed __int16 int16_t;
typedef signed __int32 int32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
// 7.18.1.2 Minimum-width integer types
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;
// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
typedef signed __int64 intptr_t;
typedef unsigned __int64 uintptr_t;
#else // _WIN64 ][
typedef _W64 signed int intptr_t;
typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]
// 7.18.1.5 Greatest-width integer types
typedef int64_t intmax_t;
typedef uint64_t uintmax_t;
// 7.18.2 Limits of specified-width integer types
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN ((int8_t)_I8_MIN)
#define INT8_MAX _I8_MAX
#define INT16_MIN ((int16_t)_I16_MIN)
#define INT16_MAX _I16_MAX
#define INT32_MIN ((int32_t)_I32_MIN)
#define INT32_MAX _I32_MAX
#define INT64_MIN ((int64_t)_I64_MIN)
#define INT64_MAX _I64_MAX
#define UINT8_MAX _UI8_MAX
#define UINT16_MAX _UI16_MAX
#define UINT32_MAX _UI32_MAX
#define UINT64_MAX _UI64_MAX
// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN INT8_MIN
#define INT_LEAST8_MAX INT8_MAX
#define INT_LEAST16_MIN INT16_MIN
#define INT_LEAST16_MAX INT16_MAX
#define INT_LEAST32_MIN INT32_MIN
#define INT_LEAST32_MAX INT32_MAX
#define INT_LEAST64_MIN INT64_MIN
#define INT_LEAST64_MAX INT64_MAX
#define UINT_LEAST8_MAX UINT8_MAX
#define UINT_LEAST16_MAX UINT16_MAX
#define UINT_LEAST32_MAX UINT32_MAX
#define UINT_LEAST64_MAX UINT64_MAX
// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MIN INT16_MIN
#define INT_FAST16_MAX INT16_MAX
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT16_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX
// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
# define INTPTR_MIN INT64_MIN
# define INTPTR_MAX INT64_MAX
# define UINTPTR_MAX UINT64_MAX
#else // _WIN64 ][
# define INTPTR_MIN INT32_MIN
# define INTPTR_MAX INT32_MAX
# define UINTPTR_MAX UINT32_MAX
#endif // _WIN64 ]
// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN INT64_MIN
#define INTMAX_MAX INT64_MAX
#define UINTMAX_MAX UINT64_MAX
// 7.18.3 Limits of other integer types
#ifdef _WIN64 // [
# define PTRDIFF_MIN _I64_MIN
# define PTRDIFF_MAX _I64_MAX
#else // _WIN64 ][
# define PTRDIFF_MIN _I32_MIN
# define PTRDIFF_MAX _I32_MAX
#endif // _WIN64 ]
#define SIG_ATOMIC_MIN INT_MIN
#define SIG_ATOMIC_MAX INT_MAX
#ifndef SIZE_MAX // [
# ifdef _WIN64 // [
# define SIZE_MAX _UI64_MAX
# else // _WIN64 ][
# define SIZE_MAX _UI32_MAX
# endif // _WIN64 ]
#endif // SIZE_MAX ]
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
# define WCHAR_MIN 0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
# define WCHAR_MAX _UI16_MAX
#endif // WCHAR_MAX ]
#define WINT_MIN 0
#define WINT_MAX _UI16_MAX
#endif // __STDC_LIMIT_MACROS ]
// 7.18.4 Limits of other integer types
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
// 7.18.4.1 Macros for minimum-width integer constants
#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64
#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64
// 7.18.4.2 Macros for greatest-width integer constants
#define INTMAX_C INT64_C
#define UINTMAX_C UINT64_C
#endif // __STDC_CONSTANT_MACROS ]
#endif // _MSC_STDINT_H_ ]

View File

@@ -3,21 +3,56 @@
/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
* for both. */
#include <intrin.h>
#pragma intrinsic(_BitScanForward)
static __forceinline int ffsl(long x)
{
#ifdef _MSC_VER
# include <intrin.h>
# pragma intrinsic(_BitScanForward)
static __forceinline int ffsl(long x) {
unsigned long i;
if (_BitScanForward(&i, x))
return (i + 1);
return (0);
if (_BitScanForward(&i, x)) {
return i + 1;
}
return 0;
}
static __forceinline int ffs(int x)
{
return (ffsl(x));
static __forceinline int ffs(int x) {
return ffsl(x);
}
# ifdef _M_X64
# pragma intrinsic(_BitScanForward64)
# endif
static __forceinline int ffsll(unsigned __int64 x) {
unsigned long i;
#ifdef _M_X64
if (_BitScanForward64(&i, x)) {
return i + 1;
}
return 0;
#else
// Fallback for 32-bit builds where the 64-bit intrinsic is unavailable;
// assumes a little-endian word order.
union {
unsigned __int64 ll;
unsigned long l[2];
} s;
s.ll = x;
if (_BitScanForward(&i, s.l[0])) {
return i + 1;
} else if (_BitScanForward(&i, s.l[1])) {
return i + 33;
}
return 0;
#endif
}
#else
# define ffsll(x) __builtin_ffsll(x)
# define ffsl(x) __builtin_ffsl(x)
# define ffs(x) __builtin_ffs(x)
#endif
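/*
 * Semantics check (illustrative): each ffs variant returns the 1-based
 * index of the least significant set bit, or 0 when no bit is set:
 *
 *   ffs(0x18)         == 4   (0b11000: lowest set bit is bit 3)
 *   ffsll(1ULL << 40) == 41
 *   ffs(0)            == 0
 */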
#endif /* strings_h */

View File

@@ -0,0 +1,6 @@
#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
#define MSVC_COMPAT_WINDOWS_EXTRA_H
#include <errno.h>
#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */

View File

@@ -0,0 +1,343 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
/* #undef JEMALLOC_PREFIX */
/* #undef JEMALLOC_CPREFIX */
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
#define JEMALLOC_OVERRIDE___LIBC_FREE
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#define JEMALLOC_PRIVATE_NAMESPACE je_
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#define CPU_SPINWAIT __asm__ volatile("pause")
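/*
 * Illustrative use (sketch, not part of the generated header): issue the
 * pause hint on every iteration of a spin loop so a hyper-threaded sibling
 * gets execution resources while this CPU waits.
 */
static inline void
spin_until_nonzero_sketch(volatile int *flag) {
    while (*flag == 0) {
        CPU_SPINWAIT; /* Expands to __asm__ volatile("pause") here. */
    }
}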
/*
* Number of significant bits in virtual addresses. This may be less than the
* total number of bits in a pointer, e.g. on x64, for which the uppermost 16
* bits are the same as bit 47.
*/
#define LG_VADDR @JEM_VADDRBITS@
/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1
/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
#define JEMALLOC_HAVE_BUILTIN_CLZ
/*
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
*/
/* #undef JEMALLOC_OS_UNFAIR_LOCK */
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
/* #undef JEMALLOC_OSSPIN */
/* Defined if syscall(2) is usable. */
#define JEMALLOC_USE_SYSCALL
/*
* Defined if secure_getenv(3) is available.
*/
#define JEMALLOC_HAVE_SECURE_GETENV
/*
* Defined if issetugid(2) is available.
*/
/* #undef JEMALLOC_HAVE_ISSETUGID */
/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK
/* Defined if pthread_setname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
/*
* Defined if mach_absolute_time() is available.
*/
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#define JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
/* #undef JEMALLOC_MUTEX_INIT_CB */
/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL @JEM_TLSMODEL@
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
/* #undef JEMALLOC_DEBUG */
/* JEMALLOC_STATS enables statistics calculation. */
/* #undef JEMALLOC_STATS */
/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */
/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */
/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */
/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
* segment (DSS).
*/
#define JEMALLOC_DSS
/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */
/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */
/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
/* #undef LG_QUANTUM */
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12
/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
* explicit huge page support are separately configured.
*/
#define LG_HUGEPAGE 21
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
#define JEMALLOC_MAPS_COALESCE
/*
* If defined, retain memory for later reuse by default rather than using e.g.
* munmap() to unmap freed extents. This is enabled on 64-bit Linux because
* common sequences of mmap()/munmap() calls will cause virtual memory map
* holes.
*/
#define JEMALLOC_RETAIN
/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS
/*
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
* Don't use this directly; instead use unreachable() from util.h
*/
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
/*
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
* use ffs_*() from util.h.
*/
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#define JEMALLOC_CACHE_OBLIVIOUS
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
/* #undef JEMALLOC_ZONE */
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
* /proc/sys/vm/overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/* Defined if madvise(2) is available. */
#define JEMALLOC_HAVE_MADVISE
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
* will be discarded rather than swapped out.
* madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
* defined, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched;
* otherwise this behaves similarly to
* MADV_FREE, though typically with higher
* system overhead.
*/
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS 1
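/*
 * Hypothetical sketch (not jemalloc code) contrasting the two purge
 * flavors selected by the macros above, applied to a page-aligned range on
 * a system where MADV_FREE is available:
 */
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

static void
purge_range_sketch(void *addr, size_t length, bool lazy) {
    if (lazy) {
        /* Pages stay mapped; reclaimed only under memory pressure. */
        madvise(addr, length, MADV_FREE);
    } else {
        /* Pages are discarded now; the next touch demand-zeroes them. */
        madvise(addr, length, MADV_DONTNEED);
    }
}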
/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
#define JEMALLOC_THP
/* Define if operating system has alloca.h header. */
#define JEMALLOC_HAS_ALLOCA_H 1
/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1
/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG @JEM_SIZEDEF@
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
#define JEMALLOC_GLIBC_MALLOC_HOOK
/* glibc memalign hook. */
#define JEMALLOC_GLIBC_MEMALIGN_HOOK
/* pthread support */
#define JEMALLOC_HAVE_PTHREAD
/* dlsym() support */
#define JEMALLOC_HAVE_DLSYM
/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/* GNU specific sched_getcpu support */
#define JEMALLOC_HAVE_SCHED_GETCPU
/* GNU specific sched_setaffinity support */
#define JEMALLOC_HAVE_SCHED_SETAFFINITY
/*
* If defined, all the features necessary for background threads are present.
*/
#define JEMALLOC_BACKGROUND_THREAD 1
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
/* #undef JEMALLOC_EXPORT */
/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF ""
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#define LG_SIZEOF_PTR @JEM_SIZEDEF@
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */

File diff suppressed because it is too large

View File

@@ -0,0 +1,880 @@
#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
/******************************************************************************/
/* Data. */
/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state. Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;
/* False if the necessary runtime support is missing. */
bool can_enable_background_thread;
/******************************************************************************/
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
#include <dlfcn.h>
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
static pthread_once_t once_control = PTHREAD_ONCE_INIT;
static void
pthread_create_wrapper_once(void) {
#ifdef JEMALLOC_LAZY_LOCK
isthreaded = true;
#endif
}
int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
void *(*start_routine)(void *), void *__restrict arg) {
pthread_once(&once_control, pthread_create_wrapper_once);
return pthread_create_fptr(thread, attr, start_routine, arg);
}
#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */
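/*
 * pthread_create_fptr is resolved lazily elsewhere; a typical resolution
 * (sketch, assuming RTLD_NEXT is available) looks like:
 *
 *	pthread_create_fptr = (int (*)(pthread_t *__restrict,
 *	    const pthread_attr_t *, void *(*)(void *), void *__restrict))
 *	    dlsym(RTLD_NEXT, "pthread_create");
 *
 * This lets jemalloc observe thread creation (to flip isthreaded for lazy
 * locking) while still delegating to the real libpthread symbol.
 */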
#ifndef JEMALLOC_BACKGROUND_THREAD
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, size_t npages_new) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(tsdn_t *tsdn,
background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
#else
static bool background_thread_enabled_at_fork;
static void
background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
background_thread_wakeup_time_set(tsdn, info, 0);
info->npages_to_purge_new = 0;
if (config_stats) {
info->tot_n_runs = 0;
nstime_init(&info->tot_sleep_time, 0);
}
}
static inline bool
set_current_thread_affinity(UNUSED int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
return (ret != 0);
#else
return false;
#endif
}
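/*
 * Illustrative sketch (not part of jemalloc): pinning the calling thread to
 * one CPU with the GNU sched_setaffinity() interface, as the helper above
 * does. Linux-only; the CPU index 0 is a made-up example value.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int
main(void) {
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(0, &cpuset);    /* Pin to CPU 0. */
    /* pid 0 means "the calling thread". */
    if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0) {
        perror("sched_setaffinity");
        return 1;
    }
    printf("pinned to cpu 0\n");
    return 0;
}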
/* Threshold for determining when to wake up the background thread. */
#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimum sleep interval: 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
static inline size_t
decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
size_t i;
uint64_t sum = 0;
for (i = 0; i < interval; i++) {
sum += decay->backlog[i] * h_steps[i];
}
for (; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
}
return (size_t)(sum >> SMOOTHSTEP_BFP);
}
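/*
 * Illustrative sketch (not part of jemalloc): the projected-purge arithmetic
 * above in miniature. h_steps[] holds cumulative decay fractions as binary
 * fixed point scaled by 2^BFP; multiplying a backlog of dirty pages by a
 * fraction and shifting right by BFP yields whole pages. The 4-step table
 * and backlog values here are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define BFP 24
#define NSTEPS 4

int
main(void) {
    /* Cumulative fractions 0.1, 0.4, 0.8, 1.0 in 2^24 fixed point. */
    const uint64_t h_steps[NSTEPS] = {
        (uint64_t)(0.1 * (1 << BFP)), (uint64_t)(0.4 * (1 << BFP)),
        (uint64_t)(0.8 * (1 << BFP)), (uint64_t)(1.0 * (1 << BFP))
    };
    const uint64_t backlog[NSTEPS] = {100, 200, 0, 50};
    size_t interval = 2;    /* Look 2 epochs ahead. */
    uint64_t sum = 0;
    size_t i;
    for (i = 0; i < interval; i++) {
        sum += backlog[i] * h_steps[i];
    }
    for (; i < NSTEPS; i++) {
        sum += backlog[i] * (h_steps[i] - h_steps[i - interval]);
    }
    printf("projected purges: %llu pages\n",
        (unsigned long long)(sum >> BFP));
    return 0;
}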
static uint64_t
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
extents_t *extents) {
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
/* Use minimal interval if decay is contended. */
return BACKGROUND_THREAD_MIN_INTERVAL_NS;
}
uint64_t interval;
ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
if (decay_time <= 0) {
/* Purging is currently eager or disabled. */
interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
goto label_done;
}
uint64_t decay_interval_ns = nstime_ns(&decay->interval);
assert(decay_interval_ns > 0);
size_t npages = extents_npages_get(extents);
if (npages == 0) {
unsigned i;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
if (decay->backlog[i] > 0) {
break;
}
}
if (i == SMOOTHSTEP_NSTEPS) {
/* No dirty pages recorded. Sleep indefinitely. */
interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
goto label_done;
}
}
if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
/* Use max interval. */
interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
goto label_done;
}
size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
size_t ub = SMOOTHSTEP_NSTEPS;
/* Require at least 2 intervals to ensure reaching the next epoch deadline. */
lb = (lb < 2) ? 2 : lb;
if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
(lb + 2 > ub)) {
interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
goto label_done;
}
assert(lb + 2 <= ub);
size_t npurge_lb, npurge_ub;
npurge_lb = decay_npurge_after_interval(decay, lb);
if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
interval = decay_interval_ns * lb;
goto label_done;
}
npurge_ub = decay_npurge_after_interval(decay, ub);
if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
interval = decay_interval_ns * ub;
goto label_done;
}
unsigned n_search = 0;
size_t target, npurge;
while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
&& (lb + 2 < ub)) {
target = (lb + ub) / 2;
npurge = decay_npurge_after_interval(decay, target);
if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
ub = target;
npurge_ub = npurge;
} else {
lb = target;
npurge_lb = npurge;
}
assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
}
interval = decay_interval_ns * (ub + lb) / 2;
label_done:
interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
malloc_mutex_unlock(tsdn, &decay->mtx);
return interval;
}
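/*
 * Illustrative sketch (not part of jemalloc): the bisection used above, in
 * isolation. Given a monotonically non-decreasing projection f over step
 * indices, narrow in on the interval whose projected work first crosses a
 * threshold; f and the threshold here are stand-ins.
 */
#include <stdio.h>

static unsigned long
f(unsigned long t) {
    return t * t;    /* Any non-decreasing projection works. */
}

int
main(void) {
    unsigned long lb = 2, ub = 200, threshold = 1024;
    while (f(lb) + threshold < f(ub) && lb + 2 < ub) {
        unsigned long mid = (lb + ub) / 2;
        if (f(mid) > threshold) {
            ub = mid;
        } else {
            lb = mid;
        }
    }
    printf("threshold crossed near t = %lu\n", (lb + ub) / 2);
    return 0;
}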
/* Compute purge interval for background threads. */
static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
uint64_t i1, i2;
i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
&arena->extents_dirty);
if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
return i1;
}
i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
&arena->extents_muzzy);
return i1 < i2 ? i1 : i2;
}
static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
uint64_t interval) {
if (config_stats) {
info->tot_n_runs++;
}
info->npages_to_purge_new = 0;
struct timeval tv;
/* pthread_cond_timedwait() measures against this specific clock. */
gettimeofday(&tv, NULL);
nstime_t before_sleep;
nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);
int ret;
if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
assert(background_thread_indefinite_sleep(info));
ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
assert(ret == 0);
} else {
assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
/* We need the malloc clock (which can differ from tv's clock). */
nstime_t next_wakeup;
nstime_init(&next_wakeup, 0);
nstime_update(&next_wakeup);
nstime_iadd(&next_wakeup, interval);
assert(nstime_ns(&next_wakeup) <
BACKGROUND_THREAD_INDEFINITE_SLEEP);
background_thread_wakeup_time_set(tsdn, info,
nstime_ns(&next_wakeup));
nstime_t ts_wakeup;
nstime_copy(&ts_wakeup, &before_sleep);
nstime_iadd(&ts_wakeup, interval);
struct timespec ts;
ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);
assert(!background_thread_indefinite_sleep(info));
ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
assert(ret == ETIMEDOUT || ret == 0);
background_thread_wakeup_time_set(tsdn, info,
BACKGROUND_THREAD_INDEFINITE_SLEEP);
}
if (config_stats) {
gettimeofday(&tv, NULL);
nstime_t after_sleep;
nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
if (nstime_compare(&after_sleep, &before_sleep) > 0) {
nstime_subtract(&after_sleep, &before_sleep);
nstime_add(&info->tot_sleep_time, &after_sleep);
}
}
}
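/*
 * Illustrative sketch (not part of jemalloc): building the absolute deadline
 * that pthread_cond_timedwait() expects from the same gettimeofday() clock
 * used above. The mutex, condition, and 100 ms interval are example values.
 */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <sys/time.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

int
main(void) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    uint64_t interval_ns = 100 * 1000 * 1000;    /* 100 ms. */
    /* Convert "now + interval" into an absolute timespec. */
    uint64_t nsec = (uint64_t)tv.tv_usec * 1000 + interval_ns;
    struct timespec ts;
    ts.tv_sec = tv.tv_sec + (time_t)(nsec / 1000000000);
    ts.tv_nsec = (long)(nsec % 1000000000);
    pthread_mutex_lock(&mtx);
    int ret = pthread_cond_timedwait(&cond, &mtx, &ts);
    pthread_mutex_unlock(&mtx);
    /* With nobody signaling, the wait should simply time out. */
    return (ret == ETIMEDOUT) ? 0 : 1;
}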
static bool
background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
if (unlikely(info->state == background_thread_paused)) {
malloc_mutex_unlock(tsdn, &info->mtx);
/* Wait on global lock to update status. */
malloc_mutex_lock(tsdn, &background_thread_lock);
malloc_mutex_unlock(tsdn, &background_thread_lock);
malloc_mutex_lock(tsdn, &info->mtx);
return true;
}
return false;
}
static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) {
uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
unsigned narenas = narenas_total_get();
for (unsigned i = ind; i < narenas; i += ncpus) {
arena_t *arena = arena_get(tsdn, i, false);
if (!arena) {
continue;
}
arena_decay(tsdn, arena, true, false);
if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
/* Min interval will be used. */
continue;
}
uint64_t interval = arena_decay_compute_purge_interval(tsdn,
arena);
assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
if (min_interval > interval) {
min_interval = interval;
}
}
background_thread_sleep(tsdn, info, min_interval);
}
static bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
if (info == &background_thread_info[0]) {
malloc_mutex_assert_owner(tsd_tsdn(tsd),
&background_thread_lock);
} else {
malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
&background_thread_lock);
}
pre_reentrancy(tsd, NULL);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
bool has_thread;
assert(info->state != background_thread_paused);
if (info->state == background_thread_started) {
has_thread = true;
info->state = background_thread_stopped;
pthread_cond_signal(&info->cond);
} else {
has_thread = false;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
if (!has_thread) {
post_reentrancy(tsd);
return false;
}
void *ret;
if (pthread_join(info->thread, &ret)) {
post_reentrancy(tsd);
return true;
}
assert(ret == NULL);
n_background_threads--;
post_reentrancy(tsd);
return false;
}
static void *background_thread_entry(void *ind_arg);
static int
background_thread_create_signals_masked(pthread_t *thread,
const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
/*
* Block all signals during thread creation so that the new thread
* starts with every signal masked.
*/
sigset_t set;
sigfillset(&set);
sigset_t oldset;
int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
if (mask_err != 0) {
return mask_err;
}
int create_err = pthread_create_wrapper(thread, attr, start_routine,
arg);
/*
* Restore the signal mask. Failure to restore the signal mask here
* changes program behavior.
*/
int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
if (restore_err != 0) {
malloc_printf("<jemalloc>: background thread creation "
"failed (%d), and signal mask restoration failed "
"(%d)\n", create_err, restore_err);
if (opt_abort) {
abort();
}
}
return create_err;
}
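/*
 * Illustrative sketch (not part of jemalloc): blocking all signals around
 * pthread_create() so the child starts with a fully masked signal set, then
 * restoring the caller's mask, mirroring the wrapper above.
 */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *
worker(void *arg) {
    (void)arg;
    return NULL;
}

int
main(void) {
    sigset_t all, old;
    sigfillset(&all);
    if (pthread_sigmask(SIG_SETMASK, &all, &old) != 0) {
        return 1;
    }
    pthread_t t;
    int err = pthread_create(&t, NULL, worker, NULL);
    /* Restore the caller's mask whether or not creation succeeded. */
    pthread_sigmask(SIG_SETMASK, &old, NULL);
    if (err != 0) {
        fprintf(stderr, "pthread_create: %d\n", err);
        return 1;
    }
    pthread_join(t, NULL);
    return 0;
}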
static void
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
bool *created_threads) {
if (likely(*n_created == n_background_threads)) {
return;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
label_restart:
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
for (unsigned i = 1; i < ncpus; i++) {
if (created_threads[i]) {
continue;
}
background_thread_info_t *info = &background_thread_info[i];
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
assert(info->state != background_thread_paused);
bool create = (info->state == background_thread_started);
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
if (!create) {
continue;
}
/*
* To avoid deadlock with prefork handlers (which wait for the
* mutex held here), unlock before calling pthread_create().
*/
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
pre_reentrancy(tsd, NULL);
int err = background_thread_create_signals_masked(&info->thread,
NULL, background_thread_entry, (void *)(uintptr_t)i);
post_reentrancy(tsd);
if (err == 0) {
(*n_created)++;
created_threads[i] = true;
} else {
malloc_printf("<jemalloc>: background thread "
"creation failed (%d)\n", err);
if (opt_abort) {
abort();
}
}
/* Restart since we unlocked. */
goto label_restart;
}
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
}
static void
background_thread0_work(tsd_t *tsd) {
/* Thread0 is also responsible for launching / terminating threads. */
VARIABLE_ARRAY(bool, created_threads, ncpus);
unsigned i;
for (i = 1; i < ncpus; i++) {
created_threads[i] = false;
}
/* Start working, and create more threads when asked. */
unsigned n_created = 1;
while (background_thread_info[0].state != background_thread_stopped) {
if (background_thread_pause_check(tsd_tsdn(tsd),
&background_thread_info[0])) {
continue;
}
check_background_thread_creation(tsd, &n_created,
(bool *)&created_threads);
background_work_sleep_once(tsd_tsdn(tsd),
&background_thread_info[0], 0);
}
/*
* Shut down other threads at exit. Note that the ctl thread holds the
* global background_thread mutex and is waiting for us.
*/
assert(!background_thread_enabled());
for (i = 1; i < ncpus; i++) {
background_thread_info_t *info = &background_thread_info[i];
assert(info->state != background_thread_paused);
if (created_threads[i]) {
background_threads_disable_single(tsd, info);
} else {
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
/* Clear in case the thread wasn't created. */
info->state = background_thread_stopped;
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
}
background_thread_info[0].state = background_thread_stopped;
assert(n_background_threads == 1);
}
static void
background_work(tsd_t *tsd, unsigned ind) {
background_thread_info_t *info = &background_thread_info[ind];
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
BACKGROUND_THREAD_INDEFINITE_SLEEP);
if (ind == 0) {
background_thread0_work(tsd);
} else {
while (info->state != background_thread_stopped) {
if (background_thread_pause_check(tsd_tsdn(tsd),
info)) {
continue;
}
background_work_sleep_once(tsd_tsdn(tsd), info, ind);
}
}
assert(info->state == background_thread_stopped);
background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
static void *
background_thread_entry(void *ind_arg) {
unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
assert(thread_ind < ncpus);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#endif
if (opt_percpu_arena != percpu_arena_disabled) {
set_current_thread_affinity((int)thread_ind);
}
/*
* Start periodic background work. We use internal tsd, which avoids
* side effects such as triggering new arena creation (which would in
* turn trigger creation of yet another background thread).
*/
background_work(tsd_internal_fetch(), thread_ind);
assert(pthread_equal(pthread_self(),
background_thread_info[thread_ind].thread));
return NULL;
}
static void
background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
info->state = background_thread_started;
background_thread_info_init(tsd_tsdn(tsd), info);
n_background_threads++;
}
/* Create a new background thread if needed. */
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
assert(have_background_thread);
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
/* We create at most NCPUs threads. */
size_t thread_ind = arena_ind % ncpus;
background_thread_info_t *info = &background_thread_info[thread_ind];
bool need_new_thread;
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
need_new_thread = background_thread_enabled() &&
(info->state == background_thread_stopped);
if (need_new_thread) {
background_thread_init(tsd, info);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
if (!need_new_thread) {
return false;
}
if (arena_ind != 0) {
/* Threads are created asynchronously by Thread 0. */
background_thread_info_t *t0 = &background_thread_info[0];
malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
assert(t0->state == background_thread_started);
pthread_cond_signal(&t0->cond);
malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);
return false;
}
pre_reentrancy(tsd, NULL);
/*
* To avoid complications (besides reentrancy), create internal
* background threads with the underlying pthread_create.
*/
int err = background_thread_create_signals_masked(&info->thread, NULL,
background_thread_entry, (void *)thread_ind);
post_reentrancy(tsd);
if (err != 0) {
malloc_printf("<jemalloc>: arena 0 background thread creation "
"failed (%d)\n", err);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
info->state = background_thread_stopped;
n_background_threads--;
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
return true;
}
return false;
}
bool
background_threads_enable(tsd_t *tsd) {
assert(n_background_threads == 0);
assert(background_thread_enabled());
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
VARIABLE_ARRAY(bool, marked, ncpus);
unsigned i, nmarked;
for (i = 0; i < ncpus; i++) {
marked[i] = false;
}
nmarked = 0;
/* Mark the threads we need to create for thread 0. */
unsigned n = narenas_total_get();
for (i = 1; i < n; i++) {
if (marked[i % ncpus] ||
arena_get(tsd_tsdn(tsd), i, false) == NULL) {
continue;
}
background_thread_info_t *info = &background_thread_info[i];
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
assert(info->state == background_thread_stopped);
background_thread_init(tsd, info);
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
marked[i % ncpus] = true;
if (++nmarked == ncpus) {
break;
}
}
return background_thread_create(tsd, 0);
}
bool
background_threads_disable(tsd_t *tsd) {
assert(!background_thread_enabled());
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
/* Thread 0 will be responsible for terminating other threads. */
if (background_threads_disable_single(tsd,
&background_thread_info[0])) {
return true;
}
assert(n_background_threads == 0);
return false;
}
/* Check if we need to signal the background thread early. */
void
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, size_t npages_new) {
background_thread_info_t *info = arena_background_thread_info_get(
arena);
if (malloc_mutex_trylock(tsdn, &info->mtx)) {
/*
* The background thread may hold the mutex for a long period of
* time. To avoid imposing that variance on application threads,
* keep this non-blocking and leave the work to a future epoch.
*/
return;
}
if (info->state != background_thread_started) {
goto label_done;
}
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
goto label_done;
}
ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
if (decay_time <= 0) {
/* Purging is currently eager or disabled. */
goto label_done_unlock2;
}
uint64_t decay_interval_ns = nstime_ns(&decay->interval);
assert(decay_interval_ns > 0);
nstime_t diff;
nstime_init(&diff, background_thread_wakeup_time_get(info));
if (nstime_compare(&diff, &decay->epoch) <= 0) {
goto label_done_unlock2;
}
nstime_subtract(&diff, &decay->epoch);
if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
goto label_done_unlock2;
}
if (npages_new > 0) {
size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
/*
* Compute how many new pages we would need to purge by the next
* wakeup, which is used to determine if we should signal the
* background thread.
*/
uint64_t npurge_new;
if (n_epoch >= SMOOTHSTEP_NSTEPS) {
npurge_new = npages_new;
} else {
uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
assert(h_steps_max >=
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npurge_new = npages_new * (h_steps_max -
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npurge_new >>= SMOOTHSTEP_BFP;
}
info->npages_to_purge_new += npurge_new;
}
bool should_signal;
if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
should_signal = true;
} else if (unlikely(background_thread_indefinite_sleep(info)) &&
(extents_npages_get(&arena->extents_dirty) > 0 ||
extents_npages_get(&arena->extents_muzzy) > 0 ||
info->npages_to_purge_new > 0)) {
should_signal = true;
} else {
should_signal = false;
}
if (should_signal) {
info->npages_to_purge_new = 0;
pthread_cond_signal(&info->cond);
}
label_done_unlock2:
malloc_mutex_unlock(tsdn, &decay->mtx);
label_done:
malloc_mutex_unlock(tsdn, &info->mtx);
}
void
background_thread_prefork0(tsdn_t *tsdn) {
malloc_mutex_prefork(tsdn, &background_thread_lock);
background_thread_enabled_at_fork = background_thread_enabled();
}
void
background_thread_prefork1(tsdn_t *tsdn) {
for (unsigned i = 0; i < ncpus; i++) {
malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
}
}
void
background_thread_postfork_parent(tsdn_t *tsdn) {
for (unsigned i = 0; i < ncpus; i++) {
malloc_mutex_postfork_parent(tsdn,
&background_thread_info[i].mtx);
}
malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
}
void
background_thread_postfork_child(tsdn_t *tsdn) {
for (unsigned i = 0; i < ncpus; i++) {
malloc_mutex_postfork_child(tsdn,
&background_thread_info[i].mtx);
}
malloc_mutex_postfork_child(tsdn, &background_thread_lock);
if (!background_thread_enabled_at_fork) {
return;
}
/* Clear background_thread state (reset to disabled for child). */
malloc_mutex_lock(tsdn, &background_thread_lock);
n_background_threads = 0;
background_thread_enabled_set(tsdn, false);
for (unsigned i = 0; i < ncpus; i++) {
background_thread_info_t *info = &background_thread_info[i];
malloc_mutex_lock(tsdn, &info->mtx);
info->state = background_thread_stopped;
int ret = pthread_cond_init(&info->cond, NULL);
assert(ret == 0);
background_thread_info_init(tsdn, info);
malloc_mutex_unlock(tsdn, &info->mtx);
}
malloc_mutex_unlock(tsdn, &background_thread_lock);
}
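/*
 * Illustrative sketch (not part of jemalloc): the prefork/postfork discipline
 * above, driven by pthread_atfork(). Every lock is acquired before fork() so
 * the child never inherits a mutex frozen in a locked state; the single lock
 * here stands in for jemalloc's per-thread-info mutexes.
 */
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void prefork(void) { pthread_mutex_lock(&lock); }
static void postfork_parent(void) { pthread_mutex_unlock(&lock); }
static void postfork_child(void) {
    /* The child may also reset state, as jemalloc resets thread counts. */
    pthread_mutex_unlock(&lock);
}

int
main(void) {
    pthread_atfork(prefork, postfork_parent, postfork_child);
    pid_t pid = fork();
    if (pid == 0) {
        _exit(0);    /* Child exits immediately. */
    }
    return (pid > 0) ? 0 : 1;
}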
bool
background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
assert(config_stats);
malloc_mutex_lock(tsdn, &background_thread_lock);
if (!background_thread_enabled()) {
malloc_mutex_unlock(tsdn, &background_thread_lock);
return true;
}
stats->num_threads = n_background_threads;
uint64_t num_runs = 0;
nstime_init(&stats->run_interval, 0);
for (unsigned i = 0; i < ncpus; i++) {
background_thread_info_t *info = &background_thread_info[i];
malloc_mutex_lock(tsdn, &info->mtx);
if (info->state != background_thread_stopped) {
num_runs += info->tot_n_runs;
nstime_add(&stats->run_interval, &info->tot_sleep_time);
}
malloc_mutex_unlock(tsdn, &info->mtx);
}
stats->num_runs = num_runs;
if (num_runs > 0) {
nstime_idivide(&stats->run_interval, num_runs);
}
malloc_mutex_unlock(tsdn, &background_thread_lock);
return false;
}
#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
/*
* When lazy lock is enabled, we need to make sure isthreaded is set before
* taking any background_thread locks. This is called early in ctl (instead
* of waiting for the pthread_create calls to trigger it) because the mutex
* is required before creating background threads.
*/
void
background_thread_ctl_init(tsdn_t *tsdn) {
malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
pthread_once(&once_control, pthread_create_wrapper_once);
#endif
}
#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */
bool
background_thread_boot0(void) {
if (!have_background_thread && opt_background_thread) {
malloc_printf("<jemalloc>: option background_thread currently "
"supports pthread only\n");
return true;
}
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
if (pthread_create_fptr == NULL) {
can_enable_background_thread = false;
if (config_lazy_lock || opt_background_thread) {
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
"\"pthread_create\")\n");
abort();
}
} else {
can_enable_background_thread = true;
}
#endif
return false;
}
bool
background_thread_boot1(tsdn_t *tsdn) {
#ifdef JEMALLOC_BACKGROUND_THREAD
assert(have_background_thread);
assert(narenas_total_get() > 0);
background_thread_enabled_set(tsdn, opt_background_thread);
if (malloc_mutex_init(&background_thread_lock,
"background_thread_global",
WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
malloc_mutex_rank_exclusive)) {
return true;
}
if (opt_background_thread) {
background_thread_ctl_init(tsdn);
}
background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
b0get(), ncpus * sizeof(background_thread_info_t), CACHELINE);
if (background_thread_info == NULL) {
return true;
}
for (unsigned i = 0; i < ncpus; i++) {
background_thread_info_t *info = &background_thread_info[i];
/* Thread mutex is rank_inclusive because of thread0. */
if (malloc_mutex_init(&info->mtx, "background_thread",
WITNESS_RANK_BACKGROUND_THREAD,
malloc_mutex_address_ordered)) {
return true;
}
if (pthread_cond_init(&info->cond, NULL)) {
return true;
}
malloc_mutex_lock(tsdn, &info->mtx);
info->state = background_thread_stopped;
background_thread_info_init(tsdn, info);
malloc_mutex_unlock(tsdn, &info->mtx);
}
#endif
return false;
}

View File

@@ -1,142 +1,402 @@
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"
/******************************************************************************/
/* Data. */
static malloc_mutex_t base_mtx;
static base_t *b0;
/******************************************************************************/
static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
void *addr;
bool zero = true;
bool commit = true;
assert(size == HUGEPAGE_CEILING(size));
if (extent_hooks == &extent_hooks_default) {
addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
} else {
/* No arena context as we are creating new arenas. */
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
pre_reentrancy(tsd, NULL);
addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
&zero, &commit, ind);
post_reentrancy(tsd);
}
return addr;
}
static void
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
size_t size) {
/*
* Cascade through dalloc, decommit, purge_forced, and purge_lazy,
* stopping at first success. This cascade is performed for consistency
* with the cascade in extent_dalloc_wrapper() because an application's
* custom hooks may not support e.g. dalloc. This function is only ever
* called as a side effect of arena destruction, so although it might
* seem pointless to do anything besides dalloc here, the application
* may in fact want all associated virtual memory left in a consistent
* but still-allocated state.
*/
if (extent_hooks == &extent_hooks_default) {
if (!extent_dalloc_mmap(addr, size)) {
return;
}
if (!pages_decommit(addr, size)) {
return;
}
if (!pages_purge_forced(addr, size)) {
return;
}
if (!pages_purge_lazy(addr, size)) {
return;
}
/* Nothing worked. This should never happen. */
not_reached();
} else {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
pre_reentrancy(tsd, NULL);
if (extent_hooks->dalloc != NULL &&
!extent_hooks->dalloc(extent_hooks, addr, size, true,
ind)) {
goto label_done;
}
if (extent_hooks->decommit != NULL &&
!extent_hooks->decommit(extent_hooks, addr, size, 0, size,
ind)) {
goto label_done;
}
if (extent_hooks->purge_forced != NULL &&
!extent_hooks->purge_forced(extent_hooks, addr, size, 0,
size, ind)) {
goto label_done;
}
if (extent_hooks->purge_lazy != NULL &&
!extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
ind)) {
goto label_done;
}
/* Nothing worked. That's the application's problem. */
label_done:
post_reentrancy(tsd);
return;
}
}
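/*
 * Illustrative sketch (not part of jemalloc): the fallback cascade above as a
 * pattern. Each step returns false on success, the first success wins, and
 * the steps here are stubs standing in for the dalloc/decommit/purge hooks.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool try_dalloc(void *a, size_t s) { (void)a; (void)s; return true; }
static bool try_decommit(void *a, size_t s) { (void)a; (void)s; return false; }
static bool try_purge(void *a, size_t s) { (void)a; (void)s; return false; }

static void
release(void *addr, size_t size) {
    if (!try_dalloc(addr, size)) {
        return;
    }
    if (!try_decommit(addr, size)) {
        return;
    }
    if (!try_purge(addr, size)) {
        return;
    }
    /* Nothing worked; a real implementation would flag this. */
}

int
main(void) {
    char buf[16];
    release(buf, sizeof(buf));
    puts("released (decommit step succeeded)");
    return 0;
}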
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
size_t size) {
size_t sn;
sn = *extent_sn_next;
(*extent_sn_next)++;
extent_binit(extent, addr, size, sn);
}
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
assert(size == ALIGNMENT_CEILING(size, alignment));
*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
alignment) - (uintptr_t)extent_addr_get(extent);
ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
assert(extent_bsize_get(extent) >= *gap_size + size);
extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
*gap_size + size), extent_bsize_get(extent) - *gap_size - size,
extent_sn_get(extent));
return ret;
}
static void
base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
size_t gap_size, void *addr, size_t size) {
if (extent_bsize_get(extent) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor =
sz_size2index(extent_bsize_get(extent) + 1) - 1;
extent_heap_insert(&base->avail[index_floor], extent);
}
if (config_stats) {
base->allocated += size;
/*
* Add one PAGE to base_resident for every page boundary that is
* crossed by the new allocation.
*/
base->resident += PAGE_CEILING((uintptr_t)addr + size) -
PAGE_CEILING((uintptr_t)addr - gap_size);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
}
}
static void *
base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
size_t size, size_t alignment) {
void *ret;
size_t gap_size;
ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
return ret;
}
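/*
 * Illustrative sketch (not part of jemalloc): bump allocation with an
 * alignment gap, as in the helper above. The leading bytes skipped to reach
 * the next aligned address are the "gap"; jemalloc recycles that gap, while
 * this toy simply reports it. Pool size and alignment are example values.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int
main(void) {
    static char pool[1024];
    uintptr_t cursor = (uintptr_t)pool + 3;    /* Deliberately misaligned. */
    size_t size = 64, alignment = 64;
    uintptr_t aligned = ALIGN_UP(cursor, alignment);
    size_t gap = aligned - cursor;
    void *ret = (void *)aligned;
    cursor = aligned + size;    /* Bump past the allocation. */
    printf("ret=%p gap=%zu bytes\n", ret, gap);
    return 0;
}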
/*
* Current pages that are being used for internal memory allocations. These
* pages are carved up in cacheline-size quanta, so that there is no chance of
* false cache line sharing.
* Allocate a block of virtual memory that is large enough to start with a
* base_block_t header, followed by an object of specified size and alignment.
* On success a pointer to the initialized base_block_t header is returned.
*/
static void *base_pages;
static void *base_next_addr;
static void *base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t *base_nodes;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static bool base_pages_alloc(size_t minsize);
/******************************************************************************/
static bool
base_pages_alloc(size_t minsize)
{
size_t csize;
bool zero;
assert(minsize != 0);
csize = CHUNK_CEILING(minsize);
zero = false;
base_pages = chunk_alloc(csize, chunksize, true, &zero,
chunk_dss_prec_get());
if (base_pages == NULL)
return (true);
base_next_addr = base_pages;
base_past_addr = (void *)((uintptr_t)base_pages + csize);
return (false);
static base_block_t *
base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind,
pszind_t *pind_last, size_t *extent_sn_next, size_t size,
size_t alignment) {
alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t header_size = sizeof(base_block_t);
size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
header_size;
/*
* Create progressively larger blocks in order to limit the total number
* of disjoint virtual memory ranges. Choose the next size in the page
* size class series (skipping size classes that are not a multiple of
* HUGEPAGE), or a size large enough to satisfy the requested size and
* alignment, whichever is larger.
*/
size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
+ usize));
pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
*pind_last;
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size;
base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
block_size);
if (block == NULL) {
return NULL;
}
*pind_last = sz_psz2ind(block_size);
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
base_extent_init(extent_sn_next, &block->extent,
(void *)((uintptr_t)block + header_size), block_size - header_size);
return block;
}
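/*
 * Illustrative sketch (not part of jemalloc): the block-size policy above.
 * Each new block is at least the next entry in a growing size series, so the
 * number of disjoint mappings stays logarithmic in total memory. The plain
 * doubling series and 2 MiB huge-page rounding are example stand-ins for
 * jemalloc's page size classes.
 */
#include <stddef.h>
#include <stdio.h>

#define HUGEPAGE ((size_t)2 << 20)
#define HUGEPAGE_CEIL(s) (((s) + HUGEPAGE - 1) & ~(HUGEPAGE - 1))

int
main(void) {
    size_t prev = HUGEPAGE;
    size_t requests[] = {4096, 4096, 10 << 20, 4096};
    for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
        size_t min_needed = HUGEPAGE_CEIL(requests[i]);
        size_t next = prev * 2;    /* Next step in the series. */
        size_t block = (min_needed > next) ? min_needed : next;
        printf("request %zu -> block %zu MiB\n", requests[i], block >> 20);
        prev = block;
    }
    return 0;
}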
void *
base_alloc(size_t size)
{
void *ret;
size_t csize;
/*
* Allocate an extent that is at least as large as the specified size, with
* the specified alignment.
*/
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &base->mtx);
/* Round size up to nearest multiple of the cacheline size. */
csize = CACHELINE_CEILING(size);
malloc_mutex_lock(&base_mtx);
/* Make sure there's enough space for the allocation. */
if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
if (base_pages_alloc(csize)) {
malloc_mutex_unlock(&base_mtx);
return (NULL);
extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
/*
* Drop mutex during base_block_alloc(), because an extent hook will be
* called.
*/
malloc_mutex_unlock(tsdn, &base->mtx);
base_block_t *block = base_block_alloc(tsdn, extent_hooks,
base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
alignment);
malloc_mutex_lock(tsdn, &base->mtx);
if (block == NULL) {
return NULL;
}
block->next = base->blocks;
base->blocks = block;
if (config_stats) {
base->allocated += sizeof(base_block_t);
base->resident += PAGE_CEILING(sizeof(base_block_t));
base->mapped += block->size;
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
}
/* Allocate. */
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
malloc_mutex_unlock(&base_mtx);
VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
return (ret);
return &block->extent;
}
void *
base_calloc(size_t number, size_t size)
{
void *ret = base_alloc(number * size);
if (ret != NULL)
memset(ret, 0, number * size);
return (ret);
base_t *
b0get(void) {
return b0;
}
extent_node_t *
base_node_alloc(void)
{
extent_node_t *ret;
malloc_mutex_lock(&base_mtx);
if (base_nodes != NULL) {
ret = base_nodes;
base_nodes = *(extent_node_t **)ret;
malloc_mutex_unlock(&base_mtx);
VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
} else {
malloc_mutex_unlock(&base_mtx);
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
pszind_t pind_last = 0;
size_t extent_sn_next = 0;
base_block_t *block = base_block_alloc(tsdn, extent_hooks, ind,
&pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
if (block == NULL) {
return NULL;
}
return (ret);
size_t gap_size;
size_t base_alignment = CACHELINE;
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
&gap_size, base_size, base_alignment);
base->ind = ind;
atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
malloc_mutex_rank_exclusive)) {
base_unmap(tsdn, extent_hooks, ind, block, block->size);
return NULL;
}
base->pind_last = pind_last;
base->extent_sn_next = extent_sn_next;
base->blocks = block;
for (szind_t i = 0; i < NSIZES; i++) {
extent_heap_new(&base->avail[i]);
}
if (config_stats) {
base->allocated = sizeof(base_block_t);
base->resident = PAGE_CEILING(sizeof(base_block_t));
base->mapped = block->size;
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
}
base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
base_size);
return base;
}
void
base_node_dealloc(extent_node_t *node)
{
base_delete(tsdn_t *tsdn, base_t *base) {
extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
base_block_t *next = base->blocks;
do {
base_block_t *block = next;
next = block->next;
base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
block->size);
} while (next != NULL);
}
VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
malloc_mutex_lock(&base_mtx);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
malloc_mutex_unlock(&base_mtx);
extent_hooks_t *
base_extent_hooks_get(base_t *base) {
return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
ATOMIC_ACQUIRE);
}
extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
return old_extent_hooks;
}
static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t *esn) {
alignment = QUANTUM_CEILING(alignment);
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
extent_t *extent = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
/* Use existing space. */
break;
}
}
if (extent == NULL) {
/* Try to allocate more space. */
extent = base_extent_alloc(tsdn, base, usize, alignment);
}
void *ret;
if (extent == NULL) {
ret = NULL;
goto label_return;
}
ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
if (esn != NULL) {
*esn = extent_sn_get(extent);
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
return ret;
}
/*
* base_alloc() returns zeroed memory, which is always demand-zeroed for the
* auto arenas, in order to make multi-page sparse data structures such as radix
* tree nodes efficient with respect to physical memory usage. Upon success a
* pointer to at least size bytes with specified alignment is returned. Note
* that size is rounded up to the nearest multiple of alignment to avoid false
* sharing.
*/
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
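/*
 * Illustrative sketch (not part of jemalloc): why size is rounded up to a
 * multiple of the alignment. With 64-byte cachelines, two back-to-back
 * 48-byte objects would share a line; rounding each up to 64 keeps writers
 * on different lines. The numbers are examples.
 */
#include <stddef.h>
#include <stdio.h>

#define ALIGNMENT_CEIL(s, a) (((s) + (a) - 1) / (a) * (a))

int
main(void) {
    size_t size = 48, cacheline = 64;
    size_t usize = ALIGNMENT_CEIL(size, cacheline);
    printf("requested %zu, allocated %zu (no shared cacheline)\n",
        size, usize);
    return 0;
}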
extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
size_t esn;
extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
CACHELINE, &esn);
if (extent == NULL) {
return NULL;
}
extent_esn_set(extent, esn);
return extent;
}
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
size_t *mapped) {
cassert(config_stats);
malloc_mutex_lock(tsdn, &base->mtx);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
*allocated = base->allocated;
*resident = base->resident;
*mapped = base->mapped;
malloc_mutex_unlock(tsdn, &base->mtx);
}
void
base_prefork(tsdn_t *tsdn, base_t *base) {
malloc_mutex_prefork(tsdn, &base->mtx);
}
void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
malloc_mutex_postfork_parent(tsdn, &base->mtx);
}
void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
malloc_mutex_postfork_child(tsdn, &base->mtx);
}
bool
base_boot(void)
{
base_nodes = NULL;
if (malloc_mutex_init(&base_mtx))
return (true);
return (false);
}
void
base_prefork(void)
{
malloc_mutex_prefork(&base_mtx);
}
void
base_postfork_parent(void)
{
malloc_mutex_postfork_parent(&base_mtx);
}
void
base_postfork_child(void)
{
malloc_mutex_postfork_child(&base_mtx);
base_boot(tsdn_t *tsdn) {
b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
return (b0 == NULL);
}

View File

@@ -1,24 +1,15 @@
#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t bits2groups(size_t nbits);
#include "jemalloc/internal/assert.h"
/******************************************************************************/
static size_t
bits2groups(size_t nbits)
{
return ((nbits >> LG_BITMAP_GROUP_NBITS) +
!!(nbits & BITMAP_GROUP_NBITS_MASK));
}
#ifdef BITMAP_USE_TREE
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
unsigned i;
size_t group_count;
@@ -31,60 +22,100 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
* that requires only one group.
*/
binfo->levels[0].group_offset = 0;
group_count = bits2groups(nbits);
group_count = BITMAP_BITS2GROUPS(nbits);
for (i = 1; group_count > 1; i++) {
assert(i < BITMAP_MAX_LEVELS);
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
group_count = bits2groups(group_count);
group_count = BITMAP_BITS2GROUPS(group_count);
}
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
binfo->nlevels = i;
binfo->nbits = nbits;
}
size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
}
size_t
bitmap_size(size_t nbits)
{
bitmap_info_t binfo;
bitmap_info_init(&binfo, nbits);
return (bitmap_info_ngroups(&binfo));
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
return binfo->levels[binfo->nlevels].group_offset;
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
size_t extra;
unsigned i;
/*
* Bits are actually inverted with regard to the external bitmap
* interface, so the bitmap starts out with all 1 bits, except for
* trailing unused bits (if any). Note that each group uses bit 0 to
* correspond to the first logical bit in the group, so extra bits
* are the most significant bits of the last group.
* interface.
*/
memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
LG_SIZEOF_BITMAP);
if (fill) {
/* The "filled" bitmap starts out with all 0 bits. */
memset(bitmap, 0, bitmap_size(binfo));
return;
}
/*
* The "empty" bitmap starts out with all 1 bits, except for trailing
* unused bits (if any). Note that each group uses bit 0 to correspond
* to the first logical bit in the group, so extra bits are the most
* significant bits of the last group.
*/
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
if (extra != 0) {
bitmap[binfo->levels[1].group_offset - 1] >>= extra;
}
for (i = 1; i < binfo->nlevels; i++) {
size_t group_count = binfo->levels[i].group_offset -
binfo->levels[i-1].group_offset;
extra = (BITMAP_GROUP_NBITS - (group_count &
BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
if (extra != 0) {
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
}
}
}
#else /* BITMAP_USE_TREE */
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
binfo->nbits = nbits;
}
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
return binfo->ngroups;
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
size_t extra;
if (fill) {
memset(bitmap, 0, bitmap_size(binfo));
return;
}
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0) {
bitmap[binfo->ngroups - 1] >>= extra;
}
}
#endif /* BITMAP_USE_TREE */
size_t
bitmap_size(const bitmap_info_t *binfo) {
return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
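/*
 * Illustrative sketch (not part of jemalloc): the inverted-bitmap
 * initialization above for a single 64-bit group. For nbits = 10, the low 10
 * bits start as 1 ("free") and the 54 unused high bits are shifted away so
 * they can never be handed out. nbits is an example value.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void) {
    unsigned group_nbits = 64, nbits = 10;
    uint64_t group = ~(uint64_t)0;    /* memset(0xff) equivalent. */
    unsigned extra = (group_nbits - (nbits % group_nbits)) % group_nbits;
    if (extra != 0) {
        group >>= extra;
    }
    printf("group = 0x%016llx (%u usable bits)\n",
        (unsigned long long)group, nbits);
    return 0;
}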

View File

@@ -35,13 +35,23 @@
*
******************************************************************************/
#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_internal.h"
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static bool ckh_grow(ckh_t *ckh);
static void ckh_shrink(ckh_t *ckh);
static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
/******************************************************************************/
@@ -49,27 +59,26 @@ static void ckh_shrink(ckh_t *ckh);
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
JEMALLOC_INLINE_C size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
static size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
ckhc_t *cell;
unsigned i;
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
if (cell->key != NULL && ckh->keycomp(key, cell->key))
return ((bucket << LG_CKH_BUCKET_CELLS) + i);
if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
return (bucket << LG_CKH_BUCKET_CELLS) + i;
}
}
return (SIZE_T_MAX);
return SIZE_T_MAX;
}
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
JEMALLOC_INLINE_C size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
static size_t
ckh_isearch(ckh_t *ckh, const void *key) {
size_t hashes[2], bucket, cell;
assert(ckh != NULL);
@@ -79,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key)
/* Search primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
if (cell != SIZE_T_MAX)
return (cell);
if (cell != SIZE_T_MAX) {
return cell;
}
/* Search secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
return (cell);
return cell;
}
JEMALLOC_INLINE_C bool
static bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
const void *data)
{
const void *data) {
ckhc_t *cell;
unsigned offset, i;
@@ -99,7 +108,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -107,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
cell->key = key;
cell->data = data;
ckh->count++;
return (false);
return false;
}
}
return (true);
return true;
}
/*
@@ -120,10 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
JEMALLOC_INLINE_C bool
static bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
void const **argdata)
{
void const **argdata) {
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
size_t hashes[2], bucket, tbucket;
@@ -141,7 +150,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same
* bucket.
*/
prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
@@ -181,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
if (tbucket == argbucket) {
*argkey = key;
*argdata = data;
return (true);
return true;
}
bucket = tbucket;
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return false;
}
}
}
JEMALLOC_INLINE_C bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
{
static bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
@@ -201,27 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return false;
}
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return false;
}
/*
* Try to find a place for this item via iterative eviction/relocation.
*/
return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
}
/*
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
JEMALLOC_INLINE_C bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
{
static bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
size_t count, i, nins;
const void *key, *data;
@@ -233,22 +244,20 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
data = aTab[i].data;
if (ckh_try_insert(ckh, &key, &data)) {
ckh->count = count;
return (true);
return true;
}
nins++;
}
}
return (false);
return false;
}
static bool
ckh_grow(ckh_t *ckh)
{
ckh_grow(tsd_t *tsd, ckh_t *ckh) {
bool ret;
ckhc_t *tab, *ttab;
size_t lg_curcells;
unsigned lg_prevbuckets;
unsigned lg_prevbuckets, lg_curcells;
#ifdef CKH_COUNT
ckh->ngrows++;
@@ -265,12 +274,13 @@ ckh_grow(ckh_t *ckh)
size_t usize;
lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0) {
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
ret = true;
goto label_return;
}
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
true, NULL, true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
ret = true;
goto label_return;
@@ -281,28 +291,27 @@ ckh_grow(ckh_t *ckh)
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (ckh_rebuild(ckh, tab) == false) {
idalloc(tab);
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloc(ckh->tab);
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
ret = false;
label_return:
return (ret);
return ret;
}
static void
ckh_shrink(ckh_t *ckh)
{
ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
ckhc_t *tab, *ttab;
size_t lg_curcells, usize;
unsigned lg_prevbuckets;
size_t usize;
unsigned lg_prevbuckets, lg_curcells;
/*
* It is possible (though unlikely, given well behaved hashes) that the
@@ -310,10 +319,12 @@ ckh_shrink(ckh_t *ckh)
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0)
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return;
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
}
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@@ -327,8 +338,8 @@ ckh_shrink(ckh_t *ckh)
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (ckh_rebuild(ckh, tab) == false) {
idalloc(tab);
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@@ -336,7 +347,7 @@ ckh_shrink(ckh_t *ckh)
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloc(ckh->tab);
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -345,8 +356,8 @@ ckh_shrink(ckh_t *ckh)
}
bool
ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
{
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp) {
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
@@ -366,29 +377,31 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ckh->count = 0;
/*
* Find the minimum power of 2 that is large enough to fit aBaseCount
* Find the minimum power of 2 that is large enough to fit minitems
* entries. We are using (2+,2) cuckoo hashing, which has an expected
* maximum load factor of at least ~0.86, so 0.75 is a conservative load
* factor that will typically allow 2^aLgMinItems to fit without ever
* factor that will typically allow mincells items to fit without ever
* growing the table.
*/
assert(LG_CKH_BUCKET_CELLS > 0);
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
for (lg_mincells = LG_CKH_BUCKET_CELLS;
(ZU(1) << lg_mincells) < mincells;
lg_mincells++)
; /* Do nothing. */
lg_mincells++) {
/* Do nothing. */
}
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->hash = hash;
ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (usize == 0) {
usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
ret = true;
goto label_return;
}
ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
NULL, true, arena_ichoose(tsd, NULL));
if (ckh->tab == NULL) {
ret = true;
goto label_return;
@@ -396,20 +409,18 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ret = false;
label_return:
return (ret);
return ret;
}
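/*
 * Illustrative sketch (not part of jemalloc): the table-sizing arithmetic in
 * ckh_new(). mincells targets a ~0.75 load factor, and lg_mincells then
 * rounds the cell count up to a power of two; minitems = 100 and the bucket
 * log of 2 (LG_CKH_BUCKET_CELLS) are example inputs.
 */
#include <stddef.h>
#include <stdio.h>

int
main(void) {
    size_t minitems = 100;
    size_t mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
    unsigned lg_mincells = 2;    /* Start at LG_CKH_BUCKET_CELLS. */
    while (((size_t)1 << lg_mincells) < mincells) {
        lg_mincells++;
    }
    printf("minitems=%zu mincells=%zu table=%zu cells\n",
        minitems, mincells, (size_t)1 << lg_mincells);
    return 0;
}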
void
ckh_delete(ckh_t *ckh)
{
ckh_delete(tsd_t *tsd, ckh_t *ckh) {
assert(ckh != NULL);
#ifdef CKH_VERBOSE
malloc_printf(
"%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
" nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
" nrelocs: %"PRIu64"\n", __func__, ckh,
"%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
" nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
" nrelocs: %"FMTu64"\n", __func__, ckh,
(unsigned long long)ckh->ngrows,
(unsigned long long)ckh->nshrinks,
(unsigned long long)ckh->nshrinkfails,
@@ -417,43 +428,42 @@ ckh_delete(ckh_t *ckh)
(unsigned long long)ckh->nrelocs);
#endif
idalloc(ckh->tab);
if (config_debug)
memset(ckh, 0x5a, sizeof(ckh_t));
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
if (config_debug) {
memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
}
}
size_t
ckh_count(ckh_t *ckh)
{
ckh_count(ckh_t *ckh) {
assert(ckh != NULL);
return (ckh->count);
return ckh->count;
}
bool
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
{
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
size_t i, ncells;
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
if (ckh->tab[i].key != NULL) {
if (key != NULL)
if (key != NULL) {
*key = (void *)ckh->tab[i].key;
if (data != NULL)
}
if (data != NULL) {
*data = (void *)ckh->tab[i].data;
}
*tabind = i + 1;
return (false);
return false;
}
}
return (true);
return true;
}
bool
ckh_insert(ckh_t *ckh, const void *key, const void *data)
{
ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
bool ret;
assert(ckh != NULL);
@@ -464,7 +474,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
#endif
while (ckh_try_insert(ckh, &key, &data)) {
if (ckh_grow(ckh)) {
if (ckh_grow(tsd, ckh)) {
ret = true;
goto label_return;
}
@@ -472,22 +482,24 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
ret = false;
label_return:
return (ret);
return ret;
}
bool
ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data) {
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
if (key != NULL)
if (key != NULL) {
*key = (void *)ckh->tab[cell].key;
if (data != NULL)
}
if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
}
ckh->tab[cell].key = NULL;
ckh->tab[cell].data = NULL; /* Not necessary. */
@@ -497,54 +509,50 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
ckh_shrink(ckh);
ckh_shrink(tsd, ckh);
}
return (false);
return false;
}
return (true);
return true;
}
bool
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
if (key != NULL)
if (key != NULL) {
*key = (void *)ckh->tab[cell].key;
if (data != NULL)
}
if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
return (false);
}
return false;
}
return (true);
return true;
}
void
ckh_string_hash(const void *key, size_t r_hash[2])
{
ckh_string_hash(const void *key, size_t r_hash[2]) {
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
bool
ckh_string_keycomp(const void *k1, const void *k2)
{
ckh_string_keycomp(const void *k1, const void *k2) {
assert(k1 != NULL);
assert(k2 != NULL);
return (strcmp((char *)k1, (char *)k2) ? false : true);
return !strcmp((char *)k1, (char *)k2);
}
void
ckh_pointer_hash(const void *key, size_t r_hash[2])
{
ckh_pointer_hash(const void *key, size_t r_hash[2]) {
union {
const void *v;
size_t i;
@@ -556,8 +564,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2])
}
bool
ckh_pointer_keycomp(const void *k1, const void *k2)
{
return ((k1 == k2) ? true : false);
ckh_pointer_keycomp(const void *k1, const void *k2) {
return (k1 == k2);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,269 @@
#define JEMALLOC_EXTENT_DSS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/spin.h"
/******************************************************************************/
/* Data. */
const char *opt_dss = DSS_DEFAULT;
const char *dss_prec_names[] = {
"disabled",
"primary",
"secondary",
"N/A"
};
/*
* Current dss precedence default, used when creating new arenas. NB: This is
* stored as unsigned rather than dss_prec_t because in principle there's no
* guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
* atomic operations to synchronize the setting.
*/
static atomic_u_t dss_prec_default = ATOMIC_INIT(
(unsigned)DSS_PREC_DEFAULT);
/* Base address of the DSS. */
static void *dss_base;
/* Atomic boolean indicating whether a thread is currently extending DSS. */
static atomic_b_t dss_extending;
/* Atomic boolean indicating whether the DSS is exhausted. */
static atomic_b_t dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static atomic_p_t dss_max;
/******************************************************************************/
static void *
extent_dss_sbrk(intptr_t increment) {
#ifdef JEMALLOC_DSS
return sbrk(increment);
#else
not_implemented();
return NULL;
#endif
}
dss_prec_t
extent_dss_prec_get(void) {
dss_prec_t ret;
if (!have_dss) {
return dss_prec_disabled;
}
ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
return ret;
}
bool
extent_dss_prec_set(dss_prec_t dss_prec) {
if (!have_dss) {
return (dss_prec != dss_prec_disabled);
}
atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
return false;
}
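/*
 * extent_dss_extending_start()/_finish() implement a minimal test-and-set
 * spin lock around break extension: a weak CAS of dss_extending from false
 * to true acquires it (with adaptive back-off on failure), and a release
 * store drops it.
 */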
static void
extent_dss_extending_start(void) {
spin_t spinner = SPIN_INITIALIZER;
while (true) {
bool expected = false;
if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
break;
}
spin_adaptive(&spinner);
}
}
static void
extent_dss_extending_finish(void) {
assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));
atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
}
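/*
 * Re-read the current break via sbrk(0) and publish it to dss_max.
 * Returns NULL if sbrk() fails, or if a fixed new_addr was requested that
 * is no longer exactly at the break, the only position where a fixed DSS
 * request can be satisfied.
 */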
static void *
extent_dss_max_update(void *new_addr) {
/*
* Get the current end of the DSS as max_cur and assure that dss_max is
* up to date.
*/
void *max_cur = extent_dss_sbrk(0);
if (max_cur == (void *)-1) {
return NULL;
}
atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
/* Fixed new_addr can only be supported if it is at the edge of DSS. */
if (new_addr != NULL && max_cur != new_addr) {
return NULL;
}
return max_cur;
}
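/*
 * DSS allocation: while holding the extending lock, round the break up to
 * the requested alignment, record the page-aligned slack as a recyclable
 * gap extent, then sbrk() past the aligned region. The retry loop recovers
 * from raw sbrk() users moving the break underneath us.
 */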
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit) {
extent_t *gap;
cassert(have_dss);
assert(size > 0);
assert(alignment > 0);
/*
* sbrk() uses a signed increment argument, so take care not to
* interpret a large allocation request as a negative increment.
*/
if ((intptr_t)size < 0) {
return NULL;
}
gap = extent_alloc(tsdn, arena);
if (gap == NULL) {
return NULL;
}
extent_dss_extending_start();
if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
/*
* The loop is necessary to recover from races with other
* threads that are using the DSS for something other than
* malloc.
*/
while (true) {
void *max_cur = extent_dss_max_update(new_addr);
if (max_cur == NULL) {
goto label_oom;
}
/*
* Compute how much page-aligned gap space (if any) is
* necessary to satisfy alignment. This space can be
* recycled for later use.
*/
void *gap_addr_page = (void *)(PAGE_CEILING(
(uintptr_t)max_cur));
void *ret = (void *)ALIGNMENT_CEILING(
(uintptr_t)gap_addr_page, alignment);
size_t gap_size_page = (uintptr_t)ret -
(uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
extent_init(gap, arena, gap_addr_page,
gap_size_page, false, NSIZES,
arena_extent_sn_next(arena),
extent_state_active, false, true);
}
/*
* Compute the address just past the end of the desired
* allocation space.
*/
void *dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)max_cur ||
(uintptr_t)dss_next < (uintptr_t)max_cur) {
goto label_oom; /* Wrap-around. */
}
/* Compute the increment, including subpage bytes. */
void *gap_addr_subpage = max_cur;
size_t gap_size_subpage = (uintptr_t)ret -
(uintptr_t)gap_addr_subpage;
intptr_t incr = gap_size_subpage + size;
assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
size);
/* Try to allocate. */
void *dss_prev = extent_dss_sbrk(incr);
if (dss_prev == max_cur) {
/* Success. */
atomic_store_p(&dss_max, dss_next,
ATOMIC_RELEASE);
extent_dss_extending_finish();
if (gap_size_page != 0) {
extent_dalloc_gap(tsdn, arena, gap);
} else {
extent_dalloc(tsdn, arena, gap);
}
if (!*commit) {
*commit = pages_decommit(ret, size);
}
if (*zero && *commit) {
extent_hooks_t *extent_hooks =
EXTENT_HOOKS_INITIALIZER;
extent_t extent;
extent_init(&extent, arena, ret, size,
false, NSIZES, arena_extent_sn_next(arena),
extent_state_active, false, true);
if (extent_purge_forced_wrapper(tsdn,
arena, &extent_hooks, &extent, 0,
size)) {
memset(ret, 0, size);
}
}
return ret;
}
/*
* Failure, whether due to OOM or a race with a raw
* sbrk() call from outside the allocator.
*/
if (dss_prev == (void *)-1) {
/* OOM. */
atomic_store_b(&dss_exhausted, true,
ATOMIC_RELEASE);
goto label_oom;
}
}
}
label_oom:
extent_dss_extending_finish();
extent_dalloc(tsdn, arena, gap);
return NULL;
}
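/* Half-open interval test: dss_base <= addr < max. */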
static bool
extent_in_dss_helper(void *addr, void *max) {
return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
(uintptr_t)max);
}
bool
extent_in_dss(void *addr) {
cassert(have_dss);
return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
ATOMIC_ACQUIRE));
}
bool
extent_dss_mergeable(void *addr_a, void *addr_b) {
void *max;
cassert(have_dss);
if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
(uintptr_t)dss_base) {
return true;
}
max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
return (extent_in_dss_helper(addr_a, max) ==
extent_in_dss_helper(addr_b, max));
}
void
extent_dss_boot(void) {
cassert(have_dss);
dss_base = extent_dss_sbrk(0);
atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
}
/******************************************************************************/
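The gap arithmetic in extent_alloc_dss() is the subtle part of the file above, so here is the same computation as a freestanding sketch with one worked case. PAGE and the CEIL macro are hypothetical local stand-ins for jemalloc's PAGE_CEILING/ALIGNMENT_CEILING, which live in its internal headers.

#include <stdint.h>
#include <stdio.h>

#define PAGE ((uintptr_t)4096)
/* Round x up to a multiple of the power-of-two a. */
#define CEIL(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

int main(void) {
	uintptr_t max_cur = 0x1003;    /* current break, as from sbrk(0) */
	uintptr_t alignment = 0x10000; /* 64 KiB requested alignment */
	uintptr_t size = 0x20000;      /* 128 KiB requested size */

	uintptr_t gap_addr_page = CEIL(max_cur, PAGE);   /* 0x2000 */
	uintptr_t ret = CEIL(gap_addr_page, alignment);  /* 0x10000 */
	uintptr_t gap_size_page = ret - gap_addr_page;   /* 0xe000, recycled as a gap extent */
	uintptr_t gap_size_subpage = ret - max_cur;      /* 0xeffd, includes sub-page bytes */
	intptr_t incr = (intptr_t)(gap_size_subpage + size);

	/* The invariant asserted in extent_alloc_dss(). */
	printf("%#zx %#zx %d\n", (size_t)gap_size_page, (size_t)incr,
	    max_cur + incr == ret + size); /* 0xe000 0x2effd 1 */
	return 0;
}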

View File

@@ -0,0 +1,42 @@
#define JEMALLOC_EXTENT_MMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
/******************************************************************************/
/* Data. */
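/*
 * With JEMALLOC_RETAIN configured, mappings are never returned to the
 * kernel; extent_dalloc_mmap() below then reports the extent as retained
 * instead of unmapping it.
 */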
bool opt_retain =
#ifdef JEMALLOC_RETAIN
true
#else
false
#endif
;
/******************************************************************************/
void *
extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit) {
void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment,
PAGE), commit);
if (ret == NULL) {
return NULL;
}
assert(ret != NULL);
if (*commit) {
*zero = true;
}
return ret;
}
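/*
 * Returns true (i.e. "not deallocated") when opt_retain is set, signaling
 * the caller that the mapping is still live and must be retained.
 */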
bool
extent_dalloc_mmap(void *addr, size_t size) {
if (!opt_retain) {
pages_unmap(addr, size);
}
return opt_retain;
}
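extent_alloc_mmap() pushes the alignment work down into pages_map(). A common way to obtain an aligned mapping from a plain mmap(2) is to over-allocate by the alignment and trim both ends; the sketch below illustrates that general technique and is not a claim about how jemalloc's pages_map() is implemented.

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/* Map size bytes aligned to the power-of-two alignment; NULL on failure. */
static void *
map_aligned(size_t size, size_t alignment) {
	size_t alloc_size = size + alignment; /* Over-allocate so an aligned sub-range exists. */
	if (alloc_size < size) {
		return NULL; /* Overflow. */
	}
	char *base = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		return NULL;
	}
	uintptr_t addr = ((uintptr_t)base + (alignment - 1)) &
	    ~((uintptr_t)alignment - 1);
	size_t lead = addr - (uintptr_t)base;    /* Unaligned prefix. */
	size_t trail = alloc_size - lead - size; /* Leftover suffix. */
	if (lead != 0) {
		munmap(base, lead);
	}
	if (trail != 0) {
		munmap((char *)addr + size, trail);
	}
	return (void *)addr;
}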

Some files were not shown because too many files have changed in this diff