mirror of https://github.com/mod-playerbots/azerothcore-wotlk.git (synced 2026-01-26 07:06:23 +00:00)

Merge branch 'master' of https://github.com/azerothcore/azerothcore-wotlk into dir-restructure

363
deps/jemalloc/include/jemalloc/internal/atomic.h
vendored
@@ -1,304 +1,77 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
#ifndef JEMALLOC_INTERNAL_ATOMIC_H
|
||||
#define JEMALLOC_INTERNAL_ATOMIC_H
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
#define ATOMIC_INLINE static inline
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
|
||||
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
|
||||
#define atomic_read_z(p) atomic_add_z(p, 0)
|
||||
#define atomic_read_u(p) atomic_add_u(p, 0)
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
|
||||
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
|
||||
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
|
||||
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
|
||||
size_t atomic_add_z(size_t *p, size_t x);
|
||||
size_t atomic_sub_z(size_t *p, size_t x);
|
||||
unsigned atomic_add_u(unsigned *p, unsigned x);
|
||||
unsigned atomic_sub_u(unsigned *p, unsigned x);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
|
||||
/******************************************************************************/
|
||||
/* 64-bit operations. */
|
||||
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
|
||||
# ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_add_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (__sync_add_and_fetch(p, x));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_sub_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (__sync_sub_and_fetch(p, x));
|
||||
}
|
||||
#elif (defined(_MSC_VER))
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_add_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (InterlockedExchangeAdd64(p, x));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_sub_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (InterlockedExchangeAdd64(p, -((int64_t)x)));
|
||||
}
|
||||
#elif (defined(JEMALLOC_OSATOMIC))
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_add_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_sub_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
|
||||
}
|
||||
# elif (defined(__amd64__) || defined(__x86_64__))
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_add_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
asm volatile (
|
||||
"lock; xaddq %0, %1;"
|
||||
: "+r" (x), "=m" (*p) /* Outputs. */
|
||||
: "m" (*p) /* Inputs. */
|
||||
);
|
||||
|
||||
return (x);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_sub_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
x = (uint64_t)(-(int64_t)x);
|
||||
asm volatile (
|
||||
"lock; xaddq %0, %1;"
|
||||
: "+r" (x), "=m" (*p) /* Outputs. */
|
||||
: "m" (*p) /* Inputs. */
|
||||
);
|
||||
|
||||
return (x);
|
||||
}
|
||||
# elif (defined(JEMALLOC_ATOMIC9))
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_add_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
/*
|
||||
* atomic_fetchadd_64() doesn't exist, but we only ever use this
|
||||
* function on LP64 systems, so atomic_fetchadd_long() will do.
|
||||
*/
|
||||
assert(sizeof(uint64_t) == sizeof(unsigned long));
|
||||
|
||||
return (atomic_fetchadd_long(p, (unsigned long)x) + x);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_sub_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
assert(sizeof(uint64_t) == sizeof(unsigned long));
|
||||
|
||||
return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
|
||||
}
|
||||
# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_add_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (__sync_add_and_fetch(p, x));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
atomic_sub_uint64(uint64_t *p, uint64_t x)
|
||||
{
|
||||
|
||||
return (__sync_sub_and_fetch(p, x));
|
||||
}
|
||||
# else
|
||||
# error "Missing implementation for 64-bit atomic operations"
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/******************************************************************************/
|
||||
/* 32-bit operations. */
|
||||
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_add_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (__sync_add_and_fetch(p, x));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_sub_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (__sync_sub_and_fetch(p, x));
|
||||
}
|
||||
#elif (defined(_MSC_VER))
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_add_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (InterlockedExchangeAdd(p, x));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_sub_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (InterlockedExchangeAdd(p, -((int32_t)x)));
|
||||
}
|
||||
#elif (defined(JEMALLOC_OSATOMIC))
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_add_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_sub_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
|
||||
}
|
||||
#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_add_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
asm volatile (
|
||||
"lock; xaddl %0, %1;"
|
||||
: "+r" (x), "=m" (*p) /* Outputs. */
|
||||
: "m" (*p) /* Inputs. */
|
||||
);
|
||||
|
||||
return (x);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_sub_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
x = (uint32_t)(-(int32_t)x);
|
||||
asm volatile (
|
||||
"lock; xaddl %0, %1;"
|
||||
: "+r" (x), "=m" (*p) /* Outputs. */
|
||||
: "m" (*p) /* Inputs. */
|
||||
);
|
||||
|
||||
return (x);
|
||||
}
|
||||
#elif (defined(JEMALLOC_ATOMIC9))
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_add_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (atomic_fetchadd_32(p, x) + x);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_sub_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
|
||||
}
|
||||
#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_add_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (__sync_add_and_fetch(p, x));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
atomic_sub_uint32(uint32_t *p, uint32_t x)
|
||||
{
|
||||
|
||||
return (__sync_sub_and_fetch(p, x));
|
||||
}
|
||||
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
|
||||
# include "jemalloc/internal/atomic_gcc_atomic.h"
|
||||
#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
|
||||
# include "jemalloc/internal/atomic_gcc_sync.h"
|
||||
#elif defined(_MSC_VER)
|
||||
# include "jemalloc/internal/atomic_msvc.h"
|
||||
#elif defined(JEMALLOC_C11_ATOMICS)
|
||||
# include "jemalloc/internal/atomic_c11.h"
|
||||
#else
|
||||
# error "Missing implementation for 32-bit atomic operations"
|
||||
# error "Don't have atomics implemented on this platform."
|
||||
#endif
|
||||
|
||||
/******************************************************************************/
|
||||
/* size_t operations. */
|
||||
JEMALLOC_INLINE size_t
atomic_add_z(size_t *p, size_t x)
{
/*
 * This header gives more or less a backport of C11 atomics. The user can write
 * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
 * counterparts of the C11 atomic functions for type, as so:
 * JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
 * and then write things like:
 * int *some_ptr;
 * atomic_pi_t atomic_ptr_to_int;
 * atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
 * int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL);
 * assert(some_ptr == prev_value);
 * and expect things to work in the obvious way.
 *
 * Also included (with naming differences to avoid conflicts with the standard
 * library):
 * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
 * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
 */

#if (LG_SIZEOF_PTR == 3)
	return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
	return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
/*
 * Pure convenience, so that we don't have to type "atomic_memory_order_"
 * quite so often.
 */
#define ATOMIC_RELAXED atomic_memory_order_relaxed
#define ATOMIC_ACQUIRE atomic_memory_order_acquire
#define ATOMIC_RELEASE atomic_memory_order_release
#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
|
||||
|
||||
JEMALLOC_INLINE size_t
|
||||
atomic_sub_z(size_t *p, size_t x)
|
||||
{
|
||||
|
||||
#if (LG_SIZEOF_PTR == 3)
|
||||
return ((size_t)atomic_add_uint64((uint64_t *)p,
|
||||
(uint64_t)-((int64_t)x)));
|
||||
#elif (LG_SIZEOF_PTR == 2)
|
||||
return ((size_t)atomic_add_uint32((uint32_t *)p,
|
||||
(uint32_t)-((int32_t)x)));
|
||||
#endif
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
/* unsigned operations. */
|
||||
JEMALLOC_INLINE unsigned
|
||||
atomic_add_u(unsigned *p, unsigned x)
|
||||
{
|
||||
|
||||
#if (LG_SIZEOF_INT == 3)
|
||||
return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
|
||||
#elif (LG_SIZEOF_INT == 2)
|
||||
return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
|
||||
#endif
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE unsigned
|
||||
atomic_sub_u(unsigned *p, unsigned x)
|
||||
{
|
||||
|
||||
#if (LG_SIZEOF_INT == 3)
|
||||
return ((unsigned)atomic_add_uint64((uint64_t *)p,
|
||||
(uint64_t)-((int64_t)x)));
|
||||
#elif (LG_SIZEOF_INT == 2)
|
||||
return ((unsigned)atomic_add_uint32((uint32_t *)p,
|
||||
(uint32_t)-((int32_t)x)));
|
||||
#endif
|
||||
}
|
||||
/******************************************************************************/
/*
 * Not all platforms have 64-bit atomics. If we do, this #define exposes that
 * fact.
 */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# define JEMALLOC_ATOMIC_U64
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)

/*
 * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
 * platform that actually needs to know the size, MSVC.
 */
JEMALLOC_GENERATE_ATOMICS(bool, b, 0)

JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)

JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)

JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)

JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)

#ifdef JEMALLOC_ATOMIC_U64
JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
#endif

#undef ATOMIC_INLINE

#endif /* JEMALLOC_INTERNAL_ATOMIC_H */
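The comment block in the new atomic.h only sketches the generated API. Below is a minimal usage sketch of the u32 variant (a hypothetical shared counter, assuming the jemalloc-internal build environment; the atomic_u32_t type and the atomic_*_u32 functions come from the JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2) line in this diff, and the counter_* names are illustrative):

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/atomic.h"

/* Sketch: a counter shared between threads, built on the generated u32 API. */
static atomic_u32_t g_counter = ATOMIC_INIT(0);

static uint32_t
counter_bump(void) {
	/* fetch_add returns the previous value; relaxed ordering suffices here. */
	return atomic_fetch_add_u32(&g_counter, 1, ATOMIC_RELAXED) + 1;
}

static uint32_t
counter_read(void) {
	return atomic_load_u32(&g_counter, ATOMIC_RELAXED);
}

The memory-order argument mirrors C11; ATOMIC_RELAXED is enough for a statistics counter, while acquire/release orders are reserved for publication patterns.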
349
deps/jemalloc/include/jemalloc/internal/bitmap.h
vendored
@@ -1,37 +1,159 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
#ifndef JEMALLOC_INTERNAL_BITMAP_H
|
||||
#define JEMALLOC_INTERNAL_BITMAP_H
|
||||
|
||||
#include "jemalloc/internal/arena_types.h"
|
||||
#include "jemalloc/internal/bit_util.h"
|
||||
#include "jemalloc/internal/size_classes.h"
|
||||
|
||||
typedef unsigned long bitmap_t;
|
||||
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
|
||||
|
||||
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
|
||||
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
|
||||
|
||||
typedef struct bitmap_level_s bitmap_level_t;
|
||||
typedef struct bitmap_info_s bitmap_info_t;
|
||||
typedef unsigned long bitmap_t;
|
||||
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
|
||||
#if LG_SLAB_MAXREGS > LG_CEIL_NSIZES
|
||||
/* Maximum bitmap bit count is determined by maximum regions per slab. */
|
||||
# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
|
||||
#else
|
||||
/* Maximum bitmap bit count is determined by number of extent size classes. */
|
||||
# define LG_BITMAP_MAXBITS LG_CEIL_NSIZES
|
||||
#endif
|
||||
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
|
||||
|
||||
/* Number of bits per group. */
|
||||
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
|
||||
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
|
||||
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
|
||||
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
|
||||
#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
|
||||
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
|
||||
|
||||
/* Maximum number of levels possible. */
|
||||
#define BITMAP_MAX_LEVELS \
|
||||
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
|
||||
+ !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
|
||||
/*
 * Do some analysis on how big the bitmap is before we use a tree. For a brute
 * force linear search, if we would have to call ffs_lu() more than 2^3 times,
 * use a tree instead.
 */
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
# define BITMAP_USE_TREE
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
/* Number of groups required to store a given number of bits. */
|
||||
#define BITMAP_BITS2GROUPS(nbits) \
|
||||
(((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
|
||||
|
||||
struct bitmap_level_s {
|
||||
/*
|
||||
* Number of groups required at a particular level for a given number of bits.
|
||||
*/
|
||||
#define BITMAP_GROUPS_L0(nbits) \
|
||||
BITMAP_BITS2GROUPS(nbits)
|
||||
#define BITMAP_GROUPS_L1(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
|
||||
#define BITMAP_GROUPS_L2(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
|
||||
#define BITMAP_GROUPS_L3(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
|
||||
BITMAP_BITS2GROUPS((nbits)))))
|
||||
#define BITMAP_GROUPS_L4(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
|
||||
|
||||
/*
|
||||
* Assuming the number of levels, number of groups required for a given number
|
||||
* of bits.
|
||||
*/
|
||||
#define BITMAP_GROUPS_1_LEVEL(nbits) \
|
||||
BITMAP_GROUPS_L0(nbits)
|
||||
#define BITMAP_GROUPS_2_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
|
||||
#define BITMAP_GROUPS_3_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
|
||||
#define BITMAP_GROUPS_4_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
|
||||
#define BITMAP_GROUPS_5_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
|
||||
|
||||
/*
|
||||
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
|
||||
*/
|
||||
#ifdef BITMAP_USE_TREE
|
||||
|
||||
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
|
||||
#else
|
||||
# error "Unsupported bitmap size"
|
||||
#endif
|
||||
|
||||
/*
 * Maximum number of levels possible. This could be statically computed based
 * on LG_BITMAP_MAXBITS:
 *
 * #define BITMAP_MAX_LEVELS \
 *     (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
 *     + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
 *
 * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
 * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
 * various cascading macros. The only additional cost this incurs is some
 * unused trailing entries in bitmap_info_t structures; the bitmaps themselves
 * are not impacted.
 */
#define BITMAP_MAX_LEVELS 5
|
||||
|
||||
#define BITMAP_INFO_INITIALIZER(nbits) { \
|
||||
/* nbits. */ \
|
||||
nbits, \
|
||||
/* nlevels. */ \
|
||||
(BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
|
||||
(BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
|
||||
(BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
|
||||
(BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
|
||||
/* levels. */ \
|
||||
{ \
|
||||
{0}, \
|
||||
{BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
|
||||
BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
|
||||
BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
|
||||
BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
|
||||
+ BITMAP_GROUPS_L0(nbits)} \
|
||||
} \
|
||||
}
|
||||
|
||||
#else /* BITMAP_USE_TREE */
|
||||
|
||||
#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
|
||||
#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
|
||||
|
||||
#define BITMAP_INFO_INITIALIZER(nbits) { \
|
||||
/* nbits. */ \
|
||||
nbits, \
|
||||
/* ngroups. */ \
|
||||
BITMAP_BITS2GROUPS(nbits) \
|
||||
}
|
||||
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
|
||||
typedef struct bitmap_level_s {
|
||||
/* Offset of this level's groups within the array of groups. */
|
||||
size_t group_offset;
|
||||
};
|
||||
} bitmap_level_t;
|
||||
|
||||
struct bitmap_info_s {
|
||||
typedef struct bitmap_info_s {
|
||||
/* Logical number of bits in bitmap (stored at bottom level). */
|
||||
size_t nbits;
|
||||
|
||||
#ifdef BITMAP_USE_TREE
|
||||
/* Number of levels necessary for nbits. */
|
||||
unsigned nlevels;
|
||||
|
||||
@@ -40,67 +162,62 @@ struct bitmap_info_s {
|
||||
* bottom to top (e.g. the bottom level is stored in levels[0]).
|
||||
*/
|
||||
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
|
||||
};
|
||||
#else /* BITMAP_USE_TREE */
|
||||
/* Number of groups necessary for nbits. */
|
||||
size_t ngroups;
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
} bitmap_info_t;
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
|
||||
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
|
||||
size_t bitmap_size(const bitmap_info_t *binfo);
|
||||
|
||||
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
|
||||
size_t bitmap_info_ngroups(const bitmap_info_t *binfo);
|
||||
size_t bitmap_size(size_t nbits);
|
||||
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
|
||||
bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
|
||||
void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
|
||||
size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
|
||||
void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
|
||||
JEMALLOC_INLINE bool
|
||||
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
|
||||
{
|
||||
unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
|
||||
static inline bool
|
||||
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
|
||||
#ifdef BITMAP_USE_TREE
|
||||
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
|
||||
bitmap_t rg = bitmap[rgoff];
|
||||
/* The bitmap is full iff the root group is 0. */
|
||||
return (rg == 0);
|
||||
#else
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < binfo->ngroups; i++) {
|
||||
if (bitmap[i] != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE bool
|
||||
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
|
||||
{
|
||||
static inline bool
|
||||
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
|
||||
size_t goff;
|
||||
bitmap_t g;
|
||||
|
||||
assert(bit < binfo->nbits);
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
g = bitmap[goff];
|
||||
return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
|
||||
return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
|
||||
{
|
||||
static inline void
|
||||
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
|
||||
size_t goff;
|
||||
bitmap_t *gp;
|
||||
bitmap_t g;
|
||||
|
||||
assert(bit < binfo->nbits);
|
||||
assert(bitmap_get(bitmap, binfo, bit) == false);
|
||||
assert(!bitmap_get(bitmap, binfo, bit));
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
gp = &bitmap[goff];
|
||||
g = *gp;
|
||||
assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
assert(bitmap_get(bitmap, binfo, bit));
|
||||
#ifdef BITMAP_USE_TREE
|
||||
/* Propagate group state transitions up the tree. */
|
||||
if (g == 0) {
|
||||
unsigned i;
|
||||
@@ -109,45 +226,113 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
gp = &bitmap[binfo->levels[i].group_offset + goff];
|
||||
g = *gp;
|
||||
assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
if (g != 0)
|
||||
if (g != 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/* ffu: find first unset >= bit. */
|
||||
static inline size_t
|
||||
bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
|
||||
assert(min_bit < binfo->nbits);
|
||||
|
||||
#ifdef BITMAP_USE_TREE
|
||||
size_t bit = 0;
|
||||
for (unsigned level = binfo->nlevels; level--;) {
|
||||
size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level +
|
||||
1));
|
||||
bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
|
||||
>> lg_bits_per_group)];
|
||||
unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
|
||||
bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
|
||||
assert(group_nmask <= BITMAP_GROUP_NBITS);
|
||||
bitmap_t group_mask = ~((1LU << group_nmask) - 1);
|
||||
bitmap_t group_masked = group & group_mask;
|
||||
if (group_masked == 0LU) {
|
||||
if (group == 0LU) {
|
||||
return binfo->nbits;
|
||||
}
|
||||
/*
|
||||
* min_bit was preceded by one or more unset bits in
|
||||
* this group, but there are no other unset bits in this
|
||||
* group. Try again starting at the first bit of the
|
||||
* next sibling. This will recurse at most once per
|
||||
* non-root level.
|
||||
*/
|
||||
size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
|
||||
assert(sib_base > min_bit);
|
||||
assert(sib_base > bit);
|
||||
if (sib_base >= binfo->nbits) {
|
||||
return binfo->nbits;
|
||||
}
|
||||
return bitmap_ffu(bitmap, binfo, sib_base);
|
||||
}
|
||||
bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
|
||||
(lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
|
||||
}
|
||||
assert(bit >= min_bit);
|
||||
assert(bit < binfo->nbits);
|
||||
return bit;
|
||||
#else
|
||||
size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
|
||||
bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
|
||||
- 1);
|
||||
size_t bit;
|
||||
do {
|
||||
bit = ffs_lu(g);
|
||||
if (bit != 0) {
|
||||
return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
|
||||
}
|
||||
i++;
|
||||
g = bitmap[i];
|
||||
} while (i < binfo->ngroups);
|
||||
return binfo->nbits;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* sfu: set first unset. */
|
||||
JEMALLOC_INLINE size_t
|
||||
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
|
||||
{
|
||||
static inline size_t
|
||||
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
|
||||
size_t bit;
|
||||
bitmap_t g;
|
||||
unsigned i;
|
||||
|
||||
assert(bitmap_full(bitmap, binfo) == false);
|
||||
assert(!bitmap_full(bitmap, binfo));
|
||||
|
||||
#ifdef BITMAP_USE_TREE
|
||||
i = binfo->nlevels - 1;
|
||||
g = bitmap[binfo->levels[i].group_offset];
|
||||
bit = ffsl(g) - 1;
|
||||
bit = ffs_lu(g) - 1;
|
||||
while (i > 0) {
|
||||
i--;
|
||||
g = bitmap[binfo->levels[i].group_offset + bit];
|
||||
bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
|
||||
bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
|
||||
}
|
||||
|
||||
#else
|
||||
i = 0;
|
||||
g = bitmap[0];
|
||||
while ((bit = ffs_lu(g)) == 0) {
|
||||
i++;
|
||||
g = bitmap[i];
|
||||
}
|
||||
bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
|
||||
#endif
|
||||
bitmap_set(bitmap, binfo, bit);
|
||||
return (bit);
|
||||
return bit;
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
|
||||
{
|
||||
static inline void
|
||||
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
|
||||
size_t goff;
|
||||
bitmap_t *gp;
|
||||
bitmap_t g;
|
||||
bool propagate;
|
||||
UNUSED bool propagate;
|
||||
|
||||
assert(bit < binfo->nbits);
|
||||
assert(bitmap_get(bitmap, binfo, bit));
|
||||
@@ -155,10 +340,11 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
|
||||
gp = &bitmap[goff];
|
||||
g = *gp;
|
||||
propagate = (g == 0);
|
||||
assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
|
||||
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
assert(bitmap_get(bitmap, binfo, bit) == false);
|
||||
assert(!bitmap_get(bitmap, binfo, bit));
|
||||
#ifdef BITMAP_USE_TREE
|
||||
/* Propagate group state transitions up the tree. */
|
||||
if (propagate) {
|
||||
unsigned i;
|
||||
@@ -168,17 +354,16 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
|
||||
gp = &bitmap[binfo->levels[i].group_offset + goff];
|
||||
g = *gp;
|
||||
propagate = (g == 0);
|
||||
assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
|
||||
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
|
||||
== 0);
|
||||
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
if (propagate == false)
|
||||
if (!propagate) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
#endif /* JEMALLOC_INTERNAL_BITMAP_H */
|
||||
|
||||
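For context on the bitmap API whose declarations change above (bitmap_info_init, bitmap_init with a fill flag, bitmap_full, bitmap_sfu, bitmap_unset), here is a minimal free-slot allocator sketch, assuming the jemalloc-internal headers and the new-style signatures shown in this diff; the slot_* names are illustrative:

#include <stdint.h>

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

/* Sketch: track 100 slots; an unset bit means "free", a set bit means "used". */
static bitmap_info_t slot_binfo;
static bitmap_t slot_bits[BITMAP_GROUPS_MAX];

static void
slots_init(void) {
	bitmap_info_init(&slot_binfo, 100);
	/* fill=false: start with every bit unset, i.e. every slot free. */
	bitmap_init(slot_bits, &slot_binfo, false);
}

static size_t
slot_alloc(void) {
	if (bitmap_full(slot_bits, &slot_binfo)) {
		return SIZE_MAX; /* no free slot */
	}
	/* "Set first unset": claim the lowest free slot and return its index. */
	return bitmap_sfu(slot_bits, &slot_binfo);
}

static void
slot_free(size_t i) {
	bitmap_unset(slot_bits, &slot_binfo, i);
}

Whether bitmap_sfu walks a multi-level tree or a flat array is decided by the BITMAP_USE_TREE analysis earlier in this header; the caller-visible behavior is the same.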
117
deps/jemalloc/include/jemalloc/internal/ckh.h
vendored
@@ -1,88 +1,101 @@
|
||||
#ifndef JEMALLOC_INTERNAL_CKH_H
|
||||
#define JEMALLOC_INTERNAL_CKH_H
|
||||
|
||||
#include "jemalloc/internal/tsd.h"
|
||||
|
||||
/* Cuckoo hashing implementation. Skip to the end for the interface. */
|
||||
|
||||
/******************************************************************************/
|
||||
/* INTERNAL DEFINITIONS -- IGNORE */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
typedef struct ckh_s ckh_t;
|
||||
typedef struct ckhc_s ckhc_t;
|
||||
|
||||
/* Typedefs to allow easy function pointer passing. */
|
||||
typedef void ckh_hash_t (const void *, size_t[2]);
|
||||
typedef bool ckh_keycomp_t (const void *, const void *);
|
||||
|
||||
/* Maintain counters used to get an idea of performance. */
|
||||
/* #define CKH_COUNT */
|
||||
/* #define CKH_COUNT */
|
||||
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
|
||||
/* #define CKH_VERBOSE */
|
||||
/* #define CKH_VERBOSE */
|
||||
|
||||
/*
|
||||
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
|
||||
* one bucket per L1 cache line.
|
||||
*/
|
||||
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
|
||||
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
/* Typedefs to allow easy function pointer passing. */
|
||||
typedef void ckh_hash_t (const void *, size_t[2]);
|
||||
typedef bool ckh_keycomp_t (const void *, const void *);
|
||||
|
||||
/* Hash table cell. */
|
||||
struct ckhc_s {
|
||||
const void *key;
|
||||
const void *data;
|
||||
};
|
||||
typedef struct {
|
||||
const void *key;
|
||||
const void *data;
|
||||
} ckhc_t;
|
||||
|
||||
struct ckh_s {
|
||||
/* The hash table itself. */
|
||||
typedef struct {
|
||||
#ifdef CKH_COUNT
|
||||
/* Counters used to get an idea of performance. */
|
||||
uint64_t ngrows;
|
||||
uint64_t nshrinks;
|
||||
uint64_t nshrinkfails;
|
||||
uint64_t ninserts;
|
||||
uint64_t nrelocs;
|
||||
uint64_t ngrows;
|
||||
uint64_t nshrinks;
|
||||
uint64_t nshrinkfails;
|
||||
uint64_t ninserts;
|
||||
uint64_t nrelocs;
|
||||
#endif
|
||||
|
||||
/* Used for pseudo-random number generation. */
|
||||
#define CKH_A 1103515241
|
||||
#define CKH_C 12347
|
||||
uint32_t prng_state;
|
||||
uint64_t prng_state;
|
||||
|
||||
/* Total number of items. */
|
||||
size_t count;
|
||||
size_t count;
|
||||
|
||||
/*
|
||||
* Minimum and current number of hash table buckets. There are
|
||||
* 2^LG_CKH_BUCKET_CELLS cells per bucket.
|
||||
*/
|
||||
unsigned lg_minbuckets;
|
||||
unsigned lg_curbuckets;
|
||||
unsigned lg_minbuckets;
|
||||
unsigned lg_curbuckets;
|
||||
|
||||
/* Hash and comparison functions. */
|
||||
ckh_hash_t *hash;
|
||||
ckh_keycomp_t *keycomp;
|
||||
ckh_hash_t *hash;
|
||||
ckh_keycomp_t *keycomp;
|
||||
|
||||
/* Hash table with 2^lg_curbuckets buckets. */
|
||||
ckhc_t *tab;
|
||||
};
|
||||
ckhc_t *tab;
|
||||
} ckh_t;
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
/* BEGIN PUBLIC API */
|
||||
/******************************************************************************/
|
||||
|
||||
bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
|
||||
/* Lifetime management. Minitems is the initial capacity. */
|
||||
bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
|
||||
ckh_keycomp_t *keycomp);
|
||||
void ckh_delete(ckh_t *ckh);
|
||||
size_t ckh_count(ckh_t *ckh);
|
||||
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
|
||||
bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
|
||||
bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
|
||||
void ckh_delete(tsd_t *tsd, ckh_t *ckh);
|
||||
|
||||
/* Get the number of elements in the set. */
|
||||
size_t ckh_count(ckh_t *ckh);
|
||||
|
||||
/*
 * To iterate over the elements in the table, initialize *tabind to 0 and call
 * this function until it returns true. Each call that returns false will
 * update *key and *data to the next element in the table, assuming the pointers
 * are non-NULL.
 */
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);

/*
 * Basic hash table operations -- insert, removal, lookup. For ckh_remove and
 * ckh_search, key or data can be NULL. The hash-table only stores pointers to
 * the key and value, and doesn't do any lifetime management.
 */
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
    void **data);
|
||||
bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
|
||||
void ckh_string_hash(const void *key, size_t r_hash[2]);
|
||||
bool ckh_string_keycomp(const void *k1, const void *k2);
|
||||
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
|
||||
bool ckh_pointer_keycomp(const void *k1, const void *k2);
|
||||
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
/* Some useful hash and comparison functions for strings and pointers. */
|
||||
void ckh_string_hash(const void *key, size_t r_hash[2]);
|
||||
bool ckh_string_keycomp(const void *k1, const void *k2);
|
||||
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
|
||||
bool ckh_pointer_keycomp(const void *k1, const void *k2);
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
#endif /* JEMALLOC_INTERNAL_CKH_H */
|
||||
|
||||
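A minimal sketch of the cuckoo-hash API documented above, using the new tsd_t-taking signatures from this diff together with the provided string hash/compare helpers; the ckh_demo name, the key/value literals, and the reduced error handling are illustrative:

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

/* Sketch: map a string key to a string value; the table stores pointers only. */
static bool
ckh_demo(tsd_t *tsd) {
	ckh_t table;
	if (ckh_new(tsd, &table, 16, ckh_string_hash, ckh_string_keycomp)) {
		return true; /* allocation failure */
	}
	static const char *key = "hello";
	static const char *value = "world";
	if (ckh_insert(tsd, &table, key, value)) {
		ckh_delete(tsd, &table);
		return true;
	}
	void *k, *v;
	/* ckh_search returns false on success and writes back the stored pointers. */
	if (ckh_search(&table, "hello", &k, &v) || v != (void *)value) {
		ckh_delete(tsd, &table);
		return true;
	}
	ckh_delete(tsd, &table);
	return false;
}

Note that, as the comment above says, the table never copies or frees keys and values; their lifetime is the caller's problem.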
167
deps/jemalloc/include/jemalloc/internal/ctl.h
vendored
@@ -1,87 +1,106 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
#ifndef JEMALLOC_INTERNAL_CTL_H
|
||||
#define JEMALLOC_INTERNAL_CTL_H
|
||||
|
||||
typedef struct ctl_node_s ctl_node_t;
|
||||
typedef struct ctl_named_node_s ctl_named_node_t;
|
||||
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
|
||||
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
|
||||
typedef struct ctl_stats_s ctl_stats_t;
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/malloc_io.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/size_classes.h"
|
||||
#include "jemalloc/internal/stats.h"
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
/* Maximum ctl tree depth. */
|
||||
#define CTL_MAX_DEPTH 7
|
||||
|
||||
struct ctl_node_s {
|
||||
bool named;
|
||||
};
|
||||
typedef struct ctl_node_s {
|
||||
bool named;
|
||||
} ctl_node_t;
|
||||
|
||||
struct ctl_named_node_s {
|
||||
struct ctl_node_s node;
|
||||
const char *name;
|
||||
typedef struct ctl_named_node_s {
|
||||
ctl_node_t node;
|
||||
const char *name;
|
||||
/* If (nchildren == 0), this is a terminal node. */
|
||||
unsigned nchildren;
|
||||
const ctl_node_t *children;
|
||||
int (*ctl)(const size_t *, size_t, void *, size_t *,
|
||||
void *, size_t);
|
||||
};
|
||||
size_t nchildren;
|
||||
const ctl_node_t *children;
|
||||
int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
|
||||
size_t);
|
||||
} ctl_named_node_t;
|
||||
|
||||
struct ctl_indexed_node_s {
|
||||
struct ctl_node_s node;
|
||||
const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
|
||||
};
|
||||
typedef struct ctl_indexed_node_s {
|
||||
struct ctl_node_s node;
|
||||
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
|
||||
size_t);
|
||||
} ctl_indexed_node_t;
|
||||
|
||||
struct ctl_arena_stats_s {
|
||||
bool initialized;
|
||||
unsigned nthreads;
|
||||
const char *dss;
|
||||
size_t pactive;
|
||||
size_t pdirty;
|
||||
arena_stats_t astats;
|
||||
typedef struct ctl_arena_stats_s {
|
||||
arena_stats_t astats;
|
||||
|
||||
/* Aggregate stats for small size classes, based on bin stats. */
|
||||
size_t allocated_small;
|
||||
uint64_t nmalloc_small;
|
||||
uint64_t ndalloc_small;
|
||||
uint64_t nrequests_small;
|
||||
size_t allocated_small;
|
||||
uint64_t nmalloc_small;
|
||||
uint64_t ndalloc_small;
|
||||
uint64_t nrequests_small;
|
||||
|
||||
malloc_bin_stats_t bstats[NBINS];
|
||||
malloc_large_stats_t *lstats; /* nlclasses elements. */
|
||||
malloc_bin_stats_t bstats[NBINS];
|
||||
malloc_large_stats_t lstats[NSIZES - NBINS];
|
||||
} ctl_arena_stats_t;
|
||||
|
||||
typedef struct ctl_stats_s {
|
||||
size_t allocated;
|
||||
size_t active;
|
||||
size_t metadata;
|
||||
size_t resident;
|
||||
size_t mapped;
|
||||
size_t retained;
|
||||
|
||||
background_thread_stats_t background_thread;
|
||||
mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
|
||||
} ctl_stats_t;
|
||||
|
||||
typedef struct ctl_arena_s ctl_arena_t;
|
||||
struct ctl_arena_s {
|
||||
unsigned arena_ind;
|
||||
bool initialized;
|
||||
ql_elm(ctl_arena_t) destroyed_link;
|
||||
|
||||
/* Basic stats, supported even if !config_stats. */
|
||||
unsigned nthreads;
|
||||
const char *dss;
|
||||
ssize_t dirty_decay_ms;
|
||||
ssize_t muzzy_decay_ms;
|
||||
size_t pactive;
|
||||
size_t pdirty;
|
||||
size_t pmuzzy;
|
||||
|
||||
/* NULL if !config_stats. */
|
||||
ctl_arena_stats_t *astats;
|
||||
};
|
||||
|
||||
struct ctl_stats_s {
|
||||
size_t allocated;
|
||||
size_t active;
|
||||
size_t mapped;
|
||||
struct {
|
||||
size_t current; /* stats_chunks.curchunks */
|
||||
uint64_t total; /* stats_chunks.nchunks */
|
||||
size_t high; /* stats_chunks.highchunks */
|
||||
} chunks;
|
||||
struct {
|
||||
size_t allocated; /* huge_allocated */
|
||||
uint64_t nmalloc; /* huge_nmalloc */
|
||||
uint64_t ndalloc; /* huge_ndalloc */
|
||||
} huge;
|
||||
unsigned narenas;
|
||||
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
|
||||
};
|
||||
typedef struct ctl_arenas_s {
|
||||
uint64_t epoch;
|
||||
unsigned narenas;
|
||||
ql_head(ctl_arena_t) destroyed;
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
	/*
	 * Element 0 corresponds to merged stats for extant arenas (accessed via
	 * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
	 * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
	 * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
	 */
	ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
} ctl_arenas_t;
|
||||
|
||||
int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
|
||||
size_t newlen);
|
||||
int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
|
||||
|
||||
int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
|
||||
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
|
||||
void *newp, size_t newlen);
|
||||
bool ctl_boot(void);
|
||||
void ctl_prefork(void);
|
||||
void ctl_postfork_parent(void);
|
||||
void ctl_postfork_child(void);
|
||||
int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
|
||||
|
||||
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
|
||||
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
||||
size_t *oldlenp, void *newp, size_t newlen);
|
||||
bool ctl_boot(void);
|
||||
void ctl_prefork(tsdn_t *tsdn);
|
||||
void ctl_postfork_parent(tsdn_t *tsdn);
|
||||
void ctl_postfork_child(tsdn_t *tsdn);
|
||||
|
||||
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
|
||||
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
|
||||
!= 0) { \
|
||||
malloc_printf( \
|
||||
@@ -91,7 +110,7 @@ void ctl_postfork_child(void);
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define xmallctlnametomib(name, mibp, miblenp) do { \
|
||||
#define xmallctlnametomib(name, mibp, miblenp) do { \
|
||||
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
|
||||
malloc_printf("<jemalloc>: Failure in " \
|
||||
"xmallctlnametomib(\"%s\", ...)\n", name); \
|
||||
@@ -99,7 +118,7 @@ void ctl_postfork_child(void);
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
|
||||
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
|
||||
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
|
||||
newlen) != 0) { \
|
||||
malloc_write( \
|
||||
@@ -108,10 +127,4 @@ void ctl_postfork_child(void);
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_CTL_H */
|
||||
|
||||
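The xmallctl* macros above are thin wrappers around the public je_mallctl entry points that drive this ctl tree. A small sketch of reading the statistics it exposes through the public API (assuming an application linked against jemalloc; "epoch", "stats.allocated", and "stats.active" are standard mallctl names, and print_allocated is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Sketch: refresh and read allocator-wide stats via the ctl name space. */
static void
print_allocated(void) {
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	/* Writing "epoch" tells jemalloc to refresh its cached statistics. */
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	size_t allocated, active;
	sz = sizeof(size_t);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0 &&
	    mallctl("stats.active", &active, &sz, NULL, 0) == 0) {
		printf("allocated: %zu, active: %zu\n", allocated, active);
	}
}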
139
deps/jemalloc/include/jemalloc/internal/hash.h
vendored
@@ -1,92 +1,76 @@
|
||||
#ifndef JEMALLOC_INTERNAL_HASH_H
|
||||
#define JEMALLOC_INTERNAL_HASH_H
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
/*
 * The following hash function is based on MurmurHash3, placed into the public
 * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for
 * domain by Austin Appleby. See https://github.com/aappleby/smhasher for
 * details.
 */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
|
||||
void hash_x86_128(const void *key, const int len, uint32_t seed,
|
||||
uint64_t r_out[2]);
|
||||
void hash_x64_128(const void *key, const int len, const uint32_t seed,
|
||||
uint64_t r_out[2]);
|
||||
void hash(const void *key, size_t len, const uint32_t seed,
|
||||
size_t r_hash[2]);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
|
||||
/******************************************************************************/
|
||||
/* Internal implementation. */
|
||||
JEMALLOC_INLINE uint32_t
|
||||
hash_rotl_32(uint32_t x, int8_t r)
|
||||
{
|
||||
|
||||
return (x << r) | (x >> (32 - r));
|
||||
static inline uint32_t
|
||||
hash_rotl_32(uint32_t x, int8_t r) {
|
||||
return ((x << r) | (x >> (32 - r)));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
hash_rotl_64(uint64_t x, int8_t r)
|
||||
{
|
||||
return (x << r) | (x >> (64 - r));
|
||||
static inline uint64_t
|
||||
hash_rotl_64(uint64_t x, int8_t r) {
|
||||
return ((x << r) | (x >> (64 - r)));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
hash_get_block_32(const uint32_t *p, int i)
|
||||
{
|
||||
static inline uint32_t
|
||||
hash_get_block_32(const uint32_t *p, int i) {
|
||||
/* Handle unaligned read. */
|
||||
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
|
||||
uint32_t ret;
|
||||
|
||||
return (p[i]);
|
||||
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
|
||||
return ret;
|
||||
}
|
||||
|
||||
return p[i];
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
hash_get_block_64(const uint64_t *p, int i)
|
||||
{
|
||||
static inline uint64_t
|
||||
hash_get_block_64(const uint64_t *p, int i) {
|
||||
/* Handle unaligned read. */
|
||||
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
|
||||
uint64_t ret;
|
||||
|
||||
return (p[i]);
|
||||
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
|
||||
return ret;
|
||||
}
|
||||
|
||||
return p[i];
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
hash_fmix_32(uint32_t h)
|
||||
{
|
||||
|
||||
static inline uint32_t
|
||||
hash_fmix_32(uint32_t h) {
|
||||
h ^= h >> 16;
|
||||
h *= 0x85ebca6b;
|
||||
h ^= h >> 13;
|
||||
h *= 0xc2b2ae35;
|
||||
h ^= h >> 16;
|
||||
|
||||
return (h);
|
||||
return h;
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint64_t
|
||||
hash_fmix_64(uint64_t k)
|
||||
{
|
||||
|
||||
static inline uint64_t
|
||||
hash_fmix_64(uint64_t k) {
|
||||
k ^= k >> 33;
|
||||
k *= QU(0xff51afd7ed558ccdLLU);
|
||||
k *= KQU(0xff51afd7ed558ccd);
|
||||
k ^= k >> 33;
|
||||
k *= QU(0xc4ceb9fe1a85ec53LLU);
|
||||
k *= KQU(0xc4ceb9fe1a85ec53);
|
||||
k ^= k >> 33;
|
||||
|
||||
return (k);
|
||||
return k;
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE uint32_t
|
||||
hash_x86_32(const void *key, int len, uint32_t seed)
|
||||
{
|
||||
static inline uint32_t
|
||||
hash_x86_32(const void *key, int len, uint32_t seed) {
|
||||
const uint8_t *data = (const uint8_t *) key;
|
||||
const int nblocks = len / 4;
|
||||
|
||||
@@ -132,13 +116,12 @@ hash_x86_32(const void *key, int len, uint32_t seed)
|
||||
|
||||
h1 = hash_fmix_32(h1);
|
||||
|
||||
return (h1);
|
||||
return h1;
|
||||
}
|
||||
|
||||
UNUSED JEMALLOC_INLINE void
|
||||
UNUSED static inline void
|
||||
hash_x86_128(const void *key, const int len, uint32_t seed,
|
||||
uint64_t r_out[2])
|
||||
{
|
||||
uint64_t r_out[2]) {
|
||||
const uint8_t * data = (const uint8_t *) key;
|
||||
const int nblocks = len / 16;
|
||||
|
||||
@@ -237,18 +220,17 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
|
||||
r_out[1] = (((uint64_t) h4) << 32) | h3;
|
||||
}
|
||||
|
||||
UNUSED JEMALLOC_INLINE void
|
||||
UNUSED static inline void
|
||||
hash_x64_128(const void *key, const int len, const uint32_t seed,
|
||||
uint64_t r_out[2])
|
||||
{
|
||||
uint64_t r_out[2]) {
|
||||
const uint8_t *data = (const uint8_t *) key;
|
||||
const int nblocks = len / 16;
|
||||
|
||||
uint64_t h1 = seed;
|
||||
uint64_t h2 = seed;
|
||||
|
||||
const uint64_t c1 = QU(0x87c37b91114253d5LLU);
|
||||
const uint64_t c2 = QU(0x4cf5ad432745937fLLU);
|
||||
const uint64_t c1 = KQU(0x87c37b91114253d5);
|
||||
const uint64_t c2 = KQU(0x4cf5ad432745937f);
|
||||
|
||||
/* body */
|
||||
{
|
||||
@@ -317,19 +299,20 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
|
||||
|
||||
/******************************************************************************/
|
||||
/* API. */
|
||||
JEMALLOC_INLINE void
|
||||
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
|
||||
{
|
||||
static inline void
|
||||
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
|
||||
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
|
||||
|
||||
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
|
||||
hash_x64_128(key, len, seed, (uint64_t *)r_hash);
|
||||
hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
|
||||
#else
|
||||
uint64_t hashes[2];
|
||||
hash_x86_128(key, len, seed, hashes);
|
||||
r_hash[0] = (size_t)hashes[0];
|
||||
r_hash[1] = (size_t)hashes[1];
|
||||
{
|
||||
uint64_t hashes[2];
|
||||
hash_x86_128(key, (int)len, seed, hashes);
|
||||
r_hash[0] = (size_t)hashes[0];
|
||||
r_hash[1] = (size_t)hashes[1];
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
#endif /* JEMALLOC_INTERNAL_HASH_H */
|
||||
|
||||
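A usage sketch of the top-level hash() wrapper declared above (assuming the jemalloc-internal build environment; the hash_demo name and the seed constant are illustrative, and the function is only usable from translation units that include the internal headers):

#include <string.h>

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/hash.h"

/* Sketch: hash a NUL-terminated key into two size_t words, as ckh does. */
static void
hash_demo(const char *key, size_t r_hash[2]) {
	/* The seed is arbitrary, but must match between hashes you compare. */
	hash(key, strlen(key), 12345U, r_hash);
}

hash() itself picks hash_x64_128 or hash_x86_128 depending on pointer width and endianness, as the implementation above shows.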
289
deps/jemalloc/include/jemalloc/internal/mutex.h
vendored
@@ -1,45 +1,123 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
#ifndef JEMALLOC_INTERNAL_MUTEX_H
|
||||
#define JEMALLOC_INTERNAL_MUTEX_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/tsd.h"
|
||||
#include "jemalloc/internal/witness.h"
|
||||
|
||||
typedef enum {
	/* Can only acquire one mutex of a given witness rank at a time. */
	malloc_mutex_rank_exclusive,
	/*
	 * Can acquire multiple mutexes of the same witness rank, but in
	 * address-ascending order only.
	 */
	malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;
|
||||
|
||||
typedef struct malloc_mutex_s malloc_mutex_t;
|
||||
|
||||
#ifdef _WIN32
|
||||
# define MALLOC_MUTEX_INITIALIZER
|
||||
#elif (defined(JEMALLOC_OSSPIN))
|
||||
# define MALLOC_MUTEX_INITIALIZER {0}
|
||||
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
||||
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
|
||||
#else
|
||||
# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
|
||||
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
|
||||
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
|
||||
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
|
||||
# else
|
||||
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
|
||||
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
struct malloc_mutex_s {
	union {
		struct {
			/*
			 * prof_data is defined first to reduce cacheline
			 * bouncing: the data is not touched by the mutex holder
			 * during unlocking, while might be modified by
			 * contenders. Having it before the mutex itself could
			 * avoid prefetching a modified cacheline (for the
			 * unlocking thread).
			 */
			mutex_prof_data_t prof_data;
|
||||
#ifdef _WIN32
|
||||
CRITICAL_SECTION lock;
|
||||
# if _WIN32_WINNT >= 0x0600
|
||||
SRWLOCK lock;
|
||||
# else
|
||||
CRITICAL_SECTION lock;
|
||||
# endif
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
os_unfair_lock lock;
|
||||
#elif (defined(JEMALLOC_OSSPIN))
|
||||
OSSpinLock lock;
|
||||
OSSpinLock lock;
|
||||
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
||||
pthread_mutex_t lock;
|
||||
malloc_mutex_t *postponed_next;
|
||||
pthread_mutex_t lock;
|
||||
malloc_mutex_t *postponed_next;
|
||||
#else
|
||||
pthread_mutex_t lock;
|
||||
pthread_mutex_t lock;
|
||||
#endif
|
||||
};
|
||||
/*
|
||||
* We only touch witness when configured w/ debug. However we
|
||||
* keep the field in a union when !debug so that we don't have
|
||||
* to pollute the code base with #ifdefs, while avoid paying the
|
||||
* memory cost.
|
||||
*/
|
||||
#if !defined(JEMALLOC_DEBUG)
|
||||
witness_t witness;
|
||||
malloc_mutex_lock_order_t lock_order;
|
||||
#endif
|
||||
};
|
||||
|
||||
#if defined(JEMALLOC_DEBUG)
|
||||
witness_t witness;
|
||||
malloc_mutex_lock_order_t lock_order;
|
||||
#endif
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
/*
 * Based on benchmark results, a fixed spin with this amount of retries works
 * well for our critical sections.
 */
#define MALLOC_MUTEX_MAX_SPIN 250
|
||||
|
||||
#ifdef _WIN32
|
||||
# if _WIN32_WINNT >= 0x0600
|
||||
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
|
||||
# else
|
||||
# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
|
||||
# endif
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
|
||||
#elif (defined(JEMALLOC_OSSPIN))
|
||||
# define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
|
||||
#else
|
||||
# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
|
||||
#endif
|
||||
|
||||
#define LOCK_PROF_DATA_INITIALIZER \
|
||||
{NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
|
||||
ATOMIC_INIT(0), 0, NULL, 0}
|
||||
|
||||
#ifdef _WIN32
|
||||
# define MALLOC_MUTEX_INITIALIZER
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
#elif (defined(JEMALLOC_OSSPIN))
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, 0}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
#else
|
||||
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_LAZY_LOCK
|
||||
extern bool isthreaded;
|
||||
@@ -48,52 +126,123 @@ extern bool isthreaded;
|
||||
# define isthreaded true
|
||||
#endif
|
||||
|
||||
bool malloc_mutex_init(malloc_mutex_t *mutex);
void malloc_mutex_prefork(malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
bool mutex_boot(void);
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_boot(void);
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);

#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(malloc_mutex_t *mutex);
void malloc_mutex_unlock(malloc_mutex_t *mutex);
#endif
static inline void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
	MALLOC_MUTEX_LOCK(mutex);
}

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
{
static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
	return MALLOC_MUTEX_TRYLOCK(mutex);
}

	if (isthreaded) {
#ifdef _WIN32
		EnterCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockLock(&mutex->lock);
#else
		pthread_mutex_lock(&mutex->lock);
#endif
static inline void
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	if (config_stats) {
		mutex_prof_data_t *data = &mutex->prof_data;
		data->n_lock_ops++;
		if (data->prev_owner != tsdn) {
			data->prev_owner = tsdn;
			data->n_owner_switches++;
		}
	}
}

JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{

/* Trylock: return false if the lock is successfully acquired. */
static inline bool
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
	if (isthreaded) {
#ifdef _WIN32
		LeaveCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockUnlock(&mutex->lock);
#else
		pthread_mutex_unlock(&mutex->lock);
#endif
		if (malloc_mutex_trylock_final(mutex)) {
			return true;
		}
		mutex_owner_stats_update(tsdn, mutex);
	}
	witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);

	return false;
}

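/*
 * Illustrative usage (not part of the vendored file): the trylock above
 * returns false when the lock was acquired, so callers test the negation:
 *
 *	if (!malloc_mutex_trylock(tsdn, &mtx)) {
 *		...critical section...
 *		malloc_mutex_unlock(tsdn, &mtx);
 *	}
 */
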
/* Aggregate lock prof data. */
static inline void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
	nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
	if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
		nstime_copy(&sum->max_wait_time, &data->max_wait_time);
	}

	sum->n_wait_times += data->n_wait_times;
	sum->n_spin_acquired += data->n_spin_acquired;

	if (sum->max_n_thds < data->max_n_thds) {
		sum->max_n_thds = data->max_n_thds;
	}
	uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
	    ATOMIC_RELAXED);
	uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
	    &data->n_waiting_thds, ATOMIC_RELAXED);
	atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
	    ATOMIC_RELAXED);
	sum->n_owner_switches += data->n_owner_switches;
	sum->n_lock_ops += data->n_lock_ops;
}

static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
	if (isthreaded) {
		if (malloc_mutex_trylock_final(mutex)) {
			malloc_mutex_lock_slow(mutex);
		}
		mutex_owner_stats_update(tsdn, mutex);
	}
	witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
	if (isthreaded) {
		MALLOC_MUTEX_UNLOCK(mutex);
	}
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
static inline void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
	mutex_prof_data_t *source = &mutex->prof_data;
	/* Can only read holding the mutex. */
	malloc_mutex_assert_owner(tsdn, mutex);

	/*
	 * Not *really* allowed (we shouldn't be doing non-atomic loads of
	 * atomic data), but the mutex protection makes this safe, and writing
	 * a member-for-member copy is tedious for this situation.
	 */
	*data = *source;
	/* n_wait_thds is not reported (modified w/o locking). */
	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}

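/*
 * Illustrative sketch (not part of the vendored file): profiling counters are
 * snapshotted while holding the mutex, matching the assertion in
 * malloc_mutex_prof_read() above.
 */
static inline void
example_prof_snapshot(tsdn_t *tsdn, malloc_mutex_t *mtx,
    mutex_prof_data_t *out) {
	malloc_mutex_lock(tsdn, mtx);
	malloc_mutex_prof_read(tsdn, out, mtx);
	malloc_mutex_unlock(tsdn, mtx);
}
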
#endif /* JEMALLOC_INTERNAL_MUTEX_H */
@@ -1,147 +1,199 @@
|
||||
#define a0calloc JEMALLOC_N(a0calloc)
|
||||
#define a0free JEMALLOC_N(a0free)
|
||||
#define a0dalloc JEMALLOC_N(a0dalloc)
|
||||
#define a0get JEMALLOC_N(a0get)
|
||||
#define a0malloc JEMALLOC_N(a0malloc)
|
||||
#define arena_aalloc JEMALLOC_N(arena_aalloc)
|
||||
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
|
||||
#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
|
||||
#define arena_bin_index JEMALLOC_N(arena_bin_index)
|
||||
#define arena_bin_info JEMALLOC_N(arena_bin_info)
|
||||
#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const)
|
||||
#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable)
|
||||
#define arena_boot JEMALLOC_N(arena_boot)
|
||||
#define arena_choose JEMALLOC_N(arena_choose)
|
||||
#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
|
||||
#define arena_choose_impl JEMALLOC_N(arena_choose_impl)
|
||||
#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge)
|
||||
#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert)
|
||||
#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove)
|
||||
#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge)
|
||||
#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand)
|
||||
#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink)
|
||||
#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar)
|
||||
#define arena_cleanup JEMALLOC_N(arena_cleanup)
|
||||
#define arena_dalloc JEMALLOC_N(arena_dalloc)
|
||||
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
|
||||
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
|
||||
#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
|
||||
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
|
||||
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
|
||||
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
|
||||
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
|
||||
#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked)
|
||||
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
|
||||
#define arena_decay_tick JEMALLOC_N(arena_decay_tick)
|
||||
#define arena_decay_ticks JEMALLOC_N(arena_decay_ticks)
|
||||
#define arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get)
|
||||
#define arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set)
|
||||
#define arena_decay_time_get JEMALLOC_N(arena_decay_time_get)
|
||||
#define arena_decay_time_set JEMALLOC_N(arena_decay_time_set)
|
||||
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
|
||||
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
|
||||
#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
|
||||
#define arena_get JEMALLOC_N(arena_get)
|
||||
#define arena_ichoose JEMALLOC_N(arena_ichoose)
|
||||
#define arena_init JEMALLOC_N(arena_init)
|
||||
#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get)
|
||||
#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set)
|
||||
#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get)
|
||||
#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set)
|
||||
#define arena_malloc JEMALLOC_N(arena_malloc)
|
||||
#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard)
|
||||
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
|
||||
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
|
||||
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
|
||||
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
|
||||
#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get)
|
||||
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
|
||||
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
|
||||
#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set)
|
||||
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
|
||||
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
|
||||
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
|
||||
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
|
||||
#define arena_mapbits_size_decode JEMALLOC_N(arena_mapbits_size_decode)
|
||||
#define arena_mapbits_size_encode JEMALLOC_N(arena_mapbits_size_encode)
|
||||
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
|
||||
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
|
||||
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
|
||||
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
|
||||
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
|
||||
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
|
||||
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
|
||||
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
|
||||
#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const)
|
||||
#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable)
|
||||
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
|
||||
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
|
||||
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
|
||||
#define arena_maxclass JEMALLOC_N(arena_maxclass)
|
||||
#define arena_maxrun JEMALLOC_N(arena_maxrun)
|
||||
#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge)
|
||||
#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add)
|
||||
#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get)
|
||||
#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub)
|
||||
#define arena_migrate JEMALLOC_N(arena_migrate)
|
||||
#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const)
|
||||
#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable)
|
||||
#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind)
|
||||
#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages)
|
||||
#define arena_new JEMALLOC_N(arena_new)
|
||||
#define arena_node_alloc JEMALLOC_N(arena_node_alloc)
|
||||
#define arena_node_dalloc JEMALLOC_N(arena_node_dalloc)
|
||||
#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec)
|
||||
#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get)
|
||||
#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc)
|
||||
#define arena_palloc JEMALLOC_N(arena_palloc)
|
||||
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
|
||||
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
|
||||
#define arena_prefork JEMALLOC_N(arena_prefork)
|
||||
#define arena_prefork0 JEMALLOC_N(arena_prefork0)
|
||||
#define arena_prefork1 JEMALLOC_N(arena_prefork1)
|
||||
#define arena_prefork2 JEMALLOC_N(arena_prefork2)
|
||||
#define arena_prefork3 JEMALLOC_N(arena_prefork3)
|
||||
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
|
||||
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
|
||||
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
|
||||
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
|
||||
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
|
||||
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
|
||||
#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get)
|
||||
#define arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset)
|
||||
#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set)
|
||||
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
|
||||
#define arena_purge_all JEMALLOC_N(arena_purge_all)
|
||||
#define arena_purge JEMALLOC_N(arena_purge)
|
||||
#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
|
||||
#define arena_ralloc JEMALLOC_N(arena_ralloc)
|
||||
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
|
||||
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
|
||||
#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm)
|
||||
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
|
||||
#define arena_reset JEMALLOC_N(arena_reset)
|
||||
#define arena_run_regind JEMALLOC_N(arena_run_regind)
|
||||
#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm)
|
||||
#define arena_salloc JEMALLOC_N(arena_salloc)
|
||||
#define arena_sdalloc JEMALLOC_N(arena_sdalloc)
|
||||
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
|
||||
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
|
||||
#define arena_tdata_get JEMALLOC_N(arena_tdata_get)
|
||||
#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard)
|
||||
#define arenas JEMALLOC_N(arenas)
|
||||
#define arenas_booted JEMALLOC_N(arenas_booted)
|
||||
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
|
||||
#define arenas_extend JEMALLOC_N(arenas_extend)
|
||||
#define arenas_initialized JEMALLOC_N(arenas_initialized)
|
||||
#define arenas_lock JEMALLOC_N(arenas_lock)
|
||||
#define arenas_tls JEMALLOC_N(arenas_tls)
|
||||
#define arenas_tsd JEMALLOC_N(arenas_tsd)
|
||||
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
|
||||
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
|
||||
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
|
||||
#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper)
|
||||
#define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head)
|
||||
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
|
||||
#define arenas_tdata_bypass_cleanup JEMALLOC_N(arenas_tdata_bypass_cleanup)
|
||||
#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup)
|
||||
#define atomic_add_p JEMALLOC_N(atomic_add_p)
|
||||
#define atomic_add_u JEMALLOC_N(atomic_add_u)
|
||||
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
|
||||
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
|
||||
#define atomic_add_z JEMALLOC_N(atomic_add_z)
|
||||
#define atomic_cas_p JEMALLOC_N(atomic_cas_p)
|
||||
#define atomic_cas_u JEMALLOC_N(atomic_cas_u)
|
||||
#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32)
|
||||
#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64)
|
||||
#define atomic_cas_z JEMALLOC_N(atomic_cas_z)
|
||||
#define atomic_sub_p JEMALLOC_N(atomic_sub_p)
|
||||
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
|
||||
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
|
||||
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
|
||||
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
|
||||
#define atomic_write_p JEMALLOC_N(atomic_write_p)
|
||||
#define atomic_write_u JEMALLOC_N(atomic_write_u)
|
||||
#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32)
|
||||
#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64)
|
||||
#define atomic_write_z JEMALLOC_N(atomic_write_z)
|
||||
#define base_alloc JEMALLOC_N(base_alloc)
|
||||
#define base_boot JEMALLOC_N(base_boot)
|
||||
#define base_calloc JEMALLOC_N(base_calloc)
|
||||
#define base_node_alloc JEMALLOC_N(base_node_alloc)
|
||||
#define base_node_dealloc JEMALLOC_N(base_node_dealloc)
|
||||
#define base_postfork_child JEMALLOC_N(base_postfork_child)
|
||||
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
|
||||
#define base_prefork JEMALLOC_N(base_prefork)
|
||||
#define base_stats_get JEMALLOC_N(base_stats_get)
|
||||
#define bitmap_full JEMALLOC_N(bitmap_full)
|
||||
#define bitmap_get JEMALLOC_N(bitmap_get)
|
||||
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
|
||||
#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups)
|
||||
#define bitmap_init JEMALLOC_N(bitmap_init)
|
||||
#define bitmap_set JEMALLOC_N(bitmap_set)
|
||||
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
|
||||
#define bitmap_size JEMALLOC_N(bitmap_size)
|
||||
#define bitmap_unset JEMALLOC_N(bitmap_unset)
|
||||
#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
|
||||
#define bootstrap_free JEMALLOC_N(bootstrap_free)
|
||||
#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
|
||||
#define bt_init JEMALLOC_N(bt_init)
|
||||
#define buferror JEMALLOC_N(buferror)
|
||||
#define choose_arena JEMALLOC_N(choose_arena)
|
||||
#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
|
||||
#define chunk_alloc JEMALLOC_N(chunk_alloc)
|
||||
#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base)
|
||||
#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache)
|
||||
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
|
||||
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
|
||||
#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper)
|
||||
#define chunk_boot JEMALLOC_N(chunk_boot)
|
||||
#define chunk_dealloc JEMALLOC_N(chunk_dealloc)
|
||||
#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
|
||||
#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache)
|
||||
#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap)
|
||||
#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper)
|
||||
#define chunk_deregister JEMALLOC_N(chunk_deregister)
|
||||
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
|
||||
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
|
||||
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
|
||||
#define chunk_dss_mergeable JEMALLOC_N(chunk_dss_mergeable)
|
||||
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
|
||||
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
|
||||
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
|
||||
#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default)
|
||||
#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get)
|
||||
#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set)
|
||||
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
|
||||
#define chunk_lookup JEMALLOC_N(chunk_lookup)
|
||||
#define chunk_npages JEMALLOC_N(chunk_npages)
|
||||
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
|
||||
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
|
||||
#define chunk_prefork JEMALLOC_N(chunk_prefork)
|
||||
#define chunk_unmap JEMALLOC_N(chunk_unmap)
|
||||
#define chunks_mtx JEMALLOC_N(chunks_mtx)
|
||||
#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper)
|
||||
#define chunk_register JEMALLOC_N(chunk_register)
|
||||
#define chunks_rtree JEMALLOC_N(chunks_rtree)
|
||||
#define chunksize JEMALLOC_N(chunksize)
|
||||
#define chunksize_mask JEMALLOC_N(chunksize_mask)
|
||||
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
|
||||
#define ckh_count JEMALLOC_N(ckh_count)
|
||||
#define ckh_delete JEMALLOC_N(ckh_delete)
|
||||
#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
|
||||
#define ckh_insert JEMALLOC_N(ckh_insert)
|
||||
#define ckh_isearch JEMALLOC_N(ckh_isearch)
|
||||
#define ckh_iter JEMALLOC_N(ckh_iter)
|
||||
#define ckh_new JEMALLOC_N(ckh_new)
|
||||
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
|
||||
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
|
||||
#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
|
||||
#define ckh_remove JEMALLOC_N(ckh_remove)
|
||||
#define ckh_search JEMALLOC_N(ckh_search)
|
||||
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
|
||||
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
|
||||
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
|
||||
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
|
||||
#define ctl_boot JEMALLOC_N(ctl_boot)
|
||||
#define ctl_bymib JEMALLOC_N(ctl_bymib)
|
||||
#define ctl_byname JEMALLOC_N(ctl_byname)
|
||||
@@ -149,7 +201,33 @@
|
||||
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
|
||||
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
|
||||
#define ctl_prefork JEMALLOC_N(ctl_prefork)
|
||||
#define decay_ticker_get JEMALLOC_N(decay_ticker_get)
|
||||
#define dss_prec_names JEMALLOC_N(dss_prec_names)
|
||||
#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get)
|
||||
#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set)
|
||||
#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get)
|
||||
#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set)
|
||||
#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get)
|
||||
#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set)
|
||||
#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get)
|
||||
#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set)
|
||||
#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert)
|
||||
#define extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init)
|
||||
#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove)
|
||||
#define extent_node_init JEMALLOC_N(extent_node_init)
|
||||
#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get)
|
||||
#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set)
|
||||
#define extent_node_size_get JEMALLOC_N(extent_node_size_get)
|
||||
#define extent_node_size_set JEMALLOC_N(extent_node_size_set)
|
||||
#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get)
|
||||
#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set)
|
||||
#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get)
|
||||
#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set)
|
||||
#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil)
|
||||
#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor)
|
||||
#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy)
|
||||
#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse)
|
||||
#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty)
|
||||
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
|
||||
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
|
||||
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
|
||||
@@ -166,22 +244,31 @@
|
||||
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
|
||||
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
|
||||
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
|
||||
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
|
||||
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
|
||||
#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
|
||||
#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
|
||||
#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
|
||||
#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
|
||||
#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
|
||||
#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
|
||||
#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
|
||||
#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
|
||||
#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
|
||||
#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
|
||||
#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
|
||||
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
|
||||
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
|
||||
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
|
||||
#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy)
|
||||
#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse)
|
||||
#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty)
|
||||
#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first)
|
||||
#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert)
|
||||
#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter)
|
||||
#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse)
|
||||
#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start)
|
||||
#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last)
|
||||
#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new)
|
||||
#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next)
|
||||
#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch)
|
||||
#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev)
|
||||
#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch)
|
||||
#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove)
|
||||
#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter)
|
||||
#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse)
|
||||
#define extent_tree_szsnad_reverse_iter_start JEMALLOC_N(extent_tree_szsnad_reverse_iter_start)
|
||||
#define extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search)
|
||||
#define ffs_llu JEMALLOC_N(ffs_llu)
|
||||
#define ffs_lu JEMALLOC_N(ffs_lu)
|
||||
#define ffs_u JEMALLOC_N(ffs_u)
|
||||
#define ffs_u32 JEMALLOC_N(ffs_u32)
|
||||
#define ffs_u64 JEMALLOC_N(ffs_u64)
|
||||
#define ffs_zu JEMALLOC_N(ffs_zu)
|
||||
#define get_errno JEMALLOC_N(get_errno)
|
||||
#define hash JEMALLOC_N(hash)
|
||||
#define hash_fmix_32 JEMALLOC_N(hash_fmix_32)
|
||||
@@ -193,46 +280,51 @@
|
||||
#define hash_x64_128 JEMALLOC_N(hash_x64_128)
|
||||
#define hash_x86_128 JEMALLOC_N(hash_x86_128)
|
||||
#define hash_x86_32 JEMALLOC_N(hash_x86_32)
|
||||
#define huge_allocated JEMALLOC_N(huge_allocated)
|
||||
#define huge_boot JEMALLOC_N(huge_boot)
|
||||
#define huge_aalloc JEMALLOC_N(huge_aalloc)
|
||||
#define huge_dalloc JEMALLOC_N(huge_dalloc)
|
||||
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
|
||||
#define huge_dss_prec_get JEMALLOC_N(huge_dss_prec_get)
|
||||
#define huge_malloc JEMALLOC_N(huge_malloc)
|
||||
#define huge_mtx JEMALLOC_N(huge_mtx)
|
||||
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
|
||||
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
|
||||
#define huge_palloc JEMALLOC_N(huge_palloc)
|
||||
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
|
||||
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
|
||||
#define huge_prefork JEMALLOC_N(huge_prefork)
|
||||
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
|
||||
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
|
||||
#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get)
|
||||
#define huge_prof_tctx_reset JEMALLOC_N(huge_prof_tctx_reset)
|
||||
#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set)
|
||||
#define huge_ralloc JEMALLOC_N(huge_ralloc)
|
||||
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
|
||||
#define huge_salloc JEMALLOC_N(huge_salloc)
|
||||
#define iallocm JEMALLOC_N(iallocm)
|
||||
#define icalloc JEMALLOC_N(icalloc)
|
||||
#define icalloct JEMALLOC_N(icalloct)
|
||||
#define iaalloc JEMALLOC_N(iaalloc)
|
||||
#define ialloc JEMALLOC_N(ialloc)
|
||||
#define iallocztm JEMALLOC_N(iallocztm)
|
||||
#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
|
||||
#define idalloc JEMALLOC_N(idalloc)
|
||||
#define idalloct JEMALLOC_N(idalloct)
|
||||
#define imalloc JEMALLOC_N(imalloc)
|
||||
#define imalloct JEMALLOC_N(imalloct)
|
||||
#define idalloctm JEMALLOC_N(idalloctm)
|
||||
#define in_valgrind JEMALLOC_N(in_valgrind)
|
||||
#define index2size JEMALLOC_N(index2size)
|
||||
#define index2size_compute JEMALLOC_N(index2size_compute)
|
||||
#define index2size_lookup JEMALLOC_N(index2size_lookup)
|
||||
#define index2size_tab JEMALLOC_N(index2size_tab)
|
||||
#define ipalloc JEMALLOC_N(ipalloc)
|
||||
#define ipalloct JEMALLOC_N(ipalloct)
|
||||
#define ipallocztm JEMALLOC_N(ipallocztm)
|
||||
#define iqalloc JEMALLOC_N(iqalloc)
|
||||
#define iqalloct JEMALLOC_N(iqalloct)
|
||||
#define iralloc JEMALLOC_N(iralloc)
|
||||
#define iralloct JEMALLOC_N(iralloct)
|
||||
#define iralloct_realign JEMALLOC_N(iralloct_realign)
|
||||
#define isalloc JEMALLOC_N(isalloc)
|
||||
#define isdalloct JEMALLOC_N(isdalloct)
|
||||
#define isqalloc JEMALLOC_N(isqalloc)
|
||||
#define isthreaded JEMALLOC_N(isthreaded)
|
||||
#define ivsalloc JEMALLOC_N(ivsalloc)
|
||||
#define ixalloc JEMALLOC_N(ixalloc)
|
||||
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
|
||||
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
|
||||
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
|
||||
#define large_maxclass JEMALLOC_N(large_maxclass)
|
||||
#define lg_floor JEMALLOC_N(lg_floor)
|
||||
#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
|
||||
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
|
||||
#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner)
|
||||
#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner)
|
||||
#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
|
||||
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
|
||||
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
|
||||
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
|
||||
@@ -242,7 +334,8 @@
|
||||
#define malloc_printf JEMALLOC_N(malloc_printf)
|
||||
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
|
||||
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
|
||||
#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
|
||||
#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
|
||||
#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
|
||||
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
|
||||
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
|
||||
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
|
||||
@@ -251,16 +344,35 @@
|
||||
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
|
||||
#define malloc_write JEMALLOC_N(malloc_write)
|
||||
#define map_bias JEMALLOC_N(map_bias)
|
||||
#define map_misc_offset JEMALLOC_N(map_misc_offset)
|
||||
#define mb_write JEMALLOC_N(mb_write)
|
||||
#define mutex_boot JEMALLOC_N(mutex_boot)
|
||||
#define narenas_auto JEMALLOC_N(narenas_auto)
|
||||
#define narenas_total JEMALLOC_N(narenas_total)
|
||||
#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup)
|
||||
#define narenas_total_get JEMALLOC_N(narenas_total_get)
|
||||
#define ncpus JEMALLOC_N(ncpus)
|
||||
#define nhbins JEMALLOC_N(nhbins)
|
||||
#define nhclasses JEMALLOC_N(nhclasses)
|
||||
#define nlclasses JEMALLOC_N(nlclasses)
|
||||
#define nstime_add JEMALLOC_N(nstime_add)
|
||||
#define nstime_compare JEMALLOC_N(nstime_compare)
|
||||
#define nstime_copy JEMALLOC_N(nstime_copy)
|
||||
#define nstime_divide JEMALLOC_N(nstime_divide)
|
||||
#define nstime_idivide JEMALLOC_N(nstime_idivide)
|
||||
#define nstime_imultiply JEMALLOC_N(nstime_imultiply)
|
||||
#define nstime_init JEMALLOC_N(nstime_init)
|
||||
#define nstime_init2 JEMALLOC_N(nstime_init2)
|
||||
#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
|
||||
#define nstime_ns JEMALLOC_N(nstime_ns)
|
||||
#define nstime_nsec JEMALLOC_N(nstime_nsec)
|
||||
#define nstime_sec JEMALLOC_N(nstime_sec)
|
||||
#define nstime_subtract JEMALLOC_N(nstime_subtract)
|
||||
#define nstime_update JEMALLOC_N(nstime_update)
|
||||
#define opt_abort JEMALLOC_N(opt_abort)
|
||||
#define opt_decay_time JEMALLOC_N(opt_decay_time)
|
||||
#define opt_dss JEMALLOC_N(opt_dss)
|
||||
#define opt_junk JEMALLOC_N(opt_junk)
|
||||
#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
|
||||
#define opt_junk_free JEMALLOC_N(opt_junk_free)
|
||||
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
|
||||
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
|
||||
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
|
||||
@@ -274,140 +386,254 @@
|
||||
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
|
||||
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
|
||||
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
|
||||
#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
|
||||
#define opt_purge JEMALLOC_N(opt_purge)
|
||||
#define opt_quarantine JEMALLOC_N(opt_quarantine)
|
||||
#define opt_redzone JEMALLOC_N(opt_redzone)
|
||||
#define opt_stats_print JEMALLOC_N(opt_stats_print)
|
||||
#define opt_tcache JEMALLOC_N(opt_tcache)
|
||||
#define opt_thp JEMALLOC_N(opt_thp)
|
||||
#define opt_utrace JEMALLOC_N(opt_utrace)
|
||||
#define opt_valgrind JEMALLOC_N(opt_valgrind)
|
||||
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
|
||||
#define opt_zero JEMALLOC_N(opt_zero)
|
||||
#define p2rz JEMALLOC_N(p2rz)
|
||||
#define pages_boot JEMALLOC_N(pages_boot)
|
||||
#define pages_commit JEMALLOC_N(pages_commit)
|
||||
#define pages_decommit JEMALLOC_N(pages_decommit)
|
||||
#define pages_huge JEMALLOC_N(pages_huge)
|
||||
#define pages_map JEMALLOC_N(pages_map)
|
||||
#define pages_nohuge JEMALLOC_N(pages_nohuge)
|
||||
#define pages_purge JEMALLOC_N(pages_purge)
|
||||
#define pow2_ceil JEMALLOC_N(pow2_ceil)
|
||||
#define pages_trim JEMALLOC_N(pages_trim)
|
||||
#define pages_unmap JEMALLOC_N(pages_unmap)
|
||||
#define pind2sz JEMALLOC_N(pind2sz)
|
||||
#define pind2sz_compute JEMALLOC_N(pind2sz_compute)
|
||||
#define pind2sz_lookup JEMALLOC_N(pind2sz_lookup)
|
||||
#define pind2sz_tab JEMALLOC_N(pind2sz_tab)
|
||||
#define pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32)
|
||||
#define pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64)
|
||||
#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu)
|
||||
#define prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32)
|
||||
#define prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64)
|
||||
#define prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu)
|
||||
#define prng_range_u32 JEMALLOC_N(prng_range_u32)
|
||||
#define prng_range_u64 JEMALLOC_N(prng_range_u64)
|
||||
#define prng_range_zu JEMALLOC_N(prng_range_zu)
|
||||
#define prng_state_next_u32 JEMALLOC_N(prng_state_next_u32)
|
||||
#define prng_state_next_u64 JEMALLOC_N(prng_state_next_u64)
|
||||
#define prng_state_next_zu JEMALLOC_N(prng_state_next_zu)
|
||||
#define prof_active JEMALLOC_N(prof_active)
|
||||
#define prof_active_get JEMALLOC_N(prof_active_get)
|
||||
#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked)
|
||||
#define prof_active_set JEMALLOC_N(prof_active_set)
|
||||
#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep)
|
||||
#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
|
||||
#define prof_backtrace JEMALLOC_N(prof_backtrace)
|
||||
#define prof_boot0 JEMALLOC_N(prof_boot0)
|
||||
#define prof_boot1 JEMALLOC_N(prof_boot1)
|
||||
#define prof_boot2 JEMALLOC_N(prof_boot2)
|
||||
#define prof_bt_count JEMALLOC_N(prof_bt_count)
|
||||
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
|
||||
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
|
||||
#define prof_dump_header JEMALLOC_N(prof_dump_header)
|
||||
#define prof_dump_open JEMALLOC_N(prof_dump_open)
|
||||
#define prof_free JEMALLOC_N(prof_free)
|
||||
#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
|
||||
#define prof_gdump JEMALLOC_N(prof_gdump)
|
||||
#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
|
||||
#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked)
|
||||
#define prof_gdump_set JEMALLOC_N(prof_gdump_set)
|
||||
#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
|
||||
#define prof_idump JEMALLOC_N(prof_idump)
|
||||
#define prof_interval JEMALLOC_N(prof_interval)
|
||||
#define prof_lookup JEMALLOC_N(prof_lookup)
|
||||
#define prof_malloc JEMALLOC_N(prof_malloc)
|
||||
#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
|
||||
#define prof_mdump JEMALLOC_N(prof_mdump)
|
||||
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
|
||||
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
|
||||
#define prof_prefork JEMALLOC_N(prof_prefork)
|
||||
#define prof_promote JEMALLOC_N(prof_promote)
|
||||
#define prof_prefork0 JEMALLOC_N(prof_prefork0)
|
||||
#define prof_prefork1 JEMALLOC_N(prof_prefork1)
|
||||
#define prof_realloc JEMALLOC_N(prof_realloc)
|
||||
#define prof_reset JEMALLOC_N(prof_reset)
|
||||
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
|
||||
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
|
||||
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
|
||||
#define prof_tctx_get JEMALLOC_N(prof_tctx_get)
|
||||
#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset)
|
||||
#define prof_tctx_set JEMALLOC_N(prof_tctx_set)
|
||||
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
|
||||
#define prof_tdata_count JEMALLOC_N(prof_tdata_count)
|
||||
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
|
||||
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
|
||||
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
|
||||
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
|
||||
#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
|
||||
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
|
||||
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
|
||||
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
|
||||
#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
|
||||
#define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head)
|
||||
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
|
||||
#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
|
||||
#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
|
||||
#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
|
||||
#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
|
||||
#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
|
||||
#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
|
||||
#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
|
||||
#define psz2ind JEMALLOC_N(psz2ind)
|
||||
#define psz2u JEMALLOC_N(psz2u)
|
||||
#define purge_mode_names JEMALLOC_N(purge_mode_names)
|
||||
#define quarantine JEMALLOC_N(quarantine)
|
||||
#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
|
||||
#define quarantine_boot JEMALLOC_N(quarantine_boot)
|
||||
#define quarantine_booted JEMALLOC_N(quarantine_booted)
|
||||
#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work)
|
||||
#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
|
||||
#define quarantine_init JEMALLOC_N(quarantine_init)
|
||||
#define quarantine_tls JEMALLOC_N(quarantine_tls)
|
||||
#define quarantine_tsd JEMALLOC_N(quarantine_tsd)
|
||||
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
|
||||
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
|
||||
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
|
||||
#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper)
|
||||
#define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head)
|
||||
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
|
||||
#define register_zone JEMALLOC_N(register_zone)
|
||||
#define rtree_child_read JEMALLOC_N(rtree_child_read)
|
||||
#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard)
|
||||
#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread)
|
||||
#define rtree_delete JEMALLOC_N(rtree_delete)
|
||||
#define rtree_get JEMALLOC_N(rtree_get)
|
||||
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
|
||||
#define rtree_new JEMALLOC_N(rtree_new)
|
||||
#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
|
||||
#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
|
||||
#define rtree_prefork JEMALLOC_N(rtree_prefork)
|
||||
#define rtree_node_valid JEMALLOC_N(rtree_node_valid)
|
||||
#define rtree_set JEMALLOC_N(rtree_set)
|
||||
#define rtree_start_level JEMALLOC_N(rtree_start_level)
|
||||
#define rtree_subkey JEMALLOC_N(rtree_subkey)
|
||||
#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read)
|
||||
#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard)
|
||||
#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread)
|
||||
#define rtree_val_read JEMALLOC_N(rtree_val_read)
|
||||
#define rtree_val_write JEMALLOC_N(rtree_val_write)
|
||||
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
|
||||
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
|
||||
#define s2u JEMALLOC_N(s2u)
|
||||
#define s2u_compute JEMALLOC_N(s2u_compute)
|
||||
#define s2u_lookup JEMALLOC_N(s2u_lookup)
|
||||
#define sa2u JEMALLOC_N(sa2u)
|
||||
#define set_errno JEMALLOC_N(set_errno)
|
||||
#define small_size2bin JEMALLOC_N(small_size2bin)
|
||||
#define size2index JEMALLOC_N(size2index)
|
||||
#define size2index_compute JEMALLOC_N(size2index_compute)
|
||||
#define size2index_lookup JEMALLOC_N(size2index_lookup)
|
||||
#define size2index_tab JEMALLOC_N(size2index_tab)
|
||||
#define spin_adaptive JEMALLOC_N(spin_adaptive)
|
||||
#define spin_init JEMALLOC_N(spin_init)
|
||||
#define stats_cactive JEMALLOC_N(stats_cactive)
|
||||
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
|
||||
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
|
||||
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
|
||||
#define stats_chunks JEMALLOC_N(stats_chunks)
|
||||
#define stats_print JEMALLOC_N(stats_print)
|
||||
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
|
||||
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
|
||||
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
|
||||
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
|
||||
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
|
||||
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
|
||||
#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
|
||||
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
|
||||
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
|
||||
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
|
||||
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
|
||||
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
|
||||
#define tcache_booted JEMALLOC_N(tcache_booted)
|
||||
#define tcache_boot JEMALLOC_N(tcache_boot)
|
||||
#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
|
||||
#define tcache_create JEMALLOC_N(tcache_create)
|
||||
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
|
||||
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
|
||||
#define tcache_destroy JEMALLOC_N(tcache_destroy)
|
||||
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
|
||||
#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup)
|
||||
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
|
||||
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
|
||||
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
|
||||
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
|
||||
#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
|
||||
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
|
||||
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
|
||||
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
|
||||
#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper)
|
||||
#define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head)
|
||||
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
|
||||
#define tcache_event JEMALLOC_N(tcache_event)
|
||||
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
|
||||
#define tcache_flush JEMALLOC_N(tcache_flush)
|
||||
#define tcache_get JEMALLOC_N(tcache_get)
|
||||
#define tcache_initialized JEMALLOC_N(tcache_initialized)
|
||||
#define tcache_get_hard JEMALLOC_N(tcache_get_hard)
|
||||
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
|
||||
#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child)
|
||||
#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent)
|
||||
#define tcache_prefork JEMALLOC_N(tcache_prefork)
|
||||
#define tcache_salloc JEMALLOC_N(tcache_salloc)
|
||||
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
|
||||
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
|
||||
#define tcache_tls JEMALLOC_N(tcache_tls)
|
||||
#define tcache_tsd JEMALLOC_N(tcache_tsd)
|
||||
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
|
||||
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
|
||||
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
|
||||
#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper)
|
||||
#define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head)
|
||||
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
|
||||
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
|
||||
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
|
||||
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
|
||||
#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
|
||||
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
|
||||
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
|
||||
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
|
||||
#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper)
|
||||
#define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head)
|
||||
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
|
||||
#define tcaches JEMALLOC_N(tcaches)
|
||||
#define tcaches_create JEMALLOC_N(tcaches_create)
|
||||
#define tcaches_destroy JEMALLOC_N(tcaches_destroy)
|
||||
#define tcaches_flush JEMALLOC_N(tcaches_flush)
|
||||
#define tcaches_get JEMALLOC_N(tcaches_get)
|
||||
#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup)
|
||||
#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup)
|
||||
#define ticker_copy JEMALLOC_N(ticker_copy)
|
||||
#define ticker_init JEMALLOC_N(ticker_init)
|
||||
#define ticker_read JEMALLOC_N(ticker_read)
|
||||
#define ticker_tick JEMALLOC_N(ticker_tick)
|
||||
#define ticker_ticks JEMALLOC_N(ticker_ticks)
|
||||
#define tsd_arena_get JEMALLOC_N(tsd_arena_get)
|
||||
#define tsd_arena_set JEMALLOC_N(tsd_arena_set)
|
||||
#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get)
|
||||
#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get)
|
||||
#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set)
|
||||
#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get)
|
||||
#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get)
|
||||
#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set)
|
||||
#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get)
|
||||
#define tsd_boot JEMALLOC_N(tsd_boot)
|
||||
#define tsd_boot0 JEMALLOC_N(tsd_boot0)
|
||||
#define tsd_boot1 JEMALLOC_N(tsd_boot1)
|
||||
#define tsd_booted JEMALLOC_N(tsd_booted)
|
||||
#define tsd_booted_get JEMALLOC_N(tsd_booted_get)
|
||||
#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
|
||||
#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper)
|
||||
#define tsd_fetch JEMALLOC_N(tsd_fetch)
|
||||
#define tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl)
|
||||
#define tsd_get JEMALLOC_N(tsd_get)
|
||||
#define tsd_get_allocates JEMALLOC_N(tsd_get_allocates)
|
||||
#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get)
|
||||
#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set)
|
||||
#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get)
|
||||
#define tsd_initialized JEMALLOC_N(tsd_initialized)
|
||||
#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
|
||||
#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
|
||||
#define tsd_init_head JEMALLOC_N(tsd_init_head)
|
||||
#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get)
|
||||
#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set)
|
||||
#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get)
|
||||
#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
|
||||
#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
|
||||
#define tsd_nominal JEMALLOC_N(tsd_nominal)
|
||||
#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get)
|
||||
#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set)
|
||||
#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get)
|
||||
#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get)
|
||||
#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set)
|
||||
#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get)
|
||||
#define tsd_set JEMALLOC_N(tsd_set)
|
||||
#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get)
|
||||
#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set)
|
||||
#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get)
|
||||
#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get)
|
||||
#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set)
|
||||
#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get)
|
||||
#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get)
|
||||
#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set)
|
||||
#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get)
|
||||
#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get)
|
||||
#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set)
|
||||
#define tsd_thread_deallocatedp_get JEMALLOC_N(tsd_thread_deallocatedp_get)
|
||||
#define tsd_tls JEMALLOC_N(tsd_tls)
|
||||
#define tsd_tsd JEMALLOC_N(tsd_tsd)
|
||||
#define tsd_tsdn JEMALLOC_N(tsd_tsdn)
|
||||
#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get)
|
||||
#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set)
|
||||
#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get)
|
||||
#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get)
|
||||
#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set)
|
||||
#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get)
|
||||
#define tsdn_fetch JEMALLOC_N(tsdn_fetch)
|
||||
#define tsdn_null JEMALLOC_N(tsdn_null)
|
||||
#define tsdn_tsd JEMALLOC_N(tsdn_tsd)
|
||||
#define u2rz JEMALLOC_N(u2rz)
|
||||
#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)
|
||||
#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined)
|
||||
#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess)
|
||||
#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined)
|
||||
#define witness_assert_depth JEMALLOC_N(witness_assert_depth)
|
||||
#define witness_assert_depth_to_rank JEMALLOC_N(witness_assert_depth_to_rank)
|
||||
#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless)
|
||||
#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner)
|
||||
#define witness_assert_owner JEMALLOC_N(witness_assert_owner)
|
||||
#define witness_depth_error JEMALLOC_N(witness_depth_error)
|
||||
#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup)
|
||||
#define witness_init JEMALLOC_N(witness_init)
|
||||
#define witness_lock JEMALLOC_N(witness_lock)
|
||||
#define witness_lock_error JEMALLOC_N(witness_lock_error)
|
||||
#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
|
||||
#define witness_owner JEMALLOC_N(witness_owner)
|
||||
#define witness_owner_error JEMALLOC_N(witness_owner_error)
|
||||
#define witness_postfork_child JEMALLOC_N(witness_postfork_child)
|
||||
#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
|
||||
#define witness_prefork JEMALLOC_N(witness_prefork)
|
||||
#define witness_unlock JEMALLOC_N(witness_unlock)
|
||||
#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup)
|
||||
#define zone_register JEMALLOC_N(zone_register)
|
||||
|
||||
197
deps/jemalloc/include/jemalloc/internal/prng.h
vendored
@@ -1,5 +1,8 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"

/*
 * Simple linear congruential pseudo-random number generator:
@@ -15,46 +18,168 @@
 * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
 *
 * This choice of m has the disadvantage that the quality of the bits is
 * proportional to bit position. For example, the lowest bit has a cycle of 2,
 * the next has a cycle of 4, etc. For this reason, we prefer to use the upper
 * bits.
 *
 * Macro parameters:
 *   uint32_t r          : Result.
 *   unsigned lg_range   : (0..32], number of least significant bits to return.
 *   uint32_t state      : Seed value.
 *   const uint32_t a, c : See above discussion.
 */
#define prng32(r, lg_range, state, a, c) do { \
	assert(lg_range > 0); \
	assert(lg_range <= 32); \
	\
	r = (state * (a)) + (c); \
	state = r; \
	r >>= (32 - lg_range); \
} while (false)

/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
	assert(lg_range > 0); \
	assert(lg_range <= 64); \
	\
	r = (state * (a)) + (c); \
	state = r; \
	r >>= (64 - lg_range); \
} while (false)

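/*
 * Illustrative usage of the prng32() macro above (a sketch, not part of the
 * vendored file): the caller owns the state word and asks for the top
 * lg_range bits of the next LCG step.  The a/c literals here match the
 * PRNG_A_32/PRNG_C_32 constants defined further down.
 */
static inline uint32_t
example_prng32_draw(uint32_t *state, unsigned lg_range) {
	uint32_t r;

	prng32(r, lg_range, *state, UINT32_C(1103515241), UINT32_C(12347));
	return r;
}
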
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)

JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state) {
	return (state * PRNG_A_32) + PRNG_C_32;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state) {
	return (state * PRNG_A_64) + PRNG_C_64;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
	return (state * PRNG_A_32) + PRNG_C_32;
#elif LG_SIZEOF_PTR == 3
	return (state * PRNG_A_64) + PRNG_C_64;
#else
#error Unsupported pointer size
#endif
}

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
/* BEGIN PUBLIC API */
/******************************************************************************/

/*
 * The prng_lg_range functions give a uniform int in the half-open range [0,
 * 2**lg_range). If atomic is true, they do so safely from multiple threads.
 * Multithreaded 64-bit prngs aren't supported.
 */

JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
uint32_t ret, state0, state1;

assert(lg_range > 0);
assert(lg_range <= 32);

state0 = atomic_load_u32(state, ATOMIC_RELAXED);

if (atomic) {
do {
state1 = prng_state_next_u32(state0);
} while (!atomic_compare_exchange_weak_u32(state, &state0,
state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
} else {
state1 = prng_state_next_u32(state0);
atomic_store_u32(state, state1, ATOMIC_RELAXED);
}
ret = state1 >> (32 - lg_range);

return ret;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
uint64_t ret, state1;

assert(lg_range > 0);
assert(lg_range <= 64);

state1 = prng_state_next_u64(*state);
*state = state1;
ret = state1 >> (64 - lg_range);

return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
size_t ret, state0, state1;

assert(lg_range > 0);
assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));

state0 = atomic_load_zu(state, ATOMIC_RELAXED);

if (atomic) {
do {
state1 = prng_state_next_zu(state0);
} while (!atomic_compare_exchange_weak_zu(state, &state0,
state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
} else {
state1 = prng_state_next_zu(state0);
atomic_store_zu(state, state1, ATOMIC_RELAXED);
}
ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);

return ret;
}

/*
* The prng_range functions behave like the prng_lg_range, but return a result
* in [0, range) instead of [0, 2**lg_range).
*/

JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
uint32_t ret;
unsigned lg_range;

assert(range > 1);

/* Compute the ceiling of lg(range). */
lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;

/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_u32(state, lg_range, atomic);
} while (ret >= range);

return ret;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
uint64_t ret;
unsigned lg_range;

assert(range > 1);

/* Compute the ceiling of lg(range). */
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_u64(state, lg_range);
} while (ret >= range);

return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
size_t ret;
unsigned lg_range;

assert(range > 1);

/* Compute the ceiling of lg(range). */
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_zu(state, lg_range, atomic);
} while (ret >= range);

return ret;
}

#endif /* JEMALLOC_INTERNAL_PRNG_H */
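The prng_range_* helpers above are the intended entry points for callers. Below is a minimal sketch of drawing a bounded value from the non-atomic 64-bit variant; the example_ names and the seed value are illustrative and not part of jemalloc.

#include <stdint.h>

/* Illustrative caller; prng_range_u64() is the helper defined above. */
static uint64_t example_prng_state = 0x9e3779b97f4a7c15; /* arbitrary seed */

static uint64_t
example_pick_index(uint64_t nslots) {
	/*
	 * Uniform value in [0, nslots), nslots > 1; rejection of
	 * out-of-range draws happens inside prng_range_u64().
	 */
	return prng_range_u64(&example_prng_state, nslots);
}

The u32 and zu variants behave the same way but take an atomic_u32_t or atomic_zu_t state plus a bool selecting the CAS-based update path shown in prng_lg_range_u32().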
47
deps/jemalloc/include/jemalloc/internal/ql.h
vendored
@@ -1,61 +1,64 @@
/*
* List definitions.
*/
#define ql_head(a_type) \
#ifndef JEMALLOC_INTERNAL_QL_H
#define JEMALLOC_INTERNAL_QL_H

#include "jemalloc/internal/qr.h"

/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}

#define ql_head_initializer(a_head) {NULL}
#define ql_head_initializer(a_head) {NULL}

#define ql_elm(a_type) qr(a_type)
#define ql_elm(a_type) qr(a_type)

/* List functions. */
#define ql_new(a_head) do { \
#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)

#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)

#define ql_first(a_head) ((a_head)->qlh_first)
#define ql_first(a_head) ((a_head)->qlh_first)

#define ql_last(a_head, a_field) \
#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)

#define ql_next(a_head, a_elm, a_field) \
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)

#define ql_prev(a_head, a_elm, a_field) \
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)

#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)

#define ql_after_insert(a_qlelm, a_elm, a_field) \
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)

#define ql_head_insert(a_head, a_elm, a_field) do { \
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)

#define ql_tail_insert(a_head, a_elm, a_field) do { \
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)

#define ql_remove(a_head, a_elm, a_field) do { \
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
@@ -66,18 +69,20 @@ struct { \
} \
} while (0)

#define ql_head_remove(a_head, a_type, a_field) do { \
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)

#define ql_tail_remove(a_head, a_type, a_field) do { \
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)

#define ql_foreach(a_var, a_head, a_field) \
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)

#define ql_reverse_foreach(a_var, a_head, a_field) \
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)

#endif /* JEMALLOC_INTERNAL_QL_H */
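ql.h is an intrusive list: the linkage lives inside the client's own struct via ql_elm(), and a ql_head() is just a pointer to the first element of the underlying qr ring. A minimal sketch of that pattern follows; widget_t and its fields are made-up names for illustration.

typedef struct widget_s widget_t;
struct widget_s {
	int id;
	ql_elm(widget_t) link;	/* embedded linkage (a qr ring element) */
};

/* List head; ql_head_initializer() just sets qlh_first to NULL. */
static ql_head(widget_t) widgets = ql_head_initializer(widgets);

static void
widget_append(widget_t *w) {
	ql_elm_new(w, link);			/* self-link the new element */
	ql_tail_insert(&widgets, w, link);	/* O(1) via the ring's prev pointer */
}

static void
widget_visit_all(void) {
	widget_t *w;
	ql_foreach(w, &widgets, link) {
		/* w->id is visited in insertion order */
	}
}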
41
deps/jemalloc/include/jemalloc/internal/qr.h
vendored
@@ -1,38 +1,39 @@
#ifndef JEMALLOC_INTERNAL_QR_H
#define JEMALLOC_INTERNAL_QR_H

/* Ring definitions. */
#define qr(a_type) \
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}

/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)

#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)

#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_after_insert(a_qrelm, a_qr, a_field) \
do \
{ \
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
} while (0)

#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
a_type *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
@@ -40,12 +41,14 @@ struct { \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)

/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
/*
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_type, a_field)

#define qr_remove(a_qr, a_field) do { \
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
@@ -54,14 +57,16 @@ struct { \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_foreach(var, a_qr, a_field) \
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))

#define qr_reverse_foreach(var, a_qr, a_field) \
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))

#endif /* JEMALLOC_INTERNAL_QR_H */
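The notable interface change above is that qr_meld() and qr_split() now take the element type, so the temporary inside the macro is a properly typed a_type * rather than a void *. A short sketch of the updated call style; ring_node_t and its link field are illustrative only.

typedef struct ring_node_s ring_node_t;
struct ring_node_s {
	int value;
	qr(ring_node_t) link;	/* embedded next/prev ring pointers */
};

/*
 * Join the rings containing a and b into one ring. Because meld and split
 * are duals, calling qr_split() with the same arguments undoes the meld.
 */
static void
ring_meld(ring_node_t *a, ring_node_t *b) {
	qr_meld(a, b, ring_node_t, link);
}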
351
deps/jemalloc/include/jemalloc/internal/rb.h
vendored
@@ -20,17 +20,21 @@
|
||||
*/
|
||||
|
||||
#ifndef RB_H_
|
||||
#define RB_H_
|
||||
#define RB_H_
|
||||
|
||||
#ifndef __PGI
|
||||
#define RB_COMPACT
|
||||
#endif
|
||||
|
||||
#ifdef RB_COMPACT
|
||||
/* Node structure. */
|
||||
#define rb_node(a_type) \
|
||||
#define rb_node(a_type) \
|
||||
struct { \
|
||||
a_type *rbn_left; \
|
||||
a_type *rbn_right_red; \
|
||||
}
|
||||
#else
|
||||
#define rb_node(a_type) \
|
||||
#define rb_node(a_type) \
|
||||
struct { \
|
||||
a_type *rbn_left; \
|
||||
a_type *rbn_right; \
|
||||
@@ -39,111 +43,116 @@ struct { \
|
||||
#endif
|
||||
|
||||
/* Root structure. */
|
||||
#define rb_tree(a_type) \
|
||||
#define rb_tree(a_type) \
|
||||
struct { \
|
||||
a_type *rbt_root; \
|
||||
a_type rbt_nil; \
|
||||
}
|
||||
|
||||
/* Left accessors. */
|
||||
#define rbtn_left_get(a_type, a_field, a_node) \
|
||||
#define rbtn_left_get(a_type, a_field, a_node) \
|
||||
((a_node)->a_field.rbn_left)
|
||||
#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
|
||||
#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
|
||||
(a_node)->a_field.rbn_left = a_left; \
|
||||
} while (0)
|
||||
|
||||
#ifdef RB_COMPACT
|
||||
/* Right accessors. */
|
||||
#define rbtn_right_get(a_type, a_field, a_node) \
|
||||
#define rbtn_right_get(a_type, a_field, a_node) \
|
||||
((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
|
||||
& ((ssize_t)-2)))
|
||||
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
|
||||
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
|
||||
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
|
||||
| (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
|
||||
} while (0)
|
||||
|
||||
/* Color accessors. */
|
||||
#define rbtn_red_get(a_type, a_field, a_node) \
|
||||
#define rbtn_red_get(a_type, a_field, a_node) \
|
||||
((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
|
||||
& ((size_t)1)))
|
||||
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
|
||||
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
|
||||
(a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
|
||||
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
|
||||
| ((ssize_t)a_red)); \
|
||||
} while (0)
|
||||
#define rbtn_red_set(a_type, a_field, a_node) do { \
|
||||
#define rbtn_red_set(a_type, a_field, a_node) do { \
|
||||
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
|
||||
(a_node)->a_field.rbn_right_red) | ((size_t)1)); \
|
||||
} while (0)
|
||||
#define rbtn_black_set(a_type, a_field, a_node) do { \
|
||||
#define rbtn_black_set(a_type, a_field, a_node) do { \
|
||||
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
|
||||
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
|
||||
} while (0)
|
||||
|
||||
/* Node initializer. */
|
||||
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
|
||||
/* Bookkeeping bit cannot be used by node pointer. */ \
|
||||
assert(((uintptr_t)(a_node) & 0x1) == 0); \
|
||||
rbtn_left_set(a_type, a_field, (a_node), NULL); \
|
||||
rbtn_right_set(a_type, a_field, (a_node), NULL); \
|
||||
rbtn_red_set(a_type, a_field, (a_node)); \
|
||||
} while (0)
|
||||
#else
|
||||
/* Right accessors. */
|
||||
#define rbtn_right_get(a_type, a_field, a_node) \
|
||||
#define rbtn_right_get(a_type, a_field, a_node) \
|
||||
((a_node)->a_field.rbn_right)
|
||||
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
|
||||
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
|
||||
(a_node)->a_field.rbn_right = a_right; \
|
||||
} while (0)
|
||||
|
||||
/* Color accessors. */
|
||||
#define rbtn_red_get(a_type, a_field, a_node) \
|
||||
#define rbtn_red_get(a_type, a_field, a_node) \
|
||||
((a_node)->a_field.rbn_red)
|
||||
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
|
||||
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
|
||||
(a_node)->a_field.rbn_red = (a_red); \
|
||||
} while (0)
|
||||
#define rbtn_red_set(a_type, a_field, a_node) do { \
|
||||
#define rbtn_red_set(a_type, a_field, a_node) do { \
|
||||
(a_node)->a_field.rbn_red = true; \
|
||||
} while (0)
|
||||
#define rbtn_black_set(a_type, a_field, a_node) do { \
|
||||
#define rbtn_black_set(a_type, a_field, a_node) do { \
|
||||
(a_node)->a_field.rbn_red = false; \
|
||||
} while (0)
|
||||
|
||||
/* Node initializer. */
|
||||
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
|
||||
rbtn_left_set(a_type, a_field, (a_node), NULL); \
|
||||
rbtn_right_set(a_type, a_field, (a_node), NULL); \
|
||||
rbtn_red_set(a_type, a_field, (a_node)); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
/* Node initializer. */
|
||||
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
|
||||
rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
|
||||
rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
|
||||
rbtn_red_set(a_type, a_field, (a_node)); \
|
||||
} while (0)
|
||||
|
||||
/* Tree initializer. */
|
||||
#define rb_new(a_type, a_field, a_rbt) do { \
|
||||
(a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \
|
||||
rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \
|
||||
rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \
|
||||
#define rb_new(a_type, a_field, a_rbt) do { \
|
||||
(a_rbt)->rbt_root = NULL; \
|
||||
} while (0)
|
||||
|
||||
/* Internal utility macros. */
|
||||
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
|
||||
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
|
||||
(r_node) = (a_root); \
|
||||
if ((r_node) != &(a_rbt)->rbt_nil) { \
|
||||
if ((r_node) != NULL) { \
|
||||
for (; \
|
||||
rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\
|
||||
rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
|
||||
(r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
|
||||
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
|
||||
(r_node) = (a_root); \
|
||||
if ((r_node) != &(a_rbt)->rbt_nil) { \
|
||||
for (; rbtn_right_get(a_type, a_field, (r_node)) != \
|
||||
&(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
|
||||
(r_node))) { \
|
||||
if ((r_node) != NULL) { \
|
||||
for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
|
||||
(r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
|
||||
#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
|
||||
(r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
|
||||
rbtn_right_set(a_type, a_field, (a_node), \
|
||||
rbtn_left_get(a_type, a_field, (r_node))); \
|
||||
rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
|
||||
} while (0)
|
||||
|
||||
#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
|
||||
#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
|
||||
(r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
|
||||
rbtn_left_set(a_type, a_field, (a_node), \
|
||||
rbtn_right_get(a_type, a_field, (r_node))); \
|
||||
@@ -155,9 +164,11 @@ struct { \
|
||||
* functions generated by an equivalently parameterized call to rb_gen().
|
||||
*/
|
||||
|
||||
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
|
||||
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
|
||||
a_attr void \
|
||||
a_prefix##new(a_rbt_type *rbtree); \
|
||||
a_attr bool \
|
||||
a_prefix##empty(a_rbt_type *rbtree); \
|
||||
a_attr a_type * \
|
||||
a_prefix##first(a_rbt_type *rbtree); \
|
||||
a_attr a_type * \
|
||||
@@ -167,11 +178,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \
|
||||
a_attr a_type * \
|
||||
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
|
||||
a_attr a_type * \
|
||||
a_prefix##search(a_rbt_type *rbtree, a_type *key); \
|
||||
a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
|
||||
a_attr a_type * \
|
||||
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \
|
||||
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
|
||||
a_attr a_type * \
|
||||
a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \
|
||||
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
|
||||
a_attr void \
|
||||
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
|
||||
a_attr void \
|
||||
@@ -181,7 +192,10 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
|
||||
a_rbt_type *, a_type *, void *), void *arg); \
|
||||
a_attr a_type * \
|
||||
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
|
||||
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
|
||||
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
|
||||
a_attr void \
|
||||
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
|
||||
void *arg);
|
||||
|
||||
/*
|
||||
* The rb_gen() macro generates a type-specific red-black tree implementation,
|
||||
@@ -198,7 +212,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
|
||||
* int (a_cmp *)(a_type *a_node, a_type *a_other);
|
||||
* ^^^^^^
|
||||
* or a_key
|
||||
* Interpretation of comparision function return values:
|
||||
* Interpretation of comparison function return values:
|
||||
* -1 : a_node < a_other
|
||||
* 0 : a_node == a_other
|
||||
* 1 : a_node > a_other
|
||||
@@ -224,6 +238,13 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
|
||||
* Args:
|
||||
* tree: Pointer to an uninitialized red-black tree object.
|
||||
*
|
||||
* static bool
|
||||
* ex_empty(ex_t *tree);
|
||||
* Description: Determine whether tree is empty.
|
||||
* Args:
|
||||
* tree: Pointer to an initialized red-black tree object.
|
||||
* Ret: True if tree is empty, false otherwise.
|
||||
*
|
||||
* static ex_node_t *
|
||||
* ex_first(ex_t *tree);
|
||||
* static ex_node_t *
|
||||
@@ -245,7 +266,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
|
||||
* last/first.
|
||||
*
|
||||
* static ex_node_t *
|
||||
* ex_search(ex_t *tree, ex_node_t *key);
|
||||
* ex_search(ex_t *tree, const ex_node_t *key);
|
||||
* Description: Search for node that matches key.
|
||||
* Args:
|
||||
* tree: Pointer to an initialized red-black tree object.
|
||||
@@ -253,9 +274,9 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
|
||||
* Ret: Node in tree that matches key, or NULL if no match.
|
||||
*
|
||||
* static ex_node_t *
|
||||
* ex_nsearch(ex_t *tree, ex_node_t *key);
|
||||
* ex_nsearch(ex_t *tree, const ex_node_t *key);
|
||||
* static ex_node_t *
|
||||
* ex_psearch(ex_t *tree, ex_node_t *key);
|
||||
* ex_psearch(ex_t *tree, const ex_node_t *key);
|
||||
* Description: Search for node that matches key. If no match is found,
|
||||
* return what would be key's successor/predecessor, were
|
||||
* key in tree.
|
||||
@@ -303,40 +324,52 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
|
||||
* arg : Opaque pointer passed to cb().
|
||||
* Ret: NULL if iteration completed, or the non-NULL callback return value
|
||||
* that caused termination of the iteration.
|
||||
*
|
||||
* static void
|
||||
* ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
|
||||
* Description: Iterate over the tree with post-order traversal, remove
|
||||
* each node, and run the callback if non-null. This is
|
||||
* used for destroying a tree without paying the cost to
|
||||
* rebalance it. The tree must not be otherwise altered
|
||||
* during traversal.
|
||||
* Args:
|
||||
* tree: Pointer to an initialized red-black tree object.
|
||||
* cb : Callback function, which, if non-null, is called for each node
|
||||
* during iteration. There is no way to stop iteration once it
|
||||
* has begun.
|
||||
* arg : Opaque pointer passed to cb().
|
||||
*/
|
||||
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
|
||||
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
|
||||
a_attr void \
|
||||
a_prefix##new(a_rbt_type *rbtree) { \
|
||||
rb_new(a_type, a_field, rbtree); \
|
||||
} \
|
||||
a_attr bool \
|
||||
a_prefix##empty(a_rbt_type *rbtree) { \
|
||||
return (rbtree->rbt_root == NULL); \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##first(a_rbt_type *rbtree) { \
|
||||
a_type *ret; \
|
||||
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = NULL; \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##last(a_rbt_type *rbtree) { \
|
||||
a_type *ret; \
|
||||
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = NULL; \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
|
||||
a_type *ret; \
|
||||
if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
|
||||
if (rbtn_right_get(a_type, a_field, node) != NULL) { \
|
||||
rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), ret); \
|
||||
} else { \
|
||||
a_type *tnode = rbtree->rbt_root; \
|
||||
assert(tnode != &rbtree->rbt_nil); \
|
||||
ret = &rbtree->rbt_nil; \
|
||||
assert(tnode != NULL); \
|
||||
ret = NULL; \
|
||||
while (true) { \
|
||||
int cmp = (a_cmp)(node, tnode); \
|
||||
if (cmp < 0) { \
|
||||
@@ -347,24 +380,21 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
|
||||
} else { \
|
||||
break; \
|
||||
} \
|
||||
assert(tnode != &rbtree->rbt_nil); \
|
||||
assert(tnode != NULL); \
|
||||
} \
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = (NULL); \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
|
||||
a_type *ret; \
|
||||
if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
|
||||
if (rbtn_left_get(a_type, a_field, node) != NULL) { \
|
||||
rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
|
||||
a_field, node), ret); \
|
||||
} else { \
|
||||
a_type *tnode = rbtree->rbt_root; \
|
||||
assert(tnode != &rbtree->rbt_nil); \
|
||||
ret = &rbtree->rbt_nil; \
|
||||
assert(tnode != NULL); \
|
||||
ret = NULL; \
|
||||
while (true) { \
|
||||
int cmp = (a_cmp)(node, tnode); \
|
||||
if (cmp < 0) { \
|
||||
@@ -375,20 +405,17 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
|
||||
} else { \
|
||||
break; \
|
||||
} \
|
||||
assert(tnode != &rbtree->rbt_nil); \
|
||||
assert(tnode != NULL); \
|
||||
} \
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = (NULL); \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
|
||||
a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
|
||||
a_type *ret; \
|
||||
int cmp; \
|
||||
ret = rbtree->rbt_root; \
|
||||
while (ret != &rbtree->rbt_nil \
|
||||
while (ret != NULL \
|
||||
&& (cmp = (a_cmp)(key, ret)) != 0) { \
|
||||
if (cmp < 0) { \
|
||||
ret = rbtn_left_get(a_type, a_field, ret); \
|
||||
@@ -396,17 +423,14 @@ a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
|
||||
ret = rbtn_right_get(a_type, a_field, ret); \
|
||||
} \
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = (NULL); \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
|
||||
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
|
||||
a_type *ret; \
|
||||
a_type *tnode = rbtree->rbt_root; \
|
||||
ret = &rbtree->rbt_nil; \
|
||||
while (tnode != &rbtree->rbt_nil) { \
|
||||
ret = NULL; \
|
||||
while (tnode != NULL) { \
|
||||
int cmp = (a_cmp)(key, tnode); \
|
||||
if (cmp < 0) { \
|
||||
ret = tnode; \
|
||||
@@ -418,17 +442,14 @@ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = (NULL); \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
|
||||
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
|
||||
a_type *ret; \
|
||||
a_type *tnode = rbtree->rbt_root; \
|
||||
ret = &rbtree->rbt_nil; \
|
||||
while (tnode != &rbtree->rbt_nil) { \
|
||||
ret = NULL; \
|
||||
while (tnode != NULL) { \
|
||||
int cmp = (a_cmp)(key, tnode); \
|
||||
if (cmp < 0) { \
|
||||
tnode = rbtn_left_get(a_type, a_field, tnode); \
|
||||
@@ -440,10 +461,7 @@ a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = (NULL); \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr void \
|
||||
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
|
||||
@@ -454,7 +472,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
|
||||
rbt_node_new(a_type, a_field, rbtree, node); \
|
||||
/* Wind. */ \
|
||||
path->node = rbtree->rbt_root; \
|
||||
for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
|
||||
for (pathp = path; pathp->node != NULL; pathp++) { \
|
||||
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
|
||||
assert(cmp != 0); \
|
||||
if (cmp < 0) { \
|
||||
@@ -474,7 +492,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
|
||||
rbtn_left_set(a_type, a_field, cnode, left); \
|
||||
if (rbtn_red_get(a_type, a_field, left)) { \
|
||||
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
|
||||
if (rbtn_red_get(a_type, a_field, leftleft)) { \
|
||||
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
|
||||
leftleft)) { \
|
||||
/* Fix up 4-node. */ \
|
||||
a_type *tnode; \
|
||||
rbtn_black_set(a_type, a_field, leftleft); \
|
||||
@@ -489,7 +508,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
|
||||
rbtn_right_set(a_type, a_field, cnode, right); \
|
||||
if (rbtn_red_get(a_type, a_field, right)) { \
|
||||
a_type *left = rbtn_left_get(a_type, a_field, cnode); \
|
||||
if (rbtn_red_get(a_type, a_field, left)) { \
|
||||
if (left != NULL && rbtn_red_get(a_type, a_field, \
|
||||
left)) { \
|
||||
/* Split 4-node. */ \
|
||||
rbtn_black_set(a_type, a_field, left); \
|
||||
rbtn_black_set(a_type, a_field, right); \
|
||||
@@ -522,7 +542,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
/* Wind. */ \
|
||||
nodep = NULL; /* Silence compiler warning. */ \
|
||||
path->node = rbtree->rbt_root; \
|
||||
for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
|
||||
for (pathp = path; pathp->node != NULL; pathp++) { \
|
||||
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
|
||||
if (cmp < 0) { \
|
||||
pathp[1].node = rbtn_left_get(a_type, a_field, \
|
||||
@@ -534,8 +554,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
/* Find node's successor, in preparation for swap. */ \
|
||||
pathp->cmp = 1; \
|
||||
nodep = pathp; \
|
||||
for (pathp++; pathp->node != &rbtree->rbt_nil; \
|
||||
pathp++) { \
|
||||
for (pathp++; pathp->node != NULL; pathp++) { \
|
||||
pathp->cmp = -1; \
|
||||
pathp[1].node = rbtn_left_get(a_type, a_field, \
|
||||
pathp->node); \
|
||||
@@ -577,10 +596,10 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
} \
|
||||
} else { \
|
||||
a_type *left = rbtn_left_get(a_type, a_field, node); \
|
||||
if (left != &rbtree->rbt_nil) { \
|
||||
if (left != NULL) { \
|
||||
/* node has no successor, but it has a left child. */\
|
||||
/* Splice node out, without losing the left child. */\
|
||||
assert(rbtn_red_get(a_type, a_field, node) == false); \
|
||||
assert(!rbtn_red_get(a_type, a_field, node)); \
|
||||
assert(rbtn_red_get(a_type, a_field, left)); \
|
||||
rbtn_black_set(a_type, a_field, left); \
|
||||
if (pathp == path) { \
|
||||
@@ -597,34 +616,32 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
return; \
|
||||
} else if (pathp == path) { \
|
||||
/* The tree only contained one node. */ \
|
||||
rbtree->rbt_root = &rbtree->rbt_nil; \
|
||||
rbtree->rbt_root = NULL; \
|
||||
return; \
|
||||
} \
|
||||
} \
|
||||
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
|
||||
/* Prune red node, which requires no fixup. */ \
|
||||
assert(pathp[-1].cmp < 0); \
|
||||
rbtn_left_set(a_type, a_field, pathp[-1].node, \
|
||||
&rbtree->rbt_nil); \
|
||||
rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
|
||||
return; \
|
||||
} \
|
||||
/* The node to be pruned is black, so unwind until balance is */\
|
||||
/* restored. */\
|
||||
pathp->node = &rbtree->rbt_nil; \
|
||||
pathp->node = NULL; \
|
||||
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
|
||||
assert(pathp->cmp != 0); \
|
||||
if (pathp->cmp < 0) { \
|
||||
rbtn_left_set(a_type, a_field, pathp->node, \
|
||||
pathp[1].node); \
|
||||
assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
|
||||
== false); \
|
||||
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
|
||||
a_type *right = rbtn_right_get(a_type, a_field, \
|
||||
pathp->node); \
|
||||
a_type *rightleft = rbtn_left_get(a_type, a_field, \
|
||||
right); \
|
||||
a_type *tnode; \
|
||||
if (rbtn_red_get(a_type, a_field, rightleft)) { \
|
||||
if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
|
||||
rightleft)) { \
|
||||
/* In the following diagrams, ||, //, and \\ */\
|
||||
/* indicate the path to the removed node. */\
|
||||
/* */\
|
||||
@@ -667,7 +684,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
pathp->node); \
|
||||
a_type *rightleft = rbtn_left_get(a_type, a_field, \
|
||||
right); \
|
||||
if (rbtn_red_get(a_type, a_field, rightleft)) { \
|
||||
if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
|
||||
rightleft)) { \
|
||||
/* || */\
|
||||
/* pathp(b) */\
|
||||
/* // \ */\
|
||||
@@ -681,7 +699,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
rbtn_rotate_left(a_type, a_field, pathp->node, \
|
||||
tnode); \
|
||||
/* Balance restored, but rotation modified */\
|
||||
/* subree root, which may actually be the tree */\
|
||||
/* subtree root, which may actually be the tree */\
|
||||
/* root. */\
|
||||
if (pathp == path) { \
|
||||
/* Set root. */ \
|
||||
@@ -721,7 +739,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
left); \
|
||||
a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
|
||||
leftright); \
|
||||
if (rbtn_red_get(a_type, a_field, leftrightleft)) { \
|
||||
if (leftrightleft != NULL && rbtn_red_get(a_type, \
|
||||
a_field, leftrightleft)) { \
|
||||
/* || */\
|
||||
/* pathp(b) */\
|
||||
/* / \\ */\
|
||||
@@ -747,7 +766,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
/* (b) */\
|
||||
/* / */\
|
||||
/* (b) */\
|
||||
assert(leftright != &rbtree->rbt_nil); \
|
||||
assert(leftright != NULL); \
|
||||
rbtn_red_set(a_type, a_field, leftright); \
|
||||
rbtn_rotate_right(a_type, a_field, pathp->node, \
|
||||
tnode); \
|
||||
@@ -770,7 +789,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
return; \
|
||||
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
|
||||
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
|
||||
if (rbtn_red_get(a_type, a_field, leftleft)) { \
|
||||
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
|
||||
leftleft)) { \
|
||||
/* || */\
|
||||
/* pathp(r) */\
|
||||
/* / \\ */\
|
||||
@@ -808,7 +828,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
} \
|
||||
} else { \
|
||||
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
|
||||
if (rbtn_red_get(a_type, a_field, leftleft)) { \
|
||||
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
|
||||
leftleft)) { \
|
||||
/* || */\
|
||||
/* pathp(b) */\
|
||||
/* / \\ */\
|
||||
@@ -849,22 +870,22 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
|
||||
} \
|
||||
/* Set root. */ \
|
||||
rbtree->rbt_root = path->node; \
|
||||
assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \
|
||||
assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
|
||||
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
|
||||
if (node == &rbtree->rbt_nil) { \
|
||||
return (&rbtree->rbt_nil); \
|
||||
if (node == NULL) { \
|
||||
return NULL; \
|
||||
} else { \
|
||||
a_type *ret; \
|
||||
if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
|
||||
a_field, node), cb, arg)) != &rbtree->rbt_nil \
|
||||
|| (ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return (ret); \
|
||||
a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
|
||||
arg)) != NULL) { \
|
||||
return ret; \
|
||||
} \
|
||||
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), cb, arg)); \
|
||||
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), cb, arg); \
|
||||
} \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
@@ -874,22 +895,22 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
|
||||
if (cmp < 0) { \
|
||||
a_type *ret; \
|
||||
if ((ret = a_prefix##iter_start(rbtree, start, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg)) != \
|
||||
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return (ret); \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
|
||||
(ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return ret; \
|
||||
} \
|
||||
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), cb, arg)); \
|
||||
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), cb, arg); \
|
||||
} else if (cmp > 0) { \
|
||||
return (a_prefix##iter_start(rbtree, start, \
|
||||
rbtn_right_get(a_type, a_field, node), cb, arg)); \
|
||||
return a_prefix##iter_start(rbtree, start, \
|
||||
rbtn_right_get(a_type, a_field, node), cb, arg); \
|
||||
} else { \
|
||||
a_type *ret; \
|
||||
if ((ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), cb, arg)); \
|
||||
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
|
||||
a_field, node), cb, arg); \
|
||||
} \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
@@ -902,25 +923,22 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
|
||||
} else { \
|
||||
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = NULL; \
|
||||
} \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
|
||||
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
|
||||
if (node == &rbtree->rbt_nil) { \
|
||||
return (&rbtree->rbt_nil); \
|
||||
if (node == NULL) { \
|
||||
return NULL; \
|
||||
} else { \
|
||||
a_type *ret; \
|
||||
if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_right_get(a_type, a_field, node), cb, arg)) != \
|
||||
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return (ret); \
|
||||
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
|
||||
(ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return ret; \
|
||||
} \
|
||||
return (a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg)); \
|
||||
return a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg); \
|
||||
} \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
@@ -931,22 +949,22 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
|
||||
if (cmp > 0) { \
|
||||
a_type *ret; \
|
||||
if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
|
||||
rbtn_right_get(a_type, a_field, node), cb, arg)) != \
|
||||
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return (ret); \
|
||||
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
|
||||
(ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return ret; \
|
||||
} \
|
||||
return (a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg)); \
|
||||
return a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg); \
|
||||
} else if (cmp < 0) { \
|
||||
return (a_prefix##reverse_iter_start(rbtree, start, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg)); \
|
||||
return a_prefix##reverse_iter_start(rbtree, start, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg); \
|
||||
} else { \
|
||||
a_type *ret; \
|
||||
if ((ret = cb(rbtree, node, arg)) != NULL) { \
|
||||
return (ret); \
|
||||
return ret; \
|
||||
} \
|
||||
return (a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg)); \
|
||||
return a_prefix##reverse_iter_recurse(rbtree, \
|
||||
rbtn_left_get(a_type, a_field, node), cb, arg); \
|
||||
} \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
@@ -960,10 +978,29 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
|
||||
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
|
||||
cb, arg); \
|
||||
} \
|
||||
if (ret == &rbtree->rbt_nil) { \
|
||||
ret = NULL; \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr void \
|
||||
a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
|
||||
a_type *, void *), void *arg) { \
|
||||
if (node == NULL) { \
|
||||
return; \
|
||||
} \
|
||||
return (ret); \
|
||||
a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
|
||||
node), cb, arg); \
|
||||
rbtn_left_set(a_type, a_field, (node), NULL); \
|
||||
a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
|
||||
node), cb, arg); \
|
||||
rbtn_right_set(a_type, a_field, (node), NULL); \
|
||||
if (cb) { \
|
||||
cb(node, arg); \
|
||||
} \
|
||||
} \
|
||||
a_attr void \
|
||||
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
|
||||
void *arg) { \
|
||||
a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
|
||||
rbtree->rbt_root = NULL; \
|
||||
}
|
||||
|
||||
#endif /* RB_H_ */
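The rb_proto()/rb_gen() documentation above is easier to follow with a concrete instantiation. A minimal sketch using the ex_ prefix from the comments; the node type, key field, and comparison function are made up for illustration and follow the -1/0/1 contract described above.

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	int key;
	rb_node(ex_node_t) ex_link;	/* embedded left/right(+color) links */
};
typedef rb_tree(ex_node_t) ex_t;

static int
ex_cmp(const ex_node_t *a, const ex_node_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

/*
 * Emits static ex_new(), ex_empty(), ex_insert(), ex_remove(), ex_search(),
 * ex_iter(), ex_destroy(), etc., specialized for ex_node_t.
 */
rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)

A caller then declares an ex_t, initializes it with ex_new(), inserts nodes with ex_insert(), and can tear everything down with ex_destroy() without paying for per-node rebalancing.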
610
deps/jemalloc/include/jemalloc/internal/rtree.h
vendored
@@ -1,172 +1,474 @@
|
||||
#ifndef JEMALLOC_INTERNAL_RTREE_H
|
||||
#define JEMALLOC_INTERNAL_RTREE_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/rtree_tsd.h"
|
||||
#include "jemalloc/internal/size_classes.h"
|
||||
#include "jemalloc/internal/tsd.h"
|
||||
|
||||
/*
|
||||
* This radix tree implementation is tailored to the singular purpose of
|
||||
* tracking which chunks are currently owned by jemalloc. This functionality
|
||||
* is mandatory for OS X, where jemalloc must be able to respond to object
|
||||
* ownership queries.
|
||||
* associating metadata with extents that are currently owned by jemalloc.
|
||||
*
|
||||
*******************************************************************************
|
||||
*/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
typedef struct rtree_s rtree_t;
|
||||
/* Number of high insignificant bits. */
|
||||
#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
|
||||
/* Number of low insignificant bits. */
|
||||
#define RTREE_NLIB LG_PAGE
|
||||
/* Number of significant bits. */
|
||||
#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
|
||||
/* Number of levels in radix tree. */
|
||||
#if RTREE_NSB <= 10
|
||||
# define RTREE_HEIGHT 1
|
||||
#elif RTREE_NSB <= 36
|
||||
# define RTREE_HEIGHT 2
|
||||
#elif RTREE_NSB <= 52
|
||||
# define RTREE_HEIGHT 3
|
||||
#else
|
||||
# error Unsupported number of significant virtual address bits
|
||||
#endif
|
||||
/* Use compact leaf representation if virtual address encoding allows. */
|
||||
#if RTREE_NHIB >= LG_CEIL_NSIZES
|
||||
# define RTREE_LEAF_COMPACT
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Size of each radix tree node (must be a power of 2). This impacts tree
|
||||
* depth.
|
||||
*/
|
||||
#define RTREE_NODESIZE (1U << 16)
|
||||
/* Needed for initialization only. */
|
||||
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
|
||||
|
||||
typedef void *(rtree_alloc_t)(size_t);
|
||||
typedef void (rtree_dalloc_t)(void *);
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
struct rtree_s {
|
||||
rtree_alloc_t *alloc;
|
||||
rtree_dalloc_t *dalloc;
|
||||
malloc_mutex_t mutex;
|
||||
void **root;
|
||||
unsigned height;
|
||||
unsigned level2bits[1]; /* Dynamically sized. */
|
||||
typedef struct rtree_node_elm_s rtree_node_elm_t;
|
||||
struct rtree_node_elm_s {
|
||||
atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
|
||||
void rtree_delete(rtree_t *rtree);
|
||||
void rtree_prefork(rtree_t *rtree);
|
||||
void rtree_postfork_parent(rtree_t *rtree);
|
||||
void rtree_postfork_child(rtree_t *rtree);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
|
||||
#endif
|
||||
uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
|
||||
bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
|
||||
#define RTREE_GET_GENERATE(f) \
|
||||
/* The least significant bits of the key are ignored. */ \
|
||||
JEMALLOC_INLINE uint8_t \
|
||||
f(rtree_t *rtree, uintptr_t key) \
|
||||
{ \
|
||||
uint8_t ret; \
|
||||
uintptr_t subkey; \
|
||||
unsigned i, lshift, height, bits; \
|
||||
void **node, **child; \
|
||||
\
|
||||
RTREE_LOCK(&rtree->mutex); \
|
||||
for (i = lshift = 0, height = rtree->height, node = rtree->root;\
|
||||
i < height - 1; \
|
||||
i++, lshift += bits, node = child) { \
|
||||
bits = rtree->level2bits[i]; \
|
||||
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
|
||||
3)) - bits); \
|
||||
child = (void**)node[subkey]; \
|
||||
if (child == NULL) { \
|
||||
RTREE_UNLOCK(&rtree->mutex); \
|
||||
return (0); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
/* \
|
||||
* node is a leaf, so it contains values rather than node \
|
||||
* pointers. \
|
||||
*/ \
|
||||
bits = rtree->level2bits[i]; \
|
||||
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
|
||||
bits); \
|
||||
{ \
|
||||
uint8_t *leaf = (uint8_t *)node; \
|
||||
ret = leaf[subkey]; \
|
||||
} \
|
||||
RTREE_UNLOCK(&rtree->mutex); \
|
||||
\
|
||||
RTREE_GET_VALIDATE \
|
||||
return (ret); \
|
||||
}
|
||||
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
# define RTREE_LOCK(l) malloc_mutex_lock(l)
|
||||
# define RTREE_UNLOCK(l) malloc_mutex_unlock(l)
|
||||
# define RTREE_GET_VALIDATE
|
||||
RTREE_GET_GENERATE(rtree_get_locked)
|
||||
# undef RTREE_LOCK
|
||||
# undef RTREE_UNLOCK
|
||||
# undef RTREE_GET_VALIDATE
|
||||
#endif
|
||||
|
||||
#define RTREE_LOCK(l)
|
||||
#define RTREE_UNLOCK(l)
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
/*
|
||||
* Suppose that it were possible for a jemalloc-allocated chunk to be
|
||||
* munmap()ped, followed by a different allocator in another thread re-using
|
||||
* overlapping virtual memory, all without invalidating the cached rtree
|
||||
* value. The result would be a false positive (the rtree would claim that
|
||||
* jemalloc owns memory that it had actually discarded). This scenario
|
||||
* seems impossible, but the following assertion is a prudent sanity check.
|
||||
*/
|
||||
# define RTREE_GET_VALIDATE \
|
||||
assert(rtree_get_locked(rtree, key) == ret);
|
||||
struct rtree_leaf_elm_s {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
/*
|
||||
* Single pointer-width field containing all three leaf element fields.
|
||||
* For example, on a 64-bit x64 system with 48 significant virtual
|
||||
* memory address bits, the index, extent, and slab fields are packed as
|
||||
* such:
|
||||
*
|
||||
* x: index
|
||||
* e: extent
|
||||
* b: slab
|
||||
*
|
||||
* 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
|
||||
*/
|
||||
atomic_p_t le_bits;
|
||||
#else
|
||||
# define RTREE_GET_VALIDATE
|
||||
atomic_p_t le_extent; /* (extent_t *) */
|
||||
atomic_u_t le_szind; /* (szind_t) */
|
||||
atomic_b_t le_slab; /* (bool) */
|
||||
#endif
|
||||
RTREE_GET_GENERATE(rtree_get)
|
||||
#undef RTREE_LOCK
|
||||
#undef RTREE_UNLOCK
|
||||
#undef RTREE_GET_VALIDATE
|
||||
};
|
||||
|
||||
JEMALLOC_INLINE bool
|
||||
rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
|
||||
{
|
||||
uintptr_t subkey;
|
||||
unsigned i, lshift, height, bits;
|
||||
void **node, **child;
|
||||
typedef struct rtree_level_s rtree_level_t;
|
||||
struct rtree_level_s {
|
||||
/* Number of key bits distinguished by this level. */
|
||||
unsigned bits;
|
||||
/*
|
||||
* Cumulative number of key bits distinguished by traversing to
|
||||
* corresponding tree level.
|
||||
*/
|
||||
unsigned cumbits;
|
||||
};
|
||||
|
||||
malloc_mutex_lock(&rtree->mutex);
|
||||
for (i = lshift = 0, height = rtree->height, node = rtree->root;
|
||||
i < height - 1;
|
||||
i++, lshift += bits, node = child) {
|
||||
bits = rtree->level2bits[i];
|
||||
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
|
||||
bits);
|
||||
child = (void**)node[subkey];
|
||||
if (child == NULL) {
|
||||
size_t size = ((i + 1 < height - 1) ? sizeof(void *)
|
||||
: (sizeof(uint8_t))) << rtree->level2bits[i+1];
|
||||
child = (void**)rtree->alloc(size);
|
||||
if (child == NULL) {
|
||||
malloc_mutex_unlock(&rtree->mutex);
|
||||
return (true);
|
||||
}
|
||||
memset(child, 0, size);
|
||||
node[subkey] = child;
|
||||
}
|
||||
}
|
||||
typedef struct rtree_s rtree_t;
|
||||
struct rtree_s {
|
||||
malloc_mutex_t init_lock;
|
||||
/* Number of elements based on rtree_levels[0].bits. */
|
||||
#if RTREE_HEIGHT > 1
|
||||
rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
|
||||
#else
|
||||
rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
|
||||
#endif
|
||||
};
|
||||
|
||||
/* node is a leaf, so it contains values rather than node pointers. */
|
||||
bits = rtree->level2bits[i];
|
||||
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
|
||||
{
|
||||
uint8_t *leaf = (uint8_t *)node;
|
||||
leaf[subkey] = val;
|
||||
}
|
||||
malloc_mutex_unlock(&rtree->mutex);
|
||||
/*
|
||||
* Split the bits into one to three partitions depending on number of
|
||||
* significant bits. If the number of bits does not divide evenly into the
|
||||
* number of levels, place one remainder bit per level starting at the leaf
|
||||
* level.
|
||||
*/
|
||||
static const rtree_level_t rtree_levels[] = {
|
||||
#if RTREE_HEIGHT == 1
|
||||
{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
|
||||
#elif RTREE_HEIGHT == 2
|
||||
{RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
|
||||
{RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
|
||||
#elif RTREE_HEIGHT == 3
|
||||
{RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
|
||||
{RTREE_NSB/3 + RTREE_NSB%3/2,
|
||||
RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
|
||||
{RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
|
||||
#else
|
||||
# error Unsupported rtree height
|
||||
#endif
|
||||
};
|
||||
|
||||
return (false);
|
||||
bool rtree_new(rtree_t *rtree, bool zeroed);
|
||||
|
||||
typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
|
||||
extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
|
||||
|
||||
typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
|
||||
extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
|
||||
|
||||
typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
|
||||
extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
|
||||
|
||||
typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
|
||||
extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
|
||||
#ifdef JEMALLOC_JET
|
||||
void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
|
||||
#endif
|
||||
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uintptr_t
|
||||
rtree_leafkey(uintptr_t key) {
|
||||
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
|
||||
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
|
||||
rtree_levels[RTREE_HEIGHT-1].bits);
|
||||
unsigned maskbits = ptrbits - cumbits;
|
||||
uintptr_t mask = ~((ZU(1) << maskbits) - 1);
|
||||
return (key & mask);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
rtree_cache_direct_map(uintptr_t key) {
|
||||
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
|
||||
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
|
||||
rtree_levels[RTREE_HEIGHT-1].bits);
|
||||
unsigned maskbits = ptrbits - cumbits;
|
||||
return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uintptr_t
|
||||
rtree_subkey(uintptr_t key, unsigned level) {
|
||||
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
|
||||
unsigned cumbits = rtree_levels[level].cumbits;
|
||||
unsigned shiftbits = ptrbits - cumbits;
|
||||
unsigned maskbits = rtree_levels[level].bits;
|
||||
uintptr_t mask = (ZU(1) << maskbits) - 1;
|
||||
return ((key >> shiftbits) & mask);
|
||||
}
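As a worked illustration of the key splitting implemented by rtree_leafkey() and rtree_subkey() above, assume a typical 64-bit configuration with LG_SIZEOF_PTR == 3, LG_VADDR == 48, and LG_PAGE == 12: then RTREE_NHIB == 16, RTREE_NLIB == 12, RTREE_NSB == 36, RTREE_HEIGHT == 2, and each level distinguishes 18 key bits. Under those assumptions rtree_subkey() reduces to the shift-and-mask arithmetic sketched below; the example_ name is not part of jemalloc.

#include <assert.h>
#include <stdint.h>

/* Specialization of rtree_subkey() for the 48-bit VA / 4 KiB page case. */
static inline uintptr_t
example_rtree_subkey_48(uintptr_t key, unsigned level) {
	assert(level < 2);
	/* Level 0 consumes key bits [47..30]; level 1 consumes bits [29..12]. */
	unsigned shiftbits = (level == 0) ? 30 : 12;
	return (key >> shiftbits) & (((uintptr_t)1 << 18) - 1);
}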
/*
|
||||
* Atomic getters.
|
||||
*
|
||||
* dependent: Reading a value on behalf of a pointer to a valid allocation
|
||||
* is guaranteed to be a clean read even without synchronization,
|
||||
* because the rtree update became visible in memory before the
|
||||
* pointer came into existence.
|
||||
* !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
|
||||
* dependent on a previous rtree write, which means a stale read
|
||||
* could result if synchronization were omitted here.
|
||||
*/
|
||||
# ifdef RTREE_LEAF_COMPACT
|
||||
JEMALLOC_ALWAYS_INLINE uintptr_t
|
||||
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool dependent) {
|
||||
return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
|
||||
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE extent_t *
|
||||
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
|
||||
/* Restore sign-extended high bits, mask slab bit. */
|
||||
return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
|
||||
RTREE_NHIB) & ~((uintptr_t)0x1));
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE szind_t
|
||||
rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
|
||||
return (szind_t)(bits >> LG_VADDR);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
|
||||
return (bool)(bits & (uintptr_t)0x1);
|
||||
}
|
||||
|
||||
# endif

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
	return rtree_leaf_elm_bits_extent_get(bits);
#else
	extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
	return extent;
#endif
}

JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
	return rtree_leaf_elm_bits_szind_get(bits);
#else
	return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
	    : ATOMIC_ACQUIRE);
#endif
}

JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
	return rtree_leaf_elm_bits_slab_get(bits);
#else
	return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
	    ATOMIC_ACQUIRE);
#endif
}

static inline void
rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    extent_t *extent) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
	uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
	    LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
	    | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
	atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
#endif
}

static inline void
rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    szind_t szind) {
	assert(szind <= NSIZES);

#ifdef RTREE_LEAF_COMPACT
	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
	    true);
	uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
	    ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
	    (((uintptr_t)0x1 << LG_VADDR) - 1)) |
	    ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
	atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
#endif
}

static inline void
rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool slab) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
	    true);
	uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
	    LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
	    (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
	atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
#endif
}

static inline void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    extent_t *extent, szind_t szind, bool slab) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
	    ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
	    ((uintptr_t)slab);
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
	rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
	rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
	/*
	 * Write extent last, since the element is atomically considered valid
	 * as soon as the extent field is non-NULL.
	 */
	rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
#endif
}
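In the non-compact variant the three stores above are ordered on purpose: slab and szind are written first, and the release-store of the extent pointer is what publishes them, so a reader that sees a non-NULL extent can safely consume the other two fields. A small generic sketch of that publish/observe pattern with C11 atomics (the demo_* types are illustrative, not jemalloc's):

#include <stdatomic.h>
#include <stddef.h>

typedef struct {
	_Atomic(void *) extent;	/* NULL means "slot not valid yet". */
	_Atomic unsigned szind;
	_Atomic _Bool slab;
} demo_elm_t;

static void
demo_publish(demo_elm_t *elm, void *extent, unsigned szind, _Bool slab) {
	atomic_store_explicit(&elm->slab, slab, memory_order_release);
	atomic_store_explicit(&elm->szind, szind, memory_order_release);
	/* Store the extent last; its release ordering publishes the rest. */
	atomic_store_explicit(&elm->extent, extent, memory_order_release);
}

static void *
demo_try_read(demo_elm_t *elm, unsigned *szind, _Bool *slab) {
	void *extent = atomic_load_explicit(&elm->extent, memory_order_acquire);
	if (extent == NULL) {
		return NULL;	/* Not published yet. */
	}
	*szind = atomic_load_explicit(&elm->szind, memory_order_relaxed);
	*slab = atomic_load_explicit(&elm->slab, memory_order_relaxed);
	return extent;
}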

static inline void
rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
	assert(!slab || szind < NBINS);

	/*
	 * The caller implicitly assures that it is the only writer to the szind
	 * and slab fields, and that the extent field cannot currently change.
	 */
	rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
	rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
}

JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, bool init_missing) {
	assert(key != 0);
	assert(!dependent || !init_missing);

	size_t slot = rtree_cache_direct_map(key);
	uintptr_t leafkey = rtree_leafkey(key);
	assert(leafkey != RTREE_LEAFKEY_INVALID);

	/* Fast path: L1 direct mapped cache. */
	if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
		rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
		assert(leaf != NULL);
		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
		return &leaf[subkey];
	}
	/*
	 * Search the L2 LRU cache.  On hit, swap the matching element into the
	 * slot in L1 cache, and move the position in L2 up by 1.
	 */
#define RTREE_CACHE_CHECK_L2(i) do {					\
	if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) {	\
		rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf;	\
		assert(leaf != NULL);					\
		if (i > 0) {						\
			/* Bubble up by one. */				\
			rtree_ctx->l2_cache[i].leafkey =		\
				rtree_ctx->l2_cache[i - 1].leafkey;	\
			rtree_ctx->l2_cache[i].leaf =			\
				rtree_ctx->l2_cache[i - 1].leaf;	\
			rtree_ctx->l2_cache[i - 1].leafkey =		\
				rtree_ctx->cache[slot].leafkey;		\
			rtree_ctx->l2_cache[i - 1].leaf =		\
				rtree_ctx->cache[slot].leaf;		\
		} else {						\
			rtree_ctx->l2_cache[0].leafkey =		\
				rtree_ctx->cache[slot].leafkey;		\
			rtree_ctx->l2_cache[0].leaf =			\
				rtree_ctx->cache[slot].leaf;		\
		}							\
		rtree_ctx->cache[slot].leafkey = leafkey;		\
		rtree_ctx->cache[slot].leaf = leaf;			\
		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);	\
		return &leaf[subkey];					\
	}								\
} while (0)
	/* Check the first cache entry. */
	RTREE_CACHE_CHECK_L2(0);
	/* Search the remaining cache elements. */
	for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
		RTREE_CACHE_CHECK_L2(i);
	}
#undef RTREE_CACHE_CHECK_L2

	return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
	    dependent, init_missing);
}
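The lookup above is effectively a two-level software cache: a direct-mapped L1 indexed from the key, backed by a short L2 array kept in roughly LRU order by bubbling a hit one slot toward the front and demoting the displaced L1 entry. A stripped-down sketch of the same policy for a generic key/value cache (sizes and names invented for illustration; on a miss a real caller falls through to a slow path, as rtree_leaf_elm_lookup_hard() is used above):

#include <stddef.h>
#include <stdint.h>

#define DEMO_NCACHE    16	/* L1: direct mapped. */
#define DEMO_NCACHE_L2  8	/* L2: searched linearly, bubble-up on hit. */

typedef struct { uintptr_t key; void *val; } demo_entry_t;

typedef struct {
	demo_entry_t l1[DEMO_NCACHE];
	demo_entry_t l2[DEMO_NCACHE_L2];
} demo_cache_t;

static void *
demo_cache_lookup(demo_cache_t *c, uintptr_t key) {
	size_t slot = (key >> 4) & (DEMO_NCACHE - 1);	/* Cheap direct map. */

	if (c->l1[slot].key == key) {
		return c->l1[slot].val;			/* L1 hit: fast path. */
	}
	for (unsigned i = 0; i < DEMO_NCACHE_L2; i++) {
		if (c->l2[i].key != key) {
			continue;
		}
		demo_entry_t hit = c->l2[i];
		/* Promote the hit into L1; demote the L1 victim one step. */
		c->l2[i] = (i > 0) ? c->l2[i - 1] : c->l1[slot];
		if (i > 0) {
			c->l2[i - 1] = c->l1[slot];
		}
		c->l1[slot] = hit;
		return hit.val;
	}
	return NULL;	/* Miss: caller falls back to the slow lookup. */
}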

static inline bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    extent_t *extent, szind_t szind, bool slab) {
	/* Use rtree_clear() to set the extent to NULL. */
	assert(extent != NULL);

	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, false, true);
	if (elm == NULL) {
		return true;
	}

	assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
	rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);

	return false;
}

JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    bool dependent) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, dependent, false);
	if (!dependent && elm == NULL) {
		return NULL;
	}
	assert(elm != NULL);
	return elm;
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return NULL;
	}
	return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
}

JEMALLOC_ALWAYS_INLINE szind_t
rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return NSIZES;
	}
	return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
}

/*
 * rtree_slab_read() is intentionally omitted because slab is always read in
 * conjunction with szind, which makes rtree_szind_slab_read() a better choice.
 */

JEMALLOC_ALWAYS_INLINE bool
rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return true;
	}
	*r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
	*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
	return false;
}

JEMALLOC_ALWAYS_INLINE bool
rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return true;
	}
	*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
	*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
	return false;
}

static inline void
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, szind_t szind, bool slab) {
	assert(!slab || szind < NBINS);

	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
	rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
}

static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
	assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
	    NULL);
	rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false);
}
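Putting the pieces together, a mapping normally goes through rtree_write() when its extent is created, the read helpers on the allocation/deallocation fast paths, and rtree_clear() when the extent dies. A hedged sketch of a caller driving that life cycle (setup elided, the demo_* names are illustrative):

static bool
demo_register_extent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    extent_t *extent, void *addr, szind_t szind, bool slab) {
	/* true means the leaf could not be created (e.g. OOM). */
	return rtree_write(tsdn, rtree, rtree_ctx, (uintptr_t)addr, extent,
	    szind, slab);
}

static void
demo_deregister_extent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    void *addr) {
	szind_t szind;
	bool slab;

	/* Deallocation path: addr is known valid, so dependent == true. */
	rtree_szind_slab_read(tsdn, rtree, rtree_ctx, (uintptr_t)addr, true,
	    &szind, &slab);
	rtree_clear(tsdn, rtree, rtree_ctx, (uintptr_t)addr);
}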

#endif /* JEMALLOC_INTERNAL_RTREE_H */
2091	deps/jemalloc/include/jemalloc/internal/size_classes.h (vendored)
File diff suppressed because it is too large
231	deps/jemalloc/include/jemalloc/internal/stats.h (vendored)
@@ -1,31 +1,51 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
#ifndef JEMALLOC_INTERNAL_STATS_H
|
||||
#define JEMALLOC_INTERNAL_STATS_H
|
||||
|
||||
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
|
||||
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
|
||||
typedef struct malloc_large_stats_s malloc_large_stats_t;
|
||||
typedef struct arena_stats_s arena_stats_t;
|
||||
typedef struct chunk_stats_s chunk_stats_t;
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/size_classes.h"
|
||||
#include "jemalloc/internal/stats_tsd.h"
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
/* OPTION(opt, var_name, default, set_value_to) */
|
||||
#define STATS_PRINT_OPTIONS \
|
||||
OPTION('J', json, false, true) \
|
||||
OPTION('g', general, true, false) \
|
||||
OPTION('m', merged, config_stats, false) \
|
||||
OPTION('d', destroyed, config_stats, false) \
|
||||
OPTION('a', unmerged, config_stats, false) \
|
||||
OPTION('b', bins, true, false) \
|
||||
OPTION('l', large, true, false) \
|
||||
OPTION('x', mutex, true, false)
|
||||
|
||||
struct tcache_bin_stats_s {
|
||||
/*
|
||||
* Number of allocation requests that corresponded to the size of this
|
||||
* bin.
|
||||
*/
|
||||
uint64_t nrequests;
|
||||
enum {
|
||||
#define OPTION(o, v, d, s) stats_print_option_num_##v,
|
||||
STATS_PRINT_OPTIONS
|
||||
#undef OPTION
|
||||
stats_print_tot_num_options
|
||||
};
|
||||
|
||||
struct malloc_bin_stats_s {
|
||||
/*
|
||||
* Current number of bytes allocated, including objects currently
|
||||
* cached by tcache.
|
||||
*/
|
||||
size_t allocated;
|
||||
/* Options for stats_print. */
|
||||
extern bool opt_stats_print;
|
||||
extern char opt_stats_print_opts[stats_print_tot_num_options+1];
|
||||
|
||||
/* Implements je_malloc_stats_print. */
|
||||
void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
|
||||
const char *opts);
|
||||
|
||||
/*
|
||||
* In those architectures that support 64-bit atomics, we use atomic updates for
|
||||
* our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
|
||||
* externally.
|
||||
*/
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
typedef atomic_u64_t arena_stats_u64_t;
|
||||
#else
|
||||
/* Must hold the arena stats mutex while reading atomically. */
|
||||
typedef uint64_t arena_stats_u64_t;
|
||||
#endif
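The typedef above lets the same counter code compile both ways: with native 64-bit atomics the counter is an atomic_u64_t, otherwise it is a plain uint64_t that must only be touched while the arena stats mutex is held. A sketch of the accessor pattern this implies, assuming jemalloc's atomic_load_u64()/atomic_fetch_add_u64() helpers from atomic.h (simplified names, not the actual arena_stats functions):

static uint64_t
demo_stats_read_u64(malloc_mutex_t *mtx, arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	(void)mtx;
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	/* No 64-bit atomics: the caller's stats mutex serializes access. */
	uint64_t val = *p;
	return val;
#endif
}

static void
demo_stats_add_u64(arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
	*p += x;	/* Caller must hold the arena stats mutex. */
#endif
}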
|
||||
|
||||
typedef struct malloc_bin_stats_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the bin. Note that tcache may allocate an object, then recycle it
|
||||
@@ -42,132 +62,103 @@ struct malloc_bin_stats_s {
|
||||
*/
|
||||
uint64_t nrequests;
|
||||
|
||||
/*
|
||||
* Current number of regions of this size class, including regions
|
||||
* currently cached by tcache.
|
||||
*/
|
||||
size_t curregs;
|
||||
|
||||
/* Number of tcache fills from this bin. */
|
||||
uint64_t nfills;
|
||||
|
||||
/* Number of tcache flushes to this bin. */
|
||||
uint64_t nflushes;
|
||||
|
||||
/* Total number of runs created for this bin's size class. */
|
||||
uint64_t nruns;
|
||||
/* Total number of slabs created for this bin's size class. */
|
||||
uint64_t nslabs;
|
||||
|
||||
/*
|
||||
* Total number of runs reused by extracting them from the runs tree for
|
||||
* this bin's size class.
|
||||
* Total number of slabs reused by extracting them from the slabs heap
|
||||
* for this bin's size class.
|
||||
*/
|
||||
uint64_t reruns;
|
||||
uint64_t reslabs;
|
||||
|
||||
/* Current number of runs in this bin. */
|
||||
size_t curruns;
|
||||
};
|
||||
/* Current number of slabs in this bin. */
|
||||
size_t curslabs;
|
||||
|
||||
struct malloc_large_stats_s {
|
||||
mutex_prof_data_t mutex_data;
|
||||
} malloc_bin_stats_t;
|
||||
|
||||
typedef struct malloc_large_stats_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the arena. Note that tcache may allocate an object, then recycle it
|
||||
 * many times, resulting in many increments to nrequests, but only one
|
||||
* each to nmalloc and ndalloc.
|
||||
* the arena.
|
||||
*/
|
||||
uint64_t nmalloc;
|
||||
uint64_t ndalloc;
|
||||
arena_stats_u64_t nmalloc;
|
||||
arena_stats_u64_t ndalloc;
|
||||
|
||||
/*
|
||||
* Number of allocation requests that correspond to this size class.
|
||||
* This includes requests served by tcache, though tcache only
|
||||
* periodically merges into this counter.
|
||||
*/
|
||||
uint64_t nrequests;
|
||||
arena_stats_u64_t nrequests; /* Partially derived. */
|
||||
|
||||
/* Current number of runs of this size class. */
|
||||
size_t curruns;
|
||||
};
|
||||
/* Current number of allocations of this size class. */
|
||||
size_t curlextents; /* Derived. */
|
||||
} malloc_large_stats_t;
|
||||
|
||||
struct arena_stats_s {
|
||||
/* Number of bytes currently mapped. */
|
||||
size_t mapped;
|
||||
typedef struct decay_stats_s {
|
||||
/* Total number of purge sweeps. */
|
||||
arena_stats_u64_t npurge;
|
||||
/* Total number of madvise calls made. */
|
||||
arena_stats_u64_t nmadvise;
|
||||
/* Total number of pages purged. */
|
||||
arena_stats_u64_t purged;
|
||||
} decay_stats_t;
|
||||
|
||||
/*
|
||||
* Total number of purge sweeps, total number of madvise calls made,
|
||||
* and total pages purged in order to keep dirty unused memory under
|
||||
* control.
|
||||
*/
|
||||
uint64_t npurge;
|
||||
uint64_t nmadvise;
|
||||
uint64_t purged;
|
||||
|
||||
/* Per-size-category statistics. */
|
||||
size_t allocated_large;
|
||||
uint64_t nmalloc_large;
|
||||
uint64_t ndalloc_large;
|
||||
uint64_t nrequests_large;
|
||||
|
||||
/*
|
||||
* One element for each possible size class, including sizes that
|
||||
* overlap with bin size classes. This is necessary because ipalloc()
|
||||
* sometimes has to use such large objects in order to assure proper
|
||||
* alignment.
|
||||
*/
|
||||
malloc_large_stats_t *lstats;
|
||||
};
|
||||
|
||||
struct chunk_stats_s {
|
||||
/* Number of chunks that were allocated. */
|
||||
uint64_t nchunks;
|
||||
|
||||
/* High-water mark for number of chunks allocated. */
|
||||
size_t highchunks;
|
||||
|
||||
/*
|
||||
* Current number of chunks allocated. This value isn't maintained for
|
||||
* any other purpose, so keep track of it in order to be able to set
|
||||
* highchunks.
|
||||
*/
|
||||
size_t curchunks;
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
extern bool opt_stats_print;
|
||||
|
||||
extern size_t stats_cactive;
|
||||
|
||||
void stats_print(void (*write)(void *, const char *), void *cbopaque,
|
||||
const char *opts);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
size_t stats_cactive_get(void);
|
||||
void stats_cactive_add(size_t size);
|
||||
void stats_cactive_sub(size_t size);
|
||||
/*
|
||||
* Arena stats. Note that fields marked "derived" are not directly maintained
|
||||
* within the arena code; rather their values are derived during stats merge
|
||||
* requests.
|
||||
*/
|
||||
typedef struct arena_stats_s {
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
malloc_mutex_t mtx;
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
|
||||
JEMALLOC_INLINE size_t
|
||||
stats_cactive_get(void)
|
||||
{
|
||||
/* Number of bytes currently mapped, excluding retained memory. */
|
||||
atomic_zu_t mapped; /* Partially derived. */
|
||||
|
||||
return (atomic_read_z(&stats_cactive));
|
||||
}
|
||||
/*
|
||||
* Number of unused virtual memory bytes currently retained. Retained
|
||||
* bytes are technically mapped (though always decommitted or purged),
|
||||
* but they are excluded from the mapped statistic (above).
|
||||
*/
|
||||
atomic_zu_t retained; /* Derived. */
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
stats_cactive_add(size_t size)
|
||||
{
|
||||
decay_stats_t decay_dirty;
|
||||
decay_stats_t decay_muzzy;
|
||||
|
||||
atomic_add_z(&stats_cactive, size);
|
||||
}
|
||||
atomic_zu_t base; /* Derived. */
|
||||
atomic_zu_t internal;
|
||||
atomic_zu_t resident; /* Derived. */
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
stats_cactive_sub(size_t size)
|
||||
{
|
||||
atomic_zu_t allocated_large; /* Derived. */
|
||||
arena_stats_u64_t nmalloc_large; /* Derived. */
|
||||
arena_stats_u64_t ndalloc_large; /* Derived. */
|
||||
arena_stats_u64_t nrequests_large; /* Derived. */
|
||||
|
||||
atomic_sub_z(&stats_cactive, size);
|
||||
}
|
||||
#endif
|
||||
/* Number of bytes cached in tcache associated with this arena. */
|
||||
atomic_zu_t tcache_bytes; /* Derived. */
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
|
||||
|
||||
/* One element for each large size class. */
|
||||
malloc_large_stats_t lstats[NSIZES - NBINS];
|
||||
|
||||
/* Arena uptime. */
|
||||
nstime_t uptime;
|
||||
} arena_stats_t;
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_STATS_H */
|
||||
|
||||
722	deps/jemalloc/include/jemalloc/internal/tsd.h (vendored)
@@ -1,434 +1,324 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
#ifndef JEMALLOC_INTERNAL_TSD_H
|
||||
#define JEMALLOC_INTERNAL_TSD_H
|
||||
|
||||
/* Maximum number of malloc_tsd users with cleanup functions. */
|
||||
#define MALLOC_TSD_CLEANUPS_MAX 8
|
||||
#include "jemalloc/internal/arena_types.h"
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_externs.h"
|
||||
#include "jemalloc/internal/prof_types.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/rtree_tsd.h"
|
||||
#include "jemalloc/internal/tcache_types.h"
|
||||
#include "jemalloc/internal/tcache_structs.h"
|
||||
#include "jemalloc/internal/util.h"
|
||||
#include "jemalloc/internal/witness.h"
|
||||
|
||||
typedef bool (*malloc_tsd_cleanup_t)(void);
|
||||
/*
|
||||
* Thread-Specific-Data layout
|
||||
* --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
|
||||
* s: state
|
||||
* e: tcache_enabled
|
||||
* m: thread_allocated (config_stats)
|
||||
* f: thread_deallocated (config_stats)
|
||||
* p: prof_tdata (config_prof)
|
||||
* c: rtree_ctx (rtree cache accessed on deallocation)
|
||||
* t: tcache
|
||||
* --- data not accessed on tcache fast path: arena-related fields ---
|
||||
* d: arenas_tdata_bypass
|
||||
* r: reentrancy_level
|
||||
* x: narenas_tdata
|
||||
* i: iarena
|
||||
* a: arena
|
||||
* o: arenas_tdata
|
||||
* Loading TSD data is on the critical path of basically all malloc operations.
|
||||
* In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
|
||||
* Use a compact layout to reduce cache footprint.
|
||||
* +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
|
||||
* |---------------------------- 1st cacheline ----------------------------|
|
||||
* | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
|
||||
* |---------------------------- 2nd cacheline ----------------------------|
|
||||
* | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
|
||||
 * |---------------------------- 3rd cacheline ----------------------------|
|
||||
* | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
|
||||
* +-------------------------------------------------------------------------+
|
||||
* Note: the entire tcache is embedded into TSD and spans multiple cachelines.
|
||||
*
|
||||
 * The last 3 members (i, a and o) before tcache aren't really needed on tcache
|
||||
* fast path. However we have a number of unused tcache bins and witnesses
|
||||
* (never touched unless config_debug) at the end of tcache, so we place them
|
||||
* there to avoid breaking the cachelines and possibly paging in an extra page.
|
||||
*/
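The layout comment above is only advisory; a hypothetical way to keep it honest is a compile-time check that the hot members really land in the first cacheline. Nothing like this exists in the real header, and the member name below is only the one the O() macro generates; it is a sketch:

#include <stddef.h>

#define DEMO_CACHELINE 64
#define DEMO_ASSERT_HOT(type, member)					\
    _Static_assert(offsetof(type, member) < DEMO_CACHELINE,		\
        #member " expected in the first cacheline")

/*
 * Usage sketch, after struct tsd_s is complete:
 *   DEMO_ASSERT_HOT(struct tsd_s, state);
 *   DEMO_ASSERT_HOT(struct tsd_s,
 *       use_a_getter_or_setter_instead_thread_allocated);
 */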
|
||||
#ifdef JEMALLOC_JET
|
||||
typedef void (*test_callback_t)(int *);
|
||||
# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
|
||||
# define MALLOC_TEST_TSD \
|
||||
O(test_data, int, int) \
|
||||
O(test_callback, test_callback_t, int)
|
||||
# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
|
||||
#else
|
||||
# define MALLOC_TEST_TSD
|
||||
# define MALLOC_TEST_TSD_INITIALIZER
|
||||
#endif
|
||||
|
||||
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
|
||||
!defined(_WIN32))
|
||||
typedef struct tsd_init_block_s tsd_init_block_t;
|
||||
typedef struct tsd_init_head_s tsd_init_head_t;
|
||||
/* O(name, type, nullable type */
|
||||
#define MALLOC_TSD \
|
||||
O(tcache_enabled, bool, bool) \
|
||||
O(arenas_tdata_bypass, bool, bool) \
|
||||
O(reentrancy_level, int8_t, int8_t) \
|
||||
O(narenas_tdata, uint32_t, uint32_t) \
|
||||
O(thread_allocated, uint64_t, uint64_t) \
|
||||
O(thread_deallocated, uint64_t, uint64_t) \
|
||||
O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
|
||||
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
|
||||
O(iarena, arena_t *, arena_t *) \
|
||||
O(arena, arena_t *, arena_t *) \
|
||||
O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
|
||||
O(tcache, tcache_t, tcache_t) \
|
||||
O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
|
||||
MALLOC_TEST_TSD
|
||||
|
||||
#define TSD_INITIALIZER { \
|
||||
tsd_state_uninitialized, \
|
||||
TCACHE_ENABLED_ZERO_INITIALIZER, \
|
||||
false, \
|
||||
0, \
|
||||
0, \
|
||||
0, \
|
||||
0, \
|
||||
NULL, \
|
||||
RTREE_CTX_ZERO_INITIALIZER, \
|
||||
NULL, \
|
||||
NULL, \
|
||||
NULL, \
|
||||
TCACHE_ZERO_INITIALIZER, \
|
||||
WITNESS_TSD_INITIALIZER \
|
||||
MALLOC_TEST_TSD_INITIALIZER \
|
||||
}
|
||||
|
||||
enum {
|
||||
tsd_state_nominal = 0, /* Common case --> jnz. */
|
||||
tsd_state_nominal_slow = 1, /* Initialized but on slow path. */
|
||||
/* the above 2 nominal states should be lower values. */
|
||||
tsd_state_nominal_max = 1, /* used for comparison only. */
|
||||
tsd_state_minimal_initialized = 2,
|
||||
tsd_state_purgatory = 3,
|
||||
tsd_state_reincarnated = 4,
|
||||
tsd_state_uninitialized = 5
|
||||
};
|
||||
|
||||
/* Manually limit tsd_state_t to a single byte. */
|
||||
typedef uint8_t tsd_state_t;
|
||||
|
||||
/* The actual tsd. */
|
||||
struct tsd_s {
|
||||
/*
|
||||
* The contents should be treated as totally opaque outside the tsd
|
||||
* module. Access any thread-local state through the getters and
|
||||
* setters below.
|
||||
*/
|
||||
tsd_state_t state;
|
||||
#define O(n, t, nt) \
|
||||
t use_a_getter_or_setter_instead_##n;
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
};
|
||||
|
||||
/*
|
||||
* Wrapper around tsd_t that makes it possible to avoid implicit conversion
|
||||
* between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
|
||||
* explicitly converted to tsd_t, which is non-nullable.
|
||||
*/
|
||||
struct tsdn_s {
|
||||
tsd_t tsd;
|
||||
};
|
||||
#define TSDN_NULL ((tsdn_t *)0)
|
||||
JEMALLOC_ALWAYS_INLINE tsdn_t *
|
||||
tsd_tsdn(tsd_t *tsd) {
|
||||
return (tsdn_t *)tsd;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
tsdn_null(const tsdn_t *tsdn) {
|
||||
return tsdn == NULL;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsdn_tsd(tsdn_t *tsdn) {
|
||||
assert(!tsdn_null(tsdn));
|
||||
|
||||
return &tsdn->tsd;
|
||||
}
|
||||
|
||||
void *malloc_tsd_malloc(size_t size);
|
||||
void malloc_tsd_dalloc(void *wrapper);
|
||||
void malloc_tsd_cleanup_register(bool (*f)(void));
|
||||
tsd_t *malloc_tsd_boot0(void);
|
||||
void malloc_tsd_boot1(void);
|
||||
void tsd_cleanup(void *arg);
|
||||
tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
|
||||
void tsd_slow_update(tsd_t *tsd);
|
||||
|
||||
/*
|
||||
* We put the platform-specific data declarations and inlines into their own
|
||||
* header files to avoid cluttering this file. They define tsd_boot0,
|
||||
* tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set.
|
||||
*/
|
||||
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
|
||||
#include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
|
||||
#elif (defined(JEMALLOC_TLS))
|
||||
#include "jemalloc/internal/tsd_tls.h"
|
||||
#elif (defined(_WIN32))
|
||||
#include "jemalloc/internal/tsd_win.h"
|
||||
#else
|
||||
#include "jemalloc/internal/tsd_generic.h"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
|
||||
* are four macros that support (at least) three use cases: file-private,
|
||||
* library-private, and library-private inlined. Following is an example
|
||||
* library-private tsd variable:
|
||||
*
|
||||
* In example.h:
|
||||
* typedef struct {
|
||||
* int x;
|
||||
* int y;
|
||||
* } example_t;
|
||||
* #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
|
||||
* malloc_tsd_protos(, example, example_t *)
|
||||
* malloc_tsd_externs(example, example_t *)
|
||||
* In example.c:
|
||||
* malloc_tsd_data(, example, example_t *, EX_INITIALIZER)
|
||||
* malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER,
|
||||
* example_tsd_cleanup)
|
||||
*
|
||||
* The result is a set of generated functions, e.g.:
|
||||
*
|
||||
* bool example_tsd_boot(void) {...}
|
||||
* example_t **example_tsd_get() {...}
|
||||
* void example_tsd_set(example_t **val) {...}
|
||||
*
|
||||
* Note that all of the functions deal in terms of (a_type *) rather than
|
||||
* (a_type) so that it is possible to support non-pointer types (unlike
|
||||
* pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
|
||||
* cast to (void *). This means that the cleanup function needs to cast *and*
|
||||
* dereference the function argument, e.g.:
|
||||
*
|
||||
* void
|
||||
* example_tsd_cleanup(void *arg)
|
||||
* {
|
||||
* example_t *example = *(example_t **)arg;
|
||||
*
|
||||
* [...]
|
||||
* if ([want the cleanup function to be called again]) {
|
||||
* example_tsd_set(&example);
|
||||
* }
|
||||
* }
|
||||
*
|
||||
* If example_tsd_set() is called within example_tsd_cleanup(), it will be
|
||||
* called again. This is similar to how pthreads TSD destruction works, except
|
||||
* that pthreads only calls the cleanup function again if the value was set to
|
||||
* non-NULL.
|
||||
* tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
|
||||
* foo. This omits some safety checks, and so can be used during tsd
|
||||
* initialization and cleanup.
|
||||
*/
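A short sketch of how the generated functions from the example above get called, assuming the example_t/EX_INITIALIZER declarations shown in the comment (hypothetical caller code, only to make the sequence concrete):

/* Typically called once, from the library's own bootstrap code. */
static bool
example_module_boot(void) {
	return example_tsd_boot();	/* false on success, true on failure. */
}

/* Per-thread accessors built on the generated get/set pair. */
static void
example_set_for_current_thread(example_t *val) {
	example_tsd_set(&val);		/* Copies the pointer into this thread's TSD. */
}

static example_t *
example_get_for_current_thread(void) {
	return *example_tsd_get();	/* This thread's stored example_t pointer. */
}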
|
||||
|
||||
/* malloc_tsd_protos(). */
|
||||
#define malloc_tsd_protos(a_attr, a_name, a_type) \
|
||||
a_attr bool \
|
||||
a_name##_tsd_boot(void); \
|
||||
a_attr a_type * \
|
||||
a_name##_tsd_get(void); \
|
||||
a_attr void \
|
||||
a_name##_tsd_set(a_type *val);
|
||||
|
||||
/* malloc_tsd_externs(). */
|
||||
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
|
||||
#define malloc_tsd_externs(a_name, a_type) \
|
||||
extern __thread a_type a_name##_tls; \
|
||||
extern __thread bool a_name##_initialized; \
|
||||
extern bool a_name##_booted;
|
||||
#elif (defined(JEMALLOC_TLS))
|
||||
#define malloc_tsd_externs(a_name, a_type) \
|
||||
extern __thread a_type a_name##_tls; \
|
||||
extern pthread_key_t a_name##_tsd; \
|
||||
extern bool a_name##_booted;
|
||||
#elif (defined(_WIN32))
|
||||
#define malloc_tsd_externs(a_name, a_type) \
|
||||
extern DWORD a_name##_tsd; \
|
||||
extern bool a_name##_booted;
|
||||
#else
|
||||
#define malloc_tsd_externs(a_name, a_type) \
|
||||
extern pthread_key_t a_name##_tsd; \
|
||||
extern tsd_init_head_t a_name##_tsd_init_head; \
|
||||
extern bool a_name##_booted;
|
||||
#endif
|
||||
|
||||
/* malloc_tsd_data(). */
|
||||
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
|
||||
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
|
||||
a_attr __thread a_type JEMALLOC_TLS_MODEL \
|
||||
a_name##_tls = a_initializer; \
|
||||
a_attr __thread bool JEMALLOC_TLS_MODEL \
|
||||
a_name##_initialized = false; \
|
||||
a_attr bool a_name##_booted = false;
|
||||
#elif (defined(JEMALLOC_TLS))
|
||||
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
|
||||
a_attr __thread a_type JEMALLOC_TLS_MODEL \
|
||||
a_name##_tls = a_initializer; \
|
||||
a_attr pthread_key_t a_name##_tsd; \
|
||||
a_attr bool a_name##_booted = false;
|
||||
#elif (defined(_WIN32))
|
||||
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
|
||||
a_attr DWORD a_name##_tsd; \
|
||||
a_attr bool a_name##_booted = false;
|
||||
#else
|
||||
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
|
||||
a_attr pthread_key_t a_name##_tsd; \
|
||||
a_attr tsd_init_head_t a_name##_tsd_init_head = { \
|
||||
ql_head_initializer(blocks), \
|
||||
MALLOC_MUTEX_INITIALIZER \
|
||||
}; \
|
||||
a_attr bool a_name##_booted = false;
|
||||
#endif
|
||||
|
||||
/* malloc_tsd_funcs(). */
|
||||
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
|
||||
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
|
||||
a_cleanup) \
|
||||
/* Initialization/cleanup. */ \
|
||||
a_attr bool \
|
||||
a_name##_tsd_cleanup_wrapper(void) \
|
||||
{ \
|
||||
\
|
||||
if (a_name##_initialized) { \
|
||||
a_name##_initialized = false; \
|
||||
a_cleanup(&a_name##_tls); \
|
||||
} \
|
||||
return (a_name##_initialized); \
|
||||
} \
|
||||
a_attr bool \
|
||||
a_name##_tsd_boot(void) \
|
||||
{ \
|
||||
\
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) { \
|
||||
malloc_tsd_cleanup_register( \
|
||||
&a_name##_tsd_cleanup_wrapper); \
|
||||
} \
|
||||
a_name##_booted = true; \
|
||||
return (false); \
|
||||
} \
|
||||
/* Get/set. */ \
|
||||
a_attr a_type * \
|
||||
a_name##_tsd_get(void) \
|
||||
{ \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
return (&a_name##_tls); \
|
||||
} \
|
||||
a_attr void \
|
||||
a_name##_tsd_set(a_type *val) \
|
||||
{ \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
a_name##_tls = (*val); \
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) \
|
||||
a_name##_initialized = true; \
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE t * \
|
||||
tsd_##n##p_get_unsafe(tsd_t *tsd) { \
|
||||
return &tsd->use_a_getter_or_setter_instead_##n; \
|
||||
}
|
||||
#elif (defined(JEMALLOC_TLS))
|
||||
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
|
||||
a_cleanup) \
|
||||
/* Initialization/cleanup. */ \
|
||||
a_attr bool \
|
||||
a_name##_tsd_boot(void) \
|
||||
{ \
|
||||
\
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) { \
|
||||
if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \
|
||||
return (true); \
|
||||
} \
|
||||
a_name##_booted = true; \
|
||||
return (false); \
|
||||
} \
|
||||
/* Get/set. */ \
|
||||
a_attr a_type * \
|
||||
a_name##_tsd_get(void) \
|
||||
{ \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
return (&a_name##_tls); \
|
||||
} \
|
||||
a_attr void \
|
||||
a_name##_tsd_set(a_type *val) \
|
||||
{ \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
a_name##_tls = (*val); \
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) { \
|
||||
if (pthread_setspecific(a_name##_tsd, \
|
||||
(void *)(&a_name##_tls))) { \
|
||||
malloc_write("<jemalloc>: Error" \
|
||||
" setting TSD for "#a_name"\n"); \
|
||||
if (opt_abort) \
|
||||
abort(); \
|
||||
} \
|
||||
} \
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE t * \
|
||||
tsd_##n##p_get(tsd_t *tsd) { \
|
||||
assert(tsd->state == tsd_state_nominal || \
|
||||
tsd->state == tsd_state_nominal_slow || \
|
||||
tsd->state == tsd_state_reincarnated || \
|
||||
tsd->state == tsd_state_minimal_initialized); \
|
||||
return tsd_##n##p_get_unsafe(tsd); \
|
||||
}
|
||||
#elif (defined(_WIN32))
|
||||
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
|
||||
a_cleanup) \
|
||||
/* Data structure. */ \
|
||||
typedef struct { \
|
||||
bool initialized; \
|
||||
a_type val; \
|
||||
} a_name##_tsd_wrapper_t; \
|
||||
/* Initialization/cleanup. */ \
|
||||
a_attr bool \
|
||||
a_name##_tsd_cleanup_wrapper(void) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper; \
|
||||
\
|
||||
wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \
|
||||
if (wrapper == NULL) \
|
||||
return (false); \
|
||||
if (a_cleanup != malloc_tsd_no_cleanup && \
|
||||
wrapper->initialized) { \
|
||||
a_type val = wrapper->val; \
|
||||
a_type tsd_static_data = a_initializer; \
|
||||
wrapper->initialized = false; \
|
||||
wrapper->val = tsd_static_data; \
|
||||
a_cleanup(&val); \
|
||||
if (wrapper->initialized) { \
|
||||
/* Trigger another cleanup round. */ \
|
||||
return (true); \
|
||||
} \
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
/*
|
||||
* tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
|
||||
* isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
|
||||
*/
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE nt * \
|
||||
tsdn_##n##p_get(tsdn_t *tsdn) { \
|
||||
if (tsdn_null(tsdn)) { \
|
||||
return NULL; \
|
||||
} \
|
||||
malloc_tsd_dalloc(wrapper); \
|
||||
return (false); \
|
||||
} \
|
||||
a_attr bool \
|
||||
a_name##_tsd_boot(void) \
|
||||
{ \
|
||||
\
|
||||
a_name##_tsd = TlsAlloc(); \
|
||||
if (a_name##_tsd == TLS_OUT_OF_INDEXES) \
|
||||
return (true); \
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) { \
|
||||
malloc_tsd_cleanup_register( \
|
||||
&a_name##_tsd_cleanup_wrapper); \
|
||||
} \
|
||||
a_name##_booted = true; \
|
||||
return (false); \
|
||||
} \
|
||||
/* Get/set. */ \
|
||||
a_attr a_name##_tsd_wrapper_t * \
|
||||
a_name##_tsd_get_wrapper(void) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
|
||||
TlsGetValue(a_name##_tsd); \
|
||||
\
|
||||
if (wrapper == NULL) { \
|
||||
wrapper = (a_name##_tsd_wrapper_t *) \
|
||||
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
|
||||
if (wrapper == NULL) { \
|
||||
malloc_write("<jemalloc>: Error allocating" \
|
||||
" TSD for "#a_name"\n"); \
|
||||
abort(); \
|
||||
} else { \
|
||||
static a_type tsd_static_data = a_initializer; \
|
||||
wrapper->initialized = false; \
|
||||
wrapper->val = tsd_static_data; \
|
||||
} \
|
||||
if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \
|
||||
malloc_write("<jemalloc>: Error setting" \
|
||||
" TSD for "#a_name"\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
} \
|
||||
return (wrapper); \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_name##_tsd_get(void) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper; \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
wrapper = a_name##_tsd_get_wrapper(); \
|
||||
return (&wrapper->val); \
|
||||
} \
|
||||
a_attr void \
|
||||
a_name##_tsd_set(a_type *val) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper; \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
wrapper = a_name##_tsd_get_wrapper(); \
|
||||
wrapper->val = *(val); \
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) \
|
||||
wrapper->initialized = true; \
|
||||
tsd_t *tsd = tsdn_tsd(tsdn); \
|
||||
return (nt *)tsd_##n##p_get(tsd); \
|
||||
}
|
||||
#else
|
||||
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
|
||||
a_cleanup) \
|
||||
/* Data structure. */ \
|
||||
typedef struct { \
|
||||
bool initialized; \
|
||||
a_type val; \
|
||||
} a_name##_tsd_wrapper_t; \
|
||||
/* Initialization/cleanup. */ \
|
||||
a_attr void \
|
||||
a_name##_tsd_cleanup_wrapper(void *arg) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\
|
||||
\
|
||||
if (a_cleanup != malloc_tsd_no_cleanup && \
|
||||
wrapper->initialized) { \
|
||||
wrapper->initialized = false; \
|
||||
a_cleanup(&wrapper->val); \
|
||||
if (wrapper->initialized) { \
|
||||
/* Trigger another cleanup round. */ \
|
||||
if (pthread_setspecific(a_name##_tsd, \
|
||||
(void *)wrapper)) { \
|
||||
malloc_write("<jemalloc>: Error" \
|
||||
" setting TSD for "#a_name"\n"); \
|
||||
if (opt_abort) \
|
||||
abort(); \
|
||||
} \
|
||||
return; \
|
||||
} \
|
||||
} \
|
||||
malloc_tsd_dalloc(wrapper); \
|
||||
} \
|
||||
a_attr bool \
|
||||
a_name##_tsd_boot(void) \
|
||||
{ \
|
||||
\
|
||||
if (pthread_key_create(&a_name##_tsd, \
|
||||
a_name##_tsd_cleanup_wrapper) != 0) \
|
||||
return (true); \
|
||||
a_name##_booted = true; \
|
||||
return (false); \
|
||||
} \
|
||||
/* Get/set. */ \
|
||||
a_attr a_name##_tsd_wrapper_t * \
|
||||
a_name##_tsd_get_wrapper(void) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
|
||||
pthread_getspecific(a_name##_tsd); \
|
||||
\
|
||||
if (wrapper == NULL) { \
|
||||
tsd_init_block_t block; \
|
||||
wrapper = tsd_init_check_recursion( \
|
||||
&a_name##_tsd_init_head, &block); \
|
||||
if (wrapper) \
|
||||
return (wrapper); \
|
||||
wrapper = (a_name##_tsd_wrapper_t *) \
|
||||
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
|
||||
block.data = wrapper; \
|
||||
if (wrapper == NULL) { \
|
||||
malloc_write("<jemalloc>: Error allocating" \
|
||||
" TSD for "#a_name"\n"); \
|
||||
abort(); \
|
||||
} else { \
|
||||
static a_type tsd_static_data = a_initializer; \
|
||||
wrapper->initialized = false; \
|
||||
wrapper->val = tsd_static_data; \
|
||||
} \
|
||||
if (pthread_setspecific(a_name##_tsd, \
|
||||
(void *)wrapper)) { \
|
||||
malloc_write("<jemalloc>: Error setting" \
|
||||
" TSD for "#a_name"\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
tsd_init_finish(&a_name##_tsd_init_head, &block); \
|
||||
} \
|
||||
return (wrapper); \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_name##_tsd_get(void) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper; \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
wrapper = a_name##_tsd_get_wrapper(); \
|
||||
return (&wrapper->val); \
|
||||
} \
|
||||
a_attr void \
|
||||
a_name##_tsd_set(a_type *val) \
|
||||
{ \
|
||||
a_name##_tsd_wrapper_t *wrapper; \
|
||||
\
|
||||
assert(a_name##_booted); \
|
||||
wrapper = a_name##_tsd_get_wrapper(); \
|
||||
wrapper->val = *(val); \
|
||||
if (a_cleanup != malloc_tsd_no_cleanup) \
|
||||
wrapper->initialized = true; \
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE t \
|
||||
tsd_##n##_get(tsd_t *tsd) { \
|
||||
return *tsd_##n##p_get(tsd); \
|
||||
}
|
||||
#endif
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE void \
|
||||
tsd_##n##_set(tsd_t *tsd, t val) { \
|
||||
assert(tsd->state != tsd_state_reincarnated && \
|
||||
tsd->state != tsd_state_minimal_initialized); \
|
||||
*tsd_##n##p_get(tsd) = val; \
|
||||
}
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
|
||||
!defined(_WIN32))
|
||||
struct tsd_init_block_s {
|
||||
ql_elm(tsd_init_block_t) link;
|
||||
pthread_t thread;
|
||||
void *data;
|
||||
};
|
||||
struct tsd_init_head_s {
|
||||
ql_head(tsd_init_block_t) blocks;
|
||||
malloc_mutex_t lock;
|
||||
};
|
||||
#endif
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
tsd_assert_fast(tsd_t *tsd) {
|
||||
assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
|
||||
tsd_reentrancy_level_get(tsd) == 0);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
tsd_fast(tsd_t *tsd) {
|
||||
bool fast = (tsd->state == tsd_state_nominal);
|
||||
if (fast) {
|
||||
tsd_assert_fast(tsd);
|
||||
}
|
||||
|
||||
void *malloc_tsd_malloc(size_t size);
|
||||
void malloc_tsd_dalloc(void *wrapper);
|
||||
void malloc_tsd_no_cleanup(void *);
|
||||
void malloc_tsd_cleanup_register(bool (*f)(void));
|
||||
void malloc_tsd_boot(void);
|
||||
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
|
||||
!defined(_WIN32))
|
||||
void *tsd_init_check_recursion(tsd_init_head_t *head,
|
||||
tsd_init_block_t *block);
|
||||
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
|
||||
#endif
|
||||
return fast;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsd_fetch_impl(bool init, bool minimal) {
|
||||
tsd_t *tsd = tsd_get(init);
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
if (!init && tsd_get_allocates() && tsd == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
assert(tsd != NULL);
|
||||
|
||||
if (unlikely(tsd->state != tsd_state_nominal)) {
|
||||
return tsd_fetch_slow(tsd, minimal);
|
||||
}
|
||||
assert(tsd_fast(tsd));
|
||||
tsd_assert_fast(tsd);
|
||||
|
||||
return tsd;
|
||||
}
|
||||
|
||||
/* Get a minimal TSD that requires no cleanup. See comments in free(). */
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsd_fetch_min(void) {
|
||||
return tsd_fetch_impl(true, true);
|
||||
}
|
||||
|
||||
/* For internal background threads use only. */
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsd_internal_fetch(void) {
|
||||
tsd_t *tsd = tsd_fetch_min();
|
||||
/* Use reincarnated state to prevent full initialization. */
|
||||
tsd->state = tsd_state_reincarnated;
|
||||
|
||||
return tsd;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsd_fetch(void) {
|
||||
return tsd_fetch_impl(true, false);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
tsd_nominal(tsd_t *tsd) {
|
||||
return (tsd->state <= tsd_state_nominal_max);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tsdn_t *
|
||||
tsdn_fetch(void) {
|
||||
if (!tsd_booted_get()) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return tsd_tsdn(tsd_fetch_impl(false, false));
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
|
||||
tsd_rtree_ctx(tsd_t *tsd) {
|
||||
return tsd_rtree_ctxp_get(tsd);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
|
||||
tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
|
||||
/*
|
||||
* If tsd cannot be accessed, initialize the fallback rtree_ctx and
|
||||
* return a pointer to it.
|
||||
*/
|
||||
if (unlikely(tsdn_null(tsdn))) {
|
||||
rtree_ctx_data_init(fallback);
|
||||
return fallback;
|
||||
}
|
||||
return tsd_rtree_ctx(tsdn_tsd(tsdn));
|
||||
}
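tsdn_rtree_ctx() lets code that may run before TSD is usable still obtain an rtree context: the caller passes a stack fallback that is only initialized on that slow path. A sketch of the intended calling pattern (the surrounding lookup is illustrative):

static extent_t *
demo_lookup_extent(tsdn_t *tsdn, rtree_t *rtree, void *ptr) {
	/* Storage for the no-TSD case; left untouched on the common path. */
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	/* Arbitrary pointer, so dependent == false forces a synchronized read. */
	return rtree_extent_read(tsdn, rtree, rtree_ctx, (uintptr_t)ptr, false);
}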
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_TSD_H */
|
||||
|
||||
167	deps/jemalloc/include/jemalloc/internal/util.h (vendored)
@@ -1,143 +1,50 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
#ifndef JEMALLOC_INTERNAL_UTIL_H
|
||||
#define JEMALLOC_INTERNAL_UTIL_H
|
||||
|
||||
/* Size of stack-allocated buffer passed to buferror(). */
|
||||
#define BUFERROR_BUF 64
|
||||
#define UTIL_INLINE static inline
|
||||
|
||||
/*
|
||||
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
|
||||
* large enough for all possible uses within jemalloc.
|
||||
*/
|
||||
#define MALLOC_PRINTF_BUFSIZE 4096
|
||||
/* Junk fill patterns. */
|
||||
#ifndef JEMALLOC_ALLOC_JUNK
|
||||
# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
|
||||
#endif
|
||||
#ifndef JEMALLOC_FREE_JUNK
|
||||
# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Wrap a cpp argument that contains commas such that it isn't broken up into
|
||||
* multiple arguments.
|
||||
*/
|
||||
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
|
||||
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
|
||||
|
||||
/* cpp macro definition stringification. */
|
||||
#define STRINGIFY_HELPER(x) #x
|
||||
#define STRINGIFY(x) STRINGIFY_HELPER(x)
|
||||
|
||||
/*
|
||||
* Silence compiler warnings due to uninitialized values. This is used
|
||||
* wherever the compiler fails to recognize that the variable is never used
|
||||
* uninitialized.
|
||||
*/
|
||||
#ifdef JEMALLOC_CC_SILENCE
|
||||
# define JEMALLOC_CC_SILENCE_INIT(v) = v
|
||||
#define JEMALLOC_CC_SILENCE_INIT(v) = v
|
||||
|
||||
#ifdef __GNUC__
|
||||
# define likely(x) __builtin_expect(!!(x), 1)
|
||||
# define unlikely(x) __builtin_expect(!!(x), 0)
|
||||
#else
|
||||
# define JEMALLOC_CC_SILENCE_INIT(v)
|
||||
# define likely(x) !!(x)
|
||||
# define unlikely(x) !!(x)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Define a custom assert() in order to reduce the chances of deadlock during
|
||||
* assertion failure.
|
||||
*/
|
||||
#ifndef assert
|
||||
#define assert(e) do { \
|
||||
if (config_debug && !(e)) { \
|
||||
malloc_printf( \
|
||||
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
|
||||
__FILE__, __LINE__, #e); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
|
||||
# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
|
||||
#endif
|
||||
|
||||
#ifndef not_reached
|
||||
#define not_reached() do { \
|
||||
if (config_debug) { \
|
||||
malloc_printf( \
|
||||
"<jemalloc>: %s:%d: Unreachable code reached\n", \
|
||||
__FILE__, __LINE__); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#ifndef not_implemented
|
||||
#define not_implemented() do { \
|
||||
if (config_debug) { \
|
||||
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
|
||||
__FILE__, __LINE__); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#ifndef assert_not_implemented
|
||||
#define assert_not_implemented(e) do { \
|
||||
if (config_debug && !(e)) \
|
||||
not_implemented(); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
|
||||
#define cassert(c) do { \
|
||||
if ((c) == false) \
|
||||
not_reached(); \
|
||||
} while (0)
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
int buferror(int err, char *buf, size_t buflen);
|
||||
uintmax_t malloc_strtoumax(const char *restrict nptr,
|
||||
char **restrict endptr, int base);
|
||||
void malloc_write(const char *s);
|
||||
|
||||
/*
|
||||
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
|
||||
* point math.
|
||||
*/
|
||||
int malloc_vsnprintf(char *str, size_t size, const char *format,
|
||||
va_list ap);
|
||||
int malloc_snprintf(char *str, size_t size, const char *format, ...)
|
||||
JEMALLOC_ATTR(format(printf, 3, 4));
|
||||
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
|
||||
const char *format, va_list ap);
|
||||
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
|
||||
const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
|
||||
void malloc_printf(const char *format, ...)
|
||||
JEMALLOC_ATTR(format(printf, 1, 2));
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
size_t pow2_ceil(size_t x);
|
||||
void set_errno(int errnum);
|
||||
int get_errno(void);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
|
||||
/* Compute the smallest power of 2 that is >= x. */
|
||||
JEMALLOC_INLINE size_t
|
||||
pow2_ceil(size_t x)
|
||||
{
|
||||
|
||||
x--;
|
||||
x |= x >> 1;
|
||||
x |= x >> 2;
|
||||
x |= x >> 4;
|
||||
x |= x >> 8;
|
||||
x |= x >> 16;
|
||||
#if (LG_SIZEOF_PTR == 3)
|
||||
x |= x >> 32;
|
||||
#endif
|
||||
x++;
|
||||
return (x);
|
||||
}
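pow2_ceil() rounds up to the next power of two by smearing the highest set bit into every lower position and then adding one; for example pow2_ceil(37) takes 36 (100100b), smears it to 111111b == 63, and returns 64. A tiny standalone duplicate of the logic with a few checks (only so it can be compiled in isolation; the 32-bit guard stands in for the LG_SIZEOF_PTR test above):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static size_t
demo_pow2_ceil(size_t x) {
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
#if SIZE_MAX > 0xffffffffU
	x |= x >> 32;
#endif
	return x + 1;
}

static void
demo_pow2_ceil_check(void) {
	assert(demo_pow2_ceil(1) == 1);
	assert(demo_pow2_ceil(37) == 64);
	assert(demo_pow2_ceil(64) == 64);
	assert(demo_pow2_ceil(65) == 128);
}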
|
||||
|
||||
/* Sets error code */
|
||||
JEMALLOC_INLINE void
|
||||
set_errno(int errnum)
|
||||
{
|
||||
#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
|
||||
|
||||
/* Set error code. */
|
||||
UTIL_INLINE void
|
||||
set_errno(int errnum) {
|
||||
#ifdef _WIN32
|
||||
SetLastError(errnum);
|
||||
#else
|
||||
@@ -145,18 +52,16 @@ set_errno(int errnum)
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Get last error code */
|
||||
JEMALLOC_INLINE int
|
||||
get_errno(void)
|
||||
{
|
||||
|
||||
/* Get last error code. */
|
||||
UTIL_INLINE int
|
||||
get_errno(void) {
|
||||
#ifdef _WIN32
|
||||
return (GetLastError());
|
||||
return GetLastError();
|
||||
#else
|
||||
return (errno);
|
||||
return errno;
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
#undef UTIL_INLINE
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_UTIL_H */
|
||||
|
||||
456	deps/jemalloc/include/jemalloc/jemalloc.h (vendored)
@@ -1,45 +1,203 @@
|
||||
#ifndef JEMALLOC_H_
|
||||
#define JEMALLOC_H_
|
||||
#define JEMALLOC_H_
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Defined if __attribute__((...)) syntax is supported. */
|
||||
#define JEMALLOC_HAVE_ATTR
|
||||
|
||||
/* Defined if alloc_size attribute is supported. */
|
||||
#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
|
||||
|
||||
/* Defined if format(gnu_printf, ...) attribute is supported. */
|
||||
#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
|
||||
|
||||
/* Defined if format(printf, ...) attribute is supported. */
|
||||
#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
|
||||
|
||||
/*
|
||||
* Define overrides for non-standard allocator-related functions if they are
|
||||
* present on the system.
|
||||
*/
|
||||
#define JEMALLOC_OVERRIDE_MEMALIGN
|
||||
#define JEMALLOC_OVERRIDE_VALLOC
|
||||
|
||||
/*
|
||||
* At least Linux omits the "const" in:
|
||||
*
|
||||
* size_t malloc_usable_size(const void *ptr);
|
||||
*
|
||||
* Match the operating system's prototype.
|
||||
*/
|
||||
#define JEMALLOC_USABLE_SIZE_CONST
|
||||
|
||||
/*
|
||||
* If defined, specify throw() for the public function prototypes when compiling
|
||||
* with C++. The only justification for this is to match the prototypes that
|
||||
* glibc defines.
|
||||
*/
|
||||
#define JEMALLOC_USE_CXX_THROW
|
||||
|
||||
#ifdef _MSC_VER
|
||||
# ifdef _WIN64
|
||||
# define LG_SIZEOF_PTR_WIN 3
|
||||
# else
|
||||
# define LG_SIZEOF_PTR_WIN 2
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Name mangling for public symbols is controlled by --with-mangling and
|
||||
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
|
||||
* these macro definitions.
|
||||
*/
|
||||
#ifndef JEMALLOC_NO_RENAME
|
||||
# define je_aligned_alloc aligned_alloc
|
||||
# define je_calloc calloc
|
||||
# define je_dallocx dallocx
|
||||
# define je_free free
|
||||
# define je_mallctl mallctl
|
||||
# define je_mallctlbymib mallctlbymib
|
||||
# define je_mallctlnametomib mallctlnametomib
|
||||
# define je_malloc malloc
|
||||
# define je_malloc_conf malloc_conf
|
||||
# define je_malloc_message malloc_message
|
||||
# define je_malloc_stats_print malloc_stats_print
|
||||
# define je_malloc_usable_size malloc_usable_size
|
||||
# define je_mallocx mallocx
|
||||
# define je_nallocx nallocx
|
||||
# define je_posix_memalign posix_memalign
|
||||
# define je_rallocx rallocx
|
||||
# define je_realloc realloc
|
||||
# define je_sallocx sallocx
|
||||
# define je_sdallocx sdallocx
|
||||
# define je_xallocx xallocx
|
||||
# define je_memalign memalign
|
||||
# define je_valloc valloc
|
||||
#endif
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include <limits.h>
|
||||
#include <strings.h>
|
||||
|
||||
#define JEMALLOC_VERSION "3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340"
|
||||
#define JEMALLOC_VERSION_MAJOR 3
|
||||
#define JEMALLOC_VERSION_MINOR 6
|
||||
#define JEMALLOC_VERSION_BUGFIX 0
|
||||
#define JEMALLOC_VERSION_NREV 0
|
||||
#define JEMALLOC_VERSION_GID "46c0af68bd248b04df75e4f92d5fb804c3d75340"
|
||||
#define JEMALLOC_VERSION "5.0.1-0-g896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb"
|
||||
#define JEMALLOC_VERSION_MAJOR 5
|
||||
#define JEMALLOC_VERSION_MINOR 0
|
||||
#define JEMALLOC_VERSION_BUGFIX 1
|
||||
#define JEMALLOC_VERSION_NREV 0
|
||||
#define JEMALLOC_VERSION_GID "896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb"
|
||||
|
||||
# define MALLOCX_LG_ALIGN(la) (la)
|
||||
# if LG_SIZEOF_PTR == 2
|
||||
# define MALLOCX_ALIGN(a) (ffs(a)-1)
|
||||
# else
|
||||
# define MALLOCX_ALIGN(a) \
|
||||
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
|
||||
# endif
|
||||
# define MALLOCX_ZERO ((int)0x40)
|
||||
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
|
||||
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
|
||||
#define MALLOCX_LG_ALIGN(la) ((int)(la))
|
||||
#if LG_SIZEOF_PTR == 2
|
||||
# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
|
||||
#else
|
||||
# define MALLOCX_ALIGN(a) \
|
||||
((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
|
||||
ffs((int)(((size_t)(a))>>32))+31))
|
||||
#endif
|
||||
#define MALLOCX_ZERO ((int)0x40)
|
||||
/*
|
||||
* Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
|
||||
* encodes MALLOCX_TCACHE_NONE.
|
||||
*/
|
||||
#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
|
||||
#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
|
||||
/*
|
||||
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
|
||||
*/
|
||||
#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
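These macros pack alignment, zero-fill, tcache selection and arena selection into the single flags argument of the *allocx() entry points. A hedged usage sketch (error handling trimmed; a real arena index would normally come from mallctl("arenas.create", ...)):

#include <stddef.h>
/* #include <jemalloc/jemalloc.h> in a real consumer. */

static void *
demo_alloc_aligned_zeroed(size_t size, unsigned arena_ind) {
	/* 64-byte aligned, zero-filled, bypassing the thread cache. */
	int flags = MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE |
	    MALLOCX_ARENA(arena_ind);
	return mallocx(size, flags);
}

static void
demo_free(void *ptr, size_t size) {
	/* sdallocx() lets the caller-known size speed up deallocation. */
	sdallocx(ptr, size, 0);
}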
|
||||
|
||||
#ifdef JEMALLOC_EXPERIMENTAL
|
||||
# define ALLOCM_LG_ALIGN(la) (la)
|
||||
# if LG_SIZEOF_PTR == 2
|
||||
# define ALLOCM_ALIGN(a) (ffs(a)-1)
|
||||
# else
|
||||
# define ALLOCM_ALIGN(a) \
|
||||
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
|
||||
/*
|
||||
* Use as arena index in "arena.<i>.{purge,decay,dss}" and
|
||||
* "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
|
||||
* definition is intentionally specified in raw decimal format to support
|
||||
* cpp-based string concatenation, e.g.
|
||||
*
|
||||
* #define STRINGIFY_HELPER(x) #x
|
||||
* #define STRINGIFY(x) STRINGIFY_HELPER(x)
|
||||
*
|
||||
* mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
|
||||
* 0);
|
||||
*/
|
||||
#define MALLCTL_ARENAS_ALL 4096
|
||||
/*
|
||||
* Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
|
||||
* destroyed arenas.
|
||||
*/
|
||||
#define MALLCTL_ARENAS_DESTROYED 4097

#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()
#else
# define JEMALLOC_CXX_THROW
#endif

#if defined(_MSC_VER)
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# ifndef JEMALLOC_EXPORT
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# endif
# define ALLOCM_ZERO ((int)0x40)
# define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
# define ALLOCM_SUCCESS 0
# define ALLOCM_ERR_OOM 1
# define ALLOCM_ERR_NOT_MOVED 2
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_NOINLINE __declspec(noinline)
# ifdef __cplusplus
# define JEMALLOC_NOTHROW __declspec(nothrow)
# else
# define JEMALLOC_NOTHROW
# endif
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
# if _MSC_VER >= 1900 && !defined(__EDG__)
# define JEMALLOC_ALLOCATOR __declspec(allocator)
# else
# define JEMALLOC_ALLOCATOR
# endif
#elif defined(JEMALLOC_HAVE_ATTR)
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
# else
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# endif
# ifndef JEMALLOC_EXPORT
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# endif
# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
# else
# define JEMALLOC_FORMAT_PRINTF(s, i)
# endif
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# define JEMALLOC_EXPORT
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_NOINLINE
# define JEMALLOC_NOTHROW
# define JEMALLOC_SECTION(s)
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#endif

/*
@@ -51,55 +209,141 @@ extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
    const char *s);

JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
    JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
    size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
    JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_malloc(size_t size)
    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
    size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
    size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
    JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
    JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
    JEMALLOC_CXX_THROW;

JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
    JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
    int flags) JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
    size_t extra, int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
    int flags) JEMALLOC_ATTR(pure);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
    int flags);
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
    JEMALLOC_ATTR(pure);
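Together these declarations form the non-standard *allocx API: mallocx() allocates with flags, sallocx() reports the real usable size, xallocx() tries to resize in place, and sdallocx() is the sized deallocation counterpart. A small illustrative sketch (not part of the vendored header), assuming the unprefixed names are mapped in by the mangling block later in this file:

    #include <jemalloc/jemalloc.h>

    void *p = mallocx(1000, 0);            /* request is rounded up to a size class */
    if (p != NULL) {
        size_t usable = sallocx(p, 0);     /* real size backing the allocation */
        xallocx(p, usable, 4096, 0);       /* attempt to grow in place by up to 4 KiB */
        sdallocx(p, sallocx(p, 0), 0);     /* sized deallocation */
    }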

JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
    size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
    const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t je_malloc_usable_size(
    JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
    size_t *mibp, size_t *miblenp);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
    void (*write_cb)(void *, const char *), void *je_cbopaque,
    const char *opts);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
    JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
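mallctl() is the introspection and control entry point: name selects a period-delimited control, oldp/oldlenp read its current value, and newp/newlen optionally write a new one; mallctlnametomib() and mallctlbymib() cache the name lookup for repeated calls. A hedged example (not part of the vendored header) that reads one well-known read-only control:

    #include <jemalloc/jemalloc.h>

    unsigned nbins;
    size_t len = sizeof(nbins);
    if (mallctl("arenas.nbins", &nbins, &len, NULL, 0) == 0) {
        /* nbins now holds the number of small size-class bins. */
    }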

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
    JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
    JEMALLOC_ATTR(malloc);
#endif

#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size,
    int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size,
    size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags)
    JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags)
    JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
#endif
typedef struct extent_hooks_s extent_hooks_t;

/*
 * void *
 * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
 *     size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
 */
typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
    bool *, unsigned);

/*
 * bool
 * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *     bool committed, unsigned arena_ind);
 */
typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
    unsigned);

/*
 * void
 * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *     bool committed, unsigned arena_ind);
 */
typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
    unsigned);

/*
 * bool
 * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *     size_t offset, size_t length, unsigned arena_ind);
 */
typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
    unsigned);

/*
 * bool
 * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *     size_t offset, size_t length, unsigned arena_ind);
 */
typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
    size_t, unsigned);

/*
 * bool
 * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *     size_t offset, size_t length, unsigned arena_ind);
 */
typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
    unsigned);

/*
 * bool
 * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *     size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
 */
typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
    bool, unsigned);

/*
 * bool
 * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
 *     void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
 */
typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
    bool, unsigned);

struct extent_hooks_s {
    extent_alloc_t *alloc;
    extent_dalloc_t *dalloc;
    extent_destroy_t *destroy;
    extent_commit_t *commit;
    extent_decommit_t *decommit;
    extent_purge_t *purge_lazy;
    extent_purge_t *purge_forced;
    extent_split_t *split;
    extent_merge_t *merge;
};
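extent_hooks_s gathers the hook types above into a per-arena table, letting an application take over how an arena allocates, commits, purges, splits, and merges extents of virtual memory. The table is installed and queried per arena through the "arena.<i>.extent_hooks" mallctl; an illustrative, read-only sketch (not from the vendored header):

    #include <jemalloc/jemalloc.h>

    extent_hooks_t *hooks;
    size_t sz = sizeof(hooks);
    /* Read the hook table currently installed for arena 0. */
    if (mallctl("arena.0.extent_hooks", &hooks, &sz, NULL, 0) == 0) {
        /* A fully populated replacement table could be written back through
         * the newp/newlen arguments of the same control. */
    }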

/*
 * By default application code must explicitly refer to mangled symbol names,
@@ -112,32 +356,28 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define aligned_alloc je_aligned_alloc
# define calloc je_calloc
# define dallocx je_dallocx
# define free je_free
# define mallctl je_mallctl
# define mallctlbymib je_mallctlbymib
# define mallctlnametomib je_mallctlnametomib
# define malloc je_malloc
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
# define mallocx je_mallocx
# define nallocx je_nallocx
# define posix_memalign je_posix_memalign
# define rallocx je_rallocx
# define realloc je_realloc
# define sallocx je_sallocx
# define sdallocx je_sdallocx
# define xallocx je_xallocx
# define memalign je_memalign
# define valloc je_valloc
# define allocm je_allocm
# define dallocm je_dallocm
# define nallocm je_nallocm
# define rallocm je_rallocm
# define sallocm je_sallocm
#endif

/*
@@ -148,35 +388,31 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
 * and/or --with-jemalloc-prefix.
 */
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_aligned_alloc
# undef je_calloc
# undef je_dallocx
# undef je_free
# undef je_mallctl
# undef je_mallctlbymib
# undef je_mallctlnametomib
# undef je_malloc
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_malloc_stats_print
# undef je_malloc_usable_size
# undef je_mallocx
# undef je_nallocx
# undef je_posix_memalign
# undef je_rallocx
# undef je_realloc
# undef je_sallocx
# undef je_sdallocx
# undef je_xallocx
# undef je_memalign
# undef je_valloc
# undef je_allocm
# undef je_dallocm
# undef je_nallocm
# undef je_rallocm
# undef je_sallocm
#endif

#ifdef __cplusplus
};
}
#endif
#endif /* JEMALLOC_H_ */