Mirror of https://github.com/mod-playerbots/azerothcore-wotlk.git
Synced 2026-01-21 20:56:23 +00:00
Directory Structure [step 1]: moving files
Working on #672. NOTE: this commit cannot be compiled!
1063 deps/jemalloc/include/jemalloc/internal/arena.h (vendored, new file)
File diff suppressed because it is too large
304 deps/jemalloc/include/jemalloc/internal/atomic.h (vendored, new file)
@@ -0,0 +1,304 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
#define atomic_read_z(p) atomic_add_z(p, 0)
#define atomic_read_u(p) atomic_add_u(p, 0)

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
size_t atomic_add_z(size_t *p, size_t x);
size_t atomic_sub_z(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_sub_and_fetch(p, x));
}
# elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (InterlockedExchangeAdd64(p, x));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (InterlockedExchangeAdd64(p, -((int64_t)x)));
}
# elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
# elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    x = (uint64_t)(-(int64_t)x);
    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    /*
     * atomic_fetchadd_64() doesn't exist, but we only ever use this
     * function on LP64 systems, so atomic_fetchadd_long() will do.
     */
    assert(sizeof(uint64_t) == sizeof(unsigned long));

    return (atomic_fetchadd_long(p, (unsigned long)x) + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    assert(sizeof(uint64_t) == sizeof(unsigned long));

    return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}
# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_sub_and_fetch(p, x));
}
# else
# error "Missing implementation for 64-bit atomic operations"
# endif
#endif

/******************************************************************************/
/* 32-bit operations. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_sub_and_fetch(p, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (InterlockedExchangeAdd(p, x));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (InterlockedExchangeAdd(p, -((int32_t)x)));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    x = (uint32_t)(-(int32_t)x);
    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (atomic_fetchadd_32(p, x) + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}
#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_sub_and_fetch(p, x));
}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif

/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t
atomic_add_z(size_t *p, size_t x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
    return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE size_t
atomic_sub_z(size_t *p, size_t x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((size_t)atomic_add_uint64((uint64_t *)p,
        (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
    return ((size_t)atomic_add_uint32((uint32_t *)p,
        (uint32_t)-((int32_t)x)));
#endif
}

/******************************************************************************/
/* unsigned operations. */
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
    return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
    return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
    return ((unsigned)atomic_add_uint64((uint64_t *)p,
        (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
    return ((unsigned)atomic_add_uint32((uint32_t *)p,
        (uint32_t)-((int32_t)x)));
#endif
}
/******************************************************************************/
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
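The atomic_read_*() macros above work because an atomic add of 0 is a full read-modify-write that returns the current value. A minimal standalone sketch of the same trick, using the __sync builtins this header selects on GCC (names here are illustrative, not part of this commit):

/* Sketch: read-via-add-zero, as in the atomic_read_*() macros above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t counter;

int
main(void)
{
    __sync_add_and_fetch(&counter, 5);  /* like atomic_add_uint32(&counter, 5) */
    __sync_sub_and_fetch(&counter, 2);  /* like atomic_sub_uint32(&counter, 2) */
    /* atomic_read_uint32(&counter) expands to an add of 0: */
    printf("%u\n", (unsigned)__sync_add_and_fetch(&counter, 0)); /* prints 3 */
    return 0;
}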
26 deps/jemalloc/include/jemalloc/internal/base.h (vendored, new file)
@@ -0,0 +1,26 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void *base_alloc(size_t size);
void *base_calloc(size_t number, size_t size);
extent_node_t *base_node_alloc(void);
void base_node_dealloc(extent_node_t *node);
bool base_boot(void);
void base_prefork(void);
void base_postfork_parent(void);
void base_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
184 deps/jemalloc/include/jemalloc/internal/bitmap.h (vendored, new file)
@@ -0,0 +1,184 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS

typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG

/* Number of bits per group. */
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)

/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
    (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
    + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct bitmap_level_s {
    /* Offset of this level's groups within the array of groups. */
    size_t group_offset;
};

struct bitmap_info_s {
    /* Logical number of bits in bitmap (stored at bottom level). */
    size_t nbits;

    /* Number of levels necessary for nbits. */
    unsigned nlevels;

    /*
     * Only the first (nlevels+1) elements are used, and levels are ordered
     * bottom to top (e.g. the bottom level is stored in levels[0]).
     */
    bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
size_t bitmap_info_ngroups(const bitmap_info_t *binfo);
size_t bitmap_size(size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
    unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
    bitmap_t rg = bitmap[rgoff];
    /* The bitmap is full iff the root group is 0. */
    return (rg == 0);
}

JEMALLOC_INLINE bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
    size_t goff;
    bitmap_t g;

    assert(bit < binfo->nbits);
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    g = bitmap[goff];
    return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
}

JEMALLOC_INLINE void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
    size_t goff;
    bitmap_t *gp;
    bitmap_t g;

    assert(bit < binfo->nbits);
    assert(bitmap_get(bitmap, binfo, bit) == false);
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    gp = &bitmap[goff];
    g = *gp;
    assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
    g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
    *gp = g;
    assert(bitmap_get(bitmap, binfo, bit));
    /* Propagate group state transitions up the tree. */
    if (g == 0) {
        unsigned i;
        for (i = 1; i < binfo->nlevels; i++) {
            bit = goff;
            goff = bit >> LG_BITMAP_GROUP_NBITS;
            gp = &bitmap[binfo->levels[i].group_offset + goff];
            g = *gp;
            assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
            g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
            *gp = g;
            if (g != 0)
                break;
        }
    }
}

/* sfu: set first unset. */
JEMALLOC_INLINE size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
    size_t bit;
    bitmap_t g;
    unsigned i;

    assert(bitmap_full(bitmap, binfo) == false);

    i = binfo->nlevels - 1;
    g = bitmap[binfo->levels[i].group_offset];
    bit = ffsl(g) - 1;
    while (i > 0) {
        i--;
        g = bitmap[binfo->levels[i].group_offset + bit];
        bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
    }

    bitmap_set(bitmap, binfo, bit);
    return (bit);
}

JEMALLOC_INLINE void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
    size_t goff;
    bitmap_t *gp;
    bitmap_t g;
    bool propagate;

    assert(bit < binfo->nbits);
    assert(bitmap_get(bitmap, binfo, bit));
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    gp = &bitmap[goff];
    g = *gp;
    propagate = (g == 0);
    assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
    g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
    *gp = g;
    assert(bitmap_get(bitmap, binfo, bit) == false);
    /* Propagate group state transitions up the tree. */
    if (propagate) {
        unsigned i;
        for (i = 1; i < binfo->nlevels; i++) {
            bit = goff;
            goff = bit >> LG_BITMAP_GROUP_NBITS;
            gp = &bitmap[binfo->levels[i].group_offset + goff];
            g = *gp;
            propagate = (g == 0);
            assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
                == 0);
            g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
            *gp = g;
            if (propagate == false)
                break;
        }
    }
}

#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
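Note that this bitmap stores bits inverted: a 1 bit means "unset" (free), which is why bitmap_sfu() above can use ffsl() at each level to find the first free position. A standalone one-group sketch of that search, using __builtin_ffsl in place of ffsl (illustrative only, not part of this commit):

/* Sketch: one-group "set first unset", mirroring bitmap_sfu() above. */
#include <stdio.h>

int
main(void)
{
    unsigned long g = ~0UL;                    /* all bits 1 = all unset/free */
    for (int i = 0; i < 3; i++) {
        int bit = __builtin_ffsl((long)g) - 1; /* first free bit, as ffsl(g) - 1 */
        g ^= 1UL << bit;                       /* flip to 0 = mark it set */
        printf("sfu -> %d\n", bit);            /* prints 0, 1, 2 */
    }
    return 0;
}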
63 deps/jemalloc/include/jemalloc/internal/chunk.h (vendored, new file)
@@ -0,0 +1,63 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * Size and alignment of memory chunks that are allocated by the OS's virtual
 * memory system.
 */
#define LG_CHUNK_DEFAULT 22

/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
    ((void *)((uintptr_t)(a) & ~chunksize_mask))

/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a) \
    ((size_t)((uintptr_t)(a) & chunksize_mask))

/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s) \
    (((s) + chunksize_mask) & ~chunksize_mask)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern size_t opt_lg_chunk;
extern const char *opt_dss;

/* Protects stats_chunks; currently not used for any other purpose. */
extern malloc_mutex_t chunks_mtx;
/* Chunk statistics. */
extern chunk_stats_t stats_chunks;

extern rtree_t *chunks_rtree;

extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t arena_maxclass; /* Max size class for arenas. */

void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec);
void chunk_unmap(void *chunk, size_t size);
void chunk_dealloc(void *chunk, size_t size, bool unmap);
bool chunk_boot(void);
void chunk_prefork(void);
void chunk_postfork_parent(void);
void chunk_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

#include "jemalloc/internal/chunk_dss.h"
#include "jemalloc/internal/chunk_mmap.h"
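The CHUNK_* macros above are plain power-of-two mask arithmetic. A standalone sketch with LG_CHUNK_DEFAULT's value of 22 (4 MiB chunks) hard-coded; in the real allocator, chunksize and chunksize_mask are set at boot from opt_lg_chunk:

/* Sketch: the chunk base/offset/ceiling arithmetic from the macros above. */
#include <stdint.h>
#include <stdio.h>

#define LG_CHUNK 22  /* LG_CHUNK_DEFAULT above */
static const size_t chunksize_mask = ((size_t)1 << LG_CHUNK) - 1;

#define CHUNK_ADDR2BASE(a)   ((void *)((uintptr_t)(a) & ~chunksize_mask))
#define CHUNK_ADDR2OFFSET(a) ((size_t)((uintptr_t)(a) & chunksize_mask))
#define CHUNK_CEILING(s)     (((s) + chunksize_mask) & ~chunksize_mask)

int
main(void)
{
    uintptr_t a = ((uintptr_t)4 << LG_CHUNK) + 123; /* 123 bytes into chunk 4 */
    printf("base=%p off=%zu ceil(5000000)=%zu\n", CHUNK_ADDR2BASE(a),
        CHUNK_ADDR2OFFSET(a), CHUNK_CEILING((size_t)5000000)); /* ceil = 8388608 */
    return 0;
}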
38 deps/jemalloc/include/jemalloc/internal/chunk_dss.h (vendored, new file)
@@ -0,0 +1,38 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef enum {
    dss_prec_disabled = 0,
    dss_prec_primary = 1,
    dss_prec_secondary = 2,

    dss_prec_limit = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

extern const char *dss_prec_names[];

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
bool chunk_in_dss(void *chunk);
bool chunk_dss_boot(void);
void chunk_dss_prefork(void);
void chunk_dss_postfork_parent(void);
void chunk_dss_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
22 deps/jemalloc/include/jemalloc/internal/chunk_mmap.h (vendored, new file)
@@ -0,0 +1,22 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

bool pages_purge(void *addr, size_t length);

void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
bool chunk_dealloc_mmap(void *chunk, size_t size);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
88 deps/jemalloc/include/jemalloc/internal/ckh.h (vendored, new file)
@@ -0,0 +1,88 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct ckh_s ckh_t;
typedef struct ckhc_s ckhc_t;

/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);

/* Maintain counters used to get an idea of performance. */
/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
/* #define CKH_VERBOSE */

/*
 * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
 * one bucket per L1 cache line.
 */
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Hash table cell. */
struct ckhc_s {
    const void *key;
    const void *data;
};

struct ckh_s {
#ifdef CKH_COUNT
    /* Counters used to get an idea of performance. */
    uint64_t ngrows;
    uint64_t nshrinks;
    uint64_t nshrinkfails;
    uint64_t ninserts;
    uint64_t nrelocs;
#endif

    /* Used for pseudo-random number generation. */
#define CKH_A 1103515241
#define CKH_C 12347
    uint32_t prng_state;

    /* Total number of items. */
    size_t count;

    /*
     * Minimum and current number of hash table buckets. There are
     * 2^LG_CKH_BUCKET_CELLS cells per bucket.
     */
    unsigned lg_minbuckets;
    unsigned lg_curbuckets;

    /* Hash and comparison functions. */
    ckh_hash_t *hash;
    ckh_keycomp_t *keycomp;

    /* Hash table with 2^lg_curbuckets buckets. */
    ckhc_t *tab;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
    ckh_keycomp_t *keycomp);
void ckh_delete(ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
    void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool ckh_pointer_keycomp(const void *k1, const void *k2);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
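The LG_CKH_BUCKET_CELLS formula above sizes one bucket to one L1 cache line. A quick standalone check, assuming 64-byte cache lines (LG_CACHELINE == 6) and 8-byte pointers (LG_SIZEOF_PTR == 3); neither constant is defined in this header itself:

/* Sketch: 2^(6-3-1) = 4 cells, each two pointers, = 64 bytes = one line. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define LG_CACHELINE 6   /* assumption: 64-byte line */
#define LG_SIZEOF_PTR 3  /* assumption: 8-byte pointer */
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)

struct ckhc { const void *key; const void *data; };  /* one cell */

int
main(void)
{
    size_t bucket = ((size_t)1 << LG_CKH_BUCKET_CELLS) * sizeof(struct ckhc);
    assert(bucket == ((size_t)1 << LG_CACHELINE)); /* 4 cells * 16 B = 64 B */
    printf("cells/bucket=%u bucket=%zu bytes\n",
        1u << LG_CKH_BUCKET_CELLS, bucket);
    return 0;
}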
117 deps/jemalloc/include/jemalloc/internal/ctl.h (vendored, new file)
@@ -0,0 +1,117 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_named_node_s ctl_named_node_t;
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct ctl_node_s {
    bool named;
};

struct ctl_named_node_s {
    struct ctl_node_s node;
    const char *name;
    /* If (nchildren == 0), this is a terminal node. */
    unsigned nchildren;
    const ctl_node_t *children;
    int (*ctl)(const size_t *, size_t, void *, size_t *,
        void *, size_t);
};

struct ctl_indexed_node_s {
    struct ctl_node_s node;
    const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
};

struct ctl_arena_stats_s {
    bool initialized;
    unsigned nthreads;
    const char *dss;
    size_t pactive;
    size_t pdirty;
    arena_stats_t astats;

    /* Aggregate stats for small size classes, based on bin stats. */
    size_t allocated_small;
    uint64_t nmalloc_small;
    uint64_t ndalloc_small;
    uint64_t nrequests_small;

    malloc_bin_stats_t bstats[NBINS];
    malloc_large_stats_t *lstats; /* nlclasses elements. */
};

struct ctl_stats_s {
    size_t allocated;
    size_t active;
    size_t mapped;
    struct {
        size_t current; /* stats_chunks.curchunks */
        uint64_t total; /* stats_chunks.nchunks */
        size_t high; /* stats_chunks.highchunks */
    } chunks;
    struct {
        size_t allocated; /* huge_allocated */
        uint64_t nmalloc; /* huge_nmalloc */
        uint64_t ndalloc; /* huge_ndalloc */
    } huge;
    unsigned narenas;
    ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen);
int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);

int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(void);
void ctl_postfork_parent(void);
void ctl_postfork_child(void);

#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
    if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
        != 0) { \
        malloc_printf( \
            "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
            name); \
        abort(); \
    } \
} while (0)

#define xmallctlnametomib(name, mibp, miblenp) do { \
    if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
        malloc_printf("<jemalloc>: Failure in " \
            "xmallctlnametomib(\"%s\", ...)\n", name); \
        abort(); \
    } \
} while (0)

#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
    if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
        newlen) != 0) { \
        malloc_write( \
            "<jemalloc>: Failure in xmallctlbymib()\n"); \
        abort(); \
    } \
} while (0)

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
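The xmallctl() wrappers above abort on any nonzero return from the je_-prefixed mallctl entry points. For comparison, a minimal caller of jemalloc's public mallctl() API, which is what these names resolve to in an unprefixed build (link with -ljemalloc):

/* Sketch: a plain mallctl() call, the operation xmallctl() wraps above. */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    const char *version;
    size_t len = sizeof(version);
    /* "version" reads a const char * naming the jemalloc release. */
    if (mallctl("version", &version, &len, NULL, 0) != 0) {
        fprintf(stderr, "mallctl(\"version\") failed\n");
        return 1;
    }
    printf("jemalloc %s\n", version);
    return 0;
}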
46 deps/jemalloc/include/jemalloc/internal/extent.h (vendored, new file)
@@ -0,0 +1,46 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct extent_node_s extent_node_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Tree of extents. */
struct extent_node_s {
    /* Linkage for the size/address-ordered tree. */
    rb_node(extent_node_t) link_szad;

    /* Linkage for the address-ordered tree. */
    rb_node(extent_node_t) link_ad;

    /* Profile counters, used for huge objects. */
    prof_ctx_t *prof_ctx;

    /* Pointer to the extent that this tree node is responsible for. */
    void *addr;

    /* Total region size. */
    size_t size;

    /* True if zero-filled; used by chunk recycling code. */
    bool zeroed;
};
typedef rb_tree(extent_node_t) extent_tree_t;

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)

rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
335 deps/jemalloc/include/jemalloc/internal/hash.h (vendored, new file)
@@ -0,0 +1,335 @@
/*
 * The following hash function is based on MurmurHash3, placed into the public
 * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for
 * details.
 */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
void hash_x86_128(const void *key, const int len, uint32_t seed,
    uint64_t r_out[2]);
void hash_x64_128(const void *key, const int len, const uint32_t seed,
    uint64_t r_out[2]);
void hash(const void *key, size_t len, const uint32_t seed,
    size_t r_hash[2]);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/******************************************************************************/
/* Internal implementation. */
JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{

    return (x << r) | (x >> (32 - r));
}

JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
    return (x << r) | (x >> (64 - r));
}

JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{

    return (p[i]);
}

JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{

    return (p[i]);
}

JEMALLOC_INLINE uint32_t
hash_fmix_32(uint32_t h)
{

    h ^= h >> 16;
    h *= 0x85ebca6b;
    h ^= h >> 13;
    h *= 0xc2b2ae35;
    h ^= h >> 16;

    return (h);
}

JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{

    k ^= k >> 33;
    k *= QU(0xff51afd7ed558ccdLLU);
    k ^= k >> 33;
    k *= QU(0xc4ceb9fe1a85ec53LLU);
    k ^= k >> 33;

    return (k);
}

JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
    const uint8_t *data = (const uint8_t *) key;
    const int nblocks = len / 4;

    uint32_t h1 = seed;

    const uint32_t c1 = 0xcc9e2d51;
    const uint32_t c2 = 0x1b873593;

    /* body */
    {
        const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
        int i;

        for (i = -nblocks; i; i++) {
            uint32_t k1 = hash_get_block_32(blocks, i);

            k1 *= c1;
            k1 = hash_rotl_32(k1, 15);
            k1 *= c2;

            h1 ^= k1;
            h1 = hash_rotl_32(h1, 13);
            h1 = h1*5 + 0xe6546b64;
        }
    }

    /* tail */
    {
        const uint8_t *tail = (const uint8_t *) (data + nblocks*4);

        uint32_t k1 = 0;

        switch (len & 3) {
        case 3: k1 ^= tail[2] << 16;
        case 2: k1 ^= tail[1] << 8;
        case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
            k1 *= c2; h1 ^= k1;
        }
    }

    /* finalization */
    h1 ^= len;

    h1 = hash_fmix_32(h1);

    return (h1);
}

UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
    uint64_t r_out[2])
{
    const uint8_t * data = (const uint8_t *) key;
    const int nblocks = len / 16;

    uint32_t h1 = seed;
    uint32_t h2 = seed;
    uint32_t h3 = seed;
    uint32_t h4 = seed;

    const uint32_t c1 = 0x239b961b;
    const uint32_t c2 = 0xab0e9789;
    const uint32_t c3 = 0x38b34ae5;
    const uint32_t c4 = 0xa1e38b93;

    /* body */
    {
        const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
        int i;

        for (i = -nblocks; i; i++) {
            uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
            uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
            uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
            uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);

            k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;

            h1 = hash_rotl_32(h1, 19); h1 += h2;
            h1 = h1*5 + 0x561ccd1b;

            k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;

            h2 = hash_rotl_32(h2, 17); h2 += h3;
            h2 = h2*5 + 0x0bcaa747;

            k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;

            h3 = hash_rotl_32(h3, 15); h3 += h4;
            h3 = h3*5 + 0x96cd1c35;

            k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;

            h4 = hash_rotl_32(h4, 13); h4 += h1;
            h4 = h4*5 + 0x32ac3b17;
        }
    }

    /* tail */
    {
        const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
        uint32_t k1 = 0;
        uint32_t k2 = 0;
        uint32_t k3 = 0;
        uint32_t k4 = 0;

        switch (len & 15) {
        case 15: k4 ^= tail[14] << 16;
        case 14: k4 ^= tail[13] << 8;
        case 13: k4 ^= tail[12] << 0;
            k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;

        case 12: k3 ^= tail[11] << 24;
        case 11: k3 ^= tail[10] << 16;
        case 10: k3 ^= tail[ 9] << 8;
        case 9: k3 ^= tail[ 8] << 0;
            k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;

        case 8: k2 ^= tail[ 7] << 24;
        case 7: k2 ^= tail[ 6] << 16;
        case 6: k2 ^= tail[ 5] << 8;
        case 5: k2 ^= tail[ 4] << 0;
            k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;

        case 4: k1 ^= tail[ 3] << 24;
        case 3: k1 ^= tail[ 2] << 16;
        case 2: k1 ^= tail[ 1] << 8;
        case 1: k1 ^= tail[ 0] << 0;
            k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
        }
    }

    /* finalization */
    h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

    h1 += h2; h1 += h3; h1 += h4;
    h2 += h1; h3 += h1; h4 += h1;

    h1 = hash_fmix_32(h1);
    h2 = hash_fmix_32(h2);
    h3 = hash_fmix_32(h3);
    h4 = hash_fmix_32(h4);

    h1 += h2; h1 += h3; h1 += h4;
    h2 += h1; h3 += h1; h4 += h1;

    r_out[0] = (((uint64_t) h2) << 32) | h1;
    r_out[1] = (((uint64_t) h4) << 32) | h3;
}

UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
    uint64_t r_out[2])
{
    const uint8_t *data = (const uint8_t *) key;
    const int nblocks = len / 16;

    uint64_t h1 = seed;
    uint64_t h2 = seed;

    const uint64_t c1 = QU(0x87c37b91114253d5LLU);
    const uint64_t c2 = QU(0x4cf5ad432745937fLLU);

    /* body */
    {
        const uint64_t *blocks = (const uint64_t *) (data);
        int i;

        for (i = 0; i < nblocks; i++) {
            uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
            uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);

            k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;

            h1 = hash_rotl_64(h1, 27); h1 += h2;
            h1 = h1*5 + 0x52dce729;

            k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

            h2 = hash_rotl_64(h2, 31); h2 += h1;
            h2 = h2*5 + 0x38495ab5;
        }
    }

    /* tail */
    {
        const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
        uint64_t k1 = 0;
        uint64_t k2 = 0;

        switch (len & 15) {
        case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
        case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
        case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
        case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
        case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
        case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
        case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
            k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

        case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
        case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
        case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
        case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
        case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
        case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
        case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
        case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
            k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
        }
    }

    /* finalization */
    h1 ^= len; h2 ^= len;

    h1 += h2;
    h2 += h1;

    h1 = hash_fmix_64(h1);
    h2 = hash_fmix_64(h2);

    h1 += h2;
    h2 += h1;

    r_out[0] = h1;
    r_out[1] = h2;
}

/******************************************************************************/
/* API. */
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
    hash_x64_128(key, len, seed, (uint64_t *)r_hash);
#else
    uint64_t hashes[2];
    hash_x86_128(key, len, seed, hashes);
    r_hash[0] = (size_t)hashes[0];
    r_hash[1] = (size_t)hashes[1];
#endif
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
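hash_fmix_32() above is MurmurHash3's 32-bit finalizer; its job is to avalanche the accumulated state so that near-identical keys produce unrelated hashes. A standalone copy for experimentation, using the same constants as above:

/* Sketch: MurmurHash3's fmix32 finalizer, matching hash_fmix_32() above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t
fmix32(uint32_t h)
{
    h ^= h >> 16;
    h *= 0x85ebca6b;
    h ^= h >> 13;
    h *= 0xc2b2ae35;
    h ^= h >> 16;
    return h;
}

int
main(void)
{
    /* Inputs differing in a single bit avalanche to unrelated outputs. */
    printf("%08x %08x\n", fmix32(1), fmix32(2));
    return 0;
}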
46 deps/jemalloc/include/jemalloc/internal/huge.h (vendored, new file)
@@ -0,0 +1,46 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

/* Huge allocation statistics. */
extern uint64_t huge_nmalloc;
extern uint64_t huge_ndalloc;
extern size_t huge_allocated;

/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;

void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
void *huge_palloc(size_t size, size_t alignment, bool zero,
    dss_prec_t dss_prec);
bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
    size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
dss_prec_t huge_dss_prec_get(arena_t *arena);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool huge_boot(void);
void huge_prefork(void);
void huge_postfork_parent(void);
void huge_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
1059 deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h (vendored, new file)
File diff suppressed because it is too large
115 deps/jemalloc/include/jemalloc/internal/mb.h (vendored, new file)
@@ -0,0 +1,115 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void mb_write(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
 * According to the Intel Architecture Software Developer's Manual, current
 * processors execute instructions in order from the perspective of other
 * processors in a multiprocessor system, but 1) Intel reserves the right to
 * change that, and 2) the compiler's optimizer could re-order instructions if
 * there weren't some form of barrier. Therefore, even if running on an
 * architecture that does not need memory barriers (everything through at least
 * i686), an "optimizer barrier" is necessary.
 */
JEMALLOC_INLINE void
mb_write(void)
{

# if 0
    /* This is a true memory barrier. */
    asm volatile ("pusha;"
        "xor %%eax,%%eax;"
        "cpuid;"
        "popa;"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
#else
    /*
     * This is hopefully enough to keep the compiler from reordering
     * instructions around this one.
     */
    asm volatile ("nop;"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
#endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
mb_write(void)
{

    asm volatile ("sfence"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#elif defined(__powerpc__)
JEMALLOC_INLINE void
mb_write(void)
{

    asm volatile ("eieio"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#elif defined(__sparc64__)
JEMALLOC_INLINE void
mb_write(void)
{

    asm volatile ("membar #StoreStore"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#elif defined(__tile__)
JEMALLOC_INLINE void
mb_write(void)
{

    __sync_synchronize();
}
#else
/*
 * This is much slower than a simple memory barrier, but the semantics of mutex
 * unlock make this work.
 */
JEMALLOC_INLINE void
mb_write(void)
{
    malloc_mutex_t mtx;

    malloc_mutex_init(&mtx);
    malloc_mutex_lock(&mtx);
    malloc_mutex_unlock(&mtx);
}
#endif
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
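The "nop" variant of mb_write() above is effectively a compiler-only barrier: it is the "memory" clobber, not the instruction, that stops the optimizer from reordering accesses across it. A standalone sketch of that idea (compiler barrier only, no CPU fence; names are illustrative):

/* Sketch: a GCC/Clang compiler barrier, the "optimizer barrier" above. */
#include <stdio.h>

static int data, flag;

static void
publish(int v)
{
    data = v;
    /* The empty asm with a "memory" clobber keeps the store to data first
     * from the compiler's point of view; it emits no instruction at all. */
    asm volatile ("" : : : "memory");
    flag = 1;
}

int
main(void)
{
    publish(42);
    printf("%d %d\n", flag, data);
    return 0;
}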
99 deps/jemalloc/include/jemalloc/internal/mutex.h (vendored, new file)
@@ -0,0 +1,99 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct malloc_mutex_s malloc_mutex_t;

#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else
# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
    defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
#  define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
#  define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
# else
#  define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
#  define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
# endif
#endif

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct malloc_mutex_s {
#ifdef _WIN32
    CRITICAL_SECTION lock;
#elif (defined(JEMALLOC_OSSPIN))
    OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
    pthread_mutex_t lock;
    malloc_mutex_t *postponed_next;
#else
    pthread_mutex_t lock;
#endif
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# undef isthreaded /* Undo private_namespace.h definition. */
# define isthreaded true
#endif

bool malloc_mutex_init(malloc_mutex_t *mutex);
void malloc_mutex_prefork(malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
bool mutex_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(malloc_mutex_t *mutex);
void malloc_mutex_unlock(malloc_mutex_t *mutex);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
{

    if (isthreaded) {
#ifdef _WIN32
        EnterCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
        OSSpinLockLock(&mutex->lock);
#else
        pthread_mutex_lock(&mutex->lock);
#endif
    }
}

JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{

    if (isthreaded) {
#ifdef _WIN32
        LeaveCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
        OSSpinLockUnlock(&mutex->lock);
#else
        pthread_mutex_unlock(&mutex->lock);
#endif
    }
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
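The isthreaded test in malloc_mutex_lock()/malloc_mutex_unlock() above (enabled by JEMALLOC_LAZY_LOCK) turns locking into a single predictable branch until the process actually creates a second thread. A standalone sketch of the same fast-path pattern with pthreads; the names mirror the header but this is not its real build:

/* Sketch: lazy locking - skip the mutex while single-threaded. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool isthreaded = false;  /* would flip to true on first thread creation */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void
my_mutex_lock(void)
{
    if (isthreaded)
        pthread_mutex_lock(&lock);  /* pay for the real lock only when needed */
}

static void
my_mutex_unlock(void)
{
    if (isthreaded)
        pthread_mutex_unlock(&lock);
}

int
main(void)
{
    my_mutex_lock();   /* single-threaded: both calls reduce to a branch */
    my_mutex_unlock();
    printf("ok\n");
    return 0;
}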
413 deps/jemalloc/include/jemalloc/internal/private_namespace.h (vendored, new file)
@@ -0,0 +1,413 @@
#define a0calloc JEMALLOC_N(a0calloc)
#define a0free JEMALLOC_N(a0free)
#define a0malloc JEMALLOC_N(a0malloc)
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
#define arena_prefork JEMALLOC_N(arena_prefork)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
#define arenas_tls JEMALLOC_N(arenas_tls)
#define arenas_tsd JEMALLOC_N(arenas_tsd)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper)
#define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head)
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
#define atomic_add_z JEMALLOC_N(atomic_add_z)
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_calloc JEMALLOC_N(base_calloc)
#define base_node_alloc JEMALLOC_N(base_node_alloc)
#define base_node_dealloc JEMALLOC_N(base_node_dealloc)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
#define base_prefork JEMALLOC_N(base_prefork)
#define bitmap_full JEMALLOC_N(bitmap_full)
#define bitmap_get JEMALLOC_N(bitmap_get)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups)
#define bitmap_init JEMALLOC_N(bitmap_init)
#define bitmap_set JEMALLOC_N(bitmap_set)
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
#define bitmap_size JEMALLOC_N(bitmap_size)
#define bitmap_unset JEMALLOC_N(bitmap_unset)
#define bt_init JEMALLOC_N(bt_init)
#define buferror JEMALLOC_N(buferror)
#define choose_arena JEMALLOC_N(choose_arena)
#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
#define chunk_alloc JEMALLOC_N(chunk_alloc)
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dealloc JEMALLOC_N(chunk_dealloc)
#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_npages JEMALLOC_N(chunk_npages)
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
#define chunk_prefork JEMALLOC_N(chunk_prefork)
#define chunk_unmap JEMALLOC_N(chunk_unmap)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
#define chunksize_mask JEMALLOC_N(chunksize_mask)
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
#define ckh_insert JEMALLOC_N(ckh_insert)
#define ckh_isearch JEMALLOC_N(ckh_isearch)
#define ckh_iter JEMALLOC_N(ckh_iter)
#define ckh_new JEMALLOC_N(ckh_new)
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
#define ckh_remove JEMALLOC_N(ckh_remove)
#define ckh_search JEMALLOC_N(ckh_search)
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
|
||||
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
|
||||
#define ctl_boot JEMALLOC_N(ctl_boot)
|
||||
#define ctl_bymib JEMALLOC_N(ctl_bymib)
|
||||
#define ctl_byname JEMALLOC_N(ctl_byname)
|
||||
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
|
||||
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
|
||||
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
|
||||
#define ctl_prefork JEMALLOC_N(ctl_prefork)
|
||||
#define dss_prec_names JEMALLOC_N(dss_prec_names)
|
||||
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
|
||||
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
|
||||
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
|
||||
#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse)
|
||||
#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start)
|
||||
#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last)
|
||||
#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new)
|
||||
#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next)
|
||||
#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch)
|
||||
#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev)
|
||||
#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch)
|
||||
#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove)
|
||||
#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter)
|
||||
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
|
||||
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
|
||||
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
|
||||
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
|
||||
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
|
||||
#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
|
||||
#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
|
||||
#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
|
||||
#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
|
||||
#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
|
||||
#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
|
||||
#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
|
||||
#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
|
||||
#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
|
||||
#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
|
||||
#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
|
||||
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
|
||||
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
|
||||
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
|
||||
#define get_errno JEMALLOC_N(get_errno)
|
||||
#define hash JEMALLOC_N(hash)
|
||||
#define hash_fmix_32 JEMALLOC_N(hash_fmix_32)
|
||||
#define hash_fmix_64 JEMALLOC_N(hash_fmix_64)
|
||||
#define hash_get_block_32 JEMALLOC_N(hash_get_block_32)
|
||||
#define hash_get_block_64 JEMALLOC_N(hash_get_block_64)
|
||||
#define hash_rotl_32 JEMALLOC_N(hash_rotl_32)
|
||||
#define hash_rotl_64 JEMALLOC_N(hash_rotl_64)
|
||||
#define hash_x64_128 JEMALLOC_N(hash_x64_128)
|
||||
#define hash_x86_128 JEMALLOC_N(hash_x86_128)
|
||||
#define hash_x86_32 JEMALLOC_N(hash_x86_32)
|
||||
#define huge_allocated JEMALLOC_N(huge_allocated)
|
||||
#define huge_boot JEMALLOC_N(huge_boot)
|
||||
#define huge_dalloc JEMALLOC_N(huge_dalloc)
|
||||
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
|
||||
#define huge_dss_prec_get JEMALLOC_N(huge_dss_prec_get)
|
||||
#define huge_malloc JEMALLOC_N(huge_malloc)
|
||||
#define huge_mtx JEMALLOC_N(huge_mtx)
|
||||
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
|
||||
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
|
||||
#define huge_palloc JEMALLOC_N(huge_palloc)
|
||||
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
|
||||
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
|
||||
#define huge_prefork JEMALLOC_N(huge_prefork)
|
||||
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
|
||||
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
|
||||
#define huge_ralloc JEMALLOC_N(huge_ralloc)
|
||||
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
|
||||
#define huge_salloc JEMALLOC_N(huge_salloc)
|
||||
#define iallocm JEMALLOC_N(iallocm)
|
||||
#define icalloc JEMALLOC_N(icalloc)
|
||||
#define icalloct JEMALLOC_N(icalloct)
|
||||
#define idalloc JEMALLOC_N(idalloc)
|
||||
#define idalloct JEMALLOC_N(idalloct)
|
||||
#define imalloc JEMALLOC_N(imalloc)
|
||||
#define imalloct JEMALLOC_N(imalloct)
|
||||
#define ipalloc JEMALLOC_N(ipalloc)
|
||||
#define ipalloct JEMALLOC_N(ipalloct)
|
||||
#define iqalloc JEMALLOC_N(iqalloc)
|
||||
#define iqalloct JEMALLOC_N(iqalloct)
|
||||
#define iralloc JEMALLOC_N(iralloc)
|
||||
#define iralloct JEMALLOC_N(iralloct)
|
||||
#define iralloct_realign JEMALLOC_N(iralloct_realign)
|
||||
#define isalloc JEMALLOC_N(isalloc)
|
||||
#define isthreaded JEMALLOC_N(isthreaded)
|
||||
#define ivsalloc JEMALLOC_N(ivsalloc)
|
||||
#define ixalloc JEMALLOC_N(ixalloc)
|
||||
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
|
||||
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
|
||||
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
|
||||
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
|
||||
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
|
||||
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
|
||||
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
|
||||
#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
|
||||
#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
|
||||
#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
|
||||
#define malloc_printf JEMALLOC_N(malloc_printf)
|
||||
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
|
||||
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
|
||||
#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
|
||||
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
|
||||
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
|
||||
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
|
||||
#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
|
||||
#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
|
||||
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
|
||||
#define malloc_write JEMALLOC_N(malloc_write)
|
||||
#define map_bias JEMALLOC_N(map_bias)
|
||||
#define mb_write JEMALLOC_N(mb_write)
|
||||
#define mutex_boot JEMALLOC_N(mutex_boot)
|
||||
#define narenas_auto JEMALLOC_N(narenas_auto)
|
||||
#define narenas_total JEMALLOC_N(narenas_total)
|
||||
#define narenas_total_get JEMALLOC_N(narenas_total_get)
|
||||
#define ncpus JEMALLOC_N(ncpus)
|
||||
#define nhbins JEMALLOC_N(nhbins)
|
||||
#define opt_abort JEMALLOC_N(opt_abort)
|
||||
#define opt_dss JEMALLOC_N(opt_dss)
|
||||
#define opt_junk JEMALLOC_N(opt_junk)
|
||||
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
|
||||
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
|
||||
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
|
||||
#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
|
||||
#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
|
||||
#define opt_narenas JEMALLOC_N(opt_narenas)
|
||||
#define opt_prof JEMALLOC_N(opt_prof)
|
||||
#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
|
||||
#define opt_prof_active JEMALLOC_N(opt_prof_active)
|
||||
#define opt_prof_final JEMALLOC_N(opt_prof_final)
|
||||
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
|
||||
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
|
||||
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
|
||||
#define opt_quarantine JEMALLOC_N(opt_quarantine)
|
||||
#define opt_redzone JEMALLOC_N(opt_redzone)
|
||||
#define opt_stats_print JEMALLOC_N(opt_stats_print)
|
||||
#define opt_tcache JEMALLOC_N(opt_tcache)
|
||||
#define opt_utrace JEMALLOC_N(opt_utrace)
|
||||
#define opt_valgrind JEMALLOC_N(opt_valgrind)
|
||||
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
|
||||
#define opt_zero JEMALLOC_N(opt_zero)
|
||||
#define p2rz JEMALLOC_N(p2rz)
|
||||
#define pages_purge JEMALLOC_N(pages_purge)
|
||||
#define pow2_ceil JEMALLOC_N(pow2_ceil)
|
||||
#define prof_backtrace JEMALLOC_N(prof_backtrace)
|
||||
#define prof_boot0 JEMALLOC_N(prof_boot0)
|
||||
#define prof_boot1 JEMALLOC_N(prof_boot1)
|
||||
#define prof_boot2 JEMALLOC_N(prof_boot2)
|
||||
#define prof_bt_count JEMALLOC_N(prof_bt_count)
|
||||
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
|
||||
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
|
||||
#define prof_dump_open JEMALLOC_N(prof_dump_open)
|
||||
#define prof_free JEMALLOC_N(prof_free)
|
||||
#define prof_gdump JEMALLOC_N(prof_gdump)
|
||||
#define prof_idump JEMALLOC_N(prof_idump)
|
||||
#define prof_interval JEMALLOC_N(prof_interval)
|
||||
#define prof_lookup JEMALLOC_N(prof_lookup)
|
||||
#define prof_malloc JEMALLOC_N(prof_malloc)
|
||||
#define prof_mdump JEMALLOC_N(prof_mdump)
|
||||
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
|
||||
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
|
||||
#define prof_prefork JEMALLOC_N(prof_prefork)
|
||||
#define prof_promote JEMALLOC_N(prof_promote)
|
||||
#define prof_realloc JEMALLOC_N(prof_realloc)
|
||||
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
|
||||
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
|
||||
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
|
||||
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
|
||||
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
|
||||
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
|
||||
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
|
||||
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
|
||||
#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
|
||||
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
|
||||
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
|
||||
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
|
||||
#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
|
||||
#define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head)
|
||||
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
|
||||
#define quarantine JEMALLOC_N(quarantine)
|
||||
#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
|
||||
#define quarantine_boot JEMALLOC_N(quarantine_boot)
|
||||
#define quarantine_booted JEMALLOC_N(quarantine_booted)
|
||||
#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
|
||||
#define quarantine_init JEMALLOC_N(quarantine_init)
|
||||
#define quarantine_tls JEMALLOC_N(quarantine_tls)
|
||||
#define quarantine_tsd JEMALLOC_N(quarantine_tsd)
|
||||
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
|
||||
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
|
||||
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
|
||||
#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper)
|
||||
#define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head)
|
||||
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
|
||||
#define register_zone JEMALLOC_N(register_zone)
|
||||
#define rtree_delete JEMALLOC_N(rtree_delete)
|
||||
#define rtree_get JEMALLOC_N(rtree_get)
|
||||
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
|
||||
#define rtree_new JEMALLOC_N(rtree_new)
|
||||
#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
|
||||
#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
|
||||
#define rtree_prefork JEMALLOC_N(rtree_prefork)
|
||||
#define rtree_set JEMALLOC_N(rtree_set)
|
||||
#define s2u JEMALLOC_N(s2u)
|
||||
#define sa2u JEMALLOC_N(sa2u)
|
||||
#define set_errno JEMALLOC_N(set_errno)
|
||||
#define small_size2bin JEMALLOC_N(small_size2bin)
|
||||
#define stats_cactive JEMALLOC_N(stats_cactive)
|
||||
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
|
||||
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
|
||||
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
|
||||
#define stats_chunks JEMALLOC_N(stats_chunks)
|
||||
#define stats_print JEMALLOC_N(stats_print)
|
||||
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
|
||||
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
|
||||
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
|
||||
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
|
||||
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
|
||||
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
|
||||
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
|
||||
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
|
||||
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
|
||||
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
|
||||
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
|
||||
#define tcache_booted JEMALLOC_N(tcache_booted)
|
||||
#define tcache_create JEMALLOC_N(tcache_create)
|
||||
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
|
||||
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
|
||||
#define tcache_destroy JEMALLOC_N(tcache_destroy)
|
||||
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
|
||||
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
|
||||
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
|
||||
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
|
||||
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
|
||||
#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
|
||||
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
|
||||
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
|
||||
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
|
||||
#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper)
|
||||
#define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head)
|
||||
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
|
||||
#define tcache_event JEMALLOC_N(tcache_event)
|
||||
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
|
||||
#define tcache_flush JEMALLOC_N(tcache_flush)
|
||||
#define tcache_get JEMALLOC_N(tcache_get)
|
||||
#define tcache_initialized JEMALLOC_N(tcache_initialized)
|
||||
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
|
||||
#define tcache_salloc JEMALLOC_N(tcache_salloc)
|
||||
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
|
||||
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
|
||||
#define tcache_tls JEMALLOC_N(tcache_tls)
|
||||
#define tcache_tsd JEMALLOC_N(tcache_tsd)
|
||||
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
|
||||
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
|
||||
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
|
||||
#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper)
|
||||
#define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head)
|
||||
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
|
||||
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
|
||||
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
|
||||
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
|
||||
#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
|
||||
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
|
||||
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
|
||||
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
|
||||
#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper)
|
||||
#define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head)
|
||||
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
|
||||
#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
|
||||
#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
|
||||
#define u2rz JEMALLOC_N(u2rz)
|
||||
60
deps/jemalloc/include/jemalloc/internal/prng.h
vendored
Normal file
@@ -0,0 +1,60 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * Simple linear congruential pseudo-random number generator:
 *
 *   prng(y) = (a*x + c) % m
 *
 * where the following constants ensure maximal period:
 *
 *   a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
 *   c == Odd number (relatively prime to 2^n).
 *   m == 2^32
 *
 * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
 *
 * This choice of m has the disadvantage that the quality of the bits is
 * proportional to bit position.  For example, the lowest bit has a cycle of 2,
 * the next has a cycle of 4, etc.  For this reason, we prefer to use the upper
 * bits.
 *
 * Macro parameters:
 *   uint32_t r          : Result.
 *   unsigned lg_range   : (0..32], number of least significant bits to return.
 *   uint32_t state      : Seed value.
 *   const uint32_t a, c : See above discussion.
 */
#define prng32(r, lg_range, state, a, c) do { \
    assert(lg_range > 0); \
    assert(lg_range <= 32); \
    \
    r = (state * (a)) + (c); \
    state = r; \
    r >>= (32 - lg_range); \
} while (false)

/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
    assert(lg_range > 0); \
    assert(lg_range <= 64); \
    \
    r = (state * (a)) + (c); \
    state = r; \
    r >>= (64 - lg_range); \
} while (false)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
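For orientation: each macro above is a single LCG step that keeps only the high bits of the new state. A minimal standalone sketch of the 64-bit variant, assuming the Knuth constants that jemalloc's profiling code passes at its call sites (the helper name prng64_step is hypothetical, not part of the header):

#include <assert.h>
#include <stdint.h>

/* Hypothetical standalone equivalent of prng64(): advance the LCG state
 * and return the upper lg_range bits, which cycle far more slowly than
 * the low bits. */
static uint64_t
prng64_step(uint64_t *state, unsigned lg_range)
{
    uint64_t r;

    assert(lg_range > 0 && lg_range <= 64);
    r = (*state * UINT64_C(6364136223846793005)) +
        UINT64_C(1442695040888963407);
    *state = r;
    return (r >> (64 - lg_range));
}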
613
deps/jemalloc/include/jemalloc/internal/prof.h
vendored
Normal file
@@ -0,0 +1,613 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_thr_cnt_s prof_thr_cnt_t;
typedef struct prof_ctx_s prof_ctx_t;
typedef struct prof_tdata_s prof_tdata_t;

/* Option defaults. */
#ifdef JEMALLOC_PROF
#  define PROF_PREFIX_DEFAULT "jeprof"
#else
#  define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1

/*
 * Hard limit on stack backtrace depth.  The version of prof_backtrace() that
 * is based on __builtin_return_address() necessarily has a hard-coded number
 * of backtrace frame handlers, and should be kept in sync with this setting.
 */
#define PROF_BT_MAX 128

/* Maximum number of backtraces to store in each per thread LRU cache. */
#define PROF_TCMAX 1024

/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64

/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536

/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128

/*
 * Number of mutexes shared among all ctx's.  No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NCTX_LOCKS 1024

/*
 * prof_tdata pointers close to NULL are used to encode state information that
 * is used for cleaning up during thread shutdown.
 */
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct prof_bt_s {
    /* Backtrace, stored as len program counters. */
    void **vec;
    unsigned len;
};

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
    prof_bt_t *bt;
    unsigned nignore;
    unsigned max;
} prof_unwind_data_t;
#endif

struct prof_cnt_s {
    /*
     * Profiling counters.  An allocation/deallocation pair can operate on
     * different prof_thr_cnt_t objects that are linked into the same
     * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
     * negative.  In principle it is possible for the *bytes counters to
     * overflow/underflow, but a general solution would require something
     * like 128-bit counters; this implementation doesn't bother to solve
     * that problem.
     */
    int64_t curobjs;
    int64_t curbytes;
    uint64_t accumobjs;
    uint64_t accumbytes;
};

struct prof_thr_cnt_s {
    /* Linkage into prof_ctx_t's cnts_ql. */
    ql_elm(prof_thr_cnt_t) cnts_link;

    /* Linkage into thread's LRU. */
    ql_elm(prof_thr_cnt_t) lru_link;

    /*
     * Associated context.  If a thread frees an object that it did not
     * allocate, it is possible that the context is not cached in the
     * thread's hash table, in which case it must be able to look up the
     * context, insert a new prof_thr_cnt_t into the thread's hash table,
     * and link it into the prof_ctx_t's cnts_ql.
     */
    prof_ctx_t *ctx;

    /*
     * Threads use memory barriers to update the counters.  Since there is
     * only ever one writer, the only challenge is for the reader to get a
     * consistent read of the counters.
     *
     * The writer uses this series of operations:
     *
     * 1) Increment epoch to an odd number.
     * 2) Update counters.
     * 3) Increment epoch to an even number.
     *
     * The reader must assure 1) that the epoch is even while it reads the
     * counters, and 2) that the epoch doesn't change between the time it
     * starts and finishes reading the counters.
     */
    unsigned epoch;

    /* Profiling counters. */
    prof_cnt_t cnts;
};
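
/*
 * Editorial sketch, not part of the vendored header: a reader following
 * the epoch protocol above retries until it observes the same even epoch
 * on both sides of the copy.  mb_read() is a hypothetical stand-in for
 * whatever read barrier pairs with mb_write() on a given port:
 *
 *	unsigned e0, e1;
 *	prof_cnt_t snapshot;
 *	do {
 *		e0 = cnt->epoch;
 *		mb_read();
 *		snapshot = cnt->cnts;
 *		mb_read();
 *		e1 = cnt->epoch;
 *	} while (e0 != e1 || (e0 & 1U) != 0);
 */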

struct prof_ctx_s {
    /* Associated backtrace. */
    prof_bt_t *bt;

    /* Protects nlimbo, cnt_merged, and cnts_ql. */
    malloc_mutex_t *lock;

    /*
     * Number of threads that currently cause this ctx to be in a state of
     * limbo due to one of:
     *   - Initializing per thread counters associated with this ctx.
     *   - Preparing to destroy this ctx.
     *   - Dumping a heap profile that includes this ctx.
     * nlimbo must be 1 (single destroyer) in order to safely destroy the
     * ctx.
     */
    unsigned nlimbo;

    /* Temporary storage for summation during dump. */
    prof_cnt_t cnt_summed;

    /* When threads exit, they merge their stats into cnt_merged. */
    prof_cnt_t cnt_merged;

    /*
     * List of profile counters, one for each thread that has allocated in
     * this context.
     */
    ql_head(prof_thr_cnt_t) cnts_ql;

    /* Linkage for list of contexts to be dumped. */
    ql_elm(prof_ctx_t) dump_link;
};
typedef ql_head(prof_ctx_t) prof_ctx_list_t;

struct prof_tdata_s {
    /*
     * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *).  Each thread keeps a
     * cache of backtraces, with associated thread-specific prof_thr_cnt_t
     * objects.  Other threads may read the prof_thr_cnt_t contents, but no
     * others will ever write them.
     *
     * Upon thread exit, the thread must merge all the prof_thr_cnt_t
     * counter data into the associated prof_ctx_t objects, and unlink/free
     * the prof_thr_cnt_t objects.
     */
    ckh_t bt2cnt;

    /* LRU for contents of bt2cnt. */
    ql_head(prof_thr_cnt_t) lru_ql;

    /* Backtrace vector, used for calls to prof_backtrace(). */
    void **vec;

    /* Sampling state. */
    uint64_t prng_state;
    uint64_t threshold;
    uint64_t accum;

    /* State used to avoid dumping while operating on prof internals. */
    bool enq;
    bool enq_idump;
    bool enq_gdump;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_prof;
/*
 * Even if opt_prof is true, sampling can be temporarily disabled by setting
 * opt_prof_active to false.  No locking is used when updating opt_prof_active,
 * so there are no guarantees regarding how long it will take for all threads
 * to notice state changes.
 */
extern bool opt_prof_active;
extern size_t opt_lg_prof_sample;    /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump;          /* High-water memory dumping. */
extern bool opt_prof_final;          /* Final profile dumping. */
extern bool opt_prof_leak;           /* Dump leak summary at exit. */
extern bool opt_prof_accum;          /* Report cumulative bytes. */
extern char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/*
 * Profile dump interval, measured in bytes allocated.  Each arena triggers a
 * profile dump when it reaches this threshold.  The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t prof_interval;

/*
 * If true, promote small sampled objects to large objects, since small run
 * headers do not have embedded profile context pointers.
 */
extern bool prof_promote;

void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_bt_count(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
#endif
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
prof_tdata_t *prof_tdata_init(void);
void prof_tdata_cleanup(void *arg);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
void prof_prefork(void);
void prof_postfork_parent(void);
void prof_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#define PROF_ALLOC_PREP(nignore, size, ret) do { \
    prof_tdata_t *prof_tdata; \
    prof_bt_t bt; \
    \
    assert(size == s2u(size)); \
    \
    prof_tdata = prof_tdata_get(true); \
    if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
        if (prof_tdata != NULL) \
            ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
        else \
            ret = NULL; \
        break; \
    } \
    \
    if (opt_prof_active == false) { \
        /* Sampling is currently inactive, so avoid sampling. */\
        ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
    } else if (opt_lg_prof_sample == 0) { \
        /* Don't bother with sampling logic, since sampling */\
        /* interval is 1. */\
        bt_init(&bt, prof_tdata->vec); \
        prof_backtrace(&bt, nignore); \
        ret = prof_lookup(&bt); \
    } else { \
        if (prof_tdata->threshold == 0) { \
            /* Initialize.  Seed the prng differently for */\
            /* each thread. */\
            prof_tdata->prng_state = \
                (uint64_t)(uintptr_t)&size; \
            prof_sample_threshold_update(prof_tdata); \
        } \
        \
        /* Determine whether to capture a backtrace based on */\
        /* whether size is enough for prof_accum to reach */\
        /* prof_tdata->threshold.  However, delay updating */\
        /* these variables until prof_{m,re}alloc(), because */\
        /* we don't know for sure that the allocation will */\
        /* succeed. */\
        /* */\
        /* Use subtraction rather than addition to avoid */\
        /* potential integer overflow. */\
        if (size >= prof_tdata->threshold - \
            prof_tdata->accum) { \
            bt_init(&bt, prof_tdata->vec); \
            prof_backtrace(&bt, nignore); \
            ret = prof_lookup(&bt); \
        } else \
            ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
    } \
} while (0)
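
/*
 * Illustrative call pattern (an editorial sketch, not part of this
 * header): the allocation paths expand PROF_ALLOC_PREP() before
 * allocating, then report the result via prof_malloc() once the
 * allocation has succeeded.
 *
 *	prof_thr_cnt_t *cnt;
 *	size_t usize = s2u(size);
 *	PROF_ALLOC_PREP(1, usize, cnt);
 *	if (cnt == NULL)
 *		return (NULL);		// prof state unavailable
 *	void *p = imalloc(usize);
 *	if (p == NULL)
 *		return (NULL);
 *	prof_malloc(p, usize, cnt);
 */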

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)

prof_tdata_t *prof_tdata_get(bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_ctx_t *prof_ctx_get(const void *ptr);
void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
bool prof_sample_accum_update(size_t size);
void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
    size_t old_usize, prof_ctx_t *old_ctx);
void prof_free(const void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
malloc_tsd_externs(prof_tdata, prof_tdata_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
    prof_tdata_cleanup)

JEMALLOC_INLINE prof_tdata_t *
prof_tdata_get(bool create)
{
    prof_tdata_t *prof_tdata;

    cassert(config_prof);

    prof_tdata = *prof_tdata_tsd_get();
    if (create && prof_tdata == NULL)
        prof_tdata = prof_tdata_init();

    return (prof_tdata);
}

JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
    /*
     * The body of this function is compiled out unless heap profiling is
     * enabled, so that it is possible to compile jemalloc with floating
     * point support completely disabled.  Avoiding floating point code is
     * important on memory-constrained systems, but it also enables a
     * workaround for versions of glibc that don't properly save/restore
     * floating point registers during dynamic lazy symbol loading (which
     * internally calls into whatever malloc implementation happens to be
     * integrated into the application).  Note that some compilers (e.g.
     * gcc 4.8) may use floating point registers for fast memory moves, so
     * jemalloc must be compiled with such optimizations disabled (e.g.
     * -mno-sse) in order for the workaround to be complete.
     */
#ifdef JEMALLOC_PROF
    uint64_t r;
    double u;

    cassert(config_prof);

    /*
     * Compute sample threshold as a geometrically distributed random
     * variable with mean (2^opt_lg_prof_sample).
     *
     *                          __        __
     *                         |  log(u)   |                     1
     * prof_tdata->threshold = | --------- |, where p = -------------------
     *                         | log(1-p)  |             opt_lg_prof_sample
     *                          --        --            2
     *
     * For more information on the math, see:
     *
     *   Non-Uniform Random Variate Generation
     *   Luc Devroye
     *   Springer-Verlag, New York, 1986
     *   pp 500
     *   (http://luc.devroye.org/rnbookindex.html)
     */
    prng64(r, 53, prof_tdata->prng_state,
        UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
    u = (double)r * (1.0/9007199254740992.0L);
    prof_tdata->threshold = (uint64_t)(log(u) /
        log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
        + (uint64_t)1U;
#endif
}

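/*
 * Worked instance (editorial note): with the default
 * LG_PROF_SAMPLE_DEFAULT of 19, p = 1/2^19, so the geometric threshold
 * above has mean 1/p = 524288 bytes between samples; a draw of u = 0.5,
 * for example, yields floor(log(0.5)/log(1 - 2^-19)) + 1, roughly
 * 363409 bytes until the next sampled allocation.
 */
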
JEMALLOC_INLINE prof_ctx_t *
prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret;
    arena_chunk_t *chunk;

    cassert(config_prof);
    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        ret = arena_prof_ctx_get(ptr);
    } else
        ret = huge_prof_ctx_get(ptr);

    return (ret);
}

JEMALLOC_INLINE void
prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
{
    arena_chunk_t *chunk;

    cassert(config_prof);
    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        arena_prof_ctx_set(ptr, usize, ctx);
    } else
        huge_prof_ctx_set(ptr, ctx);
}

JEMALLOC_INLINE bool
prof_sample_accum_update(size_t size)
{
    prof_tdata_t *prof_tdata;

    cassert(config_prof);
    /* Sampling logic is unnecessary if the interval is 1. */
    assert(opt_lg_prof_sample != 0);

    prof_tdata = prof_tdata_get(false);
    if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
        return (true);

    /* Take care to avoid integer overflow. */
    if (size >= prof_tdata->threshold - prof_tdata->accum) {
        prof_tdata->accum -= (prof_tdata->threshold - size);
        /* Compute new sample threshold. */
        prof_sample_threshold_update(prof_tdata);
        while (prof_tdata->accum >= prof_tdata->threshold) {
            prof_tdata->accum -= prof_tdata->threshold;
            prof_sample_threshold_update(prof_tdata);
        }
        return (false);
    } else {
        prof_tdata->accum += size;
        return (true);
    }
}

JEMALLOC_INLINE void
prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
{

    cassert(config_prof);
    assert(ptr != NULL);
    assert(usize == isalloc(ptr, true));

    if (opt_lg_prof_sample != 0) {
        if (prof_sample_accum_update(usize)) {
            /*
             * Don't sample.  For malloc()-like allocation, it is
             * always possible to tell in advance how large an
             * object's usable size will be, so there should never
             * be a difference between the usize passed to
             * PROF_ALLOC_PREP() and prof_malloc().
             */
            assert((uintptr_t)cnt == (uintptr_t)1U);
        }
    }

    if ((uintptr_t)cnt > (uintptr_t)1U) {
        prof_ctx_set(ptr, usize, cnt->ctx);

        cnt->epoch++;
        /*********/
        mb_write();
        /*********/
        cnt->cnts.curobjs++;
        cnt->cnts.curbytes += usize;
        if (opt_prof_accum) {
            cnt->cnts.accumobjs++;
            cnt->cnts.accumbytes += usize;
        }
        /*********/
        mb_write();
        /*********/
        cnt->epoch++;
        /*********/
        mb_write();
        /*********/
    } else
        prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
}

JEMALLOC_INLINE void
prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
    size_t old_usize, prof_ctx_t *old_ctx)
{
    prof_thr_cnt_t *told_cnt;

    cassert(config_prof);
    assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);

    if (ptr != NULL) {
        assert(usize == isalloc(ptr, true));
        if (opt_lg_prof_sample != 0) {
            if (prof_sample_accum_update(usize)) {
                /*
                 * Don't sample.  The usize passed to
                 * PROF_ALLOC_PREP() was larger than what
                 * actually got allocated, so a backtrace was
                 * captured for this allocation, even though
                 * its actual usize was insufficient to cross
                 * the sample threshold.
                 */
                cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
            }
        }
    }

    if ((uintptr_t)old_ctx > (uintptr_t)1U) {
        told_cnt = prof_lookup(old_ctx->bt);
        if (told_cnt == NULL) {
            /*
             * It's too late to propagate OOM for this realloc(),
             * so operate directly on old_cnt->ctx->cnt_merged.
             */
            malloc_mutex_lock(old_ctx->lock);
            old_ctx->cnt_merged.curobjs--;
            old_ctx->cnt_merged.curbytes -= old_usize;
            malloc_mutex_unlock(old_ctx->lock);
            told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
        }
    } else
        told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;

    if ((uintptr_t)told_cnt > (uintptr_t)1U)
        told_cnt->epoch++;
    if ((uintptr_t)cnt > (uintptr_t)1U) {
        prof_ctx_set(ptr, usize, cnt->ctx);
        cnt->epoch++;
    } else if (ptr != NULL)
        prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
    /*********/
    mb_write();
    /*********/
    if ((uintptr_t)told_cnt > (uintptr_t)1U) {
        told_cnt->cnts.curobjs--;
        told_cnt->cnts.curbytes -= old_usize;
    }
    if ((uintptr_t)cnt > (uintptr_t)1U) {
        cnt->cnts.curobjs++;
        cnt->cnts.curbytes += usize;
        if (opt_prof_accum) {
            cnt->cnts.accumobjs++;
            cnt->cnts.accumbytes += usize;
        }
    }
    /*********/
    mb_write();
    /*********/
    if ((uintptr_t)told_cnt > (uintptr_t)1U)
        told_cnt->epoch++;
    if ((uintptr_t)cnt > (uintptr_t)1U)
        cnt->epoch++;
    /*********/
    mb_write(); /* Not strictly necessary. */
}

JEMALLOC_INLINE void
prof_free(const void *ptr, size_t size)
{
    prof_ctx_t *ctx = prof_ctx_get(ptr);

    cassert(config_prof);

    if ((uintptr_t)ctx > (uintptr_t)1) {
        prof_thr_cnt_t *tcnt;
        assert(size == isalloc(ptr, true));
        tcnt = prof_lookup(ctx->bt);

        if (tcnt != NULL) {
            tcnt->epoch++;
            /*********/
            mb_write();
            /*********/
            tcnt->cnts.curobjs--;
            tcnt->cnts.curbytes -= size;
            /*********/
            mb_write();
            /*********/
            tcnt->epoch++;
            /*********/
            mb_write();
            /*********/
        } else {
            /*
             * OOM during free() cannot be propagated, so operate
             * directly on cnt->ctx->cnt_merged.
             */
            malloc_mutex_lock(ctx->lock);
            ctx->cnt_merged.curobjs--;
            ctx->cnt_merged.curbytes -= size;
            malloc_mutex_unlock(ctx->lock);
        }
    }
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
83
deps/jemalloc/include/jemalloc/internal/ql.h
vendored
Normal file
@@ -0,0 +1,83 @@
/*
 * List definitions.
 */
#define ql_head(a_type) \
struct { \
    a_type *qlh_first; \
}

#define ql_head_initializer(a_head) {NULL}

#define ql_elm(a_type) qr(a_type)

/* List functions. */
#define ql_new(a_head) do { \
    (a_head)->qlh_first = NULL; \
} while (0)

#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)

#define ql_first(a_head) ((a_head)->qlh_first)

#define ql_last(a_head, a_field) \
    ((ql_first(a_head) != NULL) \
    ? qr_prev(ql_first(a_head), a_field) : NULL)

#define ql_next(a_head, a_elm, a_field) \
    ((ql_last(a_head, a_field) != (a_elm)) \
    ? qr_next((a_elm), a_field) : NULL)

#define ql_prev(a_head, a_elm, a_field) \
    ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
    : NULL)

#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
    qr_before_insert((a_qlelm), (a_elm), a_field); \
    if (ql_first(a_head) == (a_qlelm)) { \
        ql_first(a_head) = (a_elm); \
    } \
} while (0)

#define ql_after_insert(a_qlelm, a_elm, a_field) \
    qr_after_insert((a_qlelm), (a_elm), a_field)

#define ql_head_insert(a_head, a_elm, a_field) do { \
    if (ql_first(a_head) != NULL) { \
        qr_before_insert(ql_first(a_head), (a_elm), a_field); \
    } \
    ql_first(a_head) = (a_elm); \
} while (0)

#define ql_tail_insert(a_head, a_elm, a_field) do { \
    if (ql_first(a_head) != NULL) { \
        qr_before_insert(ql_first(a_head), (a_elm), a_field); \
    } \
    ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)

#define ql_remove(a_head, a_elm, a_field) do { \
    if (ql_first(a_head) == (a_elm)) { \
        ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
    } \
    if (ql_first(a_head) != (a_elm)) { \
        qr_remove((a_elm), a_field); \
    } else { \
        ql_first(a_head) = NULL; \
    } \
} while (0)

#define ql_head_remove(a_head, a_type, a_field) do { \
    a_type *t = ql_first(a_head); \
    ql_remove((a_head), t, a_field); \
} while (0)

#define ql_tail_remove(a_head, a_type, a_field) do { \
    a_type *t = ql_last(a_head, a_field); \
    ql_remove((a_head), t, a_field); \
} while (0)

#define ql_foreach(a_var, a_head, a_field) \
    qr_foreach((a_var), ql_first(a_head), a_field)

#define ql_reverse_foreach(a_var, a_head, a_field) \
    qr_reverse_foreach((a_var), ql_first(a_head), a_field)
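These list macros are intrusive: the linkage lives inside the node type itself, and the head only tracks the first element of the underlying ring. A self-contained usage sketch (the widget type is hypothetical; assumes ql.h above and its companion qr.h, shown next, are available as headers):

#include <stdio.h>

#include "qr.h"
#include "ql.h"

typedef struct widget_s widget_t;
struct widget_s {
    int id;
    ql_elm(widget_t) link; /* intrusive linkage */
};

int
main(void)
{
    ql_head(widget_t) head;
    widget_t a, b, *w;

    a.id = 1;
    b.id = 2;
    ql_new(&head);
    ql_elm_new(&a, link);
    ql_elm_new(&b, link);
    ql_tail_insert(&head, &a, link);
    ql_tail_insert(&head, &b, link);
    ql_foreach(w, &head, link)
        printf("%d\n", w->id); /* prints 1, then 2 */
    return (0);
}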
67
deps/jemalloc/include/jemalloc/internal/qr.h
vendored
Normal file
@@ -0,0 +1,67 @@
/* Ring definitions. */
#define qr(a_type) \
struct { \
    a_type *qre_next; \
    a_type *qre_prev; \
}

/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
    (a_qr)->a_field.qre_next = (a_qr); \
    (a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)

#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)

#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
    (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
    (a_qr)->a_field.qre_next = (a_qrelm); \
    (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
    (a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_after_insert(a_qrelm, a_qr, a_field) \
    do \
    { \
    (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
    (a_qr)->a_field.qre_prev = (a_qrelm); \
    (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
    (a_qrelm)->a_field.qre_next = (a_qr); \
    } while (0)

#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
    void *t; \
    (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
    (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
    t = (a_qr_a)->a_field.qre_prev; \
    (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
    (a_qr_b)->a_field.qre_prev = t; \
} while (0)

/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
 * have two copies of the code. */
#define qr_split(a_qr_a, a_qr_b, a_field) \
    qr_meld((a_qr_a), (a_qr_b), a_field)

#define qr_remove(a_qr, a_field) do { \
    (a_qr)->a_field.qre_prev->a_field.qre_next \
        = (a_qr)->a_field.qre_next; \
    (a_qr)->a_field.qre_next->a_field.qre_prev \
        = (a_qr)->a_field.qre_prev; \
    (a_qr)->a_field.qre_next = (a_qr); \
    (a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_foreach(var, a_qr, a_field) \
    for ((var) = (a_qr); \
        (var) != NULL; \
        (var) = (((var)->a_field.qre_next != (a_qr)) \
        ? (var)->a_field.qre_next : NULL))

#define qr_reverse_foreach(var, a_qr, a_field) \
    for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
        (var) != NULL; \
        (var) = (((var) != (a_qr)) \
        ? (var)->a_field.qre_prev : NULL))
67
deps/jemalloc/include/jemalloc/internal/quarantine.h
vendored
Normal file
@@ -0,0 +1,67 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;

/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct quarantine_obj_s {
    void *ptr;
    size_t usize;
};

struct quarantine_s {
    size_t curbytes;
    size_t curobjs;
    size_t first;
#define LG_MAXOBJS_INIT 10
    size_t lg_maxobjs;
    quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

quarantine_t *quarantine_init(size_t lg_maxobjs);
void quarantine(void *ptr);
void quarantine_cleanup(void *arg);
bool quarantine_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *)

void quarantine_alloc_hook(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
malloc_tsd_externs(quarantine, quarantine_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
    quarantine_cleanup)

JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
    quarantine_t *quarantine;

    assert(config_fill && opt_quarantine);

    quarantine = *quarantine_tsd_get();
    if (quarantine == NULL)
        quarantine_init(LG_MAXOBJS_INIT);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

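The objs[1] member above is the usual C89 flexible-array idiom: quarantine_init() presumably over-allocates the struct so that objs holds 2^lg_maxobjs slots. Under that assumption, the next free slot of the ring would be indexed as in this hypothetical sketch (q stands for a quarantine_t pointer; the implementation lives in quarantine.c, which is not part of this header):

/* Hypothetical ring-buffer indexing for a power-of-two capacity. */
size_t slot = (q->first + q->curobjs) & ((ZU(1) << q->lg_maxobjs) - 1);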
969
deps/jemalloc/include/jemalloc/internal/rb.h
vendored
Normal file
@@ -0,0 +1,969 @@
/*-
|
||||
*******************************************************************************
|
||||
*
|
||||
* cpp macro implementation of left-leaning 2-3 red-black trees. Parent
|
||||
* pointers are not used, and color bits are stored in the least significant
|
||||
* bit of right-child pointers (if RB_COMPACT is defined), thus making node
|
||||
* linkage as compact as is possible for red-black trees.
|
||||
*
|
||||
* Usage:
|
||||
*
|
||||
* #include <stdint.h>
|
||||
* #include <stdbool.h>
|
||||
* #define NDEBUG // (Optional, see assert(3).)
|
||||
* #include <assert.h>
|
||||
* #define RB_COMPACT // (Optional, embed color bits in right-child pointers.)
|
||||
* #include <rb.h>
|
||||
* ...
|
||||
*
|
||||
*******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef RB_H_
|
||||
#define RB_H_
|
||||
|
||||
#ifdef RB_COMPACT
|
||||
/* Node structure. */
|
||||
#define rb_node(a_type) \
|
||||
struct { \
|
||||
a_type *rbn_left; \
|
||||
a_type *rbn_right_red; \
|
||||
}
|
||||
#else
|
||||
#define rb_node(a_type) \
|
||||
struct { \
|
||||
a_type *rbn_left; \
|
||||
a_type *rbn_right; \
|
||||
bool rbn_red; \
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Root structure. */
|
||||
#define rb_tree(a_type) \
|
||||
struct { \
|
||||
a_type *rbt_root; \
|
||||
a_type rbt_nil; \
|
||||
}
|
||||
|
||||
/* Left accessors. */
|
||||
#define rbtn_left_get(a_type, a_field, a_node) \
    ((a_node)->a_field.rbn_left)
#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
    (a_node)->a_field.rbn_left = a_left; \
} while (0)

#ifdef RB_COMPACT
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
    ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
      & ((ssize_t)-2)))
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
      | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
} while (0)

/* Color accessors. */
#define rbtn_red_get(a_type, a_field, a_node) \
    ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
      & ((size_t)1)))
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
    (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
      | ((ssize_t)a_red)); \
} while (0)
#define rbtn_red_set(a_type, a_field, a_node) do { \
    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
      (a_node)->a_field.rbn_right_red) | ((size_t)1)); \
} while (0)
#define rbtn_black_set(a_type, a_field, a_node) do { \
    (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)
#else
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
    ((a_node)->a_field.rbn_right)
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
    (a_node)->a_field.rbn_right = a_right; \
} while (0)

/* Color accessors. */
#define rbtn_red_get(a_type, a_field, a_node) \
    ((a_node)->a_field.rbn_red)
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
    (a_node)->a_field.rbn_red = (a_red); \
} while (0)
#define rbtn_red_set(a_type, a_field, a_node) do { \
    (a_node)->a_field.rbn_red = true; \
} while (0)
#define rbtn_black_set(a_type, a_field, a_node) do { \
    (a_node)->a_field.rbn_red = false; \
} while (0)
#endif

/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
    rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
    rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
    rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)

/* Tree initializer. */
#define rb_new(a_type, a_field, a_rbt) do { \
    (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \
    rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \
    rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \
} while (0)

/* Internal utility macros. */
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
    (r_node) = (a_root); \
    if ((r_node) != &(a_rbt)->rbt_nil) { \
        for (; \
          rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\
          (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
        } \
    } \
} while (0)

#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
    (r_node) = (a_root); \
    if ((r_node) != &(a_rbt)->rbt_nil) { \
        for (; rbtn_right_get(a_type, a_field, (r_node)) != \
          &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
          (r_node))) { \
        } \
    } \
} while (0)

#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
    (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
    rbtn_right_set(a_type, a_field, (a_node), \
      rbtn_left_get(a_type, a_field, (r_node))); \
    rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
} while (0)

#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
    (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
    rbtn_left_set(a_type, a_field, (a_node), \
      rbtn_right_get(a_type, a_field, (r_node))); \
    rbtn_right_set(a_type, a_field, (r_node), (a_node)); \
} while (0)

/*
 * The rb_proto() macro generates function prototypes that correspond to the
 * functions generated by an equivalently parameterized call to rb_gen().
 */

#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, a_type *key); \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
  a_rbt_type *, a_type *, void *), void *arg); \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
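/*
 * Editor's sketch (not part of the vendored source): rb_proto() is meant to
 * be paired with rb_gen().  Assuming the ex_* example documented below, the
 * prototypes go in a header, and the matching rb_gen() call in exactly one
 * .c file emits the definitions:
 *
 *   rb_proto(, ex_, ex_t, ex_node_t)                   // in a header
 *   rb_gen(, ex_, ex_t, ex_node_t, ex_link, ex_cmp)    // in one .c file
 */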

/*
 * The rb_gen() macro generates a type-specific red-black tree implementation,
 * based on the above cpp macros.
 *
 * Arguments:
 *
 *   a_attr    : Function attribute for generated functions (ex: static).
 *   a_prefix  : Prefix for generated functions (ex: ex_).
 *   a_rb_type : Type for red-black tree data structure (ex: ex_t).
 *   a_type    : Type for red-black tree node data structure (ex: ex_node_t).
 *   a_field   : Name of red-black tree node linkage (ex: ex_link).
 *   a_cmp     : Node comparison function name, with the following prototype:
 *                 int (a_cmp *)(a_type *a_node, a_type *a_other);
 *                                       ^^^^^^
 *                                    or a_key
 *               Interpretation of comparison function return values:
 *                 -1 : a_node <  a_other
 *                  0 : a_node == a_other
 *                  1 : a_node >  a_other
 *               In all cases, the a_node or a_key macro argument is the first
 *               argument to the comparison function, which makes it possible
 *               to write comparison functions that treat the first argument
 *               specially.
 *
 * Assuming the following setup:
 *
 *   typedef struct ex_node_s ex_node_t;
 *   struct ex_node_s {
 *       rb_node(ex_node_t) ex_link;
 *   };
 *   typedef rb_tree(ex_node_t) ex_t;
 *   rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
 *
 * The following API is generated:
 *
 *   static void
 *   ex_new(ex_t *tree);
 *       Description: Initialize a red-black tree structure.
 *       Args:
 *           tree: Pointer to an uninitialized red-black tree object.
 *
 *   static ex_node_t *
 *   ex_first(ex_t *tree);
 *   static ex_node_t *
 *   ex_last(ex_t *tree);
 *       Description: Get the first/last node in tree.
 *       Args:
 *           tree: Pointer to an initialized red-black tree object.
 *       Ret: First/last node in tree, or NULL if tree is empty.
 *
 *   static ex_node_t *
 *   ex_next(ex_t *tree, ex_node_t *node);
 *   static ex_node_t *
 *   ex_prev(ex_t *tree, ex_node_t *node);
 *       Description: Get node's successor/predecessor.
 *       Args:
 *           tree: Pointer to an initialized red-black tree object.
 *           node: A node in tree.
 *       Ret: node's successor/predecessor in tree, or NULL if node is
 *            last/first.
 *
 *   static ex_node_t *
 *   ex_search(ex_t *tree, ex_node_t *key);
 *       Description: Search for node that matches key.
 *       Args:
 *           tree: Pointer to an initialized red-black tree object.
 *           key : Search key.
 *       Ret: Node in tree that matches key, or NULL if no match.
 *
 *   static ex_node_t *
 *   ex_nsearch(ex_t *tree, ex_node_t *key);
 *   static ex_node_t *
 *   ex_psearch(ex_t *tree, ex_node_t *key);
 *       Description: Search for node that matches key.  If no match is found,
 *                    return what would be key's successor/predecessor, were
 *                    key in tree.
 *       Args:
 *           tree: Pointer to an initialized red-black tree object.
 *           key : Search key.
 *       Ret: Node in tree that matches key, or if no match, hypothetical
 *            node's successor/predecessor (NULL if no successor/predecessor).
 *
 *   static void
 *   ex_insert(ex_t *tree, ex_node_t *node);
 *       Description: Insert node into tree.
 *       Args:
 *           tree: Pointer to an initialized red-black tree object.
 *           node: Node to be inserted into tree.
 *
 *   static void
 *   ex_remove(ex_t *tree, ex_node_t *node);
 *       Description: Remove node from tree.
 *       Args:
 *           tree: Pointer to an initialized red-black tree object.
 *           node: Node in tree to be removed.
 *
 *   static ex_node_t *
 *   ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
 *       ex_node_t *, void *), void *arg);
 *   static ex_node_t *
 *   ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
 *       ex_node_t *, void *), void *arg);
 *       Description: Iterate forward/backward over tree, starting at node.
 *                    If tree is modified, iteration must be immediately
 *                    terminated by the callback function that causes the
 *                    modification.
 *       Args:
 *           tree : Pointer to an initialized red-black tree object.
 *           start: Node at which to start iteration, or NULL to start at
 *                  first/last node.
 *           cb   : Callback function, which is called for each node during
 *                  iteration.  Under normal circumstances the callback
 *                  function should return NULL, which causes iteration to
 *                  continue.  If a callback function returns non-NULL,
 *                  iteration is immediately terminated and the non-NULL
 *                  return value is returned by the iterator.  This is useful
 *                  for re-starting iteration after modifying tree.
 *           arg  : Opaque pointer passed to cb().
 *       Ret: NULL if iteration completed, or the non-NULL callback return
 *            value that caused termination of the iteration.
 */
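/*
 * Editor's sketch (not part of the vendored source): filling in the ex_*
 * setup above with a hypothetical 'key' field added to ex_node_s, a
 * comparison function that returns -1/0/1 as required, and a typical call
 * sequence:
 *
 *   static int
 *   ex_cmp(ex_node_t *a, ex_node_t *b)
 *   {
 *       // Yields -1, 0, or 1; the first argument is always the node or
 *       // key supplied by the caller.
 *       return ((a->key > b->key) - (a->key < b->key));
 *   }
 *
 *   ex_t tree;
 *   ex_node_t node, key, *res;
 *   ex_new(&tree);
 *   node.key = 42;
 *   ex_insert(&tree, &node);
 *   key.key = 42;
 *   res = ex_search(&tree, &key);    // res == &node
 *   ex_remove(&tree, &node);
 */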
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
    rb_new(a_type, a_field, rbtree); \
} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
    a_type *ret; \
    rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
    if (ret == &rbtree->rbt_nil) { \
        ret = NULL; \
    } \
    return (ret); \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
    a_type *ret; \
    rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
    if (ret == &rbtree->rbt_nil) { \
        ret = NULL; \
    } \
    return (ret); \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
    a_type *ret; \
    if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
        rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
          a_field, node), ret); \
    } else { \
        a_type *tnode = rbtree->rbt_root; \
        assert(tnode != &rbtree->rbt_nil); \
        ret = &rbtree->rbt_nil; \
        while (true) { \
            int cmp = (a_cmp)(node, tnode); \
            if (cmp < 0) { \
                ret = tnode; \
                tnode = rbtn_left_get(a_type, a_field, tnode); \
            } else if (cmp > 0) { \
                tnode = rbtn_right_get(a_type, a_field, tnode); \
            } else { \
                break; \
            } \
            assert(tnode != &rbtree->rbt_nil); \
        } \
    } \
    if (ret == &rbtree->rbt_nil) { \
        ret = (NULL); \
    } \
    return (ret); \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
    a_type *ret; \
    if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
        rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
          a_field, node), ret); \
    } else { \
        a_type *tnode = rbtree->rbt_root; \
        assert(tnode != &rbtree->rbt_nil); \
        ret = &rbtree->rbt_nil; \
        while (true) { \
            int cmp = (a_cmp)(node, tnode); \
            if (cmp < 0) { \
                tnode = rbtn_left_get(a_type, a_field, tnode); \
            } else if (cmp > 0) { \
                ret = tnode; \
                tnode = rbtn_right_get(a_type, a_field, tnode); \
            } else { \
                break; \
            } \
            assert(tnode != &rbtree->rbt_nil); \
        } \
    } \
    if (ret == &rbtree->rbt_nil) { \
        ret = (NULL); \
    } \
    return (ret); \
} \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
    a_type *ret; \
    int cmp; \
    ret = rbtree->rbt_root; \
    while (ret != &rbtree->rbt_nil \
      && (cmp = (a_cmp)(key, ret)) != 0) { \
        if (cmp < 0) { \
            ret = rbtn_left_get(a_type, a_field, ret); \
        } else { \
            ret = rbtn_right_get(a_type, a_field, ret); \
        } \
    } \
    if (ret == &rbtree->rbt_nil) { \
        ret = (NULL); \
    } \
    return (ret); \
} \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
    a_type *ret; \
    a_type *tnode = rbtree->rbt_root; \
    ret = &rbtree->rbt_nil; \
    while (tnode != &rbtree->rbt_nil) { \
        int cmp = (a_cmp)(key, tnode); \
        if (cmp < 0) { \
            ret = tnode; \
            tnode = rbtn_left_get(a_type, a_field, tnode); \
        } else if (cmp > 0) { \
            tnode = rbtn_right_get(a_type, a_field, tnode); \
        } else { \
            ret = tnode; \
            break; \
        } \
    } \
    if (ret == &rbtree->rbt_nil) { \
        ret = (NULL); \
    } \
    return (ret); \
} \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
    a_type *ret; \
    a_type *tnode = rbtree->rbt_root; \
    ret = &rbtree->rbt_nil; \
    while (tnode != &rbtree->rbt_nil) { \
        int cmp = (a_cmp)(key, tnode); \
        if (cmp < 0) { \
            tnode = rbtn_left_get(a_type, a_field, tnode); \
        } else if (cmp > 0) { \
            ret = tnode; \
            tnode = rbtn_right_get(a_type, a_field, tnode); \
        } else { \
            ret = tnode; \
            break; \
        } \
    } \
    if (ret == &rbtree->rbt_nil) { \
        ret = (NULL); \
    } \
    return (ret); \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
    struct { \
        a_type *node; \
        int cmp; \
    } path[sizeof(void *) << 4], *pathp; \
    rbt_node_new(a_type, a_field, rbtree, node); \
    /* Wind. */ \
    path->node = rbtree->rbt_root; \
    for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
        int cmp = pathp->cmp = a_cmp(node, pathp->node); \
        assert(cmp != 0); \
        if (cmp < 0) { \
            pathp[1].node = rbtn_left_get(a_type, a_field, \
              pathp->node); \
        } else { \
            pathp[1].node = rbtn_right_get(a_type, a_field, \
              pathp->node); \
        } \
    } \
    pathp->node = node; \
    /* Unwind. */ \
    for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
        a_type *cnode = pathp->node; \
        if (pathp->cmp < 0) { \
            a_type *left = pathp[1].node; \
            rbtn_left_set(a_type, a_field, cnode, left); \
            if (rbtn_red_get(a_type, a_field, left)) { \
                a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
                if (rbtn_red_get(a_type, a_field, leftleft)) { \
                    /* Fix up 4-node. */ \
                    a_type *tnode; \
                    rbtn_black_set(a_type, a_field, leftleft); \
                    rbtn_rotate_right(a_type, a_field, cnode, tnode); \
                    cnode = tnode; \
                } \
            } else { \
                return; \
            } \
        } else { \
            a_type *right = pathp[1].node; \
            rbtn_right_set(a_type, a_field, cnode, right); \
            if (rbtn_red_get(a_type, a_field, right)) { \
                a_type *left = rbtn_left_get(a_type, a_field, cnode); \
                if (rbtn_red_get(a_type, a_field, left)) { \
                    /* Split 4-node. */ \
                    rbtn_black_set(a_type, a_field, left); \
                    rbtn_black_set(a_type, a_field, right); \
                    rbtn_red_set(a_type, a_field, cnode); \
                } else { \
                    /* Lean left. */ \
                    a_type *tnode; \
                    bool tred = rbtn_red_get(a_type, a_field, cnode); \
                    rbtn_rotate_left(a_type, a_field, cnode, tnode); \
                    rbtn_color_set(a_type, a_field, tnode, tred); \
                    rbtn_red_set(a_type, a_field, cnode); \
                    cnode = tnode; \
                } \
            } else { \
                return; \
            } \
        } \
        pathp->node = cnode; \
    } \
    /* Set root, and make it black. */ \
    rbtree->rbt_root = path->node; \
    rbtn_black_set(a_type, a_field, rbtree->rbt_root); \
} \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
    struct { \
        a_type *node; \
        int cmp; \
    } *pathp, *nodep, path[sizeof(void *) << 4]; \
    /* Wind. */ \
    nodep = NULL; /* Silence compiler warning. */ \
    path->node = rbtree->rbt_root; \
    for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
        int cmp = pathp->cmp = a_cmp(node, pathp->node); \
        if (cmp < 0) { \
            pathp[1].node = rbtn_left_get(a_type, a_field, \
              pathp->node); \
        } else { \
            pathp[1].node = rbtn_right_get(a_type, a_field, \
              pathp->node); \
            if (cmp == 0) { \
                /* Find node's successor, in preparation for swap. */ \
                pathp->cmp = 1; \
                nodep = pathp; \
                for (pathp++; pathp->node != &rbtree->rbt_nil; \
                  pathp++) { \
                    pathp->cmp = -1; \
                    pathp[1].node = rbtn_left_get(a_type, a_field, \
                      pathp->node); \
                } \
                break; \
            } \
        } \
    } \
    assert(nodep->node == node); \
    pathp--; \
    if (pathp->node != node) { \
        /* Swap node with its successor. */ \
        bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
        rbtn_color_set(a_type, a_field, pathp->node, \
          rbtn_red_get(a_type, a_field, node)); \
        rbtn_left_set(a_type, a_field, pathp->node, \
          rbtn_left_get(a_type, a_field, node)); \
        /* If node's successor is its right child, the following code */\
        /* will do the wrong thing for the right child pointer.       */\
        /* However, it doesn't matter, because the pointer will be    */\
        /* properly set when the successor is pruned.                 */\
        rbtn_right_set(a_type, a_field, pathp->node, \
          rbtn_right_get(a_type, a_field, node)); \
        rbtn_color_set(a_type, a_field, node, tred); \
        /* The pruned leaf node's child pointers are never accessed */\
        /* again, so don't bother setting them to nil.              */\
        nodep->node = pathp->node; \
        pathp->node = node; \
        if (nodep == path) { \
            rbtree->rbt_root = nodep->node; \
        } else { \
            if (nodep[-1].cmp < 0) { \
                rbtn_left_set(a_type, a_field, nodep[-1].node, \
                  nodep->node); \
            } else { \
                rbtn_right_set(a_type, a_field, nodep[-1].node, \
                  nodep->node); \
            } \
        } \
    } else { \
        a_type *left = rbtn_left_get(a_type, a_field, node); \
        if (left != &rbtree->rbt_nil) { \
            /* node has no successor, but it has a left child.   */\
            /* Splice node out, without losing the left child.   */\
            assert(rbtn_red_get(a_type, a_field, node) == false); \
            assert(rbtn_red_get(a_type, a_field, left)); \
            rbtn_black_set(a_type, a_field, left); \
            if (pathp == path) { \
                rbtree->rbt_root = left; \
            } else { \
                if (pathp[-1].cmp < 0) { \
                    rbtn_left_set(a_type, a_field, pathp[-1].node, \
                      left); \
                } else { \
                    rbtn_right_set(a_type, a_field, pathp[-1].node, \
                      left); \
                } \
            } \
            return; \
        } else if (pathp == path) { \
            /* The tree only contained one node. */ \
            rbtree->rbt_root = &rbtree->rbt_nil; \
            return; \
        } \
    } \
    if (rbtn_red_get(a_type, a_field, pathp->node)) { \
        /* Prune red node, which requires no fixup. */ \
        assert(pathp[-1].cmp < 0); \
        rbtn_left_set(a_type, a_field, pathp[-1].node, \
          &rbtree->rbt_nil); \
        return; \
    } \
    /* The node to be pruned is black, so unwind until balance is */\
    /* restored.                                                  */\
    pathp->node = &rbtree->rbt_nil; \
    for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
        assert(pathp->cmp != 0); \
        if (pathp->cmp < 0) { \
            rbtn_left_set(a_type, a_field, pathp->node, \
              pathp[1].node); \
            assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
              == false); \
            if (rbtn_red_get(a_type, a_field, pathp->node)) { \
                a_type *right = rbtn_right_get(a_type, a_field, \
                  pathp->node); \
                a_type *rightleft = rbtn_left_get(a_type, a_field, \
                  right); \
                a_type *tnode; \
                if (rbtn_red_get(a_type, a_field, rightleft)) { \
                    /* In the following diagrams, ||, //, and \\ */\
                    /* indicate the path to the removed node.    */\
                    /*                                           */\
                    /*      ||                                   */\
                    /*    pathp(r)                               */\
                    /*  //      \                                */\
                    /* (b)      (b)                              */\
                    /*          /                                */\
                    /*        (r)                                */\
                    /*                                           */\
                    rbtn_black_set(a_type, a_field, pathp->node); \
                    rbtn_rotate_right(a_type, a_field, right, tnode); \
                    rbtn_right_set(a_type, a_field, pathp->node, tnode);\
                    rbtn_rotate_left(a_type, a_field, pathp->node, \
                      tnode); \
                } else { \
                    /*      ||                                   */\
                    /*    pathp(r)                               */\
                    /*  //      \                                */\
                    /* (b)      (b)                              */\
                    /*          /                                */\
                    /*        (b)                                */\
                    /*                                           */\
                    rbtn_rotate_left(a_type, a_field, pathp->node, \
                      tnode); \
                } \
                /* Balance restored, but rotation modified subtree */\
                /* root.                                           */\
                assert((uintptr_t)pathp > (uintptr_t)path); \
                if (pathp[-1].cmp < 0) { \
                    rbtn_left_set(a_type, a_field, pathp[-1].node, \
                      tnode); \
                } else { \
                    rbtn_right_set(a_type, a_field, pathp[-1].node, \
                      tnode); \
                } \
                return; \
            } else { \
                a_type *right = rbtn_right_get(a_type, a_field, \
                  pathp->node); \
                a_type *rightleft = rbtn_left_get(a_type, a_field, \
                  right); \
                if (rbtn_red_get(a_type, a_field, rightleft)) { \
                    /*      ||                                   */\
                    /*    pathp(b)                               */\
                    /*  //      \                                */\
                    /* (b)      (b)                              */\
                    /*          /                                */\
                    /*        (r)                                */\
                    a_type *tnode; \
                    rbtn_black_set(a_type, a_field, rightleft); \
                    rbtn_rotate_right(a_type, a_field, right, tnode); \
                    rbtn_right_set(a_type, a_field, pathp->node, tnode);\
                    rbtn_rotate_left(a_type, a_field, pathp->node, \
                      tnode); \
                    /* Balance restored, but rotation modified     */\
                    /* subtree root, which may actually be the tree */\
                    /* root.                                       */\
                    if (pathp == path) { \
                        /* Set root. */ \
                        rbtree->rbt_root = tnode; \
                    } else { \
                        if (pathp[-1].cmp < 0) { \
                            rbtn_left_set(a_type, a_field, \
                              pathp[-1].node, tnode); \
                        } else { \
                            rbtn_right_set(a_type, a_field, \
                              pathp[-1].node, tnode); \
                        } \
                    } \
                    return; \
                } else { \
                    /*      ||                                   */\
                    /*    pathp(b)                               */\
                    /*  //      \                                */\
                    /* (b)      (b)                              */\
                    /*          /                                */\
                    /*        (b)                                */\
                    a_type *tnode; \
                    rbtn_red_set(a_type, a_field, pathp->node); \
                    rbtn_rotate_left(a_type, a_field, pathp->node, \
                      tnode); \
                    pathp->node = tnode; \
                } \
            } \
        } else { \
            a_type *left; \
            rbtn_right_set(a_type, a_field, pathp->node, \
              pathp[1].node); \
            left = rbtn_left_get(a_type, a_field, pathp->node); \
            if (rbtn_red_get(a_type, a_field, left)) { \
                a_type *tnode; \
                a_type *leftright = rbtn_right_get(a_type, a_field, \
                  left); \
                a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
                  leftright); \
                if (rbtn_red_get(a_type, a_field, leftrightleft)) { \
                    /*      ||                                   */\
                    /*    pathp(b)                               */\
                    /*   /      \\                               */\
                    /* (r)      (b)                              */\
                    /*   \                                       */\
                    /*   (b)                                     */\
                    /*   /                                       */\
                    /* (r)                                       */\
                    a_type *unode; \
                    rbtn_black_set(a_type, a_field, leftrightleft); \
                    rbtn_rotate_right(a_type, a_field, pathp->node, \
                      unode); \
                    rbtn_rotate_right(a_type, a_field, pathp->node, \
                      tnode); \
                    rbtn_right_set(a_type, a_field, unode, tnode); \
                    rbtn_rotate_left(a_type, a_field, unode, tnode); \
                } else { \
                    /*      ||                                   */\
                    /*    pathp(b)                               */\
                    /*   /      \\                               */\
                    /* (r)      (b)                              */\
                    /*   \                                       */\
                    /*   (b)                                     */\
                    /*   /                                       */\
                    /* (b)                                       */\
                    assert(leftright != &rbtree->rbt_nil); \
                    rbtn_red_set(a_type, a_field, leftright); \
                    rbtn_rotate_right(a_type, a_field, pathp->node, \
                      tnode); \
                    rbtn_black_set(a_type, a_field, tnode); \
                } \
                /* Balance restored, but rotation modified subtree */\
                /* root, which may actually be the tree root.      */\
                if (pathp == path) { \
                    /* Set root. */ \
                    rbtree->rbt_root = tnode; \
                } else { \
                    if (pathp[-1].cmp < 0) { \
                        rbtn_left_set(a_type, a_field, pathp[-1].node, \
                          tnode); \
                    } else { \
                        rbtn_right_set(a_type, a_field, pathp[-1].node, \
                          tnode); \
                    } \
                } \
                return; \
            } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
                a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
                if (rbtn_red_get(a_type, a_field, leftleft)) { \
                    /*        ||                                 */\
                    /*      pathp(r)                             */\
                    /*     /      \\                             */\
                    /*   (b)      (b)                            */\
                    /*   /                                       */\
                    /* (r)                                       */\
                    a_type *tnode; \
                    rbtn_black_set(a_type, a_field, pathp->node); \
                    rbtn_red_set(a_type, a_field, left); \
                    rbtn_black_set(a_type, a_field, leftleft); \
                    rbtn_rotate_right(a_type, a_field, pathp->node, \
                      tnode); \
                    /* Balance restored, but rotation modified */\
                    /* subtree root.                           */\
                    assert((uintptr_t)pathp > (uintptr_t)path); \
                    if (pathp[-1].cmp < 0) { \
                        rbtn_left_set(a_type, a_field, pathp[-1].node, \
                          tnode); \
                    } else { \
                        rbtn_right_set(a_type, a_field, pathp[-1].node, \
                          tnode); \
                    } \
                    return; \
                } else { \
                    /*        ||                                 */\
                    /*      pathp(r)                             */\
                    /*     /      \\                             */\
                    /*   (b)      (b)                            */\
                    /*   /                                       */\
                    /* (b)                                       */\
                    rbtn_red_set(a_type, a_field, left); \
                    rbtn_black_set(a_type, a_field, pathp->node); \
                    /* Balance restored. */ \
                    return; \
                } \
            } else { \
                a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
                if (rbtn_red_get(a_type, a_field, leftleft)) { \
                    /*        ||                                 */\
                    /*      pathp(b)                             */\
                    /*     /      \\                             */\
                    /*   (b)      (b)                            */\
                    /*   /                                       */\
                    /* (r)                                       */\
                    a_type *tnode; \
                    rbtn_black_set(a_type, a_field, leftleft); \
                    rbtn_rotate_right(a_type, a_field, pathp->node, \
                      tnode); \
                    /* Balance restored, but rotation modified     */\
                    /* subtree root, which may actually be the tree */\
                    /* root.                                       */\
                    if (pathp == path) { \
                        /* Set root. */ \
                        rbtree->rbt_root = tnode; \
                    } else { \
                        if (pathp[-1].cmp < 0) { \
                            rbtn_left_set(a_type, a_field, \
                              pathp[-1].node, tnode); \
                        } else { \
                            rbtn_right_set(a_type, a_field, \
                              pathp[-1].node, tnode); \
                        } \
                    } \
                    return; \
                } else { \
                    /*        ||                                 */\
                    /*      pathp(b)                             */\
                    /*     /      \\                             */\
                    /*   (b)      (b)                            */\
                    /*   /                                       */\
                    /* (b)                                       */\
                    rbtn_red_set(a_type, a_field, left); \
                } \
            } \
        } \
    } \
    /* Set root. */ \
    rbtree->rbt_root = path->node; \
    assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \
} \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
    if (node == &rbtree->rbt_nil) { \
        return (&rbtree->rbt_nil); \
    } else { \
        a_type *ret; \
        if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
          a_field, node), cb, arg)) != &rbtree->rbt_nil \
          || (ret = cb(rbtree, node, arg)) != NULL) { \
            return (ret); \
        } \
        return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
          a_field, node), cb, arg)); \
    } \
} \
a_attr a_type * \
a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
    int cmp = a_cmp(start, node); \
    if (cmp < 0) { \
        a_type *ret; \
        if ((ret = a_prefix##iter_start(rbtree, start, \
          rbtn_left_get(a_type, a_field, node), cb, arg)) != \
          &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
            return (ret); \
        } \
        return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
          a_field, node), cb, arg)); \
    } else if (cmp > 0) { \
        return (a_prefix##iter_start(rbtree, start, \
          rbtn_right_get(a_type, a_field, node), cb, arg)); \
    } else { \
        a_type *ret; \
        if ((ret = cb(rbtree, node, arg)) != NULL) { \
            return (ret); \
        } \
        return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
          a_field, node), cb, arg)); \
    } \
} \
a_attr a_type * \
a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
  a_rbt_type *, a_type *, void *), void *arg) { \
    a_type *ret; \
    if (start != NULL) { \
        ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \
          cb, arg); \
    } else { \
        ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
    } \
    if (ret == &rbtree->rbt_nil) { \
        ret = NULL; \
    } \
    return (ret); \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
    if (node == &rbtree->rbt_nil) { \
        return (&rbtree->rbt_nil); \
    } else { \
        a_type *ret; \
        if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
          rbtn_right_get(a_type, a_field, node), cb, arg)) != \
          &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
            return (ret); \
        } \
        return (a_prefix##reverse_iter_recurse(rbtree, \
          rbtn_left_get(a_type, a_field, node), cb, arg)); \
    } \
} \
a_attr a_type * \
a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
  a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
  void *arg) { \
    int cmp = a_cmp(start, node); \
    if (cmp > 0) { \
        a_type *ret; \
        if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
          rbtn_right_get(a_type, a_field, node), cb, arg)) != \
          &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
            return (ret); \
        } \
        return (a_prefix##reverse_iter_recurse(rbtree, \
          rbtn_left_get(a_type, a_field, node), cb, arg)); \
    } else if (cmp < 0) { \
        return (a_prefix##reverse_iter_start(rbtree, start, \
          rbtn_left_get(a_type, a_field, node), cb, arg)); \
    } else { \
        a_type *ret; \
        if ((ret = cb(rbtree, node, arg)) != NULL) { \
            return (ret); \
        } \
        return (a_prefix##reverse_iter_recurse(rbtree, \
          rbtn_left_get(a_type, a_field, node), cb, arg)); \
    } \
} \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
    a_type *ret; \
    if (start != NULL) { \
        ret = a_prefix##reverse_iter_start(rbtree, start, \
          rbtree->rbt_root, cb, arg); \
    } else { \
        ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
          cb, arg); \
    } \
    if (ret == &rbtree->rbt_nil) { \
        ret = NULL; \
    } \
    return (ret); \
}

#endif /* RB_H_ */
172
deps/jemalloc/include/jemalloc/internal/rtree.h
vendored
Normal file
@@ -0,0 +1,172 @@
/*
 * This radix tree implementation is tailored to the singular purpose of
 * tracking which chunks are currently owned by jemalloc.  This functionality
 * is mandatory for OS X, where jemalloc must be able to respond to object
 * ownership queries.
 *
 *******************************************************************************
 */
#ifdef JEMALLOC_H_TYPES

typedef struct rtree_s rtree_t;

/*
 * Size of each radix tree node (must be a power of 2).  This impacts tree
 * depth.
 */
#define RTREE_NODESIZE (1U << 16)
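/*
 * Editor's note: the node size caps how many key bits one level can consume,
 * and therefore the tree height.  E.g., assuming 64-bit pointers, a 2^16-byte
 * interior node holds 2^13 child pointers, so each interior level consumes
 * roughly 13 bits of the key.
 */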

typedef void *(rtree_alloc_t)(size_t);
typedef void (rtree_dalloc_t)(void *);

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct rtree_s {
    rtree_alloc_t   *alloc;
    rtree_dalloc_t  *dalloc;
    malloc_mutex_t  mutex;
    void            **root;
    unsigned        height;
    unsigned        level2bits[1]; /* Dynamically sized. */
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
void    rtree_delete(rtree_t *rtree);
void    rtree_prefork(rtree_t *rtree);
void    rtree_postfork_parent(rtree_t *rtree);
void    rtree_postfork_child(rtree_t *rtree);

#endif /* JEMALLOC_H_EXTERNS */
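/*
 * Editor's sketch of typical use (hedged; the alloc/dalloc hooks, 'bits'
 * value, and key derivation below are illustrative, not the exact calls
 * jemalloc makes):
 *
 *   rtree_t *rt = rtree_new(bits, my_alloc, my_dalloc);
 *   rtree_set(rt, (uintptr_t)chunk, 1);   // record chunk as jemalloc-owned
 *   if (rtree_get(rt, (uintptr_t)ptr)) {  // ownership query
 *       ...
 *   }
 *
 * rtree_set() returns true on allocation failure, false on success.
 */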
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
#ifdef JEMALLOC_DEBUG
uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
bool    rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
JEMALLOC_INLINE uint8_t \
f(rtree_t *rtree, uintptr_t key) \
{ \
    uint8_t ret; \
    uintptr_t subkey; \
    unsigned i, lshift, height, bits; \
    void **node, **child; \
 \
    RTREE_LOCK(&rtree->mutex); \
    for (i = lshift = 0, height = rtree->height, node = rtree->root;\
      i < height - 1; \
      i++, lshift += bits, node = child) { \
        bits = rtree->level2bits[i]; \
        subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
          3)) - bits); \
        child = (void**)node[subkey]; \
        if (child == NULL) { \
            RTREE_UNLOCK(&rtree->mutex); \
            return (0); \
        } \
    } \
 \
    /* \
     * node is a leaf, so it contains values rather than node \
     * pointers. \
     */ \
    bits = rtree->level2bits[i]; \
    subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
      bits); \
    { \
        uint8_t *leaf = (uint8_t *)node; \
        ret = leaf[subkey]; \
    } \
    RTREE_UNLOCK(&rtree->mutex); \
 \
    RTREE_GET_VALIDATE \
    return (ret); \
}

#ifdef JEMALLOC_DEBUG
# define RTREE_LOCK(l) malloc_mutex_lock(l)
# define RTREE_UNLOCK(l) malloc_mutex_unlock(l)
# define RTREE_GET_VALIDATE
RTREE_GET_GENERATE(rtree_get_locked)
# undef RTREE_LOCK
# undef RTREE_UNLOCK
# undef RTREE_GET_VALIDATE
#endif

#define RTREE_LOCK(l)
#define RTREE_UNLOCK(l)
#ifdef JEMALLOC_DEBUG
/*
 * Suppose that it were possible for a jemalloc-allocated chunk to be
 * munmap()ped, followed by a different allocator in another thread re-using
 * overlapping virtual memory, all without invalidating the cached rtree
 * value.  The result would be a false positive (the rtree would claim that
 * jemalloc owns memory that it had actually discarded).  This scenario
 * seems impossible, but the following assertion is a prudent sanity check.
 */
# define RTREE_GET_VALIDATE \
    assert(rtree_get_locked(rtree, key) == ret);
#else
# define RTREE_GET_VALIDATE
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE

JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
{
    uintptr_t subkey;
    unsigned i, lshift, height, bits;
    void **node, **child;

    malloc_mutex_lock(&rtree->mutex);
    for (i = lshift = 0, height = rtree->height, node = rtree->root;
      i < height - 1;
      i++, lshift += bits, node = child) {
        bits = rtree->level2bits[i];
        subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
          bits);
        child = (void**)node[subkey];
        if (child == NULL) {
            size_t size = ((i + 1 < height - 1) ? sizeof(void *)
              : (sizeof(uint8_t))) << rtree->level2bits[i+1];
            child = (void**)rtree->alloc(size);
            if (child == NULL) {
                malloc_mutex_unlock(&rtree->mutex);
                return (true);
            }
            memset(child, 0, size);
            node[subkey] = child;
        }
    }

    /* node is a leaf, so it contains values rather than node pointers. */
    bits = rtree->level2bits[i];
    subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
    {
        uint8_t *leaf = (uint8_t *)node;
        leaf[subkey] = val;
    }
    malloc_mutex_unlock(&rtree->mutex);

    return (false);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
721
deps/jemalloc/include/jemalloc/internal/size_classes.h
vendored
Normal file
@@ -0,0 +1,721 @@
/* This file was automatically generated by size_classes.sh. */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 8, 8) \
    SIZE_CLASS(1, 8, 16) \
    SIZE_CLASS(2, 8, 24) \
    SIZE_CLASS(3, 8, 32) \
    SIZE_CLASS(4, 8, 40) \
    SIZE_CLASS(5, 8, 48) \
    SIZE_CLASS(6, 8, 56) \
    SIZE_CLASS(7, 8, 64) \
    SIZE_CLASS(8, 16, 80) \
    SIZE_CLASS(9, 16, 96) \
    SIZE_CLASS(10, 16, 112) \
    SIZE_CLASS(11, 16, 128) \
    SIZE_CLASS(12, 32, 160) \
    SIZE_CLASS(13, 32, 192) \
    SIZE_CLASS(14, 32, 224) \
    SIZE_CLASS(15, 32, 256) \
    SIZE_CLASS(16, 64, 320) \
    SIZE_CLASS(17, 64, 384) \
    SIZE_CLASS(18, 64, 448) \
    SIZE_CLASS(19, 64, 512) \
    SIZE_CLASS(20, 128, 640) \
    SIZE_CLASS(21, 128, 768) \
    SIZE_CLASS(22, 128, 896) \
    SIZE_CLASS(23, 128, 1024) \
    SIZE_CLASS(24, 256, 1280) \
    SIZE_CLASS(25, 256, 1536) \
    SIZE_CLASS(26, 256, 1792) \
    SIZE_CLASS(27, 256, 2048) \
    SIZE_CLASS(28, 512, 2560) \
    SIZE_CLASS(29, 512, 3072) \
    SIZE_CLASS(30, 512, 3584) \

#define NBINS 31
#define SMALL_MAXCLASS 3584
#endif
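/*
 * Editor's note on reading these tables: SIZE_CLASS(bin, delta, sz) declares
 * bin 'bin' as serving requests of at most 'sz' bytes, with 'delta' the gap
 * from the previous class.  E.g., SIZE_CLASS(8, 16, 80) above means bin 8
 * covers requests of 65..80 bytes (the prior class ends at 64; 64 + 16 = 80).
 */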

#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 8, 8) \
    SIZE_CLASS(1, 8, 16) \
    SIZE_CLASS(2, 8, 24) \
    SIZE_CLASS(3, 8, 32) \
    SIZE_CLASS(4, 8, 40) \
    SIZE_CLASS(5, 8, 48) \
    SIZE_CLASS(6, 8, 56) \
    SIZE_CLASS(7, 8, 64) \
    SIZE_CLASS(8, 16, 80) \
    SIZE_CLASS(9, 16, 96) \
    SIZE_CLASS(10, 16, 112) \
    SIZE_CLASS(11, 16, 128) \
    SIZE_CLASS(12, 32, 160) \
    SIZE_CLASS(13, 32, 192) \
    SIZE_CLASS(14, 32, 224) \
    SIZE_CLASS(15, 32, 256) \
    SIZE_CLASS(16, 64, 320) \
    SIZE_CLASS(17, 64, 384) \
    SIZE_CLASS(18, 64, 448) \
    SIZE_CLASS(19, 64, 512) \
    SIZE_CLASS(20, 128, 640) \
    SIZE_CLASS(21, 128, 768) \
    SIZE_CLASS(22, 128, 896) \
    SIZE_CLASS(23, 128, 1024) \
    SIZE_CLASS(24, 256, 1280) \
    SIZE_CLASS(25, 256, 1536) \
    SIZE_CLASS(26, 256, 1792) \
    SIZE_CLASS(27, 256, 2048) \
    SIZE_CLASS(28, 512, 2560) \
    SIZE_CLASS(29, 512, 3072) \
    SIZE_CLASS(30, 512, 3584) \
    SIZE_CLASS(31, 512, 4096) \
    SIZE_CLASS(32, 1024, 5120) \
    SIZE_CLASS(33, 1024, 6144) \
    SIZE_CLASS(34, 1024, 7168) \

#define NBINS 35
#define SMALL_MAXCLASS 7168
#endif

#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 14)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 8, 8) \
    SIZE_CLASS(1, 8, 16) \
    SIZE_CLASS(2, 8, 24) \
    SIZE_CLASS(3, 8, 32) \
    SIZE_CLASS(4, 8, 40) \
    SIZE_CLASS(5, 8, 48) \
    SIZE_CLASS(6, 8, 56) \
    SIZE_CLASS(7, 8, 64) \
    SIZE_CLASS(8, 16, 80) \
    SIZE_CLASS(9, 16, 96) \
    SIZE_CLASS(10, 16, 112) \
    SIZE_CLASS(11, 16, 128) \
    SIZE_CLASS(12, 32, 160) \
    SIZE_CLASS(13, 32, 192) \
    SIZE_CLASS(14, 32, 224) \
    SIZE_CLASS(15, 32, 256) \
    SIZE_CLASS(16, 64, 320) \
    SIZE_CLASS(17, 64, 384) \
    SIZE_CLASS(18, 64, 448) \
    SIZE_CLASS(19, 64, 512) \
    SIZE_CLASS(20, 128, 640) \
    SIZE_CLASS(21, 128, 768) \
    SIZE_CLASS(22, 128, 896) \
    SIZE_CLASS(23, 128, 1024) \
    SIZE_CLASS(24, 256, 1280) \
    SIZE_CLASS(25, 256, 1536) \
    SIZE_CLASS(26, 256, 1792) \
    SIZE_CLASS(27, 256, 2048) \
    SIZE_CLASS(28, 512, 2560) \
    SIZE_CLASS(29, 512, 3072) \
    SIZE_CLASS(30, 512, 3584) \
    SIZE_CLASS(31, 512, 4096) \
    SIZE_CLASS(32, 1024, 5120) \
    SIZE_CLASS(33, 1024, 6144) \
    SIZE_CLASS(34, 1024, 7168) \
    SIZE_CLASS(35, 1024, 8192) \
    SIZE_CLASS(36, 2048, 10240) \
    SIZE_CLASS(37, 2048, 12288) \
    SIZE_CLASS(38, 2048, 14336) \

#define NBINS 39
#define SMALL_MAXCLASS 14336
#endif

#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 15)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 8, 8) \
    SIZE_CLASS(1, 8, 16) \
    SIZE_CLASS(2, 8, 24) \
    SIZE_CLASS(3, 8, 32) \
    SIZE_CLASS(4, 8, 40) \
    SIZE_CLASS(5, 8, 48) \
    SIZE_CLASS(6, 8, 56) \
    SIZE_CLASS(7, 8, 64) \
    SIZE_CLASS(8, 16, 80) \
    SIZE_CLASS(9, 16, 96) \
    SIZE_CLASS(10, 16, 112) \
    SIZE_CLASS(11, 16, 128) \
    SIZE_CLASS(12, 32, 160) \
    SIZE_CLASS(13, 32, 192) \
    SIZE_CLASS(14, 32, 224) \
    SIZE_CLASS(15, 32, 256) \
    SIZE_CLASS(16, 64, 320) \
    SIZE_CLASS(17, 64, 384) \
    SIZE_CLASS(18, 64, 448) \
    SIZE_CLASS(19, 64, 512) \
    SIZE_CLASS(20, 128, 640) \
    SIZE_CLASS(21, 128, 768) \
    SIZE_CLASS(22, 128, 896) \
    SIZE_CLASS(23, 128, 1024) \
    SIZE_CLASS(24, 256, 1280) \
    SIZE_CLASS(25, 256, 1536) \
    SIZE_CLASS(26, 256, 1792) \
    SIZE_CLASS(27, 256, 2048) \
    SIZE_CLASS(28, 512, 2560) \
    SIZE_CLASS(29, 512, 3072) \
    SIZE_CLASS(30, 512, 3584) \
    SIZE_CLASS(31, 512, 4096) \
    SIZE_CLASS(32, 1024, 5120) \
    SIZE_CLASS(33, 1024, 6144) \
    SIZE_CLASS(34, 1024, 7168) \
    SIZE_CLASS(35, 1024, 8192) \
    SIZE_CLASS(36, 2048, 10240) \
    SIZE_CLASS(37, 2048, 12288) \
    SIZE_CLASS(38, 2048, 14336) \
    SIZE_CLASS(39, 2048, 16384) \
    SIZE_CLASS(40, 4096, 20480) \
    SIZE_CLASS(41, 4096, 24576) \
    SIZE_CLASS(42, 4096, 28672) \

#define NBINS 43
#define SMALL_MAXCLASS 28672
#endif

#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 8, 8) \
    SIZE_CLASS(1, 8, 16) \
    SIZE_CLASS(2, 8, 24) \
    SIZE_CLASS(3, 8, 32) \
    SIZE_CLASS(4, 8, 40) \
    SIZE_CLASS(5, 8, 48) \
    SIZE_CLASS(6, 8, 56) \
    SIZE_CLASS(7, 8, 64) \
    SIZE_CLASS(8, 16, 80) \
    SIZE_CLASS(9, 16, 96) \
    SIZE_CLASS(10, 16, 112) \
    SIZE_CLASS(11, 16, 128) \
    SIZE_CLASS(12, 32, 160) \
    SIZE_CLASS(13, 32, 192) \
    SIZE_CLASS(14, 32, 224) \
    SIZE_CLASS(15, 32, 256) \
    SIZE_CLASS(16, 64, 320) \
    SIZE_CLASS(17, 64, 384) \
    SIZE_CLASS(18, 64, 448) \
    SIZE_CLASS(19, 64, 512) \
    SIZE_CLASS(20, 128, 640) \
    SIZE_CLASS(21, 128, 768) \
    SIZE_CLASS(22, 128, 896) \
    SIZE_CLASS(23, 128, 1024) \
    SIZE_CLASS(24, 256, 1280) \
    SIZE_CLASS(25, 256, 1536) \
    SIZE_CLASS(26, 256, 1792) \
    SIZE_CLASS(27, 256, 2048) \
    SIZE_CLASS(28, 512, 2560) \
    SIZE_CLASS(29, 512, 3072) \
    SIZE_CLASS(30, 512, 3584) \
    SIZE_CLASS(31, 512, 4096) \
    SIZE_CLASS(32, 1024, 5120) \
    SIZE_CLASS(33, 1024, 6144) \
    SIZE_CLASS(34, 1024, 7168) \
    SIZE_CLASS(35, 1024, 8192) \
    SIZE_CLASS(36, 2048, 10240) \
    SIZE_CLASS(37, 2048, 12288) \
    SIZE_CLASS(38, 2048, 14336) \
    SIZE_CLASS(39, 2048, 16384) \
    SIZE_CLASS(40, 4096, 20480) \
    SIZE_CLASS(41, 4096, 24576) \
    SIZE_CLASS(42, 4096, 28672) \
    SIZE_CLASS(43, 4096, 32768) \
    SIZE_CLASS(44, 8192, 40960) \
    SIZE_CLASS(45, 8192, 49152) \
    SIZE_CLASS(46, 8192, 57344) \

#define NBINS 47
#define SMALL_MAXCLASS 57344
#endif

#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 8, 8) \
    SIZE_CLASS(1, 8, 16) \
    SIZE_CLASS(2, 16, 32) \
    SIZE_CLASS(3, 16, 48) \
    SIZE_CLASS(4, 16, 64) \
    SIZE_CLASS(5, 16, 80) \
    SIZE_CLASS(6, 16, 96) \
    SIZE_CLASS(7, 16, 112) \
    SIZE_CLASS(8, 16, 128) \
    SIZE_CLASS(9, 32, 160) \
    SIZE_CLASS(10, 32, 192) \
    SIZE_CLASS(11, 32, 224) \
    SIZE_CLASS(12, 32, 256) \
    SIZE_CLASS(13, 64, 320) \
    SIZE_CLASS(14, 64, 384) \
    SIZE_CLASS(15, 64, 448) \
    SIZE_CLASS(16, 64, 512) \
    SIZE_CLASS(17, 128, 640) \
    SIZE_CLASS(18, 128, 768) \
    SIZE_CLASS(19, 128, 896) \
    SIZE_CLASS(20, 128, 1024) \
    SIZE_CLASS(21, 256, 1280) \
    SIZE_CLASS(22, 256, 1536) \
    SIZE_CLASS(23, 256, 1792) \
    SIZE_CLASS(24, 256, 2048) \
    SIZE_CLASS(25, 512, 2560) \
    SIZE_CLASS(26, 512, 3072) \
    SIZE_CLASS(27, 512, 3584) \

#define NBINS 28
#define SMALL_MAXCLASS 3584
#endif

#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 8, 8) \
    SIZE_CLASS(1, 8, 16) \
    SIZE_CLASS(2, 16, 32) \
    SIZE_CLASS(3, 16, 48) \
    SIZE_CLASS(4, 16, 64) \
    SIZE_CLASS(5, 16, 80) \
    SIZE_CLASS(6, 16, 96) \
    SIZE_CLASS(7, 16, 112) \
    SIZE_CLASS(8, 16, 128) \
    SIZE_CLASS(9, 32, 160) \
    SIZE_CLASS(10, 32, 192) \
    SIZE_CLASS(11, 32, 224) \
    SIZE_CLASS(12, 32, 256) \
    SIZE_CLASS(13, 64, 320) \
    SIZE_CLASS(14, 64, 384) \
    SIZE_CLASS(15, 64, 448) \
    SIZE_CLASS(16, 64, 512) \
    SIZE_CLASS(17, 128, 640) \
    SIZE_CLASS(18, 128, 768) \
    SIZE_CLASS(19, 128, 896) \
    SIZE_CLASS(20, 128, 1024) \
    SIZE_CLASS(21, 256, 1280) \
    SIZE_CLASS(22, 256, 1536) \
    SIZE_CLASS(23, 256, 1792) \
    SIZE_CLASS(24, 256, 2048) \
    SIZE_CLASS(25, 512, 2560) \
    SIZE_CLASS(26, 512, 3072) \
    SIZE_CLASS(27, 512, 3584) \
    SIZE_CLASS(28, 512, 4096) \
    SIZE_CLASS(29, 1024, 5120) \
    SIZE_CLASS(30, 1024, 6144) \
    SIZE_CLASS(31, 1024, 7168) \

#define NBINS 32
#define SMALL_MAXCLASS 7168
#endif

#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 14)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 8, 8) \
    SIZE_CLASS(1, 8, 16) \
    SIZE_CLASS(2, 16, 32) \
    SIZE_CLASS(3, 16, 48) \
    SIZE_CLASS(4, 16, 64) \
    SIZE_CLASS(5, 16, 80) \
    SIZE_CLASS(6, 16, 96) \
    SIZE_CLASS(7, 16, 112) \
    SIZE_CLASS(8, 16, 128) \
    SIZE_CLASS(9, 32, 160) \
    SIZE_CLASS(10, 32, 192) \
    SIZE_CLASS(11, 32, 224) \
    SIZE_CLASS(12, 32, 256) \
    SIZE_CLASS(13, 64, 320) \
    SIZE_CLASS(14, 64, 384) \
    SIZE_CLASS(15, 64, 448) \
    SIZE_CLASS(16, 64, 512) \
    SIZE_CLASS(17, 128, 640) \
    SIZE_CLASS(18, 128, 768) \
    SIZE_CLASS(19, 128, 896) \
    SIZE_CLASS(20, 128, 1024) \
    SIZE_CLASS(21, 256, 1280) \
    SIZE_CLASS(22, 256, 1536) \
    SIZE_CLASS(23, 256, 1792) \
    SIZE_CLASS(24, 256, 2048) \
    SIZE_CLASS(25, 512, 2560) \
    SIZE_CLASS(26, 512, 3072) \
    SIZE_CLASS(27, 512, 3584) \
    SIZE_CLASS(28, 512, 4096) \
    SIZE_CLASS(29, 1024, 5120) \
    SIZE_CLASS(30, 1024, 6144) \
    SIZE_CLASS(31, 1024, 7168) \
    SIZE_CLASS(32, 1024, 8192) \
    SIZE_CLASS(33, 2048, 10240) \
    SIZE_CLASS(34, 2048, 12288) \
    SIZE_CLASS(35, 2048, 14336) \

#define NBINS 36
#define SMALL_MAXCLASS 14336
#endif

#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 15)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 8, 8) \
    SIZE_CLASS(1, 8, 16) \
    SIZE_CLASS(2, 16, 32) \
    SIZE_CLASS(3, 16, 48) \
    SIZE_CLASS(4, 16, 64) \
    SIZE_CLASS(5, 16, 80) \
    SIZE_CLASS(6, 16, 96) \
    SIZE_CLASS(7, 16, 112) \
    SIZE_CLASS(8, 16, 128) \
    SIZE_CLASS(9, 32, 160) \
    SIZE_CLASS(10, 32, 192) \
    SIZE_CLASS(11, 32, 224) \
    SIZE_CLASS(12, 32, 256) \
    SIZE_CLASS(13, 64, 320) \
    SIZE_CLASS(14, 64, 384) \
    SIZE_CLASS(15, 64, 448) \
    SIZE_CLASS(16, 64, 512) \
    SIZE_CLASS(17, 128, 640) \
    SIZE_CLASS(18, 128, 768) \
    SIZE_CLASS(19, 128, 896) \
    SIZE_CLASS(20, 128, 1024) \
    SIZE_CLASS(21, 256, 1280) \
    SIZE_CLASS(22, 256, 1536) \
    SIZE_CLASS(23, 256, 1792) \
    SIZE_CLASS(24, 256, 2048) \
    SIZE_CLASS(25, 512, 2560) \
    SIZE_CLASS(26, 512, 3072) \
    SIZE_CLASS(27, 512, 3584) \
    SIZE_CLASS(28, 512, 4096) \
    SIZE_CLASS(29, 1024, 5120) \
    SIZE_CLASS(30, 1024, 6144) \
    SIZE_CLASS(31, 1024, 7168) \
    SIZE_CLASS(32, 1024, 8192) \
    SIZE_CLASS(33, 2048, 10240) \
    SIZE_CLASS(34, 2048, 12288) \
    SIZE_CLASS(35, 2048, 14336) \
    SIZE_CLASS(36, 2048, 16384) \
    SIZE_CLASS(37, 4096, 20480) \
    SIZE_CLASS(38, 4096, 24576) \
    SIZE_CLASS(39, 4096, 28672) \

#define NBINS 40
#define SMALL_MAXCLASS 28672
#endif

#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 8, 8) \
    SIZE_CLASS(1, 8, 16) \
    SIZE_CLASS(2, 16, 32) \
    SIZE_CLASS(3, 16, 48) \
    SIZE_CLASS(4, 16, 64) \
    SIZE_CLASS(5, 16, 80) \
    SIZE_CLASS(6, 16, 96) \
    SIZE_CLASS(7, 16, 112) \
    SIZE_CLASS(8, 16, 128) \
    SIZE_CLASS(9, 32, 160) \
    SIZE_CLASS(10, 32, 192) \
    SIZE_CLASS(11, 32, 224) \
    SIZE_CLASS(12, 32, 256) \
    SIZE_CLASS(13, 64, 320) \
    SIZE_CLASS(14, 64, 384) \
    SIZE_CLASS(15, 64, 448) \
    SIZE_CLASS(16, 64, 512) \
    SIZE_CLASS(17, 128, 640) \
    SIZE_CLASS(18, 128, 768) \
    SIZE_CLASS(19, 128, 896) \
    SIZE_CLASS(20, 128, 1024) \
    SIZE_CLASS(21, 256, 1280) \
    SIZE_CLASS(22, 256, 1536) \
    SIZE_CLASS(23, 256, 1792) \
    SIZE_CLASS(24, 256, 2048) \
    SIZE_CLASS(25, 512, 2560) \
    SIZE_CLASS(26, 512, 3072) \
    SIZE_CLASS(27, 512, 3584) \
    SIZE_CLASS(28, 512, 4096) \
    SIZE_CLASS(29, 1024, 5120) \
    SIZE_CLASS(30, 1024, 6144) \
    SIZE_CLASS(31, 1024, 7168) \
    SIZE_CLASS(32, 1024, 8192) \
    SIZE_CLASS(33, 2048, 10240) \
    SIZE_CLASS(34, 2048, 12288) \
    SIZE_CLASS(35, 2048, 14336) \
    SIZE_CLASS(36, 2048, 16384) \
    SIZE_CLASS(37, 4096, 20480) \
    SIZE_CLASS(38, 4096, 24576) \
    SIZE_CLASS(39, 4096, 28672) \
    SIZE_CLASS(40, 4096, 32768) \
    SIZE_CLASS(41, 8192, 40960) \
    SIZE_CLASS(42, 8192, 49152) \
    SIZE_CLASS(43, 8192, 57344) \

#define NBINS 44
#define SMALL_MAXCLASS 57344
#endif

#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 16, 16) \
    SIZE_CLASS(1, 16, 32) \
    SIZE_CLASS(2, 16, 48) \
    SIZE_CLASS(3, 16, 64) \
    SIZE_CLASS(4, 16, 80) \
    SIZE_CLASS(5, 16, 96) \
    SIZE_CLASS(6, 16, 112) \
    SIZE_CLASS(7, 16, 128) \
    SIZE_CLASS(8, 32, 160) \
    SIZE_CLASS(9, 32, 192) \
    SIZE_CLASS(10, 32, 224) \
    SIZE_CLASS(11, 32, 256) \
    SIZE_CLASS(12, 64, 320) \
    SIZE_CLASS(13, 64, 384) \
    SIZE_CLASS(14, 64, 448) \
    SIZE_CLASS(15, 64, 512) \
    SIZE_CLASS(16, 128, 640) \
    SIZE_CLASS(17, 128, 768) \
    SIZE_CLASS(18, 128, 896) \
    SIZE_CLASS(19, 128, 1024) \
    SIZE_CLASS(20, 256, 1280) \
    SIZE_CLASS(21, 256, 1536) \
    SIZE_CLASS(22, 256, 1792) \
    SIZE_CLASS(23, 256, 2048) \
    SIZE_CLASS(24, 512, 2560) \
    SIZE_CLASS(25, 512, 3072) \
    SIZE_CLASS(26, 512, 3584) \

#define NBINS 27
#define SMALL_MAXCLASS 3584
#endif

#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 16, 16) \
    SIZE_CLASS(1, 16, 32) \
    SIZE_CLASS(2, 16, 48) \
    SIZE_CLASS(3, 16, 64) \
    SIZE_CLASS(4, 16, 80) \
    SIZE_CLASS(5, 16, 96) \
    SIZE_CLASS(6, 16, 112) \
    SIZE_CLASS(7, 16, 128) \
    SIZE_CLASS(8, 32, 160) \
    SIZE_CLASS(9, 32, 192) \
    SIZE_CLASS(10, 32, 224) \
    SIZE_CLASS(11, 32, 256) \
    SIZE_CLASS(12, 64, 320) \
    SIZE_CLASS(13, 64, 384) \
    SIZE_CLASS(14, 64, 448) \
    SIZE_CLASS(15, 64, 512) \
    SIZE_CLASS(16, 128, 640) \
    SIZE_CLASS(17, 128, 768) \
    SIZE_CLASS(18, 128, 896) \
    SIZE_CLASS(19, 128, 1024) \
    SIZE_CLASS(20, 256, 1280) \
    SIZE_CLASS(21, 256, 1536) \
    SIZE_CLASS(22, 256, 1792) \
    SIZE_CLASS(23, 256, 2048) \
    SIZE_CLASS(24, 512, 2560) \
    SIZE_CLASS(25, 512, 3072) \
    SIZE_CLASS(26, 512, 3584) \
    SIZE_CLASS(27, 512, 4096) \
    SIZE_CLASS(28, 1024, 5120) \
    SIZE_CLASS(29, 1024, 6144) \
    SIZE_CLASS(30, 1024, 7168) \

#define NBINS 31
#define SMALL_MAXCLASS 7168
#endif

#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 14)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 16, 16) \
    SIZE_CLASS(1, 16, 32) \
    SIZE_CLASS(2, 16, 48) \
    SIZE_CLASS(3, 16, 64) \
    SIZE_CLASS(4, 16, 80) \
    SIZE_CLASS(5, 16, 96) \
    SIZE_CLASS(6, 16, 112) \
    SIZE_CLASS(7, 16, 128) \
    SIZE_CLASS(8, 32, 160) \
    SIZE_CLASS(9, 32, 192) \
    SIZE_CLASS(10, 32, 224) \
    SIZE_CLASS(11, 32, 256) \
    SIZE_CLASS(12, 64, 320) \
    SIZE_CLASS(13, 64, 384) \
    SIZE_CLASS(14, 64, 448) \
    SIZE_CLASS(15, 64, 512) \
    SIZE_CLASS(16, 128, 640) \
    SIZE_CLASS(17, 128, 768) \
    SIZE_CLASS(18, 128, 896) \
    SIZE_CLASS(19, 128, 1024) \
    SIZE_CLASS(20, 256, 1280) \
    SIZE_CLASS(21, 256, 1536) \
    SIZE_CLASS(22, 256, 1792) \
    SIZE_CLASS(23, 256, 2048) \
    SIZE_CLASS(24, 512, 2560) \
    SIZE_CLASS(25, 512, 3072) \
    SIZE_CLASS(26, 512, 3584) \
    SIZE_CLASS(27, 512, 4096) \
    SIZE_CLASS(28, 1024, 5120) \
    SIZE_CLASS(29, 1024, 6144) \
    SIZE_CLASS(30, 1024, 7168) \
    SIZE_CLASS(31, 1024, 8192) \
    SIZE_CLASS(32, 2048, 10240) \
    SIZE_CLASS(33, 2048, 12288) \
    SIZE_CLASS(34, 2048, 14336) \

#define NBINS 35
#define SMALL_MAXCLASS 14336
#endif

#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 15)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 16, 16) \
    SIZE_CLASS(1, 16, 32) \
    SIZE_CLASS(2, 16, 48) \
    SIZE_CLASS(3, 16, 64) \
    SIZE_CLASS(4, 16, 80) \
    SIZE_CLASS(5, 16, 96) \
    SIZE_CLASS(6, 16, 112) \
    SIZE_CLASS(7, 16, 128) \
    SIZE_CLASS(8, 32, 160) \
    SIZE_CLASS(9, 32, 192) \
    SIZE_CLASS(10, 32, 224) \
    SIZE_CLASS(11, 32, 256) \
    SIZE_CLASS(12, 64, 320) \
    SIZE_CLASS(13, 64, 384) \
    SIZE_CLASS(14, 64, 448) \
    SIZE_CLASS(15, 64, 512) \
    SIZE_CLASS(16, 128, 640) \
    SIZE_CLASS(17, 128, 768) \
    SIZE_CLASS(18, 128, 896) \
    SIZE_CLASS(19, 128, 1024) \
    SIZE_CLASS(20, 256, 1280) \
    SIZE_CLASS(21, 256, 1536) \
    SIZE_CLASS(22, 256, 1792) \
    SIZE_CLASS(23, 256, 2048) \
    SIZE_CLASS(24, 512, 2560) \
    SIZE_CLASS(25, 512, 3072) \
    SIZE_CLASS(26, 512, 3584) \
    SIZE_CLASS(27, 512, 4096) \
    SIZE_CLASS(28, 1024, 5120) \
    SIZE_CLASS(29, 1024, 6144) \
    SIZE_CLASS(30, 1024, 7168) \
    SIZE_CLASS(31, 1024, 8192) \
    SIZE_CLASS(32, 2048, 10240) \
    SIZE_CLASS(33, 2048, 12288) \
    SIZE_CLASS(34, 2048, 14336) \
    SIZE_CLASS(35, 2048, 16384) \
    SIZE_CLASS(36, 4096, 20480) \
    SIZE_CLASS(37, 4096, 24576) \
    SIZE_CLASS(38, 4096, 28672) \

#define NBINS 39
#define SMALL_MAXCLASS 28672
#endif

#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0, 16, 16) \
    SIZE_CLASS(1, 16, 32) \
    SIZE_CLASS(2, 16, 48) \
    SIZE_CLASS(3, 16, 64) \
    SIZE_CLASS(4, 16, 80) \
    SIZE_CLASS(5, 16, 96) \
    SIZE_CLASS(6, 16, 112) \
    SIZE_CLASS(7, 16, 128) \
    SIZE_CLASS(8, 32, 160) \
    SIZE_CLASS(9, 32, 192) \
    SIZE_CLASS(10, 32, 224) \
    SIZE_CLASS(11, 32, 256) \
    SIZE_CLASS(12, 64, 320) \
    SIZE_CLASS(13, 64, 384) \
    SIZE_CLASS(14, 64, 448) \
    SIZE_CLASS(15, 64, 512) \
    SIZE_CLASS(16, 128, 640) \
    SIZE_CLASS(17, 128, 768) \
    SIZE_CLASS(18, 128, 896) \
    SIZE_CLASS(19, 128, 1024) \
    SIZE_CLASS(20, 256, 1280) \
    SIZE_CLASS(21, 256, 1536) \
    SIZE_CLASS(22, 256, 1792) \
    SIZE_CLASS(23, 256, 2048) \
    SIZE_CLASS(24, 512, 2560) \
    SIZE_CLASS(25, 512, 3072) \
    SIZE_CLASS(26, 512, 3584) \
    SIZE_CLASS(27, 512, 4096) \
    SIZE_CLASS(28, 1024, 5120) \
    SIZE_CLASS(29, 1024, 6144) \
    SIZE_CLASS(30, 1024, 7168) \
    SIZE_CLASS(31, 1024, 8192) \
    SIZE_CLASS(32, 2048, 10240) \
    SIZE_CLASS(33, 2048, 12288) \
    SIZE_CLASS(34, 2048, 14336) \
    SIZE_CLASS(35, 2048, 16384) \
    SIZE_CLASS(36, 4096, 20480) \
    SIZE_CLASS(37, 4096, 24576) \
    SIZE_CLASS(38, 4096, 28672) \
    SIZE_CLASS(39, 4096, 32768) \
    SIZE_CLASS(40, 8192, 40960) \
    SIZE_CLASS(41, 8192, 49152) \
    SIZE_CLASS(42, 8192, 57344) \

#define NBINS 43
#define SMALL_MAXCLASS 57344
#endif

#ifndef SIZE_CLASSES_DEFINED
# error "No size class definitions match configuration"
#endif
#undef SIZE_CLASSES_DEFINED
/*
 * The small_size2bin lookup table uses uint8_t to encode each bin index, so we
 * cannot support more than 256 small size classes.  Further constrain NBINS to
 * 255 to support prof_promote, since all small size classes, plus a "not
 * small" size class must be stored in 8 bits of arena_chunk_map_t's bits
 * field.
 */
|
||||
#if (NBINS > 255)
|
||||
# error "Too many small size classes"
|
||||
#endif
|
||||
|
||||
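/*
 * Illustrative sketch (not part of this header): SIZE_CLASSES is an X-macro,
 * so consumers generate code or data from the table by defining SIZE_CLASS
 * before expanding it.  The table name sz_of_bin below is hypothetical; the
 * real consumers live elsewhere in jemalloc.  Kept under #if 0 so this header
 * is unchanged.
 */
#if 0
#define SIZE_CLASS(bin, delta, sz) sz,
static const size_t sz_of_bin[NBINS] = {
    SIZE_CLASSES
};
#undef SIZE_CLASS
#endif
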
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS


#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS


#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES


#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
173
deps/jemalloc/include/jemalloc/internal/stats.h
vendored
Normal file
@@ -0,0 +1,173 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct tcache_bin_stats_s {
    /*
     * Number of allocation requests that corresponded to the size of this
     * bin.
     */
    uint64_t nrequests;
};

struct malloc_bin_stats_s {
    /*
     * Current number of bytes allocated, including objects currently
     * cached by tcache.
     */
    size_t allocated;

    /*
     * Total number of allocation/deallocation requests served directly by
     * the bin.  Note that tcache may allocate an object, then recycle it
     * many times, resulting in many increments to nrequests, but only one
     * each to nmalloc and ndalloc.
     */
    uint64_t nmalloc;
    uint64_t ndalloc;

    /*
     * Number of allocation requests that correspond to the size of this
     * bin.  This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    uint64_t nrequests;

    /* Number of tcache fills from this bin. */
    uint64_t nfills;

    /* Number of tcache flushes to this bin. */
    uint64_t nflushes;

    /* Total number of runs created for this bin's size class. */
    uint64_t nruns;

    /*
     * Total number of runs reused by extracting them from the runs tree for
     * this bin's size class.
     */
    uint64_t reruns;

    /* Current number of runs in this bin. */
    size_t curruns;
};

struct malloc_large_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the arena.  Note that tcache may allocate an object, then recycle it
     * many times, resulting in many increments to nrequests, but only one
     * each to nmalloc and ndalloc.
     */
    uint64_t nmalloc;
    uint64_t ndalloc;

    /*
     * Number of allocation requests that correspond to this size class.
     * This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    uint64_t nrequests;

    /* Current number of runs of this size class. */
    size_t curruns;
};

struct arena_stats_s {
    /* Number of bytes currently mapped. */
    size_t mapped;

    /*
     * Total number of purge sweeps, total number of madvise calls made,
     * and total pages purged in order to keep dirty unused memory under
     * control.
     */
    uint64_t npurge;
    uint64_t nmadvise;
    uint64_t purged;

    /* Per-size-category statistics. */
    size_t allocated_large;
    uint64_t nmalloc_large;
    uint64_t ndalloc_large;
    uint64_t nrequests_large;

    /*
     * One element for each possible size class, including sizes that
     * overlap with bin size classes.  This is necessary because ipalloc()
     * sometimes has to use such large objects in order to assure proper
     * alignment.
     */
    malloc_large_stats_t *lstats;
};

struct chunk_stats_s {
    /* Number of chunks that were allocated. */
    uint64_t nchunks;

    /* High-water mark for number of chunks allocated. */
    size_t highchunks;

    /*
     * Current number of chunks allocated.  This value isn't maintained for
     * any other purpose, so keep track of it in order to be able to set
     * highchunks.
     */
    size_t curchunks;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_stats_print;

extern size_t stats_cactive;

void stats_print(void (*write)(void *, const char *), void *cbopaque,
    const char *opts);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t stats_cactive_get(void);
void stats_cactive_add(size_t size);
void stats_cactive_sub(size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
JEMALLOC_INLINE size_t
stats_cactive_get(void)
{

    return (atomic_read_z(&stats_cactive));
}

JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{

    atomic_add_z(&stats_cactive, size);
}

JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{

    atomic_sub_z(&stats_cactive, size);
}
#endif

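/*
 * Hypothetical usage sketch (not part of this header): code that maps or
 * unmaps `len` active bytes brackets the operation with the cactive helpers
 * above; the atomic_*_z primitives make the updates safe without a dedicated
 * stats mutex.  The function name is illustrative only, so it is kept under
 * #if 0.
 */
#if 0
static void
account_mapping(size_t len, bool mapping)
{

    if (mapping)
        stats_cactive_add(len);
    else
        stats_cactive_sub(len);
}
#endif
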
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
443
deps/jemalloc/include/jemalloc/internal/tcache.h
vendored
Normal file
@@ -0,0 +1,443 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per thread basis and
 * cleaning up during thread shutdown.
 */
#define TCACHE_STATE_DISABLED     ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY    ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX          TCACHE_STATE_PURGATORY

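/*
 * Illustrative note (this mirrors the inline functions later in this file):
 * because the sentinels occupy addresses 1..3, a single unsigned comparison
 * distinguishes real caches from encoded state:
 *
 *	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
 *		... tcache encodes state, not a usable tcache_t ...
 */
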
/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the per-size-class limit
 * of twice the number of regions per run.
 *
 * This constant must be an even number.
 */
#define TCACHE_NSLOTS_SMALL_MAX 200

/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define TCACHE_GC_SWEEP 8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))

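/*
 * Worked example (illustrative): with TCACHE_GC_SWEEP == 8192 and NBINS == 31
 * (one of the size_classes.h configurations in this commit), TCACHE_GC_INCR
 * is 8192/31 + 1 == 265, so a full sweep of all 31 bins takes roughly
 * 31 * 265 == 8215 allocation events, slightly more than 8192, as the comment
 * above notes.
 */
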
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

typedef enum {
    tcache_enabled_false = 0, /* Enable cast to/from bool. */
    tcache_enabled_true = 1,
    tcache_enabled_default = 2
} tcache_enabled_t;

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
    unsigned ncached_max;  /* Upper limit on ncached. */
};

struct tcache_bin_s {
    tcache_bin_stats_t tstats;
    int low_water;         /* Min # cached since last GC. */
    unsigned lg_fill_div;  /* Fill (ncached_max >> lg_fill_div). */
    unsigned ncached;      /* # of cached objects. */
    void **avail;          /* Stack of available objects. */
};

struct tcache_s {
    ql_elm(tcache_t) link;      /* Used for aggregating stats. */
    uint64_t prof_accumbytes;   /* Cleared after arena_prof_accum() */
    arena_t *arena;             /* This thread's arena. */
    unsigned ev_cnt;            /* Event count since incremental GC. */
    unsigned next_gc_bin;       /* Next bin to GC. */
    tcache_bin_t tbins[1];      /* Dynamically sized. */
    /*
     * The pointer stacks associated with tbins follow as a contiguous
     * array.  During tcache initialization, the avail pointer in each
     * element of tbins is initialized to point to the proper offset within
     * this array.
     */
};

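/*
 * Hedged layout sketch (the actual allocation arithmetic lives in tcache.c,
 * which is not part of this header): a tcache_t backing nhbins bins occupies
 * roughly
 *
 *	sizeof(tcache_t) + (nhbins - 1) * sizeof(tcache_bin_t)
 *	    + <total stack space for all avail arrays>
 *
 * with each tbins[i].avail pointed at its slice of the trailing pointer
 * array.
 */
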
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;

extern tcache_bin_info_t *tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t nhbins;

/* Maximum cached size class. */
extern size_t tcache_maxclass;

size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tcache_t *tcache);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_dissociate(tcache_t *tcache);
tcache_t *tcache_create(arena_t *arena);
void tcache_destroy(tcache_t *tcache);
void tcache_thread_cleanup(void *arg);
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool tcache_boot0(void);
bool tcache_boot1(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)

void tcache_event(tcache_t *tcache);
void tcache_flush(void);
bool tcache_enabled_get(void);
tcache_t *tcache_get(bool create);
void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin);
void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tcache_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
    tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
    tcache_enabled_default, malloc_tsd_no_cleanup)

JEMALLOC_INLINE void
tcache_flush(void)
{
    tcache_t *tcache;

    cassert(config_tcache);

    tcache = *tcache_tsd_get();
    if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
        return;
    tcache_destroy(tcache);
    tcache = NULL;
    tcache_tsd_set(&tcache);
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
    tcache_enabled_t tcache_enabled;

    cassert(config_tcache);

    tcache_enabled = *tcache_enabled_tsd_get();
    if (tcache_enabled == tcache_enabled_default) {
        tcache_enabled = (tcache_enabled_t)opt_tcache;
        tcache_enabled_tsd_set(&tcache_enabled);
    }

    return ((bool)tcache_enabled);
}

JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
    tcache_enabled_t tcache_enabled;
    tcache_t *tcache;

    cassert(config_tcache);

    tcache_enabled = (tcache_enabled_t)enabled;
    tcache_enabled_tsd_set(&tcache_enabled);
    tcache = *tcache_tsd_get();
    if (enabled) {
        if (tcache == TCACHE_STATE_DISABLED) {
            tcache = NULL;
            tcache_tsd_set(&tcache);
        }
    } else /* disabled */ {
        if (tcache > TCACHE_STATE_MAX) {
            tcache_destroy(tcache);
            tcache = NULL;
        }
        if (tcache == NULL) {
            tcache = TCACHE_STATE_DISABLED;
            tcache_tsd_set(&tcache);
        }
    }
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
{
    tcache_t *tcache;

    if (config_tcache == false)
        return (NULL);
    if (config_lazy_lock && isthreaded == false)
        return (NULL);

    tcache = *tcache_tsd_get();
    if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
        if (tcache == TCACHE_STATE_DISABLED)
            return (NULL);
        if (tcache == NULL) {
            if (create == false) {
                /*
                 * Creating a tcache here would cause
                 * allocation as a side effect of free().
                 * Ordinarily that would be okay since
                 * tcache_create() failure is a soft failure
                 * that doesn't propagate.  However, if TLS
                 * data are freed via free() as in glibc,
                 * subtle corruption could result from setting
                 * a TLS variable after its backing memory is
                 * freed.
                 */
                return (NULL);
            }
            if (tcache_enabled_get() == false) {
                tcache_enabled_set(false); /* Memoize. */
                return (NULL);
            }
            return (tcache_create(choose_arena(NULL)));
        }
        if (tcache == TCACHE_STATE_PURGATORY) {
            /*
             * Make a note that an allocator function was called
             * after tcache_thread_cleanup() was called.
             */
            tcache = TCACHE_STATE_REINCARNATED;
            tcache_tsd_set(&tcache);
            return (NULL);
        }
        if (tcache == TCACHE_STATE_REINCARNATED)
            return (NULL);
        not_reached();
    }

    return (tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{

    if (TCACHE_GC_INCR == 0)
        return;

    tcache->ev_cnt++;
    assert(tcache->ev_cnt <= TCACHE_GC_INCR);
    if (tcache->ev_cnt == TCACHE_GC_INCR)
        tcache_event_hard(tcache);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
    void *ret;

    if (tbin->ncached == 0) {
        tbin->low_water = -1;
        return (NULL);
    }
    tbin->ncached--;
    if ((int)tbin->ncached < tbin->low_water)
        tbin->low_water = tbin->ncached;
    ret = tbin->avail[tbin->ncached];
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
    void *ret;
    size_t binind;
    tcache_bin_t *tbin;

    binind = SMALL_SIZE2BIN(size);
    assert(binind < NBINS);
    tbin = &tcache->tbins[binind];
    size = arena_bin_info[binind].reg_size;
    ret = tcache_alloc_easy(tbin);
    if (ret == NULL) {
        ret = tcache_alloc_small_hard(tcache, tbin, binind);
        if (ret == NULL)
            return (NULL);
    }
    assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);

    if (zero == false) {
        if (config_fill) {
            if (opt_junk) {
                arena_alloc_junk_small(ret,
                    &arena_bin_info[binind], false);
            } else if (opt_zero)
                memset(ret, 0, size);
        }
        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    } else {
        if (config_fill && opt_junk) {
            arena_alloc_junk_small(ret, &arena_bin_info[binind],
                true);
        }
        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        memset(ret, 0, size);
    }

    if (config_stats)
        tbin->tstats.nrequests++;
    if (config_prof)
        tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
    tcache_event(tcache);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
    void *ret;
    size_t binind;
    tcache_bin_t *tbin;

    size = PAGE_CEILING(size);
    assert(size <= tcache_maxclass);
    binind = NBINS + (size >> LG_PAGE) - 1;
    assert(binind < nhbins);
    tbin = &tcache->tbins[binind];
    ret = tcache_alloc_easy(tbin);
    if (ret == NULL) {
        /*
         * Only allocate one large object at a time, because it's quite
         * expensive to create one and not use it.
         */
        ret = arena_malloc_large(tcache->arena, size, zero);
        if (ret == NULL)
            return (NULL);
    } else {
        if (config_prof && prof_promote && size == PAGE) {
            arena_chunk_t *chunk =
                (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
            size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
                LG_PAGE);
            arena_mapbits_large_binind_set(chunk, pageind,
                BININD_INVALID);
        }
        if (zero == false) {
            if (config_fill) {
                if (opt_junk)
                    memset(ret, 0xa5, size);
                else if (opt_zero)
                    memset(ret, 0, size);
            }
            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        } else {
            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
            memset(ret, 0, size);
        }

        if (config_stats)
            tbin->tstats.nrequests++;
        if (config_prof)
            tcache->prof_accumbytes += size;
    }

    tcache_event(tcache);
    return (ret);
}

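/*
 * Worked example (illustrative): the large-bin index used above, and again in
 * tcache_dalloc_large() below, is NBINS + (size >> LG_PAGE) - 1.  With
 * LG_PAGE == 13 (8 KiB pages, NBINS == 31 in the matching size_classes.h
 * configuration), a 16384-byte request maps to
 * binind == 31 + (16384 >> 13) - 1 == 32.
 */
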
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

    if (config_fill && opt_junk)
        arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (tbin->ncached == tbin_info->ncached_max) {
        tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
            1), tcache);
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->avail[tbin->ncached] = ptr;
    tbin->ncached++;

    tcache_event(tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
    size_t binind;
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert((size & PAGE_MASK) == 0);
    assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
    assert(tcache_salloc(ptr) <= tcache_maxclass);

    binind = NBINS + (size >> LG_PAGE) - 1;

    if (config_fill && opt_junk)
        memset(ptr, 0x5a, size);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (tbin->ncached == tbin_info->ncached_max) {
        tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
            1), tcache);
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->avail[tbin->ncached] = ptr;
    tbin->ncached++;

    tcache_event(tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
434
deps/jemalloc/include/jemalloc/internal/tsd.h
vendored
Normal file
@@ -0,0 +1,434 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 8

typedef bool (*malloc_tsd_cleanup_t)(void);

#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
    !defined(_WIN32))
typedef struct tsd_init_block_s tsd_init_block_t;
typedef struct tsd_init_head_s tsd_init_head_t;
#endif

/*
 * TLS/TSD-agnostic macro-based implementation of thread-specific data.  There
 * are four macros that support (at least) three use cases: file-private,
 * library-private, and library-private inlined.  Following is an example
 * library-private tsd variable:
 *
 * In example.h:
 *   typedef struct {
 *           int x;
 *           int y;
 *   } example_t;
 *   #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
 *   malloc_tsd_protos(, example, example_t *)
 *   malloc_tsd_externs(example, example_t *)
 * In example.c:
 *   malloc_tsd_data(, example, example_t *, EX_INITIALIZER)
 *   malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER,
 *       example_tsd_cleanup)
 *
 * The result is a set of generated functions, e.g.:
 *
 *   bool example_tsd_boot(void) {...}
 *   example_t **example_tsd_get() {...}
 *   void example_tsd_set(example_t **val) {...}
 *
 * Note that all of the functions deal in terms of (a_type *) rather than
 * (a_type) so that it is possible to support non-pointer types (unlike
 * pthreads TSD).  example_tsd_cleanup() is passed an (a_type *) pointer that is
 * cast to (void *).  This means that the cleanup function needs to cast *and*
 * dereference the function argument, e.g.:
 *
 *   void
 *   example_tsd_cleanup(void *arg)
 *   {
 *           example_t *example = *(example_t **)arg;
 *
 *           [...]
 *           if ([want the cleanup function to be called again]) {
 *                   example_tsd_set(&example);
 *           }
 *   }
 *
 * If example_tsd_set() is called within example_tsd_cleanup(), it will be
 * called again.  This is similar to how pthreads TSD destruction works, except
 * that pthreads only calls the cleanup function again if the value was set to
 * non-NULL.
 */

/* malloc_tsd_protos(). */
#define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \
a_name##_tsd_boot(void); \
a_attr a_type * \
a_name##_tsd_get(void); \
a_attr void \
a_name##_tsd_set(a_type *val);

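/*
 * Illustrative expansion (follows directly from the definition above):
 * malloc_tsd_protos(, example, example_t *) declares
 *
 *	bool example_tsd_boot(void);
 *	example_t **example_tsd_get(void);
 *	void example_tsd_set(example_t **val);
 *
 * matching the generated functions described in the file comment.
 */
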
/* malloc_tsd_externs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##_tls; \
extern __thread bool a_name##_initialized; \
extern bool a_name##_booted;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##_tls; \
extern pthread_key_t a_name##_tsd; \
extern bool a_name##_booted;
#elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##_tsd; \
extern bool a_name##_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##_tsd; \
extern tsd_init_head_t a_name##_tsd_init_head; \
extern bool a_name##_booted;
#endif

/* malloc_tsd_data(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
    a_name##_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \
    a_name##_initialized = false; \
a_attr bool a_name##_booted = false;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
    a_name##_tls = a_initializer; \
a_attr pthread_key_t a_name##_tsd; \
a_attr bool a_name##_booted = false;
#elif (defined(_WIN32))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr DWORD a_name##_tsd; \
a_attr bool a_name##_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##_tsd; \
a_attr tsd_init_head_t a_name##_tsd_init_head = { \
    ql_head_initializer(blocks), \
    MALLOC_MUTEX_INITIALIZER \
}; \
a_attr bool a_name##_booted = false;
#endif

/* malloc_tsd_funcs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
{ \
\
    if (a_name##_initialized) { \
        a_name##_initialized = false; \
        a_cleanup(&a_name##_tls); \
    } \
    return (a_name##_initialized); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
    if (a_cleanup != malloc_tsd_no_cleanup) { \
        malloc_tsd_cleanup_register( \
            &a_name##_tsd_cleanup_wrapper); \
    } \
    a_name##_booted = true; \
    return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
\
    assert(a_name##_booted); \
    return (&a_name##_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
\
    assert(a_name##_booted); \
    a_name##_tls = (*val); \
    if (a_cleanup != malloc_tsd_no_cleanup) \
        a_name##_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
    if (a_cleanup != malloc_tsd_no_cleanup) { \
        if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \
            return (true); \
    } \
    a_name##_booted = true; \
    return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
\
    assert(a_name##_booted); \
    return (&a_name##_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
\
    assert(a_name##_booted); \
    a_name##_tls = (*val); \
    if (a_cleanup != malloc_tsd_no_cleanup) { \
        if (pthread_setspecific(a_name##_tsd, \
            (void *)(&a_name##_tls))) { \
            malloc_write("<jemalloc>: Error" \
                " setting TSD for "#a_name"\n"); \
            if (opt_abort) \
                abort(); \
        } \
    } \
}
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Data structure. */ \
typedef struct { \
    bool initialized; \
    a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    wrapper = (a_name##_tsd_wrapper_t *)TlsGetValue(a_name##_tsd); \
    if (wrapper == NULL) \
        return (false); \
    if (a_cleanup != malloc_tsd_no_cleanup && \
        wrapper->initialized) { \
        a_type val = wrapper->val; \
        a_type tsd_static_data = a_initializer; \
        wrapper->initialized = false; \
        wrapper->val = tsd_static_data; \
        a_cleanup(&val); \
        if (wrapper->initialized) { \
            /* Trigger another cleanup round. */ \
            return (true); \
        } \
    } \
    malloc_tsd_dalloc(wrapper); \
    return (false); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
    a_name##_tsd = TlsAlloc(); \
    if (a_name##_tsd == TLS_OUT_OF_INDEXES) \
        return (true); \
    if (a_cleanup != malloc_tsd_no_cleanup) { \
        malloc_tsd_cleanup_register( \
            &a_name##_tsd_cleanup_wrapper); \
    } \
    a_name##_booted = true; \
    return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
        TlsGetValue(a_name##_tsd); \
\
    if (wrapper == NULL) { \
        wrapper = (a_name##_tsd_wrapper_t *) \
            malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
        if (wrapper == NULL) { \
            malloc_write("<jemalloc>: Error allocating" \
                " TSD for "#a_name"\n"); \
            abort(); \
        } else { \
            static a_type tsd_static_data = a_initializer; \
            wrapper->initialized = false; \
            wrapper->val = tsd_static_data; \
        } \
        if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \
            malloc_write("<jemalloc>: Error setting" \
                " TSD for "#a_name"\n"); \
            abort(); \
        } \
    } \
    return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    assert(a_name##_booted); \
    wrapper = a_name##_tsd_get_wrapper(); \
    return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    assert(a_name##_booted); \
    wrapper = a_name##_tsd_get_wrapper(); \
    wrapper->val = *(val); \
    if (a_cleanup != malloc_tsd_no_cleanup) \
        wrapper->initialized = true; \
}
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Data structure. */ \
typedef struct { \
    bool initialized; \
    a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr void \
a_name##_tsd_cleanup_wrapper(void *arg) \
{ \
    a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg; \
\
    if (a_cleanup != malloc_tsd_no_cleanup && \
        wrapper->initialized) { \
        wrapper->initialized = false; \
        a_cleanup(&wrapper->val); \
        if (wrapper->initialized) { \
            /* Trigger another cleanup round. */ \
            if (pthread_setspecific(a_name##_tsd, \
                (void *)wrapper)) { \
                malloc_write("<jemalloc>: Error" \
                    " setting TSD for "#a_name"\n"); \
                if (opt_abort) \
                    abort(); \
            } \
            return; \
        } \
    } \
    malloc_tsd_dalloc(wrapper); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
    if (pthread_key_create(&a_name##_tsd, \
        a_name##_tsd_cleanup_wrapper) != 0) \
        return (true); \
    a_name##_booted = true; \
    return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
        pthread_getspecific(a_name##_tsd); \
\
    if (wrapper == NULL) { \
        tsd_init_block_t block; \
        wrapper = tsd_init_check_recursion( \
            &a_name##_tsd_init_head, &block); \
        if (wrapper) \
            return (wrapper); \
        wrapper = (a_name##_tsd_wrapper_t *) \
            malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
        block.data = wrapper; \
        if (wrapper == NULL) { \
            malloc_write("<jemalloc>: Error allocating" \
                " TSD for "#a_name"\n"); \
            abort(); \
        } else { \
            static a_type tsd_static_data = a_initializer; \
            wrapper->initialized = false; \
            wrapper->val = tsd_static_data; \
        } \
        if (pthread_setspecific(a_name##_tsd, \
            (void *)wrapper)) { \
            malloc_write("<jemalloc>: Error setting" \
                " TSD for "#a_name"\n"); \
            abort(); \
        } \
        tsd_init_finish(&a_name##_tsd_init_head, &block); \
    } \
    return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    assert(a_name##_booted); \
    wrapper = a_name##_tsd_get_wrapper(); \
    return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    assert(a_name##_booted); \
    wrapper = a_name##_tsd_get_wrapper(); \
    wrapper->val = *(val); \
    if (a_cleanup != malloc_tsd_no_cleanup) \
        wrapper->initialized = true; \
}
#endif

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
    !defined(_WIN32))
struct tsd_init_block_s {
    ql_elm(tsd_init_block_t) link;
    pthread_t thread;
    void *data;
};
struct tsd_init_head_s {
    ql_head(tsd_init_block_t) blocks;
    malloc_mutex_t lock;
};
#endif

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *);
void malloc_tsd_cleanup_register(bool (*f)(void));
void malloc_tsd_boot(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
    !defined(_WIN32))
void *tsd_init_check_recursion(tsd_init_head_t *head,
    tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
#endif

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
162
deps/jemalloc/include/jemalloc/internal/util.h
vendored
Normal file
@@ -0,0 +1,162 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64

/*
 * Size of stack-allocated buffer used by malloc_{,v,vc}printf().  This must be
 * large enough for all possible uses within jemalloc.
 */
#define MALLOC_PRINTF_BUFSIZE 4096

/*
 * Wrap a cpp argument that contains commas such that it isn't broken up into
 * multiple arguments.
 */
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__

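/*
 * Example (adapted from the usage pattern shown in tsd.h's file comment): a
 * brace initializer that contains a comma can be passed as a single macro
 * argument by wrapping it:
 *
 *	#define EX_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})
 */
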
/*
 * Silence compiler warnings due to uninitialized values.  This is used
 * wherever the compiler fails to recognize that the variable is never used
 * uninitialized.
 */
#ifdef JEMALLOC_CC_SILENCE
# define JEMALLOC_CC_SILENCE_INIT(v) = v
#else
# define JEMALLOC_CC_SILENCE_INIT(v)
#endif

/*
 * Define a custom assert() in order to reduce the chances of deadlock during
 * assertion failure.
 */
#ifndef assert
#define assert(e) do { \
    if (config_debug && !(e)) { \
        malloc_printf( \
            "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
            __FILE__, __LINE__, #e); \
        abort(); \
    } \
} while (0)
#endif

#ifndef not_reached
#define not_reached() do { \
    if (config_debug) { \
        malloc_printf( \
            "<jemalloc>: %s:%d: Unreachable code reached\n", \
            __FILE__, __LINE__); \
        abort(); \
    } \
} while (0)
#endif

#ifndef not_implemented
#define not_implemented() do { \
    if (config_debug) { \
        malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
            __FILE__, __LINE__); \
        abort(); \
    } \
} while (0)
#endif

#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
    if (config_debug && !(e)) \
        not_implemented(); \
} while (0)
#endif

/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
    if ((c) == false) \
        not_reached(); \
} while (0)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr,
    char **restrict endptr, int base);
void malloc_write(const char *s);

/*
 * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
 * point math.
 */
int malloc_vsnprintf(char *str, size_t size, const char *format,
    va_list ap);
int malloc_snprintf(char *str, size_t size, const char *format, ...)
    JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap);
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
    const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_printf(const char *format, ...)
    JEMALLOC_ATTR(format(printf, 1, 2));

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t pow2_ceil(size_t x);
void set_errno(int errnum);
int get_errno(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{

    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
    x |= x >> 32;
#endif
    x++;
    return (x);
}

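/*
 * Worked trace (illustrative): pow2_ceil(37).  After x--, x == 36 (100100b);
 * the shift-or cascade smears the top set bit downward, giving x == 63
 * (111111b); x++ then yields 64.  Inputs that are already powers of 2 are
 * returned unchanged, which is why x-- precedes the smearing.
 */
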
/* Sets error code */
JEMALLOC_INLINE void
set_errno(int errnum)
{

#ifdef _WIN32
    SetLastError(errnum);
#else
    errno = errnum;
#endif
}

/* Get last error code */
JEMALLOC_INLINE int
get_errno(void)
{

#ifdef _WIN32
    return (GetLastError());
#else
    return (errno);
#endif
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
182
deps/jemalloc/include/jemalloc/jemalloc.h
vendored
Normal file
@@ -0,0 +1,182 @@
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif

#include <limits.h>
#include <strings.h>

#define JEMALLOC_VERSION "3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340"
#define JEMALLOC_VERSION_MAJOR 3
#define JEMALLOC_VERSION_MINOR 6
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 0
#define JEMALLOC_VERSION_GID "46c0af68bd248b04df75e4f92d5fb804c3d75340"

# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
#  define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
#  define MALLOCX_ALIGN(a) \
      ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))

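/*
 * Hypothetical usage sketch (not part of this header): request 1024 bytes,
 * zeroed and aligned to 4096 bytes.  ffs(4096) - 1 == 12, so
 * MALLOCX_ALIGN(4096) encodes the same flag value as MALLOCX_LG_ALIGN(12).
 *
 *	void *p = je_mallocx(1024, MALLOCX_ALIGN(4096) | MALLOCX_ZERO);
 */
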
#ifdef JEMALLOC_EXPERIMENTAL
# define ALLOCM_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
#  define ALLOCM_ALIGN(a) (ffs(a)-1)
# else
#  define ALLOCM_ALIGN(a) \
      ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define ALLOCM_ZERO ((int)0x40)
# define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
# define ALLOCM_SUCCESS 0
# define ALLOCM_ERR_OOM 1
# define ALLOCM_ERR_NOT_MOVED 2
#endif

/*
 * The je_ prefix on the following public symbol declarations is an artifact
 * of namespace management, and should be omitted in application code unless
 * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
 */
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
    const char *s);

JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
    JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
    size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
    JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);

JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
    int flags);
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);

JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
    size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
    const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t je_malloc_usable_size(
    JEMALLOC_USABLE_SIZE_CONST void *ptr);

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void *je_memalign(size_t alignment, size_t size)
    JEMALLOC_ATTR(malloc);
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void *je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif

#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size,
    int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size,
    size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags)
    JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags)
    JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
#endif

/*
 * By default application code must explicitly refer to mangled symbol names,
 * so that it is possible to use jemalloc in conjunction with another allocator
 * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
 * name mangling that matches the API prefixing that happened as a result of
 * --with-mangling and/or --with-jemalloc-prefix configuration settings.
 */
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
#  define JEMALLOC_NO_DEMANGLE
# endif
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
# define memalign je_memalign
# define valloc je_valloc
# define allocm je_allocm
# define dallocm je_dallocm
# define nallocm je_nallocm
# define rallocm je_rallocm
# define sallocm je_sallocm
#endif

/*
 * The je_* macros can be used as stable alternative names for the
 * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily
 * meant for use in jemalloc itself, but it can be used by application code to
 * provide isolation from the name mangling specified via --with-mangling
 * and/or --with-jemalloc-prefix.
 */
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_malloc_stats_print
# undef je_malloc_usable_size
# undef je_memalign
# undef je_valloc
# undef je_allocm
# undef je_dallocm
# undef je_nallocm
# undef je_rallocm
# undef je_sallocm
#endif

#ifdef __cplusplus
};
#endif
#endif /* JEMALLOC_H_ */
313
deps/jemalloc/include/msvc_compat/inttypes.h
vendored
Normal file
@@ -0,0 +1,313 @@
// ISO C9x compliant inttypes.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   1. Redistributions of source code must retain the above copyright notice,
//      this list of conditions and the following disclaimer.
//
//   2. Redistributions in binary form must reproduce the above copyright
//      notice, this list of conditions and the following disclaimer in the
//      documentation and/or other materials provided with the distribution.
//
//   3. The name of the author may be used to endorse or promote products
//      derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////

#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]

#ifndef _MSC_INTTYPES_H_ // [
#define _MSC_INTTYPES_H_

#if _MSC_VER > 1000
#pragma once
#endif

#include "stdint.h"

// 7.8 Format conversion of integer types

typedef struct {
    intmax_t quot;
    intmax_t rem;
} imaxdiv_t;

// 7.8.1 Macros for format specifiers
|
||||
|
||||
#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
|
||||
|
||||
#ifdef _WIN64
|
||||
# define __PRI64_PREFIX "l"
|
||||
# define __PRIPTR_PREFIX "l"
|
||||
#else
|
||||
# define __PRI64_PREFIX "ll"
|
||||
# define __PRIPTR_PREFIX
|
||||
#endif
|
||||
|
||||
// The fprintf macros for signed integers are:
|
||||
#define PRId8 "d"
|
||||
#define PRIi8 "i"
|
||||
#define PRIdLEAST8 "d"
|
||||
#define PRIiLEAST8 "i"
|
||||
#define PRIdFAST8 "d"
|
||||
#define PRIiFAST8 "i"
|
||||
|
||||
#define PRId16 "hd"
|
||||
#define PRIi16 "hi"
|
||||
#define PRIdLEAST16 "hd"
|
||||
#define PRIiLEAST16 "hi"
|
||||
#define PRIdFAST16 "hd"
|
||||
#define PRIiFAST16 "hi"
|
||||
|
||||
#define PRId32 "d"
|
||||
#define PRIi32 "i"
|
||||
#define PRIdLEAST32 "d"
|
||||
#define PRIiLEAST32 "i"
|
||||
#define PRIdFAST32 "d"
|
||||
#define PRIiFAST32 "i"
|
||||
|
||||
#define PRId64 __PRI64_PREFIX "d"
|
||||
#define PRIi64 __PRI64_PREFIX "i"
|
||||
#define PRIdLEAST64 __PRI64_PREFIX "d"
|
||||
#define PRIiLEAST64 __PRI64_PREFIX "i"
|
||||
#define PRIdFAST64 __PRI64_PREFIX "d"
|
||||
#define PRIiFAST64 __PRI64_PREFIX "i"
|
||||
|
||||
#define PRIdMAX __PRI64_PREFIX "d"
|
||||
#define PRIiMAX __PRI64_PREFIX "i"
|
||||
|
||||
#define PRIdPTR __PRIPTR_PREFIX "d"
|
||||
#define PRIiPTR __PRIPTR_PREFIX "i"
|
||||
|
||||
// The fprintf macros for unsigned integers are:
|
||||
#define PRIo8 "o"
|
||||
#define PRIu8 "u"
|
||||
#define PRIx8 "x"
|
||||
#define PRIX8 "X"
|
||||
#define PRIoLEAST8 "o"
|
||||
#define PRIuLEAST8 "u"
|
||||
#define PRIxLEAST8 "x"
|
||||
#define PRIXLEAST8 "X"
|
||||
#define PRIoFAST8 "o"
|
||||
#define PRIuFAST8 "u"
|
||||
#define PRIxFAST8 "x"
|
||||
#define PRIXFAST8 "X"
|
||||
|
||||
#define PRIo16 "ho"
|
||||
#define PRIu16 "hu"
|
||||
#define PRIx16 "hx"
|
||||
#define PRIX16 "hX"
|
||||
#define PRIoLEAST16 "ho"
|
||||
#define PRIuLEAST16 "hu"
|
||||
#define PRIxLEAST16 "hx"
|
||||
#define PRIXLEAST16 "hX"
|
||||
#define PRIoFAST16 "ho"
|
||||
#define PRIuFAST16 "hu"
|
||||
#define PRIxFAST16 "hx"
|
||||
#define PRIXFAST16 "hX"
|
||||
|
||||
#define PRIo32 "o"
|
||||
#define PRIu32 "u"
|
||||
#define PRIx32 "x"
|
||||
#define PRIX32 "X"
|
||||
#define PRIoLEAST32 "o"
|
||||
#define PRIuLEAST32 "u"
|
||||
#define PRIxLEAST32 "x"
|
||||
#define PRIXLEAST32 "X"
|
||||
#define PRIoFAST32 "o"
|
||||
#define PRIuFAST32 "u"
|
||||
#define PRIxFAST32 "x"
|
||||
#define PRIXFAST32 "X"
|
||||
|
||||
#define PRIo64 __PRI64_PREFIX "o"
|
||||
#define PRIu64 __PRI64_PREFIX "u"
|
||||
#define PRIx64 __PRI64_PREFIX "x"
|
||||
#define PRIX64 __PRI64_PREFIX "X"
|
||||
#define PRIoLEAST64 __PRI64_PREFIX "o"
|
||||
#define PRIuLEAST64 __PRI64_PREFIX "u"
|
||||
#define PRIxLEAST64 __PRI64_PREFIX "x"
|
||||
#define PRIXLEAST64 __PRI64_PREFIX "X"
|
||||
#define PRIoFAST64 __PRI64_PREFIX "o"
|
||||
#define PRIuFAST64 __PRI64_PREFIX "u"
|
||||
#define PRIxFAST64 __PRI64_PREFIX "x"
|
||||
#define PRIXFAST64 __PRI64_PREFIX "X"
|
||||
|
||||
#define PRIoMAX __PRI64_PREFIX "o"
|
||||
#define PRIuMAX __PRI64_PREFIX "u"
|
||||
#define PRIxMAX __PRI64_PREFIX "x"
|
||||
#define PRIXMAX __PRI64_PREFIX "X"
|
||||
|
||||
#define PRIoPTR __PRIPTR_PREFIX "o"
|
||||
#define PRIuPTR __PRIPTR_PREFIX "u"
|
||||
#define PRIxPTR __PRIPTR_PREFIX "x"
|
||||
#define PRIXPTR __PRIPTR_PREFIX "X"
|
||||
|
||||
// The fscanf macros for signed integers are:
#define SCNd8 "d"
#define SCNi8 "i"
#define SCNdLEAST8 "d"
#define SCNiLEAST8 "i"
#define SCNdFAST8 "d"
#define SCNiFAST8 "i"

#define SCNd16 "hd"
#define SCNi16 "hi"
#define SCNdLEAST16 "hd"
#define SCNiLEAST16 "hi"
#define SCNdFAST16 "hd"
#define SCNiFAST16 "hi"

#define SCNd32 "ld"
#define SCNi32 "li"
#define SCNdLEAST32 "ld"
#define SCNiLEAST32 "li"
#define SCNdFAST32 "ld"
#define SCNiFAST32 "li"

#define SCNd64 "I64d"
#define SCNi64 "I64i"
#define SCNdLEAST64 "I64d"
#define SCNiLEAST64 "I64i"
#define SCNdFAST64 "I64d"
#define SCNiFAST64 "I64i"

#define SCNdMAX "I64d"
#define SCNiMAX "I64i"

#ifdef _WIN64 // [
# define SCNdPTR "I64d"
# define SCNiPTR "I64i"
#else // _WIN64 ][
# define SCNdPTR "ld"
# define SCNiPTR "li"
#endif // _WIN64 ]

// The fscanf macros for unsigned integers are:
#define SCNo8 "o"
#define SCNu8 "u"
#define SCNx8 "x"
#define SCNX8 "X"
#define SCNoLEAST8 "o"
#define SCNuLEAST8 "u"
#define SCNxLEAST8 "x"
#define SCNXLEAST8 "X"
#define SCNoFAST8 "o"
#define SCNuFAST8 "u"
#define SCNxFAST8 "x"
#define SCNXFAST8 "X"

#define SCNo16 "ho"
#define SCNu16 "hu"
#define SCNx16 "hx"
#define SCNX16 "hX"
#define SCNoLEAST16 "ho"
#define SCNuLEAST16 "hu"
#define SCNxLEAST16 "hx"
#define SCNXLEAST16 "hX"
#define SCNoFAST16 "ho"
#define SCNuFAST16 "hu"
#define SCNxFAST16 "hx"
#define SCNXFAST16 "hX"

#define SCNo32 "lo"
#define SCNu32 "lu"
#define SCNx32 "lx"
#define SCNX32 "lX"
#define SCNoLEAST32 "lo"
#define SCNuLEAST32 "lu"
#define SCNxLEAST32 "lx"
#define SCNXLEAST32 "lX"
#define SCNoFAST32 "lo"
#define SCNuFAST32 "lu"
#define SCNxFAST32 "lx"
#define SCNXFAST32 "lX"

#define SCNo64 "I64o"
#define SCNu64 "I64u"
#define SCNx64 "I64x"
#define SCNX64 "I64X"
#define SCNoLEAST64 "I64o"
#define SCNuLEAST64 "I64u"
#define SCNxLEAST64 "I64x"
#define SCNXLEAST64 "I64X"
#define SCNoFAST64 "I64o"
#define SCNuFAST64 "I64u"
#define SCNxFAST64 "I64x"
#define SCNXFAST64 "I64X"

#define SCNoMAX "I64o"
#define SCNuMAX "I64u"
#define SCNxMAX "I64x"
#define SCNXMAX "I64X"

#ifdef _WIN64 // [
# define SCNoPTR "I64o"
# define SCNuPTR "I64u"
# define SCNxPTR "I64x"
# define SCNXPTR "I64X"
#else // _WIN64 ][
# define SCNoPTR "lo"
# define SCNuPTR "lu"
# define SCNxPTR "lx"
# define SCNXPTR "lX"
#endif // _WIN64 ]

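// Illustrative usage (not part of the original header): the SCN macros play
// the same role for the scanf family; here SCNd64 expands to "I64d":
//
//   #include <stdio.h>
//   #include <inttypes.h>
//
//   int64_t v;
//   if (sscanf("12345678901", "%" SCNd64, &v) == 1) {
//       /* v == 12345678901 */
//   }
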
#endif // __STDC_FORMAT_MACROS ]
// 7.8.2 Functions for greatest-width integer types

// 7.8.2.1 The imaxabs function
#define imaxabs _abs64

// 7.8.2.2 The imaxdiv function

// This is a modified version of the div() function from Microsoft's div.c,
// found in %MSVC.NET%\crt\src\div.c
#ifdef STATIC_IMAXDIV // [
static
#else // STATIC_IMAXDIV ][
_inline
#endif // STATIC_IMAXDIV ]
imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
{
    imaxdiv_t result;

    result.quot = numer / denom;
    result.rem = numer % denom;

    if (numer < 0 && result.rem > 0) {
        // did division wrong; must fix up
        ++result.quot;
        result.rem -= denom;
    }

    return result;
}

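// Illustrative usage (not part of the original header): imaxdiv() computes
// quotient and remainder in one step, and the fix-up above keeps the
// remainder's sign consistent with truncation toward zero:
//
//   imaxdiv_t r = imaxdiv(-7, 2);
//   /* r.quot == -3, r.rem == -1 (never quot == -4, rem == 1) */
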
// 7.8.2.3 The strtoimax and strtoumax functions
#define strtoimax _strtoi64
#define strtoumax _strtoui64

// 7.8.2.4 The wcstoimax and wcstoumax functions
#define wcstoimax _wcstoi64
#define wcstoumax _wcstoui64

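// Illustrative usage (not part of the original header): strtoimax maps onto
// MSVC's _strtoi64, so it keeps the usual strtol-style interface:
//
//   const char *s = "123abc";
//   char *end;
//   intmax_t n = strtoimax(s, &end, 10);   /* n == 123, end points at "abc" */
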
#endif // _MSC_INTTYPES_H_ ]
16
deps/jemalloc/include/msvc_compat/stdbool.h
vendored
Normal file
@@ -0,0 +1,16 @@
#ifndef stdbool_h
#define stdbool_h

#include <wtypes.h>

/* MSVC doesn't define _Bool or bool in C, but does have BOOL */
/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
typedef BOOL _Bool;

#define bool _Bool
#define true 1
#define false 0

#define __bool_true_false_are_defined 1

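/* Illustrative usage (not part of the original header): with this shim, C
 * code can use bool much as in C99, with the caveat noted above that _Bool
 * is really a 32-bit BOOL here:
 *
 *   bool done = false;
 *   if (!done) done = true;
 *   // but (bool)0.5 == false under this typedef, unlike a true C99 _Bool
 */
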
#endif /* stdbool_h */
247
deps/jemalloc/include/msvc_compat/stdint.h
vendored
Normal file
@@ -0,0 +1,247 @@
// ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   1. Redistributions of source code must retain the above copyright notice,
//      this list of conditions and the following disclaimer.
//
//   2. Redistributions in binary form must reproduce the above copyright
//      notice, this list of conditions and the following disclaimer in the
//      documentation and/or other materials provided with the distribution.
//
//   3. The name of the author may be used to endorse or promote products
//      derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////

#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]

#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_

#if _MSC_VER > 1000
#pragma once
#endif

#include <limits.h>

// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap the <wchar.h> include with
// 'extern "C++" {}', or the compiler gives many errors like this:
//   error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
# include <wchar.h>
#ifdef __cplusplus
}
#endif

// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
# define _W64 __w64
# else
# define _W64
# endif
#endif


// 7.18.1 Integer types

// 7.18.1.1 Exact-width integer types

// Visual Studio 6 and Embedded Visual C++ 4 don't realize that, e.g., char
// has the same size as __int8, so we give up on __intX for them.
#if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#else
typedef signed __int8 int8_t;
typedef signed __int16 int16_t;
typedef signed __int32 int32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;

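// Illustrative check (not part of the original header): consuming code can
// verify the advertised widths with a C89-compatible compile-time assertion:
//
//   typedef char assert_int32_size[sizeof(int32_t) == 4 ? 1 : -1];
//   typedef char assert_int64_size[sizeof(int64_t) == 8 ? 1 : -1];
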
// 7.18.1.2 Minimum-width integer types
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;

// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;

// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
typedef signed __int64 intptr_t;
typedef unsigned __int64 uintptr_t;
#else // _WIN64 ][
typedef _W64 signed int intptr_t;
typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]

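// Illustrative usage (not part of the original header): intptr_t/uintptr_t
// are sized so a pointer survives a round-trip on both 32- and 64-bit
// Windows:
//
//   int x;
//   uintptr_t u = (uintptr_t)&x;
//   int *p = (int *)u;   /* p == &x */
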
// 7.18.1.5 Greatest-width integer types
typedef int64_t intmax_t;
typedef uint64_t uintmax_t;


// 7.18.2 Limits of specified-width integer types

#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259

// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN ((int8_t)_I8_MIN)
#define INT8_MAX _I8_MAX
#define INT16_MIN ((int16_t)_I16_MIN)
#define INT16_MAX _I16_MAX
#define INT32_MIN ((int32_t)_I32_MIN)
#define INT32_MAX _I32_MAX
#define INT64_MIN ((int64_t)_I64_MIN)
#define INT64_MAX _I64_MAX
#define UINT8_MAX _UI8_MAX
#define UINT16_MAX _UI16_MAX
#define UINT32_MAX _UI32_MAX
#define UINT64_MAX _UI64_MAX

// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN INT8_MIN
#define INT_LEAST8_MAX INT8_MAX
#define INT_LEAST16_MIN INT16_MIN
#define INT_LEAST16_MAX INT16_MAX
#define INT_LEAST32_MIN INT32_MIN
#define INT_LEAST32_MAX INT32_MAX
#define INT_LEAST64_MIN INT64_MIN
#define INT_LEAST64_MAX INT64_MAX
#define UINT_LEAST8_MAX UINT8_MAX
#define UINT_LEAST16_MAX UINT16_MAX
#define UINT_LEAST32_MAX UINT32_MAX
#define UINT_LEAST64_MAX UINT64_MAX

// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MIN INT16_MIN
#define INT_FAST16_MAX INT16_MAX
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT16_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX

// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
# define INTPTR_MIN INT64_MIN
# define INTPTR_MAX INT64_MAX
# define UINTPTR_MAX UINT64_MAX
#else // _WIN64 ][
# define INTPTR_MIN INT32_MIN
# define INTPTR_MAX INT32_MAX
# define UINTPTR_MAX UINT32_MAX
#endif // _WIN64 ]

// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN INT64_MIN
#define INTMAX_MAX INT64_MAX
#define UINTMAX_MAX UINT64_MAX

// 7.18.3 Limits of other integer types

#ifdef _WIN64 // [
# define PTRDIFF_MIN _I64_MIN
# define PTRDIFF_MAX _I64_MAX
#else // _WIN64 ][
# define PTRDIFF_MIN _I32_MIN
# define PTRDIFF_MAX _I32_MAX
#endif // _WIN64 ]

#define SIG_ATOMIC_MIN INT_MIN
#define SIG_ATOMIC_MAX INT_MAX

#ifndef SIZE_MAX // [
# ifdef _WIN64 // [
#  define SIZE_MAX _UI64_MAX
# else // _WIN64 ][
#  define SIZE_MAX _UI32_MAX
# endif // _WIN64 ]
#endif // SIZE_MAX ]

// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
# define WCHAR_MIN 0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
# define WCHAR_MAX _UI16_MAX
#endif // WCHAR_MAX ]

#define WINT_MIN 0
#define WINT_MAX _UI16_MAX

#endif // __STDC_LIMIT_MACROS ]

// 7.18.4 Macros for integer constants

#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260

// 7.18.4.1 Macros for minimum-width integer constants

#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64

#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64

// 7.18.4.2 Macros for greatest-width integer constants
#define INTMAX_C INT64_C
#define UINTMAX_C UINT64_C

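// Illustrative usage (not part of the original header): these macros paste
// MSVC's integer-literal suffixes onto the constant, so the literal has the
// intended width from the start:
//
//   uint64_t mask = UINT64_C(0xffffffff) << 32;  /* 0xffffffffui64 << 32 */
//   int64_t  big  = INT64_C(1) << 40;            /* 1i64 << 40 */
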
#endif // __STDC_CONSTANT_MACROS ]

#endif // _MSC_STDINT_H_ ]
23
deps/jemalloc/include/msvc_compat/strings.h
vendored
Normal file
@@ -0,0 +1,23 @@
#ifndef strings_h
#define strings_h

/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
 * for both. */
#include <intrin.h>
#pragma intrinsic(_BitScanForward)
static __forceinline int ffsl(long x)
{
    unsigned long i;

    if (_BitScanForward(&i, x))
        return (i + 1);
    return (0);
}

static __forceinline int ffs(int x)
{

    return (ffsl(x));
}

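/* Illustrative usage (not part of the original header): ffs()/ffsl() return
 * the 1-based index of the lowest set bit, or 0 when no bit is set:
 *
 *   ffs(0)    == 0
 *   ffs(1)    == 1
 *   ffs(0x28) == 4   (lowest set bit is bit 3, reported 1-based)
 */
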
#endif