mirror of https://github.com/mod-playerbots/azerothcore-wotlk.git

Moved files after merging
880 deps/jemalloc/src/background_thread.c (vendored, new file)
@@ -0,0 +1,880 @@
#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"

/******************************************************************************/
/* Data. */

/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;

/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state. Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;

/* False if no necessary runtime support. */
bool can_enable_background_thread;

/******************************************************************************/

#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
#include <dlfcn.h>

static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);
static pthread_once_t once_control = PTHREAD_ONCE_INIT;

static void
pthread_create_wrapper_once(void) {
#ifdef JEMALLOC_LAZY_LOCK
    isthreaded = true;
#endif
}

int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *__restrict arg) {
    pthread_once(&once_control, pthread_create_wrapper_once);

    return pthread_create_fptr(thread, attr, start_routine, arg);
}
#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */

#ifndef JEMALLOC_BACKGROUND_THREAD
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(tsdn_t *tsdn,
    background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
#else

static bool background_thread_enabled_at_fork;

static void
background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
    background_thread_wakeup_time_set(tsdn, info, 0);
    info->npages_to_purge_new = 0;
    if (config_stats) {
        info->tot_n_runs = 0;
        nstime_init(&info->tot_sleep_time, 0);
    }
}

static inline bool
set_current_thread_affinity(UNUSED int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);

    return (ret != 0);
#else
    return false;
#endif
}

/* Threshold for determining when to wake up the background thread. */
#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)

static inline size_t
decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
    size_t i;
    uint64_t sum = 0;
    for (i = 0; i < interval; i++) {
        sum += decay->backlog[i] * h_steps[i];
    }
    for (; i < SMOOTHSTEP_NSTEPS; i++) {
        sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
    }

    return (size_t)(sum >> SMOOTHSTEP_BFP);
}

static uint64_t
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
    extents_t *extents) {
    if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
        /* Use minimal interval if decay is contended. */
        return BACKGROUND_THREAD_MIN_INTERVAL_NS;
    }

    uint64_t interval;
    ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
    if (decay_time <= 0) {
        /* Purging is eagerly done or disabled currently. */
        interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
        goto label_done;
    }

    uint64_t decay_interval_ns = nstime_ns(&decay->interval);
    assert(decay_interval_ns > 0);
    size_t npages = extents_npages_get(extents);
    if (npages == 0) {
        unsigned i;
        for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
            if (decay->backlog[i] > 0) {
                break;
            }
        }
        if (i == SMOOTHSTEP_NSTEPS) {
            /* No dirty pages recorded. Sleep indefinitely. */
            interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
            goto label_done;
        }
    }
    if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
        /* Use max interval. */
        interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
        goto label_done;
    }

    size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
    size_t ub = SMOOTHSTEP_NSTEPS;
    /* Minimal 2 intervals to ensure reaching next epoch deadline. */
    lb = (lb < 2) ? 2 : lb;
    if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
        (lb + 2 > ub)) {
        interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
        goto label_done;
    }

    assert(lb + 2 <= ub);
    size_t npurge_lb, npurge_ub;
    npurge_lb = decay_npurge_after_interval(decay, lb);
    if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
        interval = decay_interval_ns * lb;
        goto label_done;
    }
    npurge_ub = decay_npurge_after_interval(decay, ub);
    if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
        interval = decay_interval_ns * ub;
        goto label_done;
    }

    unsigned n_search = 0;
    size_t target, npurge;
    while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
        && (lb + 2 < ub)) {
        target = (lb + ub) / 2;
        npurge = decay_npurge_after_interval(decay, target);
        if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
            ub = target;
            npurge_ub = npurge;
        } else {
            lb = target;
            npurge_lb = npurge;
        }
        assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
    }
    interval = decay_interval_ns * (ub + lb) / 2;
label_done:
    interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
        BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
    malloc_mutex_unlock(tsdn, &decay->mtx);

    return interval;
}

/* Compute purge interval for background threads. */
static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
    uint64_t i1, i2;
    i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
        &arena->extents_dirty);
    if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
        return i1;
    }
    i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
        &arena->extents_muzzy);

    return i1 < i2 ? i1 : i2;
}

static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
    uint64_t interval) {
    if (config_stats) {
        info->tot_n_runs++;
    }
    info->npages_to_purge_new = 0;

    struct timeval tv;
    /* Specific clock required by timedwait. */
    gettimeofday(&tv, NULL);
    nstime_t before_sleep;
    nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);

    int ret;
    if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
        assert(background_thread_indefinite_sleep(info));
        ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
        assert(ret == 0);
    } else {
        assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
            interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
        /* We need malloc clock (can be different from tv). */
        nstime_t next_wakeup;
        nstime_init(&next_wakeup, 0);
        nstime_update(&next_wakeup);
        nstime_iadd(&next_wakeup, interval);
        assert(nstime_ns(&next_wakeup) <
            BACKGROUND_THREAD_INDEFINITE_SLEEP);
        background_thread_wakeup_time_set(tsdn, info,
            nstime_ns(&next_wakeup));

        nstime_t ts_wakeup;
        nstime_copy(&ts_wakeup, &before_sleep);
        nstime_iadd(&ts_wakeup, interval);
        struct timespec ts;
        ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
        ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);

        assert(!background_thread_indefinite_sleep(info));
        ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
        assert(ret == ETIMEDOUT || ret == 0);
        background_thread_wakeup_time_set(tsdn, info,
            BACKGROUND_THREAD_INDEFINITE_SLEEP);
    }
    if (config_stats) {
        gettimeofday(&tv, NULL);
        nstime_t after_sleep;
        nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
        if (nstime_compare(&after_sleep, &before_sleep) > 0) {
            nstime_subtract(&after_sleep, &before_sleep);
            nstime_add(&info->tot_sleep_time, &after_sleep);
        }
    }
}

static bool
background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
    if (unlikely(info->state == background_thread_paused)) {
        malloc_mutex_unlock(tsdn, &info->mtx);
        /* Wait on global lock to update status. */
        malloc_mutex_lock(tsdn, &background_thread_lock);
        malloc_mutex_unlock(tsdn, &background_thread_lock);
        malloc_mutex_lock(tsdn, &info->mtx);
        return true;
    }

    return false;
}

static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
    unsigned ind) {
    uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
    unsigned narenas = narenas_total_get();

    for (unsigned i = ind; i < narenas; i += ncpus) {
        arena_t *arena = arena_get(tsdn, i, false);
        if (!arena) {
            continue;
        }
        arena_decay(tsdn, arena, true, false);
        if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
            /* Min interval will be used. */
            continue;
        }
        uint64_t interval = arena_decay_compute_purge_interval(tsdn,
            arena);
        assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
        if (min_interval > interval) {
            min_interval = interval;
        }
    }
    background_thread_sleep(tsdn, info, min_interval);
}

static bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
    if (info == &background_thread_info[0]) {
        malloc_mutex_assert_owner(tsd_tsdn(tsd),
            &background_thread_lock);
    } else {
        malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
            &background_thread_lock);
    }

    pre_reentrancy(tsd, NULL);
    malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
    bool has_thread;
    assert(info->state != background_thread_paused);
    if (info->state == background_thread_started) {
        has_thread = true;
        info->state = background_thread_stopped;
        pthread_cond_signal(&info->cond);
    } else {
        has_thread = false;
    }
    malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

    if (!has_thread) {
        post_reentrancy(tsd);
        return false;
    }
    void *ret;
    if (pthread_join(info->thread, &ret)) {
        post_reentrancy(tsd);
        return true;
    }
    assert(ret == NULL);
    n_background_threads--;
    post_reentrancy(tsd);

    return false;
}

static void *background_thread_entry(void *ind_arg);

static int
background_thread_create_signals_masked(pthread_t *thread,
    const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
    /*
     * Mask signals during thread creation so that the thread inherits
     * an empty signal set.
     */
    sigset_t set;
    sigfillset(&set);
    sigset_t oldset;
    int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
    if (mask_err != 0) {
        return mask_err;
    }
    int create_err = pthread_create_wrapper(thread, attr, start_routine,
        arg);
    /*
     * Restore the signal mask. Failure to restore the signal mask here
     * changes program behavior.
     */
    int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    if (restore_err != 0) {
        malloc_printf("<jemalloc>: background thread creation "
            "failed (%d), and signal mask restoration failed "
            "(%d)\n", create_err, restore_err);
        if (opt_abort) {
            abort();
        }
    }
    return create_err;
}

static void
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
    bool *created_threads) {
    if (likely(*n_created == n_background_threads)) {
        return;
    }

    malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
label_restart:
    malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
    for (unsigned i = 1; i < ncpus; i++) {
        if (created_threads[i]) {
            continue;
        }
        background_thread_info_t *info = &background_thread_info[i];
        malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
        assert(info->state != background_thread_paused);
        bool create = (info->state == background_thread_started);
        malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
        if (!create) {
            continue;
        }

        /*
         * To avoid deadlock with prefork handlers (which waits for the
         * mutex held here), unlock before calling pthread_create().
         */
        malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);

        pre_reentrancy(tsd, NULL);
        int err = background_thread_create_signals_masked(&info->thread,
            NULL, background_thread_entry, (void *)(uintptr_t)i);
        post_reentrancy(tsd);

        if (err == 0) {
            (*n_created)++;
            created_threads[i] = true;
        } else {
            malloc_printf("<jemalloc>: background thread "
                "creation failed (%d)\n", err);
            if (opt_abort) {
                abort();
            }
        }
        /* Restart since we unlocked. */
        goto label_restart;
    }
    malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
    malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
}

static void
background_thread0_work(tsd_t *tsd) {
    /* Thread0 is also responsible for launching / terminating threads. */
    VARIABLE_ARRAY(bool, created_threads, ncpus);
    unsigned i;
    for (i = 1; i < ncpus; i++) {
        created_threads[i] = false;
    }
    /* Start working, and create more threads when asked. */
    unsigned n_created = 1;
    while (background_thread_info[0].state != background_thread_stopped) {
        if (background_thread_pause_check(tsd_tsdn(tsd),
            &background_thread_info[0])) {
            continue;
        }
        check_background_thread_creation(tsd, &n_created,
            (bool *)&created_threads);
        background_work_sleep_once(tsd_tsdn(tsd),
            &background_thread_info[0], 0);
    }

    /*
     * Shut down other threads at exit. Note that the ctl thread is holding
     * the global background_thread mutex (and is waiting) for us.
     */
    assert(!background_thread_enabled());
    for (i = 1; i < ncpus; i++) {
        background_thread_info_t *info = &background_thread_info[i];
        assert(info->state != background_thread_paused);
        if (created_threads[i]) {
            background_threads_disable_single(tsd, info);
        } else {
            malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
            /* Clear in case the thread wasn't created. */
            info->state = background_thread_stopped;
            malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
        }
    }
    background_thread_info[0].state = background_thread_stopped;
    assert(n_background_threads == 1);
}

static void
background_work(tsd_t *tsd, unsigned ind) {
    background_thread_info_t *info = &background_thread_info[ind];

    malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
    background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
        BACKGROUND_THREAD_INDEFINITE_SLEEP);
    if (ind == 0) {
        background_thread0_work(tsd);
    } else {
        while (info->state != background_thread_stopped) {
            if (background_thread_pause_check(tsd_tsdn(tsd),
                info)) {
                continue;
            }
            background_work_sleep_once(tsd_tsdn(tsd), info, ind);
        }
    }
    assert(info->state == background_thread_stopped);
    background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
    malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}

static void *
background_thread_entry(void *ind_arg) {
    unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
    assert(thread_ind < ncpus);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
    pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#endif
    if (opt_percpu_arena != percpu_arena_disabled) {
        set_current_thread_affinity((int)thread_ind);
    }
    /*
     * Start periodic background work. We use internal tsd which avoids
     * side effects, for example triggering new arena creation (which in
     * turn triggers another background thread creation).
     */
    background_work(tsd_internal_fetch(), thread_ind);
    assert(pthread_equal(pthread_self(),
        background_thread_info[thread_ind].thread));

    return NULL;
}

static void
background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
    malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
    info->state = background_thread_started;
    background_thread_info_init(tsd_tsdn(tsd), info);
    n_background_threads++;
}

/* Create a new background thread if needed. */
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
    assert(have_background_thread);
    malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

    /* We create at most NCPUs threads. */
    size_t thread_ind = arena_ind % ncpus;
    background_thread_info_t *info = &background_thread_info[thread_ind];

    bool need_new_thread;
    malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
    need_new_thread = background_thread_enabled() &&
        (info->state == background_thread_stopped);
    if (need_new_thread) {
        background_thread_init(tsd, info);
    }
    malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
    if (!need_new_thread) {
        return false;
    }
    if (arena_ind != 0) {
        /* Threads are created asynchronously by Thread 0. */
        background_thread_info_t *t0 = &background_thread_info[0];
        malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
        assert(t0->state == background_thread_started);
        pthread_cond_signal(&t0->cond);
        malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);

        return false;
    }

    pre_reentrancy(tsd, NULL);
    /*
     * To avoid complications (besides reentrancy), create internal
     * background threads with the underlying pthread_create.
     */
    int err = background_thread_create_signals_masked(&info->thread, NULL,
        background_thread_entry, (void *)thread_ind);
    post_reentrancy(tsd);

    if (err != 0) {
        malloc_printf("<jemalloc>: arena 0 background thread creation "
            "failed (%d)\n", err);
        malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
        info->state = background_thread_stopped;
        n_background_threads--;
        malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

        return true;
    }

    return false;
}

bool
background_threads_enable(tsd_t *tsd) {
    assert(n_background_threads == 0);
    assert(background_thread_enabled());
    malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

    VARIABLE_ARRAY(bool, marked, ncpus);
    unsigned i, nmarked;
    for (i = 0; i < ncpus; i++) {
        marked[i] = false;
    }
    nmarked = 0;
    /* Mark the threads we need to create for thread 0. */
    unsigned n = narenas_total_get();
    for (i = 1; i < n; i++) {
        if (marked[i % ncpus] ||
            arena_get(tsd_tsdn(tsd), i, false) == NULL) {
            continue;
        }
        background_thread_info_t *info = &background_thread_info[i];
        malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
        assert(info->state == background_thread_stopped);
        background_thread_init(tsd, info);
        malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
        marked[i % ncpus] = true;
        if (++nmarked == ncpus) {
            break;
        }
    }

    return background_thread_create(tsd, 0);
}

bool
background_threads_disable(tsd_t *tsd) {
    assert(!background_thread_enabled());
    malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

    /* Thread 0 will be responsible for terminating other threads. */
    if (background_threads_disable_single(tsd,
        &background_thread_info[0])) {
        return true;
    }
    assert(n_background_threads == 0);

    return false;
}

/* Check if we need to signal the background thread early. */
void
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) {
    background_thread_info_t *info = arena_background_thread_info_get(
        arena);
    if (malloc_mutex_trylock(tsdn, &info->mtx)) {
        /*
         * Background thread may hold the mutex for a long period of
         * time. We'd like to avoid the variance on application
         * threads. So keep this non-blocking, and leave the work to a
         * future epoch.
         */
        return;
    }

    if (info->state != background_thread_started) {
        goto label_done;
    }
    if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
        goto label_done;
    }

    ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
    if (decay_time <= 0) {
        /* Purging is eagerly done or disabled currently. */
        goto label_done_unlock2;
    }
    uint64_t decay_interval_ns = nstime_ns(&decay->interval);
    assert(decay_interval_ns > 0);

    nstime_t diff;
    nstime_init(&diff, background_thread_wakeup_time_get(info));
    if (nstime_compare(&diff, &decay->epoch) <= 0) {
        goto label_done_unlock2;
    }
    nstime_subtract(&diff, &decay->epoch);
    if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
        goto label_done_unlock2;
    }

    if (npages_new > 0) {
        size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
        /*
         * Compute how many new pages we would need to purge by the next
         * wakeup, which is used to determine if we should signal the
         * background thread.
         */
        uint64_t npurge_new;
        if (n_epoch >= SMOOTHSTEP_NSTEPS) {
            npurge_new = npages_new;
        } else {
            uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
            assert(h_steps_max >=
                h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
            npurge_new = npages_new * (h_steps_max -
                h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
            npurge_new >>= SMOOTHSTEP_BFP;
        }
        info->npages_to_purge_new += npurge_new;
    }

    bool should_signal;
    if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
        should_signal = true;
    } else if (unlikely(background_thread_indefinite_sleep(info)) &&
        (extents_npages_get(&arena->extents_dirty) > 0 ||
        extents_npages_get(&arena->extents_muzzy) > 0 ||
        info->npages_to_purge_new > 0)) {
        should_signal = true;
    } else {
        should_signal = false;
    }

    if (should_signal) {
        info->npages_to_purge_new = 0;
        pthread_cond_signal(&info->cond);
    }
label_done_unlock2:
    malloc_mutex_unlock(tsdn, &decay->mtx);
label_done:
    malloc_mutex_unlock(tsdn, &info->mtx);
}

void
background_thread_prefork0(tsdn_t *tsdn) {
    malloc_mutex_prefork(tsdn, &background_thread_lock);
    background_thread_enabled_at_fork = background_thread_enabled();
}

void
background_thread_prefork1(tsdn_t *tsdn) {
    for (unsigned i = 0; i < ncpus; i++) {
        malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
    }
}

void
background_thread_postfork_parent(tsdn_t *tsdn) {
    for (unsigned i = 0; i < ncpus; i++) {
        malloc_mutex_postfork_parent(tsdn,
            &background_thread_info[i].mtx);
    }
    malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
}

void
background_thread_postfork_child(tsdn_t *tsdn) {
    for (unsigned i = 0; i < ncpus; i++) {
        malloc_mutex_postfork_child(tsdn,
            &background_thread_info[i].mtx);
    }
    malloc_mutex_postfork_child(tsdn, &background_thread_lock);
    if (!background_thread_enabled_at_fork) {
        return;
    }

    /* Clear background_thread state (reset to disabled for child). */
    malloc_mutex_lock(tsdn, &background_thread_lock);
    n_background_threads = 0;
    background_thread_enabled_set(tsdn, false);
    for (unsigned i = 0; i < ncpus; i++) {
        background_thread_info_t *info = &background_thread_info[i];
        malloc_mutex_lock(tsdn, &info->mtx);
        info->state = background_thread_stopped;
        int ret = pthread_cond_init(&info->cond, NULL);
        assert(ret == 0);
        background_thread_info_init(tsdn, info);
        malloc_mutex_unlock(tsdn, &info->mtx);
    }
    malloc_mutex_unlock(tsdn, &background_thread_lock);
}

bool
background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
    assert(config_stats);
    malloc_mutex_lock(tsdn, &background_thread_lock);
    if (!background_thread_enabled()) {
        malloc_mutex_unlock(tsdn, &background_thread_lock);
        return true;
    }

    stats->num_threads = n_background_threads;
    uint64_t num_runs = 0;
    nstime_init(&stats->run_interval, 0);
    for (unsigned i = 0; i < ncpus; i++) {
        background_thread_info_t *info = &background_thread_info[i];
        malloc_mutex_lock(tsdn, &info->mtx);
        if (info->state != background_thread_stopped) {
            num_runs += info->tot_n_runs;
            nstime_add(&stats->run_interval, &info->tot_sleep_time);
        }
        malloc_mutex_unlock(tsdn, &info->mtx);
    }
    stats->num_runs = num_runs;
    if (num_runs > 0) {
        nstime_idivide(&stats->run_interval, num_runs);
    }
    malloc_mutex_unlock(tsdn, &background_thread_lock);

    return false;
}

#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS

/*
 * When lazy lock is enabled, we need to make sure setting isthreaded before
 * taking any background_thread locks. This is called early in ctl (instead of
 * wait for the pthread_create calls to trigger) because the mutex is required
 * before creating background threads.
 */
void
background_thread_ctl_init(tsdn_t *tsdn) {
    malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
    pthread_once(&once_control, pthread_create_wrapper_once);
#endif
}

#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */

bool
background_thread_boot0(void) {
    if (!have_background_thread && opt_background_thread) {
        malloc_printf("<jemalloc>: option background_thread currently "
            "supports pthread only\n");
        return true;
    }

#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
    pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
    if (pthread_create_fptr == NULL) {
        can_enable_background_thread = false;
        if (config_lazy_lock || opt_background_thread) {
            malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
                "\"pthread_create\")\n");
            abort();
        }
    } else {
        can_enable_background_thread = true;
    }
#endif
    return false;
}

bool
background_thread_boot1(tsdn_t *tsdn) {
#ifdef JEMALLOC_BACKGROUND_THREAD
    assert(have_background_thread);
    assert(narenas_total_get() > 0);

    background_thread_enabled_set(tsdn, opt_background_thread);
    if (malloc_mutex_init(&background_thread_lock,
        "background_thread_global",
        WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
        malloc_mutex_rank_exclusive)) {
        return true;
    }
    if (opt_background_thread) {
        background_thread_ctl_init(tsdn);
    }

    background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
        b0get(), ncpus * sizeof(background_thread_info_t), CACHELINE);
    if (background_thread_info == NULL) {
        return true;
    }

    for (unsigned i = 0; i < ncpus; i++) {
        background_thread_info_t *info = &background_thread_info[i];
        /* Thread mutex is rank_inclusive because of thread0. */
        if (malloc_mutex_init(&info->mtx, "background_thread",
            WITNESS_RANK_BACKGROUND_THREAD,
            malloc_mutex_address_ordered)) {
            return true;
        }
        if (pthread_cond_init(&info->cond, NULL)) {
            return true;
        }
        malloc_mutex_lock(tsdn, &info->mtx);
        info->state = background_thread_stopped;
        background_thread_info_init(tsdn, info);
        malloc_mutex_unlock(tsdn, &info->mtx);
    }
#endif

    return false;
}
269 deps/jemalloc/src/extent_dss.c (vendored, new file)
@@ -0,0 +1,269 @@
#define JEMALLOC_EXTENT_DSS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/spin.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;

const char *dss_prec_names[] = {
    "disabled",
    "primary",
    "secondary",
    "N/A"
};

/*
 * Current dss precedence default, used when creating new arenas. NB: This is
 * stored as unsigned rather than dss_prec_t because in principle there's no
 * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
 * atomic operations to synchronize the setting.
 */
static atomic_u_t dss_prec_default = ATOMIC_INIT(
    (unsigned)DSS_PREC_DEFAULT);

/* Base address of the DSS. */
static void *dss_base;
/* Atomic boolean indicating whether a thread is currently extending DSS. */
static atomic_b_t dss_extending;
/* Atomic boolean indicating whether the DSS is exhausted. */
static atomic_b_t dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static atomic_p_t dss_max;

/******************************************************************************/

static void *
extent_dss_sbrk(intptr_t increment) {
#ifdef JEMALLOC_DSS
    return sbrk(increment);
#else
    not_implemented();
    return NULL;
#endif
}

dss_prec_t
extent_dss_prec_get(void) {
    dss_prec_t ret;

    if (!have_dss) {
        return dss_prec_disabled;
    }
    ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
    return ret;
}

bool
extent_dss_prec_set(dss_prec_t dss_prec) {
    if (!have_dss) {
        return (dss_prec != dss_prec_disabled);
    }
    atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
    return false;
}

static void
extent_dss_extending_start(void) {
    spin_t spinner = SPIN_INITIALIZER;
    while (true) {
        bool expected = false;
        if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
            true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
            break;
        }
        spin_adaptive(&spinner);
    }
}

static void
extent_dss_extending_finish(void) {
    assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));

    atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
}

static void *
extent_dss_max_update(void *new_addr) {
    /*
     * Get the current end of the DSS as max_cur and assure that dss_max is
     * up to date.
     */
    void *max_cur = extent_dss_sbrk(0);
    if (max_cur == (void *)-1) {
        return NULL;
    }
    atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
    /* Fixed new_addr can only be supported if it is at the edge of DSS. */
    if (new_addr != NULL && max_cur != new_addr) {
        return NULL;
    }
    return max_cur;
}

void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit) {
    extent_t *gap;

    cassert(have_dss);
    assert(size > 0);
    assert(alignment > 0);

    /*
     * sbrk() uses a signed increment argument, so take care not to
     * interpret a large allocation request as a negative increment.
     */
    if ((intptr_t)size < 0) {
        return NULL;
    }

    gap = extent_alloc(tsdn, arena);
    if (gap == NULL) {
        return NULL;
    }

    extent_dss_extending_start();
    if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
        /*
         * The loop is necessary to recover from races with other
         * threads that are using the DSS for something other than
         * malloc.
         */
        while (true) {
            void *max_cur = extent_dss_max_update(new_addr);
            if (max_cur == NULL) {
                goto label_oom;
            }

            /*
             * Compute how much page-aligned gap space (if any) is
             * necessary to satisfy alignment. This space can be
             * recycled for later use.
             */
            void *gap_addr_page = (void *)(PAGE_CEILING(
                (uintptr_t)max_cur));
            void *ret = (void *)ALIGNMENT_CEILING(
                (uintptr_t)gap_addr_page, alignment);
            size_t gap_size_page = (uintptr_t)ret -
                (uintptr_t)gap_addr_page;
            if (gap_size_page != 0) {
                extent_init(gap, arena, gap_addr_page,
                    gap_size_page, false, NSIZES,
                    arena_extent_sn_next(arena),
                    extent_state_active, false, true);
            }
            /*
             * Compute the address just past the end of the desired
             * allocation space.
             */
            void *dss_next = (void *)((uintptr_t)ret + size);
            if ((uintptr_t)ret < (uintptr_t)max_cur ||
                (uintptr_t)dss_next < (uintptr_t)max_cur) {
                goto label_oom; /* Wrap-around. */
            }
            /* Compute the increment, including subpage bytes. */
            void *gap_addr_subpage = max_cur;
            size_t gap_size_subpage = (uintptr_t)ret -
                (uintptr_t)gap_addr_subpage;
            intptr_t incr = gap_size_subpage + size;

            assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
                size);

            /* Try to allocate. */
            void *dss_prev = extent_dss_sbrk(incr);
            if (dss_prev == max_cur) {
                /* Success. */
                atomic_store_p(&dss_max, dss_next,
                    ATOMIC_RELEASE);
                extent_dss_extending_finish();

                if (gap_size_page != 0) {
                    extent_dalloc_gap(tsdn, arena, gap);
                } else {
                    extent_dalloc(tsdn, arena, gap);
                }
                if (!*commit) {
                    *commit = pages_decommit(ret, size);
                }
                if (*zero && *commit) {
                    extent_hooks_t *extent_hooks =
                        EXTENT_HOOKS_INITIALIZER;
                    extent_t extent;

                    extent_init(&extent, arena, ret, size,
                        size, false, NSIZES,
                        extent_state_active, false, true);
                    if (extent_purge_forced_wrapper(tsdn,
                        arena, &extent_hooks, &extent, 0,
                        size)) {
                        memset(ret, 0, size);
                    }
                }
                return ret;
            }
            /*
             * Failure, whether due to OOM or a race with a raw
             * sbrk() call from outside the allocator.
             */
            if (dss_prev == (void *)-1) {
                /* OOM. */
                atomic_store_b(&dss_exhausted, true,
                    ATOMIC_RELEASE);
                goto label_oom;
            }
        }
    }
label_oom:
    extent_dss_extending_finish();
    extent_dalloc(tsdn, arena, gap);
    return NULL;
}

static bool
extent_in_dss_helper(void *addr, void *max) {
    return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
        (uintptr_t)max);
}

bool
extent_in_dss(void *addr) {
    cassert(have_dss);

    return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
        ATOMIC_ACQUIRE));
}

bool
extent_dss_mergeable(void *addr_a, void *addr_b) {
    void *max;

    cassert(have_dss);

    if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
        (uintptr_t)dss_base) {
        return true;
    }

    max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
    return (extent_in_dss_helper(addr_a, max) ==
        extent_in_dss_helper(addr_b, max));
}

void
extent_dss_boot(void) {
    cassert(have_dss);

    dss_base = extent_dss_sbrk(0);
    atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
    atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
    atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
}

/******************************************************************************/
42 deps/jemalloc/src/extent_mmap.c (vendored, new file)
@@ -0,0 +1,42 @@
#define JEMALLOC_EXTENT_MMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"

/******************************************************************************/
/* Data. */

bool opt_retain =
#ifdef JEMALLOC_RETAIN
    true
#else
    false
#endif
    ;

/******************************************************************************/

void *
extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit) {
    void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment,
        PAGE), commit);
    if (ret == NULL) {
        return NULL;
    }
    assert(ret != NULL);
    if (*commit) {
        *zero = true;
    }
    return ret;
}

bool
extent_dalloc_mmap(void *addr, size_t size) {
    if (!opt_retain) {
        pages_unmap(addr, size);
    }
    return opt_retain;
}
12 deps/jemalloc/src/hooks.c (vendored, new file)
@@ -0,0 +1,12 @@
#include "jemalloc/internal/jemalloc_preamble.h"

/*
 * The hooks are a little bit screwy -- they're not genuinely exported in the
 * sense that we want them available to end-users, but we do want them visible
 * from outside the generated library, so that we can use them in test code.
 */
JEMALLOC_EXPORT
void (*hooks_arena_new_hook)() = NULL;

JEMALLOC_EXPORT
void (*hooks_libc_hook)() = NULL;
132 deps/jemalloc/src/jemalloc_cpp.cpp (vendored, new file)
@@ -0,0 +1,132 @@
#include <mutex>
#include <new>

#define JEMALLOC_CPP_CPP_
#ifdef __cplusplus
extern "C" {
#endif

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#ifdef __cplusplus
}
#endif

// All operators in this file are exported.

// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt
// thunk?
//
// extern __typeof (sdallocx) sdallocx_int
//  __attribute ((alias ("sdallocx"),
//  visibility ("hidden")));
//
// ... but it needs to work with jemalloc namespaces.

void *operator new(std::size_t size);
void *operator new[](std::size_t size);
void *operator new(std::size_t size, const std::nothrow_t &) noexcept;
void *operator new[](std::size_t size, const std::nothrow_t &) noexcept;
void operator delete(void *ptr) noexcept;
void operator delete[](void *ptr) noexcept;
void operator delete(void *ptr, const std::nothrow_t &) noexcept;
void operator delete[](void *ptr, const std::nothrow_t &) noexcept;

#if __cpp_sized_deallocation >= 201309
/* C++14's sized-delete operators. */
void operator delete(void *ptr, std::size_t size) noexcept;
void operator delete[](void *ptr, std::size_t size) noexcept;
#endif

template <bool IsNoExcept>
void *
newImpl(std::size_t size) noexcept(IsNoExcept) {
    void *ptr = je_malloc(size);
    if (likely(ptr != nullptr))
        return ptr;

    while (ptr == nullptr) {
        std::new_handler handler;
        // GCC-4.8 and clang 4.0 do not have std::get_new_handler.
        {
            static std::mutex mtx;
            std::lock_guard<std::mutex> lock(mtx);

            handler = std::set_new_handler(nullptr);
            std::set_new_handler(handler);
        }
        if (handler == nullptr)
            break;

        try {
            handler();
        } catch (const std::bad_alloc &) {
            break;
        }

        ptr = je_malloc(size);
    }

    if (ptr == nullptr && !IsNoExcept)
        std::__throw_bad_alloc();
    return ptr;
}

void *
operator new(std::size_t size) {
    return newImpl<false>(size);
}

void *
operator new[](std::size_t size) {
    return newImpl<false>(size);
}

void *
operator new(std::size_t size, const std::nothrow_t &) noexcept {
    return newImpl<true>(size);
}

void *
operator new[](std::size_t size, const std::nothrow_t &) noexcept {
    return newImpl<true>(size);
}

void
operator delete(void *ptr) noexcept {
    je_free(ptr);
}

void
operator delete[](void *ptr) noexcept {
    je_free(ptr);
}

void
operator delete(void *ptr, const std::nothrow_t &) noexcept {
    je_free(ptr);
}

void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
    je_free(ptr);
}

#if __cpp_sized_deallocation >= 201309

void
operator delete(void *ptr, std::size_t size) noexcept {
    if (unlikely(ptr == nullptr)) {
        return;
    }
    je_sdallocx(ptr, size, /*flags=*/0);
}

void operator delete[](void *ptr, std::size_t size) noexcept {
    if (unlikely(ptr == nullptr)) {
        return;
    }
    je_sdallocx(ptr, size, /*flags=*/0);
}

#endif // __cpp_sized_deallocation
371 deps/jemalloc/src/large.c (vendored, new file)
@@ -0,0 +1,371 @@
#define JEMALLOC_LARGE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/

void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
    assert(usize == sz_s2u(usize));

    return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}

void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero) {
    size_t ausize;
    extent_t *extent;
    bool is_zeroed;
    UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

    assert(!tsdn_null(tsdn) || arena != NULL);

    ausize = sz_sa2u(usize, alignment);
    if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
        return NULL;
    }

    if (config_fill && unlikely(opt_zero)) {
        zero = true;
    }
    /*
     * Copy zero into is_zeroed and pass the copy when allocating the
     * extent, so that it is possible to make correct junk/zero fill
     * decisions below, even if is_zeroed ends up true when zero is false.
     */
    is_zeroed = zero;
    if (likely(!tsdn_null(tsdn))) {
        arena = arena_choose(tsdn_tsd(tsdn), arena);
    }
    if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
        arena, usize, alignment, &is_zeroed)) == NULL) {
        return NULL;
    }

    /* See comments in arena_bin_slabs_full_insert(). */
    if (!arena_is_auto(arena)) {
        /* Insert extent into large. */
        malloc_mutex_lock(tsdn, &arena->large_mtx);
        extent_list_append(&arena->large, extent);
        malloc_mutex_unlock(tsdn, &arena->large_mtx);
    }
    if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
        prof_idump(tsdn);
    }

    if (zero) {
        assert(is_zeroed);
    } else if (config_fill && unlikely(opt_junk_alloc)) {
        memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
            extent_usize_get(extent));
    }

    arena_decay_tick(tsdn, arena);
    return extent_addr_get(extent);
}

static void
large_dalloc_junk_impl(void *ptr, size_t size) {
    memset(ptr, JEMALLOC_FREE_JUNK, size);
}
large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;

static void
large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
    if (config_fill && have_dss && unlikely(opt_junk_free)) {
        /*
         * Only bother junk filling if the extent isn't about to be
         * unmapped.
         */
        if (opt_retain || (have_dss && extent_in_dss(ptr))) {
            large_dalloc_junk(ptr, size);
        }
    }
}
large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
    large_dalloc_maybe_junk_impl;

static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
    arena_t *arena = extent_arena_get(extent);
    size_t oldusize = extent_usize_get(extent);
    extent_hooks_t *extent_hooks = extent_hooks_get(arena);
    size_t diff = extent_size_get(extent) - (usize + sz_large_pad);

    assert(oldusize > usize);

    if (extent_hooks->split == NULL) {
        return true;
    }

    /* Split excess pages. */
    if (diff != 0) {
        extent_t *trail = extent_split_wrapper(tsdn, arena,
            &extent_hooks, extent, usize + sz_large_pad,
            sz_size2index(usize), false, diff, NSIZES, false);
        if (trail == NULL) {
            return true;
        }

        if (config_fill && unlikely(opt_junk_free)) {
            large_dalloc_maybe_junk(extent_addr_get(trail),
                extent_size_get(trail));
        }

        arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
    }

    arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);

    return false;
}

static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
    bool zero) {
    arena_t *arena = extent_arena_get(extent);
    size_t oldusize = extent_usize_get(extent);
    extent_hooks_t *extent_hooks = extent_hooks_get(arena);
    size_t trailsize = usize - oldusize;

    if (extent_hooks->merge == NULL) {
        return true;
    }

    if (config_fill && unlikely(opt_zero)) {
        zero = true;
    }
    /*
     * Copy zero into is_zeroed_trail and pass the copy when allocating the
     * extent, so that it is possible to make correct junk/zero fill
     * decisions below, even if is_zeroed_trail ends up true when zero is
     * false.
     */
    bool is_zeroed_trail = zero;
    bool commit = true;
    extent_t *trail;
    bool new_mapping;
    if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
        &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
        CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL
        || (trail = extents_alloc(tsdn, arena, &extent_hooks,
        &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
        CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) {
        if (config_stats) {
            new_mapping = false;
        }
    } else {
        if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
            extent_past_get(extent), trailsize, 0, CACHELINE, false,
            NSIZES, &is_zeroed_trail, &commit)) == NULL) {
            return true;
        }
        if (config_stats) {
            new_mapping = true;
        }
    }

    if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
        extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
        return true;
    }
    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
    szind_t szind = sz_size2index(usize);
    extent_szind_set(extent, szind);
    rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
        (uintptr_t)extent_addr_get(extent), szind, false);

    if (config_stats && new_mapping) {
        arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
    }

    if (zero) {
        if (config_cache_oblivious) {
            /*
             * Zero the trailing bytes of the original allocation's
             * last page, since they are in an indeterminate state.
             * There will always be trailing bytes, because ptr's
             * offset from the beginning of the extent is a multiple
             * of CACHELINE in [0 .. PAGE).
             */
            void *zbase = (void *)
                ((uintptr_t)extent_addr_get(extent) + oldusize);
            void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
                PAGE));
            size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
            assert(nzero > 0);
            memset(zbase, 0, nzero);
        }
        assert(is_zeroed_trail);
    } else if (config_fill && unlikely(opt_junk_alloc)) {
        memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
            JEMALLOC_ALLOC_JUNK, usize - oldusize);
    }

    arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);

    return false;
}

bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero) {
    size_t oldusize = extent_usize_get(extent);

    /* The following should have been caught by callers. */
    assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
    /* Both allocation sizes must be large to avoid a move. */
    assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS);

    if (usize_max > oldusize) {
        /* Attempt to expand the allocation in-place. */
        if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
            zero)) {
            arena_decay_tick(tsdn, extent_arena_get(extent));
            return false;
        }
        /* Try again, this time with usize_min. */
        if (usize_min < usize_max && usize_min > oldusize &&
            large_ralloc_no_move_expand(tsdn, extent, usize_min,
            zero)) {
            arena_decay_tick(tsdn, extent_arena_get(extent));
            return false;
        }
    }

    /*
     * Avoid moving the allocation if the existing extent size accommodates
     * the new size.
     */
    if (oldusize >= usize_min && oldusize <= usize_max) {
        arena_decay_tick(tsdn, extent_arena_get(extent));
        return false;
    }

    /* Attempt to shrink the allocation in-place. */
    if (oldusize > usize_max) {
        if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
            arena_decay_tick(tsdn, extent_arena_get(extent));
            return false;
        }
    }
    return true;
}

static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero) {
    if (alignment <= CACHELINE) {
        return large_malloc(tsdn, arena, usize, zero);
    }
    return large_palloc(tsdn, arena, usize, alignment, zero);
}

void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
    size_t oldusize = extent_usize_get(extent);

    /* The following should have been caught by callers. */
    assert(usize > 0 && usize <= LARGE_MAXCLASS);
    /* Both allocation sizes must be large to avoid a move. */
    assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS);

    /* Try to avoid moving the allocation. */
    if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
        return extent_addr_get(extent);
    }

    /*
     * usize and old size are different enough that we need to use a
     * different size class. In that case, fall back to allocating new
     * space and copying.
     */
    void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
        zero);
    if (ret == NULL) {
        return NULL;
    }

    size_t copysize = (usize < oldusize) ? usize : oldusize;
    memcpy(ret, extent_addr_get(extent), copysize);
    isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
    return ret;
}

/*
 * junked_locked indicates whether the extent's data have been junk-filled, and
 * whether the arena's large_mtx is currently held.
 */
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    bool junked_locked) {
    if (!junked_locked) {
        /* See comments in arena_bin_slabs_full_insert(). */
        if (!arena_is_auto(arena)) {
            malloc_mutex_lock(tsdn, &arena->large_mtx);
            extent_list_remove(&arena->large, extent);
            malloc_mutex_unlock(tsdn, &arena->large_mtx);
        }
        large_dalloc_maybe_junk(extent_addr_get(extent),
            extent_usize_get(extent));
    } else {
        malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
        if (!arena_is_auto(arena)) {
            extent_list_remove(&arena->large, extent);
        }
    }
    arena_extent_dalloc_large_prep(tsdn, arena, extent);
}

static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
    extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
    arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
}

void
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
    large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
}

void
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
    large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
}

void
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
    arena_t *arena = extent_arena_get(extent);
    large_dalloc_prep_impl(tsdn, arena, extent, false);
    large_dalloc_finish_impl(tsdn, arena, extent);
    arena_decay_tick(tsdn, arena);
}

size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
    return extent_usize_get(extent);
}

prof_tctx_t *
large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
    return extent_prof_tctx_get(extent);
}

void
large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
    extent_prof_tctx_set(extent, tctx);
}

void
large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
    large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}
689
deps/jemalloc/src/malloc_io.c
vendored
Normal file
@@ -0,0 +1,689 @@
#define JEMALLOC_MALLOC_IO_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/util.h"

#ifdef assert
# undef assert
#endif
#ifdef not_reached
# undef not_reached
#endif
#ifdef not_implemented
# undef not_implemented
#endif
#ifdef assert_not_implemented
# undef assert_not_implemented
#endif

/*
 * Define simple versions of assertion macros that won't recurse in case
 * of assertion failures in malloc_*printf().
 */
#define assert(e) do {						\
    if (config_debug && !(e)) {					\
        malloc_write("<jemalloc>: Failed assertion\n");		\
        abort();						\
    }								\
} while (0)

#define not_reached() do {					\
    if (config_debug) {						\
        malloc_write("<jemalloc>: Unreachable code reached\n");	\
        abort();						\
    }								\
    unreachable();						\
} while (0)

#define not_implemented() do {					\
    if (config_debug) {						\
        malloc_write("<jemalloc>: Not implemented\n");		\
        abort();						\
    }								\
} while (0)

#define assert_not_implemented(e) do {				\
    if (unlikely(config_debug && !(e))) {			\
        not_implemented();					\
    }								\
} while (0)

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void wrtmessage(void *cbopaque, const char *s);
#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
    size_t *slen_p);
#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
    size_t *slen_p);

/******************************************************************************/

/* malloc_message() setup. */
static void
wrtmessage(void *cbopaque, const char *s) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
    /*
     * Use syscall(2) rather than write(2) when possible in order to avoid
     * the possibility of memory allocation within libc.  This is necessary
     * on FreeBSD; most operating systems do not have this problem though.
     *
     * syscall() returns long or int, depending on platform, so capture the
     * unused result in the widest plausible type to avoid compiler
     * warnings.
     */
    UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else
    UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s));
#endif
}

JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);

/*
 * Wrapper around malloc_message() that avoids the need for
 * je_malloc_message(...) throughout the code.
 */
void
malloc_write(const char *s) {
    if (je_malloc_message != NULL) {
        je_malloc_message(NULL, s);
    } else {
        wrtmessage(NULL, s);
    }
}

/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int err, char *buf, size_t buflen) {
#ifdef _WIN32
    FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
        (LPSTR)buf, (DWORD)buflen, NULL);
    return 0;
#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
    char *b = strerror_r(err, buf, buflen);
    if (b != buf) {
        strncpy(buf, b, buflen);
        buf[buflen-1] = '\0';
    }
    return 0;
#else
    return strerror_r(err, buf, buflen);
#endif
}

uintmax_t
malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
    uintmax_t ret, digit;
    unsigned b;
    bool neg;
    const char *p, *ns;

    p = nptr;
    if (base < 0 || base == 1 || base > 36) {
        ns = p;
        set_errno(EINVAL);
        ret = UINTMAX_MAX;
        goto label_return;
    }
    b = base;

    /* Swallow leading whitespace and get sign, if any. */
    neg = false;
    while (true) {
        switch (*p) {
        case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
            p++;
            break;
        case '-':
            neg = true;
            /* Fall through. */
        case '+':
            p++;
            /* Fall through. */
        default:
            goto label_prefix;
        }
    }

    /* Get prefix, if any. */
    label_prefix:
    /*
     * Note where the first non-whitespace/sign character is so that it is
     * possible to tell whether any digits are consumed (e.g., " 0" vs.
     * " -x").
     */
    ns = p;
    if (*p == '0') {
        switch (p[1]) {
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7':
            if (b == 0) {
                b = 8;
            }
            if (b == 8) {
                p++;
            }
            break;
        case 'X': case 'x':
            switch (p[2]) {
            case '0': case '1': case '2': case '3': case '4':
            case '5': case '6': case '7': case '8': case '9':
            case 'A': case 'B': case 'C': case 'D': case 'E':
            case 'F':
            case 'a': case 'b': case 'c': case 'd': case 'e':
            case 'f':
                if (b == 0) {
                    b = 16;
                }
                if (b == 16) {
                    p += 2;
                }
                break;
            default:
                break;
            }
            break;
        default:
            p++;
            ret = 0;
            goto label_return;
        }
    }
    if (b == 0) {
        b = 10;
    }

    /* Convert. */
    ret = 0;
    while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
        || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
        || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
        uintmax_t pret = ret;
        ret *= b;
        ret += digit;
        if (ret < pret) {
            /* Overflow. */
            set_errno(ERANGE);
            ret = UINTMAX_MAX;
            goto label_return;
        }
        p++;
    }
    if (neg) {
        ret = (uintmax_t)(-((intmax_t)ret));
    }

    if (p == ns) {
        /* No conversion performed. */
        set_errno(EINVAL);
        ret = UINTMAX_MAX;
        goto label_return;
    }

label_return:
    if (endptr != NULL) {
        if (p == ns) {
            /* No characters were converted. */
            *endptr = (char *)nptr;
        } else {
            *endptr = (char *)p;
        }
    }
    return ret;
}
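
/*
 * Behavior mirrors strtoumax(3).  For example, with base == 0 the prefix
 * selects the radix: "0x1f" parses as 31, "017" as 15, and "42" as 42, while
 * " -x" consumes no digits, sets errno to EINVAL, and returns UINTMAX_MAX.
 */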

static char *
u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
    unsigned i;

    i = U2S_BUFSIZE - 1;
    s[i] = '\0';
    switch (base) {
    case 10:
        do {
            i--;
            s[i] = "0123456789"[x % (uint64_t)10];
            x /= (uint64_t)10;
        } while (x > 0);
        break;
    case 16: {
        const char *digits = (uppercase)
            ? "0123456789ABCDEF"
            : "0123456789abcdef";

        do {
            i--;
            s[i] = digits[x & 0xf];
            x >>= 4;
        } while (x > 0);
        break;
    } default: {
        const char *digits = (uppercase)
            ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
            : "0123456789abcdefghijklmnopqrstuvwxyz";

        assert(base >= 2 && base <= 36);
        do {
            i--;
            s[i] = digits[x % (uint64_t)base];
            x /= (uint64_t)base;
        } while (x > 0);
    }}

    *slen_p = U2S_BUFSIZE - 1 - i;
    return &s[i];
}
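
/*
 * u2s() builds the string backward from the end of the caller's buffer and
 * returns a pointer into it, e.g. u2s(255, 16, false, buf, &slen) yields
 * "ff" with slen == 2.  The leading slack is what lets d2s(), o2s(), and
 * x2s() below prepend a sign or "0"/"0x" prefix in place.
 */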

static char *
d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
    bool neg;

    if ((neg = (x < 0))) {
        x = -x;
    }
    s = u2s(x, 10, false, s, slen_p);
    if (neg) {
        sign = '-';
    }
    switch (sign) {
    case '-':
        if (!neg) {
            break;
        }
        /* Fall through. */
    case ' ':
    case '+':
        s--;
        (*slen_p)++;
        *s = sign;
        break;
    default: not_reached();
    }
    return s;
}

static char *
o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) {
    s = u2s(x, 8, false, s, slen_p);
    if (alt_form && *s != '0') {
        s--;
        (*slen_p)++;
        *s = '0';
    }
    return s;
}

static char *
x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
    s = u2s(x, 16, uppercase, s, slen_p);
    if (alt_form) {
        s -= 2;
        (*slen_p) += 2;
        memcpy(s, uppercase ? "0X" : "0x", 2);
    }
    return s;
}

size_t
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
    size_t i;
    const char *f;

#define APPEND_C(c) do {					\
    if (i < size) {						\
        str[i] = (c);						\
    }								\
    i++;							\
} while (0)
#define APPEND_S(s, slen) do {					\
    if (i < size) {						\
        size_t cpylen = (slen <= size - i) ? slen : size - i;	\
        memcpy(&str[i], s, cpylen);				\
    }								\
    i += slen;							\
} while (0)
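/*
 * As with vsnprintf(3), i counts the length the fully formatted string would
 * have had: APPEND_C() and APPEND_S() stop writing once i reaches size but
 * keep incrementing it, and malloc_vsnprintf() returns that untruncated
 * length.
 */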
#define APPEND_PADDED_S(s, slen, width, left_justify) do {	\
    /* Left padding. */						\
    size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
        (size_t)width - slen : 0);				\
    if (!left_justify && pad_len != 0) {			\
        size_t j;						\
        for (j = 0; j < pad_len; j++) {				\
            APPEND_C(' ');					\
        }							\
    }								\
    /* Value. */						\
    APPEND_S(s, slen);						\
    /* Right padding. */					\
    if (left_justify && pad_len != 0) {				\
        size_t j;						\
        for (j = 0; j < pad_len; j++) {				\
            APPEND_C(' ');					\
        }							\
    }								\
} while (0)
#define GET_ARG_NUMERIC(val, len) do {				\
    switch (len) {						\
    case '?':							\
        val = va_arg(ap, int);					\
        break;							\
    case '?' | 0x80:						\
        val = va_arg(ap, unsigned int);				\
        break;							\
    case 'l':							\
        val = va_arg(ap, long);					\
        break;							\
    case 'l' | 0x80:						\
        val = va_arg(ap, unsigned long);			\
        break;							\
    case 'q':							\
        val = va_arg(ap, long long);				\
        break;							\
    case 'q' | 0x80:						\
        val = va_arg(ap, unsigned long long);			\
        break;							\
    case 'j':							\
        val = va_arg(ap, intmax_t);				\
        break;							\
    case 'j' | 0x80:						\
        val = va_arg(ap, uintmax_t);				\
        break;							\
    case 't':							\
        val = va_arg(ap, ptrdiff_t);				\
        break;							\
    case 'z':							\
        val = va_arg(ap, ssize_t);				\
        break;							\
    case 'z' | 0x80:						\
        val = va_arg(ap, size_t);				\
        break;							\
    case 'p': /* Synthetic; used for %p. */			\
        val = va_arg(ap, uintptr_t);				\
        break;							\
    default:							\
        not_reached();						\
        val = 0;						\
    }								\
} while (0)
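/*
 * len holds the single-character length modifier ('l', 'q', 'j', 't', 'z',
 * or '?' for none); OR-ing in 0x80, as the conversion cases below do,
 * selects the unsigned va_arg type for the same modifier, e.g.
 * GET_ARG_NUMERIC(val, 'z' | 0x80) fetches a size_t.
 */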

    i = 0;
    f = format;
    while (true) {
        switch (*f) {
        case '\0': goto label_out;
        case '%': {
            bool alt_form = false;
            bool left_justify = false;
            bool plus_space = false;
            bool plus_plus = false;
            int prec = -1;
            int width = -1;
            unsigned char len = '?';
            char *s;
            size_t slen;

            f++;
            /* Flags. */
            while (true) {
                switch (*f) {
                case '#':
                    assert(!alt_form);
                    alt_form = true;
                    break;
                case '-':
                    assert(!left_justify);
                    left_justify = true;
                    break;
                case ' ':
                    assert(!plus_space);
                    plus_space = true;
                    break;
                case '+':
                    assert(!plus_plus);
                    plus_plus = true;
                    break;
                default: goto label_width;
                }
                f++;
            }
            /* Width. */
            label_width:
            switch (*f) {
            case '*':
                width = va_arg(ap, int);
                f++;
                if (width < 0) {
                    left_justify = true;
                    width = -width;
                }
                break;
            case '0': case '1': case '2': case '3': case '4':
            case '5': case '6': case '7': case '8': case '9': {
                uintmax_t uwidth;
                set_errno(0);
                uwidth = malloc_strtoumax(f, (char **)&f, 10);
                assert(uwidth != UINTMAX_MAX || get_errno() !=
                    ERANGE);
                width = (int)uwidth;
                break;
            } default:
                break;
            }
            /* Width/precision separator. */
            if (*f == '.') {
                f++;
            } else {
                goto label_length;
            }
            /* Precision. */
            switch (*f) {
            case '*':
                prec = va_arg(ap, int);
                f++;
                break;
            case '0': case '1': case '2': case '3': case '4':
            case '5': case '6': case '7': case '8': case '9': {
                uintmax_t uprec;
                set_errno(0);
                uprec = malloc_strtoumax(f, (char **)&f, 10);
                assert(uprec != UINTMAX_MAX || get_errno() !=
                    ERANGE);
                prec = (int)uprec;
                break;
            }
            default: break;
            }
            /* Length. */
            label_length:
            switch (*f) {
            case 'l':
                f++;
                if (*f == 'l') {
                    len = 'q';
                    f++;
                } else {
                    len = 'l';
                }
                break;
            case 'q': case 'j': case 't': case 'z':
                len = *f;
                f++;
                break;
            default: break;
            }
            /* Conversion specifier. */
            switch (*f) {
            case '%':
                /* %% */
                APPEND_C(*f);
                f++;
                break;
            case 'd': case 'i': {
                intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
                char buf[D2S_BUFSIZE];

                GET_ARG_NUMERIC(val, len);
                s = d2s(val, (plus_plus ? '+' : (plus_space ?
                    ' ' : '-')), buf, &slen);
                APPEND_PADDED_S(s, slen, width, left_justify);
                f++;
                break;
            } case 'o': {
                uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
                char buf[O2S_BUFSIZE];

                GET_ARG_NUMERIC(val, len | 0x80);
                s = o2s(val, alt_form, buf, &slen);
                APPEND_PADDED_S(s, slen, width, left_justify);
                f++;
                break;
            } case 'u': {
                uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
                char buf[U2S_BUFSIZE];

                GET_ARG_NUMERIC(val, len | 0x80);
                s = u2s(val, 10, false, buf, &slen);
                APPEND_PADDED_S(s, slen, width, left_justify);
                f++;
                break;
            } case 'x': case 'X': {
                uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
                char buf[X2S_BUFSIZE];

                GET_ARG_NUMERIC(val, len | 0x80);
                s = x2s(val, alt_form, *f == 'X', buf, &slen);
                APPEND_PADDED_S(s, slen, width, left_justify);
                f++;
                break;
            } case 'c': {
                unsigned char val;
                char buf[2];

                assert(len == '?' || len == 'l');
                assert_not_implemented(len != 'l');
                val = va_arg(ap, int);
                buf[0] = val;
                buf[1] = '\0';
                APPEND_PADDED_S(buf, 1, width, left_justify);
                f++;
                break;
            } case 's':
                assert(len == '?' || len == 'l');
                assert_not_implemented(len != 'l');
                s = va_arg(ap, char *);
                slen = (prec < 0) ? strlen(s) : (size_t)prec;
                APPEND_PADDED_S(s, slen, width, left_justify);
                f++;
                break;
            case 'p': {
                uintmax_t val;
                char buf[X2S_BUFSIZE];

                GET_ARG_NUMERIC(val, 'p');
                s = x2s(val, true, false, buf, &slen);
                APPEND_PADDED_S(s, slen, width, left_justify);
                f++;
                break;
            } default: not_reached();
            }
            break;
        } default: {
            APPEND_C(*f);
            f++;
            break;
        }}
    }
    label_out:
    if (i < size) {
        str[i] = '\0';
    } else {
        str[size - 1] = '\0';
    }

#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
    return i;
}

JEMALLOC_FORMAT_PRINTF(3, 4)
size_t
malloc_snprintf(char *str, size_t size, const char *format, ...) {
    size_t ret;
    va_list ap;

    va_start(ap, format);
    ret = malloc_vsnprintf(str, size, format, ap);
    va_end(ap);

    return ret;
}

void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap) {
    char buf[MALLOC_PRINTF_BUFSIZE];

    if (write_cb == NULL) {
        /*
         * The caller did not provide an alternate write_cb callback
         * function, so use the default one.  malloc_write() is an
         * inline function, so use malloc_message() directly here.
         */
        write_cb = (je_malloc_message != NULL) ? je_malloc_message :
            wrtmessage;
        cbopaque = NULL;
    }

    malloc_vsnprintf(buf, sizeof(buf), format, ap);
    write_cb(cbopaque, buf);
}

/*
 * Print to a callback function in such a way as to (hopefully) avoid memory
 * allocation.
 */
JEMALLOC_FORMAT_PRINTF(3, 4)
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, ...) {
    va_list ap;

    va_start(ap, format);
    malloc_vcprintf(write_cb, cbopaque, format, ap);
    va_end(ap);
}

/* Print to stderr in such a way as to avoid memory allocation. */
JEMALLOC_FORMAT_PRINTF(1, 2)
void
malloc_printf(const char *format, ...) {
    va_list ap;

    va_start(ap, format);
    malloc_vcprintf(NULL, NULL, format, ap);
    va_end(ap);
}

/*
 * Restore normal assertion macros, in order to make it possible to compile
 * all C files as a single concatenation.
 */
#undef assert
#undef not_reached
#undef not_implemented
#undef assert_not_implemented
#include "jemalloc/internal/assert.h"
18
deps/jemalloc/src/mutex_pool.c
vendored
Normal file
@@ -0,0 +1,18 @@
#define JEMALLOC_MUTEX_POOL_C_

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

bool
mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) {
    for (int i = 0; i < MUTEX_POOL_SIZE; ++i) {
        if (malloc_mutex_init(&pool->mutexes[i], name, rank,
            malloc_mutex_address_ordered)) {
            return true;
        }
    }
    return false;
}
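
/*
 * A mutex pool shards one logical lock across MUTEX_POOL_SIZE mutexes;
 * callers elsewhere hash a key (typically an address) to choose a member, so
 * unrelated objects rarely contend on the same mutex.  The address-ordered
 * rank above keeps locking two pool members at once deadlock-free.
 */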
170
deps/jemalloc/src/nstime.c
vendored
Normal file
@@ -0,0 +1,170 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/nstime.h"

#include "jemalloc/internal/assert.h"

#define BILLION UINT64_C(1000000000)
#define MILLION UINT64_C(1000000)

void
nstime_init(nstime_t *time, uint64_t ns) {
    time->ns = ns;
}

void
nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) {
    time->ns = sec * BILLION + nsec;
}

uint64_t
nstime_ns(const nstime_t *time) {
    return time->ns;
}

uint64_t
nstime_msec(const nstime_t *time) {
    return time->ns / MILLION;
}

uint64_t
nstime_sec(const nstime_t *time) {
    return time->ns / BILLION;
}

uint64_t
nstime_nsec(const nstime_t *time) {
    return time->ns % BILLION;
}

void
nstime_copy(nstime_t *time, const nstime_t *source) {
    *time = *source;
}

int
nstime_compare(const nstime_t *a, const nstime_t *b) {
    return (a->ns > b->ns) - (a->ns < b->ns);
}

void
nstime_add(nstime_t *time, const nstime_t *addend) {
    assert(UINT64_MAX - time->ns >= addend->ns);

    time->ns += addend->ns;
}

void
nstime_iadd(nstime_t *time, uint64_t addend) {
    assert(UINT64_MAX - time->ns >= addend);

    time->ns += addend;
}

void
nstime_subtract(nstime_t *time, const nstime_t *subtrahend) {
    assert(nstime_compare(time, subtrahend) >= 0);

    time->ns -= subtrahend->ns;
}

void
nstime_isubtract(nstime_t *time, uint64_t subtrahend) {
    assert(time->ns >= subtrahend);

    time->ns -= subtrahend;
}

void
nstime_imultiply(nstime_t *time, uint64_t multiplier) {
    assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
        2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));

    time->ns *= multiplier;
}

void
nstime_idivide(nstime_t *time, uint64_t divisor) {
    assert(divisor != 0);

    time->ns /= divisor;
}

uint64_t
nstime_divide(const nstime_t *time, const nstime_t *divisor) {
    assert(divisor->ns != 0);

    return time->ns / divisor->ns;
}
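
/*
 * nstime_t is a single uint64_t nanosecond count, so, e.g., after
 * nstime_init2(&t, 2, 500000000), nstime_ns(&t) == 2500000000,
 * nstime_sec(&t) == 2, and nstime_msec(&t) == 2500.  The asserts above turn
 * overflow and division by zero into debug-build failures instead of silent
 * wraparound.
 */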

#ifdef _WIN32
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time) {
    FILETIME ft;
    uint64_t ticks_100ns;

    GetSystemTimeAsFileTime(&ft);
    ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;

    nstime_init(time, ticks_100ns * 100);
}
#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE)
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time) {
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
    nstime_init2(time, ts.tv_sec, ts.tv_nsec);
}
#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC)
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time) {
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    nstime_init2(time, ts.tv_sec, ts.tv_nsec);
}
#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME)
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time) {
    nstime_init(time, mach_absolute_time());
}
#else
# define NSTIME_MONOTONIC false
static void
nstime_get(nstime_t *time) {
    struct timeval tv;

    gettimeofday(&tv, NULL);
    nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
}
#endif

static bool
nstime_monotonic_impl(void) {
    return NSTIME_MONOTONIC;
#undef NSTIME_MONOTONIC
}
nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl;
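
/*
 * JET_MUTABLE leaves these function pointers writable in jemalloc's test
 * builds and const otherwise, so unit tests can substitute a fake clock
 * without touching production call sites.
 */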

static bool
nstime_update_impl(nstime_t *time) {
    nstime_t old_time;

    nstime_copy(&old_time, time);
    nstime_get(time);

    /* Handle non-monotonic clocks. */
    if (unlikely(nstime_compare(&old_time, time) > 0)) {
        nstime_copy(time, &old_time);
        return true;
    }

    return false;
}
nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl;
423
deps/jemalloc/src/pages.c
vendored
Normal file
@@ -0,0 +1,423 @@
#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_preamble.h"

#include "jemalloc/internal/pages.h"

#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#include <sys/sysctl.h>
#endif

/******************************************************************************/
/* Data. */

/* Actual operating system page size, detected during bootstrap, <= PAGE. */
static size_t os_page;

#ifndef _WIN32
# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
# define PAGES_PROT_DECOMMIT (PROT_NONE)
static int mmap_flags;
#endif
static bool os_overcommits;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void os_pages_unmap(void *addr, size_t size);

/******************************************************************************/

static void *
os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
    assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
    assert(ALIGNMENT_CEILING(size, os_page) == size);
    assert(size != 0);

    if (os_overcommits) {
        *commit = true;
    }

    void *ret;
#ifdef _WIN32
    /*
     * If VirtualAlloc can't allocate at the given address when one is
     * given, it fails and returns NULL.
     */
    ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
        PAGE_READWRITE);
#else
    /*
     * We don't use MAP_FIXED here, because it can cause the *replacement*
     * of existing mappings, and we only want to create new mappings.
     */
    {
        int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;

        ret = mmap(addr, size, prot, mmap_flags, -1, 0);
    }
    assert(ret != NULL);

    if (ret == MAP_FAILED) {
        ret = NULL;
    } else if (addr != NULL && ret != addr) {
        /*
         * We succeeded in mapping memory, but not in the right place.
         */
        os_pages_unmap(ret, size);
        ret = NULL;
    }
#endif
    assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
        ret == addr));
    return ret;
}

static void *
os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
    bool *commit) {
    void *ret = (void *)((uintptr_t)addr + leadsize);

    assert(alloc_size >= leadsize + size);
#ifdef _WIN32
    os_pages_unmap(addr, alloc_size);
    void *new_addr = os_pages_map(ret, size, PAGE, commit);
    if (new_addr == ret) {
        return ret;
    }
    if (new_addr != NULL) {
        os_pages_unmap(new_addr, size);
    }
    return NULL;
#else
    size_t trailsize = alloc_size - leadsize - size;

    if (leadsize != 0) {
        os_pages_unmap(addr, leadsize);
    }
    if (trailsize != 0) {
        os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
    }
    return ret;
#endif
}

static void
os_pages_unmap(void *addr, size_t size) {
    assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
    assert(ALIGNMENT_CEILING(size, os_page) == size);

#ifdef _WIN32
    if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
    if (munmap(addr, size) == -1)
#endif
    {
        char buf[BUFERROR_BUF];

        buferror(get_errno(), buf, sizeof(buf));
        malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
            "VirtualFree"
#else
            "munmap"
#endif
            "(): %s\n", buf);
        if (opt_abort) {
            abort();
        }
    }
}

static void *
pages_map_slow(size_t size, size_t alignment, bool *commit) {
    size_t alloc_size = size + alignment - os_page;
    /* Beware size_t wrap-around. */
    if (alloc_size < size) {
        return NULL;
    }

    void *ret;
    do {
        void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
        if (pages == NULL) {
            return NULL;
        }
        size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
            - (uintptr_t)pages;
        ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
    } while (ret == NULL);

    assert(ret != NULL);
    assert(PAGE_ADDR2BASE(ret) == ret);
    return ret;
}
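
/*
 * The slow path over-allocates by (alignment - os_page) so that some
 * os_page-aligned offset within the mapping is alignment-aligned, then trims
 * both ends.  For example, with size 64K, alignment 2M, and 4K pages, it
 * maps 64K + 2M - 4K and keeps the 64K run starting at the first 2M
 * boundary.
 */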

void *
pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
    assert(alignment >= PAGE);
    assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable, but
     * slow method is to create a mapping that is over-sized, then trim the
     * excess.  However, that always results in one or two calls to
     * os_pages_unmap(), and it can leave holes in the process's virtual
     * memory map if memory grows downward.
     *
     * Optimistically try mapping precisely the right amount before falling
     * back to the slow method, with the expectation that the optimistic
     * approach works most of the time.
     */

    void *ret = os_pages_map(addr, size, os_page, commit);
    if (ret == NULL || ret == addr) {
        return ret;
    }
    assert(addr == NULL);
    if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
        os_pages_unmap(ret, size);
        return pages_map_slow(size, alignment, commit);
    }

    assert(PAGE_ADDR2BASE(ret) == ret);
    return ret;
}

void
pages_unmap(void *addr, size_t size) {
    assert(PAGE_ADDR2BASE(addr) == addr);
    assert(PAGE_CEILING(size) == size);

    os_pages_unmap(addr, size);
}

static bool
pages_commit_impl(void *addr, size_t size, bool commit) {
    assert(PAGE_ADDR2BASE(addr) == addr);
    assert(PAGE_CEILING(size) == size);

    if (os_overcommits) {
        return true;
    }

#ifdef _WIN32
    return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
        PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
#else
    {
        int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
        void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
            -1, 0);
        if (result == MAP_FAILED) {
            return true;
        }
        if (result != addr) {
            /*
             * We succeeded in mapping memory, but not in the right
             * place.
             */
            os_pages_unmap(result, size);
            return true;
        }
        return false;
    }
#endif
}

bool
pages_commit(void *addr, size_t size) {
    return pages_commit_impl(addr, size, true);
}

bool
pages_decommit(void *addr, size_t size) {
    return pages_commit_impl(addr, size, false);
}

bool
pages_purge_lazy(void *addr, size_t size) {
    assert(PAGE_ADDR2BASE(addr) == addr);
    assert(PAGE_CEILING(size) == size);

    if (!pages_can_purge_lazy) {
        return true;
    }

#ifdef _WIN32
    VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
    return false;
#elif defined(JEMALLOC_PURGE_MADVISE_FREE) && \
    !defined(PAGES_CAN_PURGE_LAZY)
    return (madvise(addr, size, MADV_FREE) != 0);
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
    !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
    return (madvise(addr, size, MADV_DONTNEED) != 0);
#else
    not_reached();
#endif
}

bool
pages_purge_forced(void *addr, size_t size) {
    assert(PAGE_ADDR2BASE(addr) == addr);
    assert(PAGE_CEILING(size) == size);

    if (!pages_can_purge_forced) {
        return true;
    }

#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
    defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
    return (madvise(addr, size, MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_MAPS_COALESCE)
    /* Try to overlay a new demand-zeroed mapping. */
    return pages_commit(addr, size);
#else
    not_reached();
#endif
}

bool
pages_huge(void *addr, size_t size) {
    assert(HUGEPAGE_ADDR2BASE(addr) == addr);
    assert(HUGEPAGE_CEILING(size) == size);

#ifdef JEMALLOC_THP
    return (madvise(addr, size, MADV_HUGEPAGE) != 0);
#else
    return true;
#endif
}

bool
pages_nohuge(void *addr, size_t size) {
    assert(HUGEPAGE_ADDR2BASE(addr) == addr);
    assert(HUGEPAGE_CEILING(size) == size);

#ifdef JEMALLOC_THP
    return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
    return false;
#endif
}

static size_t
os_page_detect(void) {
#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    return si.dwPageSize;
#else
    long result = sysconf(_SC_PAGESIZE);
    if (result == -1) {
        return LG_PAGE;
    }
    return (size_t)result;
#endif
}

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
static bool
os_overcommits_sysctl(void) {
    int vm_overcommit;
    size_t sz;

    sz = sizeof(vm_overcommit);
    if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
        return false; /* Error. */
    }

    return ((vm_overcommit & 0x3) == 0);
}
#endif

#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/*
 * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
 * reentry during bootstrapping if another library has interposed system call
 * wrappers.
 */
static bool
os_overcommits_proc(void) {
    int fd;
    char buf[1];
    ssize_t nread;

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
    fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY |
        O_CLOEXEC);
#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
    fd = (int)syscall(SYS_openat,
        AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
#else
    fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
#endif
    if (fd == -1) {
        return false; /* Error. */
    }

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
    nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
#else
    nread = read(fd, &buf, sizeof(buf));
#endif

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
    syscall(SYS_close, fd);
#else
    close(fd);
#endif

    if (nread < 1) {
        return false; /* Error. */
    }
    /*
     * /proc/sys/vm/overcommit_memory meanings:
     * 0: Heuristic overcommit.
     * 1: Always overcommit.
     * 2: Never overcommit.
     */
    return (buf[0] == '0' || buf[0] == '1');
}
#endif
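
/*
 * Reading one byte of /proc/sys/vm/overcommit_memory suffices because the
 * file holds a single digit; modes 0 and 1 permit overcommit.  When
 * overcommit is active, pages_boot() below adds MAP_NORESERVE where
 * available, and pages_commit_impl() above reports explicit commit/decommit
 * as unsupported.
 */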

bool
pages_boot(void) {
    os_page = os_page_detect();
    if (os_page > PAGE) {
        malloc_write("<jemalloc>: Unsupported system page size\n");
        if (opt_abort) {
            abort();
        }
        return true;
    }

#ifndef _WIN32
    mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
    os_overcommits = os_overcommits_sysctl();
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
    os_overcommits = os_overcommits_proc();
# ifdef MAP_NORESERVE
    if (os_overcommits) {
        mmap_flags |= MAP_NORESERVE;
    }
# endif
#else
    os_overcommits = false;
#endif

    return false;
}
3
deps/jemalloc/src/prng.c
vendored
Normal file
@@ -0,0 +1,3 @@
#define JEMALLOC_PRNG_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
4
deps/jemalloc/src/spin.c
vendored
Normal file
@@ -0,0 +1,4 @@
#define JEMALLOC_SPIN_C_
#include "jemalloc/internal/jemalloc_preamble.h"

#include "jemalloc/internal/spin.h"
106
deps/jemalloc/src/sz.c
vendored
Normal file
@@ -0,0 +1,106 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/sz.h"

JEMALLOC_ALIGNED(CACHELINE)
const size_t sz_pind2sz_tab[NPSIZES+1] = {
#define PSZ_yes(lg_grp, ndelta, lg_delta) \
    (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
#define PSZ_no(lg_grp, ndelta, lg_delta)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
    PSZ_##psz(lg_grp, ndelta, lg_delta)
    SIZE_CLASSES
#undef PSZ_yes
#undef PSZ_no
#undef SC
    (LARGE_MAXCLASS + PAGE)
};

JEMALLOC_ALIGNED(CACHELINE)
const size_t sz_index2size_tab[NSIZES] = {
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
    ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
    SIZE_CLASSES
#undef SC
};

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t sz_size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define S2B_0(i) i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define S2B_1(i) i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define S2B_2(i) i,
#elif LG_TINY_MIN == 3
#define S2B_3(i) i,
#elif LG_TINY_MIN == 4
#define S2B_4(i) i,
#elif LG_TINY_MIN == 5
#define S2B_5(i) i,
#elif LG_TINY_MIN == 6
#define S2B_6(i) i,
#elif LG_TINY_MIN == 7
#define S2B_7(i) i,
#elif LG_TINY_MIN == 8
#define S2B_8(i) i,
#elif LG_TINY_MIN == 9
#define S2B_9(i) i,
#elif LG_TINY_MIN == 10
#define S2B_10(i) i,
#elif LG_TINY_MIN == 11
#define S2B_11(i) i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define S2B_1(i) S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define S2B_2(i) S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define S2B_3(i) S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define S2B_4(i) S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define S2B_5(i) S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define S2B_6(i) S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define S2B_7(i) S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define S2B_8(i) S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define S2B_9(i) S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define S2B_10(i) S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
    S2B_##lg_delta_lookup(index)
    SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};
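
/*
 * The S2B_n macros double recursively, so each lookup-eligible size class
 * contributes 2^(lg_delta - LG_TINY_MIN) copies of its index; the result is
 * a dense table mapping every LG_TINY_MIN-granular size up to the lookup
 * limit to its size-class index.
 */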
3
deps/jemalloc/src/ticker.c
vendored
Normal file
@@ -0,0 +1,3 @@
#define JEMALLOC_TICKER_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
100
deps/jemalloc/src/witness.c
vendored
Normal file
@@ -0,0 +1,100 @@
#define JEMALLOC_WITNESS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"

void
witness_init(witness_t *witness, const char *name, witness_rank_t rank,
    witness_comp_t *comp, void *opaque) {
    witness->name = name;
    witness->rank = rank;
    witness->comp = comp;
    witness->opaque = opaque;
}

static void
witness_lock_error_impl(const witness_list_t *witnesses,
    const witness_t *witness) {
    witness_t *w;

    malloc_printf("<jemalloc>: Lock rank order reversal:");
    ql_foreach(w, witnesses, link) {
        malloc_printf(" %s(%u)", w->name, w->rank);
    }
    malloc_printf(" %s(%u)\n", witness->name, witness->rank);
    abort();
}
witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl;

static void
witness_owner_error_impl(const witness_t *witness) {
    malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
        witness->rank);
    abort();
}
witness_owner_error_t *JET_MUTABLE witness_owner_error =
    witness_owner_error_impl;

static void
witness_not_owner_error_impl(const witness_t *witness) {
    malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
        witness->rank);
    abort();
}
witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error =
    witness_not_owner_error_impl;

static void
witness_depth_error_impl(const witness_list_t *witnesses,
    witness_rank_t rank_inclusive, unsigned depth) {
    witness_t *w;

    malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth,
        (depth != 1) ? "s" : "", rank_inclusive);
    ql_foreach(w, witnesses, link) {
        malloc_printf(" %s(%u)", w->name, w->rank);
    }
    malloc_printf("\n");
    abort();
}
witness_depth_error_t *JET_MUTABLE witness_depth_error =
    witness_depth_error_impl;

void
witnesses_cleanup(witness_tsd_t *witness_tsd) {
    witness_assert_lockless(witness_tsd_tsdn(witness_tsd));

    /* Do nothing. */
}

void
witness_prefork(witness_tsd_t *witness_tsd) {
    if (!config_debug) {
        return;
    }
    witness_tsd->forking = true;
}

void
witness_postfork_parent(witness_tsd_t *witness_tsd) {
    if (!config_debug) {
        return;
    }
    witness_tsd->forking = false;
}

void
witness_postfork_child(witness_tsd_t *witness_tsd) {
    if (!config_debug) {
        return;
    }
#ifndef JEMALLOC_MUTEX_INIT_CB
    witness_list_t *witnesses;

    witnesses = &witness_tsd->witnesses;
    ql_new(witnesses);
#endif
    witness_tsd->forking = false;
}