mirror of https://gitlab.winehq.org/wine/wine-gecko.git
synced 2024-09-13 09:24:08 -07:00

Bug 799090 - Update jemalloc3 to commit d0ffd8e. r=jlebar

This commit is contained in: parent 99b86d0934, commit 5b63083077
@@ -6,6 +6,21 @@ found in the git revision history:
     http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
     git://canonware.com/jemalloc.git
 
+* 3.x.x (XXX not yet released)
+
+  New features:
+  - Auto-detect whether running inside Valgrind, thus removing the need to
+    manually specify MALLOC_CONF=valgrind:true.
+
+  Incompatible changes:
+  - Disable tcache by default if running inside Valgrind, in order to avoid
+    making unallocated objects appear reachable to Valgrind.
+
+  Bug fixes:
+  - Fix heap profiling crash if sampled object is freed via realloc(p, 0).
+  - Remove const from __*_hook variable declarations, so that glibc can modify
+    them during process forking.
+
 * 3.0.0 (May 11, 2012)
 
   Although this version adds some major new features, the primary focus is on
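A minimal sketch of the auto-detection named in the ChangeLog entry above, assuming only Valgrind's public client-request header; the malloc_conf_init() hunk further down keys off the same RUNNING_ON_VALGRIND macro, which evaluates to zero when the process runs natively:

#include <valgrind/valgrind.h>  /* defines RUNNING_ON_VALGRIND */
#include <stdio.h>

int
main(void)
{

        if (RUNNING_ON_VALGRIND != 0)
                printf("under Valgrind: tcache off, quarantine/redzone on\n");
        else
                printf("native run: defaults unchanged\n");
        return (0);
}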
@@ -1 +1 @@
-3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046
+1.0.0-357-gd0ffd8ed4f6aa4cf7248028eddfcb35f93247fe4
memory/jemalloc/src/configure (vendored)
@@ -4448,6 +4448,7 @@ MKLIB='ar crus $@'
 CC_MM=1
 
+default_munmap="1"
 JEMALLOC_USABLE_SIZE_CONST="const"
 case "${host}" in
   *-*-darwin*)
 	CFLAGS="$CFLAGS"
@@ -4477,6 +4478,7 @@ case "${host}" in
 
 $as_echo "#define JEMALLOC_THREADED_INIT  " >>confdefs.h
 
 	JEMALLOC_USABLE_SIZE_CONST=""
+	default_munmap="0"
 	;;
   *-*-netbsd*)
@@ -4552,6 +4554,10 @@ $as_echo "Unsupported operating system: ${host}" >&6; }
 	abi="elf"
 	;;
 esac
+cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_USABLE_SIZE_CONST $JEMALLOC_USABLE_SIZE_CONST
+_ACEOF
+
 
 
@@ -237,6 +237,7 @@ dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
 dnl definitions need to be seen before any headers are included, which is a pain
 dnl to make happen otherwise.
+default_munmap="1"
 JEMALLOC_USABLE_SIZE_CONST="const"
 case "${host}" in
   *-*-darwin*)
 	CFLAGS="$CFLAGS"
@@ -262,6 +263,7 @@ case "${host}" in
 	abi="elf"
 	AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
 	AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
 	JEMALLOC_USABLE_SIZE_CONST=""
+	default_munmap="0"
 	;;
   *-*-netbsd*)
@@ -323,6 +325,7 @@ case "${host}" in
 	abi="elf"
 	;;
 esac
+AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST])
 AC_SUBST([abi])
 AC_SUBST([RPATH])
 AC_SUBST([LD_PRELOAD_VAR])
@@ -846,7 +846,9 @@ for (i = 0; i < nbins; i++) {
         <literal>0x5a</literal>.  This is intended for debugging and will
         impact performance negatively.  This option is disabled by default
         unless <option>--enable-debug</option> is specified during
-        configuration, in which case it is enabled by default.</para></listitem>
+        configuration, in which case it is enabled by default unless running
+        inside <ulink
+        url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.quarantine">
@@ -865,8 +867,9 @@ for (i = 0; i < nbins; i++) {
         enabled.  This feature is of particular use in combination with <ulink
         url="http://valgrind.org/">Valgrind</ulink>, which can detect attempts
         to access quarantined objects.  This is intended for debugging and will
-        impact performance negatively.  The default quarantine size is
-        0.</para></listitem>
+        impact performance negatively.  The default quarantine size is 0 unless
+        running inside Valgrind, in which case the default is 16
+        MiB.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.redzone">
@@ -885,7 +888,7 @@ for (i = 0; i < nbins; i++) {
         which needs redzones in order to do effective buffer overflow/underflow
         detection.  This option is intended for debugging and will impact
         performance negatively.  This option is disabled by
-        default.</para></listitem>
+        default unless running inside Valgrind.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.zero">
@@ -926,15 +929,9 @@ for (i = 0; i < nbins; i++) {
         [<option>--enable-valgrind</option>]
         </term>
         <listitem><para><ulink url="http://valgrind.org/">Valgrind</ulink>
-        support enabled/disabled.  If enabled, several other options are
-        automatically modified during options processing to work well with
-        Valgrind: <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link>
-        and <link linkend="opt.zero"><mallctl>opt.zero</mallctl></link> are set
-        to false, <link
-        linkend="opt.quarantine"><mallctl>opt.quarantine</mallctl></link> is
-        set to 16 MiB, and <link
-        linkend="opt.redzone"><mallctl>opt.redzone</mallctl></link> is set to
-        true.  This option is disabled by default.</para></listitem>
+        support enabled/disabled.  This option is vestigial because jemalloc
+        auto-detects whether it is running inside Valgrind.  This option is
+        disabled by default, unless running inside Valgrind.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.xmalloc">
@@ -972,7 +969,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         <link
         linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
         option for related tuning information.  This option is enabled by
-        default.</para></listitem>
+        default unless running inside <ulink
+        url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.lg_tcache_max">
@@ -1865,9 +1863,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
   it detects, because the performance impact for storing such information
   would be prohibitive.  However, jemalloc does integrate with the most
   excellent <ulink url="http://valgrind.org/">Valgrind</ulink> tool if the
-  <option>--enable-valgrind</option> configuration option is enabled and the
-  <link linkend="opt.valgrind"><mallctl>opt.valgrind</mallctl></link> option
-  is enabled.</para>
+  <option>--enable-valgrind</option> configuration option is enabled.</para>
   </refsect1>
   <refsect1 id="diagnostic_messages">
     <title>DIAGNOSTIC MESSAGES</title>
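The Valgrind-adjusted defaults documented above can be read back through the mallctl interface; a small sketch, assuming the je_-prefixed public names declared in this tree (under Valgrind, opt.quarantine should report 16 MiB and opt.redzone true with no MALLOC_CONF set):

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
        size_t quarantine, qsz = sizeof(quarantine);
        bool redzone;
        size_t rsz = sizeof(redzone);

        je_mallctl("opt.quarantine", &quarantine, &qsz, NULL, 0);
        je_mallctl("opt.redzone", &redzone, &rsz, NULL, 0);
        printf("opt.quarantine=%zu opt.redzone=%d\n", quarantine,
            (int)redzone);
        return (0);
}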
@@ -45,6 +45,9 @@ extern size_t arena_maxclass; /* Max size class for arenas. */
 void	*chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
 void	chunk_dealloc(void *chunk, size_t size, bool unmap);
 bool	chunk_boot(void);
+void	chunk_prefork(void);
+void	chunk_postfork_parent(void);
+void	chunk_postfork_child(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -9,7 +9,7 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void	pages_purge(void *addr, size_t length);
+bool	pages_purge(void *addr, size_t length);
 
 void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
 bool	chunk_dealloc_mmap(void *chunk, size_t size);
@@ -75,6 +75,9 @@ int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
 int	ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen);
 bool	ctl_boot(void);
+void	ctl_prefork(void);
+void	ctl_postfork_parent(void);
+void	ctl_postfork_child(void);
 
 #define	xmallctl(name, oldp, oldlenp, newp, newlen) do {		\
 	if (je_mallctl(name, oldp, oldlenp, newp, newlen)		\
@@ -23,6 +23,9 @@ struct extent_node_s {
 
 	/* Total region size. */
 	size_t	size;
+
+	/* True if zero-filled; used by chunk recycling code. */
+	bool	zeroed;
 };
 typedef rb_tree(extent_node_t) extent_tree_t;
 
@@ -270,6 +270,9 @@ static const bool config_ivsalloc =
 #  ifdef __arm__
 #    define LG_QUANTUM		3
 #  endif
+#  ifdef __hppa__
+#    define LG_QUANTUM		4
+#  endif
 #  ifdef __mips__
 #    define LG_QUANTUM		3
 #  endif
@@ -424,6 +427,7 @@ static const bool config_ivsalloc =
 	VALGRIND_FREELIKE_BLOCK(ptr, rzsize);				\
 } while (0)
 #else
+#define	RUNNING_ON_VALGRIND	((unsigned)0)
 #define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
 #define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
 #define	VALGRIND_FREELIKE_BLOCK(addr, rzB)
@@ -59,6 +59,7 @@
 #define	arenas_lock JEMALLOC_N(arenas_lock)
 #define	arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index)
 #define	arenas_tls JEMALLOC_N(arenas_tls)
+#define	arenas_tsd JEMALLOC_N(arenas_tsd)
 #define	arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
 #define	arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
 #define	arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
@@ -104,6 +105,9 @@
 #define	chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
 #define	chunk_in_dss JEMALLOC_N(chunk_in_dss)
 #define	chunk_npages JEMALLOC_N(chunk_npages)
+#define	chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
+#define	chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
+#define	chunk_prefork JEMALLOC_N(chunk_prefork)
 #define	chunks_mtx JEMALLOC_N(chunks_mtx)
 #define	chunks_rtree JEMALLOC_N(chunks_rtree)
 #define	chunksize JEMALLOC_N(chunksize)
@@ -129,6 +133,9 @@
 #define	ctl_bymib JEMALLOC_N(ctl_bymib)
 #define	ctl_byname JEMALLOC_N(ctl_byname)
 #define	ctl_nametomib JEMALLOC_N(ctl_nametomib)
+#define	ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
+#define	ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
+#define	ctl_prefork JEMALLOC_N(ctl_prefork)
 #define	extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
 #define	extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
 #define	extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
@@ -161,6 +168,7 @@
 #define	extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
 #define	extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
 #define	extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
+#define	get_errno JEMALLOC_N(get_errno)
 #define	hash JEMALLOC_N(hash)
 #define	huge_allocated JEMALLOC_N(huge_allocated)
 #define	huge_boot JEMALLOC_N(huge_boot)
@@ -254,6 +262,9 @@
 #define	prof_lookup JEMALLOC_N(prof_lookup)
 #define	prof_malloc JEMALLOC_N(prof_malloc)
 #define	prof_mdump JEMALLOC_N(prof_mdump)
+#define	prof_postfork_child JEMALLOC_N(prof_postfork_child)
+#define	prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
+#define	prof_prefork JEMALLOC_N(prof_prefork)
 #define	prof_promote JEMALLOC_N(prof_promote)
 #define	prof_realloc JEMALLOC_N(prof_realloc)
 #define	prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
@@ -264,6 +275,7 @@
 #define	prof_tdata_init JEMALLOC_N(prof_tdata_init)
 #define	prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
 #define	prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
+#define	prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
 #define	prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
 #define	prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
 #define	prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
@@ -278,9 +290,13 @@
 #define	rtree_get JEMALLOC_N(rtree_get)
 #define	rtree_get_locked JEMALLOC_N(rtree_get_locked)
 #define	rtree_new JEMALLOC_N(rtree_new)
+#define	rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
+#define	rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
+#define	rtree_prefork JEMALLOC_N(rtree_prefork)
 #define	rtree_set JEMALLOC_N(rtree_set)
 #define	s2u JEMALLOC_N(s2u)
 #define	sa2u JEMALLOC_N(sa2u)
+#define	set_errno JEMALLOC_N(set_errno)
 #define	stats_arenas_i_bins_j_index JEMALLOC_N(stats_arenas_i_bins_j_index)
 #define	stats_arenas_i_index JEMALLOC_N(stats_arenas_i_index)
 #define	stats_arenas_i_lruns_j_index JEMALLOC_N(stats_arenas_i_lruns_j_index)
@@ -311,6 +327,7 @@
 #define	tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
 #define	tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
 #define	tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
+#define	tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
 #define	tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
 #define	tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
 #define	tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
@@ -325,6 +342,7 @@
 #define	tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
 #define	tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
 #define	tcache_tls JEMALLOC_N(tcache_tls)
+#define	tcache_tsd JEMALLOC_N(tcache_tsd)
 #define	tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
 #define	tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
 #define	tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
@@ -332,6 +350,7 @@
 #define	thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
 #define	thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
 #define	thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
+#define	thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
 #define	thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
 #define	thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
 #define	thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
@@ -223,6 +223,9 @@ void prof_tdata_cleanup(void *arg);
 void	prof_boot0(void);
 void	prof_boot1(void);
 bool	prof_boot2(void);
+void	prof_prefork(void);
+void	prof_postfork_parent(void);
+void	prof_postfork_child(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -506,7 +509,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
 		if ((uintptr_t)cnt > (uintptr_t)1U) {
 			prof_ctx_set(ptr, cnt->ctx);
 			cnt->epoch++;
-		} else
+		} else if (ptr != NULL)
 			prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
 	/*********/
 	mb_write();
@@ -36,6 +36,9 @@ struct rtree_s {
 #ifdef JEMALLOC_H_EXTERNS
 
 rtree_t	*rtree_new(unsigned bits);
+void	rtree_prefork(rtree_t *rtree);
+void	rtree_postfork_parent(rtree_t *rtree);
+void	rtree_postfork_child(rtree_t *rtree);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -59,7 +59,8 @@ JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
 JEMALLOC_EXPORT void *	je_valloc(size_t size) JEMALLOC_ATTR(malloc);
 #endif
 
-JEMALLOC_EXPORT size_t	je_malloc_usable_size(const void *ptr);
+JEMALLOC_EXPORT size_t	je_malloc_usable_size(
+    JEMALLOC_USABLE_SIZE_CONST void *ptr);
 JEMALLOC_EXPORT void	je_malloc_stats_print(void (*write_cb)(void *,
     const char *), void *je_cbopaque, const char *opts);
 JEMALLOC_EXPORT int	je_mallctl(const char *name, void *oldp,
@@ -221,6 +221,15 @@
 #undef JEMALLOC_OVERRIDE_MEMALIGN
 #undef JEMALLOC_OVERRIDE_VALLOC
 
+/*
+ * At least Linux omits the "const" in:
+ *
+ *   size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#undef JEMALLOC_USABLE_SIZE_CONST
+
 /*
  * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
  */
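For illustration: in the configure.ac hunk above, the branch that also defines JEMALLOC_PURGE_MADVISE_DONTNEED and JEMALLOC_THREADED_INIT (evidently the Linux branch) sets JEMALLOC_USABLE_SIZE_CONST to the empty string, so the exported prototype preprocesses to size_t je_malloc_usable_size(void *ptr), matching glibc's non-const malloc_usable_size(); platforms that keep the default "const" retain the const-qualified prototype.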
@@ -551,24 +551,12 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 {
 	ql_head(arena_chunk_map_t) mapelms;
 	arena_chunk_map_t *mapelm;
-	size_t pageind, flag_unzeroed;
+	size_t pageind;
 	size_t ndirty;
 	size_t nmadvise;
 
 	ql_new(&mapelms);
 
-	flag_unzeroed =
-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-   /*
-    * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
-    * mappings, but not for file-backed mappings.
-    */
-	    0
-#else
-	    CHUNK_MAP_UNZEROED
-#endif
-	    ;
-
 	/*
 	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
 	 * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
@@ -603,26 +591,12 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 			assert(arena_mapbits_dirty_get(chunk, pageind) ==
 			    arena_mapbits_dirty_get(chunk, pageind+npages-1));
 			if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
-				size_t i;
-
 				arena_avail_tree_remove(
 				    &arena->runs_avail_dirty, mapelm);
 
-				arena_mapbits_unzeroed_set(chunk, pageind,
-				    flag_unzeroed);
 				arena_mapbits_large_set(chunk, pageind,
 				    (npages << LG_PAGE), 0);
-				/*
-				 * Update internal elements in the page map, so
-				 * that CHUNK_MAP_UNZEROED is properly set.
-				 */
-				for (i = 1; i < npages - 1; i++) {
-					arena_mapbits_unzeroed_set(chunk,
-					    pageind+i, flag_unzeroed);
-				}
 				if (npages > 1) {
-					arena_mapbits_unzeroed_set(chunk,
-					    pageind+npages-1, flag_unzeroed);
 					arena_mapbits_large_set(chunk,
 					    pageind+npages-1, 0, 0);
 				}
@@ -685,14 +659,30 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 		    sizeof(arena_chunk_map_t)) + map_bias;
 		size_t npages = arena_mapbits_large_size_get(chunk, pageind) >>
 		    LG_PAGE;
+		bool unzeroed;
+		size_t flag_unzeroed, i;
 
 		assert(pageind + npages <= chunk_npages);
 		assert(ndirty >= npages);
 		if (config_debug)
 			ndirty -= npages;
 
-		pages_purge((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
-		    (npages << LG_PAGE));
+		unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
+		    LG_PAGE)), (npages << LG_PAGE));
+		flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
+		/*
+		 * Set the unzeroed flag for all pages, now that pages_purge()
+		 * has returned whether the pages were zeroed as a side effect
+		 * of purging.  This chunk map modification is safe even though
+		 * the arena mutex isn't currently owned by this thread,
+		 * because the run is marked as allocated, thus protecting it
+		 * from being modified by any other thread.  As long as these
+		 * writes don't perturb the first and last elements'
+		 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
+		 */
+		for (i = 0; i < npages; i++) {
+			arena_mapbits_unzeroed_set(chunk, pageind+i,
+			    flag_unzeroed);
+		}
 		if (config_stats)
 			nmadvise++;
 	}
@@ -43,6 +43,7 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
 	extent_node_t *node;
 	extent_node_t key;
 	size_t alloc_size, leadsize, trailsize;
+	bool zeroed;
 
 	if (base) {
 		/*
@@ -107,17 +108,18 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
 	}
 	malloc_mutex_unlock(&chunks_mtx);
 
-	if (node != NULL)
+	zeroed = false;
+	if (node != NULL) {
+		if (node->zeroed) {
+			zeroed = true;
+			*zero = true;
+		}
 		base_node_dealloc(node);
-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-	/* Pages are zeroed as a side effect of pages_purge(). */
-	*zero = true;
-#else
-	if (*zero) {
+	}
+	if (zeroed == false && *zero) {
 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		memset(ret, 0, size);
 	}
-#endif
 	return (ret);
 }
 
@@ -191,9 +193,10 @@ label_return:
 static void
 chunk_record(void *chunk, size_t size)
 {
+	bool unzeroed;
 	extent_node_t *xnode, *node, *prev, key;
 
-	pages_purge(chunk, size);
+	unzeroed = pages_purge(chunk, size);
 
 	/*
 	 * Allocate a node before acquiring chunks_mtx even though it might not
@@ -216,6 +219,7 @@ chunk_record(void *chunk, size_t size)
 		extent_tree_szad_remove(&chunks_szad, node);
 		node->addr = chunk;
 		node->size += size;
+		node->zeroed = (node->zeroed && (unzeroed == false));
 		extent_tree_szad_insert(&chunks_szad, node);
 		if (xnode != NULL)
 			base_node_dealloc(xnode);
@@ -234,6 +238,7 @@ chunk_record(void *chunk, size_t size)
 		node = xnode;
 		node->addr = chunk;
 		node->size = size;
+		node->zeroed = (unzeroed == false);
 		extent_tree_ad_insert(&chunks_ad, node);
 		extent_tree_szad_insert(&chunks_szad, node);
 	}
@@ -253,6 +258,7 @@ chunk_record(void *chunk, size_t size)
 		extent_tree_szad_remove(&chunks_szad, node);
 		node->addr = prev->addr;
 		node->size += prev->size;
+		node->zeroed = (node->zeroed && prev->zeroed);
 		extent_tree_szad_insert(&chunks_szad, node);
 
 		base_node_dealloc(prev);
@@ -312,3 +318,33 @@ chunk_boot(void)
 
 	return (false);
 }
+
+void
+chunk_prefork(void)
+{
+
+	malloc_mutex_lock(&chunks_mtx);
+	if (config_ivsalloc)
+		rtree_prefork(chunks_rtree);
+	chunk_dss_prefork();
+}
+
+void
+chunk_postfork_parent(void)
+{
+
+	chunk_dss_postfork_parent();
+	if (config_ivsalloc)
+		rtree_postfork_parent(chunks_rtree);
+	malloc_mutex_postfork_parent(&chunks_mtx);
+}
+
+void
+chunk_postfork_child(void)
+{
+
+	chunk_dss_postfork_child();
+	if (config_ivsalloc)
+		rtree_postfork_child(chunks_rtree);
+	malloc_mutex_postfork_child(&chunks_mtx);
+}
@@ -113,22 +113,30 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
 #endif
 }
 
-void
+bool
 pages_purge(void *addr, size_t length)
 {
+	bool unzeroed;
 
 #ifdef _WIN32
 	VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
+	unzeroed = true;
 #else
 #  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
 #    define JEMALLOC_MADV_PURGE MADV_DONTNEED
+#    define JEMALLOC_MADV_ZEROS true
 #  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
 #    define JEMALLOC_MADV_PURGE MADV_FREE
+#    define JEMALLOC_MADV_ZEROS false
 #  else
 #    error "No method defined for purging unused dirty pages."
 #  endif
-	madvise(addr, length, JEMALLOC_MADV_PURGE);
+	int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
+	unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
 #  undef JEMALLOC_MADV_PURGE
+#  undef JEMALLOC_MADV_ZEROS
 #endif
+	return (unzeroed);
 }
 
 static void *
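The new return-value contract can be restated as a standalone sketch (an approximation, not jemalloc's exact code): purging reports whether the range may still hold stale bytes, so callers re-zero only when the kernel gives no zero-fill guarantee.

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/* Returns true if the purged range may still contain non-zero data. */
static bool
purge_is_unzeroed(void *addr, size_t length)
{
#ifdef MADV_DONTNEED
        /* Anonymous private pages read back as zeros after a successful
         * MADV_DONTNEED, so only a failed call leaves stale contents. */
        return (madvise(addr, length, MADV_DONTNEED) != 0);
#else
        /* No zero-fill guarantee (e.g. MADV_FREE-style semantics). */
        return (true);
#endif
}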
@@ -827,6 +827,27 @@ ctl_boot(void)
 	return (false);
 }
 
+void
+ctl_prefork(void)
+{
+
+	malloc_mutex_lock(&ctl_mtx);
+}
+
+void
+ctl_postfork_parent(void)
+{
+
+	malloc_mutex_postfork_parent(&ctl_mtx);
+}
+
+void
+ctl_postfork_child(void)
+{
+
+	malloc_mutex_postfork_child(&ctl_mtx);
+}
+
 /******************************************************************************/
 /* *_ctl() functions. */
 
@@ -1032,8 +1053,8 @@ thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
 	}
 	READ(oldval, bool);
 
-label_return:
 	ret = 0;
+label_return:
 	return (ret);
 }
 
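The reordering in thread_tcache_enabled_ctl() is a subtle bug fix: previously, error paths that jumped to label_return: fell through the ret = 0; assignment and reported success; with the assignment hoisted above the label, only the fall-through success path returns 0.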
@@ -254,12 +254,13 @@ malloc_ncpus(void)
 	result = si.dwNumberOfProcessors;
 #else
 	result = sysconf(_SC_NPROCESSORS_ONLN);
+#endif
 	if (result == -1) {
 		/* Error. */
 		ret = 1;
-	}
-#endif
-	ret = (unsigned)result;
+	} else {
+		ret = (unsigned)result;
+	}
 
 	return (ret);
 }
@@ -377,6 +378,22 @@ malloc_conf_init(void)
 	const char *opts, *k, *v;
 	size_t klen, vlen;
 
+	/*
+	 * Automatically configure valgrind before processing options.  The
+	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
+	 */
+	if (config_valgrind) {
+		opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
+		if (config_fill && opt_valgrind) {
+			opt_junk = false;
+			assert(opt_zero == false);
+			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
+			opt_redzone = true;
+		}
+		if (config_tcache && opt_valgrind)
+			opt_tcache = false;
+	}
+
 	for (i = 0; i < 3; i++) {
 		/* Get runtime configuration. */
 		switch (i) {
@@ -553,20 +570,7 @@ malloc_conf_init(void)
 				CONF_HANDLE_BOOL(opt_utrace, "utrace")
 			}
 			if (config_valgrind) {
-				bool hit;
-				CONF_HANDLE_BOOL_HIT(opt_valgrind,
-				    "valgrind", hit)
-				if (config_fill && opt_valgrind && hit) {
-					opt_junk = false;
-					opt_zero = false;
-					if (opt_quarantine == 0) {
-						opt_quarantine =
-						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
-					}
-					opt_redzone = true;
-				}
-				if (hit)
-					continue;
+				CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
 			}
 			if (config_xmalloc) {
 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
@@ -1262,11 +1266,10 @@ je_valloc(size_t size)
  * passed an extra argument for the caller return address, which will be
  * ignored.
  */
-JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free;
-JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc;
-JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) =
-    je_realloc;
-JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
+JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
+JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
+JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
+JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
     je_memalign;
 #endif
 
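This pairs with the ChangeLog entry near the top: glibc assigns to the __*_hook variables while forking, and a const qualifier permits the compiler to place them in read-only storage, so the declarations must stay writable.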
@@ -1279,7 +1282,7 @@ JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
  */
 
 size_t
-je_malloc_usable_size(const void *ptr)
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
 {
 	size_t ret;
 
@@ -1611,6 +1614,27 @@ je_nallocm(size_t *rsize, size_t size, int flags)
  * malloc during fork().
  */
 
+/*
+ * If an application creates a thread before doing any allocation in the main
+ * thread, then calls fork(2) in the main thread followed by memory allocation
+ * in the child process, a race can occur that results in deadlock within the
+ * child: the main thread may have forked while the created thread had
+ * partially initialized the allocator.  Ordinarily jemalloc prevents
+ * fork/malloc races via the following functions it registers during
+ * initialization using pthread_atfork(), but of course that does no good if
+ * the allocator isn't fully initialized at fork time.  The following library
+ * constructor is a partial solution to this problem.  It may still be
+ * possible to trigger the deadlock described above, but doing so would
+ * involve forking via a library constructor that runs before jemalloc's runs.
+ */
+JEMALLOC_ATTR(constructor)
+static void
+jemalloc_constructor(void)
+{
+
+	malloc_init();
+}
+
 #ifndef JEMALLOC_MUTEX_INIT_CB
 void
 jemalloc_prefork(void)
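A minimal sketch of the registration the comment above refers to, assuming pthread_atfork() availability; the actual call site lives elsewhere in this file's initialization path:

#include <pthread.h>

void    jemalloc_prefork(void);
void    jemalloc_postfork_parent(void);
void    jemalloc_postfork_child(void);

/* Registered once during allocator initialization: every subsequent fork()
 * then acquires all allocator mutexes in the parent beforehand and releases
 * them on both sides afterward. */
static void
register_fork_handlers(void)
{

        pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
            jemalloc_postfork_child);
}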
@@ -1628,14 +1652,16 @@ _malloc_prefork(void)
 	assert(malloc_initialized);
 
 	/* Acquire all mutexes in a safe order. */
+	ctl_prefork();
 	malloc_mutex_prefork(&arenas_lock);
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i] != NULL)
 			arena_prefork(arenas[i]);
 	}
+	prof_prefork();
+	chunk_prefork();
 	base_prefork();
 	huge_prefork();
-	chunk_dss_prefork();
 }
 
 #ifndef JEMALLOC_MUTEX_INIT_CB
@@ -1655,14 +1681,16 @@ _malloc_postfork(void)
 	assert(malloc_initialized);
 
 	/* Release all mutexes, now that fork() has completed. */
-	chunk_dss_postfork_parent();
 	huge_postfork_parent();
 	base_postfork_parent();
+	chunk_postfork_parent();
+	prof_postfork_parent();
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i] != NULL)
 			arena_postfork_parent(arenas[i]);
 	}
 	malloc_mutex_postfork_parent(&arenas_lock);
+	ctl_postfork_parent();
 }
 
 void
@@ -1673,14 +1701,16 @@ jemalloc_postfork_child(void)
 	assert(malloc_initialized);
 
 	/* Release all mutexes, now that fork() has completed. */
-	chunk_dss_postfork_child();
 	huge_postfork_child();
 	base_postfork_child();
+	chunk_postfork_child();
+	prof_postfork_child();
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i] != NULL)
 			arena_postfork_child(arenas[i]);
 	}
 	malloc_mutex_postfork_child(&arenas_lock);
+	ctl_postfork_child();
 }
 
 /******************************************************************************/
@@ -64,7 +64,7 @@ pthread_create(pthread_t *__restrict thread,
 /******************************************************************************/
 
 #ifdef JEMALLOC_MUTEX_INIT_CB
-int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+JEMALLOC_EXPORT int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
     void *(calloc_cb)(size_t, size_t));
 #endif
 
@@ -1270,4 +1270,46 @@ prof_boot2(void)
 
 	return (false);
 }
+
+void
+prof_prefork(void)
+{
+
+	if (opt_prof) {
+		unsigned i;
+
+		malloc_mutex_lock(&bt2ctx_mtx);
+		malloc_mutex_lock(&prof_dump_seq_mtx);
+		for (i = 0; i < PROF_NCTX_LOCKS; i++)
+			malloc_mutex_lock(&ctx_locks[i]);
+	}
+}
+
+void
+prof_postfork_parent(void)
+{
+
+	if (opt_prof) {
+		unsigned i;
+
+		for (i = 0; i < PROF_NCTX_LOCKS; i++)
+			malloc_mutex_postfork_parent(&ctx_locks[i]);
+		malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
+		malloc_mutex_postfork_parent(&bt2ctx_mtx);
+	}
+}
+
+void
+prof_postfork_child(void)
+{
+
+	if (opt_prof) {
+		unsigned i;
+
+		for (i = 0; i < PROF_NCTX_LOCKS; i++)
+			malloc_mutex_postfork_child(&ctx_locks[i]);
+		malloc_mutex_postfork_child(&prof_dump_seq_mtx);
+		malloc_mutex_postfork_child(&bt2ctx_mtx);
+	}
+}
+
 /******************************************************************************/
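Note the symmetry in the three functions above: prof_prefork() acquires bt2ctx_mtx, then prof_dump_seq_mtx, then each ctx_locks[i], and both postfork paths release in exactly the reverse order, so the fork-time lock ordering stays consistent with acquisition order.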
@@ -44,3 +44,24 @@ rtree_new(unsigned bits)
 
 	return (ret);
 }
+
+void
+rtree_prefork(rtree_t *rtree)
+{
+
+	malloc_mutex_prefork(&rtree->mutex);
+}
+
+void
+rtree_postfork_parent(rtree_t *rtree)
+{
+
+	malloc_mutex_postfork_parent(&rtree->mutex);
+}
+
+void
+rtree_postfork_child(rtree_t *rtree)
+{
+
+	malloc_mutex_postfork_child(&rtree->mutex);
+}
@@ -377,7 +377,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
 		case '\0': goto label_out;
 		case '%': {
 			bool alt_form = false;
-			bool zero_pad = false;
 			bool left_justify = false;
 			bool plus_space = false;
 			bool plus_plus = false;
@@ -398,10 +397,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
 				assert(alt_form == false);
 				alt_form = true;
 				break;
-			case '0':
-				assert(zero_pad == false);
-				zero_pad = true;
-				break;
 			case '-':
 				assert(left_justify == false);
 				left_justify = true;
@@ -1,2 +1,2 @@
 UPSTREAM_REPO=git://canonware.com/jemalloc.git
-UPSTREAM_COMMIT=3.0.0
+UPSTREAM_COMMIT=d0ffd8ed4f6aa4cf7248028eddfcb35f93247fe4