Bug 681183 - Make jemalloc_stats.committed meaningful on *nix. r=khuey

Justin Lebar 2011-10-10 13:54:42 -04:00
parent 2e60074c52
commit 254211dda2
2 changed files with 40 additions and 71 deletions


@@ -698,10 +698,10 @@ struct arena_stats_s {
uint64_t ndecommit;
uint64_t ncommit;
uint64_t decommitted;
#endif
/* Current number of committed pages. */
size_t committed;
#endif
/* Per-size-category statistics. */
size_t allocated_small;
@@ -845,7 +845,7 @@ struct arena_chunk_map_s {
* -------- -------- -------- ------la
*/
size_t bits;
#ifdef MALLOC_DECOMMIT
#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
#define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
#endif
#define CHUNK_MAP_KEY ((size_t)0x10U)
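
Because CHUNK_MAP_DECOMMITTED now exists whenever MALLOC_STATS is defined, the per-page map word can record committed/decommitted state even on builds that never actually decommit. A minimal standalone sketch of how such a flag is tested and cleared (the constant value is copied from this hunk; everything else is local to the example):

    #include <stddef.h>

    #define EX_CHUNK_MAP_DECOMMITTED ((size_t)0x20U)

    /* Clear the decommitted flag on one page's map word, mirroring the
     * `bits ^= CHUNK_MAP_DECOMMITTED` pattern used later in this patch. */
    static void ex_page_mark_committed(size_t *bits)
    {
        if (*bits & EX_CHUNK_MAP_DECOMMITTED)
            *bits ^= EX_CHUNK_MAP_DECOMMITTED; /* flag was set, so XOR clears it */
    }
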
@@ -1083,7 +1083,7 @@ static char pagefile_templ[PATH_MAX];
*/
static void *base_pages;
static void *base_next_addr;
#ifdef MALLOC_DECOMMIT
#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
static void *base_next_decommitted;
#endif
static void *base_past_addr; /* Addr immediately past base_pages. */
@@ -1091,9 +1091,7 @@ static extent_node_t *base_nodes;
static malloc_mutex_t base_mtx;
#ifdef MALLOC_STATS
static size_t base_mapped;
# ifdef MALLOC_DECOMMIT
static size_t base_committed;
# endif
#endif
/********/
@@ -1843,7 +1841,7 @@ base_pages_alloc_mmap(size_t minsize)
{
bool ret;
size_t csize;
#ifdef MALLOC_DECOMMIT
#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
size_t pminsize;
#endif
int pfd;
@@ -1865,19 +1863,19 @@ base_pages_alloc_mmap(size_t minsize)
}
base_next_addr = base_pages;
base_past_addr = (void *)((uintptr_t)base_pages + csize);
#ifdef MALLOC_DECOMMIT
#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
/*
* Leave enough pages for minsize committed, since otherwise they would
* have to be immediately recommitted.
*/
pminsize = PAGE_CEILING(minsize);
base_next_decommitted = (void *)((uintptr_t)base_pages + pminsize);
# if defined(MALLOC_DECOMMIT)
if (pminsize < csize)
pages_decommit(base_next_decommitted, csize - pminsize);
#endif
#ifdef MALLOC_STATS
# endif
# ifdef MALLOC_STATS
base_mapped += csize;
# ifdef MALLOC_DECOMMIT
base_committed += pminsize;
# endif
#endif
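
The accounting above keeps base_mapped at the full chunk size while base_committed covers only the page-rounded prefix that is left committed (the tail is actually decommitted only when MALLOC_DECOMMIT is in effect, but it is counted the same way either way). A simplified, self-contained sketch of that bookkeeping, with PAGE_CEILING written out under the usual power-of-two page-size assumption and all names local to the example:

    #include <stddef.h>

    #define EX_PAGESIZE ((size_t)4096)                 /* assumed page size */
    #define EX_PAGE_CEILING(s) (((s) + EX_PAGESIZE - 1) & ~(EX_PAGESIZE - 1))

    static size_t ex_base_mapped;      /* bytes mapped for the base allocator */
    static size_t ex_base_committed;   /* bytes of that mapping left committed */

    static void ex_account_base_pages(size_t csize, size_t minsize)
    {
        size_t pminsize = EX_PAGE_CEILING(minsize);    /* prefix kept committed */
        ex_base_mapped += csize;
        ex_base_committed += pminsize;   /* tracked even when nothing is decommitted */
    }
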
@@ -1921,14 +1919,16 @@ base_alloc(size_t size)
/* Allocate. */
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
#ifdef MALLOC_DECOMMIT
#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
/* Make sure enough pages are committed for the new allocation. */
if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
void *pbase_next_addr =
(void *)(PAGE_CEILING((uintptr_t)base_next_addr));
# ifdef MALLOC_DECOMMIT
pages_commit(base_next_decommitted, (uintptr_t)pbase_next_addr -
(uintptr_t)base_next_decommitted);
# endif
base_next_decommitted = pbase_next_addr;
# ifdef MALLOC_STATS
base_committed += (uintptr_t)pbase_next_addr -
@@ -3069,7 +3069,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
}
for (i = 0; i < need_pages; i++) {
#ifdef MALLOC_DECOMMIT
#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
/*
* Commit decommitted pages if necessary. If a decommitted
* page is encountered, commit all needed adjacent decommitted
@@ -3090,13 +3090,24 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
CHUNK_MAP_DECOMMITTED;
}
# ifdef MALLOC_DECOMMIT
pages_commit((void *)((uintptr_t)chunk + ((run_ind + i)
<< pagesize_2pow)), (j << pagesize_2pow));
# ifdef MALLOC_STATS
arena->stats.ncommit++;
# endif
# endif
# ifdef MALLOC_STATS
arena->stats.committed += j;
# endif
# ifndef MALLOC_DECOMMIT
}
# else
} else /* No need to zero since commit zeros. */
# endif
#endif
/* Zero if necessary. */
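
The nested #ifdefs above serve two builds: with MALLOC_DECOMMIT the run's pages are actually committed (and commit already zeroed them, hence the trailing else), while a MALLOC_STATS-only build just clears the flags and bumps the committed counter before falling through to the zeroing code. A self-contained sketch of the stats-only accounting (flag value copied from this patch, all other names invented for the example):

    #include <stddef.h>

    #define EX_CHUNK_MAP_DECOMMITTED ((size_t)0x20U)

    /* Walk a run of npages map words; wherever a decommitted page is found,
     * clear the flag on it and all adjacent decommitted pages and grow the
     * committed-page counter, as the MALLOC_STATS-only branch above does. */
    static size_t ex_commit_run(size_t *map, size_t npages, size_t *committed)
    {
        size_t i, j, brought_in = 0;

        for (i = 0; i < npages; i++) {
            if (map[i] & EX_CHUNK_MAP_DECOMMITTED) {
                for (j = 0; i + j < npages &&
                     (map[i + j] & EX_CHUNK_MAP_DECOMMITTED); j++)
                    map[i + j] ^= EX_CHUNK_MAP_DECOMMITTED;
                *committed += j;
                brought_in += j;
                i += j - 1;
            }
        }
        return brought_in;
    }
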
@@ -3169,23 +3180,11 @@ arena_chunk_init(arena_t *arena, arena_chunk_t *chunk)
pagesize_2pow));
for (i = 0; i < arena_chunk_header_npages; i++)
chunk->map[i].bits = 0;
chunk->map[i].bits = arena_maxclass
#ifdef MALLOC_DECOMMIT
| CHUNK_MAP_DECOMMITTED
#endif
| CHUNK_MAP_ZEROED;
chunk->map[i].bits = arena_maxclass | CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED;
for (i++; i < chunk_npages-1; i++) {
chunk->map[i].bits =
#ifdef MALLOC_DECOMMIT
CHUNK_MAP_DECOMMITTED |
#endif
CHUNK_MAP_ZEROED;
chunk->map[i].bits = CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED;
}
chunk->map[chunk_npages-1].bits = arena_maxclass
#ifdef MALLOC_DECOMMIT
| CHUNK_MAP_DECOMMITTED
#endif
| CHUNK_MAP_ZEROED;
chunk->map[chunk_npages-1].bits = arena_maxclass | CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED;
#ifdef MALLOC_DECOMMIT
/*
@@ -3196,8 +3195,10 @@ arena_chunk_init(arena_t *arena, arena_chunk_t *chunk)
# ifdef MALLOC_STATS
arena->stats.ndecommit++;
arena->stats.decommitted += (chunk_npages - arena_chunk_header_npages);
arena->stats.committed += arena_chunk_header_npages;
# endif
#endif
#ifdef MALLOC_STATS
arena->stats.committed += arena_chunk_header_npages;
#endif
/* Insert the run into the runs_avail tree. */
@@ -3214,7 +3215,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
arena_chunk_tree_dirty_remove(
&chunk->arena->chunks_dirty, arena->spare);
arena->ndirty -= arena->spare->ndirty;
#if (defined(MALLOC_STATS) && defined(MALLOC_DECOMMIT))
#ifdef MALLOC_STATS
arena->stats.committed -= arena->spare->ndirty;
#endif
}
@@ -3222,9 +3223,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
chunk_dealloc((void *)arena->spare, chunksize);
#ifdef MALLOC_STATS
arena->stats.mapped -= chunksize;
# ifdef MALLOC_DECOMMIT
arena->stats.committed -= arena_chunk_header_npages;
# endif
#endif
}
@@ -3330,29 +3329,17 @@ arena_purge(arena_t *arena)
assert(i >= arena_chunk_header_npages);
if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
#ifdef MALLOC_DECOMMIT
assert((chunk->map[i].bits &
CHUNK_MAP_DECOMMITTED) == 0);
#endif
chunk->map[i].bits ^=
#ifdef MALLOC_DECOMMIT
CHUNK_MAP_DECOMMITTED |
#endif
CHUNK_MAP_DIRTY;
chunk->map[i].bits ^= CHUNK_MAP_DECOMMITTED | CHUNK_MAP_DIRTY;
/* Find adjacent dirty run(s). */
for (npages = 1; i > arena_chunk_header_npages
&& (chunk->map[i - 1].bits &
CHUNK_MAP_DIRTY); npages++) {
i--;
#ifdef MALLOC_DECOMMIT
assert((chunk->map[i].bits &
CHUNK_MAP_DECOMMITTED) == 0);
#endif
chunk->map[i].bits ^=
#ifdef MALLOC_DECOMMIT
CHUNK_MAP_DECOMMITTED |
#endif
CHUNK_MAP_DIRTY;
chunk->map[i].bits ^= CHUNK_MAP_DECOMMITTED | CHUNK_MAP_DIRTY;
}
chunk->ndirty -= npages;
arena->ndirty -= npages;
@@ -3364,9 +3351,13 @@ arena_purge(arena_t *arena)
# ifdef MALLOC_STATS
arena->stats.ndecommit++;
arena->stats.decommitted += npages;
arena->stats.committed -= npages;
# endif
#else
#endif
#ifdef MALLOC_STATS
arena->stats.committed -= npages;
#endif
#ifndef MALLOC_DECOMMIT
madvise((void *)((uintptr_t)chunk + (i <<
pagesize_2pow)), (npages << pagesize_2pow),
MADV_FREE);
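
This purge hunk is what makes the number meaningful on *nix: dirty pages that are only madvise()d away are now marked CHUNK_MAP_DECOMMITTED for accounting and subtracted from stats.committed, even though no real decommit happens. A rough, self-contained sketch of that step (local names only; the real code also toggles CHUNK_MAP_DIRTY and updates the dirty counters, and MADV_DONTNEED is used here merely as a fallback where MADV_FREE is not defined):

    #include <stddef.h>
    #include <sys/mman.h>   /* madvise */

    #define EX_CHUNK_MAP_DECOMMITTED ((size_t)0x20U)

    /* Purge npages dirty pages starting at addr: give their backing store
     * back to the OS and stop counting them as committed. */
    static void ex_purge_pages(void *addr, size_t npages, size_t page_bytes,
                               size_t *map, size_t *committed)
    {
        size_t k;

        for (k = 0; k < npages; k++)
            map[k] |= EX_CHUNK_MAP_DECOMMITTED;
        *committed -= npages;
    #ifdef MADV_FREE
        madvise(addr, npages * page_bytes, MADV_FREE);
    #else
        madvise(addr, npages * page_bytes, MADV_DONTNEED);
    #endif
    }
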
@@ -5707,9 +5698,7 @@ MALLOC_OUT:
/* Initialize base allocation data structures. */
#ifdef MALLOC_STATS
base_mapped = 0;
# ifdef MALLOC_DECOMMIT
base_committed = 0;
# endif
#endif
base_nodes = NULL;
malloc_mutex_init(&base_mtx);
@@ -6363,19 +6352,15 @@ jemalloc_stats(jemalloc_stats_t *stats)
/* Get huge mapped/allocated. */
malloc_mutex_lock(&huge_mtx);
stats->mapped += stats_chunks.curchunks * chunksize;
#ifdef MALLOC_DECOMMIT
stats->committed += huge_allocated;
#endif
stats->allocated += huge_allocated;
malloc_mutex_unlock(&huge_mtx);
/* Get base mapped. */
malloc_mutex_lock(&base_mtx);
stats->mapped += base_mapped;
#ifdef MALLOC_DECOMMIT
assert(base_committed <= base_mapped);
stats->committed += base_committed;
#endif
malloc_mutex_unlock(&base_mtx);
/* Iterate over arenas and their chunks. */
@@ -6385,18 +6370,13 @@ jemalloc_stats(jemalloc_stats_t *stats)
malloc_spin_lock(&arena->lock);
stats->allocated += arena->stats.allocated_small;
stats->allocated += arena->stats.allocated_large;
#ifdef MALLOC_DECOMMIT
stats->committed += (arena->stats.committed <<
pagesize_2pow);
#endif
stats->dirty += (arena->ndirty << pagesize_2pow);
malloc_spin_unlock(&arena->lock);
}
}
#ifndef MALLOC_DECOMMIT
stats->committed = stats->mapped;
#endif
assert(stats->mapped >= stats->committed);
assert(stats->committed >= stats->allocated);
}
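
With the per-arena, huge, and base contributions summed unconditionally, the two asserts at the end now hold on every platform, not just where MALLOC_DECOMMIT is defined. A minimal consumer-side sketch of those invariants (the field and function names are the ones used above; include whatever header declares jemalloc_stats_t in a given tree):

    #include <assert.h>
    /* #include "jemalloc_types.h"  -- placeholder for the header that
       declares jemalloc_stats_t and jemalloc_stats() in your build */

    static void ex_check_heap_stats(void)
    {
        jemalloc_stats_t stats;

        jemalloc_stats(&stats);
        /* Everything allocated is committed; nothing committed exceeds
         * what is mapped. */
        assert(stats.mapped >= stats.committed);
        assert(stats.committed >= stats.allocated);
    }
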


@@ -436,19 +436,8 @@ nsMemoryReporterManager::Init()
REGISTER(Private);
#endif
#if defined(HAVE_JEMALLOC_STATS) && defined(XP_WIN)
// heap-committed is only meaningful where we have MALLOC_DECOMMIT defined
// (currently, just on Windows). Elsewhere, it's the same as
// stats->mapped, which is heap-allocated + heap-unallocated.
//
// Ideally, we'd check for MALLOC_DECOMMIT in the #if defined above, but
// MALLOC_DECOMMIT is defined in jemalloc.c, not a header, so we'll just
// have to settle for the OS check for now.
REGISTER(HeapCommitted);
#endif
#if defined(HAVE_JEMALLOC_STATS)
REGISTER(HeapCommitted);
REGISTER(HeapDirty);
#elif defined(XP_MACOSX) && !defined(MOZ_MEMORY)
REGISTER(HeapZone0Committed);
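
With the XP_WIN guard dropped, heap-committed is now registered wherever jemalloc stats are available at all. Purely for illustration, a hypothetical getter (names and signature invented; the actual reporter plumbing in this file is not shown in the diff) would just forward the field:

    #include <stdint.h>

    /* Hypothetical example only. */
    static int64_t ExampleGetHeapCommitted(void)
    {
        jemalloc_stats_t stats;

        jemalloc_stats(&stats);
        return (int64_t)stats.committed;   /* bytes the heap considers committed */
    }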