Bug 1006769 - Part 1: Improve heap-overhead memory reporting. r=njn

Eric Rahm 2014-05-21 17:34:06 -07:00
parent 363ab2b9b3
commit fd5aaf36e7
3 changed files with 72 additions and 8 deletions


@@ -6584,7 +6584,7 @@ malloc_usable_size_impl(const void *ptr)
MOZ_JEMALLOC_API void
jemalloc_stats_impl(jemalloc_stats_t *stats)
{
-size_t i;
+size_t i, non_arena_mapped, chunk_header_size;
assert(stats != NULL);
@@ -6644,17 +6644,20 @@ jemalloc_stats_impl(jemalloc_stats_t *stats)
stats->waste = 0;
stats->page_cache = 0;
stats->bookkeeping = 0;
+stats->bin_unused = 0;
+non_arena_mapped = 0;
/* Get huge mapped/allocated. */
malloc_mutex_lock(&huge_mtx);
-stats->mapped += huge_mapped;
+non_arena_mapped += huge_mapped;
stats->allocated += huge_allocated;
assert(huge_mapped >= huge_allocated);
malloc_mutex_unlock(&huge_mtx);
/* Get base mapped/allocated. */
malloc_mutex_lock(&base_mtx);
-stats->mapped += base_mapped;
+non_arena_mapped += base_mapped;
stats->bookkeeping += base_committed;
assert(base_mapped >= base_committed);
malloc_mutex_unlock(&base_mtx);
@@ -6662,12 +6665,18 @@ jemalloc_stats_impl(jemalloc_stats_t *stats)
/* Iterate over arenas. */
for (i = 0; i < narenas; i++) {
arena_t *arena = arenas[i];
-size_t arena_mapped, arena_allocated, arena_committed, arena_dirty;
+size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
+arena_unused, arena_headers;
+arena_run_t* run;
+arena_chunk_map_t* mapelm;
if (arena == NULL) {
continue;
}
+arena_headers = 0;
+arena_unused = 0;
malloc_spin_lock(&arena->lock);
arena_mapped = arena->stats.mapped;
@@ -6680,6 +6689,25 @@ jemalloc_stats_impl(jemalloc_stats_t *stats)
arena_dirty = arena->ndirty << pagesize_2pow;
+for (j = 0; j < ntbins + nqbins + nsbins; j++) {
+arena_bin_t* bin = &arena->bins[j];
+size_t bin_unused = 0;
+const size_t run_header_size = sizeof(arena_run_t) +
+(sizeof(unsigned) * (bin->regs_mask_nelms - 1));
+rb_foreach_begin(arena_chunk_map_t, link, &bin->runs, mapelm) {
+run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
+bin_unused += run->nfree * bin->reg_size;
+} rb_foreach_end(arena_chunk_map_t, link, &bin->runs, mapelm)
+if (bin->runcur) {
+bin_unused += bin->runcur->nfree * bin->reg_size;
+}
+arena_unused += bin_unused;
+arena_headers += bin->stats.curruns * bin->reg0_offset;
+}
malloc_spin_unlock(&arena->lock);
assert(arena_mapped >= arena_committed);
@@ -6690,9 +6718,21 @@ jemalloc_stats_impl(jemalloc_stats_t *stats)
stats->mapped += arena_mapped;
stats->allocated += arena_allocated;
stats->page_cache += arena_dirty;
-stats->waste += arena_committed - arena_allocated - arena_dirty;
+stats->waste += arena_committed -
+arena_allocated - arena_dirty - arena_unused - arena_headers;
+stats->bin_unused += arena_unused;
+stats->bookkeeping += arena_headers;
}
+/* Account for arena chunk headers in bookkeeping rather than waste. */
+chunk_header_size =
+((stats->mapped / stats->chunksize) * arena_chunk_header_npages) <<
+pagesize_2pow;
+stats->mapped += non_arena_mapped;
+stats->bookkeeping += chunk_header_size;
+stats->waste -= chunk_header_size;
+assert(stats->mapped >= stats->allocated + stats->waste +
+stats->page_cache + stats->bookkeeping);
}
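The chunk-header adjustment at the end boils down to this: every mapped arena chunk spends arena_chunk_header_npages pages on its header, and those bytes belong in bookkeeping rather than waste. A minimal standalone sketch of that arithmetic, using illustrative stand-in values (1 MiB chunks, 4 KiB pages, a hypothetical 3-page header) instead of the allocator's real constants:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Illustrative stand-ins for the real constants in the allocator.
      const size_t pagesize_2pow = 12;              // 4 KiB pages
      const size_t chunksize = 1 << 20;             // 1 MiB chunks
      const size_t arena_chunk_header_npages = 3;   // hypothetical header pages per chunk

      size_t mapped = 8 * chunksize;                // say 8 arena chunks are mapped
      size_t waste = 200 * 1024;                    // waste before the adjustment
      size_t bookkeeping = 64 * 1024;               // bookkeeping before the adjustment

      // Same shape as the patch: one header per mapped chunk, converted to bytes.
      size_t chunk_header_size =
          ((mapped / chunksize) * arena_chunk_header_npages) << pagesize_2pow;

      bookkeeping += chunk_header_size;             // headers count as bookkeeping...
      waste -= chunk_header_size;                   // ...and no longer as waste

      printf("chunk headers = %zu bytes, waste = %zu, bookkeeping = %zu\n",
             chunk_header_size, waste, bookkeeping);
      return 0;
    }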


@@ -81,6 +81,7 @@ typedef struct {
cache. (jemalloc calls these "dirty".) */
size_t bookkeeping; /* Committed bytes used internally by the
allocator. */
+size_t bin_unused; /* Bytes committed to a bin but currently unused. */
} jemalloc_stats_t;
#ifdef __cplusplus
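A hedged consumer-side sketch, not part of the patch: how code that includes this header might fold the new field into a heap-overhead total, assuming the usual jemalloc_stats() entry point that fills in a jemalloc_stats_t:

    #include "jemalloc_types.h"

    // Assumed to be provided by the allocator's public headers.
    extern "C" void jemalloc_stats(jemalloc_stats_t* stats);

    // Total committed overhead: everything committed that is not a live allocation.
    size_t HeapOverheadBytes() {
      jemalloc_stats_t stats;
      jemalloc_stats(&stats);
      return stats.bookkeeping + stats.page_cache + stats.bin_unused + stats.waste;
    }

This is the same sum the explicit/heap-overhead/* reporters below break out into bookkeeping, page-cache, bin-unused, and waste.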


@@ -710,14 +710,22 @@ public:
// We mark this and the other heap-overhead reporters as KIND_NONHEAP
// because KIND_HEAP memory means "counted in heap-allocated", which
// this is not.
+rv = MOZ_COLLECT_REPORT(
+"explicit/heap-overhead/bin-unused", KIND_NONHEAP, UNITS_BYTES,
+stats.bin_unused,
+"Bytes reserved for bins of fixed-size allocations which do not correspond to "
+"an active allocation.");
+NS_ENSURE_SUCCESS(rv, rv);
rv = MOZ_COLLECT_REPORT(
"explicit/heap-overhead/waste", KIND_NONHEAP, UNITS_BYTES,
stats.waste,
"Committed bytes which do not correspond to an active allocation and which the "
"allocator is not intentionally keeping alive (i.e., not 'heap-bookkeeping' or "
"'heap-page-cache'). Although the allocator will waste some space under any "
"circumstances, a large value here may indicate that the heap is highly "
"fragmented, or that allocator is performing poorly for some other reason.");
"'heap-page-cache' or 'heap-bin-unused'). Although the allocator will waste "
"some space under any circumstances, a large value here may indicate that the "
"heap is highly fragmented, or that allocator is performing poorly for some "
"other reason.");
NS_ENSURE_SUCCESS(rv, rv);
rv = MOZ_COLLECT_REPORT(
@@ -753,6 +761,21 @@ public:
"the heap allocator relative to amount of memory allocated.");
NS_ENSURE_SUCCESS(rv, rv);
+rv = MOZ_COLLECT_REPORT(
+"heap-mapped", KIND_OTHER, UNITS_BYTES, stats.mapped,
+"Amount of memory currently mapped.");
+NS_ENSURE_SUCCESS(rv, rv);
+rv = MOZ_COLLECT_REPORT(
+"heap-chunksize", KIND_OTHER, UNITS_BYTES, stats.chunksize,
+"Size of chunks.");
+NS_ENSURE_SUCCESS(rv, rv);
+rv = MOZ_COLLECT_REPORT(
+"heap-chunks", KIND_OTHER, UNITS_COUNT, (stats.mapped / stats.chunksize),
+"Number of chunks currently mapped.");
+NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
};
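The three new top-level reports fit together simply: heap-chunks is derived as heap-mapped divided by heap-chunksize, and the mapped total bounds everything else per the assertion added to jemalloc_stats_impl(). A hypothetical self-check illustrating those relationships (not code from the patch):

    #include <cstddef>

    // Hypothetical mirror of the reported quantities, for illustration only.
    struct ReportedHeapStats {
      size_t mapped, allocated, waste, page_cache, bookkeeping, bin_unused, chunksize;
    };

    // heap-chunks as reported above is derived, not measured separately.
    size_t HeapChunks(const ReportedHeapStats& s) {
      return s.mapped / s.chunksize;
    }

    // The invariant jemalloc_stats_impl() asserts after this patch.
    bool MappedCoversEverything(const ReportedHeapStats& s) {
      return s.mapped >= s.allocated + s.waste + s.page_cache + s.bookkeeping;
    }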