Bug 1033442 - Automate tracking of the GCRuntime's gcBytes; r=jonco,sfink

This commit is contained in:
Terrence Cole 2014-07-22 13:30:26 -07:00
parent 28dadb434b
commit 8878446289
8 changed files with 72 additions and 38 deletions

View File

@ -247,7 +247,7 @@ GC(JSContext *cx, unsigned argc, jsval *vp)
}
#ifndef JS_MORE_DETERMINISTIC
size_t preBytes = cx->runtime()->gc.bytesAllocated();
size_t preBytes = cx->runtime()->gc.usage.gcBytes();
#endif
if (compartment)
@ -259,7 +259,7 @@ GC(JSContext *cx, unsigned argc, jsval *vp)
char buf[256] = { '\0' };
#ifndef JS_MORE_DETERMINISTIC
JS_snprintf(buf, sizeof(buf), "before %lu, after %lu\n",
(unsigned long)preBytes, (unsigned long)cx->runtime()->gc.bytesAllocated());
(unsigned long)preBytes, (unsigned long)cx->runtime()->gc.usage.gcBytes());
#endif
JSString *str = JS_NewStringCopyZ(cx, buf);
if (!str)

View File

@ -186,7 +186,6 @@ class GCRuntime
void setDeterministic(bool enable);
#endif
size_t bytesAllocated() { return bytes; }
size_t maxBytesAllocated() { return maxBytes; }
size_t maxMallocBytesAllocated() { return maxBytes; }
@ -330,7 +329,6 @@ class GCRuntime
inline void updateOnFreeArenaAlloc(const ChunkInfo &info);
inline void updateOnArenaFree(const ChunkInfo &info);
inline void updateBytesAllocated(ptrdiff_t size);
GCChunkSet::Range allChunks() { return chunkSet.all(); }
inline Chunk **getAvailableChunkList(Zone *zone);
@ -429,6 +427,9 @@ class GCRuntime
js::GCMarker marker;
/* Track heap usage for this runtime. */
HeapUsage usage;
private:
/*
* Set of all GC chunks with at least one allocated thing. The
@ -450,9 +451,6 @@ class GCRuntime
js::RootedValueMap rootsHash;
/* This is updated by both the main and GC helper threads. */
mozilla::Atomic<size_t, mozilla::ReleaseAcquire> bytes;
size_t maxBytes;
size_t maxMallocBytes;

View File

@ -7,6 +7,7 @@
#ifndef gc_Heap_h
#define gc_Heap_h
#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"
#include "mozilla/PodOperations.h"
@ -892,6 +893,54 @@ static_assert(js::gc::ChunkLocationOffset == offsetof(Chunk, info) +
offsetof(ChunkTrailer, location),
"The hardcoded API location offset must match the actual offset.");
/*
 * Tracks the used sizes for owned heap data and automatically maintains the
 * memory usage relationship between GCRuntime and Zones.
 */
class HeapUsage
{
    /*
     * The parent node in the usage hierarchy (e.g. the GCRuntime's usage
     * when this instance tracks a Zone), or nullptr if this is the
     * top-level usage container.
     */
    HeapUsage *parent_;

    /*
     * The approximate number of bytes in use on the GC heap, to the nearest
     * ArenaSize. This does not include any malloc data. It also does not
     * include not-actively-used addresses that are still reserved at the OS
     * level for GC usage. It is atomic because it is updated by both the main
     * and GC helper threads.
     */
    mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes_;

  public:
    /*
     * |parent| is the enclosing usage tracker, or nullptr for the root.
     * Explicit so that a bare HeapUsage* cannot silently convert into a
     * HeapUsage value.
     */
    explicit HeapUsage(HeapUsage *parent)
      : parent_(parent),
        gcBytes_(0)
    {}

    size_t gcBytes() const { return gcBytes_; }

    /* Record allocation of one arena, propagating the count to our parent. */
    void addGCArena() {
        gcBytes_ += ArenaSize;
        if (parent_)
            parent_->addGCArena();
    }

    /* Record release of one arena, propagating the change to our parent. */
    void removeGCArena() {
        MOZ_ASSERT(gcBytes_ >= ArenaSize);
        gcBytes_ -= ArenaSize;
        if (parent_)
            parent_->removeGCArena();
    }

    /*
     * Pair to adoptArenas. Adopts the attendant usage statistics. The
     * parent's total is deliberately left unchanged: this assumes both
     * usages share the same parent (the arenas merely change owner within
     * it), as at the MergeCompartments call site.
     */
    void adopt(HeapUsage &other) {
        gcBytes_ += other.gcBytes_;
        other.gcBytes_ = 0;
    }
};
inline uintptr_t
ArenaHeader::address() const
{

View File

@ -895,7 +895,7 @@ js::Nursery::collect(JSRuntime *rt, JS::gcreason::Reason reason, TypeObjectList
// We ignore gcMaxBytes when allocating for minor collection. However, if we
// overflowed, we disable the nursery. The next time we allocate, we'll fail
// because gcBytes >= gcMaxBytes.
if (rt->gc.bytesAllocated() >= rt->gc.maxBytesAllocated())
if (rt->gc.usage.gcBytes() >= rt->gc.maxBytesAllocated())
disable();
TIME_END(total);

View File

@ -528,7 +528,7 @@ Statistics::beginGC()
sccTimes.clearAndFree();
nonincrementalReason = nullptr;
preBytes = runtime->gc.bytesAllocated();
preBytes = runtime->gc.usage.gcBytes();
}
void

View File

@ -30,7 +30,7 @@ JS::Zone::Zone(JSRuntime *rt)
gcHeapGrowthFactor(3.0),
gcMallocBytes(0),
gcMallocGCTriggered(false),
gcBytes(0),
usage(&rt->gc.usage),
gcTriggerBytes(0),
data(nullptr),
isSystem(false),

View File

@ -244,9 +244,8 @@ struct Zone : public JS::shadow::Zone,
// types.
mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcMallocGCTriggered;
// Counts the number of bytes allocated in the GC heap for this zone. It is
// updated by both the main and GC helper threads.
mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes;
// Track heap usage under this Zone.
js::gc::HeapUsage usage;
// GC trigger threshold for allocations on the GC heap.
size_t gcTriggerBytes;

View File

@ -923,20 +923,13 @@ Chunk::fetchNextFreeArena(JSRuntime *rt)
return aheader;
}
inline void
GCRuntime::updateBytesAllocated(ptrdiff_t size)
{
JS_ASSERT_IF(size < 0, bytes >= size_t(-size));
bytes += size;
}
ArenaHeader *
Chunk::allocateArena(Zone *zone, AllocKind thingKind)
{
JS_ASSERT(hasAvailableArenas());
JSRuntime *rt = zone->runtimeFromAnyThread();
if (!rt->isHeapMinorCollecting() && rt->gc.bytesAllocated() >= rt->gc.maxBytesAllocated()) {
if (!rt->isHeapMinorCollecting() && rt->gc.usage.gcBytes() >= rt->gc.maxBytesAllocated()) {
#ifdef JSGC_FJGENERATIONAL
// This is an approximation to the best test, which would check that
// this thread is currently promoting into the tenured area. I doubt
@ -955,10 +948,9 @@ Chunk::allocateArena(Zone *zone, AllocKind thingKind)
if (MOZ_UNLIKELY(!hasAvailableArenas()))
removeFromAvailableList();
rt->gc.updateBytesAllocated(ArenaSize);
zone->gcBytes += ArenaSize;
zone->usage.addGCArena();
if (zone->gcBytes >= zone->gcTriggerBytes) {
if (zone->usage.gcBytes() >= zone->gcTriggerBytes) {
AutoUnlockGC unlock(rt);
TriggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);
}
@ -1002,12 +994,9 @@ Chunk::releaseArena(ArenaHeader *aheader)
if (rt->gc.isBackgroundSweeping())
maybeLock.lock(rt);
JS_ASSERT(rt->gc.bytesAllocated() >= ArenaSize);
JS_ASSERT(zone->gcBytes >= ArenaSize);
if (rt->gc.isBackgroundSweeping())
zone->reduceGCTriggerBytes(zone->gcHeapGrowthFactor * ArenaSize);
rt->gc.updateBytesAllocated(-ArenaSize);
zone->gcBytes -= ArenaSize;
zone->usage.removeGCArena();
aheader->setAsNotAllocated();
addArenaToFreeList(rt, aheader);
@ -1126,9 +1115,9 @@ GCRuntime::GCRuntime(JSRuntime *rt) :
#endif
stats(rt),
marker(rt),
usage(nullptr),
systemAvailableChunkListHead(nullptr),
userAvailableChunkListHead(nullptr),
bytes(0),
maxBytes(0),
maxMallocBytes(0),
numArenasFreeCommitted(0),
@ -1424,7 +1413,7 @@ GCRuntime::setParameter(JSGCParamKey key, uint32_t value)
{
switch (key) {
case JSGC_MAX_BYTES: {
JS_ASSERT(value >= bytes);
JS_ASSERT(value >= usage.gcBytes());
maxBytes = value;
break;
}
@ -1499,7 +1488,7 @@ GCRuntime::getParameter(JSGCParamKey key)
case JSGC_MAX_MALLOC_BYTES:
return maxMallocBytes;
case JSGC_BYTES:
return uint32_t(bytes);
return uint32_t(usage.gcBytes());
case JSGC_MODE:
return uint32_t(mode);
case JSGC_UNUSED_CHUNKS:
@ -2209,7 +2198,7 @@ ArenaLists::refillFreeList(ThreadSafeContext *cx, AllocKind thingKind)
bool runGC = cx->allowGC() && allowGC &&
cx->asJSContext()->runtime()->gc.incrementalState != NO_INCREMENTAL &&
zone->gcBytes > zone->gcTriggerBytes;
zone->usage.gcBytes() > zone->gcTriggerBytes;
#ifdef JS_THREADSAFE
JS_ASSERT_IF(cx->isJSContext() && allowGC,
@ -2463,8 +2452,8 @@ GCRuntime::maybeGC(Zone *zone)
}
double factor = highFrequencyGC ? 0.85 : 0.9;
if (zone->gcBytes > 1024 * 1024 &&
zone->gcBytes >= factor * zone->gcTriggerBytes &&
if (zone->usage.gcBytes() > 1024 * 1024 &&
zone->usage.gcBytes() >= factor * zone->gcTriggerBytes &&
incrementalState == NO_INCREMENTAL &&
!isBackgroundSweeping())
{
@ -4577,7 +4566,7 @@ GCRuntime::endSweepPhase(JSGCInvocationKind gckind, bool lastGC)
lastGCTime + highFrequencyTimeThreshold * PRMJ_USEC_PER_MSEC > currentTime;
for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
zone->setGCLastBytes(zone->gcBytes, gckind);
zone->setGCLastBytes(zone->usage.gcBytes(), gckind);
if (zone->isCollecting()) {
JS_ASSERT(zone->isGCFinished());
zone->setGCState(Zone::NoGC);
@ -5001,7 +4990,7 @@ GCRuntime::budgetIncrementalGC(int64_t *budget)
bool reset = false;
for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
if (zone->gcBytes >= zone->gcTriggerBytes) {
if (zone->usage.gcBytes() >= zone->gcTriggerBytes) {
*budget = SliceBudget::Unlimited;
stats.nonincremental("allocation trigger");
}
@ -5588,8 +5577,7 @@ gc::MergeCompartments(JSCompartment *source, JSCompartment *target)
// Merge the allocator in source's zone into target's zone.
target->zone()->allocator.arenas.adoptArenas(rt, &source->zone()->allocator.arenas);
target->zone()->gcBytes += source->zone()->gcBytes;
source->zone()->gcBytes = 0;
target->zone()->usage.adopt(source->zone()->usage);
// Merge other info in source's zone into target's zone.
target->zone()->types.typeLifoAlloc.transferFrom(&source->zone()->types.typeLifoAlloc);