Bug 1033442 - Split out GC scheduling tunables to make Zone GC heap triggers clearer; r=sfink

--HG--
extra : rebase_source : 3631e7c23d5e48f294d7860d2edd3dda776036cc
Terrence Cole 2014-07-22 16:24:28 -07:00
parent 2fe0282de9
commit 142c3254f8
6 changed files with 334 additions and 188 deletions


@@ -104,6 +104,126 @@ struct ConservativeGCData
}
};
/*
* Encapsulates all of the GC tunables. These are effectively constant and
* should only be modified by setParameter.
*/
class GCSchedulingTunables
{
/*
* Soft limit on the number of bytes we are allowed to allocate in the GC
* heap. Attempts to allocate gcthings over this limit will return null and
* subsequently invoke the standard OOM machinery, independent of available
* physical memory.
*/
size_t gcMaxBytes_;
/*
* The base value used to compute zone->threshold.gcTriggerBytes(). When
* usage.gcBytes() surpasses threshold.gcTriggerBytes() for a zone, the zone
* may be scheduled for a GC, depending on the exact circumstances.
*/
size_t gcZoneAllocThresholdBase_;
/*
* When false, this disables |highFrequencyGC| mode, the HeapGrowthFactor, and
* the other tunables that make GC non-deterministic.
*/
bool dynamicHeapGrowthEnabled_;
/*
* We enter high-frequency mode if we GC twice within this many
* microseconds. This value is stored directly in microseconds.
*/
uint64_t highFrequencyThresholdUsec_;
/*
* When in the |highFrequencyGC| mode, these parameterize the per-zone
* "HeapGrowthFactor" computation.
*/
uint64_t highFrequencyLowLimitBytes_;
uint64_t highFrequencyHighLimitBytes_;
double highFrequencyHeapGrowthMax_;
double highFrequencyHeapGrowthMin_;
/*
* When not in |highFrequencyGC| mode, this is the global (stored per-zone)
* "HeapGrowthFactor".
*/
double lowFrequencyHeapGrowth_;
/*
* Doubles the length of incremental GC slices when in |highFrequencyGC| mode.
*/
bool dynamicMarkSliceEnabled_;
/*
* Controls the number of empty chunks reserved for future allocation.
*/
unsigned minEmptyChunkCount_;
unsigned maxEmptyChunkCount_;
public:
GCSchedulingTunables()
: gcMaxBytes_(0),
gcZoneAllocThresholdBase_(30 * 1024 * 1024),
dynamicHeapGrowthEnabled_(false),
highFrequencyThresholdUsec_(1000 * 1000),
highFrequencyLowLimitBytes_(100 * 1024 * 1024),
highFrequencyHighLimitBytes_(500 * 1024 * 1024),
highFrequencyHeapGrowthMax_(3.0),
highFrequencyHeapGrowthMin_(1.5),
lowFrequencyHeapGrowth_(1.5),
dynamicMarkSliceEnabled_(false),
minEmptyChunkCount_(1),
maxEmptyChunkCount_(30)
{}
size_t gcMaxBytes() const { return gcMaxBytes_; }
size_t gcZoneAllocThresholdBase() const { return gcZoneAllocThresholdBase_; }
bool isDynamicHeapGrowthEnabled() const { return dynamicHeapGrowthEnabled_; }
uint64_t highFrequencyThresholdUsec() const { return highFrequencyThresholdUsec_; }
uint64_t highFrequencyLowLimitBytes() const { return highFrequencyLowLimitBytes_; }
uint64_t highFrequencyHighLimitBytes() const { return highFrequencyHighLimitBytes_; }
double highFrequencyHeapGrowthMax() const { return highFrequencyHeapGrowthMax_; }
double highFrequencyHeapGrowthMin() const { return highFrequencyHeapGrowthMin_; }
double lowFrequencyHeapGrowth() const { return lowFrequencyHeapGrowth_; }
bool isDynamicMarkSliceEnabled() const { return dynamicMarkSliceEnabled_; }
unsigned minEmptyChunkCount() const { return minEmptyChunkCount_; }
unsigned maxEmptyChunkCount() const { return maxEmptyChunkCount_; }
void setParameter(JSGCParamKey key, uint32_t value);
};
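For context, not part of this diff: embedders reach these tunables through the existing JS_SetGCParameter API; GCRuntime::setParameter handles the runtime-level keys itself and forwards the rest to GCSchedulingTunables::setParameter, as the jsgc.cpp hunks below show. A minimal sketch, assuming an already-initialized JSRuntime *rt (the values are illustrative only):

    // Hypothetical embedder code; rt is an initialized JSRuntime *.
    JS_SetGCParameter(rt, JSGC_DYNAMIC_HEAP_GROWTH, 1);          // enable the heuristics
    JS_SetGCParameter(rt, JSGC_HIGH_FREQUENCY_TIME_LIMIT, 1000); // milliseconds
    JS_SetGCParameter(rt, JSGC_HIGH_FREQUENCY_LOW_LIMIT, 100);   // MiB
    JS_SetGCParameter(rt, JSGC_HIGH_FREQUENCY_HIGH_LIMIT, 500);  // MiB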
/*
* Internal state that affects GC scheduling but is not directly exposed
* in the GC API.
*/
class GCSchedulingState
{
/*
* Influences how we schedule and run GCs in several subtle ways. The most
* important factor is how it controls the "HeapGrowthFactor". The
* growth factor is a measure of how large (as a percentage of the last GC)
* the heap is allowed to grow before we try to schedule another GC.
*/
bool inHighFrequencyGCMode_;
public:
GCSchedulingState()
: inHighFrequencyGCMode_(false)
{}
bool inHighFrequencyGCMode() const { return inHighFrequencyGCMode_; }
void updateHighFrequencyMode(uint64_t lastGCTime, uint64_t currentTime,
const GCSchedulingTunables &tunables) {
inHighFrequencyGCMode_ =
tunables.isDynamicHeapGrowthEnabled() && lastGCTime &&
lastGCTime + tunables.highFrequencyThresholdUsec() > currentTime;
}
};
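For concreteness, the same check as a standalone sketch with the default one-second threshold (hypothetical timestamps, in microseconds as with PRMJ_Now()):

    // The previous GC finished at t = 5.0s; the next begins 0.4s later.
    uint64_t lastGCTime = 5000000;
    uint64_t currentTime = 5400000;
    uint64_t thresholdUsec = 1000000;  // highFrequencyThresholdUsec() default
    bool dynamicGrowth = true;         // isDynamicHeapGrowthEnabled()
    // 5.0s + 1.0s > 5.4s: the two GCs were less than a second apart,
    // so the runtime enters high-frequency mode.
    bool highFreq = dynamicGrowth && lastGCTime &&
                    lastGCTime + thresholdUsec > currentTime;  // true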
template<typename F>
struct Callback {
F op;
@@ -182,7 +302,6 @@ class GCRuntime
void setDeterministic(bool enable);
#endif
size_t maxBytesAllocated() { return maxBytes; }
size_t maxMallocBytesAllocated() { return maxMallocBytes; }
public:
@@ -303,7 +422,6 @@ class GCRuntime
double computeHeapGrowthFactor(size_t lastBytes);
size_t computeTriggerBytes(double growthFactor, size_t lastBytes, JSGCInvocationKind gckind);
size_t allocationThreshold() { return allocThreshold; }
JSGCMode gcMode() const { return mode; }
void setGCMode(JSGCMode m) {
@@ -414,6 +532,10 @@ class GCRuntime
/* Track heap usage for this runtime. */
HeapUsage usage;
/* GC scheduling state and parameters. */
GCSchedulingTunables tunables;
GCSchedulingState schedulingState;
private:
/*
* Set of all GC chunks with at least one allocated thing. The
@@ -435,7 +557,6 @@ class GCRuntime
js::RootedValueMap rootsHash;
size_t maxBytes;
size_t maxMallocBytes;
/*
@@ -451,19 +572,7 @@ class GCRuntime
JSGCMode mode;
size_t allocThreshold;
bool highFrequencyGC;
uint64_t highFrequencyTimeThreshold;
uint64_t highFrequencyLowLimitBytes;
uint64_t highFrequencyHighLimitBytes;
double highFrequencyHeapGrowthMax;
double highFrequencyHeapGrowthMin;
double lowFrequencyHeapGrowth;
bool dynamicHeapGrowth;
bool dynamicMarkSlice;
uint64_t decommitThreshold;
unsigned minEmptyChunkCount;
unsigned maxEmptyChunkCount;
/* During shutdown, the GC needs to clean up every possible object. */
bool cleanUpEverything;
@@ -598,13 +707,13 @@ class GCRuntime
/*
* These options control the zealousness of the GC. The fundamental values
* are nextScheduled and gcDebugCompartmentGC. At every allocation,
* nextScheduled is decremented. When it reaches zero, we do either a full
* or a compartmental GC, based on debugCompartmentGC.
*
* At this point, if zeal_ is one of the types that trigger periodic
* collection, then nextScheduled is reset to the value of zealFrequency.
* Otherwise, no additional GCs take place.
*
* You can control these values in several ways:
* - Pass the -Z flag to the shell (see the usage info for details)
@@ -614,10 +723,10 @@ class GCRuntime
* If zeal_ == 1 then we perform GCs in select places (during MaybeGC and
* whenever a GC poke happens). This option is mainly useful to embedders.
*
* We use zeal_ == 4 to enable write barrier verification. See the comment
* in jsgc.cpp for more information about this.
*
* zeal_ values from 8 to 10 periodically run different types of
* incremental GC.
*/
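As a concrete instance of the controls described above (JS_SetGCZeal is the long-standing debug-build API; the numbers are examples only):

    // Debug builds only: zeal mode 2, forcing a GC every 100 allocations.
    JS_SetGCZeal(cx, 2, 100);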
#ifdef JS_GC_ZEAL


@@ -895,7 +895,7 @@ js::Nursery::collect(JSRuntime *rt, JS::gcreason::Reason reason, TypeObjectList
// We ignore gcMaxBytes when allocating for minor collection. However, if we
// overflowed, we disable the nursery. The next time we allocate, we'll fail
// because gcBytes >= gcMaxBytes.
if (rt->gc.usage.gcBytes() >= rt->gc.maxBytesAllocated())
if (rt->gc.usage.gcBytes() >= rt->gc.tunables.gcMaxBytes())
disable();
TIME_END(total);


@@ -27,11 +27,9 @@ JS::Zone::Zone(JSRuntime *rt)
types(this),
compartments(),
gcGrayRoots(),
gcHeapGrowthFactor(3.0),
gcMallocBytes(0),
gcMallocGCTriggered(false),
usage(&rt->gc.usage),
gcTriggerBytes(0),
data(nullptr),
isSystem(false),
usedByExclusiveThread(false),
@@ -48,6 +46,7 @@ JS::Zone::Zone(JSRuntime *rt)
JS_ASSERT(reinterpret_cast<JS::shadow::Zone *>(this) ==
static_cast<JS::shadow::Zone *>(this));
threshold.updateAfterGC(8192, GC_NORMAL, rt->gc.tunables, rt->gc.schedulingState);
setGCMaxMallocBytes(rt->gc.maxMallocBytesAllocated() * 0.9);
}
@@ -62,8 +61,9 @@ Zone::~Zone()
#endif
}
bool Zone::init()
bool Zone::init(bool isSystemArg)
{
isSystem = isSystemArg;
return gcZoneGroupEdges.init();
}


@@ -42,6 +42,40 @@ class Allocator
JS::Zone *zone_;
};
namespace gc {
// This class encapsulates the data that determines when we need to do a zone GC.
class ZoneHeapThreshold
{
// The "growth factor" for computing our next thresholds after a GC.
double gcHeapGrowthFactor_;
// GC trigger threshold for allocations on the GC heap.
size_t gcTriggerBytes_;
public:
ZoneHeapThreshold()
: gcHeapGrowthFactor_(3.0),
gcTriggerBytes_(0)
{}
double gcHeapGrowthFactor() const { return gcHeapGrowthFactor_; }
size_t gcTriggerBytes() const { return gcTriggerBytes_; }
void updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
const GCSchedulingTunables &tunables, const GCSchedulingState &state);
void updateForRemovedArena(const GCSchedulingTunables &tunables);
private:
static double computeZoneHeapGrowthFactorForHeapSize(size_t lastBytes,
const GCSchedulingTunables &tunables,
const GCSchedulingState &state);
static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
JSGCInvocationKind gckind,
const GCSchedulingTunables &tunables);
};
} // namespace gc
} // namespace js
namespace JS {
@@ -95,7 +129,7 @@ struct Zone : public JS::shadow::Zone,
{
explicit Zone(JSRuntime *rt);
~Zone();
bool init();
bool init(bool isSystem);
void findOutgoingEdges(js::gc::ComponentFinder<JS::Zone> &finder);
@@ -105,9 +139,6 @@ struct Zone : public JS::shadow::Zone,
size_t *typePool,
size_t *baselineStubsOptimized);
void setGCLastBytes(size_t lastBytes, js::JSGCInvocationKind gckind);
void reduceGCTriggerBytes(size_t amount);
void resetGCMallocBytes();
void setGCMaxMallocBytes(size_t value);
void updateMallocCounter(size_t nbytes) {
@@ -226,9 +257,6 @@ struct Zone : public JS::shadow::Zone,
typedef js::HashSet<Zone *, js::DefaultHasher<Zone *>, js::SystemAllocPolicy> ZoneSet;
ZoneSet gcZoneGroupEdges;
// The "growth factor" for computing our next thresholds after a GC.
double gcHeapGrowthFactor;
// Malloc counter to measure memory pressure for GC scheduling. It runs from
// gcMaxMallocBytes down to zero. This counter should be used only when it's
// not possible to know the size of a free.
@@ -247,8 +275,8 @@ struct Zone : public JS::shadow::Zone,
// Track heap usage under this Zone.
js::gc::HeapUsage usage;
// GC trigger threshold for allocations on the GC heap.
size_t gcTriggerBytes;
// Thresholds used to trigger GC.
js::gc::ZoneHeapThreshold threshold;
// Per-zone data for use by an embedder.
void *data;


@@ -704,15 +704,14 @@ GCRuntime::expireChunkPool(bool shrinkBuffers, bool releaseAll)
* without emptying the list, the older chunks will stay at the tail
* and are more likely to reach the max age.
*/
JS_ASSERT(maxEmptyChunkCount >= minEmptyChunkCount);
Chunk *freeList = nullptr;
unsigned freeChunkCount = 0;
for (ChunkPool::Enum e(chunkPool); !e.empty(); ) {
Chunk *chunk = e.front();
JS_ASSERT(chunk->unused());
JS_ASSERT(!chunkSet.has(chunk));
if (releaseAll || freeChunkCount >= maxEmptyChunkCount ||
(freeChunkCount >= minEmptyChunkCount &&
if (releaseAll || freeChunkCount >= tunables.maxEmptyChunkCount() ||
(freeChunkCount >= tunables.minEmptyChunkCount() &&
(shrinkBuffers || chunk->info.age == MAX_EMPTY_CHUNK_AGE)))
{
e.removeAndPopFront();
@@ -726,8 +725,8 @@ GCRuntime::expireChunkPool(bool shrinkBuffers, bool releaseAll)
e.popFront();
}
}
JS_ASSERT(chunkPool.getEmptyCount() <= maxEmptyChunkCount);
JS_ASSERT_IF(shrinkBuffers, chunkPool.getEmptyCount() <= minEmptyChunkCount);
JS_ASSERT(chunkPool.getEmptyCount() <= tunables.maxEmptyChunkCount());
JS_ASSERT_IF(shrinkBuffers, chunkPool.getEmptyCount() <= tunables.minEmptyChunkCount());
JS_ASSERT_IF(releaseAll, chunkPool.getEmptyCount() == 0);
return freeList;
}
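Restating the retention rule above with the default tunables (an illustration, not diff content):

    // With minEmptyChunkCount() == 1 and maxEmptyChunkCount() == 30:
    //  - releaseAll frees every unused chunk (the shutdown path);
    //  - shrinkBuffers keeps only the first minEmptyChunkCount() chunks;
    //  - otherwise a chunk is retained until it reaches MAX_EMPTY_CHUNK_AGE,
    //    and never more than maxEmptyChunkCount() chunks are kept.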
@@ -929,7 +928,7 @@ Chunk::allocateArena(Zone *zone, AllocKind thingKind)
JS_ASSERT(hasAvailableArenas());
JSRuntime *rt = zone->runtimeFromAnyThread();
if (!rt->isHeapMinorCollecting() && rt->gc.usage.gcBytes() >= rt->gc.maxBytesAllocated()) {
if (!rt->isHeapMinorCollecting() && rt->gc.usage.gcBytes() >= rt->gc.tunables.gcMaxBytes()) {
#ifdef JSGC_FJGENERATIONAL
// This is an approximation to the best test, which would check that
// this thread is currently promoting into the tenured area. I doubt
@@ -950,7 +949,7 @@ Chunk::allocateArena(Zone *zone, AllocKind thingKind)
zone->usage.addGCArena();
if (zone->usage.gcBytes() >= zone->gcTriggerBytes) {
if (zone->usage.gcBytes() >= zone->threshold.gcTriggerBytes()) {
AutoUnlockGC unlock(rt);
TriggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);
}
@@ -995,7 +994,7 @@ Chunk::releaseArena(ArenaHeader *aheader)
maybeLock.lock(rt);
if (rt->gc.isBackgroundSweeping())
zone->reduceGCTriggerBytes(zone->gcHeapGrowthFactor * ArenaSize);
zone->threshold.updateForRemovedArena(rt->gc.tunables);
zone->usage.removeGCArena();
aheader->setAsNotAllocated();
@@ -1033,7 +1032,7 @@ GCRuntime::wantBackgroundAllocation() const
* of them.
*/
return helperState.canBackgroundAllocate() &&
chunkPool.getEmptyCount() < minEmptyChunkCount &&
chunkPool.getEmptyCount() < tunables.minEmptyChunkCount() &&
chunkSet.count() >= 4;
}
@@ -1118,7 +1117,6 @@ GCRuntime::GCRuntime(JSRuntime *rt) :
usage(nullptr),
systemAvailableChunkListHead(nullptr),
userAvailableChunkListHead(nullptr),
maxBytes(0),
maxMallocBytes(0),
numArenasFreeCommitted(0),
verifyPreData(nullptr),
@@ -1127,19 +1125,6 @@ GCRuntime::GCRuntime(JSRuntime *rt) :
nextFullGCTime(0),
lastGCTime(0),
jitReleaseTime(0),
allocThreshold(30 * 1024 * 1024),
highFrequencyGC(false),
highFrequencyTimeThreshold(1000),
highFrequencyLowLimitBytes(100 * 1024 * 1024),
highFrequencyHighLimitBytes(500 * 1024 * 1024),
highFrequencyHeapGrowthMax(3.0),
highFrequencyHeapGrowthMin(1.5),
lowFrequencyHeapGrowth(1.5),
dynamicHeapGrowth(false),
dynamicMarkSlice(false),
decommitThreshold(32 * 1024 * 1024),
minEmptyChunkCount(1),
maxEmptyChunkCount(30),
cleanUpEverything(false),
grayBitsValid(false),
isNeeded(0),
@@ -1298,7 +1283,7 @@ GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
* Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
* for default backward API compatibility.
*/
maxBytes = maxbytes;
tunables.setParameter(JSGC_MAX_BYTES, maxbytes);
setMaxMallocBytes(maxbytes);
#ifndef JS_MORE_DETERMINISTIC
@@ -1406,11 +1391,6 @@ void
GCRuntime::setParameter(JSGCParamKey key, uint32_t value)
{
switch (key) {
case JSGC_MAX_BYTES: {
JS_ASSERT(value >= usage.gcBytes());
maxBytes = value;
break;
}
case JSGC_MAX_MALLOC_BYTES:
setMaxMallocBytes(value);
break;
@@ -1420,56 +1400,78 @@ GCRuntime::setParameter(JSGCParamKey key, uint32_t value)
case JSGC_MARK_STACK_LIMIT:
setMarkStackLimit(value);
break;
case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
highFrequencyTimeThreshold = value;
break;
case JSGC_HIGH_FREQUENCY_LOW_LIMIT:
highFrequencyLowLimitBytes = value * 1024 * 1024;
break;
case JSGC_HIGH_FREQUENCY_HIGH_LIMIT:
highFrequencyHighLimitBytes = value * 1024 * 1024;
break;
case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX:
highFrequencyHeapGrowthMax = value / 100.0;
MOZ_ASSERT(highFrequencyHeapGrowthMax / 0.85 > 1.0);
break;
case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MIN:
highFrequencyHeapGrowthMin = value / 100.0;
MOZ_ASSERT(highFrequencyHeapGrowthMin / 0.85 > 1.0);
break;
case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
lowFrequencyHeapGrowth = value / 100.0;
MOZ_ASSERT(lowFrequencyHeapGrowth / 0.9 > 1.0);
break;
case JSGC_DYNAMIC_HEAP_GROWTH:
dynamicHeapGrowth = value;
break;
case JSGC_DYNAMIC_MARK_SLICE:
dynamicMarkSlice = value;
break;
case JSGC_ALLOCATION_THRESHOLD:
allocThreshold = value * 1024 * 1024;
break;
case JSGC_DECOMMIT_THRESHOLD:
decommitThreshold = value * 1024 * 1024;
break;
case JSGC_MIN_EMPTY_CHUNK_COUNT:
minEmptyChunkCount = value;
if (minEmptyChunkCount > maxEmptyChunkCount)
maxEmptyChunkCount = minEmptyChunkCount;
break;
case JSGC_MAX_EMPTY_CHUNK_COUNT:
maxEmptyChunkCount = value;
if (minEmptyChunkCount > maxEmptyChunkCount)
minEmptyChunkCount = maxEmptyChunkCount;
break;
default:
JS_ASSERT(key == JSGC_MODE);
case JSGC_MODE:
mode = JSGCMode(value);
JS_ASSERT(mode == JSGC_MODE_GLOBAL ||
mode == JSGC_MODE_COMPARTMENT ||
mode == JSGC_MODE_INCREMENTAL);
return;
break;
default:
tunables.setParameter(key, value);
}
}
void
GCSchedulingTunables::setParameter(JSGCParamKey key, uint32_t value)
{
switch (key) {
case JSGC_MAX_BYTES:
gcMaxBytes_ = value;
break;
case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
highFrequencyThresholdUsec_ = value * PRMJ_USEC_PER_MSEC;
break;
case JSGC_HIGH_FREQUENCY_LOW_LIMIT:
highFrequencyLowLimitBytes_ = value * 1024 * 1024;
if (highFrequencyLowLimitBytes_ >= highFrequencyHighLimitBytes_)
highFrequencyHighLimitBytes_ = highFrequencyLowLimitBytes_ + 1;
JS_ASSERT(highFrequencyHighLimitBytes_ > highFrequencyLowLimitBytes_);
break;
case JSGC_HIGH_FREQUENCY_HIGH_LIMIT:
MOZ_ASSERT(value > 0);
highFrequencyHighLimitBytes_ = value * 1024 * 1024;
if (highFrequencyHighLimitBytes_ <= highFrequencyLowLimitBytes_)
highFrequencyLowLimitBytes_ = highFrequencyHighLimitBytes_ - 1;
JS_ASSERT(highFrequencyHighLimitBytes_ > highFrequencyLowLimitBytes_);
break;
case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX:
highFrequencyHeapGrowthMax_ = value / 100.0;
MOZ_ASSERT(highFrequencyHeapGrowthMax_ / 0.85 > 1.0);
break;
case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MIN:
highFrequencyHeapGrowthMin_ = value / 100.0;
MOZ_ASSERT(highFrequencyHeapGrowthMin_ / 0.85 > 1.0);
break;
case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
lowFrequencyHeapGrowth_ = value / 100.0;
MOZ_ASSERT(lowFrequencyHeapGrowth_ / 0.9 > 1.0);
break;
case JSGC_DYNAMIC_HEAP_GROWTH:
dynamicHeapGrowthEnabled_ = value;
break;
case JSGC_DYNAMIC_MARK_SLICE:
dynamicMarkSliceEnabled_ = value;
break;
case JSGC_ALLOCATION_THRESHOLD:
gcZoneAllocThresholdBase_ = value * 1024 * 1024;
break;
case JSGC_MIN_EMPTY_CHUNK_COUNT:
minEmptyChunkCount_ = value;
if (minEmptyChunkCount_ > maxEmptyChunkCount_)
maxEmptyChunkCount_ = minEmptyChunkCount_;
JS_ASSERT(maxEmptyChunkCount_ >= minEmptyChunkCount_);
break;
case JSGC_MAX_EMPTY_CHUNK_COUNT:
maxEmptyChunkCount_ = value;
if (minEmptyChunkCount_ > maxEmptyChunkCount_)
minEmptyChunkCount_ = maxEmptyChunkCount_;
JS_ASSERT(maxEmptyChunkCount_ >= minEmptyChunkCount_);
break;
default:
MOZ_CRASH("Unknown GC parameter.");
}
}
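The two limit cases above keep the pair strictly ordered no matter which side is set first; for example (hypothetical values, starting from the defaults of 100 MiB and 500 MiB):

    tunables.setParameter(JSGC_HIGH_FREQUENCY_LOW_LIMIT, 600);
    // low is now 600 MiB, crossing high (500 MiB), so high is bumped
    // to 600 MiB + 1 byte.
    tunables.setParameter(JSGC_HIGH_FREQUENCY_HIGH_LIMIT, 200);
    // high is now 200 MiB, so low is pulled down to 200 MiB - 1 byte.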
@@ -1478,7 +1480,7 @@ GCRuntime::getParameter(JSGCParamKey key)
{
switch (key) {
case JSGC_MAX_BYTES:
return uint32_t(maxBytes);
return uint32_t(tunables.gcMaxBytes());
case JSGC_MAX_MALLOC_BYTES:
return maxMallocBytes;
case JSGC_BYTES:
@@ -1494,27 +1496,27 @@ GCRuntime::getParameter(JSGCParamKey key)
case JSGC_MARK_STACK_LIMIT:
return marker.maxCapacity();
case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
return highFrequencyTimeThreshold;
return tunables.highFrequencyThresholdUsec();
case JSGC_HIGH_FREQUENCY_LOW_LIMIT:
return highFrequencyLowLimitBytes / 1024 / 1024;
return tunables.highFrequencyLowLimitBytes() / 1024 / 1024;
case JSGC_HIGH_FREQUENCY_HIGH_LIMIT:
return highFrequencyHighLimitBytes / 1024 / 1024;
return tunables.highFrequencyHighLimitBytes() / 1024 / 1024;
case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX:
return uint32_t(highFrequencyHeapGrowthMax * 100);
return uint32_t(tunables.highFrequencyHeapGrowthMax() * 100);
case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MIN:
return uint32_t(highFrequencyHeapGrowthMin * 100);
return uint32_t(tunables.highFrequencyHeapGrowthMin() * 100);
case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
return uint32_t(lowFrequencyHeapGrowth * 100);
return uint32_t(tunables.lowFrequencyHeapGrowth() * 100);
case JSGC_DYNAMIC_HEAP_GROWTH:
return dynamicHeapGrowth;
return tunables.isDynamicHeapGrowthEnabled();
case JSGC_DYNAMIC_MARK_SLICE:
return dynamicMarkSlice;
return tunables.isDynamicMarkSliceEnabled();
case JSGC_ALLOCATION_THRESHOLD:
return allocThreshold / 1024 / 1024;
return tunables.gcZoneAllocThresholdBase() / 1024 / 1024;
case JSGC_MIN_EMPTY_CHUNK_COUNT:
return minEmptyChunkCount;
return tunables.minEmptyChunkCount();
case JSGC_MAX_EMPTY_CHUNK_COUNT:
return maxEmptyChunkCount;
return tunables.maxEmptyChunkCount();
default:
JS_ASSERT(key == JSGC_NUMBER);
return uint32_t(number);
@@ -1721,71 +1723,84 @@ GCRuntime::onTooMuchMalloc()
mallocGCTriggered = triggerGC(JS::gcreason::TOO_MUCH_MALLOC);
}
size_t
GCRuntime::computeTriggerBytes(double growthFactor, size_t lastBytes, JSGCInvocationKind gckind)
/* static */ double
ZoneHeapThreshold::computeZoneHeapGrowthFactorForHeapSize(size_t lastBytes,
const GCSchedulingTunables &tunables,
const GCSchedulingState &state)
{
size_t base = gckind == GC_SHRINK ? lastBytes : Max(lastBytes, allocThreshold);
double trigger = double(base) * growthFactor;
return size_t(Min(double(maxBytes), trigger));
}
if (!tunables.isDynamicHeapGrowthEnabled())
return 3.0;
double
GCRuntime::computeHeapGrowthFactor(size_t lastBytes)
{
/*
* The heap growth factor depends on the heap size after a GC and the GC frequency.
* For low frequency GCs (more than 1sec between GCs) we let the heap grow to 150%.
* For high frequency GCs we let the heap grow depending on the heap size:
* lastBytes < highFrequencyLowLimit: 300%
* lastBytes > highFrequencyHighLimit: 150%
* otherwise: linear interpolation between 150% and 300% based on lastBytes
*/
// For small zones, our collection heuristics do not matter much: favor
// something simple in this case.
if (lastBytes < 1 * 1024 * 1024)
return tunables.lowFrequencyHeapGrowth();
double factor;
if (!dynamicHeapGrowth) {
factor = 3.0;
} else if (lastBytes < 1 * 1024 * 1024) {
factor = lowFrequencyHeapGrowth;
} else {
JS_ASSERT(highFrequencyHighLimitBytes > highFrequencyLowLimitBytes);
if (highFrequencyGC) {
if (lastBytes <= highFrequencyLowLimitBytes) {
factor = highFrequencyHeapGrowthMax;
} else if (lastBytes >= highFrequencyHighLimitBytes) {
factor = highFrequencyHeapGrowthMin;
} else {
double k = (highFrequencyHeapGrowthMin - highFrequencyHeapGrowthMax)
/ (double)(highFrequencyHighLimitBytes - highFrequencyLowLimitBytes);
factor = (k * (lastBytes - highFrequencyLowLimitBytes)
+ highFrequencyHeapGrowthMax);
JS_ASSERT(factor <= highFrequencyHeapGrowthMax
&& factor >= highFrequencyHeapGrowthMin);
}
} else {
factor = lowFrequencyHeapGrowth;
}
}
// If GCs are not triggering in rapid succession, use a lower threshold so
// that we will collect garbage sooner.
if (!state.inHighFrequencyGCMode())
return tunables.lowFrequencyHeapGrowth();
// The heap growth factor depends on the heap size after a GC and the GC
// frequency. For low frequency GCs (more than 1sec between GCs) we let
// the heap grow to 150%. For high frequency GCs we let the heap grow
// depending on the heap size:
// lastBytes < highFrequencyLowLimit: 300%
// lastBytes > highFrequencyHighLimit: 150%
// otherwise: linear interpolation between 300% and 150% based on lastBytes
// Use shorter names to make the operation comprehensible.
double minRatio = tunables.highFrequencyHeapGrowthMin();
double maxRatio = tunables.highFrequencyHeapGrowthMax();
double lowLimit = tunables.highFrequencyLowLimitBytes();
double highLimit = tunables.highFrequencyHighLimitBytes();
if (lastBytes <= lowLimit)
return maxRatio;
if (lastBytes >= highLimit)
return minRatio;
double factor = maxRatio - ((maxRatio - minRatio) * ((lastBytes - lowLimit) /
(highLimit - lowLimit)));
JS_ASSERT(factor >= minRatio);
JS_ASSERT(factor <= maxRatio);
return factor;
}
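A worked example of the interpolation above, using the default tunables (illustrative arithmetic, not code from this commit):

    // minRatio = 1.5, maxRatio = 3.0, lowLimit = 100 MiB, highLimit = 500 MiB.
    // A zone holding lastBytes = 300 MiB after its last GC is halfway between
    // the limits, so its growth factor lands halfway between the ratios:
    //   factor = 3.0 - (3.0 - 1.5) * (300 - 100) / (500 - 100)
    //          = 3.0 - 1.5 * 0.5
    //          = 2.25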
void
Zone::setGCLastBytes(size_t lastBytes, JSGCInvocationKind gckind)
/* static */ size_t
ZoneHeapThreshold::computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
JSGCInvocationKind gckind,
const GCSchedulingTunables &tunables)
{
GCRuntime &gc = runtimeFromMainThread()->gc;
gcHeapGrowthFactor = gc.computeHeapGrowthFactor(lastBytes);
gcTriggerBytes = gc.computeTriggerBytes(gcHeapGrowthFactor, lastBytes, gckind);
size_t base = gckind == GC_SHRINK
? lastBytes
: Max(lastBytes, tunables.gcZoneAllocThresholdBase());
double trigger = double(base) * growthFactor;
return size_t(Min(double(tunables.gcMaxBytes()), trigger));
}
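Continuing that example through computeZoneTriggerBytes (again illustrative only):

    // For a non-shrinking GC: base = Max(300 MiB, 30 MiB threshold base) = 300 MiB,
    // so trigger = Min(gcMaxBytes, 300 MiB * 2.25) = 675 MiB, gcMaxBytes permitting.
    // The zone becomes eligible for another GC once usage.gcBytes() reaches that.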
void
Zone::reduceGCTriggerBytes(size_t amount)
ZoneHeapThreshold::updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
const GCSchedulingTunables &tunables,
const GCSchedulingState &state)
{
gcHeapGrowthFactor_ = computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);
gcTriggerBytes_ = computeZoneTriggerBytes(gcHeapGrowthFactor_, lastBytes, gckind, tunables);
}
void
ZoneHeapThreshold::updateForRemovedArena(const GCSchedulingTunables &tunables)
{
double amount = ArenaSize * gcHeapGrowthFactor_;
JS_ASSERT(amount > 0);
JS_ASSERT(gcTriggerBytes >= amount);
GCRuntime &gc = runtimeFromAnyThread()->gc;
if (gcTriggerBytes - amount < gc.allocationThreshold() * gcHeapGrowthFactor)
JS_ASSERT(gcTriggerBytes_ >= amount);
if (gcTriggerBytes_ - amount < tunables.gcZoneAllocThresholdBase() * gcHeapGrowthFactor_)
return;
gcTriggerBytes -= amount;
gcTriggerBytes_ -= amount;
}
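For scale, a worked note (assuming the usual 4 KiB SpiderMonkey ArenaSize):

    // amount = ArenaSize * gcHeapGrowthFactor_ = 4 KiB * 3.0 = 12 KiB per arena,
    // and the early return keeps gcTriggerBytes_ from sinking below
    // gcZoneAllocThresholdBase() (30 MiB default) * 3.0 = 90 MiB.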
Allocator::Allocator(Zone *zone)
@@ -2187,7 +2202,7 @@ ArenaLists::refillFreeList(ThreadSafeContext *cx, AllocKind thingKind)
bool runGC = cx->allowGC() && allowGC &&
cx->asJSContext()->runtime()->gc.incrementalState != NO_INCREMENTAL &&
zone->usage.gcBytes() > zone->gcTriggerBytes;
zone->usage.gcBytes() > zone->threshold.gcTriggerBytes();
JS_ASSERT_IF(cx->isJSContext() && allowGC,
!cx->asJSContext()->runtime()->currentThreadHasExclusiveAccess());
@@ -2434,9 +2449,9 @@ GCRuntime::maybeGC(Zone *zone)
return;
}
double factor = highFrequencyGC ? 0.85 : 0.9;
double factor = schedulingState.inHighFrequencyGCMode() ? 0.85 : 0.9;
if (zone->usage.gcBytes() > 1024 * 1024 &&
zone->usage.gcBytes() >= factor * zone->gcTriggerBytes &&
zone->usage.gcBytes() >= factor * zone->threshold.gcTriggerBytes() &&
incrementalState == NO_INCREMENTAL &&
!isBackgroundSweeping())
{
@@ -4513,11 +4528,10 @@ GCRuntime::endSweepPhase(JSGCInvocationKind gckind, bool lastGC)
}
uint64_t currentTime = PRMJ_Now();
highFrequencyGC = dynamicHeapGrowth && lastGCTime &&
lastGCTime + highFrequencyTimeThreshold * PRMJ_USEC_PER_MSEC > currentTime;
schedulingState.updateHighFrequencyMode(lastGCTime, currentTime, tunables);
for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
zone->setGCLastBytes(zone->usage.gcBytes(), gckind);
zone->threshold.updateAfterGC(zone->usage.gcBytes(), gckind, tunables, schedulingState);
if (zone->isCollecting()) {
JS_ASSERT(zone->isGCFinished());
zone->setGCState(Zone::NoGC);
@@ -4933,7 +4947,7 @@ GCRuntime::budgetIncrementalGC(int64_t *budget)
bool reset = false;
for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
if (zone->usage.gcBytes() >= zone->gcTriggerBytes) {
if (zone->usage.gcBytes() >= zone->threshold.gcTriggerBytes()) {
*budget = SliceBudget::Unlimited;
stats.nonincremental("allocation trigger");
}
@@ -5214,7 +5228,7 @@ GCRuntime::gcSlice(JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64
int64_t budget;
if (millis)
budget = SliceBudget::TimeBudget(millis);
else if (highFrequencyGC && dynamicMarkSlice)
else if (schedulingState.inHighFrequencyGCMode() && tunables.isDynamicMarkSliceEnabled())
budget = sliceBudget * IGC_MARK_SLICE_MULTIPLIER;
else
budget = sliceBudget;
@@ -5432,13 +5446,10 @@ js::NewCompartment(JSContext *cx, Zone *zone, JSPrincipals *principals,
zoneHolder.reset(zone);
if (!zone->init())
return nullptr;
zone->setGCLastBytes(8192, GC_NORMAL);
const JSPrincipals *trusted = rt->trustedPrincipals();
zone->isSystem = principals && principals == trusted;
bool isSystem = principals && principals == trusted;
if (!zone->init(isSystem))
return nullptr;
}
ScopedJSDeletePtr<JSCompartment> compartment(cx->new_<JSCompartment>(zone, options));


@@ -291,7 +291,7 @@ JSRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
SetMarkStackLimit(this, atoi(size));
ScopedJSDeletePtr<Zone> atomsZone(new_<Zone>(this));
if (!atomsZone || !atomsZone->init())
if (!atomsZone || !atomsZone->init(true))
return false;
JS::CompartmentOptions options;
@@ -303,8 +303,6 @@ JSRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
atomsZone->compartments.append(atomsCompartment.get());
atomsCompartment->isSystem = true;
atomsZone->isSystem = true;
atomsZone->setGCLastBytes(8192, GC_NORMAL);
atomsZone.forget();
this->atomsCompartment_ = atomsCompartment.forget();