Bug 988486 - Make more GCRuntime members private and add necessary accessors r=terrence

This commit is contained in:
Jon Coppeard 2014-07-08 14:05:24 +01:00
parent e9933ba3c1
commit c375daeb23
10 changed files with 151 additions and 103 deletions

View File

@ -247,7 +247,7 @@ GC(JSContext *cx, unsigned argc, jsval *vp)
}
#ifndef JS_MORE_DETERMINISTIC
size_t preBytes = cx->runtime()->gc.bytes;
size_t preBytes = cx->runtime()->gc.bytesAllocated();
#endif
if (compartment)
@ -259,7 +259,7 @@ GC(JSContext *cx, unsigned argc, jsval *vp)
char buf[256] = { '\0' };
#ifndef JS_MORE_DETERMINISTIC
JS_snprintf(buf, sizeof(buf), "before %lu, after %lu\n",
(unsigned long)preBytes, (unsigned long)cx->runtime()->gc.bytes);
(unsigned long)preBytes, (unsigned long)cx->runtime()->gc.bytesAllocated());
#endif
JSString *str = JS_NewStringCopyZ(cx, buf);
if (!str)

View File

@ -34,6 +34,43 @@ class MarkingValidator;
struct AutoPrepareForTracing;
class AutoTraceSession;
/*
 * A pool of empty GC chunks kept on a singly linked list so they can be
 * reused or eventually returned to the system. Locking requirements are
 * documented per method below.
 */
class ChunkPool
{
Chunk *emptyChunkListHead;  // Head of the linked list of empty chunks.
size_t emptyCount;          // Number of chunks currently on the list.
public:
ChunkPool()
: emptyChunkListHead(nullptr),
emptyCount(0)
{}
// Return the number of empty chunks currently held by the pool.
size_t getEmptyCount() const {
return emptyCount;
}
/* Must be called with the GC lock taken. */
inline Chunk *get(JSRuntime *rt);
/* Must be called either during the GC or with the GC lock taken. */
inline void put(Chunk *chunk);
/* Must be called with the GC lock taken. */
void expireAndFree(JSRuntime *rt, bool releaseAll);
/*
 * Cursor over the pool's chunk list. Supports removing the current
 * chunk (removeAndPopFront) while preserving the order of the rest.
 */
class Enum {
public:
Enum(ChunkPool &pool) : pool(pool), chunkp(&pool.emptyChunkListHead) {}
// True once the cursor has moved past the last chunk.
bool empty() { return !*chunkp; }
Chunk *front();
inline void popFront();
inline void removeAndPopFront();
private:
ChunkPool &pool;   // Pool being enumerated.
Chunk **chunkp;    // Link slot that holds the current chunk.
};
};
struct ConservativeGCData
{
/*
@ -152,6 +189,10 @@ class GCRuntime
void setDeterministic(bool enable);
#endif
// Current number of bytes allocated in the GC heap (the |bytes| counter).
size_t bytesAllocated() { return bytes; }
// Configured maximum GC heap size; compared against bytesAllocated() by
// allocation and minor-collection paths elsewhere in this patch.
size_t maxBytesAllocated() { return maxBytes; }
// Configured malloc-bytes threshold (used e.g. by Zone::setGCMaxMallocBytes).
// Fix: this accessor previously returned |maxBytes| — a copy-paste of
// maxBytesAllocated() above — instead of the |maxMallocBytes| member it is
// named for, handing callers the wrong limit.
size_t maxMallocBytesAllocated() { return maxMallocBytes; }
public:
// Internal public interface
js::gc::State state() { return incrementalState; }
@ -291,9 +332,14 @@ class GCRuntime
marker.setGCMode(mode);
}
inline void updateOnChunkFree(const ChunkInfo &info);
inline void updateOnFreeArenaAlloc(const ChunkInfo &info);
inline void updateOnArenaFree(const ChunkInfo &info);
inline void updateBytesAllocated(ptrdiff_t size);
GCChunkSet::Range allChunks() { return chunkSet.all(); }
inline Chunk **getAvailableChunkList(Zone *zone);
void moveChunkToFreePool(Chunk *chunk);
bool hasChunk(Chunk *chunk) { return chunkSet.has(chunk); }
#ifdef JS_GC_ZEAL
void startVerifyPreBarriers();
@ -309,6 +355,16 @@ class GCRuntime
Chunk *pickChunk(Zone *zone, AutoMaybeStartBackgroundAllocation &maybeStartBackgroundAllocation);
inline void arenaAllocatedDuringGC(JS::Zone *zone, ArenaHeader *arena);
/*
* Return the list of chunks that can be released outside the GC lock.
* Must be called either during the GC or with the GC lock taken.
*/
Chunk *expireChunkPool(bool releaseAll);
void expireAndFreeChunkPool(bool releaseAll);
void freeChunkList(Chunk *chunkListHead);
void prepareToFreeChunk(ChunkInfo &info);
void releaseChunk(Chunk *chunk);
inline bool wantBackgroundAllocation() const;
bool initZeal();
@ -358,7 +414,7 @@ class GCRuntime
void markAllGrayReferences();
#endif
public: // Internal state, public for now
public:
JSRuntime *rt;
/* Embedders can use this zone however they wish. */
@ -369,6 +425,16 @@ class GCRuntime
js::gc::SystemPageAllocator pageAllocator;
#ifdef JSGC_GENERATIONAL
js::Nursery nursery;
js::gc::StoreBuffer storeBuffer;
#endif
js::gcstats::Statistics stats;
js::GCMarker marker;
private:
/*
* Set of all GC chunks with at least one allocated thing. The
* conservative GC uses it to quickly check if a possible GC thing points
@ -387,15 +453,6 @@ class GCRuntime
js::gc::Chunk *userAvailableChunkListHead;
js::gc::ChunkPool chunkPool;
#ifdef JSGC_GENERATIONAL
js::Nursery nursery;
js::gc::StoreBuffer storeBuffer;
#endif
js::gcstats::Statistics stats;
js::GCMarker marker;
js::RootedValueMap rootsHash;
/* This is updated by both the main and GC helper threads. */
@ -404,7 +461,6 @@ class GCRuntime
size_t maxBytes;
size_t maxMallocBytes;
private:
/*
* Number of the committed arenas in all GC chunks including empty chunks.
*/

View File

@ -834,13 +834,6 @@ struct Chunk
void decommitAllArenas(JSRuntime *rt);
/* Must be called with the GC lock taken. */
static inline void release(JSRuntime *rt, Chunk *chunk);
static inline void releaseList(JSRuntime *rt, Chunk *chunkListHead);
/* Must be called with the GC lock taken. */
inline void prepareToBeFreed(JSRuntime *rt);
/*
* Assuming that the info.prevp points to the next field of the previous
* chunk in a doubly-linked list, get that chunk.

View File

@ -85,7 +85,7 @@ js::IterateChunks(JSRuntime *rt, void *data, IterateChunkCallback chunkCallback)
{
AutoPrepareForTracing prep(rt, SkipAtoms);
for (js::GCChunkSet::Range r = rt->gc.chunkSet.all(); !r.empty(); r.popFront())
for (js::GCChunkSet::Range r = rt->gc.allChunks(); !r.empty(); r.popFront())
chunkCallback(rt, data, r.front());
}

View File

@ -884,7 +884,7 @@ js::Nursery::collect(JSRuntime *rt, JS::gcreason::Reason reason, TypeObjectList
// We ignore gcMaxBytes when allocating for minor collection. However, if we
// overflowed, we disable the nursery. The next time we allocate, we'll fail
// because gcBytes >= gcMaxBytes.
if (rt->gc.bytes >= rt->gc.maxBytes)
if (rt->gc.bytesAllocated() >= rt->gc.maxBytesAllocated())
disable();
TIME_END(total);

View File

@ -187,7 +187,7 @@ IsAddressableGCThing(JSRuntime *rt, uintptr_t w,
Chunk *chunk = Chunk::fromAddress(addr);
if (!rt->gc.chunkSet.has(chunk))
if (!rt->gc.hasChunk(chunk))
return CGCT_NOTCHUNK;
/*

View File

@ -531,7 +531,7 @@ Statistics::beginGC()
sccTimes.clearAndFree();
nonincrementalReason = nullptr;
preBytes = runtime->gc.bytes;
preBytes = runtime->gc.bytesAllocated();
}
void

View File

@ -49,7 +49,7 @@ JS::Zone::Zone(JSRuntime *rt)
JS_ASSERT(reinterpret_cast<JS::shadow::Zone *>(this) ==
static_cast<JS::shadow::Zone *>(this));
setGCMaxMallocBytes(rt->gc.maxMallocBytes * 0.9);
setGCMaxMallocBytes(rt->gc.maxMallocBytesAllocated() * 0.9);
}
Zone::~Zone()

View File

@ -656,12 +656,33 @@ ChunkPool::put(Chunk *chunk)
emptyCount++;
}
/*
 * Return the chunk under the cursor, or nullptr when the cursor has
 * reached the end of the pool's list.
 */
inline Chunk *
ChunkPool::Enum::front()
{
    // A non-null result implies the pool cannot be empty.
    JS_ASSERT_IF(*chunkp, pool.getEmptyCount() != 0);
    return *chunkp;
}
inline void
ChunkPool::Enum::popFront()
{
JS_ASSERT(!empty());
chunkp = &front()->info.next;
}
inline void
ChunkPool::Enum::removeAndPopFront()
{
JS_ASSERT(!empty());
*chunkp = front()->info.next;
--pool.emptyCount;
}
/* Must be called either during the GC or with the GC lock taken. */
Chunk *
ChunkPool::expire(JSRuntime *rt, bool releaseAll)
GCRuntime::expireChunkPool(bool releaseAll)
{
JS_ASSERT(this == &rt->gc.chunkPool);
/*
* Return old empty chunks to the system while preserving the order of
* other chunks in the list. This way, if the GC runs several times
@ -670,32 +691,30 @@ ChunkPool::expire(JSRuntime *rt, bool releaseAll)
*/
Chunk *freeList = nullptr;
int freeChunkCount = 0;
for (Chunk **chunkp = &emptyChunkListHead; *chunkp; ) {
JS_ASSERT(emptyCount);
Chunk *chunk = *chunkp;
for (ChunkPool::Enum e(chunkPool); !e.empty(); ) {
Chunk *chunk = e.front();
JS_ASSERT(chunk->unused());
JS_ASSERT(!rt->gc.chunkSet.has(chunk));
JS_ASSERT(!chunkSet.has(chunk));
JS_ASSERT(chunk->info.age <= MAX_EMPTY_CHUNK_AGE);
if (releaseAll || chunk->info.age == MAX_EMPTY_CHUNK_AGE ||
freeChunkCount++ > MAX_EMPTY_CHUNK_COUNT)
{
*chunkp = chunk->info.next;
--emptyCount;
chunk->prepareToBeFreed(rt);
e.removeAndPopFront();
prepareToFreeChunk(chunk->info);
chunk->info.next = freeList;
freeList = chunk;
} else {
/* Keep the chunk but increase its age. */
++chunk->info.age;
chunkp = &chunk->info.next;
e.popFront();
}
}
JS_ASSERT_IF(releaseAll, !emptyCount);
JS_ASSERT_IF(releaseAll, chunkPool.getEmptyCount() == 0);
return freeList;
}
static void
FreeChunkList(JSRuntime *rt, Chunk *chunkListHead)
void
GCRuntime::freeChunkList(Chunk *chunkListHead)
{
while (Chunk *chunk = chunkListHead) {
JS_ASSERT(!chunk->info.numArenasFreeCommitted);
@ -705,9 +724,9 @@ FreeChunkList(JSRuntime *rt, Chunk *chunkListHead)
}
void
ChunkPool::expireAndFree(JSRuntime *rt, bool releaseAll)
GCRuntime::expireAndFreeChunkPool(bool releaseAll)
{
FreeChunkList(rt, expire(rt, releaseAll));
freeChunkList(expireChunkPool(releaseAll));
}
/* static */ Chunk *
@ -722,29 +741,23 @@ Chunk::allocate(JSRuntime *rt)
}
/* Must be called with the GC lock taken. */
/* static */ inline void
Chunk::release(JSRuntime *rt, Chunk *chunk)
inline void
GCRuntime::releaseChunk(Chunk *chunk)
{
JS_ASSERT(chunk);
chunk->prepareToBeFreed(rt);
prepareToFreeChunk(chunk->info);
FreeChunk(rt, chunk);
}
inline void
GCRuntime::updateOnChunkFree(const ChunkInfo &info)
GCRuntime::prepareToFreeChunk(ChunkInfo &info)
{
JS_ASSERT(numArenasFreeCommitted >= info.numArenasFreeCommitted);
numArenasFreeCommitted -= info.numArenasFreeCommitted;
stats.count(gcstats::STAT_DESTROY_CHUNK);
}
inline void
Chunk::prepareToBeFreed(JSRuntime *rt)
{
rt->gc.updateOnChunkFree(info);
#ifdef DEBUG
/*
* Let FreeChunkList detect a missing prepareToBeFreed call before it
* Let FreeChunkList detect a missing prepareToFreeChunk call before it
* frees chunk.
*/
info.numArenasFreeCommitted = 0;
@ -788,19 +801,19 @@ Chunk::init(JSRuntime *rt)
/* The rest of info fields are initialized in pickChunk. */
}
static inline Chunk **
GetAvailableChunkList(Zone *zone)
inline Chunk **
GCRuntime::getAvailableChunkList(Zone *zone)
{
JSRuntime *rt = zone->runtimeFromAnyThread();
return zone->isSystem
? &rt->gc.systemAvailableChunkListHead
: &rt->gc.userAvailableChunkListHead;
? &systemAvailableChunkListHead
: &userAvailableChunkListHead;
}
inline void
Chunk::addToAvailableList(Zone *zone)
{
insertToAvailableList(GetAvailableChunkList(zone));
JSRuntime *rt = zone->runtimeFromAnyThread();
insertToAvailableList(rt->gc.getAvailableChunkList(zone));
}
inline void
@ -891,13 +904,20 @@ Chunk::fetchNextFreeArena(JSRuntime *rt)
return aheader;
}
/*
 * Adjust the GC heap byte counter by |size|: positive when an arena is
 * allocated, negative when one is released (see the allocateArena /
 * releaseArena call sites in this patch).
 */
void
GCRuntime::updateBytesAllocated(ptrdiff_t size)
{
// Guard against underflowing the unsigned counter on a negative delta.
JS_ASSERT_IF(size < 0, bytes >= size_t(-size));
bytes += size;
}
ArenaHeader *
Chunk::allocateArena(Zone *zone, AllocKind thingKind)
{
JS_ASSERT(hasAvailableArenas());
JSRuntime *rt = zone->runtimeFromAnyThread();
if (!rt->isHeapMinorCollecting() && rt->gc.bytes >= rt->gc.maxBytes) {
if (!rt->isHeapMinorCollecting() && rt->gc.bytesAllocated() >= rt->gc.maxBytesAllocated()) {
#ifdef JSGC_FJGENERATIONAL
// This is an approximation to the best test, which would check that
// this thread is currently promoting into the tenured area. I doubt
@ -916,7 +936,7 @@ Chunk::allocateArena(Zone *zone, AllocKind thingKind)
if (MOZ_UNLIKELY(!hasAvailableArenas()))
removeFromAvailableList();
rt->gc.bytes += ArenaSize;
rt->gc.updateBytesAllocated(ArenaSize);
zone->gcBytes += ArenaSize;
if (zone->gcBytes >= zone->gcTriggerBytes) {
@ -962,11 +982,11 @@ Chunk::releaseArena(ArenaHeader *aheader)
if (rt->gc.isBackgroundSweeping())
maybeLock.lock(rt);
JS_ASSERT(rt->gc.bytes >= ArenaSize);
JS_ASSERT(rt->gc.bytesAllocated() >= ArenaSize);
JS_ASSERT(zone->gcBytes >= ArenaSize);
if (rt->gc.isBackgroundSweeping())
zone->gcBytesAfterGC -= ArenaSize;
rt->gc.bytes -= ArenaSize;
rt->gc.updateBytesAllocated(-ArenaSize);
zone->gcBytes -= ArenaSize;
aheader->setAsNotAllocated();
@ -979,14 +999,22 @@ Chunk::releaseArena(ArenaHeader *aheader)
} else if (!unused()) {
JS_ASSERT(info.prevp);
} else {
rt->gc.chunkSet.remove(this);
JS_ASSERT(unused());
removeFromAvailableList();
JS_ASSERT(info.numArenasFree == ArenasPerChunk);
decommitAllArenas(rt);
rt->gc.chunkPool.put(this);
rt->gc.moveChunkToFreePool(this);
}
}
/*
 * Move a fully unused chunk out of the set of live chunks (chunkSet) and
 * park it in the empty-chunk pool for reuse or later release.
 */
void
GCRuntime::moveChunkToFreePool(Chunk *chunk)
{
JS_ASSERT(chunk->unused());
JS_ASSERT(chunkSet.has(chunk));
chunkSet.remove(chunk);
chunkPool.put(chunk);
}
inline bool
GCRuntime::wantBackgroundAllocation() const
{
@ -1030,7 +1058,7 @@ class js::gc::AutoMaybeStartBackgroundAllocation
Chunk *
GCRuntime::pickChunk(Zone *zone, AutoMaybeStartBackgroundAllocation &maybeStartBackgroundAllocation)
{
Chunk **listHeadp = GetAvailableChunkList(zone);
Chunk **listHeadp = getAvailableChunkList(zone);
Chunk *chunk = *listHeadp;
if (chunk)
return chunk;
@ -1058,7 +1086,7 @@ GCRuntime::pickChunk(Zone *zone, AutoMaybeStartBackgroundAllocation &maybeStartB
GCChunkSet::AddPtr p = chunkSet.lookupForAdd(chunk);
JS_ASSERT(!p);
if (!chunkSet.add(p, chunk)) {
Chunk::release(rt, chunk);
releaseChunk(chunk);
return nullptr;
}
@ -1072,14 +1100,14 @@ GCRuntime::pickChunk(Zone *zone, AutoMaybeStartBackgroundAllocation &maybeStartB
GCRuntime::GCRuntime(JSRuntime *rt) :
rt(rt),
systemZone(nullptr),
systemAvailableChunkListHead(nullptr),
userAvailableChunkListHead(nullptr),
#ifdef JSGC_GENERATIONAL
nursery(rt),
storeBuffer(rt, nursery),
#endif
stats(rt),
marker(rt),
systemAvailableChunkListHead(nullptr),
userAvailableChunkListHead(nullptr),
bytes(0),
maxBytes(0),
maxMallocBytes(0),
@ -1328,11 +1356,11 @@ GCRuntime::finish()
userAvailableChunkListHead = nullptr;
if (chunkSet.initialized()) {
for (GCChunkSet::Range r(chunkSet.all()); !r.empty(); r.popFront())
Chunk::release(rt, r.front());
releaseChunk(r.front());
chunkSet.clear();
}
chunkPool.expireAndFree(rt, true);
expireAndFreeChunkPool(true);
if (rootsHash.initialized())
rootsHash.clear();
@ -2524,9 +2552,9 @@ GCRuntime::expireChunksAndArenas(bool shouldShrink)
rt->threadPool.pruneChunkCache();
#endif
if (Chunk *toFree = chunkPool.expire(rt, shouldShrink)) {
if (Chunk *toFree = expireChunkPool(shouldShrink)) {
AutoUnlockGC unlock(rt);
FreeChunkList(rt, toFree);
freeChunkList(toFree);
}
if (shouldShrink)

View File

@ -73,35 +73,6 @@ enum State {
INVALID
};
/*
 * Pool of empty GC chunks held on a singly linked list. This is the
 * pre-patch declaration (being removed by this commit in favor of the
 * version declared alongside GCRuntime).
 */
class ChunkPool {
Chunk *emptyChunkListHead;  // Head of the linked list of empty chunks.
size_t emptyCount;          // Number of chunks currently on the list.
public:
ChunkPool()
: emptyChunkListHead(nullptr),
emptyCount(0) { }
// Return the number of empty chunks currently held by the pool.
size_t getEmptyCount() const {
return emptyCount;
}
/* Must be called with the GC lock taken. */
inline Chunk *get(JSRuntime *rt);
/* Must be called either during the GC or with the GC lock taken. */
inline void put(Chunk *chunk);
/*
 * Return the list of chunks that can be released outside the GC lock.
 * Must be called either during the GC or with the GC lock taken.
 */
Chunk *expire(JSRuntime *rt, bool releaseAll);
/* Must be called with the GC lock taken. */
void expireAndFree(JSRuntime *rt, bool releaseAll);
};
static inline JSGCTraceKind
MapAllocToTraceKind(AllocKind kind)
{