Bug 1074961 - Part 18: Use a ChunkPool for full chunks and get rid of chunkSet; r=sfink

--HG--
extra : rebase_source : b63841c680f10a758accd7a132338aa2e81e01b9
This commit is contained in:
Terrence Cole 2014-11-20 04:08:18 -08:00
parent 67e5da8c7b
commit 5255880de7
5 changed files with 82 additions and 82 deletions

View File

@ -250,6 +250,39 @@ struct Callback {
template<typename F>
class CallbackVector : public Vector<Callback<F>, 4, SystemAllocPolicy> {};
template <typename T, typename Iter0, typename Iter1>
class ChainedIter
{
    Iter0 iter0_;
    Iter1 iter1_;

  public:
    // Presents two iterators as one sequence: every item of |iter0| is
    // produced first, followed by every item of |iter1|.
    ChainedIter(const Iter0 &iter0, const Iter1 &iter1)
      : iter0_(iter0), iter1_(iter1)
    {}

    // The chain is exhausted only once both underlying iterators are.
    bool done() const { return iter0_.done() && iter1_.done(); }

    void next() {
        MOZ_ASSERT(!done());
        if (iter0_.done()) {
            // First sequence exhausted; advance within the second.
            MOZ_ASSERT(!iter1_.done());
            iter1_.next();
            return;
        }
        iter0_.next();
    }

    T get() const {
        MOZ_ASSERT(!done());
        if (iter0_.done()) {
            MOZ_ASSERT(!iter1_.done());
            return iter1_.get();
        }
        return iter0_.get();
    }

    // Let the iterator stand in directly for the current item.
    operator T() const { return get(); }
    T operator->() const { return get(); }
};
class GCRuntime
{
public:
@ -475,13 +508,16 @@ class GCRuntime
inline void updateOnFreeArenaAlloc(const ChunkInfo &info);
inline void updateOnArenaFree(const ChunkInfo &info);
GCChunkSet::Range allChunks() { return chunkSet.all(); }
void moveChunkToFreePool(Chunk *chunk, const AutoLockGC &lock);
bool hasChunk(Chunk *chunk) { return chunkSet.has(chunk); }
ChunkPool &fullChunks(const AutoLockGC &lock) { return fullChunks_; }
ChunkPool &availableChunks(const AutoLockGC &lock) { return availableChunks_; }
ChunkPool &emptyChunks(const AutoLockGC &lock) { return emptyChunks_; }
const ChunkPool &fullChunks(const AutoLockGC &lock) const { return fullChunks_; }
const ChunkPool &availableChunks(const AutoLockGC &lock) const { return availableChunks_; }
const ChunkPool &emptyChunks(const AutoLockGC &lock) const { return emptyChunks_; }
typedef ChainedIter<Chunk *, ChunkPool::Iter, ChunkPool::Iter> NonEmptyChunksIter;
NonEmptyChunksIter allNonEmptyChunks() {
return NonEmptyChunksIter(ChunkPool::Iter(availableChunks_), ChunkPool::Iter(fullChunks_));
}
#ifdef JS_GC_ZEAL
void startVerifyPreBarriers();
@ -628,22 +664,23 @@ class GCRuntime
GCSchedulingState schedulingState;
private:
/*
* Set of all GC chunks with at least one allocated thing. The
* conservative GC uses it to quickly check if a possible GC thing points
* into an allocated chunk.
*/
js::GCChunkSet chunkSet;
// When empty, chunks reside in the emptyChunks pool and are re-used as
// needed or eventually expired if not re-used. The emptyChunks pool gets
// refilled from the background allocation task heuristically so that empty
// chunks should always be available for immediate allocation without syscalls.
ChunkPool emptyChunks_;
/*
* Doubly-linked lists of chunks from user and system compartments. The GC
* allocates its arenas from the corresponding list and when all arenas
* in the list head are taken, then the chunk is removed from the list.
* During the GC when all arenas in a chunk become free, that chunk is
* removed from the list and scheduled for release.
*/
ChunkPool availableChunks_;
ChunkPool emptyChunks_;
// Chunks which have had some, but not all, of their arenas allocated live
// in the available chunk lists. When all available arenas in a chunk have
// been allocated, the chunk is removed from the available list and moved
// to the fullChunks pool. During a GC, if all arenas are free, the chunk
// is moved back to the emptyChunks pool and scheduled for eventual
// release.
ChunkPool availableChunks_;
// When all arenas in a chunk are used, it is moved to the fullChunks pool
// so as to reduce the cost of operations on the available lists.
ChunkPool fullChunks_;
js::RootedValueMap rootsHash;

View File

@ -85,8 +85,8 @@ js::IterateChunks(JSRuntime *rt, void *data, IterateChunkCallback chunkCallback)
{
AutoPrepareForTracing prep(rt, SkipAtoms);
for (js::GCChunkSet::Range r = rt->gc.allChunks(); !r.empty(); r.popFront())
chunkCallback(rt, data, r.front());
for (auto chunk = rt->gc.allNonEmptyChunks(); !chunk.done(); chunk.next())
chunkCallback(rt, data, chunk);
}
void

View File

@ -189,8 +189,8 @@ gc::GCRuntime::startVerifyPreBarriers()
if (!IsIncrementalGCSafe(rt))
return;
for (GCChunkSet::Range r(chunkSet.all()); !r.empty(); r.popFront())
r.front()->bitmap.clear();
for (auto chunk = allNonEmptyChunks(); !chunk.done(); chunk.next())
chunk->bitmap.clear();
number++;

View File

@ -757,7 +757,8 @@ GCRuntime::expireEmptyChunkPool(bool shrinkBuffers, const AutoLockGC &lock)
iter.next();
MOZ_ASSERT(chunk->unused());
MOZ_ASSERT(!chunkSet.has(chunk));
MOZ_ASSERT(!fullChunks(lock).contains(chunk));
MOZ_ASSERT(!availableChunks(lock).contains(chunk));
if (freeChunkCount >= tunables.maxEmptyChunkCount() ||
(freeChunkCount >= tunables.minEmptyChunkCount() &&
(shrinkBuffers || chunk->info.age == MAX_EMPTY_CHUNK_AGE)))
@ -946,8 +947,10 @@ Chunk::allocateArena(JSRuntime *rt, Zone *zone, AllocKind thingKind, const AutoL
? fetchNextFreeArena(rt)
: fetchNextDecommittedArena();
aheader->init(zone, thingKind);
if (MOZ_UNLIKELY(!hasAvailableArenas()))
if (MOZ_UNLIKELY(!hasAvailableArenas())) {
rt->gc.availableChunks(lock).remove(this);
rt->gc.fullChunks(lock).push(this);
}
return aheader;
}
@ -998,28 +1001,20 @@ Chunk::releaseArena(JSRuntime *rt, ArenaHeader *aheader, const AutoLockGC &lock,
}
if (info.numArenasFree == 1) {
MOZ_ASSERT(!info.prev);
MOZ_ASSERT(!info.next);
rt->gc.fullChunks(lock).remove(this);
rt->gc.availableChunks(lock).push(this);
} else if (!unused()) {
MOZ_ASSERT(!rt->gc.fullChunks(lock).contains(this));
MOZ_ASSERT(rt->gc.availableChunks(lock).contains(this));
MOZ_ASSERT(!rt->gc.emptyChunks(lock).contains(this));
} else {
MOZ_ASSERT(unused());
rt->gc.availableChunks(lock).remove(this);
decommitAllArenas(rt);
rt->gc.moveChunkToFreePool(this, lock);
rt->gc.emptyChunks(lock).push(this);
}
}
// Transfer an entirely-unused chunk out of the allocated-chunk set and into
// the empty chunk pool, where it may be re-used or eventually expired.
// |lock| is not read; it is passed as evidence that the caller holds the GC
// lock, which the emptyChunks() accessor requires.
void
GCRuntime::moveChunkToFreePool(Chunk *chunk, const AutoLockGC &lock)
{
// Only chunks with no live arenas may be recycled.
MOZ_ASSERT(chunk->unused());
// The chunk must currently be tracked as an allocated chunk.
MOZ_ASSERT(chunkSet.has(chunk));
chunkSet.remove(chunk);
emptyChunks(lock).push(chunk);
}
inline bool
GCRuntime::wantBackgroundAllocation(const AutoLockGC &lock) const
{
@ -1028,7 +1023,7 @@ GCRuntime::wantBackgroundAllocation(const AutoLockGC &lock) const
// a small heap size (and therefore likely has a small growth rate).
return allocTask.enabled() &&
emptyChunks(lock).count() < tunables.minEmptyChunkCount() &&
chunkSet.count() >= 4;
(fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
}
void
@ -1083,24 +1078,13 @@ GCRuntime::pickChunk(const AutoLockGC &lock,
}
MOZ_ASSERT(chunk->unused());
MOZ_ASSERT(!chunkSet.has(chunk));
MOZ_ASSERT(!fullChunks(lock).contains(chunk));
if (wantBackgroundAllocation(lock))
maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt);
chunkAllocationSinceLastGC = true;
/*
* FIXME bug 583732 - chunk is newly allocated and cannot be present in
* the table so using ordinary lookupForAdd is suboptimal here.
*/
GCChunkSet::AddPtr p = chunkSet.lookupForAdd(chunk);
MOZ_ASSERT(!p);
if (!chunkSet.add(p, chunk)) {
releaseChunk(chunk);
return nullptr;
}
availableChunks(lock).push(chunk);
return chunk;
@ -1328,9 +1312,6 @@ GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
if (!lock)
return false;
if (!chunkSet.init(INITIAL_CHUNK_CAPACITY))
return false;
if (!rootsHash.init(256))
return false;
@ -1400,19 +1381,8 @@ GCRuntime::finish()
zones.clear();
for (ChunkPool::Iter iter(availableChunks_); !iter.done();) {
Chunk *chunk = iter.get();
iter.next();
MOZ_ASSERT(chunkSet.has(chunk));
availableChunks_.remove(chunk);
}
if (chunkSet.initialized()) {
for (GCChunkSet::Range r(chunkSet.all()); !r.empty(); r.popFront())
releaseChunk(r.front());
chunkSet.clear();
}
FreeChunkPool(rt, fullChunks_);
FreeChunkPool(rt, availableChunks_);
FreeChunkPool(rt, emptyChunks_);
if (rootsHash.initialized())
@ -1543,7 +1513,9 @@ GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC &lock)
case JSGC_UNUSED_CHUNKS:
return uint32_t(emptyChunks(lock).count());
case JSGC_TOTAL_CHUNKS:
return uint32_t(chunkSet.count() + emptyChunks(lock).count());
return uint32_t(fullChunks(lock).count() +
availableChunks(lock).count() +
emptyChunks(lock).count());
case JSGC_SLICE_TIME_BUDGET:
return uint32_t(sliceBudget > 0 ? sliceBudget : 0);
case JSGC_MARK_STACK_LIMIT:
@ -4298,14 +4270,14 @@ js::gc::MarkingValidator::nonIncrementalMark()
GCMarker *gcmarker = &gc->marker;
/* Save existing mark bits. */
for (GCChunkSet::Range r(gc->chunkSet.all()); !r.empty(); r.popFront()) {
ChunkBitmap *bitmap = &r.front()->bitmap;
for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next()) {
ChunkBitmap *bitmap = &chunk->bitmap;
ChunkBitmap *entry = js_new<ChunkBitmap>();
if (!entry)
return;
memcpy((void *)entry->bitmap, (void *)bitmap->bitmap, sizeof(bitmap->bitmap));
if (!map.putNew(r.front(), entry))
if (!map.putNew(chunk, entry))
return;
}
@ -4339,8 +4311,8 @@ js::gc::MarkingValidator::nonIncrementalMark()
MOZ_ASSERT(gcmarker->isDrained());
gcmarker->reset();
for (GCChunkSet::Range r(gc->chunkSet.all()); !r.empty(); r.popFront())
r.front()->bitmap.clear();
for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next())
chunk->bitmap.clear();
{
gcstats::AutoPhase ap1(gc->stats, gcstats::PHASE_MARK);
@ -4381,8 +4353,7 @@ js::gc::MarkingValidator::nonIncrementalMark()
}
/* Take a copy of the non-incremental mark state and restore the original. */
for (GCChunkSet::Range r(gc->chunkSet.all()); !r.empty(); r.popFront()) {
Chunk *chunk = r.front();
for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next()) {
ChunkBitmap *bitmap = &chunk->bitmap;
ChunkBitmap *entry = map.lookup(chunk)->value();
Swap(*entry, *bitmap);
@ -4406,8 +4377,7 @@ js::gc::MarkingValidator::validate()
if (!initialized)
return;
for (GCChunkSet::Range r(gc->chunkSet.all()); !r.empty(); r.popFront()) {
Chunk *chunk = r.front();
for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next()) {
BitmapMap::Ptr ptr = map.lookup(chunk);
if (!ptr)
continue; /* Allocated after we did the non-incremental mark. */

View File

@ -882,13 +882,6 @@ class ArenaLists
friend class GCRuntime;
};
/*
* Initial allocation size for data structures holding chunks is set to hold
* chunks with total capacity of 16MB to avoid buffer resizes during browser
* startup.
*/
const size_t INITIAL_CHUNK_CAPACITY = 16 * 1024 * 1024 / ChunkSize;
/* The number of GC cycles an empty chunk can survive before being released. */
const size_t MAX_EMPTY_CHUNK_AGE = 4;