Bug 1160163 - Refactor arena decommit so we don't have to pass dummy thing kind to allocateArena() r=terrence

This commit is contained in:
Jon Coppeard 2015-05-06 09:45:10 +01:00
parent 97c6239c09
commit 33a19409f6
2 changed files with 65 additions and 34 deletions

View File

@ -1024,19 +1024,20 @@ struct Chunk
ArenaHeader* allocateArena(JSRuntime* rt, JS::Zone* zone, AllocKind kind,
const AutoLockGC& lock);
enum ArenaDecommitState { IsCommitted = false, IsDecommitted = true };
void releaseArena(JSRuntime* rt, ArenaHeader* aheader, const AutoLockGC& lock,
ArenaDecommitState state = IsCommitted);
void releaseArena(JSRuntime* rt, ArenaHeader* aheader, const AutoLockGC& lock);
void recycleArena(ArenaHeader* aheader, SortedArenaList& dest, AllocKind thingKind,
size_t thingsPerArena);
static Chunk* allocate(JSRuntime* rt);
bool decommitOneFreeArena(JSRuntime* rt, AutoLockGC& lock);
void decommitAllArenasWithoutUnlocking(const AutoLockGC& lock);
void decommitAllArenas(JSRuntime* rt);
static Chunk* allocate(JSRuntime* rt);
private:
inline void init(JSRuntime* rt);
void decommitAllArenas(JSRuntime* rt);
/* Search for a decommitted arena to allocate. */
unsigned findDecommittedArenaOffset();
ArenaHeader* fetchNextDecommittedArena();
@ -1044,6 +1045,9 @@ struct Chunk
void addArenaToFreeList(JSRuntime* rt, ArenaHeader* aheader);
void addArenaToDecommittedList(JSRuntime* rt, const ArenaHeader* aheader);
void updateChunkListAfterAlloc(JSRuntime* rt, const AutoLockGC& lock);
void updateChunkListAfterFree(JSRuntime* rt, const AutoLockGC& lock);
public:
/* Unlink and return the freeArenasHead. */
inline ArenaHeader* fetchNextFreeArena(JSRuntime* rt);

View File

@ -892,10 +892,7 @@ Chunk::allocateArena(JSRuntime* rt, Zone* zone, AllocKind thingKind, const AutoL
? fetchNextFreeArena(rt)
: fetchNextDecommittedArena();
aheader->init(zone, thingKind);
if (MOZ_UNLIKELY(!hasAvailableArenas())) {
rt->gc.availableChunks(lock).remove(this);
rt->gc.fullChunks(lock).push(this);
}
updateChunkListAfterAlloc(rt, lock);
return aheader;
}
@ -932,19 +929,64 @@ Chunk::recycleArena(ArenaHeader* aheader, SortedArenaList& dest, AllocKind thing
}
void
Chunk::releaseArena(JSRuntime* rt, ArenaHeader* aheader, const AutoLockGC& lock,
ArenaDecommitState state /* = IsCommitted */)
Chunk::releaseArena(JSRuntime* rt, ArenaHeader* aheader, const AutoLockGC& lock)
{
MOZ_ASSERT(aheader->allocated());
MOZ_ASSERT(!aheader->hasDelayedMarking);
if (state == IsCommitted) {
aheader->setAsNotAllocated();
addArenaToFreeList(rt, aheader);
} else {
addArenaToDecommittedList(rt, aheader);
aheader->setAsNotAllocated();
addArenaToFreeList(rt, aheader);
updateChunkListAfterFree(rt, lock);
}
bool
// Pick one committed free arena in this chunk and return its pages to the OS.
// Returns true if the pages were successfully marked unused; on failure the
// arena is put back on the committed free list. Note: the GC lock is dropped
// around the (potentially slow) MarkPagesUnused syscall, so callers must not
// rely on chunk state being unchanged across this call.
Chunk::decommitOneFreeArena(JSRuntime* rt, AutoLockGC& lock)
{
MOZ_ASSERT(info.numArenasFreeCommitted > 0);
// Temporarily treat the arena as allocated so no other code hands it out
// while we are decommitting it without holding the lock.
ArenaHeader* aheader = fetchNextFreeArena(rt);
updateChunkListAfterAlloc(rt, lock);
bool ok;
{
// Drop the GC lock for the OS page operation; reacquired on scope exit.
AutoUnlockGC unlock(lock);
ok = MarkPagesUnused(aheader->getArena(), ArenaSize);
}
// Re-file the arena according to whether the decommit actually succeeded.
if (ok)
addArenaToDecommittedList(rt, aheader);
else
addArenaToFreeList(rt, aheader);
updateChunkListAfterFree(rt, lock);
return ok;
}
void
// Decommit every committed free arena in this chunk while keeping the GC
// lock held throughout (unlike decommitOneFreeArena, which drops it).
// Arenas that are already decommitted or currently allocated are skipped.
// Failures from MarkPagesUnused are silently ignored: the arena simply
// stays committed and on the free list.
Chunk::decommitAllArenasWithoutUnlocking(const AutoLockGC& lock)
{
for (size_t i = 0; i < ArenasPerChunk; ++i) {
if (decommittedArenas.get(i) || arenas[i].aheader.allocated())
continue;
if (MarkPagesUnused(&arenas[i], ArenaSize)) {
// Keep the committed-free count and the decommitted bitmap in sync.
info.numArenasFreeCommitted--;
decommittedArenas.set(i);
}
}
}
void
// After an arena has been taken from this chunk: if that allocation used up
// the last free arena, migrate the chunk from the runtime's available-chunks
// pool to the full-chunks pool. Requires the GC lock (witnessed by `lock`).
Chunk::updateChunkListAfterAlloc(JSRuntime* rt, const AutoLockGC& lock)
{
if (MOZ_UNLIKELY(!hasAvailableArenas())) {
rt->gc.availableChunks(lock).remove(this);
rt->gc.fullChunks(lock).push(this);
}
}
void
Chunk::updateChunkListAfterFree(JSRuntime* rt, const AutoLockGC& lock)
{
if (info.numArenasFree == 1) {
rt->gc.fullChunks(lock).remove(this);
rt->gc.availableChunks(lock).push(this);
@ -3149,17 +3191,8 @@ void
GCRuntime::decommitAllWithoutUnlocking(const AutoLockGC& lock)
{
MOZ_ASSERT(emptyChunks(lock).count() == 0);
for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done(); chunk.next()) {
for (size_t i = 0; i < ArenasPerChunk; ++i) {
if (chunk->decommittedArenas.get(i) || chunk->arenas[i].aheader.allocated())
continue;
if (MarkPagesUnused(&chunk->arenas[i], ArenaSize)) {
chunk->info.numArenasFreeCommitted--;
chunk->decommittedArenas.set(i);
}
}
}
for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done(); chunk.next())
chunk->decommitAllArenasWithoutUnlocking(lock);
MOZ_ASSERT(availableChunks(lock).verify());
}
@ -3192,13 +3225,7 @@ GCRuntime::decommitArenas(AutoLockGC& lock)
// The arena list is not doubly-linked, so we have to work in the free
// list order and not in the natural order.
while (chunk->info.numArenasFreeCommitted) {
ArenaHeader* aheader = chunk->allocateArena(rt, nullptr, AllocKind::OBJECT0, lock);
bool ok;
{
AutoUnlockGC unlock(lock);
ok = MarkPagesUnused(aheader->getArena(), ArenaSize);
}
chunk->releaseArena(rt, aheader, lock, Chunk::ArenaDecommitState(ok));
bool ok = chunk->decommitOneFreeArena(rt, lock);
// FIXME Bug 1095620: add cancellation support when this becomes
// a ParallelTask.