Bug 1074961 - Use ChunkPool to manage the available chunk sets; r=sfink,r=jonco

Terrence Cole 2014-10-06 10:25:18 -07:00
parent ef7f85f2e2
commit 81d7d96da3
9 changed files with 302 additions and 386 deletions


@ -147,7 +147,6 @@ struct MovingTracer : JSTracer {
};
#endif
} /* namespace gc */
} /* namespace js */


@ -35,35 +35,6 @@ class MarkingValidator;
struct AutoPrepareForTracing;
class AutoTraceSession;
class ChunkPool
{
Chunk *head_;
size_t count_;
public:
ChunkPool() : head_(nullptr), count_(0) {}
size_t count() const { return count_; }
/* Must be called with the GC lock taken. */
inline Chunk *get(JSRuntime *rt);
/* Must be called either during the GC or with the GC lock taken. */
inline void put(Chunk *chunk);
class Enum {
public:
explicit Enum(ChunkPool &pool) : pool(pool), chunkp(&pool.head_) {}
bool empty() { return !*chunkp; }
Chunk *front();
inline void popFront();
inline void removeAndPopFront();
private:
ChunkPool &pool;
Chunk **chunkp;
};
};
/*
* Encapsulates all of the GC tunables. These are effectively constant and
* should only be modified by setParameter.
@ -213,12 +184,45 @@ struct Callback {
template<typename F>
class CallbackVector : public Vector<Callback<F>, 4, SystemAllocPolicy> {};
template <typename T, typename Iter0, typename Iter1>
class ChainedIter
{
Iter0 iter0_;
Iter1 iter1_;
public:
ChainedIter(const Iter0 &iter0, const Iter1 &iter1)
: iter0_(iter0), iter1_(iter1)
{}
bool done() const { return iter0_.done() && iter1_.done(); }
void next() {
MOZ_ASSERT(!done());
if (!iter0_.done()) {
iter0_.next();
} else {
MOZ_ASSERT(!iter1_.done());
iter1_.next();
}
}
T get() const {
MOZ_ASSERT(!done());
if (!iter0_.done())
return iter0_.get();
MOZ_ASSERT(!iter1_.done());
return iter1_.get();
}
operator T() const { return get(); }
T operator->() const { return get(); }
};
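// [Editor's sketch] A minimal illustration of the done()/next()/get()
// protocol that ChainedIter composes. PtrRangeIter and the sample arrays are
// hypothetical stand-ins for ChunkPool::Iter; none of this is in the patch.
#include <cassert>
#include <cstdio>

template <typename T>
class PtrRangeIter
{
    T *cur_;
    T *end_;
  public:
    PtrRangeIter(T *begin, T *end) : cur_(begin), end_(end) {}
    bool done() const { return cur_ == end_; }
    void next() { assert(!done()); ++cur_; }
    T *get() const { return cur_; }
};

int main()
{
    int avail[] = {1, 2};
    int full[] = {3};
    PtrRangeIter<int> first(avail, avail + 2), second(full, full + 1);
    // Prints 1 and 2 from the first range, then 3 from the second, just as
    // allNonEmptyChunks() walks availableChunks_ and then fullChunks_.
    typedef ChainedIter<int *, PtrRangeIter<int>, PtrRangeIter<int> > Iter;
    for (Iter it(first, second); !it.done(); it.next())
        printf("%d\n", *it.get());
    return 0;
}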
class GCRuntime
{
public:
explicit GCRuntime(JSRuntime *rt);
~GCRuntime();
bool init(uint32_t maxbytes, uint32_t maxNurseryBytes);
void finish();
inline int zeal();
inline bool upcomingZealousGC();
@ -229,7 +233,7 @@ class GCRuntime
void setMarkStackLimit(size_t limit);
void setParameter(JSGCParamKey key, uint32_t value);
uint32_t getParameter(JSGCParamKey key);
uint32_t getParameter(JSGCParamKey key, const AutoLockGC &lock);
bool isHeapBusy() { return heapState != js::Idle; }
bool isHeapMajorCollecting() { return heapState == js::MajorCollecting; }
@ -428,10 +432,16 @@ class GCRuntime
inline void updateOnFreeArenaAlloc(const ChunkInfo &info);
inline void updateOnArenaFree(const ChunkInfo &info);
GCChunkSet::Range allChunks() { return chunkSet.all(); }
inline Chunk **getAvailableChunkList(Zone *zone);
void moveChunkToFreePool(Chunk *chunk);
bool hasChunk(Chunk *chunk) { return chunkSet.has(chunk); }
ChunkPool &emptyChunks(const AutoLockGC &lock) { return emptyChunks_; }
ChunkPool &availableChunks(const AutoLockGC &lock) { return availableChunks_; }
ChunkPool &fullChunks(const AutoLockGC &lock) { return fullChunks_; }
const ChunkPool &emptyChunks(const AutoLockGC &lock) const { return emptyChunks_; }
const ChunkPool &availableChunks(const AutoLockGC &lock) const { return availableChunks_; }
const ChunkPool &fullChunks(const AutoLockGC &lock) const { return fullChunks_; }
typedef ChainedIter<Chunk *, ChunkPool::Iter, ChunkPool::Iter> NonEmptyChunksIter;
NonEmptyChunksIter allNonEmptyChunks() {
return NonEmptyChunksIter(ChunkPool::Iter(availableChunks_), ChunkPool::Iter(fullChunks_));
}
#ifdef JS_GC_ZEAL
void startVerifyPreBarriers();
@ -451,7 +461,7 @@ class GCRuntime
private:
// For ArenaLists::allocateFromArena()
friend class ArenaLists;
Chunk *pickChunk(Zone *zone, AutoMaybeStartBackgroundAllocation &maybeStartBGAlloc);
Chunk *pickChunk(const AutoLockGC &lock, AutoMaybeStartBackgroundAllocation &maybeStartBGAlloc);
inline void arenaAllocatedDuringGC(JS::Zone *zone, ArenaHeader *arena);
template <AllowGC allowGC>
@ -459,17 +469,12 @@ class GCRuntime
static void *refillFreeListOffMainThread(ExclusiveContext *cx, AllocKind thingKind);
static void *refillFreeListPJS(ForkJoinContext *cx, AllocKind thingKind);
/*
* Return the list of chunks that can be released outside the GC lock.
* Must be called either during the GC or with the GC lock taken.
*/
Chunk *expireChunkPool(bool shrinkBuffers, bool releaseAll);
void expireAndFreeChunkPool(bool releaseAll);
void freeChunkList(Chunk *chunkListHead);
// Return the list of chunks that can be released outside the GC lock.
ChunkPool expireEmptyChunks(bool shrinkBuffers, const AutoLockGC &lock);
void freeChunks(ChunkPool pool);
void prepareToFreeChunk(ChunkInfo &info);
void releaseChunk(Chunk *chunk);
inline bool wantBackgroundAllocation() const;
inline bool wantBackgroundAllocation(const AutoLockGC &lock) const;
bool initZeal();
void requestInterrupt(JS::gcreason::Reason reason);
@ -505,9 +510,8 @@ class GCRuntime
bool sweepPhase(SliceBudget &sliceBudget);
void endSweepPhase(bool lastGC);
void sweepZones(FreeOp *fop, bool lastGC);
void decommitArenasFromAvailableList(Chunk **availableListHeadp);
void decommitArenas();
void expireChunksAndArenas(bool shouldShrink);
void decommitArenas(const AutoLockGC &lock);
void expireChunksAndArenas(bool shouldShrink, const AutoLockGC &lock);
void sweepBackgroundThings();
void assertBackgroundSweepingFinished();
bool shouldCompact();
@ -560,23 +564,25 @@ class GCRuntime
GCSchedulingState schedulingState;
private:
/*
* Set of all GC chunks with at least one allocated thing. The
* conservative GC uses it to quickly check if a possible GC thing points
* into an allocated chunk.
*/
js::GCChunkSet chunkSet;
// Chunks may be empty, available (partially allocated), or fully utilized.
/*
* Doubly-linked lists of chunks from user and system compartments. The GC
* allocates its arenas from the corresponding list and when all arenas
* in the list head are taken, then the chunk is removed from the list.
* During the GC when all arenas in a chunk become free, that chunk is
* removed from the list and scheduled for release.
*/
js::gc::Chunk *systemAvailableChunkListHead;
js::gc::Chunk *userAvailableChunkListHead;
js::gc::ChunkPool emptyChunks;
// When empty, chunks reside in the emptyChunks pool and are re-used as
// needed or eventually expired if not re-used. The emptyChunks pool gets
// refilled from the background allocation task heuristically so that empty
// chunks should always be available for immediate allocation without syscalls.
ChunkPool emptyChunks_;
// Chunks which have had some, but not all, of their arenas allocated live
// in the available chunk lists. When all available arenas in a chunk have
// been allocated, the chunk is removed from the available list and moved
// to the fullChunks pool. During a GC, if all arenas are free, the chunk
// is moved back to the emptyChunks pool and scheduled for eventual
// release.
ChunkPool availableChunks_;
// When all arenas in a chunk are used, it is moved to the fullChunks pool
// so as to reduce the cost of operations on the available lists.
ChunkPool fullChunks_;
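// [Editor's sketch] The chunk lifecycle described above, reduced to a toy
// state machine. The enum and helpers are hypothetical: in the patch the
// state is implicit in which of the three pools currently holds the chunk.
enum class ToyChunkState { Empty, Available, Full };

// Chunk::allocateArena: allocating the last free arena moves a chunk from
// availableChunks_ to fullChunks_.
inline ToyChunkState
stateAfterArenaAlloc(bool anyFreeArenasLeft)
{
    return anyFreeArenasLeft ? ToyChunkState::Available : ToyChunkState::Full;
}

// Chunk::releaseArena: a full chunk becomes available again, and releasing
// the last live arena moves it to emptyChunks_ for re-use or expiry.
inline ToyChunkState
stateAfterArenaRelease(bool anyLiveArenasLeft)
{
    return anyLiveArenasLeft ? ToyChunkState::Available : ToyChunkState::Empty;
}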
js::RootedValueMap rootsHash;


@ -36,6 +36,7 @@ struct Runtime;
namespace js {
class AutoLockGC;
class FreeOp;
#ifdef DEBUG
@ -735,11 +736,63 @@ struct ChunkTrailer
static_assert(sizeof(ChunkTrailer) == 2 * sizeof(uintptr_t) + sizeof(uint64_t),
"ChunkTrailer size is incorrect.");
/* The chunk header (located at the end of the chunk to preserve arena alignment). */
struct ChunkInfo
class ChunkPool
{
Chunk *next;
Chunk **prevp;
Chunk *head_;
Chunk *tail_;
size_t count_;
public:
ChunkPool() : head_(nullptr), tail_(nullptr), count_(0) {}
size_t count() const { return count_; }
inline Chunk *pop();
inline Chunk *head() const { return head_; }
inline void push(Chunk *chunk);
inline void remove(Chunk *chunk);
class Iter {
public:
explicit Iter(ChunkPool &pool) : pool_(pool), current_(pool.head_) {}
bool done() const { return !current_; }
inline void next();
Chunk *get() const { return current_; }
operator Chunk *() const { return get(); }
Chunk *operator->() const { return get(); }
private:
ChunkPool &pool_;
Chunk *current_;
};
class ReverseIter {
public:
explicit ReverseIter(ChunkPool &pool) : pool_(pool), current_(pool.tail_) {}
bool done() const { return !current_; }
inline void prev();
inline void reset();
Chunk *get() const { return current_; }
operator Chunk *() const { return get(); }
Chunk *operator->() const { return get(); }
private:
ChunkPool &pool_;
Chunk *current_;
};
};
/* The chunk header (located at the end of the chunk to preserve arena alignment). */
class ChunkInfo
{
friend class ChunkPool;
Chunk *next;
Chunk *prev;
public:
bool belongsToAnyPool() const { return next || prev; }
void init() {
next = nullptr;
prev = nullptr;
age = 0;
}
/* Free arenas are linked together with aheader.next. */
ArenaHeader *freeArenasHead;
@ -942,7 +995,7 @@ struct Chunk
inline void insertToAvailableList(Chunk **insertPoint);
inline void removeFromAvailableList();
ArenaHeader *allocateArena(JS::Zone *zone, AllocKind kind);
ArenaHeader *allocateArena(JS::Zone *zone, AllocKind kind, const AutoLockGC &lock);
void releaseArena(ArenaHeader *aheader);
void recycleArena(ArenaHeader *aheader, SortedArenaList &dest, AllocKind thingKind,
@ -952,22 +1005,6 @@ struct Chunk
void decommitAllArenas(JSRuntime *rt);
/*
* Assuming that the info.prevp points to the next field of the previous
* chunk in a doubly-linked list, get that chunk.
*/
Chunk *getPrevious() {
MOZ_ASSERT(info.prevp);
return fromPointerToNext(info.prevp);
}
/* Get the chunk from a pointer to its info.next field. */
static Chunk *fromPointerToNext(Chunk **nextFieldPtr) {
uintptr_t addr = reinterpret_cast<uintptr_t>(nextFieldPtr);
MOZ_ASSERT((addr & ChunkMask) == offsetof(Chunk, info.next));
return reinterpret_cast<Chunk *>(addr - offsetof(Chunk, info.next));
}
private:
inline void init(JSRuntime *rt);
@ -995,6 +1032,12 @@ static_assert(js::gc::ChunkLocationOffset == offsetof(Chunk, info) +
offsetof(ChunkTrailer, location),
"The hardcoded API location offset must match the actual offset.");
inline void
ChunkPool::Iter::next()
{
current_ = current_->info.next;
}
/*
* Tracks the used sizes for owned heap data and automatically maintains the
* memory usage relationship between GCRuntime and Zones.


@ -85,8 +85,8 @@ js::IterateChunks(JSRuntime *rt, void *data, IterateChunkCallback chunkCallback)
{
AutoPrepareForTracing prep(rt, SkipAtoms);
for (js::GCChunkSet::Range r = rt->gc.allChunks(); !r.empty(); r.popFront())
chunkCallback(rt, data, r.front());
for (auto chunk = rt->gc.allNonEmptyChunks(); !chunk.done(); chunk.next())
chunkCallback(rt, data, chunk);
}
void


@ -189,8 +189,8 @@ gc::GCRuntime::startVerifyPreBarriers()
if (!IsIncrementalGCSafe(rt))
return;
for (GCChunkSet::Range r(chunkSet.all()); !r.empty(); r.popFront())
r.front()->bitmap.clear();
for (auto chunk = rt->gc.allNonEmptyChunks(); !chunk.done(); chunk.next())
chunk->bitmap.clear();
number++;


@ -1936,7 +1936,8 @@ JS_SetGCParameter(JSRuntime *rt, JSGCParamKey key, uint32_t value)
JS_PUBLIC_API(uint32_t)
JS_GetGCParameter(JSRuntime *rt, JSGCParamKey key)
{
return rt->gc.getParameter(key);
AutoLockGC lock(rt);
return rt->gc.getParameter(key, lock);
}
JS_PUBLIC_API(void)


@ -659,58 +659,78 @@ FreeChunk(JSRuntime *rt, Chunk *p)
UnmapPages(static_cast<void *>(p), ChunkSize);
}
#ifdef DEBUG
static bool
ChunkPoolContainsChunk(ChunkPool &pool, Chunk *chunk)
{
for (ChunkPool::Iter iter(pool); !iter.done(); iter.next()) {
if (iter.get() == chunk)
return true;
}
return false;
}
#endif
/* Must be called with the GC lock taken. */
inline Chunk *
ChunkPool::get(JSRuntime *rt)
ChunkPool::pop()
{
Chunk *chunk = head_;
if (!chunk) {
MOZ_ASSERT(!count_);
return nullptr;
}
MOZ_ASSERT(bool(count_) == bool(head_));
MOZ_ASSERT(count_);
head_ = chunk->info.next;
--count_;
Chunk *chunk = head_;
if (!chunk)
return nullptr;
remove(chunk);
return chunk;
}
/* Must be called either during the GC or with the GC lock taken. */
inline void
ChunkPool::put(Chunk *chunk)
ChunkPool::push(Chunk *chunk)
{
MOZ_ASSERT(!chunk->info.belongsToAnyPool());
chunk->info.age = 0;
chunk->info.next = head_;
if (head_)
head_->info.prev = chunk;
head_ = chunk;
count_++;
}
inline Chunk *
ChunkPool::Enum::front()
{
Chunk *chunk = *chunkp;
MOZ_ASSERT_IF(chunk, pool.count() != 0);
return chunk;
if (!tail_)
tail_ = chunk;
++count_;
}
inline void
ChunkPool::Enum::popFront()
ChunkPool::remove(Chunk *chunk)
{
MOZ_ASSERT(!empty());
chunkp = &front()->info.next;
MOZ_ASSERT(ChunkPoolContainsChunk(*this, chunk));
if (head_ == chunk)
head_ = chunk->info.next;
if (tail_ == chunk)
tail_ = chunk->info.prev;
if (chunk->info.prev)
chunk->info.prev->info.next = chunk->info.next;
if (chunk->info.next)
chunk->info.next->info.prev = chunk->info.prev;
chunk->info.next = chunk->info.prev = nullptr;
MOZ_ASSERT(!chunk->info.belongsToAnyPool());
--count_;
}
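// [Editor's sketch] A self-contained model of the intrusive doubly-linked
// list that push/remove/pop maintain. Node, ToyPool, and main() are
// hypothetical; the unlink logic mirrors ChunkPool::remove() above.
#include <cassert>
#include <cstddef>

struct Node { Node *next = nullptr; Node *prev = nullptr; };

struct ToyPool
{
    Node *head = nullptr;
    Node *tail = nullptr;
    size_t count = 0;

    void push(Node *n) {                // LIFO insert at the head
        n->next = head;
        if (head)
            head->prev = n;
        head = n;
        if (!tail)
            tail = n;
        ++count;
    }
    void remove(Node *n) {              // O(1) unlink from any position
        if (head == n) head = n->next;
        if (tail == n) tail = n->prev;
        if (n->prev) n->prev->next = n->next;
        if (n->next) n->next->prev = n->prev;
        n->next = n->prev = nullptr;    // clears the belongsToAnyPool() links
        --count;
    }
    Node *pop() {                       // as ChunkPool::pop(): take the head
        Node *n = head;
        if (!n)
            return nullptr;
        remove(n);
        return n;
    }
};

int main()
{
    Node a, b, c;
    ToyPool pool;
    pool.push(&a); pool.push(&b); pool.push(&c);  // list is now c, b, a
    pool.remove(&b);                              // middle removal in O(1)
    assert(pool.pop() == &c);
    assert(pool.pop() == &a);
    assert(pool.count == 0 && !pool.head && !pool.tail);
    return 0;
}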
inline void
ChunkPool::Enum::removeAndPopFront()
ChunkPool::ReverseIter::prev()
{
MOZ_ASSERT(!empty());
*chunkp = front()->info.next;
--pool.count_;
current_ = current_->info.prev;
}
/* Must be called either during the GC or with the GC lock taken. */
Chunk *
GCRuntime::expireChunkPool(bool shrinkBuffers, bool releaseAll)
inline void
ChunkPool::ReverseIter::reset()
{
current_ = pool_.tail_;
}
ChunkPool
GCRuntime::expireEmptyChunks(bool shrinkBuffers, const AutoLockGC &lock)
{
/*
* Return old empty chunks to the system while preserving the order of
@ -718,47 +738,42 @@ GCRuntime::expireChunkPool(bool shrinkBuffers, bool releaseAll)
* without emptying the list, the older chunks will stay at the tail
* and are more likely to reach the max age.
*/
Chunk *freeList = nullptr;
ChunkPool expired;
unsigned freeChunkCount = 0;
for (ChunkPool::Enum e(emptyChunks); !e.empty(); ) {
Chunk *chunk = e.front();
for (ChunkPool::Iter iter(emptyChunks(lock)); !iter.done();) {
Chunk *chunk = iter.get();
iter.next();
MOZ_ASSERT(chunk->unused());
MOZ_ASSERT(!chunkSet.has(chunk));
if (releaseAll || freeChunkCount >= tunables.maxEmptyChunkCount() ||
if (freeChunkCount >= tunables.maxEmptyChunkCount() ||
(freeChunkCount >= tunables.minEmptyChunkCount() &&
(shrinkBuffers || chunk->info.age == MAX_EMPTY_CHUNK_AGE)))
{
e.removeAndPopFront();
emptyChunks(lock).remove(chunk);
prepareToFreeChunk(chunk->info);
chunk->info.next = freeList;
freeList = chunk;
expired.push(chunk);
} else {
/* Keep the chunk but increase its age. */
++freeChunkCount;
++chunk->info.age;
e.popFront();
}
}
MOZ_ASSERT(emptyChunks.count() <= tunables.maxEmptyChunkCount());
MOZ_ASSERT_IF(shrinkBuffers, emptyChunks.count() <= tunables.minEmptyChunkCount());
MOZ_ASSERT_IF(releaseAll, emptyChunks.count() == 0);
return freeList;
MOZ_ASSERT(emptyChunks(lock).count() <= tunables.maxEmptyChunkCount());
MOZ_ASSERT_IF(shrinkBuffers, emptyChunks(lock).count() <= tunables.minEmptyChunkCount());
return expired;
}
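// [Editor's sketch] The per-chunk retention test inside the loop above,
// isolated as a pure function. The parameter names are hypothetical; the
// values come from GCSchedulingTunables and MAX_EMPTY_CHUNK_AGE.
static bool
ShouldExpireEmptyChunk(unsigned keptSoFar, unsigned minEmptyChunks,
                       unsigned maxEmptyChunks, bool shrinkBuffers,
                       unsigned age, unsigned maxAge)
{
    // Never retain more than maxEmptyChunks. Above the minimum, expire
    // eagerly when shrinking and lazily once a chunk reaches the max age.
    return keptSoFar >= maxEmptyChunks ||
           (keptSoFar >= minEmptyChunks && (shrinkBuffers || age == maxAge));
}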
void
GCRuntime::freeChunkList(Chunk *chunkListHead)
GCRuntime::freeChunks(ChunkPool pool)
{
while (Chunk *chunk = chunkListHead) {
MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
chunkListHead = chunk->info.next;
for (ChunkPool::Iter iter(pool); !iter.done();) {
Chunk *chunk = iter.get();
iter.next();
pool.remove(chunk);
FreeChunk(rt, chunk);
}
}
void
GCRuntime::expireAndFreeChunkPool(bool releaseAll)
{
freeChunkList(expireChunkPool(true, releaseAll));
MOZ_ASSERT(pool.count() == 0);
}
/* static */ Chunk *
@ -772,15 +787,6 @@ Chunk::allocate(JSRuntime *rt)
return chunk;
}
/* Must be called with the GC lock taken. */
inline void
GCRuntime::releaseChunk(Chunk *chunk)
{
MOZ_ASSERT(chunk);
prepareToFreeChunk(chunk->info);
FreeChunk(rt, chunk);
}
inline void
GCRuntime::prepareToFreeChunk(ChunkInfo &info)
{
@ -825,7 +831,7 @@ Chunk::init(JSRuntime *rt)
decommitAllArenas(rt);
/* Initialize the chunk info. */
info.age = 0;
info.init();
info.trailer.storeBuffer = nullptr;
info.trailer.location = ChunkLocationBitTenuredHeap;
info.trailer.runtime = rt;
@ -833,50 +839,6 @@ Chunk::init(JSRuntime *rt)
/* The rest of info fields are initialized in pickChunk. */
}
inline Chunk **
GCRuntime::getAvailableChunkList(Zone *zone)
{
return zone->isSystem
? &systemAvailableChunkListHead
: &userAvailableChunkListHead;
}
inline void
Chunk::addToAvailableList(Zone *zone)
{
JSRuntime *rt = zone->runtimeFromAnyThread();
insertToAvailableList(rt->gc.getAvailableChunkList(zone));
}
inline void
Chunk::insertToAvailableList(Chunk **insertPoint)
{
MOZ_ASSERT(hasAvailableArenas());
MOZ_ASSERT(!info.prevp);
MOZ_ASSERT(!info.next);
info.prevp = insertPoint;
Chunk *insertBefore = *insertPoint;
if (insertBefore) {
MOZ_ASSERT(insertBefore->info.prevp == insertPoint);
insertBefore->info.prevp = &info.next;
}
info.next = insertBefore;
*insertPoint = this;
}
inline void
Chunk::removeFromAvailableList()
{
MOZ_ASSERT(info.prevp);
*info.prevp = info.next;
if (info.next) {
MOZ_ASSERT(info.next->info.prevp == &info.next);
info.next->info.prevp = info.prevp;
}
info.prevp = nullptr;
info.next = nullptr;
}
/*
* Search for and return the next decommitted Arena. Our goal is to keep
* lastDecommittedArenaOffset "close" to a free arena. We do this by setting
@ -937,7 +899,7 @@ Chunk::fetchNextFreeArena(JSRuntime *rt)
}
ArenaHeader *
Chunk::allocateArena(Zone *zone, AllocKind thingKind)
Chunk::allocateArena(Zone *zone, AllocKind thingKind, const AutoLockGC &lock)
{
MOZ_ASSERT(hasAvailableArenas());
@ -961,8 +923,10 @@ Chunk::allocateArena(Zone *zone, AllocKind thingKind)
? fetchNextFreeArena(rt)
: fetchNextDecommittedArena();
aheader->init(zone, thingKind);
if (MOZ_UNLIKELY(!hasAvailableArenas()))
removeFromAvailableList();
if (MOZ_UNLIKELY(!hasAvailableArenas())) {
rt->gc.availableChunks(lock).remove(this);
rt->gc.fullChunks(lock).push(this);
}
zone->usage.addGCArena();
@ -1046,30 +1010,28 @@ Chunk::releaseArena(ArenaHeader *aheader)
addArenaToFreeList(rt, aheader);
if (info.numArenasFree == 1) {
MOZ_ASSERT(!info.prevp);
MOZ_ASSERT(!info.next);
addToAvailableList(zone);
} else if (!unused()) {
MOZ_ASSERT(info.prevp);
} else {
MOZ_ASSERT(unused());
removeFromAvailableList();
if (maybeLock.isNothing())
maybeLock.emplace(rt);
rt->gc.fullChunks(maybeLock.ref()).remove(this);
rt->gc.availableChunks(maybeLock.ref()).push(this);
} else if (unused()) {
if (maybeLock.isNothing())
maybeLock.emplace(rt);
rt->gc.availableChunks(maybeLock.ref()).remove(this);
decommitAllArenas(rt);
rt->gc.moveChunkToFreePool(this);
rt->gc.emptyChunks(maybeLock.ref()).push(this);
} else {
#ifdef DEBUG
if (maybeLock.isNothing())
maybeLock.emplace(rt);
MOZ_ASSERT(!unused());
MOZ_ASSERT(ChunkPoolContainsChunk(rt->gc.availableChunks(maybeLock.ref()), this));
#endif
}
}
void
GCRuntime::moveChunkToFreePool(Chunk *chunk)
{
MOZ_ASSERT(chunk->unused());
MOZ_ASSERT(chunkSet.has(chunk));
chunkSet.remove(chunk);
emptyChunks.put(chunk);
}
inline bool
GCRuntime::wantBackgroundAllocation() const
GCRuntime::wantBackgroundAllocation(const AutoLockGC &lock) const
{
/*
* To minimize memory waste we do not want to run the background chunk
@ -1077,8 +1039,8 @@ GCRuntime::wantBackgroundAllocation() const
* of them.
*/
return helperState.canBackgroundAllocate() &&
emptyChunks.count() < tunables.minEmptyChunkCount() &&
chunkSet.count() >= 4;
emptyChunks(lock).count() < tunables.minEmptyChunkCount() &&
(fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
}
class js::gc::AutoMaybeStartBackgroundAllocation
@ -1109,14 +1071,14 @@ class js::gc::AutoMaybeStartBackgroundAllocation
/* The caller must hold the GC lock. */
Chunk *
GCRuntime::pickChunk(Zone *zone, AutoMaybeStartBackgroundAllocation &maybeStartBackgroundAllocation)
GCRuntime::pickChunk(const AutoLockGC &lock,
AutoMaybeStartBackgroundAllocation &maybeStartBackgroundAllocation)
{
Chunk **listHeadp = getAvailableChunkList(zone);
Chunk *chunk = *listHeadp;
Chunk *chunk = availableChunks(lock).head();
if (chunk)
return chunk;
chunk = emptyChunks.get(rt);
chunk = emptyChunks(lock).pop();
if (!chunk) {
chunk = Chunk::allocate(rt);
if (!chunk)
@ -1125,28 +1087,13 @@ GCRuntime::pickChunk(Zone *zone, AutoMaybeStartBackgroundAllocation &maybeStartB
}
MOZ_ASSERT(chunk->unused());
MOZ_ASSERT(!chunkSet.has(chunk));
if (wantBackgroundAllocation())
if (wantBackgroundAllocation(lock))
maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt);
chunkAllocationSinceLastGC = true;
/*
* FIXME bug 583732 - chunk is newly allocated and cannot be present in
* the table so using ordinary lookupForAdd is suboptimal here.
*/
GCChunkSet::AddPtr p = chunkSet.lookupForAdd(chunk);
MOZ_ASSERT(!p);
if (!chunkSet.add(p, chunk)) {
releaseChunk(chunk);
return nullptr;
}
chunk->info.prevp = nullptr;
chunk->info.next = nullptr;
chunk->addToAvailableList(zone);
availableChunks(lock).push(chunk);
return chunk;
}
@ -1160,8 +1107,6 @@ GCRuntime::GCRuntime(JSRuntime *rt) :
stats(rt),
marker(rt),
usage(nullptr),
systemAvailableChunkListHead(nullptr),
userAvailableChunkListHead(nullptr),
maxMallocBytes(0),
numArenasFreeCommitted(0),
verifyPreData(nullptr),
@ -1327,9 +1272,6 @@ GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
if (!lock)
return false;
if (!chunkSet.init(INITIAL_CHUNK_CAPACITY))
return false;
if (!rootsHash.init(256))
return false;
@ -1373,8 +1315,7 @@ GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
return true;
}
void
GCRuntime::finish()
GCRuntime::~GCRuntime()
{
/*
* Wait until the background finalization stops and the helper thread
@ -1387,6 +1328,11 @@ GCRuntime::finish()
finishVerifier();
#endif
#ifdef JSGC_GENERATIONAL
storeBuffer.disable();
nursery.disable();
#endif
/* Delete all remaining zones. */
if (rt->gcInitialized) {
for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
@ -1398,15 +1344,9 @@ GCRuntime::finish()
zones.clear();
systemAvailableChunkListHead = nullptr;
userAvailableChunkListHead = nullptr;
if (chunkSet.initialized()) {
for (GCChunkSet::Range r(chunkSet.all()); !r.empty(); r.popFront())
releaseChunk(r.front());
chunkSet.clear();
}
expireAndFreeChunkPool(true);
freeChunks(availableChunks_);
freeChunks(emptyChunks_);
freeChunks(fullChunks_);
if (rootsHash.initialized())
rootsHash.clear();
@ -1522,7 +1462,7 @@ GCSchedulingTunables::setParameter(JSGCParamKey key, uint32_t value)
}
uint32_t
GCRuntime::getParameter(JSGCParamKey key)
GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC &lock)
{
switch (key) {
case JSGC_MAX_BYTES:
@ -1534,9 +1474,11 @@ GCRuntime::getParameter(JSGCParamKey key)
case JSGC_MODE:
return uint32_t(mode);
case JSGC_UNUSED_CHUNKS:
return uint32_t(emptyChunks.count());
return uint32_t(emptyChunks(lock).count());
case JSGC_TOTAL_CHUNKS:
return uint32_t(chunkSet.count() + emptyChunks.count());
return uint32_t(fullChunks(lock).count() +
availableChunks(lock).count() +
emptyChunks(lock).count());
case JSGC_SLICE_TIME_BUDGET:
return uint32_t(sliceBudget > 0 ? sliceBudget / PRMJ_USEC_PER_MSEC : 0);
case JSGC_MARK_STACK_LIMIT:
@ -1970,13 +1912,13 @@ ArenaLists::allocateFromArena(JS::Zone *zone, AllocKind thingKind,
if (maybeLock.isNothing())
maybeLock.emplace(rt);
Chunk *chunk = rt->gc.pickChunk(zone, maybeStartBGAlloc);
Chunk *chunk = rt->gc.pickChunk(maybeLock.ref(), maybeStartBGAlloc);
if (!chunk)
return nullptr;
// Although our chunk should definitely have enough space for another arena,
// there are other valid reasons why Chunk::allocateArena() may fail.
aheader = chunk->allocateArena(zone, thingKind);
aheader = chunk->allocateArena(zone, thingKind, maybeLock.ref());
if (!aheader)
return nullptr;
@ -2540,7 +2482,7 @@ GCRuntime::releaseRelocatedArenas(ArenaHeader *relocatedList)
}
AutoLockGC lock(rt);
expireChunksAndArenas(true);
expireChunksAndArenas(true, lock);
}
#endif // JSGC_COMPACTING
@ -3078,136 +3020,70 @@ GCRuntime::maybePeriodicFullGC()
}
void
GCRuntime::decommitArenasFromAvailableList(Chunk **availableListHeadp)
GCRuntime::decommitArenas(const AutoLockGC &lock)
{
Chunk *chunk = *availableListHeadp;
if (!chunk)
return;
/*
* Decommit is expensive so we avoid holding the GC lock while calling it.
*
* We decommit from the tail of the list to minimize interference with the
* main thread that may start to allocate things at this point.
*
* The arena that is being decommitted outside the GC lock must not be
* available for allocations either via the free list or via the
* decommittedArenas bitmap. For that we just fetch the arena from the
* free list before the decommit, pretending it was allocated. If this
* arena is also the single free arena in the chunk, then we must remove
* the chunk from the available list before we release the lock so the
* allocating thread does not see chunks with no free arenas on the
* available list.
*
* After we retake the lock, we mark the arena as free and decommitted if
* the decommit was successful. We must also add the chunk back to the
* available list if we removed it previously or when the main thread
* has allocated all remaining free arenas in the chunk.
*
* We also must make sure that the aheader is not accessed again after we
* decommit the arena.
*/
MOZ_ASSERT(chunk->info.prevp == availableListHeadp);
while (Chunk *next = chunk->info.next) {
MOZ_ASSERT(next->info.prevp == &chunk->info.next);
chunk = next;
}
for (;;) {
// Start from the list tail to avoid contending with the mutator.
for (ChunkPool::ReverseIter chunk(availableChunks(lock)); !chunk.done(); chunk.prev()) {
while (chunk->info.numArenasFreeCommitted != 0) {
// Ensure the arena we are going to decommit is not available for
// allocation by allocating it for ourself.
ArenaHeader *aheader = chunk->fetchNextFreeArena(rt);
Chunk **savedPrevp = chunk->info.prevp;
if (!chunk->hasAvailableArenas())
chunk->removeFromAvailableList();
availableChunks(lock).remove(chunk);
size_t arenaIndex = Chunk::arenaIndex(aheader->arenaAddress());
bool ok;
{
/*
* If the main thread waits for the decommit to finish, skip
* potentially expensive unlock/lock pair on the contested
* lock.
*/
// Decommit may be expensive in some cases, on some platforms,
// so we avoid holding the GC lock while calling it, with the
// following exception: If the main thread has entered the GC,
// it will wait for this thread to complete. In this case,
// releasing the lock will add unnecessary contention.
Maybe<AutoUnlockGC> maybeUnlock;
if (!isHeapBusy())
maybeUnlock.emplace(rt);
ok = MarkPagesUnused(aheader->getArena(), ArenaSize);
}
if (ok) {
++chunk->info.numArenasFree;
chunk->decommittedArenas.set(arenaIndex);
chunk->decommittedArenas.set(Chunk::arenaIndex(aheader->arenaAddress()));
} else {
chunk->addArenaToFreeList(rt, aheader);
}
MOZ_ASSERT(chunk->hasAvailableArenas());
MOZ_ASSERT(!chunk->unused());
if (chunk->info.numArenasFree == 1) {
/*
* Put the chunk back to the available list either at the
* point where it was before to preserve the available list
* that we enumerate, or, when the allocation thread has fully
* used all the previous chunks, at the beginning of the
* available list.
*/
Chunk **insertPoint = savedPrevp;
if (savedPrevp != availableListHeadp) {
Chunk *prev = Chunk::fromPointerToNext(savedPrevp);
if (!prev->hasAvailableArenas())
insertPoint = availableListHeadp;
}
chunk->insertToAvailableList(insertPoint);
} else {
MOZ_ASSERT(chunk->info.prevp);
}
if (chunkAllocationSinceLastGC || !ok) {
/*
* The allocator thread has started to get new chunks. We should stop
* to avoid decommitting arenas in just allocated chunks.
*/
return;
// While the lock was released, the mutator may have allocated all of this
// chunk's remaining free arenas, moving it out of the available list.
if (chunk->info.numArenasFree == 1) {
// Re-insert into the available list.
MOZ_ASSERT(!ChunkPoolContainsChunk(availableChunks(lock), chunk.get()));
availableChunks(lock).push(chunk);
// Start the iteration over again. Already-decommitted chunks
// will get skipped over quickly at the loop head.
chunk.reset();
}
}
/*
* chunk->info.prevp becomes null when the allocator thread has consumed
* all chunks from the available list.
*/
MOZ_ASSERT_IF(chunk->info.prevp, *chunk->info.prevp == chunk);
if (chunk->info.prevp == availableListHeadp || !chunk->info.prevp)
break;
/*
* prevp exists and is not the list head. It must point to the next
* field of the previous chunk.
*/
chunk = chunk->getPrevious();
}
}
void
GCRuntime::decommitArenas()
{
decommitArenasFromAvailableList(&systemAvailableChunkListHead);
decommitArenasFromAvailableList(&userAvailableChunkListHead);
}
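// [Editor's sketch] The lock-dropping pattern used by decommitArenas above,
// in isolation: perform the expensive page operation without holding the GC
// lock unless the main thread is blocked waiting on us. All names here are
// hypothetical; markPagesUnused stands in for the real gc/Memory.h call.
#include <cstddef>
#include <mutex>

static bool
DecommitOneArena(std::mutex &gcLock, bool heapBusy,
                 bool (*markPagesUnused)(void *, size_t),
                 void *arena, size_t arenaSize)
{
    // Precondition: gcLock is held by this thread.
    bool ok;
    if (heapBusy) {
        // The main thread has entered the GC and is waiting for us; an
        // unlock/lock pair would only add contention.
        ok = markPagesUnused(arena, arenaSize);
    } else {
        gcLock.unlock();
        ok = markPagesUnused(arena, arenaSize);
        gcLock.lock();
        // Anything observed before the unlock may be stale now; the caller
        // must revalidate (e.g. whether the chunk left the available pool).
    }
    return ok;
}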
/* Must be called with the GC lock taken. */
void
GCRuntime::expireChunksAndArenas(bool shouldShrink)
GCRuntime::expireChunksAndArenas(bool shouldShrink, const AutoLockGC &lock)
{
#ifdef JSGC_FJGENERATIONAL
rt->threadPool.pruneChunkCache();
#endif
if (Chunk *toFree = expireChunkPool(shouldShrink, false)) {
ChunkPool chunksToFree = expireEmptyChunks(shouldShrink, lock);
if (chunksToFree.count()) {
AutoUnlockGC unlock(rt);
freeChunkList(toFree);
freeChunks(chunksToFree);
}
if (shouldShrink)
decommitArenas();
decommitArenas(lock);
}
void
@ -3353,7 +3229,7 @@ GCHelperState::work()
case SWEEPING: {
AutoTraceLog logSweeping(logger, TraceLogger::GCSweeping);
doSweep();
doSweep(lock);
MOZ_ASSERT(state() == SWEEPING);
break;
}
@ -3371,8 +3247,8 @@ GCHelperState::work()
if (!chunk)
break;
MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
rt->gc.emptyChunks.put(chunk);
} while (state() == ALLOCATING && rt->gc.wantBackgroundAllocation());
rt->gc.emptyChunks(lock).push(chunk);
} while (state() == ALLOCATING && rt->gc.wantBackgroundAllocation(lock));
MOZ_ASSERT(state() == ALLOCATING || state() == CANCEL_ALLOCATION);
break;
@ -3456,9 +3332,8 @@ GCHelperState::startBackgroundAllocationIfIdle()
startBackgroundThread(ALLOCATING);
}
/* Must be called with the GC lock taken. */
void
GCHelperState::doSweep()
GCHelperState::doSweep(const AutoLockGC &lock)
{
if (sweepFlag) {
sweepFlag = false;
@ -3470,7 +3345,7 @@ GCHelperState::doSweep()
}
bool shrinking = shrinkFlag;
rt->gc.expireChunksAndArenas(shrinking);
rt->gc.expireChunksAndArenas(shrinking, lock);
/*
* The main thread may have called ShrinkGCBuffers while
@ -3479,7 +3354,7 @@ GCHelperState::doSweep()
*/
if (!shrinking && shrinkFlag) {
shrinkFlag = false;
rt->gc.expireChunksAndArenas(true);
rt->gc.expireChunksAndArenas(true, lock);
}
}
@ -4043,14 +3918,14 @@ js::gc::MarkingValidator::nonIncrementalMark()
GCMarker *gcmarker = &gc->marker;
/* Save existing mark bits. */
for (GCChunkSet::Range r(gc->chunkSet.all()); !r.empty(); r.popFront()) {
ChunkBitmap *bitmap = &r.front()->bitmap;
ChunkBitmap *entry = js_new<ChunkBitmap>();
for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next()) {
ChunkBitmap *bitmap = &chunk->bitmap;
ChunkBitmap *entry = js_new<ChunkBitmap>();
if (!entry)
return;
memcpy((void *)entry->bitmap, (void *)bitmap->bitmap, sizeof(bitmap->bitmap));
if (!map.putNew(r.front(), entry))
if (!map.putNew(chunk.get(), entry))
return;
}
@ -4084,8 +3959,8 @@ js::gc::MarkingValidator::nonIncrementalMark()
MOZ_ASSERT(gcmarker->isDrained());
gcmarker->reset();
for (GCChunkSet::Range r(gc->chunkSet.all()); !r.empty(); r.popFront())
r.front()->bitmap.clear();
for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next())
chunk->bitmap.clear();
{
gcstats::AutoPhase ap1(gc->stats, gcstats::PHASE_MARK);
@ -4126,8 +4001,7 @@ js::gc::MarkingValidator::nonIncrementalMark()
}
/* Take a copy of the non-incremental mark state and restore the original. */
for (GCChunkSet::Range r(gc->chunkSet.all()); !r.empty(); r.popFront()) {
Chunk *chunk = r.front();
for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next()) {
ChunkBitmap *bitmap = &chunk->bitmap;
ChunkBitmap *entry = map.lookup(chunk)->value();
Swap(*entry, *bitmap);
@ -4151,8 +4025,7 @@ js::gc::MarkingValidator::validate()
if (!initialized)
return;
for (GCChunkSet::Range r(gc->chunkSet.all()); !r.empty(); r.popFront()) {
Chunk *chunk = r.front();
for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next()) {
BitmapMap::Ptr ptr = map.lookup(chunk);
if (!ptr)
continue; /* Allocated after we did the non-incremental mark. */
@ -5257,7 +5130,7 @@ GCRuntime::endSweepPhase(bool lastGC)
* Expire needs to unlock it for other callers.
*/
AutoLockGC lock(rt);
expireChunksAndArenas(invocationKind == GC_SHRINK);
expireChunksAndArenas(invocationKind == GC_SHRINK, lock);
}
}
@ -6138,7 +6011,7 @@ GCRuntime::shrinkBuffers()
if (CanUseExtraThreads())
helperState.startBackgroundShrink();
else
expireChunksAndArenas(true);
expireChunksAndArenas(true, lock);
}
void


@ -24,6 +24,8 @@
namespace js {
class AutoLockGC;
namespace gc {
class ForkJoinNursery;
}
@ -1045,7 +1047,7 @@ class GCHelperState
}
/* Must be called with the GC lock taken. */
void doSweep();
void doSweep(const AutoLockGC &lock);
public:
explicit GCHelperState(JSRuntime *rt)
@ -1162,8 +1164,6 @@ struct GCChunkHasher {
}
};
typedef HashSet<js::gc::Chunk *, GCChunkHasher, SystemAllocPolicy> GCChunkSet;
struct GrayRoot {
void *thing;
JSGCTraceKind kind;


@ -423,7 +423,6 @@ JSRuntime::~JSRuntime()
FinishRuntimeNumberState(this);
#endif
gc.finish();
atomsCompartment_ = nullptr;
js_free(defaultLocale);
@ -433,11 +432,6 @@ JSRuntime::~JSRuntime()
js_delete(ionPcScriptCache);
#ifdef JSGC_GENERATIONAL
gc.storeBuffer.disable();
gc.nursery.disable();
#endif
#if defined(JS_ARM_SIMULATOR) || defined(JS_MIPS_SIMULATOR)
js::jit::DestroySimulatorRuntime(simulatorRuntime_);
#endif