Bug 1141234 - Part 7: Reorder the allocator methods for clarity; r=sfink

This commit is contained in:
Terrence Cole 2015-03-11 11:11:29 -07:00
parent b5a8f018b9
commit f5192ea952
7 changed files with 279 additions and 279 deletions

View File

@ -8,6 +8,7 @@
#include "jscntxt.h"
#include "gc/GCInternals.h"
#include "gc/GCTrace.h"
#include "gc/Nursery.h"
#include "jit/JitCompartment.h"
@ -19,53 +20,6 @@
using namespace js;
using namespace gc;
// Try to allocate a JSObject in the nursery. Returns nullptr when the nursery
// is full (after one GC-and-retry attempt, if GC is permitted) or on OOM.
template <AllowGC allowGC>
JSObject *
GCRuntime::tryNewNurseryObject(JSContext *cx, size_t thingSize, size_t nDynamicSlots, const Class *clasp)
{
    MOZ_ASSERT(!IsAtomsCompartment(cx->compartment()));

    if (JSObject *obj = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp))
        return obj;

    if (!allowGC || rt->mainThread.suppressGC)
        return nullptr;

    minorGC(cx, JS::gcreason::OUT_OF_NURSERY);

    // Exceeding gcMaxBytes while tenuring can disable the Nursery; only
    // retry when it is still enabled.
    if (!nursery.isEnabled())
        return nullptr;

    JSObject *retried = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp);
    MOZ_ASSERT(retried);
    return retried;
}
// Allocate a tenured JSObject of |kind|, plus |nDynamicSlots| out-of-line
// slots when requested. Returns nullptr if either allocation fails.
template <AllowGC allowGC>
JSObject *
GCRuntime::tryNewTenuredObject(ExclusiveContext *cx, AllocKind kind, size_t thingSize,
                               size_t nDynamicSlots)
{
    // Allocate the dynamic slot array up front; on success it is handed to
    // the object below.
    HeapSlot *slots = nullptr;
    if (nDynamicSlots) {
        slots = cx->zone()->pod_malloc<HeapSlot>(nDynamicSlots);
        if (MOZ_UNLIKELY(!slots))
            return nullptr;
        Debug_SetSlotRangeToCrashOnTouch(slots, nDynamicSlots);
    }

    JSObject *obj = tryNewTenuredThing<JSObject, allowGC>(cx, kind, thingSize);
    if (!obj) {
        // The object allocation failed; free the slots so they do not leak.
        js_free(slots);
        return nullptr;
    }

    obj->setInitialSlotsMaybeNonNative(slots);
    return obj;
}
bool
GCRuntime::gcIfNeededPerAllocation(JSContext *cx)
{
@ -139,13 +93,10 @@ GCRuntime::checkIncrementalZoneState(ExclusiveContext *cx, T *t)
#endif
}
/*
* Allocate a new GC thing. After a successful allocation the caller must
* fully initialize the thing before calling any function that can potentially
* trigger GC. This will ensure that GC tracing never sees junk values stored
* in the partially initialized thing.
*/
// Allocate a new GC thing. After a successful allocation the caller must
// fully initialize the thing before calling any function that can potentially
// trigger GC. This will ensure that GC tracing never sees junk values stored
// in the partially initialized thing.
template <typename T, AllowGC allowGC /* = CanGC */>
JSObject *
js::Allocate(ExclusiveContext *cx, AllocKind kind, size_t nDynamicSlots, InitialHeap heap,
@ -192,6 +143,67 @@ template JSObject *js::Allocate<JSObject, CanGC>(ExclusiveContext *cx, gc::Alloc
size_t nDynamicSlots, gc::InitialHeap heap,
const Class *clasp);
// Try to allocate a JSObject in the nursery. Returns nullptr when the nursery
// is full (after one GC-and-retry attempt, if GC is permitted) or on OOM.
template <AllowGC allowGC>
JSObject *
GCRuntime::tryNewNurseryObject(JSContext *cx, size_t thingSize, size_t nDynamicSlots, const Class *clasp)
{
    MOZ_ASSERT(!IsAtomsCompartment(cx->compartment()));

    if (JSObject *obj = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp))
        return obj;

    if (!allowGC || rt->mainThread.suppressGC)
        return nullptr;

    minorGC(cx, JS::gcreason::OUT_OF_NURSERY);

    // Exceeding gcMaxBytes while tenuring can disable the Nursery; only
    // retry when it is still enabled.
    if (!nursery.isEnabled())
        return nullptr;

    JSObject *retried = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp);
    MOZ_ASSERT(retried);
    return retried;
}
// Owning pointer for an out-of-line HeapSlot array, freed with js_free.
typedef mozilla::UniquePtr<HeapSlot, JS::FreePolicy> UniqueSlots;

// Allocate |count| dynamic slots in |cx|'s zone. Returns an empty UniqueSlots
// when |count| is zero or on OOM.
static inline UniqueSlots
MakeSlotArray(ExclusiveContext *cx, size_t count)
{
    if (!count)
        return UniqueSlots(nullptr);

    HeapSlot *slots = cx->zone()->pod_malloc<HeapSlot>(count);
    if (slots)
        Debug_SetSlotRangeToCrashOnTouch(slots, count);
    return UniqueSlots(slots);
}
// Allocate a tenured JSObject of |kind|, plus |nDynamicSlots| out-of-line
// slots when requested. Returns nullptr if either allocation fails.
template <AllowGC allowGC>
JSObject *
GCRuntime::tryNewTenuredObject(ExclusiveContext *cx, AllocKind kind, size_t thingSize,
                               size_t nDynamicSlots)
{
    // Use the RAII MakeSlotArray helper (defined in this file) instead of
    // duplicating its pod_malloc/Debug_SetSlotRangeToCrashOnTouch logic:
    // UniqueSlots frees the array automatically if the object allocation
    // below fails, removing the manual js_free on the error path.
    UniqueSlots slots = MakeSlotArray(cx, nDynamicSlots);
    if (nDynamicSlots && !slots)
        return nullptr;

    JSObject *obj = tryNewTenuredThing<JSObject, allowGC>(cx, kind, thingSize);
    if (obj)
        obj->setInitialSlotsMaybeNonNative(slots.release());
    return obj;
}
template <typename T, AllowGC allowGC /* = CanGC */>
T *
js::Allocate(ExclusiveContext *cx)
@ -244,3 +256,182 @@ GCRuntime::tryNewTenuredThing(ExclusiveContext *cx, AllocKind kind, size_t thing
TraceTenuredAlloc(t, kind);
return t;
}
// Dispatch a free-list refill to the main-thread or off-main-thread path,
// depending on which kind of context is allocating.
template <AllowGC allowGC>
/* static */ void *
GCRuntime::refillFreeListFromAnyThread(ExclusiveContext *cx, AllocKind thingKind)
{
    // Callers only refill when the free list is exhausted.
    MOZ_ASSERT(cx->arenas()->freeLists[thingKind].isEmpty());

    return cx->isJSContext()
           ? refillFreeListFromMainThread<allowGC>(cx->asJSContext(), thingKind)
           : refillFreeListOffMainThread(cx, thingKind);
}
// Refill the free list for |thingKind| on the main thread. If the first
// attempt fails and GC is allowed, run a last-ditch shrinking GC and retry
// once. Returns nullptr on OOM; the OOM is reported here only when
// |allowGC| (a fallible allocation percolates the nullptr up instead).
template <AllowGC allowGC>
/* static */ void *
GCRuntime::refillFreeListFromMainThread(JSContext *cx, AllocKind thingKind)
{
JSRuntime *rt = cx->runtime();
MOZ_ASSERT(!rt->isHeapBusy(), "allocating while under GC");
MOZ_ASSERT_IF(allowGC, !rt->currentThreadHasExclusiveAccess());
// Try to allocate; synchronize with background GC threads if necessary.
void *thing = tryRefillFreeListFromMainThread(cx, thingKind);
if (MOZ_LIKELY(thing))
return thing;
// Perform a last-ditch GC to hopefully free up some memory.
{
// If we are doing a fallible allocation, percolate up the OOM
// instead of reporting it.
if (!allowGC)
return nullptr;
JS::PrepareForFullGC(rt);
// Hold atoms alive for the duration of the GC in this scope.
AutoKeepAtoms keepAtoms(cx->perThreadData);
rt->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
}
// Retry the allocation after the last-ditch GC.
thing = tryRefillFreeListFromMainThread(cx, thingKind);
if (thing)
return thing;
// We are really just totally out of memory.
MOZ_ASSERT(allowGC, "A fallible allocation must not report OOM on failure.");
ReportOutOfMemory(cx);
return nullptr;
}
// One refill attempt for |thingKind| on the main thread: try the arenas,
// then wait for background sweep/alloc tasks and try once more. Returns
// nullptr on failure without reporting; the caller decides how to react.
/* static */ void *
GCRuntime::tryRefillFreeListFromMainThread(JSContext *cx, AllocKind thingKind)
{
    ArenaLists *arenas = cx->arenas();
    Zone *zone = cx->zone();

    AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
    void *thing = arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
    if (MOZ_LIKELY(thing))
        return thing;

    // Even if allocateFromArena failed due to OOM, a background
    // finalization or allocation task may be running freeing more memory
    // or adding more available memory to our free pool; wait for them to
    // finish, then try to allocate again in case they made more memory
    // available.
    cx->runtime()->gc.waitBackgroundSweepOrAllocEnd();

    // The redundant "if (thing) return thing; return nullptr;" tail has been
    // collapsed: a failed retry already yields nullptr.
    return arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
}
// Refill the free list for |thingKind| from a helper (non-main) thread.
// Holds the helper-thread lock, waits until the runtime heap is not busy
// with GC, then tries the allocation exactly once. Reports OOM and returns
// nullptr on failure.
/* static */ void *
GCRuntime::refillFreeListOffMainThread(ExclusiveContext *cx, AllocKind thingKind)
{
ArenaLists *arenas = cx->arenas();
Zone *zone = cx->zone();
JSRuntime *rt = zone->runtimeFromAnyThread();
AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
// If we're off the main thread, we try to allocate once and return
// whatever value we get. We need to first ensure the main thread is not in
// a GC session.
AutoLockHelperThreadState lock;
// Block (releasing the lock while waiting) until the heap is no longer
// busy; the PRODUCER condition is signaled by the GC side.
while (rt->isHeapBusy())
HelperThreadState().wait(GlobalHelperThreadState::PRODUCER);
void *thing = arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
if (thing)
return thing;
ReportOutOfMemory(cx);
return nullptr;
}
// Allocate a tenured cell of |thingKind| from |zone|'s arenas, pulling a
// fresh arena out of a chunk when no arena with free things is available.
// Returns nullptr when no chunk or arena can be obtained (OOM); the caller
// is responsible for reporting.
TenuredCell *
ArenaLists::allocateFromArena(JS::Zone *zone, AllocKind thingKind,
AutoMaybeStartBackgroundAllocation &maybeStartBGAlloc)
{
JSRuntime *rt = zone->runtimeFromAnyThread();
Maybe<AutoLockGC> maybeLock;
// See if we can proceed without taking the GC lock.
// NOTE(review): the lock is taken up front when background finalization
// for this kind is still pending — presumably because the background
// thread may touch this arena list concurrently; confirm against the
// background finalization code.
if (backgroundFinalizeState[thingKind] != BFS_DONE)
maybeLock.emplace(rt);
ArenaList &al = arenaLists[thingKind];
ArenaHeader *aheader = al.takeNextArena();
if (aheader) {
// Empty arenas should be immediately freed.
MOZ_ASSERT(!aheader->isEmpty());
// The arena has free things, so this allocation cannot fail.
return allocateFromArenaInner<HasFreeThings>(zone, aheader, thingKind);
}
// Parallel threads have their own ArenaLists, but chunks are shared;
// if we haven't already, take the GC lock now to avoid racing.
if (maybeLock.isNothing())
maybeLock.emplace(rt);
Chunk *chunk = rt->gc.pickChunk(maybeLock.ref(), maybeStartBGAlloc);
if (!chunk)
return nullptr;
// Although our chunk should definitely have enough space for another arena,
// there are other valid reasons why Chunk::allocateArena() may fail.
aheader = rt->gc.allocateArena(chunk, zone, thingKind, maybeLock.ref());
if (!aheader)
return nullptr;
// The lock must have been held continuously since the arena was obtained,
// and the fresh (empty) arena is inserted at the list's cursor.
MOZ_ASSERT(!maybeLock->wasUnlocked());
MOZ_ASSERT(al.isCursorAtEnd());
al.insertAtCursor(aheader);
return allocateFromArenaInner<IsEmpty>(zone, aheader, thingKind);
}
// Install |aheader|'s free space as the head of the free list for |kind| and
// allocate one thing from it. |hasFreeThings| selects at compile time whether
// the arena is partially used (has a recorded free span) or freshly allocated
// (completely empty).
template <ArenaLists::ArenaAllocMode hasFreeThings>
TenuredCell *
ArenaLists::allocateFromArenaInner(JS::Zone *zone, ArenaHeader *aheader, AllocKind kind)
{
size_t thingSize = Arena::thingSize(kind);
FreeSpan span;
if (hasFreeThings) {
MOZ_ASSERT(aheader->hasFreeThings());
// Transfer the arena's recorded free span to the free list and mark the
// arena fully used; the span now tracks its remaining free memory.
span = aheader->getFirstFreeSpan();
aheader->setAsFullyUsed();
} else {
MOZ_ASSERT(!aheader->hasFreeThings());
// Fresh arena: the span covers the whole allocatable region.
Arena *arena = aheader->getArena();
span.initFinal(arena->thingsStart(kind), arena->thingsEnd() - thingSize, thingSize);
}
freeLists[kind].setHead(&span);
// If a GC is in progress, the arena may need delayed-marking or
// allocated-during-sweep bookkeeping.
if (MOZ_UNLIKELY(zone->wasGCStarted()))
zone->runtimeFromAnyThread()->gc.arenaAllocatedDuringGC(zone, aheader);
TenuredCell *thing = freeLists[kind].allocate(thingSize);
MOZ_ASSERT(thing); // This allocation is infallible.
return thing;
}
// Bookkeeping for an arena handed out while a GC is in progress: arenas
// allocated during incremental marking are flagged for delayed marking;
// arenas allocated during sweeping are chained onto the
// arenasAllocatedDuringSweep list.
void
GCRuntime::arenaAllocatedDuringGC(JS::Zone *zone, ArenaHeader *arena)
{
    if (zone->needsIncrementalBarrier()) {
        arena->allocatedDuringIncremental = true;
        marker.delayMarkingArena(arena);
        return;
    }

    if (zone->isGCSweeping()) {
        arena->setNextAllocDuringSweep(arenasAllocatedDuringSweep);
        arenasAllocatedDuringSweep = arena;
    }
}

View File

@ -149,6 +149,29 @@ struct MovingTracer : JSTracer {
}
};
// RAII helper that lets allocation code request that the background chunk
// allocation task be kicked off. A request is recorded via
// tryToStartBackgroundAllocation() and acted on in the destructor, which
// calls GCRuntime::startBackgroundAllocTaskIfIdle() on the recorded runtime.
// NOTE(review): deferring the start to destruction presumably allows the
// request to be made while GC locks are held — confirm with callers.
class AutoMaybeStartBackgroundAllocation
{
private:
// Runtime to start the task on; nullptr until a request is made.
JSRuntime *runtime;
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
public:
explicit AutoMaybeStartBackgroundAllocation(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM)
: runtime(nullptr)
{
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
}
// Record the request; the task is not started until destruction.
void tryToStartBackgroundAllocation(JSRuntime *rt) {
runtime = rt;
}
~AutoMaybeStartBackgroundAllocation() {
if (runtime)
runtime->gc.startBackgroundAllocTaskIfIdle();
}
};
} /* namespace gc */
} /* namespace js */

View File

@ -857,7 +857,7 @@ class GCRuntime
Chunk *pickChunk(const AutoLockGC &lock,
AutoMaybeStartBackgroundAllocation &maybeStartBGAlloc);
ArenaHeader *allocateArena(Chunk *chunk, Zone *zone, AllocKind kind, const AutoLockGC &lock);
inline void arenaAllocatedDuringGC(JS::Zone *zone, ArenaHeader *arena);
void arenaAllocatedDuringGC(JS::Zone *zone, ArenaHeader *arena);
// Allocator internals
bool gcIfNeededPerAllocation(JSContext *cx);

View File

@ -478,7 +478,8 @@ js::Nursery::allocateFromTenured(Zone *zone, AllocKind thingKind)
if (t)
return t;
zone->arenas.checkEmptyFreeList(thingKind);
return zone->arenas.allocateFromArena(zone, thingKind);
AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
return zone->arenas.allocateFromArena(zone, thingKind, maybeStartBackgroundAllocation);
}
void

View File

@ -187,7 +187,7 @@ class GCMarker : public JSTracer
}
uint32_t markColor() const { return color; }
inline void delayMarkingArena(gc::ArenaHeader *aheader);
void delayMarkingArena(gc::ArenaHeader *aheader);
void delayMarkingChildren(const void *thing);
void markDelayedChildren(gc::ArenaHeader *aheader);
bool markDelayedChildren(SliceBudget &budget);

View File

@ -984,29 +984,6 @@ GCRuntime::startBackgroundAllocTaskIfIdle()
allocTask.startWithLockHeld();
}
// RAII helper that lets allocation code request that the background chunk
// allocation task be kicked off. A request is recorded via
// tryToStartBackgroundAllocation() and acted on in the destructor, which
// calls GCRuntime::startBackgroundAllocTaskIfIdle() on the recorded runtime.
// NOTE(review): deferring the start to destruction presumably allows the
// request to be made while GC locks are held — confirm with callers.
class js::gc::AutoMaybeStartBackgroundAllocation
{
private:
// Runtime to start the task on; nullptr until a request is made.
JSRuntime *runtime;
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
public:
explicit AutoMaybeStartBackgroundAllocation(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM)
: runtime(nullptr)
{
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
}
// Record the request; the task is not started until destruction.
void tryToStartBackgroundAllocation(JSRuntime *rt) {
runtime = rt;
}
~AutoMaybeStartBackgroundAllocation() {
if (runtime)
runtime->gc.startBackgroundAllocTaskIfIdle();
}
};
Chunk *
GCRuntime::pickChunk(const AutoLockGC &lock,
AutoMaybeStartBackgroundAllocation &maybeStartBackgroundAllocation)
@ -1766,7 +1743,7 @@ ZoneHeapThreshold::updateForRemovedArena(const GCSchedulingTunables &tunables)
gcTriggerBytes_ -= amount;
}
inline void
void
GCMarker::delayMarkingArena(ArenaHeader *aheader)
{
if (aheader->hasDelayedMarking) {
@ -1799,92 +1776,6 @@ ArenaLists::prepareForIncrementalGC(JSRuntime *rt)
}
}
// Bookkeeping for an arena handed out while a GC is in progress: arenas
// allocated during incremental marking are flagged for delayed marking;
// arenas allocated during sweeping are chained onto the
// arenasAllocatedDuringSweep list.
inline void
GCRuntime::arenaAllocatedDuringGC(JS::Zone *zone, ArenaHeader *arena)
{
    if (zone->needsIncrementalBarrier()) {
        arena->allocatedDuringIncremental = true;
        marker.delayMarkingArena(arena);
        return;
    }

    if (zone->isGCSweeping()) {
        arena->setNextAllocDuringSweep(arenasAllocatedDuringSweep);
        arenasAllocatedDuringSweep = arena;
    }
}
// Convenience overload: allocate from |zone|'s arena list for |thingKind|
// using a function-local guard, so a requested background allocation task
// (if any) is started when this call returns.
TenuredCell *
ArenaLists::allocateFromArena(JS::Zone *zone, AllocKind thingKind)
{
AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
return allocateFromArena(zone, thingKind, maybeStartBackgroundAllocation);
}
// Allocate a tenured cell of |thingKind| from |zone|'s arenas, pulling a
// fresh arena out of a chunk when no arena with free things is available.
// Returns nullptr when no chunk or arena can be obtained (OOM); the caller
// is responsible for reporting.
TenuredCell *
ArenaLists::allocateFromArena(JS::Zone *zone, AllocKind thingKind,
AutoMaybeStartBackgroundAllocation &maybeStartBGAlloc)
{
JSRuntime *rt = zone->runtimeFromAnyThread();
Maybe<AutoLockGC> maybeLock;
// See if we can proceed without taking the GC lock.
// NOTE(review): the lock is taken up front when background finalization
// for this kind is still pending — presumably because the background
// thread may touch this arena list concurrently; confirm against the
// background finalization code.
if (backgroundFinalizeState[thingKind] != BFS_DONE)
maybeLock.emplace(rt);
ArenaList &al = arenaLists[thingKind];
ArenaHeader *aheader = al.takeNextArena();
if (aheader) {
// Empty arenas should be immediately freed.
MOZ_ASSERT(!aheader->isEmpty());
// The arena has free things, so this allocation cannot fail.
return allocateFromArenaInner<HasFreeThings>(zone, aheader, thingKind);
}
// Parallel threads have their own ArenaLists, but chunks are shared;
// if we haven't already, take the GC lock now to avoid racing.
if (maybeLock.isNothing())
maybeLock.emplace(rt);
Chunk *chunk = rt->gc.pickChunk(maybeLock.ref(), maybeStartBGAlloc);
if (!chunk)
return nullptr;
// Although our chunk should definitely have enough space for another arena,
// there are other valid reasons why Chunk::allocateArena() may fail.
aheader = rt->gc.allocateArena(chunk, zone, thingKind, maybeLock.ref());
if (!aheader)
return nullptr;
// The lock must have been held continuously since the arena was obtained,
// and the fresh (empty) arena is inserted at the list's cursor.
MOZ_ASSERT(!maybeLock->wasUnlocked());
MOZ_ASSERT(al.isCursorAtEnd());
al.insertAtCursor(aheader);
return allocateFromArenaInner<IsEmpty>(zone, aheader, thingKind);
}
// Install |aheader|'s free space as the head of the free list for |thingKind|
// and allocate one thing from it. |hasFreeThings| selects at compile time
// whether the arena is partially used (has a recorded free span) or freshly
// allocated (completely empty).
template <ArenaLists::ArenaAllocMode hasFreeThings>
inline TenuredCell *
ArenaLists::allocateFromArenaInner(JS::Zone *zone, ArenaHeader *aheader, AllocKind thingKind)
{
size_t thingSize = Arena::thingSize(thingKind);
FreeSpan span;
if (hasFreeThings) {
MOZ_ASSERT(aheader->hasFreeThings());
// Transfer the arena's recorded free span to the free list and mark the
// arena fully used; the span now tracks its remaining free memory.
span = aheader->getFirstFreeSpan();
aheader->setAsFullyUsed();
} else {
MOZ_ASSERT(!aheader->hasFreeThings());
// Fresh arena: the span covers the whole allocatable region.
Arena *arena = aheader->getArena();
span.initFinal(arena->thingsStart(thingKind), arena->thingsEnd() - thingSize, thingSize);
}
freeLists[thingKind].setHead(&span);
// If a GC is in progress, the arena may need delayed-marking or
// allocated-during-sweep bookkeeping.
if (MOZ_UNLIKELY(zone->wasGCStarted()))
zone->runtimeFromAnyThread()->gc.arenaAllocatedDuringGC(zone, aheader);
TenuredCell *thing = freeLists[thingKind].allocate(thingSize);
MOZ_ASSERT(thing); // This allocation is infallible.
return thing;
}
/* Compacting GC */
bool
@ -2973,110 +2864,6 @@ ArenaLists::queueForegroundThingsForSweep(FreeOp *fop)
gcScriptArenasToUpdate = arenaListsToSweep[AllocKind::SCRIPT];
}
// One refill attempt for |thingKind| on the main thread: try the arenas,
// then wait for background sweep/alloc tasks and try once more. Returns
// nullptr on failure without reporting; the caller decides how to react.
/* static */ void *
GCRuntime::tryRefillFreeListFromMainThread(JSContext *cx, AllocKind thingKind)
{
    ArenaLists *arenas = cx->arenas();
    Zone *zone = cx->zone();

    AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
    void *thing = arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
    if (MOZ_LIKELY(thing))
        return thing;

    // Even if allocateFromArena failed due to OOM, a background
    // finalization or allocation task may be running freeing more memory
    // or adding more available memory to our free pool; wait for them to
    // finish, then try to allocate again in case they made more memory
    // available.
    cx->runtime()->gc.waitBackgroundSweepOrAllocEnd();

    // The redundant "if (thing) return thing; return nullptr;" tail has been
    // collapsed: a failed retry already yields nullptr.
    return arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
}
// Refill the free list for |thingKind| on the main thread. If the first
// attempt fails and GC is allowed, run a last-ditch shrinking GC and retry
// once. Returns nullptr on OOM; the OOM is reported here only when
// |allowGC| (a fallible allocation percolates the nullptr up instead).
template <AllowGC allowGC>
/* static */ void *
GCRuntime::refillFreeListFromMainThread(JSContext *cx, AllocKind thingKind)
{
JSRuntime *rt = cx->runtime();
MOZ_ASSERT(!rt->isHeapBusy(), "allocating while under GC");
MOZ_ASSERT_IF(allowGC, !rt->currentThreadHasExclusiveAccess());
// Try to allocate; synchronize with background GC threads if necessary.
void *thing = tryRefillFreeListFromMainThread(cx, thingKind);
if (MOZ_LIKELY(thing))
return thing;
// Perform a last-ditch GC to hopefully free up some memory.
{
// If we are doing a fallible allocation, percolate up the OOM
// instead of reporting it.
if (!allowGC)
return nullptr;
JS::PrepareForFullGC(rt);
// Hold atoms alive for the duration of the GC in this scope.
AutoKeepAtoms keepAtoms(cx->perThreadData);
rt->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
}
// Retry the allocation after the last-ditch GC.
thing = tryRefillFreeListFromMainThread(cx, thingKind);
if (thing)
return thing;
// We are really just totally out of memory.
MOZ_ASSERT(allowGC, "A fallible allocation must not report OOM on failure.");
ReportOutOfMemory(cx);
return nullptr;
}
// Refill the free list for |thingKind| from a helper (non-main) thread.
// Holds the helper-thread lock, waits until the runtime heap is not busy
// with GC, then tries the allocation exactly once. Reports OOM and returns
// nullptr on failure.
/* static */ void *
GCRuntime::refillFreeListOffMainThread(ExclusiveContext *cx, AllocKind thingKind)
{
ArenaLists *arenas = cx->arenas();
Zone *zone = cx->zone();
JSRuntime *rt = zone->runtimeFromAnyThread();
AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
// If we're off the main thread, we try to allocate once and return
// whatever value we get. We need to first ensure the main thread is not in
// a GC session.
AutoLockHelperThreadState lock;
// Block (releasing the lock while waiting) until the heap is no longer
// busy; the PRODUCER condition is signaled by the GC side.
while (rt->isHeapBusy())
HelperThreadState().wait(GlobalHelperThreadState::PRODUCER);
void *thing = arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
if (thing)
return thing;
ReportOutOfMemory(cx);
return nullptr;
}
// Dispatch a free-list refill to the main-thread or off-main-thread path,
// depending on which kind of context is allocating.
template <AllowGC allowGC>
/* static */ void *
GCRuntime::refillFreeListFromAnyThread(ExclusiveContext *cx, AllocKind thingKind)
{
    // Callers only refill when the free list is exhausted.
    MOZ_ASSERT(cx->arenas()->freeLists[thingKind].isEmpty());

    return cx->isJSContext()
           ? refillFreeListFromMainThread<allowGC>(cx->asJSContext(), thingKind)
           : refillFreeListOffMainThread(cx, thingKind);
}
template void *
GCRuntime::refillFreeListFromAnyThread<NoGC>(ExclusiveContext *cx, AllocKind thingKind);
template void *
GCRuntime::refillFreeListFromAnyThread<CanGC>(ExclusiveContext *cx, AllocKind thingKind);
/* static */ void *
GCRuntime::refillFreeListInGC(Zone *zone, AllocKind thingKind)
{
@ -3089,7 +2876,8 @@ GCRuntime::refillFreeListInGC(Zone *zone, AllocKind thingKind)
MOZ_ASSERT(rt->isHeapMajorCollecting());
MOZ_ASSERT(!rt->gc.isBackgroundSweeping());
return zone->arenas.allocateFromArena(zone, thingKind);
AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
return zone->arenas.allocateFromArena(zone, thingKind, maybeStartBackgroundAllocation);
}
SliceBudget::SliceBudget()

View File

@ -50,6 +50,8 @@ namespace gcstats {
struct Statistics;
}
class Nursery;
namespace gc {
struct FinalizePhase;
@ -780,11 +782,6 @@ class ArenaLists
return freeLists[thingKind].allocate(thingSize);
}
// Returns false on Out-Of-Memory. This method makes no attempt to
// synchronize with background finalization, so may miss available memory
// that is waiting to be finalized.
TenuredCell *allocateFromArena(JS::Zone *zone, AllocKind thingKind);
/*
* Moves all arenas from |fromArenaLists| into |this|.
*/
@ -841,12 +838,12 @@ class ArenaLists
enum ArenaAllocMode { HasFreeThings = true, IsEmpty = false };
template <ArenaAllocMode hasFreeThings>
inline TenuredCell *allocateFromArenaInner(JS::Zone *zone, ArenaHeader *aheader,
AllocKind thingKind);
TenuredCell *allocateFromArenaInner(JS::Zone *zone, ArenaHeader *aheader, AllocKind kind);
inline void normalizeBackgroundFinalizeState(AllocKind thingKind);
friend class GCRuntime;
friend class js::Nursery;
};
/* The number of GC cycles an empty chunk can survive before been released. */