Bug 729760 - GC: Incremental sweeping of shapes and types r=billm

--HG--
extra : rebase_source : c3bdffe5b4093d774f8f5a7bdf42c9e734763326
Jon Coppeard 2012-07-26 09:31:52 +01:00
parent f3dedd36fc
commit 8f882dd7d0
12 changed files with 578 additions and 243 deletions

View File

@ -423,38 +423,47 @@ struct ArenaHeader
* chunk. The latter allows us to quickly check if the arena is allocated
* during the conservative GC scanning without searching the arena in the
* list.
*
* We use 8 bits for the allocKind so the compiler can use byte-level memory
* instructions to access it.
*/
size_t allocKind : 8;
/*
* When recursive marking uses too much stack the marking is delayed and
* the corresponding arenas are put into a stack using the following field
* as a linkage. To distinguish the bottom of the stack from the arenas
* not present in the stack we use an extra flag to tag arenas on the
* stack.
* When collecting we sometimes need to keep an auxiliary list of arenas,
* for which we use the following fields. This happens for several reasons:
*
* When recursive marking uses too much stack the marking is delayed and the
* corresponding arenas are put into a stack. To distinguish the bottom of
* the stack from the arenas not present in the stack we use the
* markOverflow flag to tag arenas on the stack.
*
* Delayed marking is also used for arenas that we allocate into during an
* incremental GC. In this case, we intend to mark all the objects in the
* arena, and it's faster to do this marking in bulk.
*
* To minimize the ArenaHeader size we record the next delayed marking
* linkage as arenaAddress() >> ArenaShift and pack it with the allocKind
* field and hasDelayedMarking flag. We use 8 bits for the allocKind, not
* ArenaShift - 1, so the compiler can use byte-level memory instructions
* to access it.
* When sweeping we keep track of which arenas have been allocated since the
* end of the mark phase. This allows us to tell whether a pointer to an
* unmarked object is yet to be finalized or has already been reallocated.
* We set the allocatedDuringIncremental flag for this and clear it at the
* end of the sweep phase.
*
* To minimize the ArenaHeader size we record the next linkage as
* arenaAddress() >> ArenaShift and pack it with the allocKind field and the
* flags.
*/
public:
size_t hasDelayedMarking : 1;
size_t allocatedDuringIncremental : 1;
size_t markOverflow : 1;
size_t nextDelayedMarking : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
size_t auxNextLink : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
static void staticAsserts() {
/* We must be able to fit the allocKind into uint8_t. */
JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
/*
* nextDelayedMarking packing assumes that ArenaShift has enough bits
* auxNextLink packing assumes that ArenaShift has enough bits
* to cover allocKind and hasDelayedMarking.
*/
JS_STATIC_ASSERT(ArenaShift >= 8 + 1 + 1 + 1);
@ -487,7 +496,7 @@ struct ArenaHeader
markOverflow = 0;
allocatedDuringIncremental = 0;
hasDelayedMarking = 0;
nextDelayedMarking = 0;
auxNextLink = 0;
}
inline uintptr_t arenaAddress() const;
@ -519,6 +528,11 @@ struct ArenaHeader
inline ArenaHeader *getNextDelayedMarking() const;
inline void setNextDelayedMarking(ArenaHeader *aheader);
inline void unsetDelayedMarking();
inline ArenaHeader *getNextAllocDuringSweep() const;
inline void setNextAllocDuringSweep(ArenaHeader *aheader);
inline void unsetAllocDuringSweep();
};
struct Arena
@ -882,15 +896,48 @@ ArenaHeader::setFirstFreeSpan(const FreeSpan *span)
inline ArenaHeader *
ArenaHeader::getNextDelayedMarking() const
{
return &reinterpret_cast<Arena *>(nextDelayedMarking << ArenaShift)->aheader;
JS_ASSERT(hasDelayedMarking);
return &reinterpret_cast<Arena *>(auxNextLink << ArenaShift)->aheader;
}
inline void
ArenaHeader::setNextDelayedMarking(ArenaHeader *aheader)
{
JS_ASSERT(!(uintptr_t(aheader) & ArenaMask));
JS_ASSERT(!auxNextLink && !hasDelayedMarking);
hasDelayedMarking = 1;
nextDelayedMarking = aheader->arenaAddress() >> ArenaShift;
auxNextLink = aheader->arenaAddress() >> ArenaShift;
}
inline void
ArenaHeader::unsetDelayedMarking()
{
JS_ASSERT(hasDelayedMarking);
hasDelayedMarking = 0;
auxNextLink = 0;
}
inline ArenaHeader *
ArenaHeader::getNextAllocDuringSweep() const
{
JS_ASSERT(allocatedDuringIncremental);
return &reinterpret_cast<Arena *>(auxNextLink << ArenaShift)->aheader;
}
inline void
ArenaHeader::setNextAllocDuringSweep(ArenaHeader *aheader)
{
JS_ASSERT(!auxNextLink && !allocatedDuringIncremental);
allocatedDuringIncremental = 1;
auxNextLink = aheader->arenaAddress() >> ArenaShift;
}
inline void
ArenaHeader::unsetAllocDuringSweep()
{
JS_ASSERT(allocatedDuringIncremental);
allocatedDuringIncremental = 0;
auxNextLink = 0;
}
JS_ALWAYS_INLINE void
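
Both auxiliary lists thread arenas through the shared auxNextLink field. As a sketch (not part of the patch itself), this is how the allocated-during-sweep list is used later in this change: arenas are pushed in PushArenaAllocatedDuringSweep() and drained again in EndSweepPhase(), both in jsgc.cpp below.

    /* Push an arena allocated while sweeping onto the runtime-wide list. */
    aheader->setNextAllocDuringSweep(rt->gcArenasAllocatedDuringSweep);
    rt->gcArenasAllocatedDuringSweep = aheader;

    /* At the end of the sweep phase, drain the list and clear the flag. */
    while (ArenaHeader *arena = rt->gcArenasAllocatedDuringSweep) {
        rt->gcArenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
        arena->unsetAllocDuringSweep();
    }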

View File

@ -776,6 +776,11 @@ JSRuntime::JSRuntime()
gcDisableStrictProxyCheckingCount(0),
gcIncrementalState(gc::NO_INCREMENTAL),
gcLastMarkSlice(false),
gcSweepOnBackgroundThread(false),
gcSweepPhase(0),
gcSweepCompartmentIndex(0),
gcSweepKindIndex(0),
gcArenasAllocatedDuringSweep(NULL),
gcInterFrameGC(0),
gcSliceBudget(SliceBudget::Unlimited),
gcIncrementalEnabled(true),

View File

@ -564,6 +564,21 @@ struct JSRuntime : js::RuntimeFriendFields
/* Indicates that the last incremental slice exhausted the mark stack. */
bool gcLastMarkSlice;
/* Whether any sweeping will take place in the separate GC helper thread. */
bool gcSweepOnBackgroundThread;
/*
* Incremental sweep state.
*/
int gcSweepPhase;
ptrdiff_t gcSweepCompartmentIndex;
int gcSweepKindIndex;
/*
* List head of arenas allocated during the sweep phase.
*/
js::gc::ArenaHeader *gcArenasAllocatedDuringSweep;
/*
* Indicates that a GC slice has taken place in the middle of an animation
* frame, rather than at the beginning. In this case, the next slice will be

View File

@ -47,6 +47,7 @@ JSCompartment::JSCompartment(JSRuntime *rt)
needsBarrier_(false),
gcState(NoGCScheduled),
gcPreserveCode(false),
gcStarted(false),
gcBytes(0),
gcTriggerBytes(0),
gcHeapGrowthFactor(3.0),

View File

@ -174,6 +174,7 @@ struct JSCompartment
CompartmentGCState gcState;
bool gcPreserveCode;
bool gcStarted;
public:
bool isCollecting() const {
@ -226,6 +227,19 @@ struct JSCompartment
gcPreserveCode = preserving;
}
bool wasGCStarted() const {
return gcStarted;
}
void setGCStarted(bool started) {
JS_ASSERT(rt->isHeapBusy());
gcStarted = started;
}
bool isGCSweeping() {
return wasGCStarted() && rt->gcIncrementalState == js::gc::SWEEP;
}
size_t gcBytes;
size_t gcTriggerBytes;
size_t gcMaxMallocBytes;

View File

@ -127,7 +127,7 @@ js::PrepareForIncrementalGC(JSRuntime *rt)
return;
for (CompartmentsIter c(rt); !c.done(); c.next()) {
if (c->needsBarrier())
if (c->wasGCStarted())
PrepareCompartmentForGC(c);
}
}

View File

@ -632,6 +632,7 @@ SizeOfJSContext();
D(DEBUG_GC) \
D(DEBUG_MODE_GC) \
D(TRANSPLANT) \
D(RESET) \
\
/* Reasons from Firefox */ \
D(DOM_WINDOW_UTILS) \

View File

@ -200,6 +200,29 @@ const uint32_t Arena::FirstThingOffsets[] = {
#undef OFFSET
/*
* Finalization order for incrementally swept things.
*/
static const AllocKind FinalizePhaseShapes[] = {
FINALIZE_SHAPE,
FINALIZE_BASE_SHAPE,
FINALIZE_TYPE_OBJECT
};
static const AllocKind* FinalizePhases[] = {
FinalizePhaseShapes
};
static const int FinalizePhaseCount = sizeof(FinalizePhases) / sizeof(AllocKind*);
static const int FinalizePhaseLength[] = {
sizeof(FinalizePhaseShapes) / sizeof(AllocKind)
};
static const gcstats::Phase FinalizePhaseStatsPhase[] = {
gcstats::PHASE_SWEEP_SHAPE
};
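
The tables above are parallel arrays: FinalizePhases[i] lists the alloc kinds swept in phase i, FinalizePhaseLength[i] is that list's length, and FinalizePhaseStatsPhase[i] is the stats bucket the time is charged to. With the single phase defined here, that means shapes, base shapes and type objects are swept under PHASE_SWEEP_SHAPE. A minimal sketch of how the tables are meant to be walked; the real traversal, which also saves its loop indices on the runtime so a slice can resume, is in SweepPhase() further down:

    for (int phase = 0; phase < FinalizePhaseCount; ++phase) {
        gcstats::AutoPhase ap(rt->gcStats, FinalizePhaseStatsPhase[phase]);
        for (int i = 0; i < FinalizePhaseLength[phase]; ++i) {
            AllocKind kind = FinalizePhases[phase][i];
            /* ... finalize arenas of this kind, checking the slice budget ... */
        }
    }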
#ifdef DEBUG
void
ArenaHeader::checkSynchronizedWithFreeList() const
@ -327,46 +350,60 @@ Arena::finalize(FreeOp *fop, AllocKind thingKind, size_t thingSize)
return false;
}
/*
* Insert an arena into the list in the appropriate position and update the cursor
* to ensure that any arena before the cursor is full.
*/
void ArenaList::insert(ArenaHeader *a)
{
JS_ASSERT(a);
JS_ASSERT_IF(!head, cursor == &head);
a->next = *cursor;
*cursor = a;
if (!a->hasFreeThings())
cursor = &a->next;
}
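
A hedged sketch of the invariant insert() maintains outside of GC: every arena before the cursor is full, so allocation only needs to look from *cursor onwards for free cells. The checker below is hypothetical and only illustrates the invariant; it is not part of the tree.

    static void
    CheckArenaListCursor(ArenaList &al)
    {
        ArenaHeader **p = &al.head;
        while (p != al.cursor) {
            JS_ASSERT(*p);
            JS_ASSERT(!(*p)->hasFreeThings());  /* everything before the cursor is full */
            p = &(*p)->next;
        }
    }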
template<typename T>
inline void
FinalizeTypedArenas(FreeOp *fop, ArenaLists::ArenaList *al, AllocKind thingKind)
inline bool
FinalizeTypedArenas(FreeOp *fop,
ArenaHeader **src,
ArenaList &dest,
AllocKind thingKind,
SliceBudget &budget)
{
/*
* Release empty arenas and move non-full arenas with some free things into
* a separate list that we append to al after the loop to ensure that any
* arena before al->cursor is full.
* Finalize arenas from the src list, releasing empty arenas and inserting the
* others into dest in the appropriate position.
*/
JS_ASSERT_IF(!al->head, al->cursor == &al->head);
ArenaLists::ArenaList available;
ArenaHeader **ap = &al->head;
size_t thingSize = Arena::thingSize(thingKind);
while (ArenaHeader *aheader = *ap) {
while (ArenaHeader *aheader = *src) {
*src = aheader->next;
bool allClear = aheader->getArena()->finalize<T>(fop, thingKind, thingSize);
if (allClear) {
*ap = aheader->next;
if (allClear)
aheader->chunk()->releaseArena(aheader);
} else if (aheader->hasFreeThings()) {
*ap = aheader->next;
*available.cursor = aheader;
available.cursor = &aheader->next;
} else {
ap = &aheader->next;
}
else
dest.insert(aheader);
budget.step(Arena::thingsPerArena(thingSize));
if (budget.isOverBudget())
return false;
}
/* Terminate the available list and append it to al. */
*available.cursor = NULL;
*ap = available.head;
al->cursor = ap;
JS_ASSERT_IF(!al->head, al->cursor == &al->head);
return true;
}
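
FinalizeTypedArenas now charges the budget one arena at a time (Arena::thingsPerArena(thingSize) units per step) and returns false as soon as the budget is exhausted, leaving *src pointing at the first arena not yet finalized. A minimal usage sketch, assuming only the SliceBudget operations that appear in this patch (a default-constructed budget is unlimited, so finalizeNow and backgroundFinalize always run to completion):

    SliceBudget unlimited;                                         /* never over budget */
    FinalizeArenas(fop, &listHead, dest, thingKind, unlimited);    /* runs to completion */
    JS_ASSERT(!listHead);

    SliceBudget budgeted(SliceBudget::WorkBudget(1000));           /* illustrative limit */
    if (!FinalizeArenas(fop, &listHead, dest, thingKind, budgeted)) {
        /* Over budget: listHead is the resume point for the next slice. */
    }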
/*
* Finalize the list. On return al->cursor points to the first non-empty arena
* after the al->head.
*/
static void
FinalizeArenas(FreeOp *fop, ArenaLists::ArenaList *al, AllocKind thingKind)
static bool
FinalizeArenas(FreeOp *fop,
ArenaHeader **src,
ArenaList &dest,
AllocKind thingKind,
SliceBudget &budget)
{
switch(thingKind) {
case FINALIZE_OBJECT0:
@ -381,34 +418,28 @@ FinalizeArenas(FreeOp *fop, ArenaLists::ArenaList *al, AllocKind thingKind)
case FINALIZE_OBJECT12_BACKGROUND:
case FINALIZE_OBJECT16:
case FINALIZE_OBJECT16_BACKGROUND:
FinalizeTypedArenas<JSObject>(fop, al, thingKind);
break;
return FinalizeTypedArenas<JSObject>(fop, src, dest, thingKind, budget);
case FINALIZE_SCRIPT:
FinalizeTypedArenas<JSScript>(fop, al, thingKind);
break;
return FinalizeTypedArenas<JSScript>(fop, src, dest, thingKind, budget);
case FINALIZE_SHAPE:
FinalizeTypedArenas<Shape>(fop, al, thingKind);
break;
return FinalizeTypedArenas<Shape>(fop, src, dest, thingKind, budget);
case FINALIZE_BASE_SHAPE:
FinalizeTypedArenas<BaseShape>(fop, al, thingKind);
break;
return FinalizeTypedArenas<BaseShape>(fop, src, dest, thingKind, budget);
case FINALIZE_TYPE_OBJECT:
FinalizeTypedArenas<types::TypeObject>(fop, al, thingKind);
break;
return FinalizeTypedArenas<types::TypeObject>(fop, src, dest, thingKind, budget);
#if JS_HAS_XML_SUPPORT
case FINALIZE_XML:
FinalizeTypedArenas<JSXML>(fop, al, thingKind);
break;
return FinalizeTypedArenas<JSXML>(fop, src, dest, thingKind, budget);
#endif
case FINALIZE_STRING:
FinalizeTypedArenas<JSString>(fop, al, thingKind);
break;
return FinalizeTypedArenas<JSString>(fop, src, dest, thingKind, budget);
case FINALIZE_SHORT_STRING:
FinalizeTypedArenas<JSShortString>(fop, al, thingKind);
break;
return FinalizeTypedArenas<JSShortString>(fop, src, dest, thingKind, budget);
case FINALIZE_EXTERNAL_STRING:
FinalizeTypedArenas<JSExternalString>(fop, al, thingKind);
break;
return FinalizeTypedArenas<JSExternalString>(fop, src, dest, thingKind, budget);
default:
JS_NOT_REACHED("Invalid alloc kind");
return true;
}
}
@ -1436,6 +1467,13 @@ ArenaLists::prepareForIncrementalGC(JSRuntime *rt)
}
}
static inline void
PushArenaAllocatedDuringSweep(JSRuntime *runtime, ArenaHeader *arena)
{
arena->setNextAllocDuringSweep(runtime->gcArenasAllocatedDuringSweep);
runtime->gcArenasAllocatedDuringSweep = arena;
}
inline void *
ArenaLists::allocateFromArena(JSCompartment *comp, AllocKind thingKind)
{
@ -1489,9 +1527,13 @@ ArenaLists::allocateFromArena(JSCompartment *comp, AllocKind thingKind)
*/
freeLists[thingKind] = aheader->getFirstFreeSpan();
aheader->setAsFullyUsed();
if (JS_UNLIKELY(comp->needsBarrier())) {
aheader->allocatedDuringIncremental = true;
comp->rt->gcMarker.delayMarkingArena(aheader);
if (JS_UNLIKELY(comp->wasGCStarted())) {
if (comp->needsBarrier()) {
aheader->allocatedDuringIncremental = true;
comp->rt->gcMarker.delayMarkingArena(aheader);
} else if (comp->isGCSweeping()) {
PushArenaAllocatedDuringSweep(comp->rt, aheader);
}
}
return freeLists[thingKind].infallibleAllocate(Arena::thingSize(thingKind));
}
@ -1518,9 +1560,13 @@ ArenaLists::allocateFromArena(JSCompartment *comp, AllocKind thingKind)
if (!aheader)
return NULL;
if (JS_UNLIKELY(comp->needsBarrier())) {
aheader->allocatedDuringIncremental = true;
comp->rt->gcMarker.delayMarkingArena(aheader);
if (JS_UNLIKELY(comp->wasGCStarted())) {
if (comp->needsBarrier()) {
aheader->allocatedDuringIncremental = true;
comp->rt->gcMarker.delayMarkingArena(aheader);
} else if (comp->isGCSweeping()) {
PushArenaAllocatedDuringSweep(comp->rt, aheader);
}
}
aheader->next = al->head;
if (!al->head) {
@ -1539,14 +1585,31 @@ ArenaLists::allocateFromArena(JSCompartment *comp, AllocKind thingKind)
void
ArenaLists::finalizeNow(FreeOp *fop, AllocKind thingKind)
{
JS_ASSERT(!fop->onBackgroundThread());
JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE ||
backgroundFinalizeState[thingKind] == BFS_JUST_FINISHED);
ArenaHeader *arenas = arenaLists[thingKind].head;
arenaLists[thingKind].clear();
SliceBudget budget;
FinalizeArenas(fop, &arenas, arenaLists[thingKind], thingKind, budget);
JS_ASSERT(!arenas);
}
void
ArenaLists::queueForForegroundSweep(FreeOp *fop, AllocKind thingKind)
{
JS_ASSERT(!fop->onBackgroundThread());
JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
FinalizeArenas(fop, &arenaLists[thingKind], thingKind);
JS_ASSERT(!arenaListsToSweep[thingKind]);
arenaListsToSweep[thingKind] = arenaLists[thingKind].head;
arenaLists[thingKind].clear();
}
inline void
ArenaLists::finalizeLater(FreeOp *fop, AllocKind thingKind)
ArenaLists::queueForBackgroundSweep(FreeOp *fop, AllocKind thingKind)
{
JS_ASSERT(thingKind == FINALIZE_OBJECT0_BACKGROUND ||
thingKind == FINALIZE_OBJECT2_BACKGROUND ||
@ -1585,7 +1648,7 @@ ArenaLists::finalizeLater(FreeOp *fop, AllocKind thingKind)
al->clear();
backgroundFinalizeState[thingKind] = BFS_RUN;
} else {
FinalizeArenas(fop, al, thingKind);
finalizeNow(fop, thingKind);
backgroundFinalizeState[thingKind] = BFS_DONE;
}
@ -1605,9 +1668,11 @@ ArenaLists::backgroundFinalize(FreeOp *fop, ArenaHeader *listHead)
JS_ASSERT(listHead);
AllocKind thingKind = listHead->getAllocKind();
JSCompartment *comp = listHead->compartment;
ArenaList finalized;
finalized.head = listHead;
FinalizeArenas(fop, &finalized, thingKind);
SliceBudget budget;
FinalizeArenas(fop, &listHead, finalized, thingKind, budget);
JS_ASSERT(!listHead);
/*
* After we finish the finalization al->cursor must point to the end of
@ -1641,7 +1706,7 @@ ArenaLists::backgroundFinalize(FreeOp *fop, ArenaHeader *listHead)
}
void
ArenaLists::finalizeObjects(FreeOp *fop)
ArenaLists::queueObjectsForSweep(FreeOp *fop)
{
finalizeNow(fop, FINALIZE_OBJECT0);
finalizeNow(fop, FINALIZE_OBJECT2);
@ -1650,12 +1715,12 @@ ArenaLists::finalizeObjects(FreeOp *fop)
finalizeNow(fop, FINALIZE_OBJECT12);
finalizeNow(fop, FINALIZE_OBJECT16);
finalizeLater(fop, FINALIZE_OBJECT0_BACKGROUND);
finalizeLater(fop, FINALIZE_OBJECT2_BACKGROUND);
finalizeLater(fop, FINALIZE_OBJECT4_BACKGROUND);
finalizeLater(fop, FINALIZE_OBJECT8_BACKGROUND);
finalizeLater(fop, FINALIZE_OBJECT12_BACKGROUND);
finalizeLater(fop, FINALIZE_OBJECT16_BACKGROUND);
queueForBackgroundSweep(fop, FINALIZE_OBJECT0_BACKGROUND);
queueForBackgroundSweep(fop, FINALIZE_OBJECT2_BACKGROUND);
queueForBackgroundSweep(fop, FINALIZE_OBJECT4_BACKGROUND);
queueForBackgroundSweep(fop, FINALIZE_OBJECT8_BACKGROUND);
queueForBackgroundSweep(fop, FINALIZE_OBJECT12_BACKGROUND);
queueForBackgroundSweep(fop, FINALIZE_OBJECT16_BACKGROUND);
#if JS_HAS_XML_SUPPORT
finalizeNow(fop, FINALIZE_XML);
@ -1663,26 +1728,26 @@ ArenaLists::finalizeObjects(FreeOp *fop)
}
void
ArenaLists::finalizeStrings(FreeOp *fop)
ArenaLists::queueStringsForSweep(FreeOp *fop)
{
finalizeLater(fop, FINALIZE_SHORT_STRING);
finalizeLater(fop, FINALIZE_STRING);
queueForBackgroundSweep(fop, FINALIZE_SHORT_STRING);
queueForBackgroundSweep(fop, FINALIZE_STRING);
finalizeNow(fop, FINALIZE_EXTERNAL_STRING);
}
void
ArenaLists::finalizeShapes(FreeOp *fop)
ArenaLists::queueScriptsForSweep(FreeOp *fop)
{
finalizeNow(fop, FINALIZE_SHAPE);
finalizeNow(fop, FINALIZE_BASE_SHAPE);
finalizeNow(fop, FINALIZE_TYPE_OBJECT);
finalizeNow(fop, FINALIZE_SCRIPT);
}
void
ArenaLists::finalizeScripts(FreeOp *fop)
ArenaLists::queueShapesForSweep(FreeOp *fop)
{
finalizeNow(fop, FINALIZE_SCRIPT);
queueForForegroundSweep(fop, FINALIZE_SHAPE);
queueForForegroundSweep(fop, FINALIZE_BASE_SHAPE);
queueForForegroundSweep(fop, FINALIZE_TYPE_OBJECT);
}
static void
@ -1923,7 +1988,7 @@ GCMarker::reset()
JS_ASSERT(aheader->hasDelayedMarking);
JS_ASSERT(markLaterArenas);
unmarkedArenaStackTop = aheader->getNextDelayedMarking();
aheader->hasDelayedMarking = 0;
aheader->unsetDelayedMarking();
aheader->markOverflow = 0;
aheader->allocatedDuringIncremental = 0;
markLaterArenas--;
@ -2006,7 +2071,7 @@ GCMarker::markDelayedChildren(SliceBudget &budget)
JS_ASSERT(aheader->hasDelayedMarking);
JS_ASSERT(markLaterArenas);
unmarkedArenaStackTop = aheader->getNextDelayedMarking();
aheader->hasDelayedMarking = 0;
aheader->unsetDelayedMarking();
markLaterArenas--;
markDelayedChildren(aheader);
@ -3042,9 +3107,11 @@ ReleaseObservedTypes(JSRuntime *rt)
}
static void
SweepCompartments(FreeOp *fop, JSGCInvocationKind gckind)
SweepCompartments(FreeOp *fop, gcreason::Reason gcReason)
{
JSRuntime *rt = fop->runtime();
JS_ASSERT_IF(gcReason == gcreason::LAST_CONTEXT, !rt->hasContexts());
JSDestroyCompartmentCallback callback = rt->destroyCompartmentCallback;
/* Skip the atomsCompartment. */
@ -3058,7 +3125,7 @@ SweepCompartments(FreeOp *fop, JSGCInvocationKind gckind)
JSCompartment *compartment = *read++;
if (!compartment->hold && compartment->isCollecting() &&
(compartment->arenas.arenaListsAreEmpty() || !rt->hasContexts()))
(compartment->arenas.arenaListsAreEmpty() || gcReason == gcreason::LAST_CONTEXT))
{
compartment->arenas.checkEmptyFreeLists();
if (callback)
@ -3133,7 +3200,13 @@ BeginMarkPhase(JSRuntime *rt, bool isIncremental)
rt->gcIsFull = true;
for (CompartmentsIter c(rt); !c.done(); c.next()) {
if (!c->isCollecting())
JS_ASSERT(!c->wasGCStarted());
for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i)
JS_ASSERT(!c->arenas.arenaListsToSweep[i]);
if (c->isCollecting())
c->setGCStarted(true);
else
rt->gcIsFull = false;
c->setPreservingCode(ShouldPreserveJITCode(c, currentTime));
@ -3384,7 +3457,7 @@ ValidateIncrementalMarking(JSRuntime *rt)
#endif
static void
SweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool *startBackgroundSweep)
BeginSweepPhase(JSRuntime *rt)
{
/*
* Sweep phase.
@ -3412,13 +3485,14 @@ SweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool *startBackgroundSweep)
isFull = false;
}
*startBackgroundSweep = (rt->hasContexts() && rt->gcHelperThread.prepareForBackgroundSweep());
rt->gcSweepOnBackgroundThread =
(rt->hasContexts() && rt->gcHelperThread.prepareForBackgroundSweep());
/* Purge the ArenaLists before sweeping. */
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.purge();
FreeOp fop(rt, *startBackgroundSweep, false);
FreeOp fop(rt, rt->gcSweepOnBackgroundThread, false);
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_START);
@ -3453,37 +3527,82 @@ SweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool *startBackgroundSweep)
}
}
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_OBJECT);
/*
* Queue all GC things in all compartments for sweeping, either in the
* foreground or on the background thread.
*
* Note that order is important here for the background case.
*
* Objects are finalized immediately but this may change in the future.
*/
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.queueObjectsForSweep(&fop);
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.queueStringsForSweep(&fop);
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.queueScriptsForSweep(&fop);
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.queueShapesForSweep(&fop);
/*
* We finalize objects before other GC things to ensure that the object's
* finalizer can access the other things even if they will be freed.
*/
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.finalizeObjects(&fop);
}
rt->gcSweepPhase = 0;
rt->gcSweepCompartmentIndex = 0;
rt->gcSweepKindIndex = 0;
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_STRING);
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.finalizeStrings(&fop);
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);
if (rt->gcFinalizeCallback)
rt->gcFinalizeCallback(&fop, JSFINALIZE_END, !rt->gcIsFull);
}
}
bool
ArenaLists::foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget)
{
if (!arenaListsToSweep[thingKind])
return true;
ArenaList &dest = arenaLists[thingKind];
return FinalizeArenas(fop, &arenaListsToSweep[thingKind], dest, thingKind, sliceBudget);
}
static bool
SweepPhase(JSRuntime *rt, SliceBudget &sliceBudget)
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
FreeOp fop(rt, rt->gcSweepOnBackgroundThread, false);
for (; rt->gcSweepPhase < FinalizePhaseCount ; ++rt->gcSweepPhase) {
gcstats::AutoPhase ap(rt->gcStats, FinalizePhaseStatsPhase[rt->gcSweepPhase]);
ptrdiff_t len = rt->compartments.end() - rt->compartments.begin();
for (; rt->gcSweepCompartmentIndex < len ; ++rt->gcSweepCompartmentIndex) {
JSCompartment *c = rt->compartments.begin()[rt->gcSweepCompartmentIndex];
if (c->wasGCStarted()) {
while (rt->gcSweepKindIndex < FinalizePhaseLength[rt->gcSweepPhase]) {
AllocKind kind = FinalizePhases[rt->gcSweepPhase][rt->gcSweepKindIndex];
if (!c->arenas.foregroundFinalize(&fop, kind, sliceBudget))
return false;
++rt->gcSweepKindIndex;
}
}
rt->gcSweepKindIndex = 0;
}
rt->gcSweepCompartmentIndex = 0;
}
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_SCRIPT);
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.finalizeScripts(&fop);
}
return true;
}
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_SHAPE);
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.finalizeShapes(&fop);
}
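
SweepPhase() above is written to be abandoned mid-loop and re-entered: gcSweepPhase, gcSweepCompartmentIndex and gcSweepKindIndex live on the JSRuntime rather than on the stack, so a later slice resumes exactly where the previous one ran out of budget. A sketch of the calling pattern, mirroring the SWEEP case of IncrementalCollectSlice() below:

    /* One GC slice while in the SWEEP state. */
    SliceBudget sliceBudget(budget);
    if (!SweepPhase(rt, sliceBudget))
        return;                              /* over budget: yield, state stays SWEEP */
    EndSweepPhase(rt, gckind, reason);       /* all arenaListsToSweep lists are now empty */
    if (rt->gcSweepOnBackgroundThread)
        rt->gcHelperThread.startBackgroundSweep(gckind == GC_SHRINK);
    rt->gcIncrementalState = NO_INCREMENTAL;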
static void
EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, gcreason::Reason gcReason)
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
FreeOp fop(rt, rt->gcSweepOnBackgroundThread, false);
#ifdef DEBUG
PropertyTree::dumpShapes(rt);
PropertyTree::dumpShapes(rt);
#endif
{
@ -3504,7 +3623,7 @@ SweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool *startBackgroundSweep)
* This removes compartments from rt->compartment, so we do it last to make
* sure we don't miss sweeping any compartments.
*/
SweepCompartments(&fop, gckind);
SweepCompartments(&fop, gcReason);
#ifndef JS_THREADSAFE
/*
@ -3516,14 +3635,24 @@ SweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool *startBackgroundSweep)
#endif
}
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);
if (rt->gcFinalizeCallback)
rt->gcFinalizeCallback(&fop, JSFINALIZE_END, !isFull);
/*
* Reset the list of arenas marked as being allocated during sweep phase.
*/
while (ArenaHeader *arena = rt->gcArenasAllocatedDuringSweep) {
rt->gcArenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
arena->unsetAllocDuringSweep();
}
for (CompartmentsIter c(rt); !c.done(); c.next())
for (CompartmentsIter c(rt); !c.done(); c.next()) {
c->setGCLastBytes(c->gcBytes, c->gcMallocAndFreeBytes, gckind);
if (c->wasGCStarted())
c->setGCStarted(false);
JS_ASSERT(!c->wasGCStarted());
for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i)
JS_ASSERT(!c->arenas.arenaListsToSweep[i]);
}
rt->gcLastGCTime = PRMJ_Now();
}
@ -3608,17 +3737,38 @@ AutoGCSession::~AutoGCSession()
runtime->resetGCMallocBytes();
}
static void
IncrementalCollectSlice(JSRuntime *rt,
int64_t budget,
gcreason::Reason gcReason,
JSGCInvocationKind gcKind);
static void
ResetIncrementalGC(JSRuntime *rt, const char *reason)
{
if (rt->gcIncrementalState == NO_INCREMENTAL)
return;
for (CompartmentsIter c(rt); !c.done(); c.next())
if (rt->gcIncrementalState == SWEEP) {
/* If we've finished marking then sweep to completion here. */
IncrementalCollectSlice(rt, SliceBudget::Unlimited, gcreason::RESET, GC_NORMAL);
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
return;
}
JS_ASSERT(rt->gcIncrementalState == MARK);
for (CompartmentsIter c(rt); !c.done(); c.next()) {
c->setNeedsBarrier(false);
c->setGCStarted(false);
for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i)
JS_ASSERT(!c->arenas.arenaListsToSweep[i]);
}
rt->gcMarker.reset();
rt->gcMarker.stop();
rt->gcIncrementalState = NO_INCREMENTAL;
JS_ASSERT(!rt->gcStrictCompartmentChecking);
@ -3648,10 +3798,12 @@ AutoGCSlice::AutoGCSlice(JSRuntime *rt)
for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
/* Clear this early so we don't do any write barriers during GC. */
if (rt->gcIncrementalState == MARK)
if (rt->gcIncrementalState == MARK) {
JS_ASSERT(c->needsBarrier());
c->setNeedsBarrier(false);
else
} else {
JS_ASSERT(!c->needsBarrier());
}
}
}
@ -3662,7 +3814,8 @@ AutoGCSlice::~AutoGCSlice()
c->setNeedsBarrier(true);
c->arenas.prepareForIncrementalGC(runtime);
} else {
JS_ASSERT(runtime->gcIncrementalState == NO_INCREMENTAL);
JS_ASSERT(runtime->gcIncrementalState == NO_INCREMENTAL ||
runtime->gcIncrementalState == SWEEP);
c->setNeedsBarrier(false);
}
}
@ -3685,20 +3838,49 @@ class AutoCopyFreeListToArenas {
};
static void
IncrementalMarkSlice(JSRuntime *rt, int64_t budget, gcreason::Reason reason, bool *shouldSweep)
PushZealSelectedObjects(JSRuntime *rt)
{
#ifdef JS_GC_ZEAL
/* Push selected objects onto the mark stack and clear the list. */
for (JSObject **obj = rt->gcSelectedForMarking.begin();
obj != rt->gcSelectedForMarking.end(); obj++)
{
MarkObjectUnbarriered(&rt->gcMarker, obj, "selected obj");
}
#endif
}
static bool
DrainMarkStack(JSRuntime *rt, SliceBudget &sliceBudget)
{
/* Run a marking slice and return whether the stack is now empty. */
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_MARK);
return rt->gcMarker.drainMarkStack(sliceBudget);
}
static void
IncrementalCollectSlice(JSRuntime *rt,
int64_t budget,
gcreason::Reason reason,
JSGCInvocationKind gckind)
{
AutoCopyFreeListToArenas copy(rt);
AutoGCSlice slice(rt);
gc::State initialState = rt->gcIncrementalState;
*shouldSweep = false;
SliceBudget sliceBudget(budget);
int zeal = 0;
#ifdef JS_GC_ZEAL
if (reason == gcreason::DEBUG_GC) {
// Do the collection type specified by zeal mode only if the collection
// was triggered by RunDebugGC().
/*
* Do the collection type specified by zeal mode only if the collection
* was triggered by RunDebugGC().
*/
zeal = rt->gcZeal();
JS_ASSERT_IF(zeal == ZealIncrementalMarkAllThenFinish ||
zeal == ZealIncrementalRootsThenFinish,
budget == SliceBudget::Unlimited);
}
#endif
@ -3712,51 +3894,88 @@ IncrementalMarkSlice(JSRuntime *rt, int64_t budget, gcreason::Reason reason, boo
rt->gcLastMarkSlice = false;
}
if (rt->gcIncrementalState == MARK_ROOTS) {
switch (rt->gcIncrementalState) {
case MARK_ROOTS:
BeginMarkPhase(rt, isIncremental);
PushZealSelectedObjects(rt);
rt->gcIncrementalState = MARK;
if (zeal == ZealIncrementalRootsThenFinish)
return;
}
break;
if (rt->gcIncrementalState == MARK) {
SliceBudget sliceBudget(budget);
/* fall through */
case MARK: {
/* If we needed delayed marking for gray roots, then collect until done. */
if (!rt->gcMarker.hasBufferedGrayRoots())
sliceBudget.reset();
#ifdef JS_GC_ZEAL
if (!rt->gcSelectedForMarking.empty()) {
for (JSObject **obj = rt->gcSelectedForMarking.begin();
obj != rt->gcSelectedForMarking.end(); obj++)
{
MarkObjectUnbarriered(&rt->gcMarker, obj, "selected obj");
}
bool finished = DrainMarkStack(rt, sliceBudget);
if (!finished)
break;
JS_ASSERT(rt->gcMarker.isDrained());
if (!rt->gcLastMarkSlice &&
((initialState == MARK && budget != SliceBudget::Unlimited) ||
zeal == ZealIncrementalMarkAllThenFinish))
{
/*
* Yield with the aim of starting the sweep in the next
* slice. We will need to mark anything new on the stack
* when we resume, so we stay in MARK state.
*/
rt->gcLastMarkSlice = true;
break;
}
EndMarkPhase(rt, isIncremental);
rt->gcIncrementalState = SWEEP;
/*
* This runs to completion, but we don't continue if the budget is
* now exhausted.
*/
BeginSweepPhase(rt);
if (sliceBudget.isOverBudget())
break;
/*
* Always yield here when running in incremental multi-slice zeal
* mode, so RunDebugGC can reset the slice budget.
*/
if (budget != SliceBudget::Unlimited && zeal == ZealIncrementalMultipleSlices)
break;
/* fall through */
}
case SWEEP: {
#ifdef DEBUG
for (CompartmentsIter c(rt); !c.done(); c.next())
JS_ASSERT(!c->needsBarrier());
#endif
bool finished;
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_MARK);
finished = rt->gcMarker.drainMarkStack(sliceBudget);
}
if (finished) {
JS_ASSERT(rt->gcMarker.isDrained());
bool finished = SweepPhase(rt, sliceBudget);
if (!finished)
break;
if (!rt->gcLastMarkSlice &&
((initialState == MARK && budget != SliceBudget::Unlimited) ||
zeal == ZealIncrementalMarkAllThenFinish))
{
rt->gcLastMarkSlice = true;
} else {
EndMarkPhase(rt, isIncremental);
rt->gcIncrementalState = NO_INCREMENTAL;
*shouldSweep = true;
}
}
}
EndSweepPhase(rt, gckind, reason);
if (rt->gcSweepOnBackgroundThread)
rt->gcHelperThread.startBackgroundSweep(gckind == GC_SHRINK);
rt->gcIncrementalState = NO_INCREMENTAL;
break;
}
default:
JS_ASSERT(false);
}
}
class IncrementalSafety
@ -3834,8 +4053,10 @@ BudgetIncrementalGC(JSRuntime *rt, int64_t *budget)
rt->gcStats.nonincremental("malloc bytes trigger");
}
if (c->isCollecting() != c->needsBarrier())
if (rt->gcIncrementalState != NO_INCREMENTAL &&
c->isCollecting() != c->wasGCStarted()) {
reset = true;
}
}
if (reset)
@ -3877,7 +4098,6 @@ GCCycle(JSRuntime *rt, bool incremental, int64_t budget, JSGCInvocationKind gcki
rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
}
bool startBackgroundSweep = false;
{
if (!incremental) {
/* If non-incremental GC was requested, reset incremental GC. */
@ -3888,23 +4108,8 @@ GCCycle(JSRuntime *rt, bool incremental, int64_t budget, JSGCInvocationKind gcki
BudgetIncrementalGC(rt, &budget);
}
AutoCopyFreeListToArenas copy(rt);
bool shouldSweep;
IncrementalMarkSlice(rt, budget, reason, &shouldSweep);
#ifdef DEBUG
if (rt->gcIncrementalState == NO_INCREMENTAL) {
for (CompartmentsIter c(rt); !c.done(); c.next())
JS_ASSERT(!c->needsBarrier());
}
#endif
if (shouldSweep)
SweepPhase(rt, gckind, &startBackgroundSweep);
IncrementalCollectSlice(rt, budget, reason, gckind);
}
if (startBackgroundSweep)
rt->gcHelperThread.startBackgroundSweep(gckind == GC_SHRINK);
}
#ifdef JS_GC_ZEAL
@ -4270,20 +4475,34 @@ RunDebugGC(JSContext *cx)
type == ZealIncrementalMarkAllThenFinish ||
type == ZealIncrementalMultipleSlices)
{
js::gc::State initialState = rt->gcIncrementalState;
int64_t budget;
if (type == ZealIncrementalMultipleSlices) {
// Start with a small slice limit and double it every slice. This ensures that we get
// multiple slices, and collection runs to completion.
if (rt->gcIncrementalState == NO_INCREMENTAL)
/*
* Start with a small slice limit and double it every slice. This
* ensures that we get multiple slices, and collection runs to
* completion.
*/
if (initialState == NO_INCREMENTAL)
rt->gcIncrementalLimit = rt->gcZealFrequency / 2;
else
rt->gcIncrementalLimit *= 2;
budget = SliceBudget::WorkBudget(rt->gcIncrementalLimit);
} else {
// This triggers incremental GC but is actually ignored by IncrementalMarkSlice.
budget = SliceBudget::Unlimited;
}
Collect(rt, true, budget, GC_NORMAL, gcreason::DEBUG_GC);
/*
* For multi-slice zeal, reset the slice size when we get to the sweep
* phase.
*/
if (type == ZealIncrementalMultipleSlices &&
initialState == MARK && rt->gcIncrementalState == SWEEP)
{
rt->gcIncrementalLimit = rt->gcZealFrequency / 2;
}
} else {
Collect(rt, false, SliceBudget::Unlimited, GC_NORMAL, gcreason::DEBUG_GC);
}

View File

@ -41,6 +41,7 @@ namespace js {
class GCHelperThread;
struct Shape;
struct SliceBudget;
namespace gc {
@ -48,6 +49,7 @@ enum State {
NO_INCREMENTAL,
MARK_ROOTS,
MARK,
SWEEP,
INVALID
};
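
With SWEEP added, an incremental collection now moves NO_INCREMENTAL -> MARK_ROOTS -> MARK -> SWEEP -> NO_INCREMENTAL, where both MARK and SWEEP may span several slices. The switch below is only an illustrative sketch of what each state means; the real driver is IncrementalCollectSlice() in jsgc.cpp.

    switch (rt->gcIncrementalState) {
      case gc::NO_INCREMENTAL: /* no incremental collection in progress */        break;
      case gc::MARK_ROOTS:     /* roots are being pushed onto the mark stack */   break;
      case gc::MARK:           /* mark stack is drained a budget at a time */     break;
      case gc::SWEEP:          /* shapes/types are finalized a budget at a time */ break;
      default:                 JS_NOT_REACHED("invalid incremental state");
    }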
@ -146,33 +148,35 @@ IsNurseryAllocable(AllocKind kind)
inline JSGCTraceKind
GetGCThingTraceKind(const void *thing);
/*
* ArenaList::head points to the start of the list. Normally cursor points
* to the first arena in the list with some free things and all arenas
* before cursor are fully allocated. However, the arena currently being
* allocated from is considered full while its list of free spans is moved
* into the freeList, so during the GC or cell enumeration, when an
* unallocated freeList is moved back to the arena, we can see an arena
* with some free cells before the cursor. The cursor is an indirect
* pointer to allow for efficient list insertion at the cursor point and
* other list manipulations.
*/
struct ArenaList {
ArenaHeader *head;
ArenaHeader **cursor;
ArenaList() {
clear();
}
void clear() {
head = NULL;
cursor = &head;
}
void insert(ArenaHeader *arena);
};
struct ArenaLists {
/*
* ArenaList::head points to the start of the list. Normally cursor points
* to the first arena in the list with some free things and all arenas
* before cursor are fully allocated. However, as the arena currently being
* allocated from is considered full while its list of free spans is moved
* into the freeList, during the GC or cell enumeration, when an
* unallocated freeList is moved back to the arena, we can see an arena
* with some free cells before the cursor. The cursor is an indirect
* pointer to allow for efficient list insertion at the cursor point and
* other list manipulations.
*/
struct ArenaList {
ArenaHeader *head;
ArenaHeader **cursor;
ArenaList() {
clear();
}
void clear() {
head = NULL;
cursor = &head;
}
};
private:
/*
* For each arena kind its free list is represented as the first span with
@ -211,12 +215,18 @@ struct ArenaLists {
volatile uintptr_t backgroundFinalizeState[FINALIZE_LIMIT];
public:
/* For each arena kind, a list of arenas remaining to be swept. */
ArenaHeader *arenaListsToSweep[FINALIZE_LIMIT];
public:
ArenaLists() {
for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
freeLists[i].initAsEmpty();
for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
backgroundFinalizeState[i] = BFS_DONE;
for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
arenaListsToSweep[i] = NULL;
}
~ArenaLists() {
@ -256,6 +266,10 @@ struct ArenaLists {
return true;
}
bool arenasAreFull(AllocKind thingKind) const {
return !*arenaLists[thingKind].cursor;
}
void unmarkAll() {
for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
/* The background finalization must have stopped at this point. */
@ -364,16 +378,18 @@ struct ArenaLists {
JS_ASSERT(freeLists[kind].isEmpty());
}
void finalizeObjects(FreeOp *fop);
void finalizeStrings(FreeOp *fop);
void finalizeShapes(FreeOp *fop);
void finalizeScripts(FreeOp *fop);
void queueObjectsForSweep(FreeOp *fop);
void queueStringsForSweep(FreeOp *fop);
void queueShapesForSweep(FreeOp *fop);
void queueScriptsForSweep(FreeOp *fop);
bool foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget);
static void backgroundFinalize(FreeOp *fop, ArenaHeader *listHead);
private:
inline void finalizeNow(FreeOp *fop, AllocKind thingKind);
inline void finalizeLater(FreeOp *fop, AllocKind thingKind);
inline void queueForForegroundSweep(FreeOp *fop, AllocKind thingKind);
inline void queueForBackgroundSweep(FreeOp *fop, AllocKind thingKind);
inline void *allocateFromArena(JSCompartment *comp, AllocKind thingKind);
};

View File

@ -418,7 +418,7 @@ NewGCThing(JSContext *cx, js::gc::AllocKind kind, size_t thingSize)
if (!t)
t = js::gc::ArenaLists::refillFreeList(cx, kind);
JS_ASSERT_IF(t && comp->needsBarrier(),
JS_ASSERT_IF(t && comp->wasGCStarted() && comp->needsBarrier(),
static_cast<T *>(t)->arenaHeader()->allocatedDuringIncremental);
#if defined(JSGC_GENERATIONAL) && defined(JS_GC_ZEAL)
@ -445,7 +445,7 @@ TryNewGCThing(JSContext *cx, js::gc::AllocKind kind, size_t thingSize)
#endif
void *t = cx->compartment->arenas.allocateFromFreeList(kind, thingSize);
JS_ASSERT_IF(t && cx->compartment->needsBarrier(),
JS_ASSERT_IF(t && cx->compartment->wasGCStarted() && cx->compartment->needsBarrier(),
static_cast<T *>(t)->arenaHeader()->allocatedDuringIncremental);
#if defined(JSGC_GENERATIONAL) && defined(JS_GC_ZEAL)

View File

@ -102,12 +102,14 @@ void
Shape::removeChild(Shape *child)
{
JS_ASSERT(!child->inDictionary());
JS_ASSERT(child->parent == this);
KidsPointer *kidp = &kids;
if (kidp->isShape()) {
JS_ASSERT(kidp->toShape() == child);
kidp->setNull();
child->parent = NULL;
return;
}
@ -115,6 +117,7 @@ Shape::removeChild(Shape *child)
JS_ASSERT(hash->count() >= 2); /* otherwise kidp->isShape() should be true */
hash->remove(child);
child->parent = NULL;
if (hash->count() == 1) {
/* Convert from HASH form back to SHAPE form. */
@ -126,27 +129,10 @@ Shape::removeChild(Shape *child)
}
}
/*
* We need a read barrier for the shape tree, since these are weak pointers.
*/
static Shape *
ReadBarrier(Shape *shape)
{
#ifdef JSGC_INCREMENTAL
JSCompartment *comp = shape->compartment();
if (comp->needsBarrier()) {
Shape *tmp = shape;
MarkShapeUnbarriered(comp->barrierTracer(), &tmp, "read barrier");
JS_ASSERT(tmp == shape);
}
#endif
return shape;
}
Shape *
PropertyTree::getChild(JSContext *cx, Shape *parent_, uint32_t nfixed, const StackShape &child)
{
Shape *shape;
Shape *shape = NULL;
JS_ASSERT(parent_);
@ -160,17 +146,43 @@ PropertyTree::getChild(JSContext *cx, Shape *parent_, uint32_t nfixed, const Sta
*/
KidsPointer *kidp = &parent_->kids;
if (kidp->isShape()) {
shape = kidp->toShape();
if (shape->matches(child))
return ReadBarrier(shape);
Shape *kid = kidp->toShape();
if (kid->matches(child))
shape = kid;
} else if (kidp->isHash()) {
shape = *kidp->toHash()->lookup(child);
if (shape)
return ReadBarrier(shape);
} else {
/* If kidp->isNull(), we always insert. */
}
#ifdef JSGC_INCREMENTAL
if (shape) {
JSCompartment *comp = shape->compartment();
if (comp->needsBarrier()) {
/*
* We need a read barrier for the shape tree, since these are weak
* pointers.
*/
Shape *tmp = shape;
MarkShapeUnbarriered(comp->barrierTracer(), &tmp, "read barrier");
JS_ASSERT(tmp == shape);
} else if (comp->isGCSweeping() && !shape->isMarked() &&
!shape->arenaHeader()->allocatedDuringIncremental)
{
/*
* The shape we've found is unreachable and due to be finalized, so
* remove our weak reference to it and don't use it.
*/
JS_ASSERT(parent_->isMarked());
parent_->removeChild(shape);
shape = NULL;
}
}
#endif
if (shape)
return shape;
StackShape::AutoRooter childRoot(cx, &child);
RootedShape parent(cx, parent_);
@ -190,6 +202,11 @@ void
Shape::finalize(FreeOp *fop)
{
if (!inDictionary()) {
/*
* Note that due to incremental sweeping, if !parent->isMarked() then
* the parent may point to a new shape allocated in the same cell that
* used to hold our parent.
*/
if (parent && parent->isMarked())
parent->removeChild(this);

View File

@ -1201,7 +1201,7 @@ ScriptSource::createFromSource(JSContext *cx, const jschar *src, uint32_t length
* accessed even if the name was already in the table. At this point old
* scripts pointing to the source may no longer be reachable.
*/
if (cx->runtime->gcIncrementalState == MARK && cx->runtime->gcIsFull)
if (cx->runtime->gcIncrementalState != NO_INCREMENTAL && cx->runtime->gcIsFull)
ss->marked = true;
#endif
@ -1341,7 +1341,7 @@ ScriptSource::performXDR(XDRState<mode> *xdr, ScriptSource **ssp)
cleanup.protect(ss);
#ifdef JSGC_INCREMENTAL
// See comment in ScriptSource::createFromSource.
if (xdr->cx()->runtime->gcIncrementalState == MARK &&
if (xdr->cx()->runtime->gcIncrementalState != NO_INCREMENTAL &&
xdr->cx()->runtime->gcIsFull)
ss->marked = true;
#endif
@ -1409,7 +1409,7 @@ js::SaveScriptFilename(JSContext *cx, const char *filename)
* scripts or exceptions pointing to the filename may no longer be
* reachable.
*/
if (rt->gcIncrementalState == MARK && rt->gcIsFull)
if (rt->gcIncrementalState != NO_INCREMENTAL && rt->gcIsFull)
sfe->marked = true;
#endif