Bug 1196847 - Part 1: Allow storage of a unique id for a cell independent of address; r=jonco

* * *
imported patch rewrite_uid_on_ht_with_zone_sweeping
This commit is contained in:
Terrence Cole 2015-08-20 10:35:22 -07:00
parent 668f5b5881
commit 9b70577d6b
14 changed files with 346 additions and 28 deletions

View File

@ -653,8 +653,6 @@ class HashMapEntry
template <class> friend class detail::HashTableEntry;
template <class, class, class, class> friend class HashMap;
Key & mutableKey() { return key_; }
public:
template<typename KeyInput, typename ValueInput>
HashMapEntry(KeyInput&& k, ValueInput&& v)
@ -670,9 +668,10 @@ class HashMapEntry
typedef Key KeyType;
typedef Value ValueType;
const Key & key() const { return key_; }
const Value & value() const { return value_; }
Value & value() { return value_; }
const Key& key() const { return key_; }
Key& mutableKey() { return key_; }
const Value& value() const { return value_; }
Value& value() { return value_; }
private:
HashMapEntry(const HashMapEntry&) = delete;

View File

@ -54,7 +54,8 @@ const size_t ChunkMarkBitmapOffset = 1032352;
const size_t ChunkMarkBitmapBits = 129024;
#endif
const size_t ChunkRuntimeOffset = ChunkSize - sizeof(void*);
const size_t ChunkLocationOffset = ChunkSize - 2 * sizeof(void*) - sizeof(uint64_t);
const size_t ChunkTrailerSize = 2 * sizeof(uintptr_t) + sizeof(uint64_t);
const size_t ChunkLocationOffset = ChunkSize - ChunkTrailerSize;
const size_t ArenaZoneOffset = 0;
/*

View File

@ -655,6 +655,11 @@ class GCRuntime
size_t maxMallocBytesAllocated() { return maxMallocBytes; }
// Hand out the next cell unique id. Post-increment guarantees ids are never
// reused. The counter starts above LargestTaggedNullCellPointer (see the
// GCRuntime constructor initializer list), so a valid id is never 0 and never
// collides with a tagged null pointer — hence the assert.
uint64_t nextCellUniqueId() {
    MOZ_ASSERT(nextCellUniqueId_ > 0);
    return nextCellUniqueId_++;
}
public:
// Internal public interface
js::gc::State state() const { return incrementalState; }
@ -1013,6 +1018,9 @@ class GCRuntime
size_t maxMallocBytes;
// An incrementing id used to assign unique ids to cells that require one.
uint64_t nextCellUniqueId_;
/*
 * Number of committed arenas in all GC chunks, including empty chunks.
*/

View File

@ -294,6 +294,9 @@ class TenuredCell : public Cell
#endif
};
/* Cells are aligned to CellShift, so the largest tagged null pointer is: */
const uintptr_t LargestTaggedNullCellPointer = (1 << CellShift) - 1;
/*
* The mark bitmap has one bit per each GC cell. For multi-cell GC things this
 * wastes space but allows us to avoid expensive divisions by a thing's size when
@ -806,6 +809,17 @@ ArenaHeader::getThingSize() const
*/
struct ChunkTrailer
{
/* Construct a Nursery ChunkTrailer. */
ChunkTrailer(JSRuntime* rt, StoreBuffer* sb)
: location(gc::ChunkLocationBitNursery), storeBuffer(sb), runtime(rt)
{}
/* Construct a Tenured heap ChunkTrailer. */
explicit ChunkTrailer(JSRuntime* rt)
: location(gc::ChunkLocationBitTenuredHeap), storeBuffer(nullptr), runtime(rt)
{}
public:
/* The index of the chunk in the nursery, or LocationTenuredHeap. */
uint32_t location;
uint32_t padding;
@ -813,11 +827,12 @@ struct ChunkTrailer
/* The store buffer for writes to things in this chunk or nullptr. */
StoreBuffer* storeBuffer;
/* This provides quick access to the runtime from absolutely anywhere. */
JSRuntime* runtime;
};
static_assert(sizeof(ChunkTrailer) == 2 * sizeof(uintptr_t) + sizeof(uint64_t),
"ChunkTrailer size is incorrect.");
static_assert(sizeof(ChunkTrailer) == ChunkTrailerSize,
"ChunkTrailer size must match the API defined size.");
/* The chunk header (located at the end of the chunk to preserve arena alignment). */
struct ChunkInfo
@ -1006,13 +1021,16 @@ struct Chunk
return reinterpret_cast<Chunk*>(addr);
}
static bool withinArenasRange(uintptr_t addr) {
static bool withinValidRange(uintptr_t addr) {
uintptr_t offset = addr & ChunkMask;
return offset < ArenasPerChunk * ArenaSize;
return Chunk::fromAddress(addr)->isNurseryChunk()
? offset < ChunkSize - sizeof(ChunkTrailer)
: offset < ArenasPerChunk * ArenaSize;
}
static size_t arenaIndex(uintptr_t addr) {
MOZ_ASSERT(withinArenasRange(addr));
MOZ_ASSERT(!Chunk::fromAddress(addr)->isNurseryChunk());
MOZ_ASSERT(withinValidRange(addr));
return (addr & ChunkMask) >> ArenaShift;
}
@ -1030,6 +1048,10 @@ struct Chunk
return info.numArenasFree != 0;
}
// A chunk belongs to the nursery iff its trailer carries a store buffer;
// tenured-heap trailers set storeBuffer to nullptr (see the two
// ChunkTrailer constructors).
bool isNurseryChunk() const {
    return info.trailer.storeBuffer;
}
ArenaHeader* allocateArena(JSRuntime* rt, JS::Zone* zone, AllocKind kind,
const AutoLockGC& lock);
@ -1129,7 +1151,7 @@ ArenaHeader::address() const
uintptr_t addr = reinterpret_cast<uintptr_t>(this);
MOZ_ASSERT(addr);
MOZ_ASSERT(!(addr & ArenaMask));
MOZ_ASSERT(Chunk::withinArenasRange(addr));
MOZ_ASSERT(Chunk::withinValidRange(addr));
return addr;
}
@ -1298,7 +1320,7 @@ Cell::address() const
{
uintptr_t addr = uintptr_t(this);
MOZ_ASSERT(addr % CellSize == 0);
MOZ_ASSERT(Chunk::withinArenasRange(addr));
MOZ_ASSERT(Chunk::withinValidRange(addr));
return addr;
}

View File

@ -2117,7 +2117,13 @@ js::TenuringTracer::moveObjectToTenured(JSObject* dst, JSObject* src, AllocKind
if (src->is<ArrayObject>())
tenuredSize = srcSize = sizeof(NativeObject);
// Copy the Cell contents.
js_memcpy(dst, src, srcSize);
// Move any hash code attached to the object.
src->zone()->transferUniqueId(dst, src);
// Move the slots and elements, if we need to.
if (src->isNative()) {
NativeObject* ndst = &dst->as<NativeObject>();
NativeObject* nsrc = &src->as<NativeObject>();

View File

@ -425,7 +425,7 @@ ToMarkable(Cell* cell)
MOZ_ALWAYS_INLINE bool
IsNullTaggedPointer(void* p)
{
return uintptr_t(p) < 32;
return uintptr_t(p) <= LargestTaggedNullCellPointer;
}
// HashKeyRef represents a reference to a HashMap key. This should normally

View File

@ -67,6 +67,9 @@ js::Nursery::init(uint32_t maxNurseryBytes)
if (!mallocedBuffers.init())
return false;
if (!cellsWithUid_.init())
return false;
void* heap = MapAlignedPages(nurserySize(), Alignment);
if (!heap)
return false;
@ -653,6 +656,16 @@ js::Nursery::waitBackgroundFreeEnd()
void
js::Nursery::sweep()
{
/* Sweep unique id's in all in-use chunks. */
for (CellsWithUniqueIdSet::Enum e(cellsWithUid_); !e.empty(); e.popFront()) {
JSObject* obj = static_cast<JSObject*>(e.front());
if (!IsForwarded(obj))
obj->zone()->removeUniqueId(obj);
else
MOZ_ASSERT(Forwarded(obj)->zone()->hasUniqueId(Forwarded(obj)));
}
cellsWithUid_.clear();
#ifdef JS_GC_ZEAL
/* Poison the nursery contents so touching a freed object will crash. */
JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, nurserySize());
@ -670,10 +683,8 @@ js::Nursery::sweep()
{
#ifdef JS_CRASH_DIAGNOSTICS
JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, allocationEnd() - start());
for (int i = 0; i < numActiveChunks_; ++i) {
chunk(i).trailer.location = gc::ChunkLocationBitNursery;
chunk(i).trailer.runtime = runtime();
}
for (int i = 0; i < numActiveChunks_; ++i)
initChunk(i);
#endif
setCurrentChunk(0);
}

View File

@ -183,6 +183,14 @@ class Nursery
void waitBackgroundFreeEnd();
// Called by Zone::getUniqueId immediately after a uid is created for |cell|.
// Nursery cells are recorded so their uid can be swept away if the cell dies
// before being tenured. Cells outside the nursery (or with the nursery
// disabled) need no tracking, so we succeed trivially. Returns false on OOM.
bool addedUniqueIdToCell(gc::Cell* cell) {
    if (!IsInsideNursery(cell) || !isEnabled())
        return true;
    MOZ_ASSERT(cellsWithUid_.initialized());
    MOZ_ASSERT(!cellsWithUid_.has(cell));
    return cellsWithUid_.put(cell);
}
size_t sizeOfHeapCommitted() const {
return numActiveChunks_ * gc::ChunkSize;
}
@ -266,6 +274,21 @@ class Nursery
typedef HashMap<void*, void*, PointerHasher<void*, 1>, SystemAllocPolicy> ForwardedBufferMap;
ForwardedBufferMap forwardedBuffers;
/*
 * When we assign a unique id to a cell in the nursery, that almost always
* means that the cell will be in a hash table, and thus, held live,
* automatically moving the uid from the nursery to its new home in
* tenured. It is possible, if rare, for an object that acquired a uid to
* be dead before the next collection, in which case we need to know to
* remove it when we sweep.
*
* Note: we store the pointers as Cell* here, resulting in an ugly cast in
* sweep. This is because this structure is used to help implement
* stable object hashing and we have to break the cycle somehow.
*/
using CellsWithUniqueIdSet = HashSet<gc::Cell*, PointerHasher<gc::Cell*, 3>, SystemAllocPolicy>;
CellsWithUniqueIdSet cellsWithUid_;
/* The maximum number of bytes allowed to reside in nursery buffers. */
static const size_t MaxNurseryBufferSize = 1024;
@ -287,10 +310,8 @@ class Nursery
}
MOZ_ALWAYS_INLINE void initChunk(int chunkno) {
NurseryChunkLayout& c = chunk(chunkno);
c.trailer.storeBuffer = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
c.trailer.location = gc::ChunkLocationBitNursery;
c.trailer.runtime = runtime();
gc::StoreBuffer* sb = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
new (&chunk(chunkno).trailer) gc::ChunkTrailer(runtime(), sb);
}
MOZ_ALWAYS_INLINE void setCurrentChunk(int chunkno) {

View File

@ -66,7 +66,7 @@ Zone::~Zone()
bool Zone::init(bool isSystemArg)
{
isSystem = isSystemArg;
return gcZoneGroupEdges.init();
return uniqueIds_.init() && gcZoneGroupEdges.init();
}
void
@ -156,7 +156,6 @@ Zone::logPromotionsToTenured()
awaitingTenureLogging.clear();
}
void
Zone::sweepBreakpoints(FreeOp* fop)
{
@ -257,6 +256,15 @@ Zone::discardJitCode(FreeOp* fop)
}
}
#ifdef JSGC_HASH_TABLE_CHECKS
// Debug-build verification (JSGC_HASH_TABLE_CHECKS): after a moving GC, every
// key left in the uid table must point at a valid, correctly relocated cell.
// CheckGCThingAfterMovingGC asserts exactly that for each key.
void
JS::Zone::checkUniqueIdTableAfterMovingGC()
{
    for (UniqueIdMap::Enum e(uniqueIds_); !e.empty(); e.popFront())
        js::gc::CheckGCThingAfterMovingGC(e.front().key());
}
#endif
uint64_t
Zone::gcNumber()
{

View File

@ -13,9 +13,11 @@
#include "jscntxt.h"
#include "ds/SplayTree.h"
#include "gc/FindSCCs.h"
#include "gc/GCRuntime.h"
#include "js/TracingAPI.h"
#include "vm/MallocProvider.h"
#include "vm/TypeInference.h"
namespace js {
@ -58,6 +60,11 @@ class ZoneHeapThreshold
const GCSchedulingTunables& tunables);
};
// Maps a Cell* to a unique, 64bit id.
using UniqueIdMap = HashMap<Cell*, uint64_t, PointerHasher<Cell*, 3>, SystemAllocPolicy>;
extern uint64_t NextCellUniqueId(JSRuntime* rt);
} // namespace gc
} // namespace js
@ -242,6 +249,7 @@ struct Zone : public JS::shadow::Zone,
LogTenurePromotionQueue awaitingTenureLogging;
void sweepBreakpoints(js::FreeOp* fop);
void sweepUniqueIds(js::FreeOp* fop);
void sweepWeakMaps();
void sweepCompartments(js::FreeOp* fop, bool keepAtleastOne, bool lastGC);
@ -251,6 +259,9 @@ struct Zone : public JS::shadow::Zone,
return isOnList();
}
// Side map for storing a unique ids for cells, independent of address.
js::gc::UniqueIdMap uniqueIds_;
public:
bool hasDebuggers() const { return debuggers && debuggers->length(); }
DebuggerVector* getDebuggers() const { return debuggers; }
@ -323,6 +334,73 @@ struct Zone : public JS::shadow::Zone,
mozilla::DebugOnly<unsigned> gcLastZoneGroupIndex;
// Fold a cell's 64-bit unique id down to a 32-bit HashNumber by XOR-ing its
// high and low words. Returns false if acquiring the unique id fails (OOM).
bool getHashCode(js::gc::Cell* cell, js::HashNumber* hashp) {
    uint64_t uid;
    if (!getUniqueId(cell, &uid))
        return false;
    js::HashNumber lo = js::HashNumber(uid & 0xFFFFFFFF);
    js::HashNumber hi = js::HashNumber(uid >> 32);
    *hashp = lo ^ hi;
    return true;
}
// Puts an existing UID in |uidp|, or creates a new UID for this Cell and
// puts that into |uidp|. Returns false on OOM.
bool getUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
    MOZ_ASSERT(uidp);
    MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));

    // Get an existing uid, if one has been set.
    auto p = uniqueIds_.lookupForAdd(cell);
    if (p) {
        *uidp = p->value();
        return true;
    }

    // Set a new uid on the cell. Ids are handed out starting above
    // LargestTaggedNullCellPointer (see GCRuntime's counter initialization),
    // so a uid can never be mistaken for a tagged null pointer.
    *uidp = js::gc::NextCellUniqueId(runtimeFromAnyThread());
    if (!uniqueIds_.add(p, cell, *uidp))
        return false;

    // If the cell was in the nursery, hopefully unlikely, then we need to
    // tell the nursery about it so that it can sweep the uid if the thing
    // does not get tenured. We crash rather than unwind on OOM here so the
    // uid table and the nursery's tracking set can never get out of sync.
    if (!runtimeFromAnyThread()->gc.nursery.addedUniqueIdToCell(cell))
        js::CrashAtUnhandlableOOM("failed to allocate tracking data for a nursery uid");

    return true;
}
// Return true if this cell has a UID associated with it. Pure lookup: never
// creates an id and never allocates.
bool hasUniqueId(js::gc::Cell* cell) {
    MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
    return uniqueIds_.has(cell);
}
// Transfer an id from another cell. This must only be called on behalf of a
// moving GC. This method is infallible; rekeyIfMoved is a no-op when |src|
// never had a uid, so it is safe to call unconditionally during relocation.
void transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src) {
    MOZ_ASSERT(src != tgt);
    // The destination of a move is always in the tenured heap.
    MOZ_ASSERT(!IsInsideNursery(tgt));
    MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
    MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
    uniqueIds_.rekeyIfMoved(src, tgt);
}
// Remove any unique id associated with this Cell. Harmless to call when
// |cell| has no uid.
void removeUniqueId(js::gc::Cell* cell) {
    MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
    uniqueIds_.remove(cell);
}
// Off-thread parsing should not result in any UIDs being created.
// Debug-only check: MOZ_ASSERT compiles away in release builds.
void assertNoUniqueIdsInZone() const {
    MOZ_ASSERT(uniqueIds_.count() == 0);
}
#ifdef JSGC_HASH_TABLE_CHECKS
// Assert that the UniqueId table has been redirected successfully.
void checkUniqueIdTableAfterMovingGC();
#endif
private:
js::jit::JitZone* jitZone_;

View File

@ -43,6 +43,7 @@ UNIFIED_SOURCES += [
'testGCMarking.cpp',
'testGCOutOfMemory.cpp',
'testGCStoreBufferRemoval.cpp',
'testGCUniqueId.cpp',
'testGetPropertyDescriptor.cpp',
'testHashTable.cpp',
'testIndexToString.cpp',

View File

@ -0,0 +1,120 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "gc/GCInternals.h"
#include "gc/Zone.h"
// Shrink the GC heap to a minimal steady state so the test can make reliable
// assumptions about where subsequent allocations land.
static void
MinimizeHeap(JSRuntime* rt)
{
    // The second collection is to force us to wait for the background
    // sweeping that the first GC started to finish.
    JS_GC(rt);
    JS_GC(rt);
    js::gc::AutoFinishGC finish(rt);
}
// Exercise the per-zone unique-id table across the cell lifecycle: nursery
// allocation, tenuring, address reuse in both heaps, and compaction.
BEGIN_TEST(testGCUID)
{
#ifdef JS_GC_ZEAL
    AutoLeaveZeal nozeal(cx);
#endif /* JS_GC_ZEAL */

    uint64_t uid = 0;
    uint64_t tmp = 0;

    // Ensure the heap is as minimal as it can get.
    MinimizeHeap(rt);

    JS::RootedObject obj(cx, JS_NewPlainObject(cx));
    // BUGFIX: check the allocation before taking its address.
    CHECK(obj);
    uintptr_t nurseryAddr = uintptr_t(obj.get());
    CHECK(js::gc::IsInsideNursery(obj));

    // Do not start with an ID.
    CHECK(!obj->zone()->hasUniqueId(obj));

    // Ensure we can get a new UID; ids are disjoint from tagged null pointers.
    CHECK(obj->zone()->getUniqueId(obj, &uid));
    CHECK(uid > js::gc::LargestTaggedNullCellPointer);

    // We should now have an id.
    CHECK(obj->zone()->hasUniqueId(obj));

    // Calling again should get us the same thing.
    CHECK(obj->zone()->getUniqueId(obj, &tmp));
    CHECK(uid == tmp);

    // Tenure the thing and check that the UID moved with it.
    MinimizeHeap(rt);
    uintptr_t tenuredAddr = uintptr_t(obj.get());
    CHECK(tenuredAddr != nurseryAddr);
    CHECK(!js::gc::IsInsideNursery(obj));
    CHECK(obj->zone()->hasUniqueId(obj));
    CHECK(obj->zone()->getUniqueId(obj, &tmp));
    CHECK(uid == tmp);

    // Allocate a new nursery thing in the same location and check that we
    // removed the prior uid that was attached to the location.
    obj = JS_NewPlainObject(cx);
    CHECK(obj);
    CHECK(uintptr_t(obj.get()) == nurseryAddr);
    CHECK(!obj->zone()->hasUniqueId(obj));

    // Try to get another tenured object in the same location and check that
    // the uid was removed correctly.
    obj = nullptr;
    MinimizeHeap(rt);
    obj = JS_NewPlainObject(cx);
    // BUGFIX: the reallocation was previously used without a CHECK.
    CHECK(obj);
    MinimizeHeap(rt);
    CHECK(uintptr_t(obj.get()) == tenuredAddr);
    CHECK(!obj->zone()->hasUniqueId(obj));
    CHECK(obj->zone()->getUniqueId(obj, &tmp));
    CHECK(uid != tmp);
    uid = tmp;

    // Allocate a few arenas worth of objects to ensure we get some compaction.
    const static size_t N = 2049;
    using ObjectVector = js::TraceableVector<JSObject*>;
    JS::Rooted<ObjectVector> vec(cx, ObjectVector(cx));
    for (size_t i = 0; i < N; ++i) {
        obj = JS_NewPlainObject(cx);
        CHECK(obj);
        CHECK(vec.append(obj));
    }

    // Transfer our vector to tenured if it isn't there already.
    MinimizeHeap(rt);

    // Tear holes in the heap by unrooting the even objects and collecting.
    JS::Rooted<ObjectVector> vec2(cx, ObjectVector(cx));
    for (size_t i = 0; i < N; ++i) {
        if (i % 2 == 1) {
            // BUGFIX: append is fallible; ignoring the result could silently
            // drop objects on OOM and invalidate the assertions below.
            CHECK(vec2.append(vec[i]));
        }
    }
    vec.clear();
    MinimizeHeap(rt);

    // Grab the last object in the vector as our object of interest.
    obj = vec2.back();
    CHECK(obj);
    tenuredAddr = uintptr_t(obj.get());
    CHECK(obj->zone()->getUniqueId(obj, &uid));

    // Force a compaction to move the object and check that the uid moved to
    // the new tenured heap location.
    JS::PrepareForFullGC(rt);
    JS::GCForReason(rt, GC_SHRINK, JS::gcreason::API);
    MinimizeHeap(rt);
    CHECK(uintptr_t(obj.get()) != tenuredAddr);
    CHECK(obj->zone()->hasUniqueId(obj));
    CHECK(obj->zone()->getUniqueId(obj, &tmp));
    CHECK(uid == tmp);

    return true;
}
END_TEST(testGCUID)

View File

@ -833,9 +833,7 @@ Chunk::init(JSRuntime* rt)
/* Initialize the chunk info. */
info.init();
info.trailer.storeBuffer = nullptr;
info.trailer.location = ChunkLocationBitTenuredHeap;
info.trailer.runtime = rt;
new (&info.trailer) ChunkTrailer(rt);
/* The rest of info fields are initialized in pickChunk. */
}
@ -1110,6 +1108,7 @@ GCRuntime::GCRuntime(JSRuntime* rt) :
usage(nullptr),
mMemProfiler(rt),
maxMallocBytes(0),
nextCellUniqueId_(LargestTaggedNullCellPointer + 1), // Ensure disjoint from null tagged pointers.
numArenasFreeCommitted(0),
verifyPreData(nullptr),
chunkAllocationSinceLastGC(false),
@ -2042,6 +2041,9 @@ RelocateCell(Zone* zone, TenuredCell* src, AllocKind thingKind, size_t thingSize
// Copy source cell contents to destination.
memcpy(dst, src, thingSize);
// Move any uid attached to the object.
src->zone()->transferUniqueId(dst, src);
if (IsObjectAllocKind(thingKind)) {
JSObject* srcObj = static_cast<JSObject*>(static_cast<Cell*>(src));
JSObject* dstObj = static_cast<JSObject*>(static_cast<Cell*>(dst));
@ -2173,7 +2175,6 @@ bool
ArenaLists::relocateArenas(Zone* zone, ArenaHeader*& relocatedListOut, JS::gcreason::Reason reason,
SliceBudget& sliceBudget, gcstats::Statistics& stats)
{
// This is only called from the main thread while we are doing a GC, so
// there is no need to lock.
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
@ -3588,6 +3589,29 @@ GCRuntime::shouldReleaseObservedTypes()
return releaseTypes;
}
// Trace-kind-dispatched helper for Zone::sweepUniqueIds: returns true if the
// cell stored at |*t| is about to be finalized, meaning its uid table entry
// must be removed.
struct IsAboutToBeFinalizedFunctor {
    template <typename T> bool operator()(Cell** t) {
        mozilla::DebugOnly<const Cell*> prior = *t;
        bool result = IsAboutToBeFinalizedUnbarriered(reinterpret_cast<T**>(t));
        // Sweep should not have to deal with moved pointers, since moving GC
        // handles updating the UID table manually.
        MOZ_ASSERT(*t == prior);
        return result;
    }
};
// Drop uid table entries whose cells are about to be finalized, so the table
// never holds dangling keys. |fop| is unused here; presumably kept so the
// signature matches the zone's other sweep methods — confirm before removing.
void
JS::Zone::sweepUniqueIds(js::FreeOp* fop)
{
    for (UniqueIdMap::Enum e(uniqueIds_); !e.empty(); e.popFront()) {
        if (DispatchTraceKindTyped(IsAboutToBeFinalizedFunctor(), e.front().key()->getTraceKind(),
                                   &e.front().mutableKey()))
        {
            e.removeFront();
        }
    }
}
/*
* It's simpler if we preserve the invariant that every zone has at least one
* compartment. If we know we're deleting the entire zone, then
@ -5058,6 +5082,12 @@ GCRuntime::beginSweepingZoneGroup()
zone->sweepBreakpoints(&fop);
}
}
{
gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_BREAKPOINT);
for (GCZoneGroupIter zone(rt); !zone.done(); zone.next())
zone->sweepUniqueIds(&fop);
}
}
if (sweepingAtoms) {
@ -6741,6 +6771,9 @@ gc::MergeCompartments(JSCompartment* source, JSCompartment* target)
// Merge other info in source's zone into target's zone.
target->zone()->types.typeLifoAlloc.transferFrom(&source->zone()->types.typeLifoAlloc);
// Ensure that we did not create any UIDs when running off-thread.
source->zone()->assertNoUniqueIdsInZone();
}
void
@ -7150,6 +7183,9 @@ js::gc::CheckHashTablesAfterMovingGC(JSRuntime* rt)
* Check that internal hash tables no longer have any pointers to things
* that have been moved.
*/
for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
zone->checkUniqueIdTableAfterMovingGC();
}
for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
c->objectGroups.checkTablesAfterMovingGC();
c->checkInitialShapesTableAfterMovingGC();
@ -7375,6 +7411,12 @@ JS::IsGenerationalGCEnabled(JSRuntime* rt)
return rt->gc.isGenerationalGCEnabled();
}
uint64_t
js::gc::NextCellUniqueId(JSRuntime* rt)
{
return rt->gc.nextCellUniqueId();
}
namespace js {
namespace gc {
namespace MemInfo {

View File

@ -76,6 +76,7 @@ JSObject::finalize(js::FreeOp* fop)
MOZ_ASSERT(CurrentThreadCanAccessRuntime(fop->runtime()));
}
#endif
const js::Class* clasp = getClass();
if (clasp->finalize)
clasp->finalize(fop, this);