Bug 936583 - Make common chunk trailer use a common struct; r=billm

--HG--
extra : rebase_source : c9a34087a60cd5df0f3b7ec0c32b602ab1159611
Terrence Cole 2013-11-11 11:32:33 -08:00
parent 83ad9f37aa
commit a4376f9dca
4 changed files with 53 additions and 42 deletions

js/src/gc/Heap.h

@@ -174,8 +174,7 @@ struct FreeSpan
* there as offsets from the arena start.
*/
static size_t encodeOffsets(size_t firstOffset, size_t lastOffset) {
-/* Check that we can pack the offsets into uint16_t. */
-JS_STATIC_ASSERT(ArenaShift < 16);
+static_assert(ArenaShift < 16, "Check that we can pack offsets into uint16_t.");
JS_ASSERT(firstOffset <= ArenaSize);
JS_ASSERT(lastOffset < ArenaSize);
JS_ASSERT(firstOffset <= ((lastOffset + 1) & ~size_t(1)));
@@ -445,17 +444,9 @@ struct ArenaHeader : public JS::shadow::ArenaHeader
size_t allocatedDuringIncremental : 1;
size_t markOverflow : 1;
size_t auxNextLink : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
-static void staticAsserts() {
-/* We must be able to fit the allockind into uint8_t. */
-JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
-/*
- * auxNextLink packing assumes that ArenaShift has enough bits
- * to cover allocKind and hasDelayedMarking.
- */
-JS_STATIC_ASSERT(ArenaShift >= 8 + 1 + 1 + 1);
-}
+static_assert(ArenaShift >= 8 + 1 + 1 + 1,
+"ArenaHeader::auxNextLink packing assumes that ArenaShift has enough bits to "
+"cover allocKind and hasDelayedMarking.");
inline uintptr_t address() const;
inline Chunk *chunk() const;
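To see where the new assertion's bound of 8 + 1 + 1 + 1 comes from, here is the packing arithmetic worked out under the usual arena size; ArenaShift = 12 is an assumed value, defined elsewhere in Heap.h:

// Worked example (assumed constants: ArenaSize = 4096, so ArenaShift = 12):
//   allocKind                  needs 8 bits (FINALIZE_LIMIT <= 255)
//   hasDelayedMarking          needs 1 bit
//   allocatedDuringIncremental needs 1 bit
//   markOverflow               needs 1 bit
// auxNextLink stores an ArenaHeader address >> ArenaShift, which takes
// JS_BITS_PER_WORD - ArenaShift bits; it has JS_BITS_PER_WORD - 11 bits
// available, so the shifted address fits exactly when ArenaShift >= 11.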
@@ -472,7 +463,7 @@ struct ArenaHeader : public JS::shadow::ArenaHeader
JS_ASSERT(!hasDelayedMarking);
zone = zoneArg;
-JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
+static_assert(FINALIZE_LIMIT <= 255, "We must be able to fit the allockind into uint8_t.");
allocKind = size_t(kind);
/* See comments in FreeSpan::allocateFromNewArena. */
@@ -592,6 +583,8 @@ struct Arena
bool finalize(FreeOp *fop, AllocKind thingKind, size_t thingSize);
};
+static_assert(sizeof(Arena) == ArenaSize, "The hardcoded arena size must match the struct size.");
inline size_t
ArenaHeader::getThingSize() const
{
@@ -599,6 +592,16 @@ ArenaHeader::getThingSize() const
return Arena::thingSize(getAllocKind());
}
+/*
+ * The tail of the chunk info is shared between all chunks in the system, both
+ * nursery and tenured. This structure is locatable from any GC pointer by
+ * aligning to 1MiB.
+ */
+struct ChunkTrailer
+{
+JSRuntime *runtime;
+};
/* The chunk header (located at the end of the chunk to preserve arena alignment). */
struct ChunkInfo
{
@@ -632,8 +635,8 @@ struct ChunkInfo
/* Number of GC cycles this chunk has survived. */
uint32_t age;
-/* This is findable from any address in the Chunk by aligning to 1MiB. */
-JSRuntime *runtime;
+/* Information shared by all Chunk types. */
+ChunkTrailer trailer;
};
/*
@@ -669,6 +672,7 @@ const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / JS_BITS_PER_BYTE;
const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;
+static_assert(ArenasPerChunk == 252, "Do not accidentally change our heap's density.");
/* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
struct ChunkBitmap
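The 252 in the new density assertion follows from constants defined elsewhere in Heap.h; the arithmetic below assumes the usual values (1 MiB chunks, 4 KiB arenas, 8-byte cells with one mark bit each):

// Worked example with assumed constants (ChunkSize = 1 MiB = 1048576,
// ArenaSize = 4096, CellSize = 8, one mark bit per cell):
//   ArenaBitmapBytes         = 4096 / 8 / 8               = 64
//   BytesPerArenaWithHeader  = 4096 + 64                  = 4160
//   ChunkDecommitBitmapBytes = 1048576 / 4096 / 8         = 32
//   ChunkBytesAvailable      = 1048576 - sizeof(ChunkInfo) - 32
//   ArenasPerChunk           = ChunkBytesAvailable / 4160 = 252
// 252 arenas need 252 * 4160 = 1048320 bytes, leaving at most 224 bytes
// for ChunkInfo before the division would round down to 251.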
@@ -720,12 +724,10 @@ struct ChunkBitmap
}
uintptr_t *arenaBits(ArenaHeader *aheader) {
-/*
- * We assume that the part of the bitmap corresponding to the arena
- * has the exact number of words so we do not need to deal with a word
- * that covers bits from two arenas.
- */
-JS_STATIC_ASSERT(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD);
+static_assert(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
+"We assume that the part of the bitmap corresponding to the arena "
+"has the exact number of words so we do not need to deal with a word "
+"that covers bits from two arenas.");
uintptr_t *word, unused;
getMarkWordAndMask(reinterpret_cast<Cell *>(aheader->address()), BLACK, &word, &unused);
@@ -733,8 +735,10 @@ struct ChunkBitmap
}
};
-JS_STATIC_ASSERT(ArenaBitmapBytes * ArenasPerChunk == sizeof(ChunkBitmap));
-JS_STATIC_ASSERT(js::gc::ChunkMarkBitmapBits == ArenaBitmapBits * ArenasPerChunk);
+static_assert(ArenaBitmapBytes * ArenasPerChunk == sizeof(ChunkBitmap),
+"Ensure our ChunkBitmap actually covers all arenas.");
+static_assert(js::gc::ChunkMarkBitmapBits == ArenaBitmapBits * ArenasPerChunk,
+"Ensure that the mark bitmap has the right number of bits.");
typedef BitArray<ArenasPerChunk> PerArenaBitmap;
@@ -743,7 +747,8 @@ const size_t ChunkPadSize = ChunkSize
- sizeof(ChunkBitmap)
- sizeof(PerArenaBitmap)
- sizeof(ChunkInfo);
-JS_STATIC_ASSERT(ChunkPadSize < BytesPerArenaWithHeader);
+static_assert(ChunkPadSize < BytesPerArenaWithHeader,
+"If the chunk padding is larger than an arena, we should have one more arena.");
/*
* Chunks contain arenas and associated data structures (mark bitmap, delayed
@@ -836,9 +841,14 @@ struct Chunk
inline void addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader);
};
-JS_STATIC_ASSERT(sizeof(Chunk) == ChunkSize);
-JS_STATIC_ASSERT(js::gc::ChunkMarkBitmapOffset == offsetof(Chunk, bitmap));
-JS_STATIC_ASSERT(js::gc::ChunkRuntimeOffset == offsetof(Chunk, info) + offsetof(ChunkInfo, runtime));
+static_assert(sizeof(Chunk) == ChunkSize,
+"Ensure the hardcoded chunk size definition actually matches the struct.");
+static_assert(js::gc::ChunkMarkBitmapOffset == offsetof(Chunk, bitmap),
+"The hardcoded API bitmap offset must match the actual offset.");
+static_assert(js::gc::ChunkRuntimeOffset == offsetof(Chunk, info) +
+offsetof(ChunkInfo, trailer) +
+offsetof(ChunkTrailer, runtime),
+"The hardcoded API runtime offset must match the actual offset.");
inline uintptr_t
ArenaHeader::address() const
@@ -960,7 +970,7 @@ Cell::arenaHeader() const
inline JSRuntime *
Cell::runtimeFromMainThread() const
{
-JSRuntime *rt = chunk()->info.runtime;
+JSRuntime *rt = chunk()->info.trailer.runtime;
JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
return rt;
}
@@ -974,7 +984,7 @@ Cell::shadowRuntimeFromMainThread() const
inline JSRuntime *
Cell::runtimeFromAnyThread() const
{
-return chunk()->info.runtime;
+return chunk()->info.trailer.runtime;
}
inline JS::shadow::Runtime *

js/src/gc/Nursery.cpp

@@ -60,7 +60,7 @@ js::Nursery::init()
JS_POISON(heap, FreshNursery, NurserySize);
#endif
for (int i = 0; i < NumNurseryChunks; ++i)
-chunk(i).runtime = rt;
+chunk(i).trailer.runtime = rt;
JS_ASSERT(isEnabled());
return true;
@@ -648,7 +648,7 @@ js::Nursery::sweep(JSRuntime *rt)
/* Poison the nursery contents so touching a freed object will crash. */
JS_POISON((void *)start(), SweptNursery, NurserySize - sizeof(JSRuntime *));
for (int i = 0; i < NumNurseryChunks; ++i)
-chunk(i).runtime = runtime();
+chunk(i).trailer.runtime = runtime();
if (rt->gcZeal_ == ZealGenerationalGCValue) {
/* Undo any grow or shrink the collection may have done. */

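Note that the sweep above still poisons NurserySize - sizeof(JSRuntime *) bytes. That is equivalent to sparing the trailer only while ChunkTrailer holds nothing but the runtime pointer; a hypothetical guard (not part of this patch) would make the coupling explicit:

// Hypothetical compile-time guard: the poison size in Nursery::sweep
// assumes the shared trailer is exactly one pointer wide.
static_assert(sizeof(js::gc::ChunkTrailer) == sizeof(JSRuntime *),
              "nursery poison size assumes a single-pointer ChunkTrailer");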
js/src/gc/Nursery.h

@@ -155,16 +155,17 @@ class Nursery
static const size_t MaxNurserySlots = 100;
/* The amount of space in the mapped nursery available to allocations. */
-static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(JSRuntime *);
+static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(gc::ChunkTrailer);
struct NurseryChunkLayout {
char data[NurseryChunkUsableSize];
-JSRuntime *runtime;
+gc::ChunkTrailer trailer;
uintptr_t start() { return uintptr_t(&data); }
-uintptr_t end() { return uintptr_t(&runtime); }
+uintptr_t end() { return uintptr_t(&trailer); }
};
+static_assert(sizeof(NurseryChunkLayout) == gc::ChunkSize,
+"Nursery chunk size must match gc::Chunk size.");
NurseryChunkLayout &chunk(int index) const {
-JS_STATIC_ASSERT(sizeof(NurseryChunkLayout) == gc::ChunkSize);
JS_ASSERT(index < NumNurseryChunks);
JS_ASSERT(start());
return reinterpret_cast<NurseryChunkLayout *>(start())[index];

js/src/jsgc.cpp

@@ -113,7 +113,8 @@ const AllocKind gc::slotsToThingKind[] = {
/* 16 */ FINALIZE_OBJECT16
};
-JS_STATIC_ASSERT(JS_ARRAY_LENGTH(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT);
+static_assert(JS_ARRAY_LENGTH(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT,
+"We have defined a slot count for each kind.");
const uint32_t Arena::ThingSizes[] = {
sizeof(JSObject), /* FINALIZE_OBJECT0 */
@@ -276,9 +277,8 @@ ArenaHeader::checkSynchronizedWithFreeList() const
/* static */ void
Arena::staticAsserts()
{
-JS_STATIC_ASSERT(sizeof(Arena) == ArenaSize);
-JS_STATIC_ASSERT(JS_ARRAY_LENGTH(ThingSizes) == FINALIZE_LIMIT);
-JS_STATIC_ASSERT(JS_ARRAY_LENGTH(FirstThingOffsets) == FINALIZE_LIMIT);
+static_assert(JS_ARRAY_LENGTH(ThingSizes) == FINALIZE_LIMIT, "We have defined all thing sizes.");
+static_assert(JS_ARRAY_LENGTH(FirstThingOffsets) == FINALIZE_LIMIT, "We have defined all offsets.");
}
template<typename T>
@@ -654,7 +654,7 @@ Chunk::init(JSRuntime *rt)
info.numArenasFree = ArenasPerChunk;
info.numArenasFreeCommitted = ArenasPerChunk;
info.age = 0;
-info.runtime = rt;
+info.trailer.runtime = rt;
/* Initialize the arena header state. */
for (unsigned i = 0; i < ArenasPerChunk; i++) {
@@ -742,7 +742,7 @@ Chunk::fetchNextDecommittedArena()
decommittedArenas.unset(offset);
Arena *arena = &arenas[offset];
-MarkPagesInUse(info.runtime, arena, ArenaSize);
+MarkPagesInUse(info.trailer.runtime, arena, ArenaSize);
arena->aheader.setAsNotAllocated();
return &arena->aheader;
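With Chunk::init now stamping the runtime through the shared trailer, the typed field and the raw offset view must always agree; a debug-only sanity check along these lines (hypothetical helper, using MOZ_ASSERT from mfbt) could verify it:

// Hypothetical debug check: the runtime stored by Chunk::init() is visible
// both through the typed trailer field and through the raw offset that
// embedder-facing code uses.
static void
CheckChunkRuntime(js::gc::Chunk *chunk, JSRuntime *rt)
{
    MOZ_ASSERT(chunk->info.trailer.runtime == rt);
    JSRuntime **viaOffset =
        reinterpret_cast<JSRuntime **>(uintptr_t(chunk) + js::gc::ChunkRuntimeOffset);
    MOZ_ASSERT(*viaOffset == rt);
}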