Bug 614126 - Decouple CodeAlloc block size from allocation size (r+edwsmith,nnethercote)

--HG--
extra : convert_revision : 2c9dc64162bd57c225c4b8cd184dfcf6d03a2e5f
Rick Reitmaier 2011-01-05 10:55:31 -08:00
parent 0486e22f7d
commit 6b35e95ed3
5 changed files with 55 additions and 14 deletions

@@ -276,14 +276,15 @@ namespace nanojit
     }

     void Assembler::codeAlloc(NIns *&start, NIns *&end, NIns *&eip
-                              verbose_only(, size_t &nBytes))
+                              verbose_only(, size_t &nBytes)
+                              , size_t byteLimit)
     {
         // save the block we just filled
         if (start)
             CodeAlloc::add(codeList, start, end);

         // CodeAlloc contract: allocations never fail
-        _codeAlloc.alloc(start, end);
+        _codeAlloc.alloc(start, end, byteLimit);
         verbose_only( nBytes += (end - start) * sizeof(NIns); )
         NanoAssert(uintptr_t(end) - uintptr_t(start) >= (size_t)LARGEST_UNDERRUN_PROT);
         eip = end;

@@ -361,7 +361,8 @@ namespace nanojit
         void getBaseIndexScale(LIns* addp, LIns** base, LIns** index, int* scale);
         void codeAlloc(NIns *&start, NIns *&end, NIns *&eip
-                       verbose_only(, size_t &nBytes));
+                       verbose_only(, size_t &nBytes)
+                       , size_t byteLimit=0);

         // These instructions don't have to be saved & reloaded to spill,
         // they can just be recalculated cheaply.
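
The comma tucked inside verbose_only() in the two hunks above is easier to follow with the macro spelled out: the size_t &nBytes parameter only exists in verbose builds, so its separating comma has to disappear with it. Below is a minimal compile-only sketch, assuming verbose_only follows nanojit's usual pattern of a variadic macro that keeps its arguments under NJ_VERBOSE and erases them otherwise; demoCodeAlloc and the NIns typedef are stand-ins, not the real declarations.

    #include <cstddef>

    // Assumed to mirror nanojit's verbose_only(): a variadic macro that keeps
    // its arguments only when NJ_VERBOSE is defined and erases them otherwise.
    #ifdef NJ_VERBOSE
        #define verbose_only(...) __VA_ARGS__
    #else
        #define verbose_only(...)
    #endif

    typedef int NIns;   // stand-in for the real NIns type

    // Hypothetical function shaped like Assembler::codeAlloc above: in verbose
    // builds the comma and the size_t &nBytes parameter appear; otherwise the
    // whole verbose_only(...) text vanishes and byteLimit follows eip directly.
    static void demoCodeAlloc(NIns *&start, NIns *&end, NIns *&eip
                              verbose_only(, size_t &nBytes)
                              , size_t byteLimit = 0)
    {
        (void)start; (void)end; (void)eip; (void)byteLimit;
        verbose_only( nBytes = 0; )
    }

    int main()
    {
        NIns *s = 0, *e = 0, *ip = 0;
        verbose_only( size_t nBytes = 0; )
        demoCodeAlloc(s, e, ip verbose_only(, nBytes), 4096);
        return 0;
    }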

@@ -127,15 +127,49 @@ namespace nanojit
        }
    }

-    void CodeAlloc::alloc(NIns* &start, NIns* &end) {
+    void CodeAlloc::alloc(NIns* &start, NIns* &end, size_t byteLimit) {
        if (!availblocks) {
            // no free mem, get more
            addMem();
        }

        // grab a block
+        NanoAssert(!byteLimit || byteLimit > blkSpaceFor(2)); // if a limit is imposed it must be bigger than 2x minimum block size (see below)
        markBlockWrite(availblocks);
        CodeList* b = removeBlock(availblocks);
+
+        // limit imposed (byteLimit > 0) and the block is too big? then break it apart
+        if (byteLimit > 0 && b->size() > byteLimit) {
+            size_t consume;   // # bytes to extract from the free block
+
+            // enough space to carve out a perfectly sized blk? (leaving at least a full free blk)
+            if (b->size() >= byteLimit + headerSpaceFor(1) + blkSpaceFor(1)) {
+                // yes, then take exactly what we need
+                consume = byteLimit + headerSpaceFor(1);
+            } else {
+                // no, then we should only take the min amount
+                consume = blkSpaceFor(1);
+
+                // ... and since b->size() > byteLimit && byteLimit > blkSpaceFor(2)
+                NanoAssert( b->size() > blkSpaceFor(2) );
+                NanoAssert( b->size() - consume > blkSpaceFor(1) );  // thus, we know that at least 1 blk left.
+            }
+
+            // break block into 2 pieces, returning the lower portion to the free list
+            CodeList* higher = b->higher;
+            b->end = (NIns*) ( (uintptr_t)b->end - consume );
+            CodeList* b1 = b->higher;
+            higher->lower = b1;
+            b1->higher = higher;
+            b1->lower = b;
+            b1->terminator = b->terminator;
+            NanoAssert(b->size() > minAllocSize);
+            addBlock(availblocks, b);  // put back the rest of the block
+            b = b1;
+        }
+
+        NanoAssert(b->size() >= minAllocSize);
+        b->next = 0; // not technically needed (except for debug builds), but good hygiene.
        b->isFree = false;
        start = b->start();
        end = b->end;
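
Most of the new branch above is free-list bookkeeping around one size computation, so here is a small standalone model of just that computation; it is not nanojit code. SIZEOF_MIN_BLOCK and MIN_ALLOC_SIZE are made-up stand-ins for sizeofMinBlock and minAllocSize (both platform dependent), and chooseConsume is a hypothetical helper that mirrors the arithmetic only, not the list surgery.

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Made-up example values; the real sizeofMinBlock and minAllocSize are
    // platform dependent (minAllocSize is LARGEST_UNDERRUN_PROT in the patch).
    static const size_t SIZEOF_MIN_BLOCK = 32;    // per-block header overhead
    static const size_t MIN_ALLOC_SIZE   = 256;   // smallest usable code region

    static size_t headerSpaceFor(size_t n) { return n * SIZEOF_MIN_BLOCK; }
    static size_t blkSpaceFor(size_t n)    { return n * MIN_ALLOC_SIZE + headerSpaceFor(n); }

    // Mirrors the "consume" arithmetic: how many bytes to carve off the top of
    // a free block of blockSize bytes when the caller capped the request at
    // byteLimit, leaving at least one whole free block behind in either case.
    static size_t chooseConsume(size_t blockSize, size_t byteLimit) {
        assert(byteLimit > blkSpaceFor(2));            // precondition asserted by alloc()
        assert(blockSize > byteLimit);                 // only oversized blocks get split
        if (blockSize >= byteLimit + headerSpaceFor(1) + blkSpaceFor(1))
            return byteLimit + headerSpaceFor(1);      // exact fit: byteLimit of code + one header
        size_t consume = blkSpaceFor(1);               // otherwise take the minimum-sized piece
        assert(blockSize - consume > blkSpaceFor(1));  // ...which still leaves a whole block
        return consume;
    }

    int main() {
        size_t limit = 4096;
        // A large free block: carve off exactly limit bytes of code space plus one header.
        std::printf("64KB block -> consume %zu bytes\n", chooseConsume(64 * 1024, limit));
        // A block only slightly over the limit: take just the minimum-sized piece.
        std::printf("%zu block -> consume %zu bytes\n", limit + 64, chooseConsume(limit + 64, limit));
        return 0;
    }

In the patch, the carved-off upper piece (b1) is what alloc() hands back to the caller, and what remains of the original block goes back onto availblocks.
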
@@ -399,10 +433,9 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
        // shrink the hole by aligning holeStart forward and holeEnd backward
        holeStart = (NIns*) ((uintptr_t(holeStart) + sizeof(NIns*)-1) & ~(sizeof(NIns*)-1));
        holeEnd = (NIns*) (uintptr_t(holeEnd) & ~(sizeof(NIns*)-1));
-        size_t minHole = minAllocSize;
-        if (minHole < 2*sizeofMinBlock)
-            minHole = 2*sizeofMinBlock;
-        if (uintptr_t(holeEnd) - uintptr_t(holeStart) < minHole) {
+        // hole needs to be big enough for 2 headers + 1 block of free space (subtraction not used in check to avoid wraparound)
+        size_t minHole = headerSpaceFor(2) + blkSpaceFor(1);
+        if (uintptr_t(holeEnd) < minHole + uintptr_t(holeStart) ) {
            // the hole is too small to make a new free block and a new used block. just keep
            // the whole original block and don't free anything.
            add(blocks, start, end);
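
The "avoid wraparound" comment above refers to unsigned arithmetic: after holeStart is aligned forward and holeEnd backward, a tiny hole can end up with holeEnd below holeStart, so holeEnd - holeStart wraps to a huge value and sails past the "too small" test. A minimal demonstration with plain uintptr_t values (not nanojit code; the numbers are arbitrary):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // A degenerate hole where the alignment pushed the start past the end.
        uintptr_t holeStart = 0x1008;
        uintptr_t holeEnd   = 0x1000;
        uintptr_t minHole   = 0x150;   // stand-in for headerSpaceFor(2) + blkSpaceFor(1)

        // Subtraction wraps around to a huge unsigned value, so this check
        // wrongly concludes the hole is big enough.
        std::printf("subtract: hole looks %s\n",
                    (holeEnd - holeStart < minHole) ? "too small" : "big enough");

        // Adding on the other side does not wrap for these values, so the same
        // hole is correctly rejected as too small.
        std::printf("add:      hole looks %s\n",
                    (holeEnd < minHole + holeStart) ? "too small" : "big enough");
        return 0;
    }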

@@ -119,6 +119,12 @@ namespace nanojit
        static const size_t sizeofMinBlock = offsetof(CodeList, code);
        static const size_t minAllocSize = LARGEST_UNDERRUN_PROT;

+        // Return the number of bytes needed for the header of 'n' blocks
+        static size_t headerSpaceFor(uint32_t nbrBlks)  { return nbrBlks * sizeofMinBlock; }
+
+        // Return the number of bytes needed in order to safely construct 'n' blocks
+        static size_t blkSpaceFor(uint32_t nbrBlks)     { return (nbrBlks * minAllocSize) + headerSpaceFor(nbrBlks); }
+
        /** Terminator blocks. All active and free allocations
            are reachable by traversing this chain and each
            element's lower chain. */
@@ -181,8 +187,8 @@ namespace nanojit
        /** return all the memory allocated through this allocator to the gcheap. */
        void reset();

-        /** allocate some memory for code, return pointers to the region. */
-        void alloc(NIns* &start, NIns* &end);
+        /** allocate some memory (up to 'byteLimit' bytes) for code returning pointers to the region. A zero 'byteLimit' means no limit */
+        void alloc(NIns* &start, NIns* &end, size_t byteLimit);

        /** free a block of memory previously returned by alloc() */
        void free(NIns* start, NIns* end);

@@ -1710,7 +1710,7 @@ Assembler::nativePageSetup()
 {
     NanoAssert(!_inExit);
     if (!_nIns)
-        codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
+        codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes), NJ_MAX_CPOOL_OFFSET);

    // constpool starts at top of page and goes down,
    // code starts at bottom of page and moves up
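
The NJ_MAX_CPOOL_OFFSET cap above presumably keeps each chunk small enough that the constant pool at its top stays within reach of ARM's PC-relative LDR, which encodes a 12-bit literal offset (about +/-4095 bytes from the PC). Below is a standalone sketch of that reachability constraint, not nanojit code; cpoolReachable and the 8-byte PC bias are assumptions based on the classic A32 encoding.

    #include <cstdint>
    #include <cstdio>

    // Classic ARM (A32) LDR-literal: 12-bit offset plus an add/subtract bit,
    // measured from the PC, which reads as the instruction address + 8.
    static bool cpoolReachable(uintptr_t ldrAddr, uintptr_t literalAddr) {
        intptr_t delta = (intptr_t)literalAddr - (intptr_t)(ldrAddr + 8);
        return delta >= -4095 && delta <= 4095;
    }

    int main() {
        uintptr_t chunkBase = 0x10000;            // code grows up from here
        uintptr_t poolTop   = chunkBase + 0xF00;  // constants grow down from near the top

        // A load early in the chunk still reaches a constant near the top
        // because the whole chunk fits inside the LDR literal range.
        std::printf("reachable: %d\n", cpoolReachable(chunkBase + 0x20, poolTop));
        // Put the pool 8 KB away and the same load could no longer be encoded.
        std::printf("reachable: %d\n", cpoolReachable(chunkBase + 0x20, chunkBase + 0x2000));
        return 0;
    }
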
@@ -1731,7 +1731,7 @@ Assembler::underrunProtect(int bytes)
         verbose_only(verbose_outputf(" %p:", _nIns);)
         NIns* target = _nIns;
         // This may be in a normal code chunk or an exit code chunk.
-        codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
+        codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes), NJ_MAX_CPOOL_OFFSET);

         _nSlot = codeStart;
@@ -2910,7 +2910,7 @@ Assembler::asm_jtbl(LIns* ins, NIns** table)
 void Assembler::swapCodeChunks() {
     if (!_nExitIns)
-        codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
+        codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes), NJ_MAX_CPOOL_OFFSET);
     if (!_nExitSlot)
         _nExitSlot = exitStart;
     SWAP(NIns*, _nIns, _nExitIns);