From 6b35e95ed300bece86ee852a0e227fe11b098105 Mon Sep 17 00:00:00 2001
From: Rick Reitmaier
Date: Wed, 5 Jan 2011 10:55:31 -0800
Subject: [PATCH] Bug 614126 - Decouple CodeAlloc block size from allocation size (r+edwsmith,nnethercote)

--HG--
extra : convert_revision : 2c9dc64162bd57c225c4b8cd184dfcf6d03a2e5f
---
 js/src/nanojit/Assembler.cpp |  5 ++--
 js/src/nanojit/Assembler.h   |  3 ++-
 js/src/nanojit/CodeAlloc.cpp | 45 +++++++++++++++++++++++++++++++-----
 js/src/nanojit/CodeAlloc.h   | 10 ++++++--
 js/src/nanojit/NativeARM.cpp |  6 ++---
 5 files changed, 55 insertions(+), 14 deletions(-)

diff --git a/js/src/nanojit/Assembler.cpp b/js/src/nanojit/Assembler.cpp
index 9883dec3722..4e468fa2cbf 100755
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -276,14 +276,15 @@ namespace nanojit
     }
 
     void Assembler::codeAlloc(NIns *&start, NIns *&end, NIns *&eip
-                              verbose_only(, size_t &nBytes))
+                              verbose_only(, size_t &nBytes)
+                              , size_t byteLimit)
     {
         // save the block we just filled
         if (start)
             CodeAlloc::add(codeList, start, end);
 
         // CodeAlloc contract: allocations never fail
-        _codeAlloc.alloc(start, end);
+        _codeAlloc.alloc(start, end, byteLimit);
         verbose_only( nBytes += (end - start) * sizeof(NIns); )
         NanoAssert(uintptr_t(end) - uintptr_t(start) >= (size_t)LARGEST_UNDERRUN_PROT);
         eip = end;
diff --git a/js/src/nanojit/Assembler.h b/js/src/nanojit/Assembler.h
index 3d2d643d2e7..649a3d2c1d7 100644
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -361,7 +361,8 @@ namespace nanojit
             void        getBaseIndexScale(LIns* addp, LIns** base, LIns** index, int* scale);
 
             void        codeAlloc(NIns *&start, NIns *&end, NIns *&eip
-                                  verbose_only(, size_t &nBytes));
+                                  verbose_only(, size_t &nBytes)
+                                  , size_t byteLimit=0);
 
             // These instructions don't have to be saved & reloaded to spill,
             // they can just be recalculated cheaply.
diff --git a/js/src/nanojit/CodeAlloc.cpp b/js/src/nanojit/CodeAlloc.cpp
index 2d0f0847473..61b711a48f9 100644
--- a/js/src/nanojit/CodeAlloc.cpp
+++ b/js/src/nanojit/CodeAlloc.cpp
@@ -127,15 +127,49 @@ namespace nanojit
         }
     }
 
-    void CodeAlloc::alloc(NIns* &start, NIns* &end) {
+    void CodeAlloc::alloc(NIns* &start, NIns* &end, size_t byteLimit) {
         if (!availblocks) {
             // no free mem, get more
             addMem();
         }
 
-        // grab a block
+        // grab a block
+        NanoAssert(!byteLimit || byteLimit > blkSpaceFor(2)); // if a limit is imposed it must be bigger than 2x minimum block size (see below)
         markBlockWrite(availblocks);
         CodeList* b = removeBlock(availblocks);
+
+        // limit imposed (byteLimit > 0) and the block is too big? then break it apart
+        if (byteLimit > 0 && b->size() > byteLimit) {
+
+            size_t consume;   // # bytes to extract from the free block
+
+            // enough space to carve out a perfectly sized blk? (leaving at least a full free blk)
+            if (b->size() >= byteLimit + headerSpaceFor(1) + blkSpaceFor(1)) {
+                // yes, then take exactly what we need
+                consume = byteLimit + headerSpaceFor(1);
+            } else {
+                // no, then we should only take the min amount
+                consume = blkSpaceFor(1);
+
+                // ... and since b->size() > byteLimit && byteLimit > blkSpaceFor(2)
+                NanoAssert( b->size() > blkSpaceFor(2) );
+                NanoAssert( b->size() - consume > blkSpaceFor(1) ); // thus, we know that at least 1 blk left.
+            }
+
+            // break block into 2 pieces, returning the lower portion to the free list
+            CodeList* higher = b->higher;
+            b->end = (NIns*) ( (uintptr_t)b->end - consume );
+            CodeList* b1 = b->higher;
+            higher->lower = b1;
+            b1->higher = higher;
+            b1->lower = b;
+            b1->terminator = b->terminator;
+            NanoAssert(b->size() > minAllocSize);
+            addBlock(availblocks, b); // put back the rest of the block
+            b = b1;
+        }
+        NanoAssert(b->size() >= minAllocSize);
+        b->next = 0; // not technically needed (except for debug builds), but good hygiene.
         b->isFree = false;
         start = b->start();
         end = b->end;
@@ -399,10 +433,9 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
             // shrink the hole by aligning holeStart forward and holeEnd backward
             holeStart = (NIns*) ((uintptr_t(holeStart) + sizeof(NIns*)-1) & ~(sizeof(NIns*)-1));
             holeEnd = (NIns*) (uintptr_t(holeEnd) & ~(sizeof(NIns*)-1));
-            size_t minHole = minAllocSize;
-            if (minHole < 2*sizeofMinBlock)
-                minHole = 2*sizeofMinBlock;
-            if (uintptr_t(holeEnd) - uintptr_t(holeStart) < minHole) {
+            // hole needs to be big enough for 2 headers + 1 block of free space (subtraction not used in check to avoid wraparound)
+            size_t minHole = headerSpaceFor(2) + blkSpaceFor(1);
+            if (uintptr_t(holeEnd) < minHole + uintptr_t(holeStart) ) {
                 // the hole is too small to make a new free block and a new used block. just keep
                 // the whole original block and don't free anything.
                 add(blocks, start, end);
diff --git a/js/src/nanojit/CodeAlloc.h b/js/src/nanojit/CodeAlloc.h
index e56ee0d2cdd..b5cb2c634e4 100644
--- a/js/src/nanojit/CodeAlloc.h
+++ b/js/src/nanojit/CodeAlloc.h
@@ -119,6 +119,12 @@ namespace nanojit
         static const size_t sizeofMinBlock = offsetof(CodeList, code);
         static const size_t minAllocSize = LARGEST_UNDERRUN_PROT;
 
+        // Return the number of bytes needed for the header of 'n' blocks
+        static size_t headerSpaceFor(uint32_t nbrBlks) { return nbrBlks * sizeofMinBlock; }
+
+        // Return the number of bytes needed in order to safely construct 'n' blocks
+        static size_t blkSpaceFor(uint32_t nbrBlks) { return (nbrBlks * minAllocSize) + headerSpaceFor(nbrBlks); }
+
        /** Terminator blocks. All active and free allocations are reachable by
            traversing this chain and each element's lower chain.
        */
@@ -181,8 +187,8 @@ namespace nanojit
        /** return all the memory allocated through this allocator to the gcheap. */
        void reset();
 
-       /** allocate some memory for code, return pointers to the region. */
-       void alloc(NIns* &start, NIns* &end);
+       /** allocate some memory (up to 'byteLimit' bytes) for code returning pointers to the region. A zero 'byteLimit' means no limit */
+       void alloc(NIns* &start, NIns* &end, size_t byteLimit);
 
        /** free a block of memory previously returned by alloc() */
        void free(NIns* start, NIns* end);
diff --git a/js/src/nanojit/NativeARM.cpp b/js/src/nanojit/NativeARM.cpp
index 940df4fcff9..1c38cac36de 100644
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -1710,7 +1710,7 @@ Assembler::nativePageSetup()
 {
     NanoAssert(!_inExit);
     if (!_nIns)
-        codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
+        codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes), NJ_MAX_CPOOL_OFFSET);
 
     // constpool starts at top of page and goes down,
     // code starts at bottom of page and moves up
@@ -1731,7 +1731,7 @@ Assembler::underrunProtect(int bytes)
         verbose_only(verbose_outputf(" %p:", _nIns);)
         NIns* target = _nIns;
         // This may be in a normal code chunk or an exit code chunk.
-        codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
+        codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes), NJ_MAX_CPOOL_OFFSET);
 
         _nSlot = codeStart;
 
@@ -2910,7 +2910,7 @@ Assembler::asm_jtbl(LIns* ins, NIns** table)
 
 void Assembler::swapCodeChunks() {
     if (!_nExitIns)
-        codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
+        codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes), NJ_MAX_CPOOL_OFFSET);
     if (!_nExitSlot)
         _nExitSlot = exitStart;
     SWAP(NIns*, _nIns, _nExitIns);
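
The heart of the patch is the new split path in CodeAlloc::alloc(): when a byteLimit is imposed and the free block at hand is bigger, the allocator carves off either a perfectly sized piece (byteLimit plus one header) or, if that would not leave a whole free block behind, just the minimum blkSpaceFor(1). The ARM backend then passes NJ_MAX_CPOOL_OFFSET as the limit from nativePageSetup(), underrunProtect() and swapCodeChunks(), presumably so a code chunk never outgrows the reach of its constant pool. Below is a minimal standalone sketch of that size arithmetic only, not the nanojit sources: kHeaderSize and kMinAllocSize are made-up stand-ins for sizeofMinBlock and minAllocSize, and consumeFor() is a hypothetical helper that merely mirrors the decision the patch adds.

// Illustrative sketch only -- not nanojit code. Models the size accounting
// the patch introduces: headerSpaceFor/blkSpaceFor and the choice of how
// many bytes ("consume") to carve off a free block when a byteLimit is set.
#include <cassert>
#include <cstddef>
#include <cstdio>

static const size_t kHeaderSize   = 32;   // stand-in for sizeofMinBlock
static const size_t kMinAllocSize = 128;  // stand-in for minAllocSize (LARGEST_UNDERRUN_PROT)

// Bytes needed for the headers of 'n' blocks (mirrors headerSpaceFor).
static size_t headerSpaceFor(size_t n) { return n * kHeaderSize; }

// Bytes needed to safely construct 'n' blocks, headers included (mirrors blkSpaceFor).
static size_t blkSpaceFor(size_t n) { return n * kMinAllocSize + headerSpaceFor(n); }

// Given a free block of 'blockSize' bytes and a non-zero 'byteLimit', return
// how many bytes the allocator would take off the top of that block.
static size_t consumeFor(size_t blockSize, size_t byteLimit) {
    assert(byteLimit > blkSpaceFor(2));       // precondition asserted by the patch
    if (blockSize <= byteLimit)
        return blockSize;                     // block already within the limit: use it whole
    if (blockSize >= byteLimit + headerSpaceFor(1) + blkSpaceFor(1))
        return byteLimit + headerSpaceFor(1); // perfect fit, and a full free block remains
    // Otherwise take only the minimum; the remainder is still a legal free block.
    size_t consume = blkSpaceFor(1);
    assert(blockSize - consume > blkSpaceFor(1));
    return consume;
}

int main() {
    const size_t byteLimit = 4096;            // e.g. a constant-pool reach limit
    printf("big block:   consume %zu\n", consumeFor(16384, byteLimit));
    printf("tight block: consume %zu\n", consumeFor(4200, byteLimit));
    printf("small block: consume %zu\n", consumeFor(2048, byteLimit));
}

The precondition matches the patch's NanoAssert: any non-zero limit must exceed blkSpaceFor(2), which is what guarantees the piece returned to the free list is still large enough to be a legal block.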
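
The second CodeAlloc.cpp hunk also reworks how free() decides whether an aligned hole is worth splitting out: the minimum becomes headerSpaceFor(2) + blkSpaceFor(1), and, as the new comment notes, the test adds rather than subtracts so that unsigned wraparound cannot make a degenerate hole (for example, holeEnd aligned down to or below holeStart) look enormous. A small sketch of that hazard, again illustrative only, reusing the stand-in sizes from above; the helper names are invented, not nanojit APIs.

// Illustrative sketch only -- shows why 'end < minHole + start' is safer
// than 'end - start < minHole' for unsigned pointer-sized arithmetic.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static bool holeBigEnoughSubtract(uintptr_t start, uintptr_t end, size_t minHole) {
    return (end - start) >= minHole;          // wraps to a huge value if end < start
}

static bool holeBigEnoughAdd(uintptr_t start, uintptr_t end, size_t minHole) {
    return end >= minHole + start;            // no subtraction, so no wraparound
}

int main() {
    const size_t minHole = 224;               // headerSpaceFor(2) + blkSpaceFor(1) with the stand-in sizes
    uintptr_t start = 0x1010, end = 0x1000;   // degenerate hole: end landed below start after alignment
    printf("subtract form says big enough: %d\n", holeBigEnoughSubtract(start, end, minHole)); // 1 (wrong)
    printf("add form says big enough:      %d\n", holeBigEnoughAdd(start, end, minHole));      // 0 (right)
}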