Adds protect/unprotect API to CodeAlloc (bug 460993 r=nnethercote,rreitmai sr=gal)

Removes calls to VMPI_setPageProtection from CodeAlloc, and adds a new
protect/unprotect API to CodeAlloc, along with an SPI for the VM to implement
the actual page protection.

It is up to the VM to call codeAlloc->protect() (markAllExec() in this patch)
before executing jit'd code, but CodeAlloc will internally call unprotect()
before modifying blocks, as code is generated.  If the VM's implementation of
allocCodeChunk allocates memory as RWX, then the new protection APIs can be
ignored and the implementations of markCodeChunkExec/Write can be empty
functions.
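
For illustration only, a minimal VM-side implementation of the two SPI hooks
on a POSIX host might use mprotect (the choice of mprotect is an assumption;
nanojit only defines the hook signatures, shown in CodeAlloc.h below).  Chunks
are page-aligned, so mprotect's alignment requirement is satisfied:

    #include <sys/mman.h>

    // Make the chunk executable (and non-writable) before jit'd code runs.
    void nanojit::CodeAlloc::markCodeChunkExec(void* addr, size_t nbytes) {
        mprotect(addr, nbytes, PROT_READ | PROT_EXEC);
    }

    // Make the chunk writable (and non-executable) so code can be patched.
    void nanojit::CodeAlloc::markCodeChunkWrite(void* addr, size_t nbytes) {
        mprotect(addr, nbytes, PROT_READ | PROT_WRITE);
    }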

A flag per code chunk is used so that only modified chunks are unprotected and
reprotected.
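
For illustration, the resulting VM-side flow might look like this (runJitCode
and EntryPoint are hypothetical names, not part of the patch):

    #include <stdint.h>
    #include "nanojit.h"   // for nanojit::CodeAlloc and nanojit::NIns

    typedef int32_t (*EntryPoint)();

    int32_t runJitCode(nanojit::CodeAlloc* codeAlloc, nanojit::NIns* entry) {
        // While the assembler was writing, CodeAlloc called markBlockWrite()
        // internally as needed; this is the one explicit "protect" step, and
        // it only touches chunks whose isExec flag was cleared.
        codeAlloc->markAllExec();
        return ((EntryPoint) entry)();
    }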

CodeAlloc no longer calls VMPI_setPageProtection at all, so the platform
abstractions for page protection can be encapsulated in the CodeAlloc SPI methods.
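
On Windows, for example, the same two hooks could wrap VirtualProtect (again an
illustrative sketch, not code from this patch), keeping the platform #ifdefs
out of CodeAlloc itself:

    #include <windows.h>

    void nanojit::CodeAlloc::markCodeChunkExec(void* addr, size_t nbytes) {
        DWORD oldProtect;
        VirtualProtect(addr, nbytes, PAGE_EXECUTE_READ, &oldProtect);
    }

    void nanojit::CodeAlloc::markCodeChunkWrite(void* addr, size_t nbytes) {
        DWORD oldProtect;
        VirtualProtect(addr, nbytes, PAGE_READWRITE, &oldProtect);
    }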

nanojit/avmplus.cpp was modified to call VMPI_setPageProtection on the platforms
that didn't already allocate code memory as RWX.  Since those call sites are
platform-specific and are now the only places we call VMPI_setPageProtection,
the code could be simplified further.

--HG--
extra : convert_revision : a1002278492b012b727550db2fb928faa6bee36b
Edwin Smith 2010-01-20 13:32:02 -05:00
parent 9d7bbdf810
commit b1f006af09
5 changed files with 82 additions and 18 deletions

nanojit/Assembler.cpp

@@ -941,7 +941,7 @@ namespace nanojit
            // something went wrong, release all allocated code memory
            _codeAlloc.freeAll(codeList);
            if (_nExitIns)
                _codeAlloc.free(exitStart, exitEnd);
            _codeAlloc.free(codeStart, codeEnd);
            codeList = NULL;
            return;
@@ -957,15 +957,15 @@ namespace nanojit
        #ifdef NANOJIT_ARM
            // [codeStart, _nSlot) ... gap ... [_nIns, codeEnd)
            if (_nExitIns) {
                _codeAlloc.addRemainder(codeList, exitStart, exitEnd, _nExitSlot, _nExitIns);
                verbose_only( exitBytes -= (_nExitIns - _nExitSlot) * sizeof(NIns); )
            }
            _codeAlloc.addRemainder(codeList, codeStart, codeEnd, _nSlot, _nIns);
            verbose_only( codeBytes -= (_nIns - _nSlot) * sizeof(NIns); )
        #else
            // [codeStart ... gap ... [_nIns, codeEnd))
            if (_nExitIns) {
                _codeAlloc.addRemainder(codeList, exitStart, exitEnd, exitStart, _nExitIns);
                verbose_only( exitBytes -= (_nExitIns - exitStart) * sizeof(NIns); )
            }
            _codeAlloc.addRemainder(codeList, codeStart, codeEnd, codeStart, _nIns);

nanojit/CodeAlloc.cpp

@@ -70,14 +70,14 @@ namespace nanojit
    void CodeAlloc::reset() {
        // give all memory back to gcheap. Assumption is that all
        // code is done being used by now.
-       for (CodeList* b = heapblocks; b != 0; ) {
+       for (CodeList* hb = heapblocks; hb != 0; ) {
            _nvprof("free page",1);
-           CodeList* next = b->next;
-           void *mem = firstBlock(b);
-           VMPI_setPageProtection(mem, bytesPerAlloc, false /* executable */, true /* writable */);
-           freeCodeChunk(mem, bytesPerAlloc);
+           CodeList* next = hb->next;
+           CodeList* fb = firstBlock(hb);
+           markBlockWrite(fb);
+           freeCodeChunk(fb, bytesPerAlloc);
            totalAllocated -= bytesPerAlloc;
-           b = next;
+           hb = next;
        }
        NanoAssert(!totalAllocated);
        heapblocks = availblocks = 0;
@@ -89,9 +89,10 @@ namespace nanojit
        return (CodeList*) (end - (uintptr_t)bytesPerAlloc);
    }

-   int round(size_t x) {
+   static int round(size_t x) {
        return (int)((x + 512) >> 10);
    }

    void CodeAlloc::logStats() {
        size_t total = 0;
        size_t frag_size = 0;
@@ -112,9 +113,19 @@ namespace nanojit
            round(total), round(free_size), frag_size);
    }

+   inline void CodeAlloc::markBlockWrite(CodeList* b) {
+       NanoAssert(b->terminator != NULL);
+       CodeList* term = b->terminator;
+       if (term->isExec) {
+           markCodeChunkWrite(firstBlock(term), bytesPerAlloc);
+           term->isExec = false;
+       }
+   }
+
    void CodeAlloc::alloc(NIns* &start, NIns* &end) {
        // Reuse a block if possible.
        if (availblocks) {
+           markBlockWrite(availblocks);
            CodeList* b = removeBlock(availblocks);
            b->isFree = false;
            start = b->start();
@@ -128,7 +139,6 @@ namespace nanojit
        totalAllocated += bytesPerAlloc;
        NanoAssert(mem != NULL); // see allocCodeChunk contract in CodeAlloc.h
        _nvprof("alloc page", uintptr_t(mem)>>12);
-       VMPI_setPageProtection(mem, bytesPerAlloc, true/*executable*/, true/*writable*/);
        CodeList* b = addMem(mem, bytesPerAlloc);
        b->isFree = false;
        start = b->start();
@@ -225,7 +235,7 @@ namespace nanojit
            void* mem = hb->lower;
            *prev = hb->next;
            _nvprof("free page",1);
-           VMPI_setPageProtection(mem, bytesPerAlloc, false /* executable */, true /* writable */);
+           markBlockWrite(firstBlock(hb));
            freeCodeChunk(mem, bytesPerAlloc);
            totalAllocated -= bytesPerAlloc;
        } else {
@@ -347,9 +357,12 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
        // create a tiny terminator block, add to fragmented list, this way
        // all other blocks have a valid block at b->higher
        CodeList* terminator = b->higher;
+       b->terminator = terminator;
        terminator->lower = b;
        terminator->end = 0; // this is how we identify the terminator
        terminator->isFree = false;
+       terminator->isExec = false;
+       terminator->terminator = 0;
        debug_only(sanity_check();)

        // add terminator to heapblocks list so we can track whole blocks
@@ -365,7 +378,7 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
    CodeList* CodeAlloc::removeBlock(CodeList* &blocks) {
        CodeList* b = blocks;
-       NanoAssert(b);
+       NanoAssert(b != NULL);
        blocks = b->next;
        b->next = 0;
        return b;
@@ -399,6 +412,7 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
        //  b1     b2
        CodeList* b1 = getBlock(start, end);
        CodeList* b2 = (CodeList*) (uintptr_t(holeEnd) - offsetof(CodeList, code));
+       b2->terminator = b1->terminator;
        b2->isFree = false;
        b2->next = 0;
        b2->higher = b1->higher;
@@ -421,10 +435,12 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
        b2->lower = b1;
        b2->higher = b3;
        b2->isFree = false; // redundant, since we're about to free, but good hygiene
+       b2->terminator = b1->terminator;
        b3->lower = b2;
        b3->end = end;
        b3->isFree = false;
        b3->higher->lower = b3;
+       b3->terminator = b1->terminator;
        b2->next = 0;
        b3->next = 0;
        debug_only(sanity_check();)
@@ -518,5 +534,14 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
    #endif /* CROSS_CHECK_FREE_LIST */
    }
    #endif

+   void CodeAlloc::markAllExec() {
+       for (CodeList* hb = heapblocks; hb != NULL; hb = hb->next) {
+           if (!hb->isExec) {
+               hb->isExec = true;
+               markCodeChunkExec(firstBlock(hb), bytesPerAlloc);
+           }
+       }
+   }
}
#endif // FEATURE_NANOJIT

nanojit/CodeAlloc.h

@@ -64,8 +64,16 @@ namespace nanojit
            for splitting and coalescing blocks. */
        CodeList* lower;

+       /** pointer to the heapblock terminator that represents the code chunk
+        *  containing this block */
+       CodeList* terminator;
+
        /** true if block is free, false otherwise */
        bool isFree;

+       /** (only valid for terminator blocks).  Set true just before calling
+        *  markCodeChunkExec() and false just after markCodeChunkWrite(). */
+       bool isExec;
+
        union {
            // this union is used in lieu of pointer punning in code
            // the end of this block is always the address of the next higher block
@@ -142,9 +150,17 @@ namespace nanojit
        /** free a block previously allocated by allocCodeChunk. nbytes will
         * match the previous allocCodeChunk, but is provided here as well
-        * to mirror the mmap()/munmap() api. */
+        * to mirror the mmap()/munmap() api.  markCodeChunkWrite() will have
+        * been called if necessary, so freeCodeChunk() need not do it again. */
        void freeCodeChunk(void* addr, size_t nbytes);

+       /** make this specific extent ready to execute (might remove write) */
+       void markCodeChunkExec(void* addr, size_t nbytes);
+
+       /** make this extent ready to modify (might remove exec) */
+       void markCodeChunkWrite(void* addr, size_t nbytes);
+
    public:
        CodeAlloc();
        ~CodeAlloc();
@@ -198,6 +214,12 @@ namespace nanojit
        /** return any completely empty pages */
        void sweep();

+       /** protect all code in this code alloc */
+       void markAllExec();
+
+       /** unprotect the code chunk containing just this one block */
+       void markBlockWrite(CodeList* b);
    };
}

nanojit/LIR.cpp

@@ -495,7 +495,7 @@ namespace nanojit
            return out->ins1(v, i);
        }

        // This is an ugly workaround for an apparent compiler
        // bug; in VC2008, compiling with optimization on
        // will produce spurious errors if this code is inlined
@@ -544,7 +544,7 @@ namespace nanojit
        int32_t r;
        switch (v) {
        case LIR_qjoin:
            return insImmf(do_join(c1, c2));
        case LIR_eq:
            return insImm(c1 == c2);

nanojit/avmplus.cpp

@@ -89,11 +89,13 @@ void*
nanojit::CodeAlloc::allocCodeChunk(size_t nbytes) {
    void * buffer;
    posix_memalign(&buffer, 4096, nbytes);
+   VMPI_setPageProtection(buffer, nbytes, true /* exec */, true /* write */);
    return buffer;
}

void
nanojit::CodeAlloc::freeCodeChunk(void *p, size_t nbytes) {
+   VMPI_setPageProtection(p, nbytes, false /* exec */, true /* write */);
    ::free(p);
}
@@ -155,13 +157,28 @@ nanojit::CodeAlloc::freeCodeChunk(void *p, size_t nbytes) {
void*
nanojit::CodeAlloc::allocCodeChunk(size_t nbytes) {
-   return valloc(nbytes);
+   void* mem = valloc(nbytes);
+   VMPI_setPageProtection(mem, nbytes, true /* exec */, true /* write */);
+   return mem;
}

void
nanojit::CodeAlloc::freeCodeChunk(void *p, size_t nbytes) {
+   VMPI_setPageProtection(p, nbytes, false /* exec */, true /* write */);
    ::free(p);
}

#endif // WIN32

+// All of the allocCodeChunk/freeCodeChunk implementations above allocate
+// code memory as RWX and free it as-is, so the explicit page-protection
+// SPI methods below are no-ops.
+
+void
+nanojit::CodeAlloc::markCodeChunkWrite(void*, size_t)
+{}
+
+void
+nanojit::CodeAlloc::markCodeChunkExec(void*, size_t)
+{}