/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
* Version : MPL 1.1 / GPL 2.0 / LGPL 2.1
*
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an " AS IS " basis ,
* WITHOUT WARRANTY OF ANY KIND , either express or implied . See the License
* for the specific language governing rights and limitations under the
* License .
*
* The Original Code is [ Open Source Virtual Machine ] .
*
* The Initial Developer of the Original Code is
* Adobe System Incorporated .
* Portions created by the Initial Developer are Copyright ( C ) 2004 - 2007
* the Initial Developer . All Rights Reserved .
*
* Contributor ( s ) :
* Adobe AS3 Team
*
* Alternatively , the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later ( the " GPL " ) , or
* the GNU Lesser General Public License Version 2.1 or later ( the " LGPL " ) ,
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above . If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL , and not to allow others to
* use your version of this file under the terms of the MPL , indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL . If you do not delete
* the provisions above , a recipient may use your version of this file under
* the terms of any one of the MPL , the GPL or the LGPL .
*
 * ***** END LICENSE BLOCK ***** */
# include "nanojit.h"
//#define DOPROF
# include "../vprof/vprof.h"
# ifdef FEATURE_NANOJIT
namespace nanojit
{
    // When true, alloc/free emit a line per operation via AvmLog.
    static const bool verbose = false;

#if defined(NANOJIT_ARM)
    // ARM requires single-page allocations, due to the constant pool that
    // lives on each page that must be reachable by a 4kb pcrel load.
    static const int pagesPerAlloc = 1;
#else
    static const int pagesPerAlloc = 16;
#endif
    // Construct an empty allocator.  Allocation granularity is
    // pagesPerAlloc VM pages per chunk, derived from the runtime page size.
    CodeAlloc::CodeAlloc()
        : heapblocks(0)                              // list of terminator blocks, one per chunk
        , availblocks(0)                             // free list of reusable blocks
        , totalAllocated(0)                          // bytes currently held from the OS/heap
        , bytesPerPage(VMPI_getVMPageSize())
        , bytesPerAlloc(pagesPerAlloc * bytesPerPage)
    {
    }
    // Destructor returns all code memory to the underlying allocator; see reset().
    CodeAlloc::~CodeAlloc() {
        reset();
    }
void CodeAlloc : : reset ( ) {
2009-07-10 11:21:53 -07:00
// give all memory back to gcheap. Assumption is that all
// code is done being used by now.
2010-01-22 12:18:12 -08:00
for ( CodeList * hb = heapblocks ; hb ! = 0 ; ) {
2009-07-10 11:21:53 -07:00
_nvprof ( " free page " , 1 ) ;
2010-01-22 12:18:12 -08:00
CodeList * next = hb - > next ;
CodeList * fb = firstBlock ( hb ) ;
markBlockWrite ( fb ) ;
freeCodeChunk ( fb , bytesPerAlloc ) ;
2009-07-15 16:50:01 -07:00
totalAllocated - = bytesPerAlloc ;
2010-01-22 12:18:12 -08:00
hb = next ;
2009-07-10 11:21:53 -07:00
}
2009-09-12 23:06:51 -07:00
NanoAssert ( ! totalAllocated ) ;
heapblocks = availblocks = 0 ;
2009-07-10 11:21:53 -07:00
}
CodeList * CodeAlloc : : firstBlock ( CodeList * term ) {
2009-09-18 13:31:09 -07:00
// use uintptr_t, rather than char*, to avoid "increases required alignment" warning
uintptr_t end = ( uintptr_t ) alignUp ( term , bytesPerPage ) ;
return ( CodeList * ) ( end - ( uintptr_t ) bytesPerAlloc ) ;
2009-07-10 11:21:53 -07:00
}
2010-01-22 12:18:12 -08:00
static int round(size_t x) {
    // Convert a byte count to kilobytes, rounding to nearest
    // (512 bytes and up rounds upward).
    return (int) ((x + 512) / 1024);
}
void CodeAlloc : : logStats ( ) {
size_t total = 0 ;
size_t frag_size = 0 ;
size_t free_size = 0 ;
int free_count = 0 ;
for ( CodeList * hb = heapblocks ; hb ! = 0 ; hb = hb - > next ) {
total + = bytesPerAlloc ;
for ( CodeList * b = hb - > lower ; b ! = 0 ; b = b - > lower ) {
if ( b - > isFree ) {
free_count + + ;
free_size + = b - > blockSize ( ) ;
if ( b - > size ( ) < minAllocSize )
frag_size + = b - > blockSize ( ) ;
}
}
}
avmplus : : AvmLog ( " code-heap: %dk free %dk fragmented %d \n " ,
round ( total ) , round ( free_size ) , frag_size ) ;
}
2010-01-22 12:18:12 -08:00
inline void CodeAlloc : : markBlockWrite ( CodeList * b ) {
NanoAssert ( b - > terminator ! = NULL ) ;
CodeList * term = b - > terminator ;
if ( term - > isExec ) {
markCodeChunkWrite ( firstBlock ( term ) , bytesPerAlloc ) ;
term - > isExec = false ;
}
}
2009-07-10 11:21:53 -07:00
void CodeAlloc : : alloc ( NIns * & start , NIns * & end ) {
2009-07-15 16:50:01 -07:00
// Reuse a block if possible.
if ( availblocks ) {
2010-01-22 12:18:12 -08:00
markBlockWrite ( availblocks ) ;
2009-07-15 16:50:01 -07:00
CodeList * b = removeBlock ( availblocks ) ;
b - > isFree = false ;
start = b - > start ( ) ;
end = b - > end ;
if ( verbose )
avmplus : : AvmLog ( " alloc %p-%p %d \n " , start , end , int ( end - start ) ) ;
return ;
2009-07-10 11:21:53 -07:00
}
// no suitable block found, get more memory
2009-07-15 16:50:01 -07:00
void * mem = allocCodeChunk ( bytesPerAlloc ) ; // allocations never fail
totalAllocated + = bytesPerAlloc ;
NanoAssert ( mem ! = NULL ) ; // see allocCodeChunk contract in CodeAlloc.h
2009-07-10 11:21:53 -07:00
_nvprof ( " alloc page " , uintptr_t ( mem ) > > 12 ) ;
CodeList * b = addMem ( mem , bytesPerAlloc ) ;
b - > isFree = false ;
start = b - > start ( ) ;
end = b - > end ;
if ( verbose )
avmplus : : AvmLog ( " alloc %p-%p %d \n " , start , end , int ( end - start ) ) ;
}
    // Return the block [start,end) to the allocator.  Adjacent free
    // neighbors are coalesced; if the resulting block is large enough to
    // reuse it ends up on the availblocks list exactly once.
    void CodeAlloc::free(NIns* start, NIns* end) {
        NanoAssert(heapblocks);

        CodeList* blk = getBlock(start, end);
        if (verbose)
            avmplus::AvmLog("free %p-%p %d\n", start, end, (int)blk->size());

        NanoAssert(!blk->isFree);       // double-free guard

        // coalesce adjacent blocks.
        bool already_on_avail_list;

        if (blk->lower && blk->lower->isFree) {
            // combine blk into blk->lower (destroy blk)
            CodeList* lower = blk->lower;
            CodeList* higher = blk->higher;
            // If the lower block was big enough to be reusable it is already
            // linked on availblocks, so the merged block must not be re-added.
            already_on_avail_list = lower->size() >= minAllocSize;
            lower->higher = higher;
            higher->lower = lower;
            blk = lower;
        }
        else
            already_on_avail_list = false;

        // the last block in each heapblock is a terminator block,
        // which is never free, therefore blk->higher != null
        if (blk->higher->isFree) {
            CodeList* higher = blk->higher->higher;
            CodeList* coalescedBlock = blk->higher;

            // Only blocks >= minAllocSize live on the availblocks list,
            // so only those need to be unlinked before being absorbed.
            if (coalescedBlock->size() >= minAllocSize) {
                // Unlink coalescedBlock from the available block chain.
                if (availblocks == coalescedBlock) {
                    removeBlock(availblocks);
                }
                else {
                    // Linear scan: availblocks is singly linked.
                    CodeList* free_block = availblocks;
                    while (free_block && free_block->next != coalescedBlock) {
                        NanoAssert(free_block->size() >= minAllocSize);
                        NanoAssert(free_block->isFree);
                        NanoAssert(free_block->next);
                        free_block = free_block->next;
                    }
                    NanoAssert(free_block && free_block->next == coalescedBlock);
                    free_block->next = coalescedBlock->next;
                }
            }

            // combine blk->higher into blk (destroy coalescedBlock)
            blk->higher = higher;
            higher->lower = blk;
        }
        blk->isFree = true;
        // Invariant: no two adjacent free blocks remain after coalescing.
        NanoAssert(!blk->lower || !blk->lower->isFree);
        NanoAssert(blk->higher && !blk->higher->isFree);
        //memset(blk->start(), 0xCC, blk->size()); // INT 3 instruction

        if (!already_on_avail_list && blk->size() >= minAllocSize)
            addBlock(availblocks, blk);

        NanoAssert(heapblocks);
        debug_only(sanity_check();)
    }
void CodeAlloc : : freeAll ( CodeList * & code ) {
while ( code ) {
CodeList * b = removeBlock ( code ) ;
free ( b - > start ( ) , b - > end ) ;
}
}
2009-10-21 16:26:52 -07:00
#if defined NANOJIT_ARM && defined UNDER_CE
    // Use a single flush for the whole CodeList, when we have no
    // finer-granularity flush support, as on WinCE.
    void CodeAlloc::flushICache(CodeList* & /*blocks*/) {
        FlushInstructionCache(GetCurrentProcess(), NULL, NULL);
    }
#else
    // Flush each block on the list individually through the
    // platform-specific (void*, size_t) overload defined below.
    void CodeAlloc::flushICache(CodeList* &blocks) {
        for (CodeList* b = blocks; b != 0; b = b->next)
            flushICache(b->start(), b->size());
    }
#endif
2009-07-10 11:21:53 -07:00
// Platform declarations for the cache-flush primitives used by the
// flushICache implementations below.
#if defined(AVMPLUS_UNIX) && defined(NANOJIT_ARM)
#include <asm/unistd.h>
extern "C" void __clear_cache(char* BEG, char* END);
#endif

#if defined(AVMPLUS_UNIX) && defined(NANOJIT_MIPS)
#include <asm/cachectl.h>
extern "C" int cacheflush(char* addr, int nbytes, int cache);
#endif

#ifdef AVMPLUS_SPARC
// Note: the linux #define provided by the compiler.
#ifdef linux  // bugzilla 502369
    // No library sync_instruction_memory on sparc-linux; emit a "flush"
    // instruction directly, stepping 32 bytes at a time over the range.
    void sync_instruction_memory(caddr_t v, u_int len)
    {
        caddr_t end = v + len;
        caddr_t p = v;
        while (p < end) {
            asm("flush %0" : : "r" (p));
            p += 32;
        }
    }
#else
    // Solaris provides this in libc.
    extern "C" void sync_instruction_memory(caddr_t v, u_int len);
#endif
#endif
2009-07-10 11:21:53 -07:00
# if defined NANOJIT_IA32 || defined NANOJIT_X64
// intel chips have dcache/icache interlock
2009-10-21 16:26:52 -07:00
void CodeAlloc : : flushICache ( void * start , size_t len ) {
2009-07-15 16:50:01 -07:00
// Tell Valgrind that new code has been generated, and it must flush
// any translations it has for the memory range generated into.
2009-10-21 16:26:52 -07:00
( void ) start ;
( void ) len ;
VALGRIND_DISCARD_TRANSLATIONS ( start , len ) ;
2009-07-15 16:50:01 -07:00
}
2009-07-10 11:21:53 -07:00
# elif defined NANOJIT_ARM && defined UNDER_CE
2009-10-21 16:26:52 -07:00
// On arm/winmo, just flush the whole icache. The
// WinCE docs indicate that this function actually ignores its
// 2nd and 3rd arguments, and wants them to be NULL.
void CodeAlloc : : flushICache ( void * , size_t ) {
2009-07-10 12:58:34 -07:00
FlushInstructionCache ( GetCurrentProcess ( ) , NULL , NULL ) ;
2009-07-10 11:21:53 -07:00
}
# elif defined AVMPLUS_MAC && defined NANOJIT_PPC
# ifdef NANOJIT_64BIT
extern " C " void sys_icache_invalidate ( const void * , size_t len ) ;
extern " C " void sys_dcache_flush ( const void * , size_t len ) ;
// mac 64bit requires 10.5 so use that api
2009-10-21 16:26:52 -07:00
void CodeAlloc : : flushICache ( void * start , size_t len ) {
sys_dcache_flush ( start , len ) ;
sys_icache_invalidate ( start , len ) ;
2009-07-10 11:21:53 -07:00
}
# else
// mac ppc 32 could be 10.0 or later
// uses MakeDataExecutable() from Carbon api, OSUtils.h
// see http://developer.apple.com/documentation/Carbon/Reference/Memory_Manag_nt_Utilities/Reference/reference.html#//apple_ref/c/func/MakeDataExecutable
2009-10-21 16:26:52 -07:00
void CodeAlloc : : flushICache ( void * start , size_t len ) {
MakeDataExecutable ( start , len ) ;
2009-07-10 11:21:53 -07:00
}
# endif
2010-06-16 18:16:08 -07:00
# elif defined NANOJIT_ARM && defined VMCFG_SYMBIAN
void CodeAlloc : : flushICache ( void * ptr , size_t len ) {
uint32_t start = ( uint32_t ) ptr ;
uint32_t rangeEnd = start + len ;
User : : IMB_Range ( ( TAny * ) start , ( TAny * ) rangeEnd ) ;
}
2009-07-10 11:21:53 -07:00
# elif defined AVMPLUS_SPARC
// fixme: sync_instruction_memory is a solaris api, test for solaris not sparc
2009-10-21 16:26:52 -07:00
void CodeAlloc : : flushICache ( void * start , size_t len ) {
sync_instruction_memory ( ( char * ) start , len ) ;
2009-07-10 11:21:53 -07:00
}
2010-02-01 10:22:30 -08:00
# elif defined(AVMPLUS_UNIX) && defined(NANOJIT_MIPS)
void CodeAlloc : : flushICache ( void * start , size_t len ) {
// FIXME Use synci on MIPS32R2
cacheflush ( ( char * ) start , len , BCACHE ) ;
}
2009-07-10 11:21:53 -07:00
# elif defined AVMPLUS_UNIX
2009-09-18 13:31:09 -07:00
# ifdef ANDROID
2009-10-21 16:26:52 -07:00
void CodeAlloc : : flushICache ( void * start , size_t len ) {
2009-10-21 19:58:21 -07:00
cacheflush ( ( int ) start , ( int ) start + len , 0 ) ;
2009-09-18 13:31:09 -07:00
}
2009-12-21 12:05:48 -08:00
# else
2009-07-10 12:58:34 -07:00
// fixme: __clear_cache is a libgcc feature, test for libgcc or gcc
2009-10-21 16:26:52 -07:00
void CodeAlloc : : flushICache ( void * start , size_t len ) {
2009-10-21 19:58:21 -07:00
__clear_cache ( ( char * ) start , ( char * ) start + len ) ;
2009-07-10 11:21:53 -07:00
}
2009-12-21 12:05:48 -08:00
# endif
2009-07-10 11:21:53 -07:00
# endif // AVMPLUS_MAC && NANOJIT_PPC
void CodeAlloc : : addBlock ( CodeList * & blocks , CodeList * b ) {
b - > next = blocks ;
blocks = b ;
}
    // Carve a freshly allocated chunk into one large free block plus a
    // tiny terminator block at the top, and register the terminator on
    // the heapblocks list.  Returns the large free block.
    CodeList* CodeAlloc::addMem(void* mem, size_t bytes) {
        CodeList* b = (CodeList*)mem;
        b->lower = 0;
        // Reserve sizeofMinBlock at the top of the chunk for the terminator.
        b->end = (NIns*) (uintptr_t(mem) + bytes - sizeofMinBlock);
        b->next = 0;
        b->isFree = true;

        // create a tiny terminator block, add to fragmented list, this way
        // all other blocks have a valid block at b->higher
        // NOTE(review): b->higher appears to alias b->end (union in CodeList),
        // so the terminator starts exactly where b's code area ends — confirm
        // against the CodeList declaration in CodeAlloc.h.
        CodeList* terminator = b->higher;
        b->terminator = terminator;
        terminator->lower = b;
        terminator->end = 0; // this is how we identify the terminator
        terminator->isFree = false;
        terminator->isExec = false;
        terminator->terminator = 0;
        debug_only(sanity_check();)

        // add terminator to heapblocks list so we can track whole blocks
        addBlock(heapblocks, terminator);
        return b;
    }
CodeList * CodeAlloc : : getBlock ( NIns * start , NIns * end ) {
CodeList * b = ( CodeList * ) ( uintptr_t ( start ) - offsetof ( CodeList , code ) ) ;
NanoAssert ( b - > end = = end & & b - > next = = 0 ) ; ( void ) end ;
return b ;
}
CodeList * CodeAlloc : : removeBlock ( CodeList * & blocks ) {
CodeList * b = blocks ;
2010-01-22 12:18:12 -08:00
NanoAssert ( b ! = NULL ) ;
2009-07-10 11:21:53 -07:00
blocks = b - > next ;
b - > next = 0 ;
return b ;
}
void CodeAlloc : : add ( CodeList * & blocks , NIns * start , NIns * end ) {
addBlock ( blocks , getBlock ( start , end ) ) ;
}
    /**
     * split a block by freeing the hole in the middle defined by [holeStart, holeEnd),
     * and adding the used prefix and suffix parts to the blocks CodeList.
     */
    void CodeAlloc::addRemainder(CodeList* &blocks, NIns* start, NIns* end, NIns* holeStart, NIns* holeEnd) {
        NanoAssert(start < end && start <= holeStart && holeStart <= holeEnd && holeEnd <= end);
        // shrink the hole by aligning holeStart forward and holeEnd backward
        holeStart = (NIns*) ((uintptr_t(holeStart) + sizeof(NIns*)-1) & ~(sizeof(NIns*)-1));
        holeEnd = (NIns*) (uintptr_t(holeEnd) & ~(sizeof(NIns*)-1));

        // A usable hole must fit a reusable free block plus the header of
        // the block that follows it, hence at least two minimal blocks.
        size_t minHole = minAllocSize;
        if (minHole < 2*sizeofMinBlock)
            minHole = 2*sizeofMinBlock;

        if (uintptr_t(holeEnd) - uintptr_t(holeStart) < minHole) {
            // the hole is too small to make a new free block and a new used block. just keep
            // the whole original block and don't free anything.
            add(blocks, start, end);
        } else if (holeStart == start && holeEnd == end) {
            // totally empty block. free whole start-end range
            this->free(start, end);
        } else if (holeStart == start) {
            // hole is lower-aligned with start, so just need one new block
            // b1 b2
            CodeList* b1 = getBlock(start, end);
            CodeList* b2 = (CodeList*) (uintptr_t(holeEnd) - offsetof(CodeList, code));
            b2->terminator = b1->terminator;
            b2->isFree = false;
            b2->next = 0;
            b2->higher = b1->higher;
            b2->lower = b1;
            b2->higher->lower = b2;
            b1->higher = b2;
            debug_only(sanity_check();)
            // b1 (the prefix, i.e. the hole) is freed; b2 (the suffix) stays used.
            this->free(b1->start(), b1->end);
            addBlock(blocks, b2);
        } else if (holeEnd == end) {
            // hole is right-aligned with end, just need one new block
            // todo
            NanoAssert(false);
        } else {
            // there's enough space left to split into three blocks (two new ones)
            CodeList* b1 = getBlock(start, end);
            CodeList* b2 = (CodeList*) holeStart;
            CodeList* b3 = (CodeList*) (uintptr_t(holeEnd) - offsetof(CodeList, code));
            b1->higher = b2;
            b2->lower = b1;
            b2->higher = b3;
            b2->isFree = false; // redundant, since we're about to free, but good hygiene
            b2->terminator = b1->terminator;
            b3->lower = b2;
            b3->end = end;
            b3->isFree = false;
            b3->higher->lower = b3;
            b3->terminator = b1->terminator;
            b2->next = 0;
            b3->next = 0;
            debug_only(sanity_check();)
            // b2 (the middle hole) is freed; b1 and b3 remain in use.
            this->free(b2->start(), b2->end);
            addBlock(blocks, b3);
            addBlock(blocks, b1);
        }
    }
2010-05-26 17:29:16 -07:00
#ifdef PERFM
    // This method is used only for profiling purposes.
    // See CodegenLIR::emitMD() in Tamarin for an example.
    size_t CodeAlloc::size(const CodeList* blocks) {
        size_t total = 0;
        for (const CodeList* b = blocks; b != 0; b = b->next)
            total += int((uintptr_t)b->end - (uintptr_t)b);
        return total;
    }
#endif
2009-07-15 16:50:01 -07:00
size_t CodeAlloc : : size ( ) {
return totalAllocated ;
}
2009-07-10 11:21:53 -07:00
// check that all block neighbors are correct
# ifdef _DEBUG
void CodeAlloc : : sanity_check ( ) {
for ( CodeList * hb = heapblocks ; hb ! = 0 ; hb = hb - > next ) {
NanoAssert ( hb - > higher = = 0 ) ;
for ( CodeList * b = hb - > lower ; b ! = 0 ; b = b - > lower ) {
NanoAssert ( b - > higher - > lower = = b ) ;
}
}
2009-07-15 16:50:01 -07:00
for ( CodeList * avail = this - > availblocks ; avail ; avail = avail - > next ) {
NanoAssert ( avail - > isFree & & avail - > size ( ) > = minAllocSize ) ;
}
# if CROSS_CHECK_FREE_LIST
for ( CodeList * term = heapblocks ; term ; term = term - > next ) {
for ( CodeList * hb = term - > lower ; hb ; hb = hb - > lower ) {
if ( hb - > isFree & & hb - > size ( ) > = minAllocSize ) {
bool found_on_avail = false ;
for ( CodeList * avail = this - > availblocks ; ! found_on_avail & & avail ; avail = avail - > next ) {
found_on_avail = avail = = hb ;
}
NanoAssert ( found_on_avail ) ;
}
}
}
for ( CodeList * avail = this - > availblocks ; avail ; avail = avail - > next ) {
bool found_in_heapblocks = false ;
for ( CodeList * term = heapblocks ; ! found_in_heapblocks & & term ; term = term - > next ) {
for ( CodeList * hb = term - > lower ; ! found_in_heapblocks & & hb ; hb = hb - > lower ) {
found_in_heapblocks = hb = = avail ;
}
}
NanoAssert ( found_in_heapblocks ) ;
}
# endif /* CROSS_CHECK_FREE_LIST */
2009-07-10 11:21:53 -07:00
}
# endif
2010-01-22 12:18:12 -08:00
void CodeAlloc : : markAllExec ( ) {
for ( CodeList * hb = heapblocks ; hb ! = NULL ; hb = hb - > next ) {
if ( ! hb - > isExec ) {
hb - > isExec = true ;
markCodeChunkExec ( firstBlock ( hb ) , bytesPerAlloc ) ;
}
}
}
2009-07-10 11:21:53 -07:00
}
# endif // FEATURE_NANOJIT