Backed out changeset 35777195800a, perf regression.

Graydon Hoare 2009-09-14 21:07:57 -07:00
parent 5d33edd2e9
commit 2521578c2d
10 changed files with 739 additions and 75 deletions

View File

@@ -2003,6 +2003,8 @@ CompileRegExpToAST(JSContext* cx, JSTokenStream* ts,
#ifdef JS_TRACER
typedef js::Vector<LIns *, 4, js::ContextAllocPolicy> LInsList;
/* Dummy GC for nanojit placement new. */
static GC gc;
static avmplus::AvmCore s_core = avmplus::AvmCore();
/* Return the cached fragment for the given regexp, or create one. */
@@ -3122,19 +3124,19 @@ class RegExpNativeCompiler {
if (alloc.outOfMemory())
goto fail;
/* FIXME Use bug 463260 smart pointer when available. */
lir = lirBufWriter = new (alloc) LirBufWriter(lirbuf);
lir = lirBufWriter = new (&gc) LirBufWriter(lirbuf);
/* FIXME Use bug 463260 smart pointer when available. */
#ifdef NJ_VERBOSE
debug_only_stmt(
if (js_LogController.lcbits & LC_TMRegexp) {
lir = verbose_filter = new (alloc) VerboseWriter(alloc, lir, lirbuf->names,
&js_LogController);
lir = verbose_filter = new (&gc) VerboseWriter(alloc, lir, lirbuf->names,
&js_LogController);
}
)
#endif
#ifdef DEBUG
lir = sanity_filter = new (alloc) SanityFilter(lir);
lir = sanity_filter = new (&gc) SanityFilter(lir);
#endif
/*
@@ -3176,14 +3178,28 @@ class RegExpNativeCompiler {
if (assm->error() != nanojit::None)
goto fail;
delete lirBufWriter;
#ifdef DEBUG
delete sanity_filter;
#endif
#ifdef NJ_VERBOSE
debug_only_stmt( if (js_LogController.lcbits & LC_TMRegexp)
delete verbose_filter; )
#endif
return JS_TRUE;
fail:
if (alloc.outOfMemory() || js_OverfullJITCache(tm)) {
delete lirBufWriter;
js_ResetJIT(cx);
} else {
if (!guard) insertGuard(loopLabel, re_chars, re_length);
re->flags |= JSREG_NOCOMPILE;
delete lirBufWriter;
}
#ifdef NJ_VERBOSE
debug_only_stmt( if (js_LogController.lcbits & LC_TMRegexp)
delete lir; )
#endif
return JS_FALSE;
}
};
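The new (&gc) forms restored above rely on the MMgc-compatibility shim added later in this commit: a placement operator new that accepts a GC* and ignores it, so a single file-scope dummy GC can serve every allocation site. Below is a minimal standalone sketch of that pattern, assuming nothing from the tree (DummyGC and Widget are illustrative names):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

class DummyGC {};                      // stand-in for MMgc::GC; never inspected

class Widget {
public:
    // Placement-style operator new keyed on a GC*, so call sites can
    // uniformly write `new (&gc) Widget` whether or not real MMgc is present.
    void* operator new(std::size_t size, DummyGC* /*gc*/) {
        return calloc(1, size);        // zeroed, like MMgc's kZero default
    }
    void operator delete(void* p) { free(p); }
    int value;
};

static DummyGC gc;                     // one file-scope dummy, as in jsregexp.cpp

int main() {
    Widget* w = new (&gc) Widget;      // routes through the shim
    w->value = 42;
    printf("%d\n", w->value);
    delete w;                          // ordinary delete releases via free()
    return 0;
}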

View File

@@ -270,6 +270,7 @@ js_InitJITStatsClass(JSContext *cx, JSObject *glob)
#define INS_NULL() INS_CONSTPTR(NULL)
#define INS_VOID() INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID))
static GC gc = GC();
static avmplus::AvmCore s_core = avmplus::AvmCore();
static avmplus::AvmCore* core = &s_core;
@@ -499,7 +500,7 @@ struct Tracker::Page*
Tracker::addPage(const void* v) {
jsuword base = getPageBase(v);
struct Tracker::Page* p = (struct Tracker::Page*)
calloc(1, sizeof(*p) - sizeof(p->map) + (NJ_PAGE_SIZE >> 2) * sizeof(LIns*));
GC::Alloc(sizeof(*p) - sizeof(p->map) + (NJ_PAGE_SIZE >> 2) * sizeof(LIns*));
p->base = base;
p->next = pagelist;
pagelist = p;
@@ -512,7 +513,7 @@ Tracker::clear()
while (pagelist) {
Page* p = pagelist;
pagelist = pagelist->next;
free(p);
GC::Free(p);
}
}
@@ -1702,60 +1703,37 @@ TrashTree(JSContext* cx, Fragment* f);
JS_REQUIRES_STACK
TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment,
TreeInfo* ti, unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
VMSideExit* innermostNestedGuard, jsbytecode* outer, uint32 outerArgc)
: cx(cx),
traceMonitor(&JS_TRACE_MONITOR(cx)),
alloc(*JS_TRACE_MONITOR(cx).allocator),
globalObj(JS_GetGlobalForObject(cx, cx->fp->scopeChain)),
lexicalBlock(cx->fp->blockChain),
entryTypeMap(NULL),
callDepth(_anchor ? _anchor->calldepth : 0),
atoms(FrameAtomBase(cx, cx->fp)),
anchor(_anchor),
fragment(_fragment),
treeInfo(ti),
lirbuf(_fragment->lirbuf),
lir(NULL),
lir_buf_writer(NULL),
verbose_filter(NULL),
cse_filter(NULL),
expr_filter(NULL),
func_filter(NULL),
float_filter(NULL),
#ifdef DEBUG
sanity_filter_1(NULL),
sanity_filter_2(NULL),
#endif
cx_ins(NULL),
eos_ins(NULL),
eor_ins(NULL),
rval_ins(NULL),
inner_sp_ins(NULL),
native_rval_ins(NULL),
newobj_ins(NULL),
deepAborted(false),
trashSelf(false),
whichTreesToTrash(&alloc),
cfgMerges(&alloc),
global_dslots(globalObj->dslots),
pendingSpecializedNative(NULL),
pendingUnboxSlot(NULL),
pendingGuardCondition(NULL),
nextRecorderToAbort(NULL),
wasRootFragment(_fragment == _fragment->root),
outer(outer),
outerArgc(outerArgc),
loop(true),
loopLabel(NULL) /* default assumption is we are compiling a loop */
TreeInfo* ti, unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
VMSideExit* innermostNestedGuard, jsbytecode* outer, uint32 outerArgc)
: whichTreesToTrash(JS_TRACE_MONITOR(cx).allocator),
cfgMerges(JS_TRACE_MONITOR(cx).allocator)
{
JS_ASSERT(!_fragment->vmprivate && ti && cx->fp->regs->pc == (jsbytecode*)_fragment->ip);
memset(&generatedSpecializedNative, 0, sizeof(generatedSpecializedNative));
/* Reset the fragment state we care about in case we got a recycled fragment. */
_fragment->lastIns = NULL;
this->cx = cx;
this->traceMonitor = &JS_TRACE_MONITOR(cx);
this->globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
this->lexicalBlock = cx->fp->blockChain;
this->anchor = _anchor;
this->fragment = _fragment;
this->lirbuf = _fragment->lirbuf;
this->treeInfo = ti;
this->callDepth = _anchor ? _anchor->calldepth : 0;
this->atoms = FrameAtomBase(cx, cx->fp);
this->deepAborted = false;
this->trashSelf = false;
this->global_dslots = this->globalObj->dslots;
this->loop = true; /* default assumption is we are compiling a loop */
this->wasRootFragment = _fragment == _fragment->root;
this->outer = outer;
this->outerArgc = outerArgc;
this->pendingSpecializedNative = NULL;
this->newobj_ins = NULL;
this->loopLabel = NULL;
#ifdef JS_JIT_SPEW
debug_only_print0(LC_TMMinimal, "\n");
debug_only_printf(LC_TMMinimal, "Recording starting from %s:%u@%u\n",
@@ -1794,24 +1772,26 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _frag
#endif
lir = lir_buf_writer = new (alloc) LirBufWriter(lirbuf);
lir = lir_buf_writer = new (&gc) LirBufWriter(lirbuf);
#ifdef DEBUG
lir = sanity_filter_1 = new (alloc) SanityFilter(lir);
lir = sanity_filter_1 = new (&gc) SanityFilter(lir);
#endif
debug_only_stmt(
if (js_LogController.lcbits & LC_TMRecorder) {
lir = verbose_filter
= new (alloc) VerboseWriter (alloc, lir, lirbuf->names,
&js_LogController);
= new (&gc) VerboseWriter(*traceMonitor->allocator, lir,
lirbuf->names, &js_LogController);
}
)
if (nanojit::AvmCore::config.soft_float)
lir = float_filter = new (alloc) SoftFloatFilter(lir);
lir = cse_filter = new (alloc) CseFilter(lir, alloc);
lir = expr_filter = new (alloc) ExprFilter(lir);
lir = func_filter = new (alloc) FuncFilter(lir);
lir = float_filter = new (&gc) SoftFloatFilter(lir);
else
float_filter = 0;
lir = cse_filter = new (&gc) CseFilter(lir, *traceMonitor->allocator);
lir = expr_filter = new (&gc) ExprFilter(lir);
lir = func_filter = new (&gc) FuncFilter(lir);
#ifdef DEBUG
lir = sanity_filter_2 = new (alloc) SanityFilter(lir);
lir = sanity_filter_2 = new (&gc) SanityFilter(lir);
#endif
lir->ins0(LIR_start);
@@ -1861,8 +1841,7 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _frag
}
}
void
TraceRecorder::trashTrees()
TraceRecorder::~TraceRecorder()
{
JS_ASSERT(nextRecorderToAbort == NULL);
JS_ASSERT(treeInfo && (fragment || wasDeepAborted()));
@@ -1882,6 +1861,16 @@ TraceRecorder::trashTrees()
for (unsigned int i = 0; i < whichTreesToTrash.length(); i++)
TrashTree(cx, whichTreesToTrash[i]);
}
#ifdef DEBUG
debug_only_stmt( delete verbose_filter; )
delete sanity_filter_1;
delete sanity_filter_2;
#endif
delete cse_filter;
delete expr_filter;
delete func_filter;
delete float_filter;
delete lir_buf_writer;
}
void
@@ -4575,7 +4564,7 @@ DeleteRecorder(JSContext* cx)
JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
/* Aborting and completing a trace end up here. */
tm->recorder->trashTrees();
delete tm->recorder;
tm->recorder = NULL;
/* If we ran out of memory, flush the code cache. */
@@ -4668,9 +4657,9 @@ StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti,
JS_ASSERT(f->root != f || !cx->fp->imacpc);
/* Start recording if no exception during construction. */
tm->recorder = new (*tm->allocator) TraceRecorder(cx, anchor, f, ti,
stackSlots, ngslots, typeMap,
expectedInnerExit, outer, outerArgc);
tm->recorder = new (&gc) TraceRecorder(cx, anchor, f, ti,
stackSlots, ngslots, typeMap,
expectedInnerExit, outer, outerArgc);
if (cx->throwing) {
js_AbortRecording(cx, "setting up recorder failed");
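Backing the patch out trades arena-owned LIR writers, which were freed wholesale when the trace monitor's Allocator died, for individually heap-owned writers that ~TraceRecorder must delete stage by stage. Here is a condensed sketch of that build-up/tear-down discipline, using stand-in writer classes rather than the real nanojit filters:

#include <cstdio>

// Stand-ins for nanojit's LirWriter filter chain.
struct Writer {
    Writer* out;
    explicit Writer(Writer* o) : out(o) {}
    virtual ~Writer() {}               // virtual, so delete-through-base is safe
};
struct BufWriter  : Writer { BufWriter() : Writer(NULL) {} };
struct CseFilter  : Writer { explicit CseFilter(Writer* o)  : Writer(o) {} };
struct ExprFilter : Writer { explicit ExprFilter(Writer* o) : Writer(o) {} };

struct Recorder {
    Writer*     lir;                   // head of the chain
    BufWriter*  buf;
    CseFilter*  cse;
    ExprFilter* expr;

    Recorder() {
        // Each filter wraps the previous head, as in the constructor above.
        lir = buf  = new BufWriter();
        lir = cse  = new CseFilter(lir);
        lir = expr = new ExprFilter(lir);
    }
    ~Recorder() {
        // With GC-heap allocation the recorder owns every stage and must free
        // each one; under the arena scheme these deletes simply disappear.
        delete expr;
        delete cse;
        delete buf;
    }
};

int main() {
    Recorder r;                        // builds the chain
    printf("head wraps %p\n", (void*)r.lir->out);
    return 0;                          // ~Recorder tears it down
}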

View File

@@ -686,10 +686,9 @@ enum TypeConsensus
TypeConsensus_Bad /* Typemaps are not compatible */
};
class TraceRecorder {
class TraceRecorder : public avmplus::GCObject {
JSContext* cx;
JSTraceMonitor* traceMonitor;
nanojit::Allocator& alloc;
JSObject* globalObj;
JSObject* lexicalBlock;
Tracker tracker;
@@ -982,7 +981,7 @@ public:
unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
VMSideExit* expectedInnerExit, jsbytecode* outerTree,
uint32 outerArgc);
void trashTrees();
~TraceRecorder();
static JS_REQUIRES_STACK JSRecordingStatus monitorRecording(JSContext* cx, TraceRecorder* tr,
JSOp op);

View File

@@ -241,7 +241,7 @@ namespace nanojit
Allocator& alloc;
CodeAlloc& _codeAlloc;
Fragment* _thisfrag;
DWB(Fragment*) _thisfrag;
RegAllocMap _branchStateMap;
NInsMap _patches;
LabelStateMap _labels;

View File

@@ -42,6 +42,9 @@
namespace nanojit
{
// Temporary tracemonkey hack until namespaces are sorted out.
using namespace MMgc;
/** return true if ptr is in the range [start, end) */
inline bool containsPtr(const NIns* start, const NIns* end, const NIns* ptr) {
return ptr >= start && ptr < end;

View File

@@ -51,6 +51,8 @@
*/
namespace nanojit
{
using namespace MMgc;
enum LOpcode
#if defined(_MSC_VER) && _MSC_VER >= 1400
#pragma warning(disable:4480) // nonstandard extension used: specifying underlying type for enum
@@ -1010,14 +1012,15 @@ namespace nanojit
return toLInsC()->ci;
}
class LirWriter
// make it a GCObject so we can explicitly delete it early
class LirWriter : public GCObject
{
public:
LirWriter *out;
virtual ~LirWriter() {}
LirWriter(LirWriter* out)
: out(out) {}
virtual ~LirWriter() {}
virtual LInsp ins0(LOpcode v) {
return out->ins0(v);

View File

@@ -67,7 +67,11 @@
namespace nanojit
{
#ifdef MMGC_SPARC
const int NJ_LOG2_PAGE_SIZE = 12; // 4K
#else
const int NJ_LOG2_PAGE_SIZE = 13; // 8K
#endif
const int NJ_MAX_REGISTERS = 30; // L0 - L7, I0 - I5, F2 - F14
const int LARGEST_UNDERRUN_PROT = 32; // largest value passed to underrunProtect

View File

@@ -48,6 +48,10 @@
using namespace avmplus;
Config AvmCore::config;
static GC _gc;
GC* AvmCore::gc = &_gc;
GCHeap GC::heap;
String* AvmCore::k_str[] = { (String*)"" };
void
avmplus::AvmLog(char const *msg, ...) {

View File

@@ -159,6 +159,138 @@ static __inline__ unsigned long long rdtsc(void)
struct JSContext;
namespace MMgc {
class GC;
class GCObject
{
public:
inline void*
operator new(size_t size, GC* gc)
{
return calloc(1, size);
}
static void operator delete (void *gcObject)
{
free(gcObject);
}
};
#define MMGC_SUBCLASS_DECL : public avmplus::GCObject
class GCFinalizedObject : public GCObject
{
public:
static void operator delete (void *gcObject)
{
free(gcObject);
}
};
class GCHeap
{
public:
int32_t kNativePageSize;
GCHeap()
{
#if defined _SC_PAGE_SIZE
kNativePageSize = sysconf(_SC_PAGE_SIZE);
#else
kNativePageSize = 4096; // @todo: what is this?
#endif
}
inline void*
Alloc(uint32_t pages)
{
#ifdef XP_WIN
return VirtualAlloc(NULL,
pages * kNativePageSize,
MEM_COMMIT | MEM_RESERVE,
PAGE_EXECUTE_READWRITE);
#elif defined AVMPLUS_UNIX
/**
* Don't use normal heap with mprotect+PROT_EXEC for executable code.
* SELinux and friends don't allow this.
*/
return mmap(NULL,
pages * kNativePageSize,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANON,
-1,
0);
#else
return valloc(pages * kNativePageSize);
#endif
}
inline void
Free(void* p, uint32_t pages)
{
#ifdef XP_WIN
VirtualFree(p, 0, MEM_RELEASE);
#elif defined AVMPLUS_UNIX
#if defined SOLARIS
munmap((char*)p, pages * kNativePageSize);
#else
munmap(p, pages * kNativePageSize);
#endif
#else
free(p);
#endif
}
};
class GC
{
static GCHeap heap;
public:
/**
* flags to be passed as second argument to alloc
*/
enum AllocFlags
{
kZero=1,
kContainsPointers=2,
kFinalize=4,
kRCObject=8
};
static inline void*
Alloc(uint32_t bytes, int flags=kZero)
{
if (flags & kZero)
return calloc(1, bytes);
else
return malloc(bytes);
}
static inline void
Free(void* p)
{
free(p);
}
static inline GCHeap*
GetGCHeap()
{
return &heap;
}
};
}
#define DWB(x) x
#define DRCWB(x) x
#define WB(gc, container, addr, value) do { *(addr) = (value); } while(0)
#define WBRC(gc, container, addr, value) do { *(addr) = (value); } while(0)
#define MMGC_MEM_TYPE(x)
#define VMPI_strlen strlen
#define VMPI_strcat strcat
#define VMPI_strcpy strcpy
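The shim above maps MMgc onto libc: GC::Alloc and GC::Free become calloc/malloc and free, while GCHeap::Alloc hands out whole pages that must come from VirtualAlloc or mmap with PROT_EXEC, because SELinux refuses to make ordinary heap pages executable. A small usage sketch of the same behavior, assuming a POSIX host:

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <unistd.h>
#include <sys/mman.h>

// Executable pages must come from mmap rather than malloc + mprotect,
// because SELinux and friends forbid making heap pages executable.
static void* alloc_code_pages(size_t pages, size_t pageSize) {
    return mmap(NULL, pages * pageSize,
                PROT_READ | PROT_WRITE | PROT_EXEC,
                MAP_PRIVATE | MAP_ANON, -1, 0);
}

int main() {
    size_t pageSize = (size_t)sysconf(_SC_PAGE_SIZE);

    // GC::Alloc(bytes, kZero) is calloc; GC::Free is free.
    char* meta = (char*)calloc(1, 64);
    strcpy(meta, "fragment metadata");
    printf("%s\n", meta);
    free(meta);

    // GCHeap::Alloc(pages) is an executable mmap; GCHeap::Free is munmap.
    void* code = alloc_code_pages(1, pageSize);
    if (code != MAP_FAILED) {
        memset(code, 0x90, 16);        // e.g. x86 NOPs
        munmap(code, 1 * pageSize);
    }
    return 0;
}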
@@ -172,10 +304,42 @@ extern void VMPI_setPageProtection(void *address,
namespace avmplus {
using namespace MMgc;
typedef int FunctionID;
extern void AvmLog(char const *msg, ...);
class String
{
};
typedef class String AvmString;
class StringNullTerminatedUTF8
{
const char* cstr;
public:
StringNullTerminatedUTF8(GC* gc, String* s)
{
cstr = strdup((const char*)s);
}
~StringNullTerminatedUTF8()
{
free((void*)cstr);
}
inline
const char* c_str()
{
return cstr;
}
};
typedef String* Stringp;
class Config
{
public:
@@ -286,6 +450,8 @@ namespace avmplus {
AvmConsole console;
static Config config;
static GC* gc;
static String* k_str[];
#ifdef AVMPLUS_IA32
static inline bool
@@ -317,6 +483,19 @@ namespace avmplus {
return config.verbose;
}
static inline GC*
GetGC()
{
return gc;
}
static inline String* newString(const char* cstr) {
return (String*)strdup(cstr);
}
static inline void freeString(String* str) {
return free((char*)str);
}
};
class OSDep
@@ -328,6 +507,433 @@ namespace avmplus {
}
};
/**
* The List<T> template implements a simple List, which can
* be templated to support different types.
*
* Elements can be added to the end, modified in the middle,
* but no holes are allowed: for set(n, v) to work,
* size() > n must already hold.
*
* Note that [] operators are provided and you can violate the
* set properties using them: if you want a real list, don't
* use the [] operators; if you want a general-purpose array,
* use the [] operators.
*/
enum ListElementType {
LIST_NonGCObjects = 0,
LIST_GCObjects = 1,
LIST_RCObjects = 2
};
template <typename T, ListElementType kElementType>
class List
{
public:
enum { kInitialCapacity = 128 };
List(GC *_gc, uint32_t _capacity=kInitialCapacity) : data(NULL), len(0), capacity(0)
{
ensureCapacity(_capacity);
}
~List()
{
//clear();
destroy();
// zero out in case we are part of an RCObject
len = 0;
}
inline void destroy()
{
if (data)
free(data);
}
const T *getData() const { return data; }
// 'this' steals the guts of 'that' and 'that' gets reset.
void become(List& that)
{
this->destroy();
this->data = that.data;
this->len = that.len;
this->capacity = that.capacity;
that.data = 0;
that.len = 0;
that.capacity = 0;
}
uint32_t add(T value)
{
if (len >= capacity) {
grow();
}
wb(len++, value);
return len-1;
}
inline bool isEmpty() const
{
return len == 0;
}
inline uint32_t size() const
{
return len;
}
inline T get(uint32_t index) const
{
AvmAssert(index < len);
return *(T*)(data + index);
}
void set(uint32_t index, T value)
{
AvmAssert(index < capacity);
if (index >= len)
{
len = index+1;
}
AvmAssert(len <= capacity);
wb(index, value);
}
void add(const List<T, kElementType>& l)
{
ensureCapacity(len+l.size());
// FIXME: make RCObject version
AvmAssert(kElementType != LIST_RCObjects);
arraycopy(l.getData(), 0, data, len, l.size());
len += l.size();
}
inline void clear()
{
zero_range(0, len);
len = 0;
}
int indexOf(T value) const
{
for(uint32_t i=0; i<len; i++)
if (get(i) == value)
return i;
return -1;
}
int lastIndexOf(T value) const
{
for(int32_t i=len-1; i>=0; i--)
if (get(i) == value)
return i;
return -1;
}
inline T last() const
{
return get(len-1);
}
T removeLast()
{
if(isEmpty())
return undef_list_val();
T t = get(len-1);
set(len-1, undef_list_val());
len--;
return t;
}
inline T operator[](uint32_t index) const
{
AvmAssert(index < capacity);
return get(index);
}
void ensureCapacity(uint32_t cap)
{
if (cap > capacity) {
if (data == NULL) {
data = (T*)calloc(1, factor(cap));
} else {
data = (T*)realloc(data, factor(cap));
zero_range(capacity, cap - capacity);
}
capacity = cap;
}
}
void insert(uint32_t index, T value, uint32_t count = 1)
{
AvmAssert(index <= len);
AvmAssert(count > 0);
ensureCapacity(len+count);
memmove(data + index + count, data + index, factor(len - index));
wbzm(index, index+count, value);
len += count;
}
T removeAt(uint32_t index)
{
T old = get(index);
// dec the refcount on the one we're removing
wb(index, undef_list_val());
memmove(data + index, data + index + 1, factor(len - index - 1));
len--;
return old;
}
private:
void grow()
{
// growth is fast at first, then slows at larger list sizes.
uint32_t newMax = 0;
const uint32_t curMax = capacity;
if (curMax == 0)
newMax = kInitialCapacity;
else if(curMax > 15)
newMax = curMax * 3/2;
else
newMax = curMax * 2;
ensureCapacity(newMax);
}
void arraycopy(const T* src, int srcStart, T* dst, int dstStart, int nbr)
{
// we have 2 cases, either closing a gap or opening it.
if ((src == dst) && (srcStart > dstStart) )
{
for(int i=0; i<nbr; i++)
dst[i+dstStart] = src[i+srcStart];
}
else
{
for(int i=nbr-1; i>=0; i--)
dst[i+dstStart] = src[i+srcStart];
}
}
inline void do_wb_nongc(T* slot, T value)
{
*slot = value;
}
inline void do_wb_gc(GCObject** slot, const GCObject** value)
{
*slot = (GCObject*)*value;
}
void wb(uint32_t index, T value)
{
AvmAssert(index < capacity);
AvmAssert(data != NULL);
T* slot = &data[index];
do_wb_nongc(slot, value);
}
// multiple wb calls with the same value, assuming the existing values are all zero bits,
// like
// for (uint32_t u = index; u < index_end; ++u)
// wb(u, value);
void wbzm(uint32_t index, uint32_t index_end, T value)
{
AvmAssert(index < capacity);
AvmAssert(index_end <= capacity);
AvmAssert(index < index_end);
AvmAssert(data != NULL);
T* slot = data + index;
for ( ; index < index_end; ++index, ++slot)
do_wb_nongc(slot, value);
}
inline uint32_t factor(uint32_t index) const
{
return index * sizeof(T);
}
void zero_range(uint32_t _first, uint32_t _count)
{
memset(data + _first, 0, factor(_count));
}
// stuff that needs specialization based on the type
static inline T undef_list_val();
private:
List(const List& toCopy); // unimplemented
void operator=(const List& that); // unimplemented
// ------------------------ DATA SECTION BEGIN
private:
T* data;
uint32_t len;
uint32_t capacity;
// ------------------------ DATA SECTION END
};
// stuff that needs specialization based on the type
template<typename T, ListElementType kElementType>
/* static */ inline T List<T, kElementType>::undef_list_val() { return T(0); }
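A quick usage sketch of this List shim (hypothetical usage, not part of the commit); the GC* constructor argument exists only for source compatibility with real MMgc and is ignored here, so NULL suffices:

// Assuming the avmplus.h shim above compiles standalone.
#include "avmplus.h"
#include <cstdio>

using namespace avmplus;

int main() {
    List<int, LIST_NonGCObjects> xs(NULL);
    xs.add(3);
    xs.add(1);
    xs.add(4);
    xs.set(1, 10);                     // overwrite in the middle; no holes
    printf("size=%u last=%d indexOf(4)=%d\n",
           xs.size(), xs.last(), xs.indexOf(4));
    xs.clear();                        // zeroes the range and resets length
    return 0;
}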
/**
* The SortedMap<K,T> template implements an object that
* maps keys to values. The keys are sorted
* from smallest to largest in the map. Time of operations
* is as follows:
* put() is O(1) if the key is higher than any existing
* key; O(logN) if the key already exists,
* and O(N) otherwise.
* get() is an O(logN) binary search.
*
* no duplicates are allowed.
*/
template <class K, class T, ListElementType valType>
class SortedMap : public GCObject
{
public:
enum { kInitialCapacity= 64 };
SortedMap(GC* gc, int _capacity=kInitialCapacity)
: keys(gc, _capacity), values(gc, _capacity)
{
}
bool isEmpty() const
{
return keys.size() == 0;
}
int size() const
{
return keys.size();
}
void clear()
{
keys.clear();
values.clear();
}
void destroy()
{
keys.destroy();
values.destroy();
}
T put(K k, T v)
{
if (keys.size() == 0 || k > keys.last())
{
keys.add(k);
values.add(v);
return (T)v;
}
else
{
int i = find(k);
if (i >= 0)
{
T old = values[i];
keys.set(i, k);
values.set(i, v);
return old;
}
else
{
i = -i - 1; // recover the insertion point
AvmAssert(keys.size() != (uint32_t)i);
keys.insert(i, k);
values.insert(i, v);
return v;
}
}
}
T get(K k) const
{
int i = find(k);
return i >= 0 ? values[i] : 0;
}
bool get(K k, T& v) const
{
int i = find(k);
if (i >= 0)
{
v = values[i];
return true;
}
return false;
}
bool containsKey(K k) const
{
int i = find(k);
return (i >= 0) ? true : false;
}
T remove(K k)
{
int i = find(k);
return removeAt(i);
}
T removeAt(int i)
{
T old = values.removeAt(i);
keys.removeAt(i);
return old;
}
T removeFirst() { return isEmpty() ? (T)0 : removeAt(0); }
T removeLast() { return isEmpty() ? (T)0 : removeAt(keys.size()-1); }
T first() const { return isEmpty() ? (T)0 : values[0]; }
T last() const { return isEmpty() ? (T)0 : values[keys.size()-1]; }
K firstKey() const { return isEmpty() ? 0 : keys[0]; }
K lastKey() const { return isEmpty() ? 0 : keys[keys.size()-1]; }
// iterator
T at(int i) const { return values[i]; }
K keyAt(int i) const { return keys[i]; }
int findNear(K k) const {
int i = find(k);
return i >= 0 ? i : -i-2;
}
protected:
List<K, LIST_NonGCObjects> keys;
List<T, valType> values;
int find(K k) const
{
int lo = 0;
int hi = keys.size()-1;
while (lo <= hi)
{
int i = (lo + hi)/2;
K m = keys[i];
if (k > m)
lo = i + 1;
else if (k < m)
hi = i - 1;
else
return i; // key found
}
return -(lo + 1); // key not found, low is the insertion point
}
};
#define GCSortedMap SortedMap
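And a matching sketch for SortedMap (again hypothetical usage, not from the commit): keys stay sorted, put returns the stored or displaced value, and lookups are O(log N) binary searches:

#include "avmplus.h"                   // the shim above
#include <cstdio>

using namespace avmplus;

int main() {
    SortedMap<int, const char*, LIST_NonGCObjects> m(NULL);

    m.put(30, "thirty");               // highest key so far: appended in O(1)
    m.put(10, "ten");                  // inserted at the front: O(N)
    m.put(20, "twenty");               // inserted in between: O(N)

    const char* v;
    if (m.get(20, v))
        printf("20 -> %s\n", v);                       // twenty
    printf("first=%s last=%s\n", m.first(), m.last()); // ten thirty

    m.remove(10);
    printf("size=%d\n", m.size());                     // 2
    return 0;
}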
/**
* Bit vectors are an efficient method of keeping True/False information
* on a set of items or conditions. Class BitSet provides functions

View File

@@ -60,6 +60,45 @@
#error "unknown nanojit architecture"
#endif
/*
If we're using MMGC, using operator delete on a GCFinalizedObject is problematic:
in particular, calling it from inside a dtor is risky because the dtor for the sub-object
might already have been called, wrecking its vtable and ending up in the wrong version
of operator delete (the global version rather than the class-specific one). Calling GC::Free
directly is fine (since it ignores the vtable), so we macro-ize to make the distinction.
Macro-ization of operator new isn't strictly necessary, but is done to bottleneck both
sides of the new/delete pair to forestall future needs.
*/
#ifdef MMGC_API
// separate overloads because GCObject and GCFinalizedObject have different dtors
// (GCFinalizedObject's is virtual, GCObject's is not)
inline void mmgc_delete(GCObject* o)
{
GC* g = GC::GetGC(o);
if (g->Collecting())
g->Free(o);
else
delete o;
}
inline void mmgc_delete(GCFinalizedObject* o)
{
GC* g = GC::GetGC(o);
if (g->Collecting())
g->Free(o);
else
delete o;
}
#define NJ_NEW(gc, cls) new (gc) cls
#define NJ_DELETE(obj) do { mmgc_delete(obj); } while (0)
#else
#define NJ_NEW(gc, cls) new (gc) cls
#define NJ_DELETE(obj) do { delete obj; } while (0)
#endif
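Call sites then allocate and release through the macro pair so the same code links against either real MMgc or the shim. A hedged sketch, assuming the headers above (the Thing class and example function are illustrative):

// Illustrative only; `gc` is the usual MMgc::GC* (or the shim's dummy).
class Thing : public avmplus::GCObject {
public:
    explicit Thing(int id) : id(id) {}
    int id;
};

void example(avmplus::GC* gc) {
    Thing* t = NJ_NEW(gc, Thing)(7);   // expands to: new (gc) Thing(7)
    // ... use t ...
    NJ_DELETE(t);                      // plain delete here; mmgc_delete under MMGC_API
}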
// Embed no-op macros that let Valgrind work with the JIT.
#ifdef MOZ_VALGRIND
# define JS_VALGRIND
@@ -80,6 +119,7 @@ namespace nanojit
class Fragment;
typedef avmplus::AvmCore AvmCore;
typedef avmplus::OSDep OSDep;
typedef avmplus::GCSortedMap<const void*,Fragment*,avmplus::LIST_GCObjects> FragmentMap;
const uint32_t MAXARGS = 8;
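For context, the FragmentMap typedef puts the SortedMap shim to work as nanojit's ip-to-fragment cache. A hypothetical helper pair showing the intended use (remember and lookup are illustrative names, assuming the surrounding headers):

static void remember(nanojit::FragmentMap& frags,
                     const void* ip, nanojit::Fragment* frag) {
    frags.put(ip, frag);               // keys kept sorted by ip
}

static nanojit::Fragment* lookup(nanojit::FragmentMap& frags, const void* ip) {
    return frags.get(ip);              // binary search; NULL when absent
}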