Mirror of https://gitlab.winehq.org/wine/wine-gecko.git (synced 2024-09-13 09:24:08 -07:00)

commit 4d69e4136d
Merge.
@@ -955,6 +955,25 @@ js_Array_dense_setelem_int(JSContext* cx, JSObject* obj, jsint i, int32 j)
     return dense_grow(cx, obj, i, v);
 }
 JS_DEFINE_CALLINFO_4(extern, BOOL, js_Array_dense_setelem_int, CONTEXT, OBJECT, INT32, INT32, 0, 0)
+
+JSBool FASTCALL
+js_Array_dense_setelem_double(JSContext* cx, JSObject* obj, jsint i, jsdouble d)
+{
+    JS_ASSERT(OBJ_IS_DENSE_ARRAY(cx, obj));
+
+    jsval v;
+    jsint j;
+
+    if (JS_LIKELY(JSDOUBLE_IS_INT(d, j) && INT_FITS_IN_JSVAL(j))) {
+        v = INT_TO_JSVAL(j);
+    } else {
+        if (!js_NewDoubleInRootedValue(cx, d, &v))
+            return JS_FALSE;
+    }
+
+    return dense_grow(cx, obj, i, v);
+}
+JS_DEFINE_CALLINFO_4(extern, BOOL, js_Array_dense_setelem_double, CONTEXT, OBJECT, INT32, DOUBLE, 0, 0)
 #endif
 
 static JSBool
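Note: the new js_Array_dense_setelem_double builtin above stores an integer jsval whenever the incoming double is exactly representable as a tagged integer, and only allocates a boxed double otherwise. A minimal standalone sketch of that check in plain C++ (the range constants are illustrative stand-ins for SpiderMonkey's INT_FITS_IN_JSVAL limit, not the real macros):

    #include <cmath>
    #include <cstdint>

    // Returns true and writes *out when d can be stored as a small tagged int.
    static bool double_fits_small_int(double d, int32_t* out)
    {
        // Bound-check before converting; out-of-range casts are undefined.
        // A tagged jsval reserves a type bit, so the range is narrower than int32.
        const double kMin = -1073741824.0;   // -2^30, illustrative
        const double kMax = 1073741823.0;    //  2^30 - 1, illustrative
        if (!(d >= kMin && d <= kMax))       // also rejects NaN
            return false;
        int32_t i = static_cast<int32_t>(d);
        if (static_cast<double>(i) != d)     // must round-trip exactly
            return false;
        if (i == 0 && std::signbit(d))       // -0.0 must remain a double
            return false;
        *out = i;
        return true;
    }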
@@ -453,6 +453,7 @@ JS_DECLARE_CALLINFO(js_NewInstance)
 /* Defined in jsarray.cpp. */
 JS_DECLARE_CALLINFO(js_Array_dense_setelem)
 JS_DECLARE_CALLINFO(js_Array_dense_setelem_int)
+JS_DECLARE_CALLINFO(js_Array_dense_setelem_double)
 JS_DECLARE_CALLINFO(js_NewEmptyArray)
 JS_DECLARE_CALLINFO(js_NewUninitializedArray)
 JS_DECLARE_CALLINFO(js_ArrayCompPush)
@@ -98,8 +98,11 @@ namespace nanojit {
     class Assembler;
     class CodeAlloc;
     class Fragment;
-    class Fragmento;
     class LirBuffer;
+#ifdef DEBUG
+    class LabelMap;
+#endif
+    extern "C++" { template<typename K, typename V, typename H> class HashMap; }
 }
 class TraceRecorder;
 class VMAllocator;
@@ -114,6 +117,12 @@ typedef Queue<uint16> SlotList;
 #define FRAGMENT_TABLE_SIZE 512
 struct VMFragment;
 
+#ifdef __cplusplus
+struct REHashKey;
+struct REHashFn;
+typedef nanojit::HashMap<REHashKey, nanojit::Fragment*, REHashFn> REHashMap;
+#endif
+
 #define MONITOR_N_GLOBAL_STATES 4
 struct GlobalState {
     JSObject* globalObj;
@@ -141,10 +150,13 @@ struct JSTraceMonitor {
     JSContext *tracecx;
 
     CLS(nanojit::LirBuffer) lirbuf;
-    CLS(nanojit::Fragmento) fragmento;
     CLS(VMAllocator) allocator; // A chunk allocator for LIR.
     CLS(nanojit::CodeAlloc) codeAlloc; // A general allocator for native code.
     CLS(nanojit::Assembler) assembler;
+#ifdef DEBUG
+    CLS(nanojit::LabelMap) labels;
+#endif
+
     CLS(TraceRecorder) recorder;
     jsval *reservedDoublePool;
     jsval *reservedDoublePoolPtr;
@@ -2780,7 +2780,7 @@ js_Interpret(JSContext *cx)
            the recorder to be destroyed when we return. */
         if (tr) {
             if (tr->wasDeepAborted())
-                tr->removeFragmentoReferences();
+                tr->removeFragmentReferences();
             else
                 tr->pushAbortStack();
         }
@@ -2005,42 +2005,35 @@ typedef JSTempVector<LIns *> LInsList;
 
 /* Dummy GC for nanojit placement new. */
 static GC gc;
+static avmplus::AvmCore s_core = avmplus::AvmCore();
+static avmplus::AvmCore* core = &s_core;
 
-static void *
-HashRegExp(uint16 flags, const jschar *s, size_t n)
-{
-    uint32 h;
-
-    for (h = 0; n; s++, n--)
-        h = JS_ROTATE_LEFT32(h, 4) ^ *s;
-    return (void *)(h + flags);
-}
-
-struct RESideExit : public SideExit {
-    size_t re_length;
-    uint16 re_flags;
-    jschar re_chars[1];
-};
-
-/* Return the cached fragment for the given regexp, or NULL. */
+/* Return the cached fragment for the given regexp, or create one. */
 static Fragment*
-LookupNativeRegExp(JSContext* cx, void* hash, uint16 re_flags,
+LookupNativeRegExp(JSContext* cx, uint16 re_flags,
                    const jschar* re_chars, size_t re_length)
 {
-    Fragmento* fragmento = JS_TRACE_MONITOR(cx).reFragmento;
-    Fragment* fragment = fragmento->getLoop(hash);
-    while (fragment) {
-        if (fragment->lastIns) {
-            RESideExit *exit = (RESideExit*)fragment->lastIns->record()->exit;
-            if (exit->re_flags == re_flags &&
-                exit->re_length == re_length &&
-                !memcmp(exit->re_chars, re_chars, re_length * sizeof(jschar))) {
-                return fragment;
-            }
-        }
-        fragment = fragment->peer;
+    JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
+    VMAllocator &alloc = *tm->reAllocator;
+    REHashMap &table = *tm->reFragments;
+
+    REHashKey k(re_length, re_flags, re_chars);
+    Fragment *frag = table.get(k);
+
+    if (!frag) {
+        frag = new (alloc) Fragment(0);
+        frag->lirbuf = tm->reLirBuf;
+        frag->root = frag;
+        /*
+         * Copy the re_chars portion of the hash key into the Allocator, so
+         * its lifecycle is disconnected from the lifecycle of the
+         * underlying regexp.
+         */
+        k.re_chars = (const jschar*) new (alloc) jschar[re_length];
+        memcpy((void*) k.re_chars, re_chars, re_length * sizeof(jschar));
+        table.put(k, frag);
     }
-    return NULL;
+    return frag;
 }
 
 static JSBool
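Note: the rewritten LookupNativeRegExp above replaces the hand-rolled hash plus peer-list scan with a single lookup-or-create against a hash map keyed by (length, flags, source characters), copying the key's characters into the arena so the cache entry outlives the JSRegExp. A rough equivalent using std::unordered_map as a stand-in for nanojit::HashMap (all names here are invented for the illustration):

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    struct Fragment { /* stands in for a compiled-code handle */ };

    struct ReKey {
        uint16_t flags;
        std::string chars;   // owned copy, playing the role of the arena copy above
        bool operator==(const ReKey& o) const {
            return flags == o.flags && chars == o.chars;
        }
    };

    struct ReKeyHash {
        size_t operator()(const ReKey& k) const {
            return std::hash<std::string>()(k.chars) + k.flags;
        }
    };

    using ReTable = std::unordered_map<ReKey, Fragment*, ReKeyHash>;

    // Return the cached fragment for (flags, source), creating an empty one on
    // first use; the caller compiles native code into it later, as above.
    Fragment* lookupOrCreate(ReTable& table, uint16_t flags,
                             const char* src, size_t len)
    {
        ReKey k{flags, std::string(src, len)};
        Fragment*& slot = table[k];          // default-inserts nullptr
        if (!slot)
            slot = new Fragment();           // arena-allocated in the real code
        return slot;
    }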
@@ -3065,16 +3058,13 @@ class RegExpNativeCompiler {
     GuardRecord* insertGuard(const jschar* re_chars, size_t re_length)
     {
         LIns* skip = lirBufWriter->insSkip(sizeof(GuardRecord) +
-                                           sizeof(RESideExit) +
+                                           sizeof(SideExit) +
                                            (re_length-1) * sizeof(jschar));
         GuardRecord* guard = (GuardRecord *) skip->payload();
         memset(guard, 0, sizeof(*guard));
-        RESideExit* exit = (RESideExit*)(guard+1);
+        SideExit* exit = (SideExit*)(guard+1);
         guard->exit = exit;
         guard->exit->target = fragment;
-        exit->re_flags = re->flags;
-        exit->re_length = re_length;
-        memcpy(exit->re_chars, re_chars, re_length * sizeof(jschar));
         fragment->lastIns = lir->insGuard(LIR_loop, NULL, skip);
         return guard;
     }
@@ -3092,7 +3082,6 @@ class RegExpNativeCompiler {
         size_t re_length;
         JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
         Assembler *assm = tm->reAssembler;
-        Fragmento* fragmento = tm->reFragmento;
         VMAllocator& alloc = *tm->reAllocator;
 
         re->source->getCharsAndLength(re_chars, re_length);
@@ -3148,7 +3137,7 @@ class RegExpNativeCompiler {
 
         if (alloc.outOfMemory())
             goto fail;
-        ::compile(assm, fragment, alloc verbose_only(, fragmento->labels));
+        ::compile(assm, fragment, alloc verbose_only(, tm->reLabels));
         if (assm->error() != nanojit::None) {
             oom = assm->error() == nanojit::OutOMem;
             goto fail;
@@ -3162,20 +3151,25 @@ class RegExpNativeCompiler {
         return JS_TRUE;
     fail:
         if (alloc.outOfMemory() || oom ||
-            js_OverfullFragmento(tm, fragmento)) {
-            fragmento->clearFrags();
+            js_OverfullJITCache(tm, true)) {
+            delete lirBufWriter;
             tm->reCodeAlloc->sweep();
             alloc.reset();
+            tm->reFragments = new (alloc) REHashMap(alloc);
+            tm->reLirBuf = new (alloc) LirBuffer(alloc);
 #ifdef DEBUG
-            fragmento->labels = new (alloc) LabelMap(alloc, &js_LogController);
-            lirbuf->names = new (alloc) LirNameMap(alloc, fragmento->labels);
+            tm->reLabels = new (alloc) LabelMap(alloc, &js_LogController);
+            tm->reLirBuf->names = new (alloc) LirNameMap(alloc, tm->reLabels);
+            tm->reAssembler = new (alloc) Assembler(*tm->reCodeAlloc, alloc, core,
+                                                    &js_LogController);
+#else
+            tm->reAssembler = new (alloc) Assembler(*tm->reCodeAlloc, alloc, core, NULL);
 #endif
-            lirbuf->clear();
         } else {
             if (!guard) insertGuard(re_chars, re_length);
             re->flags |= JSREG_NOCOMPILE;
+            delete lirBufWriter;
         }
-        delete lirBufWriter;
 #ifdef NJ_VERBOSE
         debug_only_stmt( if (js_LogController.lcbits & LC_TMRegexp)
                              delete lir; )
@@ -3216,19 +3210,11 @@ typedef void *(FASTCALL *NativeRegExp)(REGlobalData*, const jschar *);
 static NativeRegExp
 GetNativeRegExp(JSContext* cx, JSRegExp* re)
 {
-    Fragment *fragment;
     const jschar *re_chars;
     size_t re_length;
-    Fragmento* fragmento = JS_TRACE_MONITOR(cx).reFragmento;
-
     re->source->getCharsAndLength(re_chars, re_length);
-    void* hash = HashRegExp(re->flags, re_chars, re_length);
-    fragment = LookupNativeRegExp(cx, hash, re->flags, re_chars, re_length);
-    if (!fragment) {
-        fragment = fragmento->getAnchor(hash);
-        fragment->lirbuf = JS_TRACE_MONITOR(cx).reLirBuf;
-        fragment->root = fragment;
-    }
+    Fragment *fragment = LookupNativeRegExp(cx, re->flags, re_chars, re_length);
+    JS_ASSERT(fragment);
     if (!fragment->code()) {
         if (!CompileRegExpToNative(cx, re, fragment))
             return NULL;
@@ -865,7 +865,7 @@ getLoop(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc)
 static Fragment*
 getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc)
 {
-    VMFragment *f = new (&gc) VMFragment(ip, globalObj, globalShape, argc);
+    VMFragment *f = new (*tm->allocator) VMFragment(ip, globalObj, globalShape, argc);
     JS_ASSERT(f);
 
     Fragment *p = getVMFragment(tm, ip, globalObj, globalShape, argc);
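Note: getAnchor now allocates fragments with `new (*tm->allocator) VMFragment(...)` instead of the garbage-collected `new (&gc)`. The idiom relies on a placement operator new that bumps a pointer inside an arena; objects are never destroyed one by one, the whole arena is released at once (see the JSTraceMonitor::flush() hunk further down). A self-contained sketch of the idiom, not the actual nanojit::Allocator interface:

    #include <cstddef>
    #include <new>

    class Arena {
        char buf[1 << 16];    // one fixed chunk; real allocators chain chunks
        size_t used = 0;
    public:
        void* alloc(size_t n) {
            n = (n + 7) & ~size_t(7);        // keep 8-byte alignment
            void* p = buf + used;            // overflow check omitted for brevity
            used += n;
            return p;
        }
        void reset() { used = 0; }           // drops every allocation at once
    };

    inline void* operator new(size_t n, Arena& a) { return a.alloc(n); }

    struct VMFragmentSketch {
        const void* ip;
        explicit VMFragmentSketch(const void* ip) : ip(ip) {}
    };

    void example(Arena& arena, const void* pc)
    {
        // Lives until arena.reset(); no matching delete is ever issued.
        VMFragmentSketch* f = new (arena) VMFragmentSketch(pc);
        (void)f;
    }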
@@ -1683,6 +1683,8 @@ JS_REQUIRES_STACK
 TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment,
         TreeInfo* ti, unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
         VMSideExit* innermostNestedGuard, jsbytecode* outer, uint32 outerArgc)
+    : whichTreesToTrash(JS_TRACE_MONITOR(cx).allocator),
+      cfgMerges(JS_TRACE_MONITOR(cx).allocator)
 {
     JS_ASSERT(!_fragment->vmprivate && ti && cx->fp->regs->pc == (jsbytecode*)_fragment->ip);
 
@@ -1803,17 +1805,6 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _frag
     }
 }
 
-TreeInfo::~TreeInfo()
-{
-    UnstableExit* temp;
-
-    while (unstableExits) {
-        temp = unstableExits->next;
-        delete unstableExits;
-        unstableExits = temp;
-    }
-}
-
 TraceRecorder::~TraceRecorder()
 {
     JS_ASSERT(nextRecorderToAbort == NULL);
@@ -1827,18 +1818,12 @@ TraceRecorder::~TraceRecorder()
     }
 #endif
     if (fragment) {
-        if (wasRootFragment && !fragment->root->code()) {
-            JS_ASSERT(!fragment->root->vmprivate);
-            delete treeInfo;
-        }
-
         if (trashSelf)
             TrashTree(cx, fragment->root);
 
         for (unsigned int i = 0; i < whichTreesToTrash.length(); i++)
             TrashTree(cx, whichTreesToTrash[i]);
-    } else if (wasRootFragment) {
-        delete treeInfo;
     }
 #ifdef DEBUG
     debug_only_stmt( delete verbose_filter; )
@@ -1852,7 +1837,7 @@ TraceRecorder::~TraceRecorder()
 }
 
 void
-TraceRecorder::removeFragmentoReferences()
+TraceRecorder::removeFragmentReferences()
 {
     fragment = NULL;
 }
@@ -2112,35 +2097,29 @@ oom:
 void
 JSTraceMonitor::flush()
 {
-    if (fragmento) {
-        fragmento->clearFrags();
-        for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
-            VMFragment* f = vmfragments[i];
-            while (f) {
-                VMFragment* next = f->next;
-                fragmento->clearFragment(f);
-                f = next;
-            }
-            vmfragments[i] = NULL;
-        }
-        for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
-            globalStates[i].globalShape = -1;
-            globalStates[i].globalSlots->clear();
-        }
-    }
+    memset(&vmfragments[0], 0,
+           FRAGMENT_TABLE_SIZE * sizeof(VMFragment*));
 
     allocator->reset();
     codeAlloc->sweep();
 
-#ifdef DEBUG
-    JS_ASSERT(fragmento);
-    JS_ASSERT(fragmento->labels);
     Allocator& alloc = *allocator;
-    fragmento->labels = new (alloc) LabelMap(alloc, &js_LogController);
-    lirbuf->names = new (alloc) LirNameMap(alloc, fragmento->labels);
+
+    for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
+        globalStates[i].globalShape = -1;
+        globalStates[i].globalSlots = new (alloc) SlotList(allocator);
+    }
+
+    assembler = new (alloc) Assembler(*codeAlloc, alloc, core,
+                                      &js_LogController);
+    lirbuf = new (alloc) LirBuffer(alloc);
+
+#ifdef DEBUG
+    JS_ASSERT(labels);
+    labels = new (alloc) LabelMap(alloc, &js_LogController);
+    lirbuf->names = new (alloc) LirNameMap(alloc, labels);
 #endif
 
-    lirbuf->clear();
     needFlush = JS_FALSE;
 }
 
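Note: with every recorder-side structure (fragments, LIR buffers, name maps, TreeInfo) living in the monitor's arena, the flush above reduces to wiping the fragment hash table, resetting the arena, and re-creating the few long-lived roots inside the fresh arena. A sketch of that protocol, reusing the Arena type from the earlier sketch (illustrative names only, not the real JSTraceMonitor):

    #include <new>

    // Assumes the Arena type and its placement operator new from the previous
    // sketch. LirBufferSketch stands in for nanojit::LirBuffer.
    struct LirBufferSketch { };

    struct MonitorSketch {
        Arena arena;
        LirBufferSketch* lirbuf = nullptr;
        void* table[512] = {};               // like vmfragments[FRAGMENT_TABLE_SIZE]

        void flush() {
            for (void*& slot : table)        // forget all fragments...
                slot = nullptr;
            arena.reset();                   // ...and free them wholesale
            lirbuf = new (arena) LirBufferSketch();  // rebuild long-lived roots
        }
    };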
@@ -3529,12 +3508,12 @@ ResetJIT(JSContext* cx)
     js_AbortRecording(cx, "flush cache");
     TraceRecorder* tr;
     while ((tr = tm->abortStack) != NULL) {
-        tr->removeFragmentoReferences();
+        tr->removeFragmentReferences();
         tr->deepAbort();
         tr->popAbortStack();
     }
     if (ProhibitFlush(cx)) {
-        debug_only_print0(LC_TMTracer, "Deferring fragmento flush due to deep bail.\n");
+        debug_only_print0(LC_TMTracer, "Deferring JIT flush due to deep bail.\n");
         tm->needFlush = JS_TRUE;
         return;
     }
@@ -3553,7 +3532,6 @@ TraceRecorder::compile(JSTraceMonitor* tm)
         ResetJIT(cx);
         return;
     }
-    verbose_only(Fragmento* fragmento = tm->fragmento;)
     if (treeInfo->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) {
         debug_only_print0(LC_TMTracer, "Blacklist: excessive stack use.\n");
         Blacklist((jsbytecode*) fragment->root->ip);
@@ -3565,7 +3543,7 @@ TraceRecorder::compile(JSTraceMonitor* tm)
         return;
 
     Assembler *assm = tm->assembler;
-    ::compile(assm, fragment, *tm->allocator verbose_only(, fragmento->labels));
+    ::compile(assm, fragment, *tm->allocator verbose_only(, tm->labels));
     if (assm->error() == nanojit::OutOMem)
         return;
 
@@ -3595,7 +3573,7 @@ TraceRecorder::compile(JSTraceMonitor* tm)
     char* label = (char*)js_malloc((filename ? strlen(filename) : 7) + 16);
     sprintf(label, "%s:%u", filename ? filename : "<stdin>",
             js_FramePCToLineNumber(cx, cx->fp));
-    fragmento->labels->add(fragment, sizeof(Fragment), 0, label);
+    tm->labels->add(fragment, sizeof(Fragment), 0, label);
     js_free(label);
 #endif
     AUDIT(traceCompleted);
@@ -3631,17 +3609,22 @@ class SlotMap : public SlotVisitorBase
   public:
     struct SlotInfo
     {
+        SlotInfo()
+          : v(0), promoteInt(false), lastCheck(TypeCheck_Bad)
+        {}
         SlotInfo(jsval* v, bool promoteInt)
           : v(v), promoteInt(promoteInt), lastCheck(TypeCheck_Bad)
-        {
-        }
+        {}
         jsval *v;
         bool promoteInt;
         TypeCheckResult lastCheck;
     };
 
     SlotMap(TraceRecorder& rec, unsigned slotOffset)
-        : mRecorder(rec), mCx(rec.cx), slotOffset(slotOffset)
+        : mRecorder(rec),
+          mCx(rec.cx),
+          slots(NULL),
+          slotOffset(slotOffset)
     {
     }
 
@@ -3862,8 +3845,6 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus& cons
      */
     JS_ASSERT((*cx->fp->regs->pc == JSOP_LOOP || *cx->fp->regs->pc == JSOP_NOP) && !cx->fp->imacpc);
 
-    Fragmento* fragmento = traceMonitor->fragmento;
-
     if (callDepth != 0) {
         debug_only_print0(LC_TMTracer,
                           "Blacklisted: stack depth mismatch, possible recursion.\n");
@@ -3916,7 +3897,7 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus& cons
             debug_only_print0(LC_TMTracer,
                               "Trace has unstable loop variable with no stable peer, "
                               "compiling anyway.\n");
-            UnstableExit* uexit = new UnstableExit;
+            UnstableExit* uexit = new (*traceMonitor->allocator) UnstableExit;
             uexit->fragment = fragment;
             uexit->exit = exit;
             uexit->next = treeInfo->unstableExits;
@@ -3947,7 +3928,7 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus& cons
 
     peer = getLoop(traceMonitor, root->ip, root->globalObj, root->globalShape, root->argc);
     JS_ASSERT(peer);
-    joinEdgesToEntry(fragmento, peer);
+    joinEdgesToEntry(peer);
 
     debug_only_stmt(DumpPeerStability(traceMonitor, peer->ip, peer->globalObj,
                                       peer->globalShape, peer->argc);)
@@ -4026,13 +4007,13 @@ FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, TreeInfo* treeInf
 }
 
 JS_REQUIRES_STACK void
-TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root)
+TraceRecorder::joinEdgesToEntry(VMFragment* peer_root)
 {
     if (fragment->kind != LoopTrace)
         return;
 
-    TypeMap typeMap;
-    Queue<unsigned> undemotes;
+    TypeMap typeMap(NULL);
+    Queue<unsigned> undemotes(NULL);
 
     for (VMFragment* peer = peer_root; peer; peer = (VMFragment*)peer->peer) {
         TreeInfo* ti = peer->getTreeInfo();
@@ -4096,11 +4077,11 @@ TraceRecorder::endLoop(VMSideExit* exit)
     debug_only_printf(LC_TMTreeVis, "TREEVIS ENDLOOP EXIT=%p\n", exit);
 
     VMFragment* root = (VMFragment*)fragment->root;
-    joinEdgesToEntry(traceMonitor->fragmento, getLoop(traceMonitor,
+    joinEdgesToEntry(getLoop(traceMonitor,
                              root->ip,
                              root->globalObj,
                              root->globalShape,
                              root->argc));
     debug_only_stmt(DumpPeerStability(traceMonitor, root->ip, root->globalObj,
                                       root->globalShape, root->argc);)
 
@@ -4228,7 +4209,7 @@ TraceRecorder::emitTreeCall(Fragment* inner, VMSideExit* exit)
      * Bug 502604 - It is illegal to extend from the outer typemap without
      * first extending from the inner. Make a new typemap here.
      */
-    TypeMap fullMap;
+    TypeMap fullMap(NULL);
     fullMap.add(exit->stackTypeMap(), exit->numStackSlots);
     BuildGlobalTypeMapFromInnerTree(fullMap, exit);
     import(ti, inner_sp_ins, exit->numStackSlots, fullMap.length() - exit->numStackSlots,
@@ -4423,12 +4404,6 @@ nanojit::LirNameMap::formatGuard(LIns *i, char *out)
 }
 #endif
 
-void
-nanojit::Fragment::onDestroy()
-{
-    delete (TreeInfo *)vmprivate;
-}
-
 static JS_REQUIRES_STACK bool
 DeleteRecorder(JSContext* cx)
 {
@@ -4441,7 +4416,7 @@ DeleteRecorder(JSContext* cx)
     /* If we ran out of memory, flush the code cache. */
     Assembler *assm = JS_TRACE_MONITOR(cx).assembler;
     if (assm->error() == OutOMem ||
-        js_OverfullFragmento(tm, tm->fragmento)) {
+        js_OverfullJITCache(tm, false)) {
         ResetJIT(cx);
         return false;
     }
@@ -4556,7 +4531,7 @@ TrashTree(JSContext* cx, Fragment* f)
     debug_only_print0(LC_TMTracer, "Trashing tree info.\n");
     TreeInfo* ti = (TreeInfo*)f->vmprivate;
     f->vmprivate = NULL;
-    f->releaseCode(JS_TRACE_MONITOR(cx).codeAlloc);
+    f->setCode(NULL);
     Fragment** data = ti->dependentTrees.data();
     unsigned length = ti->dependentTrees.length();
     for (unsigned n = 0; n < length; ++n)
@@ -4565,8 +4540,6 @@ TrashTree(JSContext* cx, Fragment* f)
     length = ti->linkedTrees.length();
     for (unsigned n = 0; n < length; ++n)
         TrashTree(cx, data[n]);
-    delete ti;
-    JS_ASSERT(!f->code() && !f->vmprivate);
 }
 
 static int
@@ -4808,7 +4781,7 @@ RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer,
     f->root = f;
     f->lirbuf = tm->lirbuf;
 
-    if (tm->allocator->outOfMemory() || js_OverfullFragmento(tm, tm->fragmento)) {
+    if (tm->allocator->outOfMemory() || js_OverfullJITCache(tm, false)) {
         Backoff(cx, (jsbytecode*) f->root->ip);
         ResetJIT(cx);
         debug_only_print0(LC_TMTracer,
@@ -4819,7 +4792,7 @@ RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer,
     JS_ASSERT(!f->code() && !f->vmprivate);
 
     /* Set up the VM-private treeInfo structure for this fragment. */
-    TreeInfo* ti = new (&gc) TreeInfo(f, globalSlots);
+    TreeInfo* ti = new (*tm->allocator) TreeInfo(tm->allocator, f, globalSlots);
 
     /* Capture the coerced type of each active slot in the type map. */
     ti->typeMap.captureTypes(cx, globalObj, *globalSlots, 0 /* callDepth */);
@@ -4871,7 +4844,7 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, VMFragment** peerp)
 
     JS_ASSERT(from->code());
 
-    TypeMap typeMap;
+    TypeMap typeMap(NULL);
     FullMapFromExit(typeMap, exit);
     JS_ASSERT(typeMap.length() - exit->numStackSlots == from_ti->nGlobalTypes());
 
@@ -4906,7 +4879,6 @@ TreeInfo::removeUnstableExit(VMSideExit* exit)
     for (UnstableExit* uexit = this->unstableExits; uexit != NULL; uexit = uexit->next) {
         if (uexit->exit == exit) {
             *tail = uexit->next;
-            delete uexit;
             return *tail;
         }
         tail = &uexit->next;
@@ -4990,7 +4962,11 @@ AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, j
 
     Fragment* c;
     if (!(c = anchor->target)) {
-        c = JS_TRACE_MONITOR(cx).fragmento->createBranch(anchor, cx->fp->regs->pc);
+        Allocator& alloc = *JS_TRACE_MONITOR(cx).allocator;
+        c = new (alloc) Fragment(cx->fp->regs->pc);
+        c->kind = BranchTrace;
+        c->anchor = anchor->from->anchor;
+        c->root = anchor->from->root;
         debug_only_printf(LC_TMTreeVis, "TREEVIS CREATEBRANCH ROOT=%p FRAG=%p PC=%p FILE=\"%s\""
                           " LINE=%d ANCHOR=%p OFFS=%d\n",
                           f, c, cx->fp->regs->pc, cx->fp->script->filename,
@@ -5019,7 +4995,7 @@ AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, j
     unsigned stackSlots;
     unsigned ngslots;
     JSTraceType* typeMap;
-    TypeMap fullMap;
+    TypeMap fullMap(NULL);
     if (exitedFrom == NULL) {
         /*
          * If we are coming straight from a simple side exit, just use that
@@ -6182,8 +6158,7 @@ TraceRecorder::monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op)
     }
 
     if (tr->traceMonitor->allocator->outOfMemory() ||
-        js_OverfullFragmento(&JS_TRACE_MONITOR(cx),
-                             JS_TRACE_MONITOR(cx).fragmento)) {
+        js_OverfullJITCache(&JS_TRACE_MONITOR(cx), false)) {
         js_AbortRecording(cx, "no more memory");
         ResetJIT(cx);
         return JSRS_STOP;
@@ -6484,7 +6459,8 @@ void
 js_SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes)
 {
     JSTraceMonitor* tm = &JS_THREAD_DATA(cx)->traceMonitor;
-    JS_ASSERT(tm->fragmento && tm->reFragmento);
+    JS_ASSERT(tm->codeAlloc && tm->reCodeAlloc &&
+              tm->allocator && tm->reAllocator);
     if (bytes > 1 G)
         bytes = 1 G;
     if (bytes < 128 K)
@@ -6552,23 +6528,21 @@ js_InitJIT(JSTraceMonitor *tm)
     if (!tm->codeAlloc)
        tm->codeAlloc = new CodeAlloc();
 
-    if (!tm->assembler)
-        tm->assembler = new (&gc) Assembler(*tm->codeAlloc, alloc, core,
-                                            &js_LogController);
-
-    if (!tm->fragmento) {
+    if (!tm->assembler) {
+        tm->assembler = new (alloc) Assembler(*tm->codeAlloc, alloc, core,
+                                              &js_LogController);
+
         JS_ASSERT(!tm->reservedDoublePool);
-        Fragmento* fragmento = new (&gc) Fragmento(core, &js_LogController, 32, tm->codeAlloc);
-        verbose_only(fragmento->labels = new (alloc) LabelMap(alloc, &js_LogController);)
-        tm->fragmento = fragmento;
-        tm->lirbuf = new LirBuffer(alloc);
+        tm->lirbuf = new (alloc) LirBuffer(alloc);
 #ifdef DEBUG
-        tm->lirbuf->names = new (alloc) LirNameMap(alloc, tm->fragmento->labels);
+        tm->labels = new (alloc) LabelMap(alloc, &js_LogController);
+        tm->lirbuf->names = new (alloc) LirNameMap(alloc, tm->labels);
 #endif
         for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
             tm->globalStates[i].globalShape = -1;
             JS_ASSERT(!tm->globalStates[i].globalSlots);
-            tm->globalStates[i].globalSlots = new (&gc) SlotList();
+            tm->globalStates[i].globalSlots = new (alloc) SlotList(tm->allocator);
         }
         tm->reservedDoublePoolPtr = tm->reservedDoublePool = new jsval[MAX_NATIVE_STACK_SLOTS];
         memset(tm->vmfragments, 0, sizeof(tm->vmfragments));
@@ -6582,17 +6556,15 @@ js_InitJIT(JSTraceMonitor *tm)
     if (!tm->reCodeAlloc)
         tm->reCodeAlloc = new CodeAlloc();
 
-    if (!tm->reAssembler)
-        tm->reAssembler = new (&gc) Assembler(*tm->reCodeAlloc, reAlloc, core,
-                                              &js_LogController);
-
-    if (!tm->reFragmento) {
-        Fragmento* fragmento = new (&gc) Fragmento(core, &js_LogController, 32, tm->reCodeAlloc);
-        verbose_only(fragmento->labels = new (reAlloc) LabelMap(reAlloc, &js_LogController);)
-        tm->reFragmento = fragmento;
-        tm->reLirBuf = new LirBuffer(reAlloc);
+    if (!tm->reAssembler) {
+        tm->reAssembler = new (reAlloc) Assembler(*tm->reCodeAlloc, reAlloc, core,
+                                                  &js_LogController);
+
+        tm->reFragments = new (reAlloc) REHashMap(reAlloc);
+        tm->reLirBuf = new (reAlloc) LirBuffer(reAlloc);
 #ifdef DEBUG
-        tm->reLirBuf->names = new (reAlloc) LirNameMap(reAlloc, fragmento->labels);
+        tm->reLabels = new (reAlloc) LabelMap(reAlloc, &js_LogController);
+        tm->reLirBuf->names = new (reAlloc) LirNameMap(reAlloc, tm->reLabels);
 #endif
     }
 #if !defined XP_WIN
@@ -6619,44 +6591,24 @@ js_FinishJIT(JSTraceMonitor *tm)
                jitstats.typeMapMismatchAtEntry, jitstats.globalShapeMismatchAtEntry);
     }
 #endif
-    if (tm->fragmento != NULL) {
+    if (tm->assembler != NULL) {
         JS_ASSERT(tm->reservedDoublePool);
-#ifdef DEBUG
-        tm->lirbuf->names = NULL;
-#endif
-        delete tm->lirbuf;
         tm->lirbuf = NULL;
 
         if (tm->recordAttempts.ops)
             JS_DHashTableFinish(&tm->recordAttempts);
 
-        for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
-            VMFragment* f = tm->vmfragments[i];
-            while (f) {
-                VMFragment* next = f->next;
-                tm->fragmento->clearFragment(f);
-                f = next;
-            }
-            tm->vmfragments[i] = NULL;
-        }
-        delete tm->fragmento;
-        tm->fragmento = NULL;
-        for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
-            JS_ASSERT(tm->globalStates[i].globalSlots);
-            delete tm->globalStates[i].globalSlots;
-        }
+        memset(&tm->vmfragments[0], 0,
+               FRAGMENT_TABLE_SIZE * sizeof(VMFragment*));
+
         delete[] tm->reservedDoublePool;
         tm->reservedDoublePool = tm->reservedDoublePoolPtr = NULL;
     }
-    if (tm->reFragmento != NULL) {
-        delete tm->reLirBuf;
-        delete tm->reFragmento;
+    if (tm->reAssembler != NULL) {
         delete tm->reAllocator;
-        delete tm->reAssembler;
         delete tm->reCodeAlloc;
     }
-    if (tm->assembler)
-        delete tm->assembler;
     if (tm->codeAlloc)
         delete tm->codeAlloc;
     if (tm->allocator)
@@ -6703,50 +6655,6 @@ PurgeScriptRecordingAttempts(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 n
     return JS_DHASH_NEXT;
 }
 
-/* Call 'action' for each root fragment created for 'script'. */
-template<typename FragmentAction>
-static void
-IterateScriptFragments(JSContext* cx, JSScript* script, FragmentAction action)
-{
-    JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
-    for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
-        for (VMFragment **f = &(tm->vmfragments[i]); *f; ) {
-            VMFragment* frag = *f;
-            if (JS_UPTRDIFF(frag->ip, script->code) < script->length) {
-                /* This fragment is associated with the script. */
-                JS_ASSERT(frag->root == frag);
-                VMFragment* next = frag->next;
-                if (action(cx, tm, frag)) {
-                    debug_only_printf(LC_TMTracer,
-                                      "Disconnecting VMFragment %p "
-                                      "with ip %p, in range [%p,%p).\n",
-                                      (void*)frag, frag->ip, script->code,
-                                      script->code + script->length);
-                    *f = next;
-                } else {
-                    f = &((*f)->next);
-                }
-            } else {
-                f = &((*f)->next);
-            }
-        }
-    }
-}
-
-static bool
-TrashTreeAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag)
-{
-    for (Fragment *p = frag; p; p = p->peer)
-        TrashTree(cx, p);
-    return false;
-}
-
-static bool
-ClearFragmentAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag)
-{
-    tm->fragmento->clearFragment(frag);
-    return true;
-}
-
 JS_REQUIRES_STACK void
 js_PurgeScriptFragments(JSContext* cx, JSScript* script)
@@ -6756,18 +6664,34 @@ js_PurgeScriptFragments(JSContext* cx, JSScript* script)
     debug_only_printf(LC_TMTracer,
                       "Purging fragments for JSScript %p.\n", (void*)script);
 
-    /*
-     * TrashTree trashes dependent trees recursively, so we must do all the trashing
-     * before clearing in order to avoid calling TrashTree with a deleted fragment.
-     */
-    IterateScriptFragments(cx, script, TrashTreeAction);
-    IterateScriptFragments(cx, script, ClearFragmentAction);
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
+    for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
+        for (VMFragment **f = &(tm->vmfragments[i]); *f; ) {
+            VMFragment* frag = *f;
+            if (JS_UPTRDIFF(frag->ip, script->code) < script->length) {
+                /* This fragment is associated with the script. */
+                debug_only_printf(LC_TMTracer,
+                                  "Disconnecting VMFragment %p "
+                                  "with ip %p, in range [%p,%p).\n",
+                                  (void*)frag, frag->ip, script->code,
+                                  script->code + script->length);
+
+                JS_ASSERT(frag->root == frag);
+                VMFragment* next = frag->next;
+                for (Fragment *p = frag; p; p = p->peer)
+                    TrashTree(cx, p);
+                *f = next;
+            } else {
+                f = &((*f)->next);
+            }
+        }
+    }
+
     JS_DHashTableEnumerate(&(tm->recordAttempts), PurgeScriptRecordingAttempts, script);
 }
 
 bool
-js_OverfullFragmento(JSTraceMonitor* tm, Fragmento *fragmento)
+js_OverfullJITCache(JSTraceMonitor* tm, bool reCache)
 {
     /*
      * You might imagine the outOfMemory flag on the allocator is sufficient
@@ -6797,7 +6721,7 @@ js_OverfullJITCache(JSTraceMonitor* tm, bool reCache)
      * modeling the two forms of memory exhaustion *separately* for the
      * time being: condition 1 is handled by the outOfMemory flag inside
      * nanojit, and condition 2 is being handled independently *here*. So
-     * we construct our fragmentos to use all available memory they like,
+     * we construct our allocators to use all available memory they like,
      * and only report outOfMemory to us when there is literally no OS memory
      * left. Merely purging our cache when we hit our highwater mark is
      * handled by the (few) callers of this function.
@@ -6806,7 +6730,7 @@ js_OverfullJITCache(JSTraceMonitor* tm, bool reCache)
     jsuint maxsz = tm->maxCodeCacheBytes;
     VMAllocator *allocator = tm->allocator;
     CodeAlloc *codeAlloc = tm->codeAlloc;
-    if (fragmento == tm->reFragmento) {
+    if (reCache) {
         /*
          * At the time of making the code cache size configurable, we were using
          * 16 MB for the main code cache and 1 MB for the regular expression code
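Note: per the comment above, js_OverfullJITCache handles only the soft high-water mark; hard OS-level exhaustion is reported separately by the allocator's outOfMemory flag. The test itself is just a size comparison against the configurable cap, along these lines (field names are illustrative, not the real JSTraceMonitor layout):

    #include <cstddef>

    struct CacheUsageSketch {
        size_t lirBytes;    // bytes retained by the LIR arena (VMAllocator)
        size_t codeBytes;   // bytes of emitted native code (CodeAlloc)
    };

    // True when the combined footprint crosses the soft cap; callers react by
    // flushing the whole cache (ResetJIT above) rather than evicting pieces.
    static bool overfull(const CacheUsageSketch& u, size_t maxCodeCacheBytes)
    {
        return u.lirBytes + u.codeBytes > maxCodeCacheBytes;
    }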
@@ -10422,9 +10346,15 @@ TraceRecorder::record_JSOP_SETELEM()
     // builtin for every storage type. Special case for integers though,
     // since they are so common.
     LIns* res_ins;
-    if (isNumber(v) && isPromoteInt(v_ins)) {
-        LIns* args[] = { ::demote(lir, v_ins), idx_ins, obj_ins, cx_ins };
-        res_ins = lir->insCall(&js_Array_dense_setelem_int_ci, args);
+    LIns* args[] = { NULL, idx_ins, obj_ins, cx_ins };
+    if (isNumber(v)) {
+        if (isPromoteInt(v_ins)) {
+            args[0] = ::demote(lir, v_ins);
+            res_ins = lir->insCall(&js_Array_dense_setelem_int_ci, args);
+        } else {
+            args[0] = v_ins;
+            res_ins = lir->insCall(&js_Array_dense_setelem_double_ci, args);
+        }
     } else {
         LIns* args[] = { box_jsval(v, v_ins), idx_ins, obj_ins, cx_ins };
         res_ins = lir->insCall(&js_Array_dense_setelem_ci, args);
@@ -56,10 +56,11 @@
 #endif
 
 template <typename T>
-class Queue : public avmplus::GCObject {
+class Queue {
     T* _data;
     unsigned _len;
     unsigned _max;
+    nanojit::Allocator* alloc;
 
 public:
     void ensure(unsigned size) {
@@ -67,23 +68,34 @@ public:
             _max = 16;
         while (_max < size)
             _max <<= 1;
-        _data = (T*)realloc(_data, _max * sizeof(T));
+        if (alloc) {
+            T* tmp = new (*alloc) T[_max];
+            memcpy(tmp, _data, _len * sizeof(T));
+            _data = tmp;
+        } else {
+            _data = (T*)realloc(_data, _max * sizeof(T));
+        }
 #if defined(DEBUG)
         memset(&_data[_len], 0xcd, _max - _len);
 #endif
     }
 
-    Queue(unsigned max = 16) {
+    Queue(nanojit::Allocator* alloc, unsigned max = 16)
+        : alloc(alloc)
+    {
         this->_max = max;
         this->_len = 0;
         if (max)
-            this->_data = (T*)malloc(max * sizeof(T));
+            this->_data = (alloc ?
+                           new (*alloc) T[max] :
+                           (T*)malloc(max * sizeof(T)));
         else
             this->_data = NULL;
     }
 
     ~Queue() {
-        free(_data);
+        if (!alloc)
+            free(_data);
    }
 
     bool contains(T a) {
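Note: Queue::ensure() above cannot realloc when backed by an arena: arena blocks are never individually freed or resized, so growth becomes allocate-new plus memcpy, and the old block is simply abandoned until the whole arena is reset. The core of that move, written generically (assumes a bump allocator exposing void* alloc(size_t), as sketched earlier):

    #include <cstring>

    // Grow a POD array inside an arena: copy into a fresh, larger block and
    // abandon the old one (it is reclaimed when the whole arena resets).
    template <typename T, typename Alloc>
    T* growInArena(Alloc& arena, T* old, unsigned len, unsigned newCap)
    {
        T* fresh = static_cast<T*>(arena.alloc(newCap * sizeof(T)));
        if (old && len)
            std::memcpy(fresh, old, len * sizeof(T));   // POD copy, as in Queue<T>
        return fresh;
    }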
@@ -311,6 +323,7 @@ typedef Queue<uint16> SlotList;
 
 class TypeMap : public Queue<JSTraceType> {
 public:
+    TypeMap(nanojit::Allocator* alloc) : Queue<JSTraceType>(alloc) {}
     JS_REQUIRES_STACK void captureTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned callDepth);
     JS_REQUIRES_STACK void captureMissingGlobalTypes(JSContext* cx, JSObject* globalObj, SlotList& slots,
                                                      unsigned stackSlots);
@@ -411,7 +424,7 @@ struct VMSideExit : public nanojit::SideExit
     }
 };
 
-struct VMAllocator : public nanojit::Allocator
+class VMAllocator : public nanojit::Allocator
 {
 
 public:
@@ -428,6 +441,7 @@ public:
 
     bool mOutOfMemory;
     size_t mSize;
+
     /*
      * FIXME: Area the LIR spills into if we encounter an OOM mid-way
      * through compilation; we must check mOutOfMemory before we run out
@@ -438,6 +452,36 @@ public:
     uintptr_t mReserve[0x10000];
 };
 
+struct REHashKey {
+    size_t re_length;
+    uint16 re_flags;
+    const jschar* re_chars;
+
+    REHashKey(size_t re_length, uint16 re_flags, const jschar *re_chars)
+        : re_length(re_length)
+        , re_flags(re_flags)
+        , re_chars(re_chars)
+    {}
+
+    bool operator==(const REHashKey& other) const
+    {
+        return ((this->re_length == other.re_length) &&
+                (this->re_flags == other.re_flags) &&
+                !memcmp(this->re_chars, other.re_chars,
+                        this->re_length * sizeof(jschar)));
+    }
+};
+
+struct REHashFn {
+    static size_t hash(const REHashKey& k) {
+        return
+            k.re_length +
+            k.re_flags +
+            nanojit::murmurhash(k.re_chars, k.re_length * sizeof(jschar));
+    }
+};
+
 struct FrameInfo {
     JSObject* callee; // callee function object
     JSObject* block;  // caller block chain head
@@ -481,7 +525,7 @@ struct UnstableExit
     UnstableExit* next;
 };
 
-class TreeInfo MMGC_SUBCLASS_DECL {
+class TreeInfo {
 public:
     nanojit::Fragment* const fragment;
     JSScript* script;
@@ -507,19 +551,25 @@ public:
     uintN treePCOffset;
 #endif
 
-    TreeInfo(nanojit::Fragment* _fragment,
+    TreeInfo(nanojit::Allocator* alloc,
+             nanojit::Fragment* _fragment,
              SlotList* _globalSlots)
       : fragment(_fragment),
         script(NULL),
        maxNativeStackSlots(0),
        nativeStackBase(0),
        maxCallDepth(0),
+        typeMap(alloc),
        nStackTypes(0),
        globalSlots(_globalSlots),
+        dependentTrees(alloc),
+        linkedTrees(alloc),
        branchCount(0),
-        unstableExits(NULL)
+        sideExits(alloc),
+        unstableExits(NULL),
+        gcthings(alloc),
+        sprops(alloc)
     {}
-    ~TreeInfo();
 
     inline unsigned nGlobalTypes() {
         return typeMap.length() - nStackTypes;
@@ -935,9 +985,7 @@ public:
     JS_REQUIRES_STACK bool closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus &consensus);
     JS_REQUIRES_STACK void endLoop();
     JS_REQUIRES_STACK void endLoop(VMSideExit* exit);
-    JS_REQUIRES_STACK void joinEdgesToEntry(nanojit::Fragmento* fragmento,
-                                            VMFragment* peer_root);
-    void blacklist() { fragment->blacklist(); }
+    JS_REQUIRES_STACK void joinEdgesToEntry(VMFragment* peer_root);
     JS_REQUIRES_STACK void adjustCallerTypes(nanojit::Fragment* f);
     JS_REQUIRES_STACK nanojit::Fragment* findNestedCompatiblePeer(nanojit::Fragment* f);
     JS_REQUIRES_STACK void prepareTreeCall(nanojit::Fragment* inner);
@@ -945,7 +993,7 @@ public:
     unsigned getCallDepth() const;
     void pushAbortStack();
     void popAbortStack();
-    void removeFragmentoReferences();
+    void removeFragmentReferences();
     void deepAbort();
 
     JS_REQUIRES_STACK JSRecordingStatus record_EnterFrame();
@@ -1022,7 +1070,7 @@ extern void
 js_PurgeScriptFragments(JSContext* cx, JSScript* script);
 
 extern bool
-js_OverfullFragmento(JSTraceMonitor* tm, nanojit::Fragmento *frago);
+js_OverfullJITCache(JSTraceMonitor* tm, bool reCache);
 
 extern void
 js_PurgeJITOracle();
@@ -149,7 +149,7 @@ namespace nanojit
      * value. Temporary values can be placed into the AR as method calls
      * are issued. Also LIR_alloc instructions will consume space.
      */
-    class Assembler MMGC_SUBCLASS_DECL
+    class Assembler
     {
         friend class VerboseBlockReader;
     public:
@@ -48,150 +48,6 @@ namespace nanojit

    using namespace avmplus;

-    static uint32_t calcSaneCacheSize(uint32_t in)
-    {
-        if (in < uint32_t(NJ_LOG2_PAGE_SIZE)) return NJ_LOG2_PAGE_SIZE; // at least 1 page
-        if (in > 32) return 32; // 4GB should be enough for anyone
-        return in;
-    }
-
-    /**
-     * This is the main control center for creating and managing fragments.
-     */
-    Fragmento::Fragmento(AvmCore* core, LogControl* logc, uint32_t cacheSizeLog2, CodeAlloc* codeAlloc)
-        :
-#ifdef NJ_VERBOSE
-          enterCounts(NULL),
-          mergeCounts(NULL),
-          labels(NULL),
-#endif
-          _core(core),
-          _codeAlloc(codeAlloc),
-          _frags(core->GetGC()),
-          _max_pages(1 << (calcSaneCacheSize(cacheSizeLog2) - NJ_LOG2_PAGE_SIZE)),
-          _pagesGrowth(1)
-    {
-#ifdef _DEBUG
-        {
-            // XXX These belong somewhere else, but I can't find the
-            // right location right now.
-            NanoStaticAssert((LIR_lt ^ 3) == LIR_ge);
-            NanoStaticAssert((LIR_le ^ 3) == LIR_gt);
-            NanoStaticAssert((LIR_ult ^ 3) == LIR_uge);
-            NanoStaticAssert((LIR_ule ^ 3) == LIR_ugt);
-            NanoStaticAssert((LIR_flt ^ 3) == LIR_fge);
-            NanoStaticAssert((LIR_fle ^ 3) == LIR_fgt);
-
-            /* Opcodes must be strictly increasing without holes. */
-            uint32_t count = 0;
-#define OPDEF(op, number, operands, repkind) \
-            NanoAssertMsg(LIR_##op == count++, "misnumbered opcode");
-#define OPDEF64(op, number, operands, repkind) \
-            OPDEF(op, number, operands, repkind)
-#include "LIRopcode.tbl"
-#undef OPDEF
-#undef OPDEF64
-        }
-#endif
-
-#ifdef MEMORY_INFO
-        _allocList.set_meminfo_name("Fragmento._allocList");
-#endif
-        NanoAssert(_max_pages > _pagesGrowth); // shrink growth if needed
-        verbose_only( enterCounts = NJ_NEW(core->gc, BlockHist)(core->gc); )
-        verbose_only( mergeCounts = NJ_NEW(core->gc, BlockHist)(core->gc); )
-
-        memset(&_stats, 0, sizeof(_stats));
-    }
-
-    Fragmento::~Fragmento()
-    {
-        clearFrags();
-#if defined(NJ_VERBOSE)
-        NJ_DELETE(enterCounts);
-        NJ_DELETE(mergeCounts);
-#endif
-    }
-
-    // Clear the fragment. This *does not* remove the fragment from the
-    // map--the caller must take care of this.
-    void Fragmento::clearFragment(Fragment* f)
-    {
-        Fragment *peer = f->peer;
-        while (peer) {
-            Fragment *next = peer->peer;
-            peer->releaseTreeMem(_codeAlloc);
-            NJ_DELETE(peer);
-            peer = next;
-        }
-        f->releaseTreeMem(_codeAlloc);
-        NJ_DELETE(f);
-    }
-
-    void Fragmento::clearFrags()
-    {
-        while (!_frags.isEmpty()) {
-            clearFragment(_frags.removeLast());
-        }
-
-        verbose_only( enterCounts->clear(); )
-        verbose_only( mergeCounts->clear(); )
-        verbose_only( _stats.flushes++ );
-        verbose_only( _stats.compiles = 0 );
-        //nj_dprintf("Fragmento.clearFrags %d free pages of %d\n", _stats.freePages, _stats.pages);
-    }
-
-    AvmCore* Fragmento::core()
-    {
-        return _core;
-    }
-
-    Fragment* Fragmento::getAnchor(const void* ip)
-    {
-        Fragment *f = newFrag(ip);
-        Fragment *p = _frags.get(ip);
-        if (p) {
-            f->first = p;
-            /* append at the end of the peer list */
-            Fragment* next;
-            while ((next = p->peer) != NULL)
-                p = next;
-            p->peer = f;
-        } else {
-            f->first = f;
-            _frags.put(ip, f); /* this is the first fragment */
-        }
-        f->anchor = f;
-        f->root = f;
-        f->kind = LoopTrace;
-        verbose_only( addLabel(f, "T", _frags.size()); )
-        return f;
-    }
-
-    Fragment* Fragmento::getLoop(const void* ip)
-    {
-        return _frags.get(ip);
-    }
-
-#ifdef NJ_VERBOSE
-    void Fragmento::addLabel(Fragment *f, const char *prefix, int id)
-    {
-        char fragname[20];
-        sprintf(fragname, "%s%d", prefix, id);
-        labels->add(f, sizeof(Fragment), 0, fragname);
-    }
-#endif
-
-    Fragment *Fragmento::createBranch(SideExit* exit, const void* ip)
-    {
-        Fragment *f = newBranch(exit->from, ip);
-        f->kind = BranchTrace;
-        f->treeBranches = f->root->treeBranches;
-        f->root->treeBranches = f;
-        return f;
-    }
-
    //
    // Fragment
    //
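The NanoStaticAsserts in the deleted constructor encode a deliberate opcode layout: within each aligned group of comparison opcodes, XOR-ing with 3 yields the logical negation (since !(a < b) is a >= b) and XOR-ing with 1 swaps the operands. The self-checking sketch below reproduces that invariant; the enum values are illustrative only, as nanojit's real numbering comes from LIRopcode.tbl:

    #include <cassert>

    // Group ordered {lt, gt, le, ge} at a 4-aligned base, mirroring the
    // asserted layout: op ^ 3 negates the comparison, op ^ 1 swaps operands.
    enum Cmp { LT = 0, GT = 1, LE = 2, GE = 3 };

    static bool eval(Cmp op, int a, int b)
    {
        switch (op) {
        case LT: return a < b;
        case GT: return a > b;
        case LE: return a <= b;
        case GE: return a >= b;
        }
        return false;
    }

    int main()
    {
        for (int a = -2; a <= 2; ++a)
            for (int b = -2; b <= 2; ++b)
                for (int op = LT; op <= GE; ++op) {
                    assert(eval(Cmp(op), a, b) == !eval(Cmp(op ^ 3), a, b)); // negation
                    assert(eval(Cmp(op), a, b) ==  eval(Cmp(op ^ 1), b, a)); // swap
                }
        return 0;
    }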
@@ -226,82 +82,13 @@ namespace nanojit
         guardCount(0),
         xjumpCount(0),
         recordAttempts(0),
-        blacklistLevel(0),
         fragEntry(NULL),
         loopEntry(NULL),
         vmprivate(NULL),
-        codeList(0),
         _code(NULL),
         _hits(0)
     {
     }

-    Fragment::~Fragment()
-    {
-        onDestroy();
-    }
-
-    void Fragment::blacklist()
-    {
-        blacklistLevel++;
-        _hits = -(1<<blacklistLevel);
-    }
-
-    Fragment *Fragmento::newFrag(const void* ip)
-    {
-        GC *gc = _core->gc;
-        Fragment *f = NJ_NEW(gc, Fragment)(ip);
-        f->blacklistLevel = 5;
-        return f;
-    }
-
-    Fragment *Fragmento::newBranch(Fragment *from, const void* ip)
-    {
-        Fragment *f = newFrag(ip);
-        f->anchor = from->anchor;
-        f->root = from->root;
-        f->xjumpCount = from->xjumpCount;
-        /*// prepend
-        f->nextbranch = from->branches;
-        from->branches = f;*/
-        // append
-        if (!from->branches) {
-            from->branches = f;
-        } else {
-            Fragment *p = from->branches;
-            while (p->nextbranch != 0)
-                p = p->nextbranch;
-            p->nextbranch = f;
-        }
-        return f;
-    }
-
-    void Fragment::releaseLirBuffer()
-    {
-        lastIns = 0;
-    }
-
-    void Fragment::releaseCode(CodeAlloc *codeAlloc)
-    {
-        _code = 0;
-        codeAlloc->freeAll(codeList);
-    }
-
-    void Fragment::releaseTreeMem(CodeAlloc *codeAlloc)
-    {
-        releaseLirBuffer();
-        releaseCode(codeAlloc);
-
-        // now do it for all branches
-        Fragment* branch = branches;
-        while (branch)
-        {
-            Fragment* next = branch->nextbranch;
-            branch->releaseTreeMem(codeAlloc); // @todo safer here to recurse in case we support nested trees
-            NJ_DELETE(branch);
-            branch = next;
-        }
-    }
 #endif /* FEATURE_NANOJIT */
 }
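Among the code retired above is the exponential blacklisting scheme: each blacklist() call drives _hits to a larger negative power of two, so a repeatedly failing fragment must accumulate exponentially more hits before recording is attempted again, and newFrag() started the level at 5, making the first penalty -64. A self-contained model of the retired behavior, using a mock struct in place of the real Fragment:

    #include <cstdint>
    #include <iostream>

    struct MockFragment {
        int32_t blacklistLevel;
        int32_t hits;

        MockFragment() : blacklistLevel(5), hits(0) {}

        // Each failure bumps the level, pushing hits further below zero.
        void blacklist() {
            ++blacklistLevel;
            hits = -(1 << blacklistLevel);
        }
        bool isBlacklisted() const { return hits < 0; }
    };

    int main()
    {
        MockFragment f;
        for (int i = 0; i < 3; ++i) {
            f.blacklist();
            std::cout << "level " << f.blacklistLevel
                      << " -> hits " << f.hits << "\n";  // -64, -128, -256
        }
        return 0;
    }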
@@ -46,74 +46,6 @@
 namespace nanojit
 {
     struct GuardRecord;
-    class Assembler;
-
-    typedef avmplus::GCSortedMap<const void*, uint32_t, avmplus::LIST_NonGCObjects> BlockSortedMap;
-    class BlockHist: public BlockSortedMap
-    {
-    public:
-        BlockHist(avmplus::GC*gc) : BlockSortedMap(gc)
-        {
-        }
-        uint32_t count(const void *p) {
-            uint32_t c = 1+get(p);
-            put(p, c);
-            return c;
-        }
-    };
-
-    struct fragstats;
-    /*
-     *
-     * This is the main control center for creating and managing fragments.
-     */
-    class Fragmento : public avmplus::GCFinalizedObject
-    {
-    public:
-        Fragmento(AvmCore* core, LogControl* logc, uint32_t cacheSizeLog2, CodeAlloc *codeAlloc);
-        ~Fragmento();
-
-        AvmCore* core();
-
-        Fragment* getLoop(const void* ip);
-        Fragment* getAnchor(const void* ip);
-        // Remove one fragment. The caller is responsible for making sure
-        // that this does not destroy any resources shared with other
-        // fragments (such as a LirBuffer or this fragment itself as a
-        // jump target).
-        void clearFrags(); // clear all fragments from the cache
-        Fragment* createBranch(SideExit *exit, const void* ip);
-        Fragment* newFrag(const void* ip);
-        Fragment* newBranch(Fragment *from, const void* ip);
-
-        verbose_only( uint32_t pageCount(); )
-        verbose_only( void addLabel(Fragment* f, const char *prefix, int id); )
-
-        // stats
-        struct
-        {
-            uint32_t pages; // pages consumed
-            uint32_t flushes, ilsize, abcsize, compiles, totalCompiles;
-        }
-        _stats;
-
-        verbose_only( DWB(BlockHist*) enterCounts; )
-        verbose_only( DWB(BlockHist*) mergeCounts; )
-        verbose_only( LabelMap* labels; )
-
-#ifdef AVMPLUS_VERBOSE
-        void drawTrees(char *fileName);
-#endif
-
-        void clearFragment(Fragment *f);
-    private:
-        AvmCore* _core;
-        CodeAlloc* _codeAlloc;
-        FragmentMap _frags; /* map from ip -> Fragment ptr */
-
-        const uint32_t _max_pages;
-        uint32_t _pagesGrowth;
-    };
-
 enum TraceKind {
     LoopTrace,
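The deleted BlockHist is a small verbose-build histogram: count(p) bumps and returns the tally for a block address, with absent keys reading as zero. A std::map-based equivalent is sketched below; the zero-for-missing semantics of avmplus::GCSortedMap are inferred from the deleted code itself, not from avmplus headers:

    #include <cstdint>
    #include <iostream>
    #include <map>

    // Minimal stand-in for the deleted BlockHist. std::map's operator[]
    // value-initializes absent entries, giving the same "missing means
    // zero" behavior that "1 + get(p)" relied on.
    class BlockHist {
        std::map<const void*, std::uint32_t> m;
    public:
        std::uint32_t count(const void* p) { return ++m[p]; }
        void clear() { m.clear(); }
    };

    int main()
    {
        BlockHist h;
        int block = 0;            // any address works as a key
        h.count(&block);
        std::cout << h.count(&block) << "\n";  // prints 2
        return 0;
    }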
@@ -128,23 +60,16 @@ namespace nanojit
     * It may turn out that that this arrangement causes too much traffic
     * between d and i-caches and that we need to carve up the structure differently.
     */
-    class Fragment : public avmplus::GCFinalizedObject
+    class Fragment
    {
    public:
        Fragment(const void*);
-        ~Fragment();

        NIns* code() { return _code; }
        void setCode(NIns* codee) { _code = codee; }
        int32_t& hits() { return _hits; }
-        void blacklist();
-        bool isBlacklisted() { return _hits < 0; }
-        void releaseLirBuffer();
-        void releaseCode(CodeAlloc *alloc);
-        void releaseTreeMem(CodeAlloc *alloc);
        bool isAnchor() { return anchor == this; }
        bool isRoot() { return root == this; }
-        void onDestroy();
-
        verbose_only( uint32_t _called; )
        verbose_only( uint32_t _native; )
@@ -176,11 +101,9 @@ namespace nanojit
        uint32_t guardCount;
        uint32_t xjumpCount;
        uint32_t recordAttempts;
-        int32_t blacklistLevel;
        NIns* fragEntry;
        NIns* loopEntry;
        void* vmprivate;
-        CodeList* codeList;
-
    private:
        NIns* _code; // ptr to start of code
@@ -2062,10 +2062,6 @@ namespace nanojit
            frag->fragEntry = 0;
            frag->loopEntry = 0;
        }
-        else
-        {
-            CodeAlloc::moveAll(frag->codeList, assm->codeList);
-        }

        /* BEGIN decorative postamble */
        verbose_only( if (anyVerb) {
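The removed else-branch handed the freshly assembled code blocks from the assembler's list to the fragment via CodeAlloc::moveAll, pairing with the freeAll call in the (also deleted) Fragment::releaseCode. The toy below models only that ownership transfer between two singly linked block lists; the real CodeAlloc manages executable memory pages, and the argument order here simply follows the call in this hunk:

    #include <cassert>
    #include <cstddef>

    struct CodeList { CodeList* next; };

    // Splice everything owned by src onto the front of dst, leaving src empty.
    static void moveAll(CodeList*& dst, CodeList*& src)
    {
        if (!src)
            return;
        CodeList* tail = src;
        while (tail->next)
            tail = tail->next;  // find the end of src's list
        tail->next = dst;       // splice src in front of dst
        dst = src;
        src = NULL;             // src no longer owns any blocks
    }

    int main()
    {
        CodeList b = { NULL };
        CodeList a = { &b };    // src list: a -> b
        CodeList d = { NULL };  // dst list: d
        CodeList* src = &a;
        CodeList* dst = &d;
        moveAll(dst, src);
        assert(src == NULL && dst == &a && b.next == &d);
        return 0;
    }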
@@ -781,8 +781,6 @@ namespace nanojit
    LIns* FASTCALL callArgN(LInsp i, uint32_t n);
    extern const uint8_t operandCount[];

-    class Fragmento; // @todo remove this ; needed for minbuild for some reason?!? Should not be compiling this code at all
-
    // make it a GCObject so we can explicitly delete it early
    class LirWriter : public GCObject
    {