commit 4d69e4136d
Merge.
@@ -955,6 +955,25 @@ js_Array_dense_setelem_int(JSContext* cx, JSObject* obj, jsint i, int32 j)
     return dense_grow(cx, obj, i, v);
 }
 JS_DEFINE_CALLINFO_4(extern, BOOL, js_Array_dense_setelem_int, CONTEXT, OBJECT, INT32, INT32, 0, 0)
+
+JSBool FASTCALL
+js_Array_dense_setelem_double(JSContext* cx, JSObject* obj, jsint i, jsdouble d)
+{
+    JS_ASSERT(OBJ_IS_DENSE_ARRAY(cx, obj));
+
+    jsval v;
+    jsint j;
+
+    if (JS_LIKELY(JSDOUBLE_IS_INT(d, j) && INT_FITS_IN_JSVAL(j))) {
+        v = INT_TO_JSVAL(j);
+    } else {
+        if (!js_NewDoubleInRootedValue(cx, d, &v))
+            return JS_FALSE;
+    }
+
+    return dense_grow(cx, obj, i, v);
+}
+JS_DEFINE_CALLINFO_4(extern, BOOL, js_Array_dense_setelem_double, CONTEXT, OBJECT, INT32, DOUBLE, 0, 0)
 #endif
 
 static JSBool
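The new helper mirrors the usual jsval tagging split: a double that is integral and small enough is stored as a tagged int, and only otherwise is a heap double allocated. The same decision as a standalone sketch, with illustrative stand-ins rather than the SpiderMonkey jsval API:

    #include <cmath>
    #include <cstdint>

    // Returns true and writes *out when d can be represented exactly as int32.
    static bool FitsInt32(double d, int32_t* out) {
        if (!(d >= -2147483648.0 && d <= 2147483647.0))  // also rejects NaN
            return false;
        int32_t j = static_cast<int32_t>(d);
        if (static_cast<double>(j) != d)                 // fractional part
            return false;
        if (j == 0 && std::signbit(d))                   // keep -0.0 as a double
            return false;
        *out = j;
        return true;
    }

    struct Value { bool isInt; int32_t i; double d; };   // toy tagged value

    static bool SetElemNumber(Value* slot, double d) {
        int32_t j;
        if (FitsInt32(d, &j)) {      // fast path: tagged int
            slot->isInt = true;
            slot->i = j;
        } else {                     // slow path: stands in for
            slot->isInt = false;     // js_NewDoubleInRootedValue
            slot->d = d;
        }
        return true;
    }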
@@ -453,6 +453,7 @@ JS_DECLARE_CALLINFO(js_NewInstance)
 /* Defined in jsarray.cpp. */
 JS_DECLARE_CALLINFO(js_Array_dense_setelem)
 JS_DECLARE_CALLINFO(js_Array_dense_setelem_int)
+JS_DECLARE_CALLINFO(js_Array_dense_setelem_double)
 JS_DECLARE_CALLINFO(js_NewEmptyArray)
 JS_DECLARE_CALLINFO(js_NewUninitializedArray)
 JS_DECLARE_CALLINFO(js_ArrayCompPush)
@@ -98,8 +98,11 @@ namespace nanojit {
     class Assembler;
     class CodeAlloc;
     class Fragment;
     class Fragmento;
     class LirBuffer;
 #ifdef DEBUG
     class LabelMap;
 #endif
     extern "C++" { template<typename K, typename V, typename H> class HashMap; }
 }
 class TraceRecorder;
 class VMAllocator;
@@ -114,6 +117,12 @@ typedef Queue<uint16> SlotList;
 #define FRAGMENT_TABLE_SIZE 512
 struct VMFragment;
 
+#ifdef __cplusplus
+struct REHashKey;
+struct REHashFn;
+typedef nanojit::HashMap<REHashKey, nanojit::Fragment*, REHashFn> REHashMap;
+#endif
+
 #define MONITOR_N_GLOBAL_STATES 4
 struct GlobalState {
     JSObject* globalObj;
@@ -141,10 +150,13 @@ struct JSTraceMonitor {
     JSContext               *tracecx;
 
     CLS(nanojit::LirBuffer) lirbuf;
     CLS(nanojit::Fragmento) fragmento;
     CLS(VMAllocator)        allocator;   // A chunk allocator for LIR.
     CLS(nanojit::CodeAlloc) codeAlloc;   // A general allocator for native code.
     CLS(nanojit::Assembler) assembler;
 #ifdef DEBUG
     CLS(nanojit::LabelMap)  labels;
 #endif
 
     CLS(TraceRecorder)      recorder;
     jsval                   *reservedDoublePool;
     jsval                   *reservedDoublePoolPtr;
@@ -180,7 +192,10 @@ struct JSTraceMonitor {
     CLS(nanojit::CodeAlloc) reCodeAlloc;
     CLS(nanojit::Assembler) reAssembler;
     CLS(nanojit::LirBuffer) reLirBuf;
     CLS(nanojit::Fragmento) reFragmento;
     CLS(REHashMap)          reFragments;
 #ifdef DEBUG
     CLS(nanojit::LabelMap)  reLabels;
 #endif
 
     /* Keep a list of recorders we need to abort on cache flush. */
     CLS(TraceRecorder)      abortStack;
@@ -2780,7 +2780,7 @@ js_Interpret(JSContext *cx)
            the recorder to be destroyed when we return. */
         if (tr) {
             if (tr->wasDeepAborted())
-                tr->removeFragmentoReferences();
+                tr->removeFragmentReferences();
             else
                 tr->pushAbortStack();
         }
@@ -2005,42 +2005,35 @@ typedef JSTempVector<LIns *> LInsList;
 
 /* Dummy GC for nanojit placement new. */
 static GC gc;
 static avmplus::AvmCore s_core = avmplus::AvmCore();
 static avmplus::AvmCore* core = &s_core;
 
-static void *
-HashRegExp(uint16 flags, const jschar *s, size_t n)
-{
-    uint32 h;
-
-    for (h = 0; n; s++, n--)
-        h = JS_ROTATE_LEFT32(h, 4) ^ *s;
-    return (void *)(h + flags);
-}
-
-struct RESideExit : public SideExit {
-    size_t re_length;
-    uint16 re_flags;
-    jschar re_chars[1];
-};
-
-/* Return the cached fragment for the given regexp, or NULL. */
+/* Return the cached fragment for the given regexp, or create one. */
 static Fragment*
-LookupNativeRegExp(JSContext* cx, void* hash, uint16 re_flags,
+LookupNativeRegExp(JSContext* cx, uint16 re_flags,
                    const jschar* re_chars, size_t re_length)
 {
-    Fragmento* fragmento = JS_TRACE_MONITOR(cx).reFragmento;
-    Fragment* fragment = fragmento->getLoop(hash);
-    while (fragment) {
-        if (fragment->lastIns) {
-            RESideExit *exit = (RESideExit*)fragment->lastIns->record()->exit;
-            if (exit->re_flags == re_flags &&
-                exit->re_length == re_length &&
-                !memcmp(exit->re_chars, re_chars, re_length * sizeof(jschar))) {
-                return fragment;
-            }
-        }
-        fragment = fragment->peer;
-    }
+    JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
+    VMAllocator &alloc = *tm->reAllocator;
+    REHashMap &table = *tm->reFragments;
+
+    REHashKey k(re_length, re_flags, re_chars);
+    Fragment *frag = table.get(k);
+
+    if (!frag) {
+        frag = new (alloc) Fragment(0);
+        frag->lirbuf = tm->reLirBuf;
+        frag->root = frag;
+        /*
+         * Copy the re_chars portion of the hash key into the Allocator, so
+         * its lifecycle is disconnected from the lifecycle of the
+         * underlying regexp.
+         */
+        k.re_chars = (const jschar*) new (alloc) jschar[re_length];
+        memcpy((void*) k.re_chars, re_chars, re_length * sizeof(jschar));
+        table.put(k, frag);
+    }
-    return NULL;
+    return frag;
 }
 
 static JSBool
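The rewritten LookupNativeRegExp above swaps a linear walk of fragment peers for a keyed hash lookup that creates the entry on a miss. A rough standalone analogue of that get-or-create pattern, using the C++ standard library in place of nanojit's HashMap and arena (all names here are illustrative):

    #include <cstdint>
    #include <string>
    #include <unordered_map>
    #include <utility>

    struct Frag { /* compiled-code handle */ };

    // Key: (flags, source). std::u16string stands in for the jschar buffer
    // copied into the allocator, so the key outlives the regexp object.
    using Key = std::pair<uint16_t, std::u16string>;

    struct KeyHash {
        size_t operator()(const Key& k) const {
            return std::hash<std::u16string>()(k.second) ^ k.first;
        }
    };

    static Frag* LookupOrCreate(std::unordered_map<Key, Frag*, KeyHash>& table,
                                uint16_t flags, const char16_t* chars, size_t len) {
        Key k(flags, std::u16string(chars, len));  // copies chars, like the
                                                   // memcpy into the Allocator
        auto it = table.find(k);
        if (it != table.end())
            return it->second;
        Frag* frag = new Frag();                   // stands in for placement
        table.emplace(std::move(k), frag);         // new into the arena
        return frag;
    }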
@@ -3065,16 +3058,13 @@ class RegExpNativeCompiler {
     GuardRecord* insertGuard(const jschar* re_chars, size_t re_length)
     {
         LIns* skip = lirBufWriter->insSkip(sizeof(GuardRecord) +
-                                           sizeof(RESideExit) +
+                                           sizeof(SideExit) +
                                            (re_length-1) * sizeof(jschar));
         GuardRecord* guard = (GuardRecord *) skip->payload();
         memset(guard, 0, sizeof(*guard));
-        RESideExit* exit = (RESideExit*)(guard+1);
+        SideExit* exit = (SideExit*)(guard+1);
         guard->exit = exit;
         guard->exit->target = fragment;
-        exit->re_flags = re->flags;
-        exit->re_length = re_length;
-        memcpy(exit->re_chars, re_chars, re_length * sizeof(jschar));
         fragment->lastIns = lir->insGuard(LIR_loop, NULL, skip);
         return guard;
     }
@@ -3092,7 +3082,6 @@ class RegExpNativeCompiler {
         size_t re_length;
         JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
         Assembler *assm = tm->reAssembler;
-        Fragmento* fragmento = tm->reFragmento;
         VMAllocator& alloc = *tm->reAllocator;
 
         re->source->getCharsAndLength(re_chars, re_length);
@@ -3148,7 +3137,7 @@ class RegExpNativeCompiler {
 
         if (alloc.outOfMemory())
             goto fail;
-        ::compile(assm, fragment, alloc verbose_only(, fragmento->labels));
+        ::compile(assm, fragment, alloc verbose_only(, tm->reLabels));
         if (assm->error() != nanojit::None) {
             oom = assm->error() == nanojit::OutOMem;
             goto fail;
@@ -3162,20 +3151,25 @@ class RegExpNativeCompiler {
         return JS_TRUE;
     fail:
         if (alloc.outOfMemory() || oom ||
-            js_OverfullFragmento(tm, fragmento)) {
-            fragmento->clearFrags();
+            js_OverfullJITCache(tm, true)) {
+            delete lirBufWriter;
+            tm->reCodeAlloc->sweep();
+            alloc.reset();
+            tm->reFragments = new (alloc) REHashMap(alloc);
+            tm->reLirBuf = new (alloc) LirBuffer(alloc);
 #ifdef DEBUG
-            fragmento->labels = new (alloc) LabelMap(alloc, &js_LogController);
-            lirbuf->names = new (alloc) LirNameMap(alloc, fragmento->labels);
+            tm->reLabels = new (alloc) LabelMap(alloc, &js_LogController);
+            tm->reLirBuf->names = new (alloc) LirNameMap(alloc, tm->reLabels);
+            tm->reAssembler = new (alloc) Assembler(*tm->reCodeAlloc, alloc, core,
+                                                    &js_LogController);
+#else
+            tm->reAssembler = new (alloc) Assembler(*tm->reCodeAlloc, alloc, core, NULL);
 #endif
-            lirbuf->clear();
         } else {
             if (!guard) insertGuard(re_chars, re_length);
             re->flags |= JSREG_NOCOMPILE;
+            delete lirBufWriter;
         }
-        delete lirBufWriter;
 #ifdef NJ_VERBOSE
         debug_only_stmt( if (js_LogController.lcbits & LC_TMRegexp)
                              delete lir; )
@@ -3216,19 +3210,11 @@ typedef void *(FASTCALL *NativeRegExp)(REGlobalData*, const jschar *);
 static NativeRegExp
 GetNativeRegExp(JSContext* cx, JSRegExp* re)
 {
-    Fragment *fragment;
     const jschar *re_chars;
     size_t re_length;
-    Fragmento* fragmento = JS_TRACE_MONITOR(cx).reFragmento;
 
     re->source->getCharsAndLength(re_chars, re_length);
-    void* hash = HashRegExp(re->flags, re_chars, re_length);
-    fragment = LookupNativeRegExp(cx, hash, re->flags, re_chars, re_length);
-    if (!fragment) {
-        fragment = fragmento->getAnchor(hash);
-        fragment->lirbuf = JS_TRACE_MONITOR(cx).reLirBuf;
-        fragment->root = fragment;
-    }
+    Fragment *fragment = LookupNativeRegExp(cx, re->flags, re_chars, re_length);
+    JS_ASSERT(fragment);
     if (!fragment->code()) {
         if (!CompileRegExpToNative(cx, re, fragment))
             return NULL;
@@ -865,7 +865,7 @@ getLoop(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape
 static Fragment*
 getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc)
 {
-    VMFragment *f = new (&gc) VMFragment(ip, globalObj, globalShape, argc);
+    VMFragment *f = new (*tm->allocator) VMFragment(ip, globalObj, globalShape, argc);
     JS_ASSERT(f);
 
     Fragment *p = getVMFragment(tm, ip, globalObj, globalShape, argc);
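The new (*tm->allocator) form used here depends on a placement operator new that carves objects out of an arena, so fragments are reclaimed in bulk when the arena resets instead of being deleted one by one. A minimal sketch of the idiom with a toy arena (not nanojit's Allocator):

    #include <cassert>
    #include <cstddef>
    #include <new>

    class Arena {
        alignas(16) char buf[1 << 16];
        size_t used = 0;
    public:
        void* alloc(size_t n) {
            n = (n + 7) & ~size_t(7);          // 8-byte alignment
            assert(used + n <= sizeof(buf));   // toy bound; no growth
            void* p = buf + used;
            used += n;                         // note: no per-object free
            return p;
        }
        void reset() { used = 0; }             // frees everything at once
    };

    // Placement new: `new (arena) T(args)` allocates from the arena.
    inline void* operator new(size_t n, Arena& a) { return a.alloc(n); }
    // Matching delete, only invoked if T's constructor throws.
    inline void operator delete(void*, Arena&) {}

    struct VMFragmentToy {
        const void* ip;
        explicit VMFragmentToy(const void* p) : ip(p) {}
    };

    void demo(Arena& arena, const void* ip) {
        VMFragmentToy* f = new (arena) VMFragmentToy(ip);  // lives until reset()
        (void)f;
        arena.reset();                                     // bulk reclamation
    }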
@@ -1683,6 +1683,8 @@ JS_REQUIRES_STACK
 TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment,
         TreeInfo* ti, unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
         VMSideExit* innermostNestedGuard, jsbytecode* outer, uint32 outerArgc)
+    : whichTreesToTrash(JS_TRACE_MONITOR(cx).allocator),
+      cfgMerges(JS_TRACE_MONITOR(cx).allocator)
 {
     JS_ASSERT(!_fragment->vmprivate && ti && cx->fp->regs->pc == (jsbytecode*)_fragment->ip);
@@ -1803,17 +1805,6 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment,
     }
 }
 
-TreeInfo::~TreeInfo()
-{
-    UnstableExit* temp;
-
-    while (unstableExits) {
-        temp = unstableExits->next;
-        delete unstableExits;
-        unstableExits = temp;
-    }
-}
-
 TraceRecorder::~TraceRecorder()
 {
     JS_ASSERT(nextRecorderToAbort == NULL);
@@ -1827,18 +1818,12 @@ TraceRecorder::~TraceRecorder()
     }
 #endif
     if (fragment) {
         if (wasRootFragment && !fragment->root->code()) {
             JS_ASSERT(!fragment->root->vmprivate);
             delete treeInfo;
         }
 
         if (trashSelf)
             TrashTree(cx, fragment->root);
 
         for (unsigned int i = 0; i < whichTreesToTrash.length(); i++)
             TrashTree(cx, whichTreesToTrash[i]);
     } else if (wasRootFragment) {
         delete treeInfo;
     }
 #ifdef DEBUG
     debug_only_stmt( delete verbose_filter; )
@@ -1852,7 +1837,7 @@ TraceRecorder::~TraceRecorder()
 }
 
 void
-TraceRecorder::removeFragmentoReferences()
+TraceRecorder::removeFragmentReferences()
 {
     fragment = NULL;
 }
@@ -2112,35 +2097,29 @@ oom:
 void
 JSTraceMonitor::flush()
 {
-    if (fragmento) {
-        fragmento->clearFrags();
-        for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
-            VMFragment* f = vmfragments[i];
-            while (f) {
-                VMFragment* next = f->next;
-                fragmento->clearFragment(f);
-                f = next;
-            }
-            vmfragments[i] = NULL;
-        }
-        for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
-            globalStates[i].globalShape = -1;
-            globalStates[i].globalSlots->clear();
-        }
-    }
+    memset(&vmfragments[0], 0,
+           FRAGMENT_TABLE_SIZE * sizeof(VMFragment*));
+
+    allocator->reset();
+    codeAlloc->sweep();
 
-#ifdef DEBUG
-    JS_ASSERT(fragmento);
-    JS_ASSERT(fragmento->labels);
-    Allocator& alloc = *allocator;
-    fragmento->labels = new (alloc) LabelMap(alloc, &js_LogController);
-    lirbuf->names = new (alloc) LirNameMap(alloc, fragmento->labels);
-#endif
+    Allocator& alloc = *allocator;
+
+    for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
+        globalStates[i].globalShape = -1;
+        globalStates[i].globalSlots = new (alloc) SlotList(allocator);
+    }
+
+    assembler = new (alloc) Assembler(*codeAlloc, alloc, core,
+                                      &js_LogController);
+    lirbuf = new (alloc) LirBuffer(alloc);
+
+#ifdef DEBUG
+    JS_ASSERT(labels);
+    labels = new (alloc) LabelMap(alloc, &js_LogController);
+    lirbuf->names = new (alloc) LirNameMap(alloc, labels);
+#endif
 
     lirbuf->clear();
     needFlush = JS_FALSE;
 }
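flush() now reclaims the whole JIT cache in two strokes: resetting the LIR arena invalidates every object that was placement-new'd into it, and sweeping the code allocator drops native code. After that, the long-lived structures (LirBuffer, label map, Assembler, global slot lists) must be rebuilt inside the fresh arena before anything dereferences them. The discipline in miniature, reusing the toy Arena sketched earlier (hypothetical names):

    struct Names { /* debug name table */ };

    struct Monitor {
        Arena arena;
        Names* names = nullptr;

        void flush() {
            arena.reset();                 // every arena object is now dead
            names = new (arena) Names();   // rebuild before first use
        }
    };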
@@ -3529,12 +3508,12 @@ ResetJIT(JSContext* cx)
     js_AbortRecording(cx, "flush cache");
     TraceRecorder* tr;
     while ((tr = tm->abortStack) != NULL) {
-        tr->removeFragmentoReferences();
+        tr->removeFragmentReferences();
         tr->deepAbort();
         tr->popAbortStack();
     }
     if (ProhibitFlush(cx)) {
-        debug_only_print0(LC_TMTracer, "Deferring fragmento flush due to deep bail.\n");
+        debug_only_print0(LC_TMTracer, "Deferring JIT flush due to deep bail.\n");
         tm->needFlush = JS_TRUE;
         return;
     }
@@ -3553,7 +3532,6 @@ TraceRecorder::compile(JSTraceMonitor* tm)
         ResetJIT(cx);
         return;
     }
-    verbose_only(Fragmento* fragmento = tm->fragmento;)
     if (treeInfo->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) {
         debug_only_print0(LC_TMTracer, "Blacklist: excessive stack use.\n");
         Blacklist((jsbytecode*) fragment->root->ip);
@@ -3565,7 +3543,7 @@ TraceRecorder::compile(JSTraceMonitor* tm)
         return;
 
     Assembler *assm = tm->assembler;
-    ::compile(assm, fragment, *tm->allocator verbose_only(, fragmento->labels));
+    ::compile(assm, fragment, *tm->allocator verbose_only(, tm->labels));
     if (assm->error() == nanojit::OutOMem)
         return;
@@ -3595,7 +3573,7 @@ TraceRecorder::compile(JSTraceMonitor* tm)
     char* label = (char*)js_malloc((filename ? strlen(filename) : 7) + 16);
     sprintf(label, "%s:%u", filename ? filename : "<stdin>",
             js_FramePCToLineNumber(cx, cx->fp));
-    fragmento->labels->add(fragment, sizeof(Fragment), 0, label);
+    tm->labels->add(fragment, sizeof(Fragment), 0, label);
     js_free(label);
 #endif
     AUDIT(traceCompleted);
@@ -3631,17 +3609,22 @@ class SlotMap : public SlotVisitorBase
   public:
     struct SlotInfo
     {
+        SlotInfo()
+          : v(0), promoteInt(false), lastCheck(TypeCheck_Bad)
+        {}
         SlotInfo(jsval* v, bool promoteInt)
           : v(v), promoteInt(promoteInt), lastCheck(TypeCheck_Bad)
-        {
-        }
+        {}
         jsval *v;
         bool promoteInt;
         TypeCheckResult lastCheck;
     };
 
     SlotMap(TraceRecorder& rec, unsigned slotOffset)
-      : mRecorder(rec), mCx(rec.cx), slotOffset(slotOffset)
+      : mRecorder(rec),
+        mCx(rec.cx),
+        slots(NULL),
+        slotOffset(slotOffset)
     {
     }
@@ -3862,8 +3845,6 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus& consensus
      */
     JS_ASSERT((*cx->fp->regs->pc == JSOP_LOOP || *cx->fp->regs->pc == JSOP_NOP) && !cx->fp->imacpc);
 
-    Fragmento* fragmento = traceMonitor->fragmento;
-
     if (callDepth != 0) {
         debug_only_print0(LC_TMTracer,
                           "Blacklisted: stack depth mismatch, possible recursion.\n");
@@ -3916,7 +3897,7 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus& consensus
         debug_only_print0(LC_TMTracer,
                           "Trace has unstable loop variable with no stable peer, "
                           "compiling anyway.\n");
-        UnstableExit* uexit = new UnstableExit;
+        UnstableExit* uexit = new (*traceMonitor->allocator) UnstableExit;
         uexit->fragment = fragment;
         uexit->exit = exit;
         uexit->next = treeInfo->unstableExits;
@@ -3947,7 +3928,7 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus& consensus
 
     peer = getLoop(traceMonitor, root->ip, root->globalObj, root->globalShape, root->argc);
     JS_ASSERT(peer);
-    joinEdgesToEntry(fragmento, peer);
+    joinEdgesToEntry(peer);
 
     debug_only_stmt(DumpPeerStability(traceMonitor, peer->ip, peer->globalObj,
                                       peer->globalShape, peer->argc);)
@@ -4026,13 +4007,13 @@ FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, TreeInfo* treeInf
 }
 
 JS_REQUIRES_STACK void
-TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root)
+TraceRecorder::joinEdgesToEntry(VMFragment* peer_root)
 {
     if (fragment->kind != LoopTrace)
         return;
 
-    TypeMap typeMap;
-    Queue<unsigned> undemotes;
+    TypeMap typeMap(NULL);
+    Queue<unsigned> undemotes(NULL);
 
     for (VMFragment* peer = peer_root; peer; peer = (VMFragment*)peer->peer) {
         TreeInfo* ti = peer->getTreeInfo();
@@ -4096,11 +4077,11 @@ TraceRecorder::endLoop(VMSideExit* exit)
     debug_only_printf(LC_TMTreeVis, "TREEVIS ENDLOOP EXIT=%p\n", exit);
 
     VMFragment* root = (VMFragment*)fragment->root;
-    joinEdgesToEntry(traceMonitor->fragmento, getLoop(traceMonitor,
-                                                      root->ip,
-                                                      root->globalObj,
-                                                      root->globalShape,
-                                                      root->argc));
+    joinEdgesToEntry(getLoop(traceMonitor,
+                             root->ip,
+                             root->globalObj,
+                             root->globalShape,
+                             root->argc));
     debug_only_stmt(DumpPeerStability(traceMonitor, root->ip, root->globalObj,
                                       root->globalShape, root->argc);)
@@ -4228,7 +4209,7 @@ TraceRecorder::emitTreeCall(Fragment* inner, VMSideExit* exit)
      * Bug 502604 - It is illegal to extend from the outer typemap without
      * first extending from the inner. Make a new typemap here.
      */
-    TypeMap fullMap;
+    TypeMap fullMap(NULL);
     fullMap.add(exit->stackTypeMap(), exit->numStackSlots);
     BuildGlobalTypeMapFromInnerTree(fullMap, exit);
     import(ti, inner_sp_ins, exit->numStackSlots, fullMap.length() - exit->numStackSlots,
@@ -4423,12 +4404,6 @@ nanojit::LirNameMap::formatGuard(LIns *i, char *out)
 }
 #endif
 
-void
-nanojit::Fragment::onDestroy()
-{
-    delete (TreeInfo *)vmprivate;
-}
-
 static JS_REQUIRES_STACK bool
 DeleteRecorder(JSContext* cx)
 {
@@ -4441,7 +4416,7 @@ DeleteRecorder(JSContext* cx)
     /* If we ran out of memory, flush the code cache. */
     Assembler *assm = JS_TRACE_MONITOR(cx).assembler;
     if (assm->error() == OutOMem ||
-        js_OverfullFragmento(tm, tm->fragmento)) {
+        js_OverfullJITCache(tm, false)) {
         ResetJIT(cx);
         return false;
     }
@@ -4556,7 +4531,7 @@ TrashTree(JSContext* cx, Fragment* f)
     debug_only_print0(LC_TMTracer, "Trashing tree info.\n");
     TreeInfo* ti = (TreeInfo*)f->vmprivate;
     f->vmprivate = NULL;
-    f->releaseCode(JS_TRACE_MONITOR(cx).codeAlloc);
+    f->setCode(NULL);
     Fragment** data = ti->dependentTrees.data();
     unsigned length = ti->dependentTrees.length();
     for (unsigned n = 0; n < length; ++n)
@@ -4565,8 +4540,6 @@ TrashTree(JSContext* cx, Fragment* f)
     length = ti->linkedTrees.length();
     for (unsigned n = 0; n < length; ++n)
         TrashTree(cx, data[n]);
-    delete ti;
-
     JS_ASSERT(!f->code() && !f->vmprivate);
 }
 
 static int
@@ -4808,7 +4781,7 @@ RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer,
     f->root = f;
     f->lirbuf = tm->lirbuf;
 
-    if (tm->allocator->outOfMemory() || js_OverfullFragmento(tm, tm->fragmento)) {
+    if (tm->allocator->outOfMemory() || js_OverfullJITCache(tm, false)) {
         Backoff(cx, (jsbytecode*) f->root->ip);
         ResetJIT(cx);
         debug_only_print0(LC_TMTracer,
@@ -4819,7 +4792,7 @@ RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer,
     JS_ASSERT(!f->code() && !f->vmprivate);
 
     /* Set up the VM-private treeInfo structure for this fragment. */
-    TreeInfo* ti = new (&gc) TreeInfo(f, globalSlots);
+    TreeInfo* ti = new (*tm->allocator) TreeInfo(tm->allocator, f, globalSlots);
 
     /* Capture the coerced type of each active slot in the type map. */
     ti->typeMap.captureTypes(cx, globalObj, *globalSlots, 0 /* callDepth */);
@@ -4871,7 +4844,7 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, VMFragment** peerp)
 
     JS_ASSERT(from->code());
 
-    TypeMap typeMap;
+    TypeMap typeMap(NULL);
     FullMapFromExit(typeMap, exit);
     JS_ASSERT(typeMap.length() - exit->numStackSlots == from_ti->nGlobalTypes());
@@ -4906,7 +4879,6 @@ TreeInfo::removeUnstableExit(VMSideExit* exit)
     for (UnstableExit* uexit = this->unstableExits; uexit != NULL; uexit = uexit->next) {
         if (uexit->exit == exit) {
             *tail = uexit->next;
-            delete uexit;
             return *tail;
         }
         tail = &uexit->next;
@@ -4990,7 +4962,11 @@ AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, j
 
     Fragment* c;
     if (!(c = anchor->target)) {
-        c = JS_TRACE_MONITOR(cx).fragmento->createBranch(anchor, cx->fp->regs->pc);
+        Allocator& alloc = *JS_TRACE_MONITOR(cx).allocator;
+        c = new (alloc) Fragment(cx->fp->regs->pc);
+        c->kind = BranchTrace;
+        c->anchor = anchor->from->anchor;
+        c->root = anchor->from->root;
         debug_only_printf(LC_TMTreeVis, "TREEVIS CREATEBRANCH ROOT=%p FRAG=%p PC=%p FILE=\"%s\""
                           " LINE=%d ANCHOR=%p OFFS=%d\n",
                           f, c, cx->fp->regs->pc, cx->fp->script->filename,
@@ -5019,7 +4995,7 @@ AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, j
     unsigned stackSlots;
     unsigned ngslots;
     JSTraceType* typeMap;
-    TypeMap fullMap;
+    TypeMap fullMap(NULL);
     if (exitedFrom == NULL) {
         /*
          * If we are coming straight from a simple side exit, just use that
@@ -6182,8 +6158,7 @@ TraceRecorder::monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op)
     }
 
     if (tr->traceMonitor->allocator->outOfMemory() ||
-        js_OverfullFragmento(&JS_TRACE_MONITOR(cx),
-                             JS_TRACE_MONITOR(cx).fragmento)) {
+        js_OverfullJITCache(&JS_TRACE_MONITOR(cx), false)) {
         js_AbortRecording(cx, "no more memory");
         ResetJIT(cx);
         return JSRS_STOP;
@@ -6484,7 +6459,8 @@ void
 js_SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes)
 {
     JSTraceMonitor* tm = &JS_THREAD_DATA(cx)->traceMonitor;
-    JS_ASSERT(tm->fragmento && tm->reFragmento);
+    JS_ASSERT(tm->codeAlloc && tm->reCodeAlloc &&
+              tm->allocator && tm->reAllocator);
     if (bytes > 1 G)
         bytes = 1 G;
     if (bytes < 128 K)
@@ -6552,23 +6528,21 @@ js_InitJIT(JSTraceMonitor *tm)
     if (!tm->codeAlloc)
         tm->codeAlloc = new CodeAlloc();
 
-    if (!tm->assembler)
-        tm->assembler = new (&gc) Assembler(*tm->codeAlloc, alloc, core,
-                                            &js_LogController);
+    if (!tm->assembler) {
+        tm->assembler = new (alloc) Assembler(*tm->codeAlloc, alloc, core,
+                                              &js_LogController);
 
-    if (!tm->fragmento) {
         JS_ASSERT(!tm->reservedDoublePool);
-        Fragmento* fragmento = new (&gc) Fragmento(core, &js_LogController, 32, tm->codeAlloc);
-        verbose_only(fragmento->labels = new (alloc) LabelMap(alloc, &js_LogController);)
-        tm->fragmento = fragmento;
-        tm->lirbuf = new LirBuffer(alloc);
+        tm->lirbuf = new (alloc) LirBuffer(alloc);
 #ifdef DEBUG
-        tm->lirbuf->names = new (alloc) LirNameMap(alloc, tm->fragmento->labels);
+        tm->labels = new (alloc) LabelMap(alloc, &js_LogController);
+        tm->lirbuf->names = new (alloc) LirNameMap(alloc, tm->labels);
 #endif
         for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
             tm->globalStates[i].globalShape = -1;
             JS_ASSERT(!tm->globalStates[i].globalSlots);
-            tm->globalStates[i].globalSlots = new (&gc) SlotList();
+            tm->globalStates[i].globalSlots = new (alloc) SlotList(tm->allocator);
         }
         tm->reservedDoublePoolPtr = tm->reservedDoublePool = new jsval[MAX_NATIVE_STACK_SLOTS];
         memset(tm->vmfragments, 0, sizeof(tm->vmfragments));
@@ -6582,17 +6556,15 @@ js_InitJIT(JSTraceMonitor *tm)
     if (!tm->reCodeAlloc)
        tm->reCodeAlloc = new CodeAlloc();
 
-    if (!tm->reAssembler)
-        tm->reAssembler = new (&gc) Assembler(*tm->reCodeAlloc, reAlloc, core,
-                                              &js_LogController);
+    if (!tm->reAssembler) {
+        tm->reAssembler = new (reAlloc) Assembler(*tm->reCodeAlloc, reAlloc, core,
+                                                  &js_LogController);
 
-    if (!tm->reFragmento) {
-        Fragmento* fragmento = new (&gc) Fragmento(core, &js_LogController, 32, tm->reCodeAlloc);
-        verbose_only(fragmento->labels = new (reAlloc) LabelMap(reAlloc, &js_LogController);)
-        tm->reFragmento = fragmento;
-        tm->reLirBuf = new LirBuffer(reAlloc);
+        tm->reFragments = new (reAlloc) REHashMap(reAlloc);
+        tm->reLirBuf = new (reAlloc) LirBuffer(reAlloc);
 #ifdef DEBUG
-        tm->reLirBuf->names = new (reAlloc) LirNameMap(reAlloc, fragmento->labels);
+        tm->reLabels = new (reAlloc) LabelMap(reAlloc, &js_LogController);
+        tm->reLirBuf->names = new (reAlloc) LirNameMap(reAlloc, tm->reLabels);
 #endif
     }
 #if !defined XP_WIN
@@ -6619,44 +6591,24 @@ js_FinishJIT(JSTraceMonitor *tm)
                       jitstats.typeMapMismatchAtEntry, jitstats.globalShapeMismatchAtEntry);
     }
 #endif
-    if (tm->fragmento != NULL) {
+    if (tm->assembler != NULL) {
         JS_ASSERT(tm->reservedDoublePool);
-#ifdef DEBUG
-        tm->lirbuf->names = NULL;
-#endif
-        delete tm->lirbuf;
-        tm->lirbuf = NULL;
 
         if (tm->recordAttempts.ops)
             JS_DHashTableFinish(&tm->recordAttempts);
 
-        for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
-            VMFragment* f = tm->vmfragments[i];
-            while (f) {
-                VMFragment* next = f->next;
-                tm->fragmento->clearFragment(f);
-                f = next;
-            }
-            tm->vmfragments[i] = NULL;
-        }
-        delete tm->fragmento;
-        tm->fragmento = NULL;
-        for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
-            JS_ASSERT(tm->globalStates[i].globalSlots);
-            delete tm->globalStates[i].globalSlots;
-        }
+        memset(&tm->vmfragments[0], 0,
+               FRAGMENT_TABLE_SIZE * sizeof(VMFragment*));
 
         delete[] tm->reservedDoublePool;
         tm->reservedDoublePool = tm->reservedDoublePoolPtr = NULL;
     }
-    if (tm->reFragmento != NULL) {
-        delete tm->reLirBuf;
-        delete tm->reFragmento;
+    if (tm->reAssembler != NULL) {
+        delete tm->reAllocator;
         delete tm->reAssembler;
         delete tm->reCodeAlloc;
     }
     if (tm->assembler)
         delete tm->assembler;
     if (tm->codeAlloc)
         delete tm->codeAlloc;
     if (tm->allocator)
@@ -6703,50 +6655,6 @@ PurgeScriptRecordingAttempts(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 n
     return JS_DHASH_NEXT;
 }
 
-/* Call 'action' for each root fragment created for 'script'. */
-template<typename FragmentAction>
-static void
-IterateScriptFragments(JSContext* cx, JSScript* script, FragmentAction action)
-{
-    JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
-    for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
-        for (VMFragment **f = &(tm->vmfragments[i]); *f; ) {
-            VMFragment* frag = *f;
-            if (JS_UPTRDIFF(frag->ip, script->code) < script->length) {
-                /* This fragment is associated with the script. */
-                JS_ASSERT(frag->root == frag);
-                VMFragment* next = frag->next;
-                if (action(cx, tm, frag)) {
-                    debug_only_printf(LC_TMTracer,
-                                      "Disconnecting VMFragment %p "
-                                      "with ip %p, in range [%p,%p).\n",
-                                      (void*)frag, frag->ip, script->code,
-                                      script->code + script->length);
-                    *f = next;
-                } else {
-                    f = &((*f)->next);
-                }
-            } else {
-                f = &((*f)->next);
-            }
-        }
-    }
-}
-
-static bool
-TrashTreeAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag)
-{
-    for (Fragment *p = frag; p; p = p->peer)
-        TrashTree(cx, p);
-    return false;
-}
-
-static bool
-ClearFragmentAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag)
-{
-    tm->fragmento->clearFragment(frag);
-    return true;
-}
-
 JS_REQUIRES_STACK void
 js_PurgeScriptFragments(JSContext* cx, JSScript* script)
@@ -6756,18 +6664,34 @@ js_PurgeScriptFragments(JSContext* cx, JSScript* script)
     debug_only_printf(LC_TMTracer,
                       "Purging fragments for JSScript %p.\n", (void*)script);
 
-    /*
-     * TrashTree trashes dependent trees recursively, so we must do all the trashing
-     * before clearing in order to avoid calling TrashTree with a deleted fragment.
-     */
-    IterateScriptFragments(cx, script, TrashTreeAction);
-    IterateScriptFragments(cx, script, ClearFragmentAction);
+    JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
+    for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
+        for (VMFragment **f = &(tm->vmfragments[i]); *f; ) {
+            VMFragment* frag = *f;
+            if (JS_UPTRDIFF(frag->ip, script->code) < script->length) {
+                /* This fragment is associated with the script. */
+                debug_only_printf(LC_TMTracer,
+                                  "Disconnecting VMFragment %p "
+                                  "with ip %p, in range [%p,%p).\n",
+                                  (void*)frag, frag->ip, script->code,
+                                  script->code + script->length);
+
+                JS_ASSERT(frag->root == frag);
+                VMFragment* next = frag->next;
+                for (Fragment *p = frag; p; p = p->peer)
+                    TrashTree(cx, p);
+                *f = next;
+            } else {
+                f = &((*f)->next);
+            }
+        }
+    }
 
     JS_DHashTableEnumerate(&(tm->recordAttempts), PurgeScriptRecordingAttempts, script);
 }
 
 bool
-js_OverfullFragmento(JSTraceMonitor* tm, Fragmento *fragmento)
+js_OverfullJITCache(JSTraceMonitor* tm, bool reCache)
 {
     /*
      * You might imagine the outOfMemory flag on the allocator is sufficient
|
||||
* modeling the two forms of memory exhaustion *separately* for the
|
||||
* time being: condition 1 is handled by the outOfMemory flag inside
|
||||
* nanojit, and condition 2 is being handled independently *here*. So
|
||||
* we construct our fragmentos to use all available memory they like,
|
||||
* we construct our allocators to use all available memory they like,
|
||||
* and only report outOfMemory to us when there is literally no OS memory
|
||||
* left. Merely purging our cache when we hit our highwater mark is
|
||||
* handled by the (few) callers of this function.
|
||||
@ -6806,7 +6730,7 @@ js_OverfullFragmento(JSTraceMonitor* tm, Fragmento *fragmento)
|
||||
jsuint maxsz = tm->maxCodeCacheBytes;
|
||||
VMAllocator *allocator = tm->allocator;
|
||||
CodeAlloc *codeAlloc = tm->codeAlloc;
|
||||
if (fragmento == tm->reFragmento) {
|
||||
if (reCache) {
|
||||
/*
|
||||
* At the time of making the code cache size configurable, we were using
|
||||
* 16 MB for the main code cache and 1 MB for the regular expression code
|
||||
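js_OverfullJITCache keeps the old contract of js_OverfullFragmento: it only reports that the selected cache (regexp when reCache is true, otherwise the trace cache) has passed its highwater mark, and purging remains the caller's job. A plausible shape for the core comparison, assuming byte counters on the two allocators (the exact accounting lies outside this hunk):

    static bool OverfullJITCache(size_t lirBytes, size_t codeBytes, size_t maxBytes) {
        // Only reports the condition; callers decide when to flush.
        return lirBytes + codeBytes > maxBytes;
    }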
@@ -10422,9 +10346,15 @@ TraceRecorder::record_JSOP_SETELEM()
         // builtin for every storage type. Special case for integers though,
         // since they are so common.
         LIns* res_ins;
-        if (isNumber(v) && isPromoteInt(v_ins)) {
-            LIns* args[] = { ::demote(lir, v_ins), idx_ins, obj_ins, cx_ins };
-            res_ins = lir->insCall(&js_Array_dense_setelem_int_ci, args);
+        LIns* args[] = { NULL, idx_ins, obj_ins, cx_ins };
+        if (isNumber(v)) {
+            if (isPromoteInt(v_ins)) {
+                args[0] = ::demote(lir, v_ins);
+                res_ins = lir->insCall(&js_Array_dense_setelem_int_ci, args);
+            } else {
+                args[0] = v_ins;
+                res_ins = lir->insCall(&js_Array_dense_setelem_double_ci, args);
+            }
         } else {
             LIns* args[] = { box_jsval(v, v_ins), idx_ins, obj_ins, cx_ins };
             res_ins = lir->insCall(&js_Array_dense_setelem_ci, args);
@@ -56,10 +56,11 @@
 #endif
 
 template <typename T>
-class Queue : public avmplus::GCObject {
+class Queue {
     T* _data;
     unsigned _len;
     unsigned _max;
+    nanojit::Allocator* alloc;
 
 public:
     void ensure(unsigned size) {
@@ -67,23 +68,34 @@ public:
             _max = 16;
         while (_max < size)
             _max <<= 1;
-        _data = (T*)realloc(_data, _max * sizeof(T));
+        if (alloc) {
+            T* tmp = new (*alloc) T[_max];
+            memcpy(tmp, _data, _len * sizeof(T));
+            _data = tmp;
+        } else {
+            _data = (T*)realloc(_data, _max * sizeof(T));
+        }
 #if defined(DEBUG)
         memset(&_data[_len], 0xcd, _max - _len);
 #endif
     }
 
-    Queue(unsigned max = 16) {
+    Queue(nanojit::Allocator* alloc, unsigned max = 16)
+        : alloc(alloc)
+    {
         this->_max = max;
         this->_len = 0;
         if (max)
-            this->_data = (T*)malloc(max * sizeof(T));
+            this->_data = (alloc ?
+                           new (*alloc) T[max] :
+                           (T*)malloc(max * sizeof(T)));
         else
             this->_data = NULL;
     }
 
     ~Queue() {
-        free(_data);
+        if (!alloc)
+            free(_data);
     }
 
     bool contains(T a) {
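With the new constructor parameter, a Queue either grows out of a nanojit::Allocator arena (never freeing, since the arena reclaims everything at once) or falls back to malloc/realloc and owns its buffer. A small usage sketch, under the assumption that Queue exposes the add() member this patch uses elsewhere (e.g. fullMap.add) but does not show:

    void queueModes(nanojit::Allocator* arena) {
        // Arena mode: elements live until the arena is reset; the
        // destructor is a no-op.
        Queue<int> scoped(arena);
        scoped.add(1);

        // Heap mode: a NULL allocator selects malloc/realloc, and the
        // destructor frees the buffer.
        Queue<int> owned(NULL);
        owned.add(2);
    }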
@@ -311,6 +323,7 @@ typedef Queue<uint16> SlotList;
 
 class TypeMap : public Queue<JSTraceType> {
 public:
+    TypeMap(nanojit::Allocator* alloc) : Queue<JSTraceType>(alloc) {}
     JS_REQUIRES_STACK void captureTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned callDepth);
     JS_REQUIRES_STACK void captureMissingGlobalTypes(JSContext* cx, JSObject* globalObj, SlotList& slots,
                                                      unsigned stackSlots);
@@ -411,7 +424,7 @@ struct VMSideExit : public nanojit::SideExit
     }
 };
 
-struct VMAllocator : public nanojit::Allocator
+class VMAllocator : public nanojit::Allocator
 {
 
 public:
@@ -428,6 +441,7 @@ public:
 
     bool mOutOfMemory;
+    size_t mSize;
 
     /*
     * FIXME: Area the LIR spills into if we encounter an OOM mid-way
     * through compilation; we must check mOutOfMemory before we run out
@@ -438,6 +452,36 @@ public:
     uintptr_t mReserve[0x10000];
 };
 
+struct REHashKey {
+    size_t re_length;
+    uint16 re_flags;
+    const jschar* re_chars;
+
+    REHashKey(size_t re_length, uint16 re_flags, const jschar *re_chars)
+        : re_length(re_length)
+        , re_flags(re_flags)
+        , re_chars(re_chars)
+    {}
+
+    bool operator==(const REHashKey& other) const
+    {
+        return ((this->re_length == other.re_length) &&
+                (this->re_flags == other.re_flags) &&
+                !memcmp(this->re_chars, other.re_chars,
+                        this->re_length * sizeof(jschar)));
+    }
+};
+
+struct REHashFn {
+    static size_t hash(const REHashKey& k) {
+        return
+            k.re_length +
+            k.re_flags +
+            nanojit::murmurhash(k.re_chars, k.re_length * sizeof(jschar));
+    }
+};
+
 struct FrameInfo {
     JSObject* callee;   // callee function object
     JSObject* block;    // caller block chain head
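REHashKey and REHashFn split identity from hashing in the usual way: the key defines equality, the hasher maps a key to a bucket, and nanojit's HashMap is parameterized on both. The invariant the pair must uphold, shown as a standalone check with hypothetical values:

    #include <cassert>

    void rehashKeyContract() {
        const jschar src[] = { 'a', 'b', 'c' };
        REHashKey k1(3, 0, src);
        REHashKey k2(3, 0, src);
        // Equal keys must hash equally; HashMap relies on this invariant.
        assert(k1 == k2);
        assert(REHashFn::hash(k1) == REHashFn::hash(k2));
    }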
@@ -481,7 +525,7 @@ struct UnstableExit
     UnstableExit* next;
 };
 
-class TreeInfo MMGC_SUBCLASS_DECL {
+class TreeInfo {
 public:
     nanojit::Fragment* const fragment;
     JSScript* script;
@@ -507,19 +551,25 @@ public:
     uintN treePCOffset;
 #endif
 
-    TreeInfo(nanojit::Fragment* _fragment,
+    TreeInfo(nanojit::Allocator* alloc,
+             nanojit::Fragment* _fragment,
              SlotList* _globalSlots)
-      : fragment(_fragment),
-        script(NULL),
-        maxNativeStackSlots(0),
-        nativeStackBase(0),
-        maxCallDepth(0),
-        nStackTypes(0),
-        globalSlots(_globalSlots),
-        branchCount(0),
-        unstableExits(NULL)
-    {}
-    ~TreeInfo();
+      : fragment(_fragment),
+        script(NULL),
+        maxNativeStackSlots(0),
+        nativeStackBase(0),
+        maxCallDepth(0),
+        typeMap(alloc),
+        nStackTypes(0),
+        globalSlots(_globalSlots),
+        dependentTrees(alloc),
+        linkedTrees(alloc),
+        branchCount(0),
+        sideExits(alloc),
+        unstableExits(NULL),
+        gcthings(alloc),
+        sprops(alloc)
+    {}
 
     inline unsigned nGlobalTypes() {
         return typeMap.length() - nStackTypes;
@@ -935,9 +985,7 @@ public:
     JS_REQUIRES_STACK bool closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus &consensus);
-    JS_REQUIRES_STACK void endLoop();
+    JS_REQUIRES_STACK void endLoop(VMSideExit* exit);
-    JS_REQUIRES_STACK void joinEdgesToEntry(nanojit::Fragmento* fragmento,
-                                            VMFragment* peer_root);
-    void blacklist() { fragment->blacklist(); }
+    JS_REQUIRES_STACK void joinEdgesToEntry(VMFragment* peer_root);
     JS_REQUIRES_STACK void adjustCallerTypes(nanojit::Fragment* f);
     JS_REQUIRES_STACK nanojit::Fragment* findNestedCompatiblePeer(nanojit::Fragment* f);
     JS_REQUIRES_STACK void prepareTreeCall(nanojit::Fragment* inner);
@@ -945,7 +993,7 @@ public:
     unsigned getCallDepth() const;
     void pushAbortStack();
     void popAbortStack();
-    void removeFragmentoReferences();
+    void removeFragmentReferences();
     void deepAbort();
 
     JS_REQUIRES_STACK JSRecordingStatus record_EnterFrame();
@@ -1022,7 +1070,7 @@ extern void
 js_PurgeScriptFragments(JSContext* cx, JSScript* script);
 
 extern bool
-js_OverfullFragmento(JSTraceMonitor* tm, nanojit::Fragmento *frago);
+js_OverfullJITCache(JSTraceMonitor* tm, bool reCache);
 
 extern void
 js_PurgeJITOracle();
@@ -149,7 +149,7 @@ namespace nanojit
      * value. Temporary values can be placed into the AR as method calls
      * are issued. Also LIR_alloc instructions will consume space.
      */
-    class Assembler MMGC_SUBCLASS_DECL
+    class Assembler
     {
         friend class VerboseBlockReader;
         public:
@@ -48,150 +48,6 @@ namespace nanojit
 
     using namespace avmplus;
 
-    static uint32_t calcSaneCacheSize(uint32_t in)
-    {
-        if (in < uint32_t(NJ_LOG2_PAGE_SIZE)) return NJ_LOG2_PAGE_SIZE;  // at least 1 page
-        if (in > 32) return 32;  // 4GB should be enough for anyone
-        return in;
-    }
-
-    /**
-     * This is the main control center for creating and managing fragments.
-     */
-    Fragmento::Fragmento(AvmCore* core, LogControl* logc, uint32_t cacheSizeLog2, CodeAlloc* codeAlloc)
-        :
-#ifdef NJ_VERBOSE
-          enterCounts(NULL),
-          mergeCounts(NULL),
-          labels(NULL),
-#endif
-          _core(core),
-          _codeAlloc(codeAlloc),
-          _frags(core->GetGC()),
-          _max_pages(1 << (calcSaneCacheSize(cacheSizeLog2) - NJ_LOG2_PAGE_SIZE)),
-          _pagesGrowth(1)
-    {
-#ifdef _DEBUG
-        {
-            // XXX These belong somewhere else, but I can't find the
-            // right location right now.
-            NanoStaticAssert((LIR_lt ^ 3) == LIR_ge);
-            NanoStaticAssert((LIR_le ^ 3) == LIR_gt);
-            NanoStaticAssert((LIR_ult ^ 3) == LIR_uge);
-            NanoStaticAssert((LIR_ule ^ 3) == LIR_ugt);
-            NanoStaticAssert((LIR_flt ^ 3) == LIR_fge);
-            NanoStaticAssert((LIR_fle ^ 3) == LIR_fgt);
-
-            /* Opcodes must be strictly increasing without holes. */
-            uint32_t count = 0;
-#define OPDEF(op, number, operands, repkind) \
-            NanoAssertMsg(LIR_##op == count++, "misnumbered opcode");
-#define OPDEF64(op, number, operands, repkind) \
-            OPDEF(op, number, operands, repkind)
-#include "LIRopcode.tbl"
-#undef OPDEF
-#undef OPDEF64
-        }
-#endif
-
-#ifdef MEMORY_INFO
-        _allocList.set_meminfo_name("Fragmento._allocList");
-#endif
-        NanoAssert(_max_pages > _pagesGrowth);  // shrink growth if needed
-        verbose_only( enterCounts = NJ_NEW(core->gc, BlockHist)(core->gc); )
-        verbose_only( mergeCounts = NJ_NEW(core->gc, BlockHist)(core->gc); )
-
-        memset(&_stats, 0, sizeof(_stats));
-    }
-
-    Fragmento::~Fragmento()
-    {
-        clearFrags();
-#if defined(NJ_VERBOSE)
-        NJ_DELETE(enterCounts);
-        NJ_DELETE(mergeCounts);
-#endif
-    }
-
-    // Clear the fragment. This *does not* remove the fragment from the
-    // map--the caller must take care of this.
-    void Fragmento::clearFragment(Fragment* f)
-    {
-        Fragment *peer = f->peer;
-        while (peer) {
-            Fragment *next = peer->peer;
-            peer->releaseTreeMem(_codeAlloc);
-            NJ_DELETE(peer);
-            peer = next;
-        }
-        f->releaseTreeMem(_codeAlloc);
-        NJ_DELETE(f);
-    }
-
-    void Fragmento::clearFrags()
-    {
-        while (!_frags.isEmpty()) {
-            clearFragment(_frags.removeLast());
-        }
-
-        verbose_only( enterCounts->clear();)
-        verbose_only( mergeCounts->clear();)
-        verbose_only( _stats.flushes++ );
-        verbose_only( _stats.compiles = 0 );
-        //nj_dprintf("Fragmento.clearFrags %d free pages of %d\n", _stats.freePages, _stats.pages);
-    }
-
-    AvmCore* Fragmento::core()
-    {
-        return _core;
-    }
-
-    Fragment* Fragmento::getAnchor(const void* ip)
-    {
-        Fragment *f = newFrag(ip);
-        Fragment *p = _frags.get(ip);
-        if (p) {
-            f->first = p;
-            /* append at the end of the peer list */
-            Fragment* next;
-            while ((next = p->peer) != NULL)
-                p = next;
-            p->peer = f;
-        } else {
-            f->first = f;
-            _frags.put(ip, f);  /* this is the first fragment */
-        }
-        f->anchor = f;
-        f->root = f;
-        f->kind = LoopTrace;
-        verbose_only( addLabel(f, "T", _frags.size()); )
-        return f;
-    }
-
-    Fragment* Fragmento::getLoop(const void* ip)
-    {
-        return _frags.get(ip);
-    }
-
-#ifdef NJ_VERBOSE
-    void Fragmento::addLabel(Fragment *f, const char *prefix, int id)
-    {
-        char fragname[20];
-        sprintf(fragname,"%s%d", prefix, id);
-        labels->add(f, sizeof(Fragment), 0, fragname);
-    }
-#endif
-
-    Fragment *Fragmento::createBranch(SideExit* exit, const void* ip)
-    {
-        Fragment *f = newBranch(exit->from, ip);
-        f->kind = BranchTrace;
-        f->treeBranches = f->root->treeBranches;
-        f->root->treeBranches = f;
-        return f;
-    }
-
     //
     // Fragment
     //
@@ -226,82 +82,13 @@ namespace nanojit
           guardCount(0),
           xjumpCount(0),
           recordAttempts(0),
-          blacklistLevel(0),
           fragEntry(NULL),
           loopEntry(NULL),
           vmprivate(NULL),
           codeList(0),
           _code(NULL),
           _hits(0)
     {
     }
 
-    Fragment::~Fragment()
-    {
-        onDestroy();
-    }
-
-    void Fragment::blacklist()
-    {
-        blacklistLevel++;
-        _hits = -(1<<blacklistLevel);
-    }
-
-    Fragment *Fragmento::newFrag(const void* ip)
-    {
-        GC *gc = _core->gc;
-        Fragment *f = NJ_NEW(gc, Fragment)(ip);
-        f->blacklistLevel = 5;
-        return f;
-    }
-
-    Fragment *Fragmento::newBranch(Fragment *from, const void* ip)
-    {
-        Fragment *f = newFrag(ip);
-        f->anchor = from->anchor;
-        f->root = from->root;
-        f->xjumpCount = from->xjumpCount;
-        /*// prepend
-        f->nextbranch = from->branches;
-        from->branches = f;*/
-        // append
-        if (!from->branches) {
-            from->branches = f;
-        } else {
-            Fragment *p = from->branches;
-            while (p->nextbranch != 0)
-                p = p->nextbranch;
-            p->nextbranch = f;
-        }
-        return f;
-    }
-
-    void Fragment::releaseLirBuffer()
-    {
-        lastIns = 0;
-    }
-
-    void Fragment::releaseCode(CodeAlloc *codeAlloc)
-    {
-        _code = 0;
-        codeAlloc->freeAll(codeList);
-    }
-
-    void Fragment::releaseTreeMem(CodeAlloc *codeAlloc)
-    {
-        releaseLirBuffer();
-        releaseCode(codeAlloc);
-
-        // now do it for all branches
-        Fragment* branch = branches;
-        while (branch)
-        {
-            Fragment* next = branch->nextbranch;
-            branch->releaseTreeMem(codeAlloc);  // @todo safer here to recurse in case we support nested trees
-            NJ_DELETE(branch);
-            branch = next;
-        }
-    }
 #endif /* FEATURE_NANOJIT */
 }
@@ -46,74 +46,6 @@
 namespace nanojit
 {
     struct GuardRecord;
     class Assembler;
 
-    typedef avmplus::GCSortedMap<const void*, uint32_t, avmplus::LIST_NonGCObjects> BlockSortedMap;
-    class BlockHist: public BlockSortedMap
-    {
-    public:
-        BlockHist(avmplus::GC*gc) : BlockSortedMap(gc)
-        {
-        }
-        uint32_t count(const void *p) {
-            uint32_t c = 1+get(p);
-            put(p, c);
-            return c;
-        }
-    };
-
-    struct fragstats;
-    /*
-     *
-     * This is the main control center for creating and managing fragments.
-     */
-    class Fragmento : public avmplus::GCFinalizedObject
-    {
-        public:
-            Fragmento(AvmCore* core, LogControl* logc, uint32_t cacheSizeLog2, CodeAlloc *codeAlloc);
-            ~Fragmento();
-
-            AvmCore* core();
-
-            Fragment* getLoop(const void* ip);
-            Fragment* getAnchor(const void* ip);
-            // Remove one fragment. The caller is responsible for making sure
-            // that this does not destroy any resources shared with other
-            // fragments (such as a LirBuffer or this fragment itself as a
-            // jump target).
-            void clearFrags();  // clear all fragments from the cache
-            Fragment* createBranch(SideExit *exit, const void* ip);
-            Fragment* newFrag(const void* ip);
-            Fragment* newBranch(Fragment *from, const void* ip);
-
-            verbose_only( uint32_t pageCount(); )
-            verbose_only( void addLabel(Fragment* f, const char *prefix, int id); )
-
-            // stats
-            struct
-            {
-                uint32_t pages;  // pages consumed
-                uint32_t flushes, ilsize, abcsize, compiles, totalCompiles;
-            }
-            _stats;
-
-            verbose_only( DWB(BlockHist*) enterCounts; )
-            verbose_only( DWB(BlockHist*) mergeCounts; )
-            verbose_only( LabelMap* labels; )
-
-#ifdef AVMPLUS_VERBOSE
-            void drawTrees(char *fileName);
-#endif
-
-            void clearFragment(Fragment *f);
-        private:
-            AvmCore* _core;
-            CodeAlloc* _codeAlloc;
-            FragmentMap _frags;  /* map from ip -> Fragment ptr */
-
-            const uint32_t _max_pages;
-            uint32_t _pagesGrowth;
-    };
-
     enum TraceKind {
         LoopTrace,
@@ -128,23 +60,16 @@ namespace nanojit
      * It may turn out that that this arrangement causes too much traffic
      * between d and i-caches and that we need to carve up the structure differently.
      */
-    class Fragment : public avmplus::GCFinalizedObject
+    class Fragment
     {
         public:
             Fragment(const void*);
-            ~Fragment();
 
             NIns* code() { return _code; }
             void setCode(NIns* codee) { _code = codee; }
             int32_t& hits() { return _hits; }
-            void blacklist();
             bool isBlacklisted() { return _hits < 0; }
-            void releaseLirBuffer();
-            void releaseCode(CodeAlloc *alloc);
-            void releaseTreeMem(CodeAlloc *alloc);
             bool isAnchor() { return anchor == this; }
             bool isRoot() { return root == this; }
-            void onDestroy();
 
             verbose_only( uint32_t _called; )
             verbose_only( uint32_t _native; )
@@ -176,11 +101,9 @@ namespace nanojit
             uint32_t guardCount;
             uint32_t xjumpCount;
             uint32_t recordAttempts;
-            int32_t blacklistLevel;
             NIns* fragEntry;
             NIns* loopEntry;
             void* vmprivate;
             CodeList* codeList;
 
         private:
             NIns* _code;  // ptr to start of code
@@ -2062,10 +2062,6 @@ namespace nanojit
             frag->fragEntry = 0;
             frag->loopEntry = 0;
         }
-        else
-        {
-            CodeAlloc::moveAll(frag->codeList, assm->codeList);
-        }
 
         /* BEGIN decorative postamble */
         verbose_only( if (anyVerb) {
@@ -781,8 +781,6 @@ namespace nanojit
     LIns* FASTCALL callArgN(LInsp i, uint32_t n);
     extern const uint8_t operandCount[];
 
-    class Fragmento;  // @todo remove this ; needed for minbuild for some reason?!? Should not be compiling this code at all
-
     // make it a GCObject so we can explicitly delete it early
     class LirWriter : public GCObject
     {