mirror of https://gitlab.winehq.org/wine/wine-gecko.git
synced 2024-09-13 09:24:08 -07:00
Cleanup GuardRecord, SideExit, and InterpState and extract VM-dependent fields (463313, r=danderson).
This commit is contained in:
parent
1efbaf7478
commit
e609f0bb95
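
In outline, the patch splits the old all-in-one side-exit record in two: nanojit keeps only the fields the JIT core itself needs, and the tracer layers its VM-specific bookkeeping on top as a subclass. Here is a condensed sketch of the resulting shape, abridged from the hunks below (uint32 is spelled uint32_t so the snippet stands alone, and addGuard() is omitted):

#include <stdint.h>

namespace nanojit {
    class Fragment;
    struct GuardRecord;

    // After the cleanup: only what the JIT core itself touches.
    struct SideExit {
        GuardRecord* guards;
        Fragment*    from;
        Fragment*    target;
    };
}

enum ExitType {
    BRANCH_EXIT, LOOP_EXIT, NESTED_EXIT, MISMATCH_EXIT,
    OOM_EXIT, OVERFLOW_EXIT, UNSTABLE_LOOP_EXIT, TIMEOUT_EXIT
};

// The VM-dependent fields move into a subclass on the tracer side.
struct VMSideExit : public nanojit::SideExit {
    intptr_t ip_adj;
    intptr_t sp_adj;
    intptr_t rp_adj;
    int32_t  calldepth;
    uint32_t numGlobalSlots;
    uint32_t numStackSlots;
    uint32_t numStackSlotsBelowCurrentFrame;
    ExitType exitType;
};
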
@@ -265,7 +265,7 @@ js_CallTree(InterpState* state, Fragment* f)
 #else
     rec = u.func(state, NULL);
 #endif
-    SideExit* lr = rec->exit;
+    VMSideExit* lr = (VMSideExit*)rec->exit;
 
     if (lr->exitType == NESTED_EXIT) {
         /* This only occurs once a tree call guard mismatches and we unwind the tree call stack.
@@ -43,6 +43,7 @@
 #ifdef JS_TRACER
 
 #include "nanojit/nanojit.h"
+#include "jstracer.h"
 
 enum JSTNErrType { INFALLIBLE, FAIL_NULL, FAIL_NEG, FAIL_VOID, FAIL_JSVAL };
 enum { JSTN_ERRTYPE_MASK = 7, JSTN_MORE = 8 };
@@ -160,8 +161,8 @@ struct JSTraceableNative {
 #define _JS_CTYPE_OBJECT_FAIL_VOID  _JS_CTYPE(JSObject *, _JS_PTR, --, --, FAIL_VOID)
 #define _JS_CTYPE_REGEXP            _JS_CTYPE(JSObject *, _JS_PTR, "","r", INFALLIBLE)
 #define _JS_CTYPE_SCOPEPROP         _JS_CTYPE(JSScopeProperty *, _JS_PTR, --, --, INFALLIBLE)
-#define _JS_CTYPE_SIDEEXIT          _JS_CTYPE(nanojit::SideExit *, _JS_PTR, --, --, INFALLIBLE)
-#define _JS_CTYPE_INTERPSTATE       _JS_CTYPE(avmplus::InterpState *, _JS_PTR, --, --, INFALLIBLE)
+#define _JS_CTYPE_SIDEEXIT          _JS_CTYPE(SideExit *, _JS_PTR, --, --, INFALLIBLE)
+#define _JS_CTYPE_INTERPSTATE       _JS_CTYPE(InterpState *, _JS_PTR, --, --, INFALLIBLE)
 #define _JS_CTYPE_FRAGMENT          _JS_CTYPE(nanojit::Fragment *, _JS_PTR, --, --, INFALLIBLE)
 
 #define _JS_EXPAND(tokens) tokens
@@ -113,7 +113,7 @@ class TypeMap;
  * JS_THREADSAFE) has an associated trace monitor that keeps track of loop
  * frequencies for all JavaScript code loaded into that runtime.
  */
-typedef struct JSTraceMonitor {
+struct JSTraceMonitor {
     /*
      * Flag set when running (or recording) JIT-compiled code. This prevents
      * both interpreter activation and last-ditch garbage collection when up
@@ -133,7 +133,7 @@ typedef struct JSTraceMonitor {
      * a distinct compiler but needs to be managed in exactly the same
      * way as the real tracing Fragmento. */
     CLS(nanojit::Fragmento) reFragmento;
-} JSTraceMonitor;
+};
 
 #ifdef JS_TRACER
 # define JS_ON_TRACE(cx) (JS_TRACE_MONITOR(cx).onTrace)
@@ -979,9 +979,9 @@ mergeTypeMaps(uint8** partial, unsigned* plength, uint8* complete, unsigned clen
 static void
 js_TrashTree(JSContext* cx, Fragment* f);
 
-TraceRecorder::TraceRecorder(JSContext* cx, SideExit* _anchor, Fragment* _fragment,
+TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment,
         TreeInfo* ti, unsigned ngslots, uint8* globalTypeMap, uint8* stackTypeMap,
-        SideExit* innermostNestedGuard, Fragment* outerToBlacklist)
+        VMSideExit* innermostNestedGuard, Fragment* outerToBlacklist)
 {
     JS_ASSERT(!_fragment->vmprivate && ti);
 
@@ -992,8 +992,7 @@ TraceRecorder::TraceRecorder(JSContext* cx, SideExit* _anchor, Fragment* _fragme
     this->fragment = _fragment;
     this->lirbuf = _fragment->lirbuf;
     this->treeInfo = ti;
-    this->callDepth = _fragment->calldepth;
-    JS_ASSERT(!_anchor || _anchor->calldepth == _fragment->calldepth);
+    this->callDepth = _anchor ? _anchor->calldepth : 0;
     this->atoms = cx->fp->script->atomMap.vector;
     this->deepAborted = false;
     this->applyingArguments = false;
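
Note: callDepth is now seeded from the anchor exit (or zero for a brand-new tree) rather than from a per-Fragment field; the matching removal of the Fragment calldepth member appears in the nanojit hunks further down.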
@@ -1829,18 +1828,18 @@ TraceRecorder::snapshot(ExitType exitType)
 
     /* Check if we already have a matching side exit. If so use that side exit structure,
        otherwise we have to create our own. */
-    SideExit** exits = treeInfo->sideExits.data();
+    VMSideExit** exits = treeInfo->sideExits.data();
     unsigned nexits = treeInfo->sideExits.length();
     if (exitType == LOOP_EXIT) {
         for (unsigned n = 0; n < nexits; ++n) {
-            SideExit* e = exits[n];
+            VMSideExit* e = exits[n];
             if (e->ip_adj == ip_adj &&
                 !memcmp(getTypeMap(exits[n]), typemap, typemap_size)) {
                 LIns* data = lir_buf_writer->skip(sizeof(GuardRecord));
                 GuardRecord* rec = (GuardRecord*)data->payload();
                 /* setup guard record structure with shared side exit */
                 memset(rec, 0, sizeof(GuardRecord));
-                SideExit* exit = exits[n];
+                VMSideExit* exit = exits[n];
                 rec->exit = exit;
                 exit->addGuard(rec);
                 AUDIT(mergedLoopExits);
@@ -1851,15 +1850,15 @@ TraceRecorder::snapshot(ExitType exitType)
 
     /* We couldn't find a matching side exit, so create our own side exit structure. */
     LIns* data = lir_buf_writer->skip(sizeof(GuardRecord) +
-                                      sizeof(SideExit) +
+                                      sizeof(VMSideExit) +
                                       (stackSlots + ngslots) * sizeof(uint8));
     GuardRecord* rec = (GuardRecord*)data->payload();
-    SideExit* exit = (SideExit*)(rec + 1);
+    VMSideExit* exit = (VMSideExit*)(rec + 1);
     /* setup guard record structure */
     memset(rec, 0, sizeof(GuardRecord));
     rec->exit = exit;
     /* setup side exit structure */
-    memset(exit, 0, sizeof(SideExit));
+    memset(exit, 0, sizeof(VMSideExit));
     exit->from = fragment;
     exit->calldepth = callDepth;
     exit->numGlobalSlots = ngslots;
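
The allocation above packs three records into a single skip() block in the LIR buffer. A minimal, self-contained sketch of that layout, using stub stand-in types rather than the real nanojit definitions:

#include <cstdint>

// Stub types, only to make the pointer arithmetic concrete.
struct GuardRecord { void* exit; };
struct VMSideExit  { intptr_t ip_adj; };

// Layout carved out by snapshot():
//   [ GuardRecord ][ VMSideExit ][ typemap: stackSlots + ngslots bytes ]
uint8_t* layoutDemo(void* payload)
{
    GuardRecord* rec  = static_cast<GuardRecord*>(payload);
    VMSideExit*  exit = reinterpret_cast<VMSideExit*>(rec + 1);  // directly after rec
    rec->exit = exit;
    // getTypeMap() in this patch is exactly this: the bytes after the exit record.
    return reinterpret_cast<uint8_t*>(exit + 1);
}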
@@ -2121,7 +2120,8 @@ TraceRecorder::compile(Fragmento* fragmento)
 }
 
 static bool
-js_JoinPeersIfCompatible(Fragmento* frago, Fragment* stableFrag, TreeInfo* stableTree, SideExit* exit)
+js_JoinPeersIfCompatible(Fragmento* frago, Fragment* stableFrag, TreeInfo* stableTree,
+                         VMSideExit* exit)
 {
     JS_ASSERT(exit->numStackSlots == stableTree->stackTypeMap.length());
     /* Must have a matching type unstable exit. */
@@ -2146,13 +2146,13 @@ TraceRecorder::closeLoop(Fragmento* fragmento, bool& demote, unsigned *demotes)
     bool stable;
     LIns* exitIns;
     Fragment* peer;
-    SideExit* exit;
+    VMSideExit* exit;
     Fragment* peer_root;
 
     demote = false;
 
     exitIns = snapshot(UNSTABLE_LOOP_EXIT);
-    exit = ((GuardRecord*)exitIns->payload())->exit;
+    exit = (VMSideExit*)((GuardRecord*)exitIns->payload())->exit;
     peer_root = fragmento->getLoop(fragment->root->ip);
     JS_ASSERT(peer_root != NULL);
     stable = deduceTypeStability(peer_root, &peer, demotes);
@@ -2352,7 +2352,7 @@ TraceRecorder::prepareTreeCall(Fragment* inner)
 
 /* Record a call to an inner tree. */
 void
-TraceRecorder::emitTreeCall(Fragment* inner, SideExit* exit)
+TraceRecorder::emitTreeCall(Fragment* inner, VMSideExit* exit)
 {
     TreeInfo* ti = (TreeInfo*)inner->vmprivate;
     /* Invoke the inner tree. */
@@ -2445,10 +2445,11 @@ TraceRecorder::fuseIf(jsbytecode* pc, bool cond, LIns* x)
 int
 nanojit::StackFilter::getTop(LInsp guard)
 {
+    VMSideExit* e = (VMSideExit*)guard->record()->exit;
     if (sp == lirbuf->sp)
-        return guard->record()->exit->sp_adj;
+        return e->sp_adj;
     JS_ASSERT(sp == lirbuf->rp);
-    return guard->record()->exit->rp_adj;
+    return e->rp_adj;
 }
 
 #if defined NJ_VERBOSE
@@ -2456,9 +2457,9 @@ void
 nanojit::LirNameMap::formatGuard(LIns *i, char *out)
 {
     uint32_t ip;
-    SideExit *x;
+    VMSideExit *x;
 
-    x = (SideExit *)i->record()->exit;
+    x = (VMSideExit *)i->record()->exit;
     ip = intptr_t(x->from->ip) + x->ip_adj;
     sprintf(out,
             "%s: %s %s -> %s sp%+ld rp%+ld",
@@ -2514,9 +2515,9 @@ js_CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj
 }
 
 static bool
-js_StartRecorder(JSContext* cx, SideExit* anchor, Fragment* f, TreeInfo* ti,
-                 unsigned ngslots, uint8* globalTypeMap, uint8* stackTypeMap,
-                 SideExit* expectedInnerExit, Fragment* outer)
+js_StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti,
+                 unsigned ngslots, uint8* globalTypeMap, uint8* stackTypeMap,
+                 VMSideExit* expectedInnerExit, Fragment* outer)
 {
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
 
@@ -2750,7 +2751,6 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer, u
     if (f->code())
         f = JS_TRACE_MONITOR(cx).fragmento->getAnchor(f->root->ip);
 
-    f->calldepth = 0;
     f->recordAttempts++;
     f->root = f;
     /* allocate space to store the LIR for this tree */
@@ -2818,7 +2818,7 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer, u
 }
 
 static bool
-js_AttemptToStabilizeTree(JSContext* cx, SideExit* exit, Fragment* outer)
+js_AttemptToStabilizeTree(JSContext* cx, VMSideExit* exit, Fragment* outer)
 {
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
     Fragment* from = exit->from->root;
@@ -2847,7 +2847,7 @@ js_AttemptToStabilizeTree(JSContext* cx, SideExit* exit, Fragment* outer)
 }
 
 static bool
-js_AttemptToExtendTree(JSContext* cx, SideExit* anchor, SideExit* exitedFrom, Fragment* outer)
+js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, Fragment* outer)
 {
     Fragment* f = anchor->from->root;
     JS_ASSERT(f->vmprivate);
@@ -2886,8 +2886,8 @@ js_AttemptToExtendTree(JSContext* cx, SideExit* anchor, SideExit* exitedFrom, Fr
        guard (anchor) has the type information for everything below the current scope,
        and the actual guard we exited from has the types for everything in the current
        scope (and whatever it inlined). We have to merge those maps here. */
-    SideExit* e1 = anchor;
-    SideExit* e2 = exitedFrom;
+    VMSideExit* e1 = anchor;
+    VMSideExit* e2 = exitedFrom;
     fullMap.add(getTypeMap(e1) + e1->numGlobalSlots, e1->numStackSlotsBelowCurrentFrame);
     fullMap.add(getTypeMap(e2) + e2->numGlobalSlots, e2->numStackSlots);
     ngslots = e2->numGlobalSlots;
@@ -2900,9 +2900,9 @@ js_AttemptToExtendTree(JSContext* cx, SideExit* anchor, SideExit* exitedFrom, Fr
     return false;
 }
 
-static SideExit*
+static VMSideExit*
 js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
-               SideExit** innermostNestedGuardp);
+               VMSideExit** innermostNestedGuardp);
 
 static nanojit::Fragment*
 js_FindVMCompatiblePeer(JSContext* cx, Fragment* f);
@@ -3011,8 +3011,8 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
     }
 
     r->prepareTreeCall(f);
-    SideExit* innermostNestedGuard = NULL;
-    SideExit* lr = js_ExecuteTree(cx, f, inlineCallCount, &innermostNestedGuard);
+    VMSideExit* innermostNestedGuard = NULL;
+    VMSideExit* lr = js_ExecuteTree(cx, f, inlineCallCount, &innermostNestedGuard);
     if (!lr) {
         js_AbortRecording(cx, "Couldn't call inner tree");
         return false;
@@ -3240,9 +3240,9 @@ js_FindVMCompatiblePeer(JSContext* cx, Fragment* f)
 /**
  * Executes a tree.
  */
-static SideExit*
+static VMSideExit*
 js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
-               SideExit** innermostNestedGuardp)
+               VMSideExit** innermostNestedGuardp)
 {
     JS_ASSERT(f->code() && f->vmprivate);
 
@@ -3308,7 +3308,7 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
     bool onTrace = tm->onTrace;
     if (!onTrace)
         tm->onTrace = true;
-    SideExit* lr;
+    VMSideExit* lr;
 
     debug_only(fflush(NULL);)
     GuardRecord* rec;
@@ -3317,7 +3317,7 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
 #else
     rec = u.func(&state, NULL);
 #endif
-    lr = rec->exit;
+    lr = (VMSideExit*)rec->exit;
 
     AUDIT(traceTriggered);
 
@@ -3327,7 +3327,7 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
 
     /* Except if we find that this is a nested bailout, the guard the call returned is the
        one we have to use to adjust pc and sp. */
-    SideExit* innermost = lr;
+    VMSideExit* innermost = lr;
 
     /* While executing a tree we do not update state.sp and state.rp even if they grow. Instead,
        guards tell us by how much sp and rp should be incremented in case of a side exit. When
@@ -3339,7 +3339,7 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
        visible and only the guard we exited on will tell us about). */
     FrameInfo* rp = (FrameInfo*)state.rp;
     if (lr->exitType == NESTED_EXIT) {
-        SideExit* nested = state.lastTreeCallGuard;
+        VMSideExit* nested = state.lastTreeCallGuard;
         if (!nested) {
             /* If lastTreeCallGuard is not set in state, we only have a single level of
                nesting in this exit, so lr itself is the innermost and outermost nested
@@ -3525,8 +3525,8 @@ monitor_loop:
     if (!match)
         goto monitor_loop;
 
-    SideExit* lr = NULL;
-    SideExit* innermostNestedGuard = NULL;
+    VMSideExit* lr = NULL;
+    VMSideExit* innermostNestedGuard = NULL;
 
     lr = js_ExecuteTree(cx, match, inlineCallCount, &innermostNestedGuard);
     if (!lr)
@@ -44,6 +44,7 @@
 
 #ifdef JS_TRACER
 
+#include "jscntxt.h"
 #include "jsstddef.h"
 #include "jstypes.h"
 #include "jslock.h"
@@ -172,10 +173,52 @@ public:
     bool matches(TypeMap& other) const;
 };
 
+enum ExitType {
+    BRANCH_EXIT,
+    LOOP_EXIT,
+    NESTED_EXIT,
+    MISMATCH_EXIT,
+    OOM_EXIT,
+    OVERFLOW_EXIT,
+    UNSTABLE_LOOP_EXIT,
+    TIMEOUT_EXIT
+};
+
+struct VMSideExit : public nanojit::SideExit
+{
+    intptr_t ip_adj;
+    intptr_t sp_adj;
+    intptr_t rp_adj;
+    int32_t calldepth;
+    uint32 numGlobalSlots;
+    uint32 numStackSlots;
+    uint32 numStackSlotsBelowCurrentFrame;
+    ExitType exitType;
+};
+
+static inline uint8* getTypeMap(nanojit::SideExit* exit)
+{
+    return (uint8*)(((VMSideExit*)exit) + 1);
+}
+
+struct InterpState
+{
+    void* sp;                      /* native stack pointer, stack[0] is spbase[0] */
+    void* rp;                      /* call stack pointer */
+    void* gp;                      /* global frame pointer */
+    JSContext *cx;                 /* current VM context handle */
+    void* eos;                     /* first unusable word after the native stack */
+    void* eor;                     /* first unusable word after the call stack */
+    VMSideExit* lastTreeExitGuard; /* guard we exited on during a tree call */
+    VMSideExit* lastTreeCallGuard; /* guard we want to grow from if the tree
+                                      call exit guard mismatched */
+    void* rpAtLastTreeCall;        /* value of rp at innermost tree call guard */
+};
+
 struct UnstableExit
 {
     nanojit::Fragment* fragment;
-    nanojit::SideExit* exit;
+    VMSideExit* exit;
     UnstableExit* next;
 };
 
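
With these definitions, nanojit only ever stores the lean base-class pointer, and the tracer recovers its view with a plain downcast, as in lr = (VMSideExit*)rec->exit; above. That cast is sound as long as every exit that reaches nanojit was allocated as a VMSideExit, which is what snapshot() arranges.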
@@ -189,7 +232,7 @@ public:
     TypeMap stackTypeMap;
     Queue<nanojit::Fragment*> dependentTrees;
     unsigned branchCount;
-    Queue<nanojit::SideExit*> sideExits;
+    Queue<VMSideExit*> sideExits;
     UnstableExit* unstableExits;
 
     TreeInfo(nanojit::Fragment* _fragment) : unstableExits(NULL) {
@@ -220,7 +263,7 @@ class TraceRecorder : public GCObject {
     char* entryTypeMap;
     unsigned callDepth;
     JSAtom** atoms;
-    nanojit::SideExit* anchor;
+    VMSideExit* anchor;
     nanojit::Fragment* fragment;
     TreeInfo* treeInfo;
     nanojit::LirBuffer* lirbuf;
@@ -262,7 +305,7 @@ class TraceRecorder : public GCObject {
 
     bool lazilyImportGlobalSlot(unsigned slot);
 
-    nanojit::LIns* guard(bool expected, nanojit::LIns* cond, nanojit::ExitType exitType);
+    nanojit::LIns* guard(bool expected, nanojit::LIns* cond, ExitType exitType);
     nanojit::LIns* guard(bool expected, nanojit::LIns* cond, nanojit::LIns* exit);
     nanojit::LIns* addName(nanojit::LIns* ins, const char* name);
 
@@ -341,7 +384,7 @@ class TraceRecorder : public GCObject {
     bool guardDenseArray(JSObject* obj, nanojit::LIns* obj_ins);
     bool guardDenseArrayIndex(JSObject* obj, jsint idx, nanojit::LIns* obj_ins,
                               nanojit::LIns* dslots_ins, nanojit::LIns* idx_ins,
-                              nanojit::ExitType exitType);
+                              ExitType exitType);
     bool guardElemOp(JSObject* obj, nanojit::LIns* obj_ins, jsid id, size_t op_offset, jsval* vp);
     void clearFrameSlotsFromCache();
     bool guardShapelessCallee(jsval& callee);
@@ -356,13 +399,13 @@ class TraceRecorder : public GCObject {
 public:
     friend bool js_MonitorRecording(TraceRecorder* tr);
 
-    TraceRecorder(JSContext* cx, nanojit::SideExit*, nanojit::Fragment*, TreeInfo*,
+    TraceRecorder(JSContext* cx, VMSideExit*, nanojit::Fragment*, TreeInfo*,
                   unsigned ngslots, uint8* globalTypeMap, uint8* stackTypeMap,
-                  nanojit::SideExit* expectedInnerExit, nanojit::Fragment* outerToBlacklist);
+                  VMSideExit* expectedInnerExit, nanojit::Fragment* outerToBlacklist);
     ~TraceRecorder();
 
     uint8 determineSlotType(jsval* vp) const;
-    nanojit::LIns* snapshot(nanojit::ExitType exitType);
+    nanojit::LIns* snapshot(ExitType exitType);
     nanojit::Fragment* getFragment() const { return fragment; }
     bool isLoopHeader(JSContext* cx) const;
     void compile(nanojit::Fragmento* fragmento);
@@ -373,7 +416,7 @@ public:
     bool adjustCallerTypes(nanojit::Fragment* f, unsigned* demote_slots, bool& trash);
     nanojit::Fragment* findNestedCompatiblePeer(nanojit::Fragment* f, nanojit::Fragment** empty);
     void prepareTreeCall(nanojit::Fragment* inner);
-    void emitTreeCall(nanojit::Fragment* inner, nanojit::SideExit* exit);
+    void emitTreeCall(nanojit::Fragment* inner, VMSideExit* exit);
     unsigned getCallDepth() const;
 
     bool record_EnterFrame();
@@ -286,7 +286,6 @@ namespace nanojit
         Fragment *f = newBranch(anchor, ip);
         f->root = f;
         f->kind = MergeTrace;
-        f->calldepth = lr->exit->calldepth;
         verbose_only(
             int mergeid = 1;
             for (Fragment *g = anchor->branches; g != 0; g = g->nextbranch)
@@ -301,7 +300,6 @@ namespace nanojit
     {
         Fragment *f = newBranch(exit->from, ip);
         f->kind = BranchTrace;
-        f->calldepth = exit->calldepth;
         f->treeBranches = f->root->treeBranches;
         f->root->treeBranches = f;
         return f;
@@ -228,7 +228,6 @@ namespace nanojit
         int32_t blacklistLevel;
         NIns* fragEntry;
         NIns* loopEntry;
-        int32_t calldepth;
         void* vmprivate;
 
     private:
@@ -59,6 +59,31 @@
 
 namespace nanojit {
     const uint32_t NJ_PAGE_SIZE = 1 << NJ_LOG2_PAGE_SIZE;
+
+    class Fragment;
+    struct SideExit;
+
+    struct GuardRecord
+    {
+        void* jmpToStub;
+        void* stubEntry;
+        void* jmpToTarget;
+        GuardRecord* next;
+        SideExit* exit;
+    };
+
+    struct SideExit
+    {
+        GuardRecord* guards;
+        Fragment* from;
+        Fragment* target;
+
+        void addGuard(GuardRecord* lr)
+        {
+            lr->next = guards;
+            guards = lr;
+        }
+    };
 }
 
 #ifdef NJ_STACK_GROWTH_UP
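
addGuard() pushes onto a singly linked list, newest record first, so several guard records can share one side exit; the shared-exit path in snapshot() above relies on this. A tiny usage sketch, assuming the definitions just added:

nanojit::SideExit exit = {};            // guards starts out NULL
nanojit::GuardRecord a = {}, b = {};
exit.addGuard(&a);                      // exit.guards -> a
exit.addGuard(&b);                      // exit.guards -> b -> a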
@@ -173,7 +173,7 @@ namespace nanojit
         Fragment *frag = exit->target;
         GuardRecord *lr = 0;
         bool destKnown = (frag && frag->fragEntry);
-        if (destKnown && !trees && exit->exitType != TIMEOUT_EXIT)
+        if (destKnown && !trees && !guard->isop(LIR_loop))
         {
             // already exists, emit jump now. no patching required.
             JMP(frag->fragEntry);
@@ -951,8 +951,8 @@ namespace nanojit
         GuardRecord* guard = ins->record();
         SideExit* exit = guard->exit;
 
-        // Emit an exit stub that the loop may be patched to jump to, if a timeout fires.
-        exit->exitType = TIMEOUT_EXIT;
+        // Emit an exit stub that the loop may be patched to jump to (for example if we
+        // want to terminate the loop because a timeout fires).
         asm_exit(ins);
 
         // Emit the patchable jump itself.
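
Since exitType now lives on VMSideExit, the assembler can no longer read or write it: the two hunks above switch the jump-emission test to the LIR_loop opcode and drop the exitType store entirely.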
@@ -367,7 +367,7 @@ namespace nanojit
 
 #define LDdm(reg,addr) do { \
     ALUdm(0x8b,reg,addr); \
-    asm_output2("mov %s,0(%x)",gpn(reg),addr); \
+    asm_output2("mov %s,0(%lx)",gpn(reg),addr); \
     } while (0)
 
 
@@ -384,7 +384,7 @@ namespace nanojit
 // load 16-bit, zero extend
 #define LD16Z(r,d,b) do { ALU2m(0x0fb7,r,d,b); asm_output3("movsz %s,%d(%s)", gpn(r),d,gpn(b)); } while(0)
 
-#define LD16Zdm(r,addr) do { ALU2dm(0x0fb7,r,addr); asm_output2("movsz %s,0(%x)", gpn(r),addr); } while (0)
+#define LD16Zdm(r,addr) do { ALU2dm(0x0fb7,r,addr); asm_output2("movsz %s,0(%lx)", gpn(r),addr); } while (0)
 
 #define LD16Zsib(r,disp,base,index,scale) do { \
     ALU2sib(0x0fb7,r,base,index,scale,disp); \
@@ -399,7 +399,7 @@ namespace nanojit
 #define LD8Zdm(r,addr) do { \
     NanoAssert((d)>=0&&(d)<=31); \
     ALU2dm(0x0fb6,r,addr); \
-    asm_output2("movzx %s,0(%x)", gpn(r),addr); \
+    asm_output2("movzx %s,0(%lx)", gpn(r),addr); \
     } while(0)
 
 #define LD8Zsib(r,disp,base,index,scale) do { \
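
The %x to %lx changes in these debug macros widen the format specifier to match the addr argument in the verbose disassembly output; with a specifier narrower than the argument, the printed address can be truncated or garbled (and compilers warn) on targets where long is wider than int.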
@@ -162,60 +162,6 @@ static __inline__ unsigned long long rdtsc(void)
 
 struct JSContext;
 
-namespace nanojit
-{
-    class Fragment;
-
-    enum ExitType {
-        BRANCH_EXIT,
-        LOOP_EXIT,
-        NESTED_EXIT,
-        MISMATCH_EXIT,
-        OOM_EXIT,
-        OVERFLOW_EXIT,
-        UNSTABLE_LOOP_EXIT,
-        TIMEOUT_EXIT
-    };
-
-    struct GuardRecord;
-
-    class LIns;
-
-    struct SideExit;
-
-    typedef struct GuardRecord
-    {
-        void* jmpToStub;
-        void* stubEntry;
-        void* jmpToTarget;
-        GuardRecord* next;
-        SideExit* exit;
-    };
-
-    typedef struct SideExit
-    {
-        GuardRecord* guards;
-        Fragment* from;
-        Fragment* target;
-        intptr_t ip_adj;
-        intptr_t sp_adj;
-        intptr_t rp_adj;
-        int32_t calldepth;
-        uint32 numGlobalSlots;
-        uint32 numStackSlots;
-        uint32 numStackSlotsBelowCurrentFrame;
-        ExitType exitType;
-
-        void addGuard(GuardRecord* lr)
-        {
-            lr->next = guards;
-            guards = lr;
-        }
-    };
-
-    static inline uint8* getTypeMap(SideExit* exit) { return (uint8*)(exit + 1); }
-}
 
 class GC;
 
 class GCObject
@@ -335,20 +281,6 @@ typedef int FunctionID;
 
 namespace avmplus
 {
-    struct InterpState
-    {
-        void* sp;                             /* native stack pointer, stack[0] is spbase[0] */
-        void* rp;                             /* call stack pointer */
-        void* gp;                             /* global frame pointer */
-        JSContext *cx;                        /* current VM context handle */
-        void* eos;                            /* first unusable word after the native stack */
-        void* eor;                            /* first unusable word after the call stack */
-        nanojit::SideExit* lastTreeExitGuard; /* guard we exited on during a tree call */
-        nanojit::SideExit* lastTreeCallGuard; /* guard we want to grow from if the tree
-                                                 call exit guard mismatched */
-        void* rpAtLastTreeCall;               /* value of rp at innermost tree call guard */
-    };
-
     class String
     {
     };
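
These final deletions are the mirror image of the earlier additions: the GuardRecord/SideExit pair (the latter stripped of its VM fields) now lives in the core nanojit header, while ExitType, the VM-specific exit fields, getTypeMap(), and InterpState move to the tracer header, completing the extraction the commit message describes.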