Bug 605192 - JM: make f.apply(x, obj) fast, part 4 (r=dvander)
--HG-- extra : rebase_source : 2dec4f92e1306ef1594262607ffca908b9ed1c64
Parent: db7bcbff85
Commit: f57c947fc9
@@ -397,7 +397,7 @@ StackSpace::pushGeneratorFrame(JSContext *cx, JSFrameRegs *regs, GeneratorFrameG
 bool
 StackSpace::bumpCommitAndLimit(JSStackFrame *base, Value *sp, uintN nvals, Value **limit) const
 {
-    JS_ASSERT(sp == firstUnused());
+    JS_ASSERT(sp >= firstUnused());
     JS_ASSERT(sp + nvals >= *limit);
 #ifdef XP_WIN
     if (commitEnd <= *limit) {
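The relaxed assertion above (== loosened to >=) exists because an optimized f.apply(x, obj) bumps regs.sp past firstUnused() before the usual space checks run. A toy model of the commit-on-demand scheme these functions guard, with made-up sizes and none of the real js::StackSpace machinery:

#include <cassert>
#include <cstddef>
#include <cstdio>

struct ToyStackSpace {
    static const size_t CAPACITY = 1 << 20;   // reserved slots
    static const size_t COMMIT = 1 << 14;     // commit granularity
    size_t used;                              // firstUnused() analogue
    size_t commitEnd;

    ToyStackSpace() : used(0), commitEnd(COMMIT) {}

    // Mirrors the relaxed precondition: 'from' may now be past firstUnused()
    // (an optimized apply bumps sp early), hence >= rather than ==.
    bool ensureSpace(size_t from, size_t nvals) {
        assert(from >= used);                 // was: from == used
        if (commitEnd - from >= nvals)
            return true;                      // fits in the committed prefix
        if (CAPACITY - from < nvals)
            return false;                     // exceeds the reserved region
        while (commitEnd - from < nvals)
            commitEnd += COMMIT;              // the "bumpCommit" step
        return true;
    }
};

int main() {
    ToyStackSpace ss;
    ss.used = 100;
    // sp already bumped past firstUnused(), as an optimized apply would:
    bool ok = ss.ensureSpace(5000, 20000);
    printf("ok=%d commitEnd=%zu\n", ok, ss.commitEnd);
    return 0;
}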
@@ -701,12 +701,6 @@ class StackSpace
     friend class AllFramesIter;
     StackSegment *getCurrentSegment() const { return currentSegment; }

-    /*
-     * Allocate nvals on the top of the stack, report error on failure.
-     * N.B. the caller must ensure |from == firstUnused()|.
-     */
-    inline bool ensureSpace(JSContext *maybecx, Value *from, ptrdiff_t nvals) const;
-
 #ifdef XP_WIN
     /* Commit more memory from the reserved stack space. */
     JS_FRIEND_API(bool) bumpCommit(Value *from, ptrdiff_t nvals) const;
@@ -752,9 +746,6 @@ class StackSpace
      */
     inline bool ensureEnoughSpaceToEnterTrace();

-    /* See stubs::HitStackQuota. */
-    inline bool bumpCommitEnd(Value *from, uintN nslots);
-
     /* +1 for slow native's stack frame. */
     static const ptrdiff_t MAX_TRACE_SPACE_VALS =
         MAX_NATIVE_STACK_SLOTS + MAX_CALL_STACK_ENTRIES * VALUES_PER_STACK_FRAME +
@@ -833,6 +824,12 @@ class StackSpace
      * if fully committed or if 'limit' exceeds 'base' + STACK_QUOTA.
      */
     bool bumpCommitAndLimit(JSStackFrame *base, Value *from, uintN nvals, Value **limit) const;
+
+    /*
+     * Allocate nvals on the top of the stack, report error on failure.
+     * N.B. the caller must ensure |from >= firstUnused()|.
+     */
+    inline bool ensureSpace(JSContext *maybecx, Value *from, ptrdiff_t nvals) const;
 };

 JS_STATIC_ASSERT(StackSpace::CAPACITY_VALS % StackSpace::COMMIT_VALS == 0);
@@ -172,10 +172,10 @@ STATIC_POSTCONDITION(!return || ubound(from) >= nvals)
 JS_ALWAYS_INLINE bool
 StackSpace::ensureSpace(JSContext *maybecx, Value *from, ptrdiff_t nvals) const
 {
-    JS_ASSERT(from == firstUnused());
+    JS_ASSERT(from >= firstUnused());
 #ifdef XP_WIN
     JS_ASSERT(from <= commitEnd);
-    if (JS_LIKELY(commitEnd - from >= nvals))
+    if (commitEnd - from >= nvals)
         goto success;
     if (end - from < nvals) {
         if (maybecx)
@@ -189,7 +189,7 @@ StackSpace::ensureSpace(JSContext *maybecx, Value *from, ptrdiff_t nvals) const
     }
     goto success;
 #else
-    if (JS_LIKELY(end - from < nvals)) {
+    if (end - from < nvals) {
         if (maybecx)
             js_ReportOutOfScriptQuota(maybecx);
         return false;
@@ -2252,6 +2252,8 @@ js_fun_apply(JSContext *cx, uintN argc, Value *vp)
     if (argc < 2 || vp[3].isNullOrUndefined())
         return js_fun_call(cx, (argc > 0) ? 1 : 0, vp);

+    /* N.B. Changes need to be propagated to stubs::SplatApplyArgs. */
+
     /* Step 3. */
     if (!vp[3].isObject()) {
         JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_APPLY_ARGS, js_apply_str);
@@ -297,7 +297,7 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::ARMRegiste

 #undef STUB_CALL_TYPE

-    Call stubCall(void *ptr, jsbytecode *pc, uint32 frameDepth) {
+    Call stubCallImpl(void *ptr, jsbytecode *pc, int32 frameDepth) {
         JS_STATIC_ASSERT(ClobberInCall != Registers::ArgReg1);

         void *pfun = getCallTarget(ptr);
@@ -315,6 +315,15 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::ARMRegiste
         return wrapCall(pfun);
     }

+    Call stubCall(void *ptr, jsbytecode *pc, uint32 frameDepth) {
+        JS_ASSERT(frameDepth <= INT32_MAX);
+        return stubCallImpl(ptr, pc, (int32)frameDepth);
+    }
+
+    Call stubCallWithDynamicDepth(void *ptr, jsbytecode *pc) {
+        return stubCallImpl(ptr, pc, -1);
+    }
+
     Call wrapCall(void *pfun) {
 #if defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)
         push(Registers::ArgReg1);
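The new stubCallImpl takes a signed depth so that -1 can mark call sites whose frame size is only known at runtime; stubCall keeps the old unsigned interface, and stubCallWithDynamicDepth passes the sentinel. A minimal standalone sketch of this dispatch pattern (printf stands in for the stores the assembler would emit):

#include <cassert>
#include <cstdint>
#include <cstdio>

static void stubCallImpl(const char *ptr, int32_t frameDepth) {
    if (frameDepth >= 0)
        printf("%s: store sp = fp + frame + %d slots\n", ptr, frameDepth);
    else
        printf("%s: sp already set in VMFrame.regs, leave it\n", ptr);
}

static void stubCall(const char *ptr, uint32_t frameDepth) {
    assert(frameDepth <= INT32_MAX);          // sentinel must stay unreachable
    stubCallImpl(ptr, (int32_t)frameDepth);
}

static void stubCallWithDynamicDepth(const char *ptr) {
    stubCallImpl(ptr, -1);                    // ic::SplatApplyArgs ran already
}

int main() {
    stubCall("ic::Call", 12);                 // static frame depth
    stubCallWithDynamicDepth("ic::Call");     // f.apply(x, obj) path
    return 0;
}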
@@ -336,15 +345,20 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::ARMRegiste
         return cl;
     }

-    void fixScriptStack(uint32 frameDepth) {
-        /* sp = fp + slots() + stackDepth */
-        addPtr(Imm32(sizeof(JSStackFrame) + frameDepth * sizeof(jsval)),
-               JSFrameReg,
-               ClobberInCall);
-
-        /* regs->sp = sp */
-        storePtr(ClobberInCall,
-                 FrameAddress(offsetof(VMFrame, regs.sp)));
+    void fixScriptStack(int32 frameDepth) {
+        /*
+         * sp = fp->slots() + frameDepth
+         * regs->sp = sp
+         *
+         * |frameDepth < 0| implies ic::SplatApplyArgs has been called which
+         * means regs.sp has already been set in the VMFrame.
+         */
+        if (frameDepth >= 0) {
+            addPtr(Imm32(sizeof(JSStackFrame) + frameDepth * sizeof(jsval)),
+                   JSFrameReg,
+                   ClobberInCall);
+            storePtr(ClobberInCall, FrameAddress(offsetof(VMFrame, regs.sp)));
+        }

         /* regs->fp = fp */
         storePtr(JSFrameReg, FrameAddress(offsetof(VMFrame, regs.fp)));
@@ -534,10 +534,9 @@ mjit::Compiler::finishThisUp(JITScript **jitp)
             JS_ASSERT(cics[i].hotPathOffset == offset);

             cics[i].pc = callICs[i].pc;
-            cics[i].argc = callICs[i].argc;
+            cics[i].frameSize = callICs[i].frameSize;
             cics[i].funObjReg = callICs[i].funObjReg;
             cics[i].funPtrReg = callICs[i].funPtrReg;
-            cics[i].frameDepth = callICs[i].frameDepth;
             stubCode.patch(callICs[i].addrLabel1, &cics[i]);
             stubCode.patch(callICs[i].addrLabel2, &cics[i]);
         }
@@ -2337,36 +2336,38 @@ mjit::Compiler::emitUncachedCall(uint32 argc, bool callingNew)
 }

 static bool
-IsLowerableFunCall(jsbytecode *pc)
+IsLowerableFunCallOrApply(jsbytecode *pc)
 {
 #ifdef JS_MONOIC
-    return *pc == JSOP_FUNCALL && GET_ARGC(pc) >= 1;
+    return (*pc == JSOP_FUNCALL && GET_ARGC(pc) >= 1) ||
+           (*pc == JSOP_FUNAPPLY && GET_ARGC(pc) == 2);
 #else
     return false;
 #endif
 }

 void
-mjit::Compiler::checkCallSpeculation(uint32 argc, FrameEntry *origCallee, FrameEntry *origThis,
-                                     MaybeRegisterID origCalleeType, RegisterID origCalleeData,
-                                     MaybeRegisterID origThisType, RegisterID origThisData,
-                                     Jump *uncachedCallSlowRejoin, CallPatchInfo *uncachedCallPatch)
+mjit::Compiler::checkCallApplySpeculation(uint32 argc, FrameEntry *origCallee, FrameEntry *origThis,
+                                          MaybeRegisterID origCalleeType, RegisterID origCalleeData,
+                                          MaybeRegisterID origThisType, RegisterID origThisData,
+                                          Jump *uncachedCallSlowRejoin, CallPatchInfo *uncachedCallPatch)
 {
-    JS_ASSERT(IsLowerableFunCall(PC));
+    JS_ASSERT(IsLowerableFunCallOrApply(PC));

     /*
      * if (origCallee.isObject() &&
      *     origCallee.toObject().isFunction &&
-     *     origCallee.toObject().getFunctionPrivate() == js_fun_call)
+     *     origCallee.toObject().getFunctionPrivate() == js_fun_{call,apply})
      */
     MaybeJump isObj;
     if (origCalleeType.isSet())
         isObj = masm.testObject(Assembler::NotEqual, origCalleeType.reg());
     Jump isFun = masm.testFunction(Assembler::NotEqual, origCalleeData);
     masm.loadFunctionPrivate(origCalleeData, origCalleeData);
+    Native native = *PC == JSOP_FUNCALL ? js_fun_call : js_fun_apply;
     Jump isNative = masm.branchPtr(Assembler::NotEqual,
                                    Address(origCalleeData, JSFunction::offsetOfNativeOrScript()),
-                                   ImmPtr(JS_FUNC_TO_DATA_PTR(void *, js_fun_call)));
+                                   ImmPtr(JS_FUNC_TO_DATA_PTR(void *, native)));

     /*
      * If speculation fails, we can't use the ic, since it is compiled on the
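checkCallApplySpeculation emits three guards: the callee is an object, it is a function, and its native is exactly js_fun_call or js_fun_apply (selected from the opcode at PC). If any guard fails at runtime, the site falls back to behaving like an ordinary call IC. A standalone sketch of that speculate-then-fallback shape (plain function pointers stand in for the emitted branches):

#include <cstdio>

typedef void (*Native)();
static void fun_call()  { printf("js_fun_call fast path\n"); }
static void fun_apply() { printf("js_fun_apply fast path\n"); }
static void genericCall(Native callee) { printf("uncached slow path\n"); callee(); }

static void speculativeCall(Native callee, bool opIsFunCall) {
    // Native native = *PC == JSOP_FUNCALL ? js_fun_call : js_fun_apply;
    Native expected = opIsFunCall ? fun_call : fun_apply;
    if (callee == expected)
        callee();            // all guards passed: lowered call
    else
        genericCall(callee); // speculation failed: act as a normal callIC
}

int main() {
    speculativeCall(fun_apply, false);  // hits the fast path
    speculativeCall(fun_call, false);   // guard fails, generic path
    return 0;
}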
@@ -2435,12 +2436,13 @@ mjit::Compiler::inlineCallHelper(uint32 argc, bool callingNew)
     }

     /*
-     * From the presence of JSOP_FUNCALL, we speculate that we are going to
-     * call js_fun_call. Normally, this call would go through js::Invoke to
-     * ultimately call 'this'. We can do much better by having the callIC cache
-     * and call 'this' directly.
+     * From the presence of JSOP_FUN{CALL,APPLY}, we speculate that we are
+     * going to call js_fun_{call,apply}. Normally, this call would go through
+     * js::Invoke to ultimately call 'this'. We can do much better by having
+     * the callIC cache and call 'this' directly. However, if it turns out that
+     * we are not actually calling js_fun_call, the callIC must act as normal.
      */
-    bool lowerFunCall = IsLowerableFunCall(PC);
+    bool lowerFunCallOrApply = IsLowerableFunCallOrApply(PC);

     /* Initialized by both branches below. */
     CallGenInfo callIC(PC);
@@ -2449,7 +2451,7 @@ mjit::Compiler::inlineCallHelper(uint32 argc, bool callingNew)
     RegisterID icCalleeData; /* data to call */
     Address icRvalAddr;      /* return slot on slow-path rejoin */

-    /* Initialized only on lowerFunCall branch. */
+    /* Initialized only on lowerFunCallOrApply branch. */
     Jump uncachedCallSlowRejoin;
     CallPatchInfo uncachedCallPatch;

@@ -2462,7 +2464,7 @@ mjit::Compiler::inlineCallHelper(uint32 argc, bool callingNew)
         origCalleeData = maybeOrigCalleeData.reg();
         PinRegAcrossSyncAndKill p1(frame, origCalleeData), p2(frame, origCalleeType);

-        if (lowerFunCall) {
+        if (lowerFunCallOrApply) {
             MaybeRegisterID origThisType, maybeOrigThisData;
             RegisterID origThisData;
             {
@@ -2475,16 +2477,25 @@ mjit::Compiler::inlineCallHelper(uint32 argc, bool callingNew)
                 frame.syncAndKill(Registers(Registers::AvailRegs), Uses(argc + 2));
             }

-            checkCallSpeculation(argc, origCallee, origThis,
-                                 origCalleeType, origCalleeData,
-                                 origThisType, origThisData,
-                                 &uncachedCallSlowRejoin, &uncachedCallPatch);
+            checkCallApplySpeculation(argc, origCallee, origThis,
+                                      origCalleeType, origCalleeData,
+                                      origThisType, origThisData,
+                                      &uncachedCallSlowRejoin, &uncachedCallPatch);

             icCalleeType = origThisType;
             icCalleeData = origThisData;
             icRvalAddr = frame.addressOf(origThis);
-            callIC.argc = argc - 1;
-            callIC.frameDepth = frame.frameDepth();
+
+            /*
+             * For f.call(), since we compile the ic under the (checked)
+             * assumption that call == js_fun_call, we still have a static
+             * frame size. For f.apply(), the frame size depends on the dynamic
+             * length of the array passed to apply.
+             */
+            if (*PC == JSOP_FUNCALL)
+                callIC.frameSize.initStatic(frame.frameDepth(), argc - 1);
+            else
+                callIC.frameSize.initDynamic();
         } else {
             /* Leaves pinned regs untouched. */
             frame.syncAndKill(Registers(Registers::AvailRegs), Uses(argc + 2));
@@ -2492,8 +2503,7 @@ mjit::Compiler::inlineCallHelper(uint32 argc, bool callingNew)
             icCalleeType = origCalleeType;
             icCalleeData = origCalleeData;
             icRvalAddr = frame.addressOf(origCallee);
-            callIC.argc = argc;
-            callIC.frameDepth = frame.frameDepth();
+            callIC.frameSize.initStatic(frame.frameDepth(), argc);
         }
     }

@@ -2502,10 +2512,19 @@ mjit::Compiler::inlineCallHelper(uint32 argc, bool callingNew)
     if (icCalleeType.isSet())
         notObjectJump = masm.testObject(Assembler::NotEqual, icCalleeType.reg());

+    /*
+     * For an optimized apply, keep icCalleeData and funPtrReg in
+     * callee-saved registers for the subsequent ic::SplatApplyArgs call.
+     */
     Registers tempRegs;
-    tempRegs.takeReg(icCalleeData);
-    RegisterID t0 = tempRegs.takeAnyReg();
-    RegisterID t1 = tempRegs.takeAnyReg();
+    if (callIC.frameSize.isDynamic() && !Registers::isSaved(icCalleeData)) {
+        RegisterID x = tempRegs.takeRegInMask(Registers::SavedRegs);
+        masm.move(icCalleeData, x);
+        icCalleeData = x;
+    } else {
+        tempRegs.takeReg(icCalleeData);
+    }
+    RegisterID funPtrReg = tempRegs.takeRegInMask(Registers::SavedRegs);

     /*
      * Guard on the callee identity. This misses on the first run. If the
@@ -2527,38 +2546,55 @@ mjit::Compiler::inlineCallHelper(uint32 argc, bool callingNew)
     Jump notFunction = stubcc.masm.testFunction(Assembler::NotEqual, icCalleeData);

     /* Test if the function is scripted. */
-    stubcc.masm.loadFunctionPrivate(icCalleeData, t0);
-    stubcc.masm.load16(Address(t0, offsetof(JSFunction, flags)), t1);
-    stubcc.masm.and32(Imm32(JSFUN_KINDMASK), t1);
-    Jump isNative = stubcc.masm.branch32(Assembler::Below, t1, Imm32(JSFUN_INTERPRETED));
+    RegisterID tmp = tempRegs.takeAnyReg();
+    stubcc.masm.loadFunctionPrivate(icCalleeData, funPtrReg);
+    stubcc.masm.load16(Address(funPtrReg, offsetof(JSFunction, flags)), tmp);
+    stubcc.masm.and32(Imm32(JSFUN_KINDMASK), tmp);
+    Jump isNative = stubcc.masm.branch32(Assembler::Below, tmp, Imm32(JSFUN_INTERPRETED));
+    tempRegs.putReg(tmp);

     /*
-     * No-op jump that gets re-patched. This is so ArgReg1 won't be
-     * clobbered, with the added bonus that the generated stub doesn't
-     * need to pop its own return address.
+     * N.B. After this call, the frame will have a dynamic frame size.
+     * Check after the function is known not to be a native so that the
+     * catch-all/native path has a static depth.
      */
+    if (callIC.frameSize.isDynamic())
+        stubcc.call(ic::SplatApplyArgs);
+
+    /*
+     * No-op jump that gets patched by ic::New/Call to the stub generated
+     * by generateFullCallStub.
+     */
     Jump toPatch = stubcc.masm.jump();
     toPatch.linkTo(stubcc.masm.label(), &stubcc.masm);
     callIC.oolJump = toPatch;

-    /* At this point the function is definitely scripted. Call the link routine. */
+    /*
+     * At this point the function is definitely scripted, so we try to
+     * compile it and patch either funGuard/funJump or oolJump. This code
+     * is only executed once.
+     */
     callIC.addrLabel1 = stubcc.masm.moveWithPatch(ImmPtr(NULL), Registers::ArgReg1);
-    callIC.oolCall = stubcc.call(callingNew ? ic::New : ic::Call);
+    void *icFunPtr = JS_FUNC_TO_DATA_PTR(void *, callingNew ? ic::New : ic::Call);
+    if (callIC.frameSize.isStatic())
+        callIC.oolCall = stubcc.masm.stubCall(icFunPtr, PC, frame.frameDepth());
+    else
+        callIC.oolCall = stubcc.masm.stubCallWithDynamicDepth(icFunPtr, PC);

     callIC.funObjReg = icCalleeData;
-    callIC.funPtrReg = t0;
+    callIC.funPtrReg = funPtrReg;

     /*
      * The IC call either returns NULL, meaning call completed, or a
      * function pointer to jump to. Caveat: Must restore JSFrameReg
      * because a new frame has been pushed.
-     *
-     * This function only executes once. If hit, it will generate a stub
-     * to compile and execute calls on demand.
      */
     rejoin1 = stubcc.masm.branchTestPtr(Assembler::Zero, Registers::ReturnReg,
                                         Registers::ReturnReg);
-    stubcc.masm.move(Imm32(callIC.argc), JSParamReg_Argc);
+    if (callIC.frameSize.isStatic())
+        stubcc.masm.move(Imm32(callIC.frameSize.staticArgc()), JSParamReg_Argc);
+    else
+        stubcc.masm.load32(FrameAddress(offsetof(VMFrame, u.call.dynamicArgc)), JSParamReg_Argc);
     stubcc.masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
     callPatch.hasSlowNcode = true;
     callPatch.slowNcodePatch =
@@ -2566,7 +2602,13 @@ mjit::Compiler::inlineCallHelper(uint32 argc, bool callingNew)
                                 Address(JSFrameReg, JSStackFrame::offsetOfncode()));
     stubcc.masm.jump(Registers::ReturnReg);

-    /* Catch-all case, for natives this will turn into a MIC. */
+    /*
+     * This ool path is the catch-all for everything but scripted function
+     * callees. For native functions, ic::NativeNew/NativeCall will repatch
+     * funGuard/funJump with a fast call stub. All other cases
+     * (non-function callable objects and invalid callees) take the slow
+     * path through js::Invoke.
+     */
     if (notObjectJump.isSet())
         stubcc.linkExitDirect(notObjectJump.get(), stubcc.masm.label());
     notFunction.linkTo(stubcc.masm.label(), &stubcc.masm);
@@ -2594,7 +2636,7 @@ mjit::Compiler::inlineCallHelper(uint32 argc, bool callingNew)

     callIC.hotJump = masm.jump();
     callIC.joinPoint = callPatch.joinPoint = masm.label();
-    if (lowerFunCall)
+    if (lowerFunCallOrApply)
         uncachedCallPatch.joinPoint = callIC.joinPoint;
     masm.loadPtr(Address(JSFrameReg, JSStackFrame::offsetOfPrev()), JSFrameReg);

@@ -2605,7 +2647,7 @@ mjit::Compiler::inlineCallHelper(uint32 argc, bool callingNew)

     /*
      * Now that the frame state is set, generate the rejoin path. Note that, if
-     * lowerFunCall, we cannot just call 'stubcc.rejoin' since the return
+     * lowerFunCallOrApply, we cannot just call 'stubcc.rejoin' since the return
      * value has been placed at vp[1] which is not the stack address associated
      * with frame.peek(-1).
      */
@@ -2617,12 +2659,12 @@ mjit::Compiler::inlineCallHelper(uint32 argc, bool callingNew)
     stubcc.crossJump(stubcc.masm.jump(), masm.label());
     JaegerSpew(JSpew_Insns, " ---- END SLOW RESTORE CODE ---- \n");

-    if (lowerFunCall)
+    if (lowerFunCallOrApply)
         stubcc.crossJump(uncachedCallSlowRejoin, masm.label());

     callICs.append(callIC);
     callPatches.append(callPatch);
-    if (lowerFunCall)
+    if (lowerFunCallOrApply)
         callPatches.append(uncachedCallPatch);
 #endif
 }
@@ -126,7 +126,6 @@ class Compiler : public BaseCompiler
          * more comments.
          */
         jsbytecode *pc;
-        uint32 argc;
         DataLabelPtr funGuard;
         Jump funJump;
         Jump hotJump;
@@ -140,7 +139,7 @@ class Compiler : public BaseCompiler
         Jump oolJump;
         RegisterID funObjReg;
         RegisterID funPtrReg;
-        uint32 frameDepth;
+        FrameSize frameSize;
     };

   private:
@@ -354,10 +353,10 @@ class Compiler : public BaseCompiler
     void dispatchCall(VoidPtrStubUInt32 stub, uint32 argc);
     void interruptCheckHelper();
     void emitUncachedCall(uint32 argc, bool callingNew);
-    void checkCallSpeculation(uint32 argc, FrameEntry *origCallee, FrameEntry *origThis,
-                              MaybeRegisterID origCalleeType, RegisterID origCalleeData,
-                              MaybeRegisterID origThisType, RegisterID origThisData,
-                              Jump *uncachedCallSlowRejoin, CallPatchInfo *uncachedCallPatch);
+    void checkCallApplySpeculation(uint32 argc, FrameEntry *origCallee, FrameEntry *origThis,
+                                   MaybeRegisterID origCalleeType, RegisterID origCalleeData,
+                                   MaybeRegisterID origThisType, RegisterID origThisData,
+                                   Jump *uncachedCallSlowRejoin, CallPatchInfo *uncachedCallPatch);
     void inlineCallHelper(uint32 argc, bool callingNew);
     void fixPrimitiveReturn(Assembler *masm, FrameEntry *fe);
     void jsop_gnameinc(JSOp op, VoidStubAtom stub, uint32 index);
@@ -74,7 +74,7 @@ class InlineFrameAssembler {
     typedef JSC::MacroAssembler::DataLabelPtr DataLabelPtr;

     Assembler &masm;
-    uint32 frameDepth;      // script->nfixed + stack depth at caller call site
+    FrameSize frameSize;    // size of the caller's frame
     RegisterID funObjReg;   // register containing the function object (callee)
     jsbytecode *pc;         // bytecode location at the caller call site
     uint32 flags;           // frame flags
@@ -89,7 +89,7 @@ class InlineFrameAssembler {
     InlineFrameAssembler(Assembler &masm, ic::CallICInfo &ic, uint32 flags)
       : masm(masm), pc(ic.pc), flags(flags)
     {
-        frameDepth = ic.frameDepth;
+        frameSize = ic.frameSize;
         funObjReg = ic.funObjReg;
         tempRegs.takeReg(ic.funPtrReg);
         tempRegs.takeReg(funObjReg);
@@ -98,7 +98,7 @@ class InlineFrameAssembler {
     InlineFrameAssembler(Assembler &masm, Compiler::CallGenInfo &gen, uint32 flags)
      : masm(masm), pc(gen.pc), flags(flags)
     {
-        frameDepth = gen.frameDepth;
+        frameSize = gen.frameSize;
         funObjReg = gen.funObjReg;
         tempRegs.takeReg(funObjReg);
     }
@@ -107,19 +107,43 @@ class InlineFrameAssembler {
     {
         JS_ASSERT((flags & ~JSFRAME_CONSTRUCTING) == 0);

-        RegisterID t0 = tempRegs.takeAnyReg();
+        /* Generate JSStackFrame::initCallFrameCallerHalf. */

-        AdjustedFrame adj(sizeof(JSStackFrame) + frameDepth * sizeof(Value));
-        masm.store32(Imm32(JSFRAME_FUNCTION | flags), adj.addrOf(JSStackFrame::offsetOfFlags()));
-        masm.storePtr(JSFrameReg, adj.addrOf(JSStackFrame::offsetOfPrev()));
+        DataLabelPtr ncodePatch;
+        if (frameSize.isStatic()) {
+            uint32 frameDepth = frameSize.staticFrameDepth();
+            AdjustedFrame newfp(sizeof(JSStackFrame) + frameDepth * sizeof(Value));

-        DataLabelPtr ncodePatch =
-            masm.storePtrWithPatch(ImmPtr(ncode), adj.addrOf(JSStackFrame::offsetOfncode()));
+            Address flagsAddr = newfp.addrOf(JSStackFrame::offsetOfFlags());
+            masm.store32(Imm32(JSFRAME_FUNCTION | flags), flagsAddr);
+            Address prevAddr = newfp.addrOf(JSStackFrame::offsetOfPrev());
+            masm.storePtr(JSFrameReg, prevAddr);
+            Address ncodeAddr = newfp.addrOf(JSStackFrame::offsetOfncode());
+            ncodePatch = masm.storePtrWithPatch(ImmPtr(ncode), ncodeAddr);

         /* Adjust JSFrameReg. Callee fills in the rest. */
-        masm.addPtr(Imm32(sizeof(JSStackFrame) + sizeof(Value) * frameDepth), JSFrameReg);
+            masm.addPtr(Imm32(sizeof(JSStackFrame) + frameDepth * sizeof(Value)), JSFrameReg);
+        } else {
+            /*
+             * If the frame size is dynamic, then the fast path generated by
+             * generateFullCallStub must be used. Thus, this code is executed
+             * after stubs::SplatApplyArgs has been called. SplatApplyArgs
+             * stores the dynamic stack pointer (i.e., regs.sp after pushing a
+             * dynamic number of arguments) to VMFrame.regs, so we just load it
+             * here to get the new frame pointer.
+             */
+            RegisterID newfp = tempRegs.takeAnyReg();
+            masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.sp)), newfp);

-        tempRegs.putReg(t0);
+            Address flagsAddr(newfp, JSStackFrame::offsetOfFlags());
+            masm.store32(Imm32(JSFRAME_FUNCTION | flags), flagsAddr);
+            Address prevAddr(newfp, JSStackFrame::offsetOfPrev());
+            masm.storePtr(JSFrameReg, prevAddr);
+            Address ncodeAddr(newfp, JSStackFrame::offsetOfncode());
+            ncodePatch = masm.storePtrWithPatch(ImmPtr(ncode), ncodeAddr);
+
+            masm.move(newfp, JSFrameReg);
+            tempRegs.putReg(newfp);
+        }

         return ncodePatch;
     }
@@ -166,6 +166,12 @@ struct Registers {

     static const uint32 AvailRegs = SavedRegs | TempRegs;

+    static bool isSaved(RegisterID reg) {
+        uint32 mask = maskReg(reg);
+        JS_ASSERT(mask & AvailRegs);
+        return bool(mask & SavedRegs);
+    }
+
     Registers()
       : freeMask(AvailRegs)
     { }
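isSaved() is a pure bitmask test; the apply path uses it to decide whether icCalleeData must first be moved into a callee-saved register so it survives the ic::SplatApplyArgs call. A minimal sketch with invented register IDs and an invented saved/temp split:

#include <cassert>
#include <cstdio>

typedef unsigned int uint32;

enum RegisterID { r0, r1, r2, r3 };
static uint32 maskReg(RegisterID reg) { return 1u << reg; }

static const uint32 SavedRegs = maskReg(r2) | maskReg(r3);  // hypothetical split
static const uint32 TempRegs  = maskReg(r0) | maskReg(r1);
static const uint32 AvailRegs = SavedRegs | TempRegs;

static bool isSaved(RegisterID reg) {
    uint32 mask = maskReg(reg);
    assert(mask & AvailRegs);
    return bool(mask & SavedRegs);
}

int main() {
    // Decides whether a value must be moved before a call clobbers temps.
    printf("r0 saved? %d\n", isSaved(r0));  // 0: would be clobbered by a call
    printf("r2 saved? %d\n", isSaved(r2));  // 1: survives the call
    return 0;
}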
@@ -63,6 +63,9 @@ struct VMFrame
             void *ptr2;
             void *ptr3;
         } x;
+        struct {
+            uint32 dynamicArgc;
+        } call;
     } u;

     VMFrame *previous;
@@ -408,14 +408,14 @@ ic::Equality(VMFrame &f, ic::EqualityICInfo *ic)
 static void * JS_FASTCALL
 SlowCallFromIC(VMFrame &f, ic::CallICInfo *ic)
 {
-    stubs::SlowCall(f, ic->argc);
+    stubs::SlowCall(f, ic->frameSize.getArgc(f));
     return NULL;
 }

 static void * JS_FASTCALL
 SlowNewFromIC(VMFrame &f, ic::CallICInfo *ic)
 {
-    stubs::SlowNew(f, ic->argc);
+    stubs::SlowNew(f, ic->frameSize.staticArgc());
     return NULL;
 }
@@ -461,12 +461,11 @@ class CallCompiler : public BaseCompiler
 {
     VMFrame &f;
     CallICInfo &ic;
-    Value *vp;
     bool callingNew;

   public:
     CallCompiler(VMFrame &f, CallICInfo &ic, bool callingNew)
-      : BaseCompiler(f.cx), f(f), ic(ic), vp(f.regs.sp - (ic.argc + 2)), callingNew(callingNew)
+      : BaseCompiler(f.cx), f(f), ic(ic), callingNew(callingNew)
     {
     }

@@ -513,10 +512,14 @@ class CallCompiler : public BaseCompiler

         /* Try and compile. On success we get back the nmap pointer. */
         masm.storePtr(JSFrameReg, FrameAddress(offsetof(VMFrame, regs.fp)));
-        masm.move(Imm32(ic.argc), Registers::ArgReg1);
-        JSC::MacroAssembler::Call tryCompile =
-            masm.stubCall(JS_FUNC_TO_DATA_PTR(void *, stubs::CompileFunction),
-                          script->code, ic.frameDepth);
+        void *compilePtr = JS_FUNC_TO_DATA_PTR(void *, stubs::CompileFunction);
+        if (ic.frameSize.isStatic()) {
+            masm.move(Imm32(ic.frameSize.staticArgc()), Registers::ArgReg1);
+            masm.stubCall(compilePtr, script->code, ic.frameSize.staticFrameDepth());
+        } else {
+            masm.load32(FrameAddress(offsetof(VMFrame, u.call.dynamicArgc)), Registers::ArgReg1);
+            masm.stubCallWithDynamicDepth(compilePtr, script->code);
+        }
         masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);

         Jump notCompiled = masm.branchTestPtr(Assembler::Zero, Registers::ReturnReg,
@@ -527,7 +530,10 @@ class CallCompiler : public BaseCompiler
         hasCode.linkTo(masm.label(), &masm);

         /* Get nmap[ARITY], set argc, call. */
-        masm.move(Imm32(ic.argc), JSParamReg_Argc);
+        if (ic.frameSize.isStatic())
+            masm.move(Imm32(ic.frameSize.staticArgc()), JSParamReg_Argc);
+        else
+            masm.load32(FrameAddress(offsetof(VMFrame, u.call.dynamicArgc)), JSParamReg_Argc);
         masm.jump(t0);

         JSC::ExecutablePool *ep = poolForSize(masm.size(), CallICInfo::Pool_ScriptStub);
@@ -536,8 +542,7 @@ class CallCompiler : public BaseCompiler

         JSC::LinkBuffer buffer(&masm, ep);
         buffer.link(notCompiled, ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
-        buffer.link(tryCompile,
-                    JSC::FunctionPtr(JS_FUNC_TO_DATA_PTR(void *, stubs::CompileFunction)));
+        masm.finalize(buffer);
         JSC::CodeLocationLabel cs = buffer.finalizeCodeAddendum();

         JaegerSpew(JSpew_PICs, "generated CALL stub %p (%d bytes)\n", cs.executableAddress(),
@@ -553,6 +558,8 @@ class CallCompiler : public BaseCompiler

     void patchInlinePath(JSScript *script, JSObject *obj)
     {
+        JS_ASSERT(ic.frameSize.isStatic());
+
         /* Very fast path. */
         uint8 *start = (uint8 *)ic.funGuard.executableAddress();
         JSC::RepatchBuffer repatch(start - 32, 64);
@@ -570,6 +577,8 @@ class CallCompiler : public BaseCompiler

     bool generateStubForClosures(JSObject *obj)
     {
+        JS_ASSERT(ic.frameSize.isStatic());
+
         /* Slightly less fast path - guard on fun->getFunctionPrivate() instead. */
         Assembler masm;

@@ -611,7 +620,21 @@ class CallCompiler : public BaseCompiler

     bool generateNativeStub()
     {
-        Value *vp = f.regs.sp - (ic.argc + 2);
+        /*
+         * SplatApplyArgs has not been called, so we call it here before
+         * potentially touching f.u.call.dynamicArgc.
+         */
+        uintN staticFrameDepth = f.regs.sp - f.regs.fp->slots();
+        Value *vp;
+        if (ic.frameSize.isStatic()) {
+            JS_ASSERT(staticFrameDepth == ic.frameSize.staticFrameDepth());
+            vp = f.regs.sp - (2 + ic.frameSize.staticArgc());
+        } else {
+            JS_ASSERT(*f.regs.pc == JSOP_FUNAPPLY && GET_ARGC(f.regs.pc) == 2);
+            if (!ic::SplatApplyArgs(f))       /* updates regs.sp */
+                THROWV(true);
+            vp = f.regs.fp->slots() + (staticFrameDepth - 3);  /* this, arg1, arg2 */
+        }

         JSObject *obj;
         if (!IsFunctionObject(*vp, &obj))
@@ -624,7 +647,7 @@ class CallCompiler : public BaseCompiler
         if (callingNew)
             vp[1].setMagicWithObjectOrNullPayload(NULL);

-        if (!CallJSNative(cx, fun->u.n.native, ic.argc, vp))
+        if (!CallJSNative(cx, fun->u.n.native, ic.frameSize.getArgc(f), vp))
             THROWV(true);

         /* Right now, take slow-path for IC misses or multiple stubs. */
@@ -643,6 +666,11 @@ class CallCompiler : public BaseCompiler
         /* Guard on the function object identity, for now. */
         Jump funGuard = masm.branchPtr(Assembler::NotEqual, ic.funObjReg, ImmPtr(obj));

+        /* N.B. After this call, the frame will have a dynamic frame size. */
+        if (ic.frameSize.isDynamic()) {
+            masm.stubCall(JS_FUNC_TO_DATA_PTR(void *, ic::SplatApplyArgs), f.regs.pc, staticFrameDepth);
+        }
+
         Registers tempRegs;
 #ifndef JS_CPU_X86
         tempRegs.takeReg(Registers::ArgReg0);
@@ -655,10 +683,12 @@ class CallCompiler : public BaseCompiler
         masm.storePtr(ImmPtr(cx->regs->pc),
                       FrameAddress(offsetof(VMFrame, regs.pc)));

-        /* Store sp. */
-        uint32 spOffset = sizeof(JSStackFrame) + ic.frameDepth * sizeof(Value);
-        masm.addPtr(Imm32(spOffset), JSFrameReg, t0);
-        masm.storePtr(t0, FrameAddress(offsetof(VMFrame, regs.sp)));
+        /* Store sp (if not already set by ic::SplatApplyArgs). */
+        if (ic.frameSize.isStatic()) {
+            uint32 spOffset = sizeof(JSStackFrame) + staticFrameDepth * sizeof(Value);
+            masm.addPtr(Imm32(spOffset), JSFrameReg, t0);
+            masm.storePtr(t0, FrameAddress(offsetof(VMFrame, regs.sp)));
+        }

         /* Store fp. */
         masm.storePtr(JSFrameReg, FrameAddress(offsetof(VMFrame, regs.fp)));
@@ -671,20 +701,31 @@ class CallCompiler : public BaseCompiler
 #endif
         masm.loadPtr(FrameAddress(offsetof(VMFrame, cx)), cxReg);

-#ifdef JS_CPU_X86
-        /* x86's stack should be 16-byte aligned. */
-        masm.subPtr(Imm32(16), Assembler::stackPointerRegister);
-#endif
-
         /* Compute vp. */
 #ifdef JS_CPU_X86
         RegisterID vpReg = t0;
 #else
         RegisterID vpReg = Registers::ArgReg2;
 #endif

-        uint32 vpOffset = sizeof(JSStackFrame) + (ic.frameDepth - ic.argc - 2) * sizeof(Value);
-        masm.addPtr(Imm32(vpOffset), JSFrameReg, vpReg);
+        MaybeRegisterID argcReg;
+        if (ic.frameSize.isStatic()) {
+            uint32 vpOffset = sizeof(JSStackFrame) + (vp - f.regs.fp->slots()) * sizeof(Value);
+            masm.addPtr(Imm32(vpOffset), JSFrameReg, vpReg);
+        } else {
+            argcReg = tempRegs.takeAnyReg();
+            masm.load32(FrameAddress(offsetof(VMFrame, u.call.dynamicArgc)), argcReg.reg());
+            masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.sp)), vpReg);
+
+            /* vpOff = (argc + 2) * sizeof(Value) */
+            RegisterID vpOff = tempRegs.takeAnyReg();
+            masm.move(argcReg.reg(), vpOff);
+            masm.add32(Imm32(2), vpOff);  /* callee, this */
+            JS_STATIC_ASSERT(sizeof(Value) == 8);
+            masm.lshift32(Imm32(3), vpOff);
+            masm.subPtr(vpOff, vpReg);
+
+            tempRegs.putReg(vpOff);
+        }

         /* Mark vp[1] as magic for |new|. */
         if (callingNew) {
@@ -694,14 +735,22 @@ class CallCompiler : public BaseCompiler
         }

 #ifdef JS_CPU_X86
+        /* x86's stack should be 16-byte aligned. */
+        masm.subPtr(Imm32(16), Assembler::stackPointerRegister);
         masm.storePtr(vpReg, Address(Assembler::stackPointerRegister, 8));
 #endif

         /* Push argc. */
 #ifdef JS_CPU_X86
-        masm.store32(Imm32(ic.argc), Address(Assembler::stackPointerRegister, 4));
+        if (ic.frameSize.isStatic())
+            masm.store32(Imm32(ic.frameSize.staticArgc()), Address(Assembler::stackPointerRegister, 4));
+        else
+            masm.store32(argcReg.reg(), Address(Assembler::stackPointerRegister, 4));
 #else
-        masm.move(Imm32(ic.argc), Registers::ArgReg1);
+        if (ic.frameSize.isStatic())
+            masm.move(Imm32(ic.frameSize.staticArgc()), Registers::ArgReg1);
+        else if (argcReg.reg() != Registers::ArgReg1)
+            masm.move(argcReg.reg(), Registers::ArgReg1);
 #endif

         /* Push cx. */
@@ -755,6 +804,7 @@ class CallCompiler : public BaseCompiler
         buffer.link(done, ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
         buffer.link(call, JSC::FunctionPtr(JS_FUNC_TO_DATA_PTR(void *, fun->u.n.native)));
+        buffer.link(funGuard, ic.slowPathStart);
         masm.finalize(buffer);

         JSC::CodeLocationLabel cs = buffer.finalizeCodeAddendum();

@@ -774,9 +824,9 @@ class CallCompiler : public BaseCompiler
     {
         stubs::UncachedCallResult ucr;
         if (callingNew)
-            stubs::UncachedNewHelper(f, ic.argc, &ucr);
+            stubs::UncachedNewHelper(f, ic.frameSize.staticArgc(), &ucr);
         else
-            stubs::UncachedCallHelper(f, ic.argc, &ucr);
+            stubs::UncachedCallHelper(f, ic.frameSize.getArgc(f), &ucr);

         // If the function cannot be jitted (generally unjittable or empty script),
         // patch this site to go to a slow path always.
@@ -805,7 +855,7 @@ class CallCompiler : public BaseCompiler
             return ucr.codeAddr;
         }

-        if (ic.argc != fun->nargs) {
+        if (!ic.frameSize.isStatic() || ic.frameSize.staticArgc() != fun->nargs) {
             if (!generateFullCallStub(script, flags))
                 THROWV(NULL);
         } else {
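The arity check must now also fail over to the full call stub whenever the frame size is dynamic, since a dynamic argc cannot be compared against fun->nargs at compile time. A small sketch of the new predicate; the strings in the output are just labels for the two branches above:

#include <cstdio>

struct FrameSizeLite {
    bool isStatic;
    unsigned staticArgc;   // meaningful only when isStatic
};

static const char *pickPath(FrameSizeLite fs, unsigned funNargs) {
    if (!fs.isStatic || fs.staticArgc != funNargs)
        return "full call stub";   // dynamic argc, or arity fixup needed
    return "fast inline path";     // exact-arity static call
}

int main() {
    printf("%s\n", pickPath({true, 2}, 2));   // fast inline path
    printf("%s\n", pickPath({true, 1}, 2));   // full call stub (arity mismatch)
    printf("%s\n", pickPath({false, 0}, 2));  // full call stub (apply site)
    return 0;
}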
@@ -849,7 +899,7 @@ ic::NativeCall(VMFrame &f, CallICInfo *ic)
 {
     CallCompiler cc(f, *ic, false);
     if (!cc.generateNativeStub())
-        stubs::SlowCall(f, ic->argc);
+        stubs::SlowCall(f, ic->frameSize.getArgc(f));
 }

 void JS_FASTCALL
@@ -857,7 +907,109 @@ ic::NativeNew(VMFrame &f, CallICInfo *ic)
 {
     CallCompiler cc(f, *ic, true);
     if (!cc.generateNativeStub())
-        stubs::SlowNew(f, ic->argc);
+        stubs::SlowNew(f, ic->frameSize.staticArgc());
 }

+static inline bool
+BumpStack(VMFrame &f, uintN inc)
+{
+    static const unsigned MANY_ARGS = 1024;
+    static const unsigned MIN_SPACE = 500;
+
+    /* If we are not passing many args, treat this as a normal call. */
+    if (inc < MANY_ARGS) {
+        if (f.regs.sp + inc < f.stackLimit)
+            return true;
+        StackSpace &stack = f.cx->stack();
+        if (!stack.bumpCommitAndLimit(f.entryFp, f.regs.sp, inc, &f.stackLimit)) {
+            js_ReportOverRecursed(f.cx);
+            return false;
+        }
+        return true;
+    }
+
+    /*
+     * The purpose of f.stackLimit is to catch over-recursion based on
+     * assumptions about the average frame size. 'apply' with a large number of
+     * arguments breaks these assumptions and can result in premature "out of
+     * script quota" errors. Normally, apply will go through js::Invoke, which
+     * effectively starts a fresh stackLimit. Here, we bump f.stackLimit,
+     * if necessary, to allow for this 'apply' call, and a reasonable number of
+     * subsequent calls, to succeed without hitting the stackLimit. In theory,
+     * this allows a recursive chain containing apply to circumvent the
+     * stackLimit. However, since each apply call must consume at least
+     * MANY_ARGS slots, this sequence will quickly reach the end of the stack
+     * and OOM.
+     */
+
+    uintN incWithSpace = inc + MIN_SPACE;
+    Value *bumpedWithSpace = f.regs.sp + incWithSpace;
+    if (bumpedWithSpace < f.stackLimit)
+        return true;
+
+    StackSpace &stack = f.cx->stack();
+    if (stack.bumpCommitAndLimit(f.entryFp, f.regs.sp, incWithSpace, &f.stackLimit))
+        return true;
+
+    if (!stack.ensureSpace(f.cx, f.regs.sp, incWithSpace))
+        return false;
+    f.stackLimit = bumpedWithSpace;
+    return true;
+}
+
+/*
+ * SplatApplyArgs is only called for expressions of the form |f.apply(x, y)|.
+ * Additionally, the callee has already been checked to be the native apply.
+ * All successful paths through SplatApplyArgs must set f.u.call.dynamicArgc
+ * and f.regs.sp.
+ */
+JSBool JS_FASTCALL
+ic::SplatApplyArgs(VMFrame &f)
+{
+    JSContext *cx = f.cx;
+    Value *vp = f.regs.sp - 4;
+    JS_ASSERT(JS_CALLEE(cx, vp).toObject().getFunctionPrivate()->u.n.native == js_fun_apply);
+    JS_ASSERT(GET_ARGC(f.regs.pc) == 2);
+
+    /*
+     * This stub should mimic the steps taken by js_fun_apply. Step 1 and part
+     * of Step 2 have already been taken care of by calling jit code.
+     */
+
+    /* Step 2 (part 2). */
+    if (vp[3].isNullOrUndefined()) {
+        f.regs.sp--;
+        f.u.call.dynamicArgc = 0;
+        return true;
+    }
+
+    /* Step 3. */
+    if (!vp[3].isObject()) {
+        JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_APPLY_ARGS, js_apply_str);
+        THROWV(false);
+    }
+
+    /* Steps 4-5. */
+    JSObject *aobj = &vp[3].toObject();
+    jsuint length;
+    if (!js_GetLengthProperty(cx, aobj, &length))
+        THROWV(false);
+
+    JS_ASSERT(!JS_ON_TRACE(cx));
+
+    /* Step 6. */
+    uintN n = uintN(JS_MIN(length, JS_ARGS_LENGTH_MAX));
+
+    intN delta = n - 1;
+    if (delta > 0 && !BumpStack(f, delta))
+        THROWV(false);
+    f.regs.sp += delta;
+
+    /* Steps 7-8. */
+    if (!GetElements(cx, aobj, n, f.regs.sp - n))
+        THROWV(false);
+
+    f.u.call.dynamicArgc = n;
+    return true;
+}
+
 void
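Concretely, for f.apply(x, obj) the stack on entry holds [apply, f, x, obj] (argc == 2, sp = vp + 4), and SplatApplyArgs replaces the single obj slot with obj's elements, leaving [apply, f, x, arg0, ..., argn-1] and recording n in f.u.call.dynamicArgc; with length 3 the delta is 2. A toy re-enactment in plain C++ (strings stand in for jsvals, and BumpStack's quota logic is omitted):

#include <cstdio>
#include <string>
#include <vector>

int main() {
    // Stack for f.apply(x, obj), argc == 2:
    //   vp[0]=apply  vp[1]=f  vp[2]=x  vp[3]=obj
    std::vector<std::string> stack = {"apply", "f", "x", "obj"};
    std::vector<std::string> objElems = {"a", "b", "c"};

    size_t n = objElems.size();          // Step 6: n = min(length, JS_ARGS_LENGTH_MAX)
    size_t delta = n - 1;                // one obj slot becomes n arg slots (n >= 1 here)
    stack.resize(stack.size() + delta);  // f.regs.sp += delta, after BumpStack checked room
    for (size_t i = 0; i < n; i++)       // Steps 7-8: GetElements into sp - n
        stack[3 + i] = objElems[i];

    size_t dynamicArgc = n;              // f.u.call.dynamicArgc = n
    printf("dynamicArgc = %zu; stack:", dynamicArgc);
    for (const std::string &v : stack)
        printf(" %s", v.c_str());
    printf("\n");
    // The lowered call now invokes f with this = x and args a, b, c.
    return 0;
}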
@@ -48,6 +48,46 @@

 namespace js {
 namespace mjit {

+class FrameSize
+{
+    uint32 frameDepth_ : 16;
+    uint32 argc_;
+
+  public:
+    void initStatic(uint32 frameDepth, uint32 argc) {
+        JS_ASSERT(frameDepth > 0);
+        frameDepth_ = frameDepth;
+        argc_ = argc;
+    }
+
+    void initDynamic() {
+        frameDepth_ = 0;
+        argc_ = -1;  /* quiet gcc */
+    }
+
+    bool isStatic() const {
+        return frameDepth_ > 0;
+    }
+
+    bool isDynamic() const {
+        return frameDepth_ == 0;
+    }
+
+    uint32 staticFrameDepth() const {
+        JS_ASSERT(isStatic());
+        return frameDepth_;
+    }
+
+    uint32 staticArgc() const {
+        JS_ASSERT(isStatic());
+        return argc_;
+    }
+
+    uint32 getArgc(VMFrame &f) const {
+        return isStatic() ? staticArgc() : f.u.call.dynamicArgc;
+    }
+};
+
 namespace ic {

 struct MICInfo {
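FrameSize overloads one 16-bit field: a nonzero frameDepth_ means the site is static (argc and depth were fixed at compile time), while zero means dynamic and argc must be fetched from the VMFrame that ic::SplatApplyArgs filled in. A compilable usage sketch, with VMFrame cut down to the single field getArgc() reads:

#include <cassert>
#include <cstdio>

typedef unsigned int uint32;
#define JS_ASSERT(expr) assert(expr)

struct VMFrame {
    struct CallData { uint32 dynamicArgc; };
    struct U { CallData call; } u;   // stands in for the real union
};

class FrameSize {
    uint32 frameDepth_ : 16;
    uint32 argc_;

  public:
    void initStatic(uint32 frameDepth, uint32 argc) {
        JS_ASSERT(frameDepth > 0);
        frameDepth_ = frameDepth;
        argc_ = argc;
    }
    void initDynamic() {
        frameDepth_ = 0;
        argc_ = uint32(-1);  /* quiet gcc */
    }
    bool isStatic() const { return frameDepth_ > 0; }
    bool isDynamic() const { return frameDepth_ == 0; }
    uint32 staticArgc() const { JS_ASSERT(isStatic()); return argc_; }
    uint32 getArgc(VMFrame &f) const {
        return isStatic() ? staticArgc() : f.u.call.dynamicArgc;
    }
};

int main() {
    VMFrame f;
    FrameSize fs;

    fs.initStatic(12, 2);        // ordinary call: depth and argc fixed by the compiler
    printf("static argc = %u\n", fs.getArgc(f));

    fs.initDynamic();            // f.apply(x, obj): argc known only at runtime
    f.u.call.dynamicArgc = 3;    // set by ic::SplatApplyArgs
    printf("dynamic argc = %u\n", fs.getArgc(f));
    return 0;
}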
@@ -160,8 +200,7 @@ struct CallICInfo {
     /* PC at the call site. */
     jsbytecode *pc;

-    uint32 argc : 16;
-    uint32 frameDepth : 16;
+    FrameSize frameSize;

     /* Function object identity guard. */
     JSC::CodeLocationDataLabelPtr funGuard;
@@ -219,6 +258,7 @@ void * JS_FASTCALL New(VMFrame &f, ic::CallICInfo *ic);
 void * JS_FASTCALL Call(VMFrame &f, ic::CallICInfo *ic);
 void JS_FASTCALL NativeNew(VMFrame &f, ic::CallICInfo *ic);
 void JS_FASTCALL NativeCall(VMFrame &f, ic::CallICInfo *ic);
+JSBool JS_FASTCALL SplatApplyArgs(VMFrame &f);

 void PurgeMICs(JSContext *cx, JSScript *script);
 void SweepCallICs(JSScript *script);