PIC for GETPROP and CALLPROP on JSPropertyOp properties, bug 557358. r=dvander

Brian Hackett 2011-09-06 22:34:23 -07:00
parent 20031f4613
commit 63289b3360
25 changed files with 540 additions and 247 deletions
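
For context, a minimal, hypothetical embedding-side sketch (not part of this patch) of the kind of property the new PICs target: one backed by a JSPropertyOp getter hook, i.e. a native C function invoked on every read, rather than a stored slot or a scripted getter. The names GetCounter/DefineCounter are made up for illustration; the JSAPI signatures are the public ones from this era of SpiderMonkey. The jit-tests below read function .caller, which the engine of this era implements with such a hook; previously every such read fell back to a slow stub call, while with this patch the GETPROP/CALLPROP PICs can generate a stub that calls the hook directly.

#include "jsapi.h"

/* JSPropertyOp getter: recompute the value on every read of the property. */
static JSBool
GetCounter(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
    *vp = INT_TO_JSVAL(42);
    return JS_TRUE;
}

static JSBool
DefineCounter(JSContext *cx, JSObject *obj)
{
    /* No stored value and no setter: reads always go through the hook. */
    return JS_DefineProperty(cx, obj, "counter", JSVAL_VOID,
                             GetCounter, NULL,
                             JSPROP_ENUMERATE | JSPROP_SHARED);
}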

View File

@ -0,0 +1,18 @@
// GETPROP PIC with multiple stubs containing getter hooks.
function foo(arr) {
  for (var i = 0; i < 100; i++)
    arr[i].caller;
}
arr = Object.create(Object.prototype);
first = Object.create({});
first.caller = null;
second = Object.create({});
second.caller = null;
for (var i = 0; i < 100; ) {
  arr[i++] = first;
  arr[i++] = foo;
  arr[i++] = second;
}
foo.caller;
foo(arr);

View File

@ -0,0 +1,19 @@
// PIC on CALLPROP invoking getter hook.
function foo(arr) {
  for (var i = 0; i < 100; i++)
    arr[i].caller(false);
}
arr = Object.create(Object.prototype);
first = Object.create({});
first.caller = bar;
second = Object.create({});
second.caller = bar;
for (var i = 0; i < 100; )
  arr[i++] = foo;
foo.caller;
function bar(x) {
  if (x)
    foo(arr);
}
bar(true);

View File

@ -140,6 +140,13 @@ class Bytecode
/* Call whose result should be monitored. */
bool monitoredTypesReturn : 1;
/*
* Dynamically observed state about the execution of this opcode. These are
* hints about the script for use during compilation.
*/
bool arrayWriteHole: 1; /* SETELEM which has written to an array hole. */
bool accessGetter: 1; /* Property read on a shape with a getter hook. */
/* Stack depth before this opcode. */
uint32 stackDepth;
@ -964,7 +971,6 @@ class ScriptAnalysis
/* Accessors for bytecode information. */
Bytecode& getCode(uint32 offset) {
JS_ASSERT(script->compartment()->activeAnalysis);
JS_ASSERT(offset < script->length);
JS_ASSERT(codeArray[offset]);
return *codeArray[offset];
@ -972,7 +978,6 @@ class ScriptAnalysis
Bytecode& getCode(const jsbytecode *pc) { return getCode(pc - script->code); }
Bytecode* maybeCode(uint32 offset) {
JS_ASSERT(script->compartment()->activeAnalysis);
JS_ASSERT(offset < script->length);
return codeArray[offset];
}

View File

@ -3688,7 +3688,6 @@ ScriptAnalysis::analyzeTypesBytecode(JSContext *cx, unsigned offset,
}
case JSOP_SETELEM:
case JSOP_SETHOLE:
poppedTypes(pc, 1)->addSetElement(cx, script, pc, poppedTypes(pc, 2), poppedTypes(pc, 0));
poppedTypes(pc, 0)->addSubset(cx, &pushed[0]);
break;
@ -6016,6 +6015,13 @@ TypeScript::Sweep(JSContext *cx, JSScript *script)
#ifdef JS_METHODJIT
mjit::ReleaseScriptCode(cx, script);
#endif
/*
* Use counts for scripts are reset on GC. After discarding code we need to
* let it warm back up to get information like which opcodes are setting
* array holes or accessing getter properties.
*/
script->resetUseCount();
}
void

View File

@ -4080,7 +4080,6 @@ BEGIN_CASE(JSOP_CALLELEM)
END_CASE(JSOP_CALLELEM)
BEGIN_CASE(JSOP_SETELEM)
BEGIN_CASE(JSOP_SETHOLE)
{
JSObject *obj;
FETCH_OBJECT(cx, -3, obj);
@ -4098,12 +4097,13 @@ BEGIN_CASE(JSOP_SETHOLE)
break;
if ((jsuint)i >= obj->getArrayLength())
obj->setArrayLength(cx, i + 1);
*regs.pc = JSOP_SETHOLE;
}
obj->setDenseArrayElementWithType(cx, i, regs.sp[-1]);
goto end_setelem;
} else {
*regs.pc = JSOP_SETHOLE;
if (!script->ensureRanBytecode(cx))
goto error;
script->analysis()->getCode(regs.pc).arrayWriteHole = true;
}
}
} while (0);

View File

@ -5602,6 +5602,16 @@ js_NativeGetInline(JSContext *cx, JSObject *receiver, JSObject *obj, JSObject *p
return true;
}
jsbytecode *pc;
JSScript *script = cx->stack.currentScript(&pc);
if (script) {
if (!script->ensureRanBytecode(cx))
return false;
analyze::Bytecode *code = script->analysis()->maybeCode(pc);
if (code)
code->accessGetter = true;
}
sample = cx->runtime->propertyRemovals;
if (!shape->get(cx, receiver, obj, pobj, vp))
return false;
@ -5614,9 +5624,6 @@ js_NativeGetInline(JSContext *cx, JSObject *receiver, JSObject *obj, JSObject *p
pobj->nativeSetSlot(slot, *vp);
}
/* Record values produced by shapes without a default getter. */
AddTypePropertyId(cx, obj, shape->propid, *vp);
return true;
}

View File

@ -3967,7 +3967,6 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
break;
case JSOP_SETELEM:
case JSOP_SETHOLE:
rval = POP_STR();
op = JSOP_NOP; /* turn off parens */
xval = POP_STR();

View File

@ -592,8 +592,5 @@ OPDEF(JSOP_UNBRANDTHIS, 229,"unbrandthis", NULL, 1, 0, 0, 0, JOF_BYTE)
OPDEF(JSOP_SHARPINIT, 230,"sharpinit", NULL, 3, 0, 0, 0, JOF_UINT16|JOF_SHARPSLOT)
/* Substituted for JSOP_SETELEM to indicate opcodes which have written holes in dense arrays. */
OPDEF(JSOP_SETHOLE, 231, "sethole", NULL, 1, 3, 1, 3, JOF_BYTE |JOF_ELEM|JOF_SET|JOF_DETECTING)
/* Pop the stack, convert to a jsid (int or string), and push back. */
OPDEF(JSOP_TOID, 232, "toid", NULL, 1, 1, 1, 0, JOF_BYTE)
OPDEF(JSOP_TOID, 231, "toid", NULL, 1, 1, 1, 0, JOF_BYTE)

View File

@ -6864,8 +6864,7 @@ LeaveTree(TraceMonitor *tm, TracerState& state, VMSideExit* lr)
* Since this doesn't re-enter the recorder, the post-state snapshot
* is invalid. Fix it up here.
*/
if ((op == JSOP_SETELEM || op == JSOP_SETHOLE) &&
JSOp(regs->pc[JSOP_SETELEM_LENGTH]) == JSOP_POP) {
if (op == JSOP_SETELEM && JSOp(regs->pc[JSOP_SETELEM_LENGTH]) == JSOP_POP) {
regs->sp -= js_CodeSpec[JSOP_SETELEM].nuses;
regs->sp += js_CodeSpec[JSOP_SETELEM].ndefs;
regs->pc += JSOP_SETELEM_LENGTH;
@ -13405,7 +13404,7 @@ TraceRecorder::setElem(int lval_spindex, int idx_spindex, int v_spindex)
}
jsbytecode* pc = cx->regs().pc;
if ((*pc == JSOP_SETELEM || *pc == JSOP_SETHOLE) && pc[JSOP_SETELEM_LENGTH] != JSOP_POP)
if (*pc == JSOP_SETELEM && pc[JSOP_SETELEM_LENGTH] != JSOP_POP)
set(&lval, v_ins);
return ARECORD_CONTINUE;
@ -13417,12 +13416,6 @@ TraceRecorder::record_JSOP_SETELEM()
return setElem(-3, -2, -1);
}
JS_REQUIRES_STACK AbortableRecordingStatus
TraceRecorder::record_JSOP_SETHOLE()
{
return setElem(-3, -2, -1);
}
static JSBool FASTCALL
CheckSameGlobal(JSObject *obj, JSObject *globalObj)
{
@ -17050,7 +17043,7 @@ LoopProfile::profileOperation(JSContext* cx, JSOp op)
if (op == JSOP_NEW)
increment(OP_NEW);
if (op == JSOP_GETELEM || op == JSOP_SETELEM || op == JSOP_SETHOLE) {
if (op == JSOP_GETELEM || op == JSOP_SETELEM) {
Value& lval = cx->regs().sp[op == JSOP_GETELEM ? -2 : -3];
if (lval.isObject() && js_IsTypedArray(&lval.toObject()))
increment(OP_TYPED_ARRAY);

View File

@ -125,15 +125,6 @@ class Assembler : public ValueAssembler
DataLabelPtr label;
};
/* Need a temp reg that is not ArgReg1. */
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
static const RegisterID ClobberInCall = JSC::X86Registers::ecx;
#elif defined(JS_CPU_ARM)
static const RegisterID ClobberInCall = JSC::ARMRegisters::r2;
#elif defined(JS_CPU_SPARC)
static const RegisterID ClobberInCall = JSC::SparcRegisters::l1;
#endif
/* :TODO: OOM */
Label startLabel;
Vector<CallPatch, 64, SystemAllocPolicy> callPatches;
@ -553,14 +544,14 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
}
}
void storeArg(uint32 i, Imm32 imm) {
void storeArg(uint32 i, ImmPtr imm) {
JS_ASSERT(callIsAligned);
RegisterID to;
if (Registers::regForArg(callConvention, i, &to)) {
move(imm, to);
availInCall.takeRegUnchecked(to);
} else {
store32(imm, addressOfArg(i));
storePtr(imm, addressOfArg(i));
}
}
@ -625,7 +616,7 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
#undef STUB_CALL_TYPE
void setupInfallibleVMFrame(int32 frameDepth) {
void setupFrameDepth(int32 frameDepth) {
// |frameDepth < 0| implies ic::SplatApplyArgs has been called which
// means regs.sp has already been set in the VMFrame.
if (frameDepth >= 0) {
@ -633,9 +624,13 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
// regs->sp = sp
addPtr(Imm32(sizeof(StackFrame) + frameDepth * sizeof(jsval)),
JSFrameReg,
ClobberInCall);
storePtr(ClobberInCall, FrameAddress(offsetof(VMFrame, regs.sp)));
Registers::ClobberInCall);
storePtr(Registers::ClobberInCall, FrameAddress(offsetof(VMFrame, regs.sp)));
}
}
void setupInfallibleVMFrame(int32 frameDepth) {
setupFrameDepth(frameDepth);
// The JIT has moved Arg1 already, and we've guaranteed to not clobber
// it. Move ArgReg0 into place now. setupFallibleVMFrame will not
@ -664,6 +659,19 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
restoreStackBase();
}
void setupFallibleABICall(bool inlining, jsbytecode *pc, int32 frameDepth) {
setupFrameDepth(frameDepth);
/* Store fp and pc */
storePtr(JSFrameReg, FrameAddress(VMFrame::offsetOfFp));
storePtr(ImmPtr(pc), FrameAddress(offsetof(VMFrame, regs.pc)));
if (inlining) {
/* ABI calls cannot be made from inlined frames. */
storePtr(ImmPtr(NULL), FrameAddress(VMFrame::offsetOfInlined));
}
}
void restoreStackBase() {
#if defined(JS_CPU_X86)
/*
@ -867,6 +875,7 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
const js::Shape *shape,
RegisterID typeReg, RegisterID dataReg)
{
JS_ASSERT(shape->hasSlot());
if (shape->isMethod())
loadValueAsComponents(ObjectValue(shape->methodObject()), typeReg, dataReg);
else if (obj->isFixedSlot(shape->slot))

View File

@ -175,6 +175,40 @@ class LinkerHelper : public JSC::LinkBuffer
}
};
class NativeStubLinker : public LinkerHelper
{
public:
#ifdef JS_CPU_X64
typedef JSC::MacroAssembler::DataLabelPtr FinalJump;
#else
typedef JSC::MacroAssembler::Jump FinalJump;
#endif
NativeStubLinker(Assembler &masm, JITScript *jit, jsbytecode *pc, FinalJump done)
: LinkerHelper(masm, JSC::METHOD_CODE), jit(jit), pc(pc), done(done)
{}
bool init(JSContext *cx);
void patchJump(JSC::CodeLocationLabel target) {
#ifdef JS_CPU_X64
patch(done, target);
#else
link(done, target);
#endif
}
private:
JITScript *jit;
jsbytecode *pc;
FinalJump done;
};
bool
NativeStubEpilogue(VMFrame &f, Assembler &masm, NativeStubLinker::FinalJump *result,
int32 initialFrameDepth, int32 vpOffset,
MaybeRegisterID typeReg, MaybeRegisterID dataReg);
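As a reading aid (editorial sketch, not part of the patch): the intended use of these helpers, condensed from the call sites added later in this patch (the rewritten native-call stub in MonoIC.cpp and the new generateGetterStub in PolyIC.cpp). It assumes the surrounding stub-compiler context (f, masm, initialFrameDepth, vpOffset, rejoinTarget) and is not compilable on its own.

/* After emitting the ABI call, generate the shared epilogue: exception check
   plus either a type check of the result in memory (no result registers, as
   here) or a load into a register pair for a later type barrier. */
NativeStubLinker::FinalJump done;
if (!NativeStubEpilogue(f, masm, &done, initialFrameDepth, vpOffset,
                        MaybeRegisterID(), MaybeRegisterID()))
    return false;

/* Record the stub's pool and pc on the JITScript so recompilation can find
   and patch it, then point the fallthrough jump at the rejoin target. */
NativeStubLinker linker(masm, f.jit(), f.regs.pc, done);
if (!linker.init(f.cx))
    THROWV(true);
linker.patchJump(rejoinTarget);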
/*
* On ARM, we periodically flush a constant pool into the instruction stream
* where constants are found using PC-relative addressing. This is necessary

View File

@ -2027,7 +2027,6 @@ mjit::Compiler::generateMethod()
END_CASE(JSOP_TOID)
BEGIN_CASE(JSOP_SETELEM)
BEGIN_CASE(JSOP_SETHOLE)
{
jsbytecode *next = &PC[JSOP_SETELEM_LENGTH];
bool pop = (JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next));
@ -4447,6 +4446,18 @@ mjit::Compiler::jsop_getprop(JSAtom *atom, JSValueType knownType,
shapeReg = frame.allocReg();
}
/*
* If this access has been on a shape with a getter hook, make preparations
* so that we can generate a stub to call the hook directly (rather than be
* forced to make a stub call). Sync the stack up front and kill all
* registers so that PIC stubs can contain calls, and always generate a
* type barrier if inference is enabled (known property types do not
* reflect properties with getter hooks).
*/
pic.canCallHook = usePropCache && analysis->getCode(PC).accessGetter;
if (pic.canCallHook)
frame.syncAndKillEverything();
pic.shapeReg = shapeReg;
pic.atom = atom;
@ -4497,7 +4508,8 @@ mjit::Compiler::jsop_getprop(JSAtom *atom, JSValueType knownType,
pic.objReg = objReg;
frame.pushRegs(shapeReg, objReg, knownType);
BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg);
BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg, false, false,
/* force = */ pic.canCallHook);
stubcc.rejoin(Changes(1));
pics.append(pic);
@ -4547,6 +4559,10 @@ mjit::Compiler::jsop_callprop_generic(JSAtom *atom)
pic.shapeReg = shapeReg;
pic.atom = atom;
pic.canCallHook = analysis->getCode(PC).accessGetter;
if (pic.canCallHook)
frame.syncAndKillEverything();
/*
* Store the type and object back. Don't bother keeping them in registers,
* since a sync will be needed for the upcoming call.
@ -4611,7 +4627,8 @@ mjit::Compiler::jsop_callprop_generic(JSAtom *atom)
/* Adjust the frame. */
frame.pop();
frame.pushRegs(shapeReg, objReg, knownPushedType(0));
BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg);
BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg, false, false,
/* force = */ pic.canCallHook);
pushSyncedEntry(1);
@ -4710,6 +4727,10 @@ mjit::Compiler::jsop_callprop_obj(JSAtom *atom)
objReg = frame.copyDataIntoReg(top);
}
pic.canCallHook = analysis->getCode(PC).accessGetter;
if (pic.canCallHook)
frame.syncAndKillEverything();
/* Guard on shape. */
masm.loadShape(objReg, shapeReg);
pic.shapeGuard = masm.label();
@ -4749,7 +4770,8 @@ mjit::Compiler::jsop_callprop_obj(JSAtom *atom)
*/
frame.dup();
frame.storeRegs(-2, shapeReg, objReg, knownPushedType(0));
BarrierState barrier = testBarrier(shapeReg, objReg);
BarrierState barrier = testBarrier(shapeReg, objReg, false, false,
/* force = */ pic.canCallHook);
/*
* Assert correctness of hardcoded offsets.
@ -7236,11 +7258,6 @@ mjit::Compiler::hasTypeBarriers(jsbytecode *pc)
if (!cx->typeInferenceEnabled())
return false;
#if 0
/* Stress test. */
return js_CodeSpec[*pc].format & JOF_TYPESET;
#endif
return analysis->typeBarriers(pc) != NULL;
}
@ -7440,7 +7457,7 @@ mjit::Compiler::addTypeTest(types::TypeSet *types, RegisterID typeReg, RegisterI
mjit::Compiler::BarrierState
mjit::Compiler::testBarrier(RegisterID typeReg, RegisterID dataReg,
bool testUndefined, bool testReturn)
bool testUndefined, bool testReturn, bool force)
{
BarrierState state;
state.typeReg = typeReg;
@ -7462,18 +7479,12 @@ mjit::Compiler::testBarrier(RegisterID typeReg, RegisterID dataReg,
JS_ASSERT(!testUndefined);
if (!analysis->getCode(PC).monitoredTypesReturn)
return state;
} else if (!hasTypeBarriers(PC)) {
} else if (!hasTypeBarriers(PC) && !force) {
if (testUndefined && !types->hasType(types::Type::UndefinedType()))
state.jump.setJump(masm.testUndefined(Assembler::Equal, typeReg));
return state;
}
#if 0
/* Stress test. */
state.jump.setJump(masm.testInt32(Assembler::NotEqual, typeReg));
return state;
#endif
types->addFreeze(cx);
/* Cannot have type barriers when the result of the operation is already unknown. */

View File

@ -184,7 +184,7 @@ class Compiler : public BaseCompiler
};
struct BaseICInfo {
BaseICInfo(JSOp op) : op(op)
BaseICInfo(JSOp op) : op(op), canCallHook(false)
{ }
Label fastPathStart;
Label fastPathRejoin;
@ -192,12 +192,14 @@ class Compiler : public BaseCompiler
Call slowPathCall;
DataLabelPtr paramAddr;
JSOp op;
bool canCallHook;
void copyTo(ic::BaseIC &to, JSC::LinkBuffer &full, JSC::LinkBuffer &stub) {
to.fastPathStart = full.locationOf(fastPathStart);
to.fastPathRejoin = full.locationOf(fastPathRejoin);
to.slowPathStart = stub.locationOf(slowPathStart);
to.slowPathCall = stub.locationOf(slowPathCall);
to.canCallHook = canCallHook;
to.op = op;
JS_ASSERT(to.op == op);
}
@ -565,7 +567,8 @@ class Compiler : public BaseCompiler
BarrierState pushAddressMaybeBarrier(Address address, JSValueType type, bool reuseBase,
bool testUndefined = false);
BarrierState testBarrier(RegisterID typeReg, RegisterID dataReg,
bool testUndefined = false, bool testReturn = false);
bool testUndefined = false, bool testReturn = false,
bool force = false);
void finishBarrier(const BarrierState &barrier, RejoinState rejoin, uint32 which);
/* Non-emitting helpers. */

View File

@ -1332,21 +1332,37 @@ js_InternalInterpret(void *returnData, void *returnType, void *returnReg, js::VM
break;
case REJOIN_NATIVE:
case REJOIN_NATIVE_LOWERED: {
case REJOIN_NATIVE_LOWERED:
case REJOIN_NATIVE_GETTER: {
/*
* We don't rejoin until after the native stub finishes execution, in
* which case the return value will be in memory. For lowered natives,
* the return value will be in the 'this' value's slot.
* the return value will be in the 'this' value's slot. For getters,
* the result is at nextsp[0] (see ic::CallProp).
*/
if (rejoin == REJOIN_NATIVE_LOWERED)
if (rejoin == REJOIN_NATIVE_LOWERED) {
nextsp[-1] = nextsp[0];
} else if (rejoin == REJOIN_NATIVE_GETTER) {
if (js_CodeSpec[op].format & JOF_CALLOP) {
/*
* If we went through jsop_callprop_obj then the 'this' value
* is still in its original slot and hasn't been shifted yet,
* so fix that now. Yuck.
*/
if (nextsp[-2].isObject())
nextsp[-1] = nextsp[-2];
nextsp[-2] = nextsp[0];
} else {
nextsp[-1] = nextsp[0];
}
}
/* Release this reference on the orphaned native stub. */
RemoveOrphanedNative(cx, fp);
/*
* Note: there is no need to monitor the result of the native, the
* native stub will always do a type check before finishing.
* Note: there is no need to monitor the result of the native, the stub
* will always do a type check before finishing.
*/
f.regs.pc = nextpc;
break;

View File

@ -1820,7 +1820,6 @@ LoopState::analyzeLoopBody(unsigned frame)
skipAnalysis = true;
break;
case JSOP_SETHOLE:
case JSOP_SETELEM: {
SSAValue objValue = analysis->poppedValue(pc, 2);
SSAValue elemValue = analysis->poppedValue(pc, 1);
@ -1844,7 +1843,7 @@ LoopState::analyzeLoopBody(unsigned frame)
continue;
if (!addModifiedProperty(object, JSID_VOID))
return;
if (op == JSOP_SETHOLE && !addGrowArray(object))
if (analysis->getCode(pc).arrayWriteHole && !addGrowArray(object))
return;
}

View File

@ -388,6 +388,15 @@ struct Registers {
# error "Unsupported platform"
#endif
/* Temp reg that can be clobbered when setting up a fallible fast or ABI call. */
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
static const RegisterID ClobberInCall = JSC::X86Registers::ecx;
#elif defined(JS_CPU_ARM)
static const RegisterID ClobberInCall = JSC::ARMRegisters::r2;
#elif defined(JS_CPU_SPARC)
static const RegisterID ClobberInCall = JSC::SparcRegisters::l1;
#endif
static const uint32 AvailFPRegs = TempFPRegs;
static inline uint32 maskReg(FPRegisterID reg) {
@ -412,6 +421,20 @@ struct Registers {
return regs.takeAnyReg().reg();
}
/* Get a register which is not live before a normal ABI call with at most four args. */
static inline Registers tempCallRegMask() {
Registers regs(AvailRegs);
#ifndef JS_CPU_X86
regs.takeReg(ArgReg0);
regs.takeReg(ArgReg1);
regs.takeReg(ArgReg2);
#ifdef JS_CPU_SPARC
regs.takeReg(ArgReg3);
#endif
#endif
return regs;
}
Registers(uint32 freeMask)
: freeMask(freeMask)
{ }

View File

@ -1115,6 +1115,12 @@ mjit::JITScript::~JITScript()
(*pExecPool)->release();
}
for (unsigned i = 0; i < nativeCallStubs.length(); i++) {
JSC::ExecutablePool *pool = nativeCallStubs[i].pool;
if (pool)
pool->release();
}
ic::CallICInfo *callICs_ = callICs();
for (uint32 i = 0; i < nCallICs; i++) {
callICs_[i].releasePools();

View File

@ -258,10 +258,12 @@ enum RejoinState {
/*
* As for REJOIN_FALLTHROUGH, but holds a reference on the compartment's
* orphaned native pools which needs to be reclaimed by InternalInterpret.
* The return value needs to be adjusted if REJOIN_NATIVE_LOWERED.
* The return value needs to be adjusted if REJOIN_NATIVE_LOWERED, and
* REJOIN_NATIVE_GETTER is for ABI calls made for property accesses.
*/
REJOIN_NATIVE,
REJOIN_NATIVE_LOWERED,
REJOIN_NATIVE_GETTER,
/*
* Dummy rejoin stored in VMFrames to indicate they return into a native
@ -553,6 +555,30 @@ struct PCLengthEntry {
double picsLength; /* amount of PIC stub code generated */
};
/*
* Pools and patch locations for managing stubs for non-FASTCALL C++ calls made
* from native call and PropertyOp stubs. Ownership of these may be transferred
* into the orphanedNativePools for the compartment.
*/
struct NativeCallStub {
/* PC for the stub. Native call stubs cannot be added for inline frames. */
jsbytecode *pc;
/* Pool for the stub, NULL if it has been removed from the script. */
JSC::ExecutablePool *pool;
/*
* Fallthrough jump returning to jitcode which may be patched during
* recompilation. On x64 this is an indirect jump to avoid issues with far
* jumps on relative branches.
*/
#ifdef JS_CPU_X64
JSC::CodeLocationDataLabelPtr jump;
#else
JSC::CodeLocationJump jump;
#endif
};
struct JITScript {
typedef JSC::MacroAssemblerCodeRef CodeRef;
CodeRef code; /* pool & code addresses */
@ -611,6 +637,9 @@ struct JITScript {
ExecPoolVector execPools;
#endif
// Additional ExecutablePools for native call and getter stubs.
Vector<NativeCallStub, 0, SystemAllocPolicy> nativeCallStubs;
NativeMapEntry *nmap() const;
js::mjit::InlineFrame *inlineFrames() const;
js::mjit::CallSite *callSites() const;

View File

@ -555,6 +555,101 @@ SlowNewFromIC(VMFrame &f, ic::CallICInfo *ic)
return NULL;
}
bool
NativeStubLinker::init(JSContext *cx)
{
JSC::ExecutablePool *pool = LinkerHelper::init(cx);
if (!pool)
return false;
NativeCallStub stub;
stub.pc = pc;
stub.pool = pool;
stub.jump = locationOf(done);
if (!jit->nativeCallStubs.append(stub)) {
pool->release();
return false;
}
return true;
}
/*
* Generate epilogue code to run after a stub ABI call to a native or getter.
* This checks for an exception, and either type checks the result against the
* observed types for the opcode or loads the result into a register pair
* (it will go through a type barrier afterwards).
*/
bool
mjit::NativeStubEpilogue(VMFrame &f, Assembler &masm, NativeStubLinker::FinalJump *result,
int32 initialFrameDepth, int32 vpOffset,
MaybeRegisterID typeReg, MaybeRegisterID dataReg)
{
/* Reload fp, which may have been clobbered by restoreStackBase(). */
masm.loadPtr(FrameAddress(VMFrame::offsetOfFp), JSFrameReg);
Jump hasException = masm.branchTest32(Assembler::Zero, Registers::ReturnReg,
Registers::ReturnReg);
Address resultAddress(JSFrameReg, vpOffset);
Vector<Jump> mismatches(f.cx);
if (f.cx->typeInferenceEnabled()) {
if (!typeReg.isSet()) {
/*
* Test the result of this native against the known result type set
* for the call. We don't assume knowledge about the types that
* natives can return, except when generating specialized paths in
* FastBuiltins.
*/
types::TypeSet *types = f.script()->analysis()->bytecodeTypes(f.pc());
if (!masm.generateTypeCheck(f.cx, resultAddress, types, &mismatches))
THROWV(false);
}
/*
* Can no longer trigger recompilation in this stub, clear the stub
* rejoin on the VMFrame.
*/
masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
}
if (typeReg.isSet())
masm.loadValueAsComponents(resultAddress, typeReg.reg(), dataReg.reg());
/*
* The final jump is an indirect jump on x64, so that we'll always be able
* to repatch it to the interpoline later.
*/
Label finished = masm.label();
#ifdef JS_CPU_X64
JSC::MacroAssembler::DataLabelPtr done = masm.moveWithPatch(ImmPtr(NULL), Registers::ValueReg);
masm.jump(Registers::ValueReg);
#else
Jump done = masm.jump();
#endif
/* Generate a call for type check failures on the native result. */
if (!mismatches.empty()) {
for (unsigned i = 0; i < mismatches.length(); i++)
mismatches[i].linkTo(masm.label(), &masm);
masm.addPtr(Imm32(vpOffset), JSFrameReg, Registers::ArgReg1);
masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::TypeBarrierReturn),
f.regs.pc, NULL, initialFrameDepth);
masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
masm.jump().linkTo(finished, &masm);
}
/* Move JaegerThrowpoline into register for very far jump on x64. */
hasException.linkTo(masm.label(), &masm);
if (f.cx->typeInferenceEnabled())
masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
masm.throwInJIT();
*result = done;
return true;
}
/*
* Calls have an inline path and an out-of-line path. The inline path is used
* in the fastest case: the method has JIT'd code, and |argc == nargs|.
@ -893,32 +988,12 @@ class CallCompiler : public BaseCompiler
f.regs.pc, NULL, initialFrameDepth);
}
Registers tempRegs(Registers::AvailRegs);
#ifndef JS_CPU_X86
tempRegs.takeReg(Registers::ArgReg0);
tempRegs.takeReg(Registers::ArgReg1);
tempRegs.takeReg(Registers::ArgReg2);
#endif
Registers tempRegs = Registers::tempCallRegMask();
RegisterID t0 = tempRegs.takeAnyReg().reg();
masm.bumpStubCounter(f.script(), f.pc(), t0);
/* Store pc. */
masm.storePtr(ImmPtr(f.regs.pc),
FrameAddress(offsetof(VMFrame, regs.pc)));
/* Store inlined. */
masm.storePtr(ImmPtr(f.regs.inlined()),
FrameAddress(VMFrame::offsetOfInlined));
/* Store sp (if not already set by ic::SplatApplyArgs). */
if (ic.frameSize.isStatic()) {
uint32 spOffset = sizeof(StackFrame) + initialFrameDepth * sizeof(Value);
masm.addPtr(Imm32(spOffset), JSFrameReg, t0);
masm.storePtr(t0, FrameAddress(offsetof(VMFrame, regs.sp)));
}
/* Store fp. */
masm.storePtr(JSFrameReg, FrameAddress(VMFrame::offsetOfFp));
int32 storeFrameDepth = ic.frameSize.isStatic() ? initialFrameDepth : -1;
masm.setupFallibleABICall(cx->typeInferenceEnabled(), f.regs.pc, storeFrameDepth);
/* Grab cx. */
#ifdef JS_CPU_X86
@ -959,7 +1034,7 @@ class CallCompiler : public BaseCompiler
masm.setupABICall(Registers::NormalCall, 3);
masm.storeArg(2, vpReg);
if (ic.frameSize.isStatic())
masm.storeArg(1, Imm32(ic.frameSize.staticArgc()));
masm.storeArg(1, ImmPtr((void *) ic.frameSize.staticArgc()));
else
masm.storeArg(1, argcReg.reg());
masm.storeArg(0, cxReg);
@ -977,83 +1052,21 @@ class CallCompiler : public BaseCompiler
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, native), false);
/* Reload fp, which may have been clobbered by restoreStackBase(). */
masm.loadPtr(FrameAddress(VMFrame::offsetOfFp), JSFrameReg);
Jump hasException = masm.branchTest32(Assembler::Zero, Registers::ReturnReg,
Registers::ReturnReg);
Vector<Jump> mismatches(f.cx);
if (cx->typeInferenceEnabled()) {
types::AutoEnterTypeInference enter(f.cx);
/*
* Test the result of this native against the known result type
* set for the call. We don't assume knowledge about the types that
* natives can return, except when generating specialized paths in
* FastBuiltins. We don't need to record dependencies on the result
* type set, as the compiler will already have done so when making
* the call IC.
*/
Address address(JSFrameReg, vpOffset);
types::TypeSet *types = f.script()->analysis()->bytecodeTypes(f.pc());
if (!masm.generateTypeCheck(f.cx, address, types, &mismatches))
THROWV(true);
/*
* Can no longer trigger recompilation in this stub, clear the stub
* rejoin on the VMFrame.
*/
masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
}
/*
* The final jump is a indirect on x64, so that we'll always be able
* to repatch it to the interpoline later.
*/
Label finished = masm.label();
#ifdef JS_CPU_X64
void *slowJoin = ic.slowPathStart.labelAtOffset(ic.slowJoinOffset).executableAddress();
DataLabelPtr done = masm.moveWithPatch(ImmPtr(slowJoin), Registers::ValueReg);
masm.jump(Registers::ValueReg);
#else
Jump done = masm.jump();
#endif
/* Generate a call for type check failures on the native result. */
if (!mismatches.empty()) {
for (unsigned i = 0; i < mismatches.length(); i++)
mismatches[i].linkTo(masm.label(), &masm);
masm.addPtr(Imm32(vpOffset), JSFrameReg, Registers::ArgReg1);
masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::TypeBarrierReturn),
f.regs.pc, NULL, initialFrameDepth);
masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
masm.jump().linkTo(finished, &masm);
}
/* Move JaegerThrowpoline into register for very far jump on x64. */
hasException.linkTo(masm.label(), &masm);
if (cx->typeInferenceEnabled())
masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
masm.throwInJIT();
LinkerHelper linker(masm, JSC::METHOD_CODE);
JSC::ExecutablePool *ep = poolForSize(linker, CallICInfo::Pool_NativeStub);
if (!ep)
NativeStubLinker::FinalJump done;
if (!NativeStubEpilogue(f, masm, &done, initialFrameDepth, vpOffset, MaybeRegisterID(), MaybeRegisterID()))
return false;
NativeStubLinker linker(masm, f.jit(), f.regs.pc, done);
if (!linker.init(f.cx))
THROWV(true);
ic.fastGuardedNative = obj;
if (!linker.verifyRange(jit)) {
disable(jit);
return true;
}
ic.nativeJump = linker.locationOf(done);
linker.patchJump(ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
#ifndef JS_CPU_X64
linker.link(done, ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
#endif
ic.fastGuardedNative = obj;
linker.link(funGuard, ic.slowPathStart);
JSC::CodeLocationLabel start = linker.finalize();
@ -1474,10 +1487,8 @@ JITScript::sweepCallICs(JSContext *cx, bool purgeAll)
ic.purgeGuardedObject();
}
if (nativeDead) {
ic.releasePool(CallICInfo::Pool_NativeStub);
if (nativeDead)
ic.fastGuardedNative = NULL;
}
if (purgeAll) {
ic.releasePool(CallICInfo::Pool_ScriptStub);

View File

@ -216,7 +216,6 @@ struct CallICInfo {
enum PoolIndex {
Pool_ScriptStub,
Pool_ClosureStub,
Pool_NativeStub,
Total_Pools
};
@ -240,17 +239,6 @@ struct CallICInfo {
/* Inline to OOL jump, redirected by stubs. */
JSC::CodeLocationJump funJump;
/*
* Native stub fallthrough jump which may be patched during recompilation.
* On x64 this is an indirect jump to avoid issues with far jumps on
* relative branches.
*/
#ifdef JS_CPU_X64
JSC::CodeLocationDataLabelPtr nativeJump;
#else
JSC::CodeLocationJump nativeJump;
#endif
/* Offset to inline scripted call, from funGuard. */
uint32 hotJumpOffset : 16;
uint32 joinPointOffset : 16;
@ -281,13 +269,12 @@ struct CallICInfo {
fastGuardedNative = NULL;
hit = false;
hasJsFunCheck = false;
pools[0] = pools[1] = pools[2] = NULL;
PodArrayZero(pools);
}
inline void releasePools() {
releasePool(Pool_ScriptStub);
releasePool(Pool_ClosureStub);
releasePool(Pool_NativeStub);
}
inline void releasePool(PoolIndex index) {

View File

@ -149,9 +149,11 @@ class PICStubCompiler : public BaseCompiler
uint32 gcNumber;
public:
bool canCallHook;
PICStubCompiler(const char *type, VMFrame &f, JSScript *script, ic::PICInfo &pic, void *stub)
: BaseCompiler(f.cx), type(type), f(f), script(script), pic(pic), stub(stub),
gcNumber(f.cx->runtime->gcNumber)
gcNumber(f.cx->runtime->gcNumber), canCallHook(pic.canCallHook)
{ }
bool isCallOp() const {
@ -800,10 +802,19 @@ struct GetPropertyHelper {
LookupStatus testForGet() {
if (!shape->hasDefaultGetter()) {
if (!shape->isMethod())
return ic.disable(cx, "getter");
if (!ic.isCallOp())
return ic.disable(cx, "method valued shape");
if (shape->isMethod()) {
if (!ic.isCallOp())
return ic.disable(cx, "method valued shape");
} else {
if (shape->hasGetterValue())
return ic.disable(cx, "getter value shape");
if (shape->hasSlot() && holder != obj)
return ic.disable(cx, "slotful getter hook through prototype");
if (!ic.canCallHook)
return ic.disable(cx, "can't call getter hook");
if (f.regs.inlined())
return ic.disable(cx, "hook called from inline frame");
}
} else if (!shape->hasSlot()) {
return ic.disable(cx, "no slot");
}
@ -1001,6 +1012,8 @@ class GetPropCompiler : public PICStubCompiler
return status;
if (getprop.obj != getprop.holder)
return disable("proto walk on String.prototype");
if (!getprop.shape->hasDefaultGetterOrIsMethod())
return disable("getter hook on String.prototype");
if (hadGC())
return Lookup_Uncacheable;
@ -1142,6 +1155,93 @@ class GetPropCompiler : public PICStubCompiler
return Lookup_Cacheable;
}
void generateGetterStub(Assembler &masm, const Shape *shape,
Label start, const Vector<Jump, 8> &shapeMismatches)
{
/*
* Getter hook needs to be called from the stub. The state is fully
* synced and no registers are live except the result registers.
*/
JS_ASSERT(pic.canCallHook);
PropertyOp getter = shape->getterOp();
if (cx->typeInferenceEnabled()) {
masm.storePtr(ImmPtr((void *) REJOIN_NATIVE_GETTER),
FrameAddress(offsetof(VMFrame, stubRejoin)));
}
Registers tempRegs = Registers::tempCallRegMask();
if (tempRegs.hasReg(Registers::ClobberInCall))
tempRegs.takeReg(Registers::ClobberInCall);
/* Get a register to hold obj while we set up the rest of the frame. */
RegisterID holdObjReg = pic.objReg;
if (tempRegs.hasReg(pic.objReg)) {
tempRegs.takeReg(pic.objReg);
} else {
holdObjReg = tempRegs.takeAnyReg().reg();
masm.move(pic.objReg, holdObjReg);
}
RegisterID t0 = tempRegs.takeAnyReg().reg();
masm.bumpStubCounter(f.script(), f.pc(), t0);
/*
* Initialize vp, which is either a slot in the object (the holder,
* actually, which must equal the object here) or undefined.
* Use vp == sp (which for CALLPROP will actually be the original
* sp + 1), to avoid clobbering stack values.
*/
int32 vpOffset = (char *) f.regs.sp - (char *) f.fp();
if (shape->hasSlot()) {
masm.loadObjProp(obj, holdObjReg, shape,
Registers::ClobberInCall, t0);
masm.storeValueFromComponents(Registers::ClobberInCall, t0, Address(JSFrameReg, vpOffset));
} else {
masm.storeValue(UndefinedValue(), Address(JSFrameReg, vpOffset));
}
int32 initialFrameDepth = f.regs.sp - f.fp()->slots();
masm.setupFallibleABICall(cx->typeInferenceEnabled(), f.regs.pc, initialFrameDepth);
/* Grab cx. */
#ifdef JS_CPU_X86
RegisterID cxReg = tempRegs.takeAnyReg().reg();
#else
RegisterID cxReg = Registers::ArgReg0;
#endif
masm.loadPtr(FrameAddress(offsetof(VMFrame, cx)), cxReg);
/* Grab vp. */
RegisterID vpReg = t0;
masm.addPtr(Imm32(vpOffset), JSFrameReg, vpReg);
masm.restoreStackBase();
masm.setupABICall(Registers::NormalCall, 4);
masm.storeArg(3, vpReg);
masm.storeArg(2, ImmPtr((void *) JSID_BITS(shape->propid)));
masm.storeArg(1, holdObjReg);
masm.storeArg(0, cxReg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, getter), false);
NativeStubLinker::FinalJump done;
if (!NativeStubEpilogue(f, masm, &done, 0, vpOffset, pic.shapeReg, pic.objReg))
return;
NativeStubLinker linker(masm, f.jit(), f.regs.pc, done);
if (!linker.init(f.cx))
THROW();
if (!linker.verifyRange(f.jit())) {
disable("code memory is out of range");
return;
}
linker.patchJump(pic.fastPathRejoin);
linkerEpilogue(linker, start, shapeMismatches);
}
LookupStatus generateStub(JSObject *holder, const Shape *shape)
{
Vector<Jump, 8> shapeMismatches(cx);
@ -1196,6 +1296,13 @@ class GetPropCompiler : public PICStubCompiler
pic.secondShapeGuard = 0;
}
if (!shape->hasDefaultGetterOrIsMethod()) {
generateGetterStub(masm, shape, start, shapeMismatches);
if (setStubShapeOffset)
pic.getPropLabels().setStubShapeJump(masm, start, stubShapeJumpLabel);
return Lookup_Cacheable;
}
/* Load the value out of the object. */
masm.loadObjProp(holder, holderReg, shape, pic.shapeReg, pic.objReg);
Jump done = masm.jump();
@ -1211,12 +1318,22 @@ class GetPropCompiler : public PICStubCompiler
return disable("code memory is out of range");
}
// The final exit jumps to the store-back in the inline stub.
buffer.link(done, pic.fastPathRejoin);
linkerEpilogue(buffer, start, shapeMismatches);
if (setStubShapeOffset)
pic.getPropLabels().setStubShapeJump(masm, start, stubShapeJumpLabel);
return Lookup_Cacheable;
}
void linkerEpilogue(LinkerHelper &buffer, Label start, const Vector<Jump, 8> &shapeMismatches)
{
// The guard exit jumps to the original slow case.
for (Jump *pj = shapeMismatches.begin(); pj != shapeMismatches.end(); ++pj)
buffer.link(*pj, pic.slowPathStart);
// The final exit jumps to the store-back in the inline stub.
buffer.link(done, pic.fastPathRejoin);
CodeLocationLabel cs = buffer.finalize();
JaegerSpew(JSpew_PICs, "generated %s stub at %p\n", type, cs.executableAddress());
@ -1225,15 +1342,10 @@ class GetPropCompiler : public PICStubCompiler
pic.stubsGenerated++;
pic.updateLastPath(buffer, start);
if (setStubShapeOffset)
pic.getPropLabels().setStubShapeJump(masm, start, stubShapeJumpLabel);
if (pic.stubsGenerated == MAX_PIC_STUBS)
disable("max stubs reached");
if (obj->isDenseArray())
disable("dense array");
return Lookup_Cacheable;
}
void patchPreviousToHere(CodeLocationLabel cs)
@ -1249,8 +1361,14 @@ class GetPropCompiler : public PICStubCompiler
shapeGuardJumpOffset = pic.getPropLabels().getStubShapeJumpOffset();
else
shapeGuardJumpOffset = pic.shapeGuard + pic.getPropLabels().getInlineShapeJumpOffset();
int secondGuardOffset = getLastStubSecondShapeGuard();
JaegerSpew(JSpew_PICs, "Patching previous (%d stubs) (start %p) (offset %d) (second %d)\n",
(int) pic.stubsGenerated, label.executableAddress(),
shapeGuardJumpOffset, secondGuardOffset);
repatcher.relink(label.jumpAtOffset(shapeGuardJumpOffset), cs);
if (int secondGuardOffset = getLastStubSecondShapeGuard())
if (secondGuardOffset)
repatcher.relink(label.jumpAtOffset(secondGuardOffset), cs);
}
@ -1265,8 +1383,11 @@ class GetPropCompiler : public PICStubCompiler
if (hadGC())
return Lookup_Uncacheable;
if (obj == getprop.holder && !pic.inlinePathPatched)
if (obj == getprop.holder &&
getprop.shape->hasDefaultGetterOrIsMethod() &&
!pic.inlinePathPatched) {
return patchInline(getprop.holder, getprop.shape);
}
return generateStub(getprop.holder, getprop.shape);
}
@ -2015,6 +2136,11 @@ ic::CallProp(VMFrame &f, ic::PICInfo *pic)
NATIVE_GET(cx, &objv.toObject(), obj2, shape, JSGET_NO_METHOD_BARRIER, &rval,
THROW());
}
/*
* Adjust the stack to reflect the height after the GETPROP, here and
* below. Getter hook ICs depend on this to know which value of sp they
* are updating for consistent rejoins, don't modify this!
*/
regs.sp++;
regs.sp[-2] = rval;
regs.sp[-1] = lval;

View File

@ -93,6 +93,9 @@ struct BaseIC : public MacroAssemblerTypedefs {
bool hit : 1;
bool slowCallPatched : 1;
// Whether getter/setter hooks can be called from IC stubs.
bool canCallHook : 1;
// Number of stubs generated.
uint32 stubsGenerated : 5;

View File

@ -85,6 +85,7 @@ static inline JSRejoinState ScriptedRejoin(uint32 pcOffset)
static inline JSRejoinState StubRejoin(RejoinState rejoin)
{
JS_ASSERT(rejoin != REJOIN_NONE);
return rejoin << 1;
}
@ -119,71 +120,66 @@ Recompiler::patchCall(JITScript *jit, StackFrame *fp, void **location)
void
Recompiler::patchNative(JSCompartment *compartment, JITScript *jit, StackFrame *fp,
jsbytecode *pc, CallSite *inlined, RejoinState rejoin)
jsbytecode *pc, RejoinState rejoin)
{
/*
* There is a native IC at pc which triggered a recompilation. The recompilation
* could have been triggered either by the native call itself, or by a SplatApplyArgs
* preparing for the native call. Either way, we don't want to patch up the call,
* but will instead steal the pool for the native IC so it doesn't get freed
* with the old script, and patch up the jump at the end to go to the interpoline.
* There is a native call or getter IC at pc which triggered recompilation.
* The recompilation could have been triggered either by the native call
* itself, or by a SplatApplyArgs preparing for the native call. Either
* way, we don't want to patch up the call, but will instead steal the pool
* for the IC so it doesn't get freed with the JITScript, and patch up the
* jump at the end to go to the interpoline.
*
* When doing this, we do not reset the IC itself; the JITScript must
* be dead and about to be released due to the recompilation (or a GC).
*/
fp->setRejoin(StubRejoin(rejoin));
/* :XXX: We might crash later if this fails. */
compartment->jaegerCompartment()->orphanedNativeFrames.append(fp);
unsigned i;
ic::CallICInfo *callICs = jit->callICs();
for (i = 0; i < jit->nCallICs; i++) {
CallSite *call = callICs[i].call;
if (inlined) {
/*
* The IC and regs.inlined will have two different call sites for
* the same point in the script. The IC site refers to the scripted
* return and regs.inlined has the prologue site (which was in use
* when the native stub was generated.
*/
if (call->inlineIndex == inlined->inlineIndex && call->pcOffset == inlined->pcOffset)
break;
} else if (call->inlineIndex == uint32(-1) &&
call->pcOffset == uint32(pc - jit->script->code)) {
break;
}
}
JS_ASSERT(i < jit->nCallICs);
ic::CallICInfo &ic = callICs[i];
JS_ASSERT(ic.fastGuardedNative);
DebugOnly<bool> found = false;
JSC::ExecutablePool *&pool = ic.pools[ic::CallICInfo::Pool_NativeStub];
/*
* Find and patch all native call stubs attached to the given PC. There may
* be multiple ones for getter stubs attached to e.g. a GETELEM.
*/
for (unsigned i = 0; i < jit->nativeCallStubs.length(); i++) {
NativeCallStub &stub = jit->nativeCallStubs[i];
if (stub.pc != pc)
continue;
if (!pool) {
/* Already stole this stub. */
return;
}
found = true;
/* Patch the native fallthrough to go to the interpoline. */
{
/* Check for pools that were already patched. */
if (!stub.pool)
continue;
/* Patch the native fallthrough to go to the interpoline. */
{
#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
/* Win64 needs stack adjustment */
void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolinePatched);
/* Win64 needs stack adjustment */
void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolinePatched);
#else
void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
#endif
uint8 *start = (uint8 *)ic.nativeJump.executableAddress();
JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
uint8 *start = (uint8 *)stub.jump.executableAddress();
JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
#ifdef JS_CPU_X64
repatch.repatch(ic.nativeJump, interpoline);
repatch.repatch(stub.jump, interpoline);
#else
repatch.relink(ic.nativeJump, JSC::CodeLocationLabel(interpoline));
repatch.relink(stub.jump, JSC::CodeLocationLabel(interpoline));
#endif
}
/* :XXX: We leak the pool if this fails. Oh well. */
compartment->jaegerCompartment()->orphanedNativePools.append(stub.pool);
/* Mark as stolen in case there are multiple calls on the stack. */
stub.pool = NULL;
}
/* :XXX: We leak the pool if this fails. Oh well. */
compartment->jaegerCompartment()->orphanedNativePools.append(pool);
/* Mark as stolen in case there are multiple calls on the stack. */
pool = NULL;
JS_ASSERT(found);
}
void
@ -199,11 +195,11 @@ Recompiler::patchFrame(JSCompartment *compartment, VMFrame *f, JSScript *script)
void **addr = f->returnAddressLocation();
RejoinState rejoin = (RejoinState) f->stubRejoin;
if (rejoin == REJOIN_NATIVE ||
rejoin == REJOIN_NATIVE_LOWERED) {
rejoin == REJOIN_NATIVE_LOWERED ||
rejoin == REJOIN_NATIVE_GETTER) {
/* Native call. */
if (fp->script() == script) {
patchNative(compartment, fp->jit(), fp,
f->regs.pc, NULL, rejoin);
patchNative(compartment, fp->jit(), fp, f->regs.pc, rejoin);
f->stubRejoin = REJOIN_NATIVE_PATCHED;
}
} else if (rejoin == REJOIN_NATIVE_PATCHED) {
@ -295,6 +291,7 @@ Recompiler::expandInlineFrames(JSCompartment *compartment,
/* The VMFrame is calling CompileFunction. */
JS_ASSERT(f->stubRejoin != REJOIN_NATIVE &&
f->stubRejoin != REJOIN_NATIVE_LOWERED &&
f->stubRejoin != REJOIN_NATIVE_GETTER &&
f->stubRejoin != REJOIN_NATIVE_PATCHED);
innerfp->setRejoin(StubRejoin((RejoinState) f->stubRejoin));
*frameAddr = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);

View File

@ -100,7 +100,7 @@ private:
static void patchCall(JITScript *jit, StackFrame *fp, void **location);
static void patchNative(JSCompartment *compartment, JITScript *jit, StackFrame *fp,
jsbytecode *pc, CallSite *inline_, RejoinState rejoin);
jsbytecode *pc, RejoinState rejoin);
static StackFrame *
expandInlineFrameChain(StackFrame *outer, InlineFrame *inner);

View File

@ -597,18 +597,13 @@ stubs::SetElem(VMFrame &f)
break;
if ((jsuint)i >= obj->getArrayLength())
obj->setArrayLength(cx, i + 1);
/*
* Note: this stub is used for ENUMELEM, so watch out
* before overwriting the op.
*/
if (JSOp(*f.pc()) == JSOP_SETELEM)
*f.pc() = JSOP_SETHOLE;
}
obj->setDenseArrayElementWithType(cx, i, rval);
goto end_setelem;
} else {
if (JSOp(*f.pc()) == JSOP_SETELEM)
*f.pc() = JSOP_SETHOLE;
if (!f.script()->ensureRanBytecode(cx))
THROW();
f.script()->analysis()->getCode(f.pc()).arrayWriteHole = true;
}
}
} while (0);