Backout 5377759a3145:0dfb6e5f8223 (bug 1171945) for SM ARM compilation bustage

This commit is contained in:
Nathan Froyd 2015-08-19 21:20:57 -04:00
parent 75b23850e3
commit 1f09139081
40 changed files with 948 additions and 1416 deletions

View File

@ -1612,7 +1612,7 @@ BaselineCompiler::emitBinaryArith()
frame.popRegsAndSync(2);
// Call IC
ICBinaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline);
ICBinaryArith_Fallback::Compiler stubCompiler(cx);
if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
return false;

View File

@ -41,30 +41,18 @@ namespace jit {
template <typename T>
class DebugModeOSRVolatileStub
{
ICStubCompiler::Engine engine_;
T stub_;
BaselineFrame* frame_;
uint32_t pcOffset_;
public:
DebugModeOSRVolatileStub(ICStubCompiler::Engine engine, BaselineFrame* frame,
ICFallbackStub* stub)
: engine_(engine),
stub_(static_cast<T>(stub)),
frame_(frame),
pcOffset_(stub->icEntry()->pcOffset())
{ }
DebugModeOSRVolatileStub(BaselineFrame* frame, ICFallbackStub* stub)
: engine_(ICStubCompiler::Engine::Baseline),
stub_(static_cast<T>(stub)),
: stub_(static_cast<T>(stub)),
frame_(frame),
pcOffset_(stub->icEntry()->pcOffset())
{ }
bool invalid() const {
if (engine_ == ICStubCompiler::Engine::IonMonkey)
return false;
MOZ_ASSERT(!frame_->isHandlingException());
ICEntry& entry = frame_->script()->baselineScript()->icEntryFromPCOffset(pcOffset_);
return stub_ != entry.fallbackStub();

View File

@ -1861,6 +1861,571 @@ ICToNumber_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
return tailCallVM(DoToNumberFallbackInfo, masm);
}
//
// BinaryArith_Fallback
//
// Fallback VM path for the binary arith/bitwise ops (JSOP_ADD..JSOP_URSH,
// JSOP_POW). Computes the generic result via the interpreter helpers, then,
// based on the observed operand/result types, may attach a type-specialized
// optimized stub to this fallback stub's chain for future executions.
// Returns false only on exception/OOM.
static bool
DoBinaryArithFallback(JSContext* cx, BaselineFrame* frame, ICBinaryArith_Fallback* stub_,
HandleValue lhs, HandleValue rhs, MutableHandleValue ret)
{
// This fallback stub may trigger debug mode toggling.
DebugModeOSRVolatileStub<ICBinaryArith_Fallback*> stub(frame, stub_);
RootedScript script(cx, frame->script());
jsbytecode* pc = stub->icEntry()->pc(script);
JSOp op = JSOp(*pc);
FallbackICSpew(cx, stub, "BinaryArith(%s,%d,%d)", js_CodeName[op],
int(lhs.isDouble() ? JSVAL_TYPE_DOUBLE : lhs.extractNonDoubleType()),
int(rhs.isDouble() ? JSVAL_TYPE_DOUBLE : rhs.extractNonDoubleType()));
// Don't pass lhs/rhs directly, we need the original values when
// generating stubs.
RootedValue lhsCopy(cx, lhs);
RootedValue rhsCopy(cx, rhs);
// Perform the arith operation.
switch(op) {
case JSOP_ADD:
// Do an add.
if (!AddValues(cx, &lhsCopy, &rhsCopy, ret))
return false;
break;
case JSOP_SUB:
if (!SubValues(cx, &lhsCopy, &rhsCopy, ret))
return false;
break;
case JSOP_MUL:
if (!MulValues(cx, &lhsCopy, &rhsCopy, ret))
return false;
break;
case JSOP_DIV:
if (!DivValues(cx, &lhsCopy, &rhsCopy, ret))
return false;
break;
case JSOP_MOD:
if (!ModValues(cx, &lhsCopy, &rhsCopy, ret))
return false;
break;
case JSOP_POW:
if (!math_pow_handle(cx, lhsCopy, rhsCopy, ret))
return false;
break;
case JSOP_BITOR: {
int32_t result;
if (!BitOr(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_BITXOR: {
int32_t result;
if (!BitXor(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_BITAND: {
int32_t result;
if (!BitAnd(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_LSH: {
int32_t result;
if (!BitLsh(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_RSH: {
int32_t result;
if (!BitRsh(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_URSH: {
if (!UrshOperation(cx, lhs, rhs, ret))
return false;
break;
}
default:
MOZ_CRASH("Unhandled baseline arith op");
}
// Check if debug mode toggling made the stub invalid.
if (stub.invalid())
return true;
if (ret.isDouble())
stub->setSawDoubleResult();
// Check to see if a new stub should be generated.
if (stub->numOptimizedStubs() >= ICBinaryArith_Fallback::MAX_OPTIMIZED_STUBS) {
stub->noteUnoptimizableOperands();
return true;
}
// Handle string concat.
if (op == JSOP_ADD) {
if (lhs.isString() && rhs.isString()) {
JitSpew(JitSpew_BaselineIC, "  Generating %s(String, String) stub", js_CodeName[op]);
MOZ_ASSERT(ret.isString());
ICBinaryArith_StringConcat::Compiler compiler(cx);
ICStub* strcatStub = compiler.getStub(compiler.getStubSpace(script));
if (!strcatStub)
return false;
stub->addNewStub(strcatStub);
return true;
}
if ((lhs.isString() && rhs.isObject()) || (lhs.isObject() && rhs.isString())) {
JitSpew(JitSpew_BaselineIC, "  Generating %s(%s, %s) stub", js_CodeName[op],
lhs.isString() ? "String" : "Object",
lhs.isString() ? "Object" : "String");
MOZ_ASSERT(ret.isString());
ICBinaryArith_StringObjectConcat::Compiler compiler(cx, lhs.isString());
ICStub* strcatStub = compiler.getStub(compiler.getStubSpace(script));
if (!strcatStub)
return false;
stub->addNewStub(strcatStub);
return true;
}
}
// Handle Boolean+Boolean / Boolean+Int32 operands for the ops that the
// BooleanWithInt32 stub supports.
if (((lhs.isBoolean() && (rhs.isBoolean() || rhs.isInt32())) ||
(rhs.isBoolean() && (lhs.isBoolean() || lhs.isInt32()))) &&
(op == JSOP_ADD || op == JSOP_SUB || op == JSOP_BITOR || op == JSOP_BITAND ||
op == JSOP_BITXOR))
{
JitSpew(JitSpew_BaselineIC, "  Generating %s(%s, %s) stub", js_CodeName[op],
lhs.isBoolean() ? "Boolean" : "Int32", rhs.isBoolean() ? "Boolean" : "Int32");
ICBinaryArith_BooleanWithInt32::Compiler compiler(cx, op, lhs.isBoolean(), rhs.isBoolean());
ICStub* arithStub = compiler.getStub(compiler.getStubSpace(script));
if (!arithStub)
return false;
stub->addNewStub(arithStub);
return true;
}
// Handle only int32 or double.
if (!lhs.isNumber() || !rhs.isNumber()) {
stub->noteUnoptimizableOperands();
return true;
}
MOZ_ASSERT(ret.isNumber());
if (lhs.isDouble() || rhs.isDouble() || ret.isDouble()) {
if (!cx->runtime()->jitSupportsFloatingPoint)
return true;
switch (op) {
case JSOP_ADD:
case JSOP_SUB:
case JSOP_MUL:
case JSOP_DIV:
case JSOP_MOD: {
// Unlink int32 stubs, it's faster to always use the double stub.
stub->unlinkStubsWithKind(cx, ICStub::BinaryArith_Int32);
JitSpew(JitSpew_BaselineIC, "  Generating %s(Double, Double) stub", js_CodeName[op]);
ICBinaryArith_Double::Compiler compiler(cx, op);
ICStub* doubleStub = compiler.getStub(compiler.getStubSpace(script));
if (!doubleStub)
return false;
stub->addNewStub(doubleStub);
return true;
}
default:
break;
}
}
// Int32 x Int32 (JSOP_POW is excluded and stays on the generic path).
if (lhs.isInt32() && rhs.isInt32() && op != JSOP_POW) {
bool allowDouble = ret.isDouble();
if (allowDouble)
stub->unlinkStubsWithKind(cx, ICStub::BinaryArith_Int32);
JitSpew(JitSpew_BaselineIC, "  Generating %s(Int32, Int32%s) stub", js_CodeName[op],
allowDouble ? " => Double" : "");
ICBinaryArith_Int32::Compiler compilerInt32(cx, op, allowDouble);
ICStub* int32Stub = compilerInt32.getStub(compilerInt32.getStubSpace(script));
if (!int32Stub)
return false;
stub->addNewStub(int32Stub);
return true;
}
// Handle Double <BITOP> Int32 or Int32 <BITOP> Double case.
if (((lhs.isDouble() && rhs.isInt32()) || (lhs.isInt32() && rhs.isDouble())) &&
ret.isInt32())
{
switch(op) {
case JSOP_BITOR:
case JSOP_BITXOR:
case JSOP_BITAND: {
JitSpew(JitSpew_BaselineIC, "  Generating %s(%s, %s) stub", js_CodeName[op],
lhs.isDouble() ? "Double" : "Int32",
lhs.isDouble() ? "Int32" : "Double");
ICBinaryArith_DoubleWithInt32::Compiler compiler(cx, op, lhs.isDouble());
ICStub* optStub = compiler.getStub(compiler.getStubSpace(script));
if (!optStub)
return false;
stub->addNewStub(optStub);
return true;
}
default:
break;
}
}
// No specialized stub applies; remember that so Ion can see it.
stub->noteUnoptimizableOperands();
return true;
}
typedef bool (*DoBinaryArithFallbackFn)(JSContext*, BaselineFrame*, ICBinaryArith_Fallback*,
HandleValue, HandleValue, MutableHandleValue);
// Tail-call VM function; PopValues(2) discards the two values that were
// synced onto the stack for the expression decompiler.
static const VMFunction DoBinaryArithFallbackInfo =
FunctionInfo<DoBinaryArithFallbackFn>(DoBinaryArithFallback, TailCall, PopValues(2));
// Emit the fallback stub: sync R0/R1, push the VM-call arguments (rhs, lhs,
// stub pointer, frame pointer) and tail-call DoBinaryArithFallback.
bool
ICBinaryArith_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
MOZ_ASSERT(R0 == JSReturnOperand);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
// Ensure stack is fully synced for the expression decompiler.
masm.pushValue(R0);
masm.pushValue(R1);
// Push arguments.
masm.pushValue(R1);
masm.pushValue(R0);
masm.push(ICStubReg);
pushFramePtr(masm, R0.scratchReg());
return tailCallVM(DoBinaryArithFallbackInfo, masm);
}
// Concatenate two strings and store the result in |res|.
// Fails (returns false) only if the allocating concatenation fails.
static bool
DoConcatStrings(JSContext* cx, HandleString lhs, HandleString rhs, MutableHandleValue res)
{
    if (JSString* str = ConcatStrings<CanGC>(cx, lhs, rhs)) {
        res.setString(str);
        return true;
    }
    return false;
}
typedef bool (*DoConcatStringsFn)(JSContext*, HandleString, HandleString, MutableHandleValue);
static const VMFunction DoConcatStringsInfo = FunctionInfo<DoConcatStringsFn>(DoConcatStrings, TailCall);
// Optimized stub for String + String: guard both operands are strings,
// unbox them and tail-call DoConcatStrings.
bool
ICBinaryArith_StringConcat::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
// Type guards: fall through to the next stub if either side isn't a string.
masm.branchTestString(Assembler::NotEqual, R0, &failure);
masm.branchTestString(Assembler::NotEqual, R1, &failure);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
masm.unboxString(R0, R0.scratchReg());
masm.unboxString(R1, R1.scratchReg());
masm.push(R1.scratchReg());
masm.push(R0.scratchReg());
if (!tailCallVM(DoConcatStringsInfo, masm))
return false;
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
// Convert an object operand to a string for concatenation. ToPrimitive can
// run arbitrary script, so the value is re-rooted first. Returns nullptr
// on failure.
static JSString*
ConvertObjectToStringForConcat(JSContext* cx, HandleValue obj)
{
MOZ_ASSERT(obj.isObject());
RootedValue rootedObj(cx, obj);
if (!ToPrimitive(cx, &rootedObj))
return nullptr;
return ToString<CanGC>(cx, rootedObj);
}
// Implement JSOP_ADD for one string operand and one object operand.
// The object side is converted with ToPrimitive/ToString (which may run
// script) before concatenation; |lhsIsString| says which side is the string.
static bool
DoConcatStringObject(JSContext* cx, bool lhsIsString, HandleValue lhs, HandleValue rhs,
MutableHandleValue res)
{
JSString* lstr = nullptr;
JSString* rstr = nullptr;
if (lhsIsString) {
// Convert rhs first.
MOZ_ASSERT(lhs.isString() && rhs.isObject());
rstr = ConvertObjectToStringForConcat(cx, rhs);
if (!rstr)
return false;
// lhs is already string.
lstr = lhs.toString();
} else {
MOZ_ASSERT(rhs.isString() && lhs.isObject());
// Convert lhs first.
lstr = ConvertObjectToStringForConcat(cx, lhs);
if (!lstr)
return false;
// rhs is already string.
rstr = rhs.toString();
}
// Fast path: try a non-allocating (NoGC) concat first; if that fails,
// root both strings and retry with GC allowed.
JSString* str = ConcatStrings<NoGC>(cx, lstr, rstr);
if (!str) {
RootedString nlstr(cx, lstr), nrstr(cx, rstr);
str = ConcatStrings<CanGC>(cx, nlstr, nrstr);
if (!str)
return false;
}
// Technically, we need to call TypeScript::MonitorString for this PC, however
// it was called when this stub was attached so it's OK.
res.setString(str);
return true;
}
typedef bool (*DoConcatStringObjectFn)(JSContext*, bool lhsIsString, HandleValue, HandleValue,
MutableHandleValue);
// Tail-call VM function; PopValues(2) discards the two values synced for
// the expression decompiler.
static const VMFunction DoConcatStringObjectInfo =
FunctionInfo<DoConcatStringObjectFn>(DoConcatStringObject, TailCall, PopValues(2));
// Optimized stub for String + Object (either order, fixed by lhsIsString_):
// guard the operand types, then tail-call DoConcatStringObject.
bool
ICBinaryArith_StringObjectConcat::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
if (lhsIsString_) {
masm.branchTestString(Assembler::NotEqual, R0, &failure);
masm.branchTestObject(Assembler::NotEqual, R1, &failure);
} else {
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
masm.branchTestString(Assembler::NotEqual, R1, &failure);
}
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
// Sync for the decompiler.
masm.pushValue(R0);
masm.pushValue(R1);
// Push arguments.
masm.pushValue(R1);
masm.pushValue(R0);
masm.push(Imm32(lhsIsString_));
if (!tailCallVM(DoConcatStringObjectInfo, masm))
return false;
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
// Optimized stub for Double op Double (add/sub/mul/div/mod): coerce both
// operands to doubles in FP registers, compute inline (mod goes through an
// ABI call to NumberMod), box the double result in R0 and return.
bool
ICBinaryArith_Double::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
// ensureDouble converts int32 values too; anything else fails the guard.
masm.ensureDouble(R0, FloatReg0, &failure);
masm.ensureDouble(R1, FloatReg1, &failure);
switch (op) {
case JSOP_ADD:
masm.addDouble(FloatReg1, FloatReg0);
break;
case JSOP_SUB:
masm.subDouble(FloatReg1, FloatReg0);
break;
case JSOP_MUL:
masm.mulDouble(FloatReg1, FloatReg0);
break;
case JSOP_DIV:
masm.divDouble(FloatReg1, FloatReg0);
break;
case JSOP_MOD:
// No inline instruction for fmod: call out to NumberMod via the ABI.
masm.setupUnalignedABICall(R0.scratchReg());
masm.passABIArg(FloatReg0, MoveOp::DOUBLE);
masm.passABIArg(FloatReg1, MoveOp::DOUBLE);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NumberMod), MoveOp::DOUBLE);
MOZ_ASSERT(ReturnDoubleReg == FloatReg0);
break;
default:
MOZ_CRASH("Unexpected op");
}
masm.boxDouble(FloatReg0, R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
// Optimized stub for Boolean/Int32 operand mixes (add/sub/bitor/bitxor/
// bitand): guard each side's type per lhsIsBool_/rhsIsBool_, extract both
// operands as int32 and compute inline. On add/sub overflow the operation
// is undone and control falls through to the failure path.
bool
ICBinaryArith_BooleanWithInt32::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
if (lhsIsBool_)
masm.branchTestBoolean(Assembler::NotEqual, R0, &failure);
else
masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
if (rhsIsBool_)
masm.branchTestBoolean(Assembler::NotEqual, R1, &failure);
else
masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
Register lhsReg = lhsIsBool_ ? masm.extractBoolean(R0, ExtractTemp0)
: masm.extractInt32(R0, ExtractTemp0);
Register rhsReg = rhsIsBool_ ? masm.extractBoolean(R1, ExtractTemp1)
: masm.extractInt32(R1, ExtractTemp1);
MOZ_ASSERT(op_ == JSOP_ADD || op_ == JSOP_SUB ||
op_ == JSOP_BITOR || op_ == JSOP_BITXOR || op_ == JSOP_BITAND);
switch(op_) {
case JSOP_ADD: {
Label fixOverflow;
masm.branchAdd32(Assembler::Overflow, rhsReg, lhsReg, &fixOverflow);
masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
EmitReturnFromIC(masm);
// Undo the overflowing add so lhsReg holds its original value again.
masm.bind(&fixOverflow);
masm.sub32(rhsReg, lhsReg);
// Proceed to failure below.
break;
}
case JSOP_SUB: {
Label fixOverflow;
masm.branchSub32(Assembler::Overflow, rhsReg, lhsReg, &fixOverflow);
masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
EmitReturnFromIC(masm);
// Undo the overflowing sub so lhsReg holds its original value again.
masm.bind(&fixOverflow);
masm.add32(rhsReg, lhsReg);
// Proceed to failure below.
break;
}
case JSOP_BITOR: {
masm.orPtr(rhsReg, lhsReg);
masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
EmitReturnFromIC(masm);
break;
}
case JSOP_BITXOR: {
masm.xorPtr(rhsReg, lhsReg);
masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
EmitReturnFromIC(masm);
break;
}
case JSOP_BITAND: {
masm.andPtr(rhsReg, lhsReg);
masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
EmitReturnFromIC(masm);
break;
}
default:
MOZ_CRASH("Unhandled op for BinaryArith_BooleanWithInt32.");
}
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
// Optimized stub for Double <BITOP> Int32 (either order, fixed by
// lhsIsDouble_): truncate the double side to int32 — inline when possible,
// via an ABI call to JS::ToInt32 otherwise — then compute the bitwise op
// and return an int32 result.
bool
ICBinaryArith_DoubleWithInt32::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
MOZ_ASSERT(op == JSOP_BITOR || op == JSOP_BITAND || op == JSOP_BITXOR);
Label failure;
Register intReg;
Register scratchReg;
if (lhsIsDouble_) {
masm.branchTestDouble(Assembler::NotEqual, R0, &failure);
masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
intReg = masm.extractInt32(R1, ExtractTemp0);
masm.unboxDouble(R0, FloatReg0);
scratchReg = R0.scratchReg();
} else {
masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
masm.branchTestDouble(Assembler::NotEqual, R1, &failure);
intReg = masm.extractInt32(R0, ExtractTemp0);
masm.unboxDouble(R1, FloatReg0);
scratchReg = R1.scratchReg();
}
// Truncate the double to an int32.
{
Label doneTruncate;
Label truncateABICall;
masm.branchTruncateDouble(FloatReg0, scratchReg, &truncateABICall);
masm.jump(&doneTruncate);
// Slow path: the inline truncation couldn't handle this value, so call
// JS::ToInt32. Save/restore intReg around the call since the ABI call
// may clobber it.
masm.bind(&truncateABICall);
masm.push(intReg);
masm.setupUnalignedABICall(scratchReg);
masm.passABIArg(FloatReg0, MoveOp::DOUBLE);
masm.callWithABI(mozilla::BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
masm.storeCallResult(scratchReg);
masm.pop(intReg);
masm.bind(&doneTruncate);
}
Register intReg2 = scratchReg;
// All handled ops commute, so no need to worry about ordering.
switch(op) {
case JSOP_BITOR:
masm.orPtr(intReg, intReg2);
break;
case JSOP_BITXOR:
masm.xorPtr(intReg, intReg2);
break;
case JSOP_BITAND:
masm.andPtr(intReg, intReg2);
break;
default:
MOZ_CRASH("Unhandled op for BinaryArith_DoubleWithInt32.");
}
masm.tagValue(JSVAL_TYPE_INT32, intReg2, R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// UnaryArith_Fallback
//

View File

@ -1104,6 +1104,280 @@ class ICToNumber_Fallback : public ICFallbackStub
};
};
// BinaryArith
// JSOP_ADD
// JSOP_BITAND, JSOP_BITXOR, JSOP_BITOR
// JSOP_LSH, JSOP_RSH, JSOP_URSH
// Fallback stub for the binary arith/bitwise ops. Records type feedback in
// extra_ and anchors the chain of optimized BinaryArith_* stubs.
class ICBinaryArith_Fallback : public ICFallbackStub
{
friend class ICStubSpace;
explicit ICBinaryArith_Fallback(JitCode* stubCode)
: ICFallbackStub(BinaryArith_Fallback, stubCode)
{
extra_ = 0;
}
// Feedback bits stored in extra_.
static const uint16_t SAW_DOUBLE_RESULT_BIT = 0x1;
static const uint16_t UNOPTIMIZABLE_OPERANDS_BIT = 0x2;
public:
// Cap on the number of optimized stubs chained off this fallback stub.
static const uint32_t MAX_OPTIMIZED_STUBS = 8;
// True if any execution through this IC produced a double result.
bool sawDoubleResult() const {
return extra_ & SAW_DOUBLE_RESULT_BIT;
}
void setSawDoubleResult() {
extra_ |= SAW_DOUBLE_RESULT_BIT;
}
// True if operands were seen for which no optimized stub applies.
bool hadUnoptimizableOperands() const {
return extra_ & UNOPTIMIZABLE_OPERANDS_BIT;
}
void noteUnoptimizableOperands() {
extra_ |= UNOPTIMIZABLE_OPERANDS_BIT;
}
// Compiler for this stub kind.
class Compiler : public ICStubCompiler {
protected:
bool generateStubCode(MacroAssembler& masm);
public:
explicit Compiler(JSContext* cx)
: ICStubCompiler(cx, ICStub::BinaryArith_Fallback, Engine::Baseline) {}
ICStub* getStub(ICStubSpace* space) {
return newStub<ICBinaryArith_Fallback>(space, getStubCode());
}
};
};
// Optimized stub for Int32 op Int32. extra_ records whether a double
// result is allowed (for ops that may overflow into a double).
class ICBinaryArith_Int32 : public ICStub
{
friend class ICStubSpace;
ICBinaryArith_Int32(JitCode* stubCode, bool allowDouble)
: ICStub(BinaryArith_Int32, stubCode)
{
extra_ = allowDouble;
}
public:
bool allowDouble() const {
return extra_;
}
// Compiler for this stub kind.
class Compiler : public ICStubCompiler {
protected:
JSOp op_;
bool allowDouble_;
bool generateStubCode(MacroAssembler& masm);
// Stub keys shift-stubs need to encode the kind, the JSOp and if we allow doubles.
virtual int32_t getKey() const {
return static_cast<int32_t>(engine_) |
(static_cast<int32_t>(kind) << 1) |
(static_cast<int32_t>(op_) << 17) |
(static_cast<int32_t>(allowDouble_) << 25);
}
public:
Compiler(JSContext* cx, JSOp op, bool allowDouble)
: ICStubCompiler(cx, ICStub::BinaryArith_Int32, Engine::Baseline),
op_(op), allowDouble_(allowDouble) {}
ICStub* getStub(ICStubSpace* space) {
return newStub<ICBinaryArith_Int32>(space, getStubCode(), allowDouble_);
}
};
};
// Optimized stub for String + String concatenation.
class ICBinaryArith_StringConcat : public ICStub
{
friend class ICStubSpace;
explicit ICBinaryArith_StringConcat(JitCode* stubCode)
: ICStub(BinaryArith_StringConcat, stubCode)
{}
public:
// Compiler for this stub kind.
class Compiler : public ICStubCompiler {
protected:
bool generateStubCode(MacroAssembler& masm);
public:
explicit Compiler(JSContext* cx)
: ICStubCompiler(cx, ICStub::BinaryArith_StringConcat, Engine::Baseline)
{}
ICStub* getStub(ICStubSpace* space) {
return newStub<ICBinaryArith_StringConcat>(space, getStubCode());
}
};
};
// Optimized stub for String + Object concatenation; extra_ records which
// side is the string.
class ICBinaryArith_StringObjectConcat : public ICStub
{
friend class ICStubSpace;
ICBinaryArith_StringObjectConcat(JitCode* stubCode, bool lhsIsString)
: ICStub(BinaryArith_StringObjectConcat, stubCode)
{
extra_ = lhsIsString;
}
public:
bool lhsIsString() const {
return extra_;
}
// Compiler for this stub kind.
class Compiler : public ICStubCompiler {
protected:
bool lhsIsString_;
bool generateStubCode(MacroAssembler& masm);
// Key encodes lhsIsString_ since it changes the generated guards.
virtual int32_t getKey() const {
return static_cast<int32_t>(engine_) |
(static_cast<int32_t>(kind) << 1) |
(static_cast<int32_t>(lhsIsString_) << 17);
}
public:
Compiler(JSContext* cx, bool lhsIsString)
: ICStubCompiler(cx, ICStub::BinaryArith_StringObjectConcat, Engine::Baseline),
lhsIsString_(lhsIsString)
{}
ICStub* getStub(ICStubSpace* space) {
return newStub<ICBinaryArith_StringObjectConcat>(space, getStubCode(),
lhsIsString_);
}
};
};
// Optimized stub for Double op Double (add/sub/mul/div/mod).
class ICBinaryArith_Double : public ICStub
{
friend class ICStubSpace;
explicit ICBinaryArith_Double(JitCode* stubCode)
: ICStub(BinaryArith_Double, stubCode)
{}
public:
// Compiler for this stub kind; ICMultiStubCompiler carries the JSOp.
class Compiler : public ICMultiStubCompiler {
protected:
bool generateStubCode(MacroAssembler& masm);
public:
Compiler(JSContext* cx, JSOp op)
: ICMultiStubCompiler(cx, ICStub::BinaryArith_Double, op, Engine::Baseline)
{}
ICStub* getStub(ICStubSpace* space) {
return newStub<ICBinaryArith_Double>(space, getStubCode());
}
};
};
// Optimized stub for Boolean/Int32 operand mixes. At least one side must be
// a boolean; extra_ bit 1 = lhs is boolean, bit 2 = rhs is boolean.
class ICBinaryArith_BooleanWithInt32 : public ICStub
{
friend class ICStubSpace;
ICBinaryArith_BooleanWithInt32(JitCode* stubCode, bool lhsIsBool, bool rhsIsBool)
: ICStub(BinaryArith_BooleanWithInt32, stubCode)
{
MOZ_ASSERT(lhsIsBool || rhsIsBool);
extra_ = 0;
if (lhsIsBool)
extra_ |= 1;
if (rhsIsBool)
extra_ |= 2;
}
public:
bool lhsIsBoolean() const {
return extra_ & 1;
}
bool rhsIsBoolean() const {
return extra_ & 2;
}
// Compiler for this stub kind.
class Compiler : public ICStubCompiler {
protected:
JSOp op_;
bool lhsIsBool_;
bool rhsIsBool_;
bool generateStubCode(MacroAssembler& masm);
// Key encodes the op and both boolean flags: each changes the code.
virtual int32_t getKey() const {
return static_cast<int32_t>(engine_) |
(static_cast<int32_t>(kind) << 1) |
(static_cast<int32_t>(op_) << 17) |
(static_cast<int32_t>(lhsIsBool_) << 25) |
(static_cast<int32_t>(rhsIsBool_) << 26);
}
public:
Compiler(JSContext* cx, JSOp op, bool lhsIsBool, bool rhsIsBool)
: ICStubCompiler(cx, ICStub::BinaryArith_BooleanWithInt32, Engine::Baseline),
op_(op), lhsIsBool_(lhsIsBool), rhsIsBool_(rhsIsBool)
{
MOZ_ASSERT(op_ == JSOP_ADD || op_ == JSOP_SUB || op_ == JSOP_BITOR ||
op_ == JSOP_BITAND || op_ == JSOP_BITXOR);
MOZ_ASSERT(lhsIsBool_ || rhsIsBool_);
}
ICStub* getStub(ICStubSpace* space) {
return newStub<ICBinaryArith_BooleanWithInt32>(space, getStubCode(),
lhsIsBool_, rhsIsBool_);
}
};
};
// Optimized stub for Double <BITOP> Int32 (either order); extra_ records
// whether the lhs is the double side.
class ICBinaryArith_DoubleWithInt32 : public ICStub
{
friend class ICStubSpace;
ICBinaryArith_DoubleWithInt32(JitCode* stubCode, bool lhsIsDouble)
: ICStub(BinaryArith_DoubleWithInt32, stubCode)
{
extra_ = lhsIsDouble;
}
public:
bool lhsIsDouble() const {
return extra_;
}
// Compiler for this stub kind; ICMultiStubCompiler carries the JSOp.
class Compiler : public ICMultiStubCompiler {
protected:
bool lhsIsDouble_;
bool generateStubCode(MacroAssembler& masm);
// Key encodes the op and operand order, both affect the generated code.
virtual int32_t getKey() const {
return static_cast<int32_t>(engine_) |
(static_cast<int32_t>(kind) << 1) |
(static_cast<int32_t>(op) << 17) |
(static_cast<int32_t>(lhsIsDouble_) << 25);
}
public:
Compiler(JSContext* cx, JSOp op, bool lhsIsDouble)
: ICMultiStubCompiler(cx, ICStub::BinaryArith_DoubleWithInt32, op, Engine::Baseline),
lhsIsDouble_(lhsIsDouble)
{}
ICStub* getStub(ICStubSpace* space) {
return newStub<ICBinaryArith_DoubleWithInt32>(space, getStubCode(),
lhsIsDouble_);
}
};
};
// UnaryArith
// JSOP_BITNOT
// JSOP_NEG

View File

@ -49,6 +49,14 @@ namespace jit {
\
_(ToNumber_Fallback) \
\
_(BinaryArith_Fallback) \
_(BinaryArith_Int32) \
_(BinaryArith_Double) \
_(BinaryArith_StringConcat) \
_(BinaryArith_StringObjectConcat) \
_(BinaryArith_BooleanWithInt32) \
_(BinaryArith_DoubleWithInt32) \
\
_(UnaryArith_Fallback) \
_(UnaryArith_Int32) \
_(UnaryArith_Double) \

View File

@ -441,7 +441,10 @@ BaselineScript::trace(JSTracer* trc)
// Mark all IC stub codes hanging off the IC stub entries.
for (size_t i = 0; i < numICEntries(); i++) {
ICEntry& ent = icEntry(i);
ent.trace(trc);
if (!ent.hasStub())
continue;
for (ICStub* stub = ent.firstStub(); stub; stub = stub->next())
stub->trace(trc);
}
}

View File

@ -1682,58 +1682,6 @@ CodeGenerator::visitStringReplace(LStringReplace* lir)
callVM(StringReplaceInfo, lir);
}
// Emit an Ion-side shared-stub IC call site for |lir|: push a frame
// descriptor, emit the patchable IC call, and record the entry so it can be
// linked and patched after compilation (see linkSharedStubs).
void
CodeGenerator::emitSharedStub(ICStub::Kind kind, LInstruction* lir)
{
JSScript* script = lir->mirRaw()->block()->info().script();
jsbytecode* pc = lir->mirRaw()->toInstruction()->resumePoint()->pc();
// Create descriptor signifying end of Ion frame.
uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS);
masm.Push(Imm32(descriptor));
// Call into the stubcode.
CodeOffsetLabel patchOffset;
IonICEntry entry(script->pcToOffset(pc), ICEntry::Kind_Op, script);
EmitCallIC(&patchOffset, masm);
entry.setReturnOffset(CodeOffsetLabel(masm.currentOffset()));
// Remember the entry and patch location for link time.
SharedStub sharedStub(kind, entry, patchOffset);
masm.propagateOOM(sharedStubs_.append(sharedStub));
// Fix up upon return.
uint32_t callOffset = masm.currentOffset();
masm.freeStack(sizeof(intptr_t));
markSafepointAt(callOffset, lir);
}
// Lower a binary shared-stub LIR node: the arith ops supported here all map
// to the BinaryArith_Fallback IC kind.
void
CodeGenerator::visitBinarySharedStub(LBinarySharedStub* lir)
{
JSOp jsop = JSOp(*lir->mir()->resumePoint()->pc());
switch (jsop) {
case JSOP_ADD:
case JSOP_SUB:
case JSOP_MUL:
case JSOP_DIV:
case JSOP_MOD:
emitSharedStub(ICStub::Kind::BinaryArith_Fallback, lir);
break;
default:
MOZ_CRASH("Unsupported jsop in shared stubs.");
}
}
// Lower a unary shared-stub LIR node. No unary ops are supported yet, so
// any op reaching here is a bug.
void
CodeGenerator::visitUnarySharedStub(LUnarySharedStub* lir)
{
JSOp jsop = JSOp(*lir->mir()->resumePoint()->pc());
switch (jsop) {
default:
MOZ_CRASH("Unsupported jsop in shared stubs.");
}
}
typedef JSObject* (*LambdaFn)(JSContext*, HandleFunction, HandleObject);
static const VMFunction LambdaInfo = FunctionInfo<LambdaFn>(js::Lambda);
@ -7851,30 +7799,6 @@ struct AutoDiscardIonCode
}
};
// At link time, compile a fallback stub for each shared-stub call site
// recorded by emitSharedStub and attach it as the entry's first stub.
// Returns false on OOM (stub allocation failure).
bool
CodeGenerator::linkSharedStubs(JSContext* cx)
{
for (uint32_t i = 0; i < sharedStubs_.length(); i++) {
ICStub *stub = nullptr;
switch (sharedStubs_[i].kind) {
case ICStub::Kind::BinaryArith_Fallback: {
ICBinaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonMonkey);
stub = stubCompiler.getStub(&stubSpace_);
break;
}
default:
MOZ_CRASH("Unsupported shared stub.");
}
if (!stub)
return false;
sharedStubs_[i].entry.setFirstStub(stub);
}
return true;
}
bool
CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
{
@ -7900,9 +7824,6 @@ CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
if (scriptCounts_ && !script->hasScriptCounts() && !script->initScriptCounts(cx))
return false;
if (!linkSharedStubs(cx))
return false;
// Check to make sure we didn't have a mid-build invalidation. If so, we
// will trickle to jit::Compile() and return Method_Skipped.
uint32_t warmUpCount = script->getWarmUpCount();
@ -7934,8 +7855,7 @@ CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
recovers_.size(), bailouts_.length(), graph.numConstants(),
safepointIndices_.length(), osiIndices_.length(),
cacheList_.length(), runtimeData_.length(),
safepoints_.size(), patchableBackedges_.length(),
sharedStubs_.length(), optimizationLevel);
safepoints_.size(), patchableBackedges_.length(), optimizationLevel);
if (!ionScript)
return false;
discardIonCode.ionScript = ionScript;
@ -8029,9 +7949,6 @@ CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
script->setIonScript(cx, ionScript);
// Adopt fallback shared stubs from the compiler into the ion script.
ionScript->adoptFallbackStubs(&stubSpace_);
{
AutoWritableJitCode awjc(code);
invalidateEpilogueData_.fixup(&masm);
@ -8068,23 +7985,6 @@ CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
}
}
#endif
// Patch shared stub IC loads using IC entries
for (size_t i = 0; i < sharedStubs_.length(); i++) {
CodeOffsetLabel label = sharedStubs_[i].label;
label.fixup(&masm);
IonICEntry& entry = ionScript->sharedStubList()[i];
entry = sharedStubs_[i].entry;
entry.fixupReturnOffset(masm);
Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label),
ImmPtr(&entry),
ImmPtr((void*)-1));
MOZ_ASSERT(entry.hasStub());
MOZ_ASSERT(entry.firstStub()->isFallback());
entry.firstStub()->toFallbackStub()->fixupICEntry(&entry);
}
}
JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)",

View File

@ -59,7 +59,6 @@ class CodeGenerator : public CodeGeneratorSpecific
bool generate();
bool generateAsmJS(AsmJSFunctionLabels* labels);
bool link(JSContext* cx, CompilerConstraintList* constraints);
bool linkSharedStubs(JSContext* cx);
void visitOsiPoint(LOsiPoint* lir);
void visitGoto(LGoto* lir);
@ -107,9 +106,6 @@ class CodeGenerator : public CodeGeneratorSpecific
void visitOutOfLineRegExpTest(OutOfLineRegExpTest* ool);
void visitRegExpReplace(LRegExpReplace* lir);
void visitStringReplace(LStringReplace* lir);
void emitSharedStub(ICStub::Kind kind, LInstruction* lir);
void visitBinarySharedStub(LBinarySharedStub* lir);
void visitUnarySharedStub(LUnarySharedStub* lir);
void visitLambda(LLambda* lir);
void visitOutOfLineLambdaArrow(OutOfLineLambdaArrow* ool);
void visitLambdaArrow(LLambdaArrow* lir);
@ -483,18 +479,6 @@ class CodeGenerator : public CodeGeneratorSpecific
Vector<CodeOffsetLabel, 0, JitAllocPolicy> ionScriptLabels_;
struct SharedStub {
ICStub::Kind kind;
IonICEntry entry;
CodeOffsetLabel label;
SharedStub(ICStub::Kind kind, IonICEntry entry, CodeOffsetLabel label)
: kind(kind), entry(entry), label(label)
{}
};
Vector<SharedStub, 0, SystemAllocPolicy> sharedStubs_;
void branchIfInvalidated(Register temp, Label* invalidated);
#ifdef DEBUG

View File

@ -1,76 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_ICStubSpace_h
#define jit_ICStubSpace_h
namespace js {
namespace jit {
// ICStubSpace is an abstraction for allocation policy and storage for stub data.
// There are two kinds of stubs: optimized stubs and fallback stubs (the latter
// also includes stubs that can make non-tail calls that can GC).
//
// Optimized stubs are allocated per-compartment and are always purged when
// JIT-code is discarded. Fallback stubs are allocated per BaselineScript and
// are only destroyed when the BaselineScript is destroyed.
class ICStubSpace
{
protected:
// Backing bump allocator; chunk size is chosen by the subclass.
LifoAlloc allocator_;
explicit ICStubSpace(size_t chunkSize)
: allocator_(chunkSize)
{}
public:
// Allocate raw (uninitialized) storage for a stub; nullptr on OOM.
inline void* alloc(size_t size) {
return allocator_.alloc(size);
}
// Declares allocate<T>(...) placement-construction helpers over alloc().
JS_DECLARE_NEW_METHODS(allocate, alloc, inline)
// Memory accounting for about:memory-style reporting.
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
return allocator_.sizeOfExcludingThis(mallocSizeOf);
}
};
// Space for optimized stubs. Every JitCompartment has a single
// OptimizedICStubSpace.
struct OptimizedICStubSpace : public ICStubSpace
{
static const size_t STUB_DEFAULT_CHUNK_SIZE = 4 * 1024;
public:
OptimizedICStubSpace()
: ICStubSpace(STUB_DEFAULT_CHUNK_SIZE)
{}
// Release all stub memory at once (used when JIT code is discarded).
void free() {
allocator_.freeAll();
}
};
// Space for fallback stubs. Every BaselineScript has a
// FallbackICStubSpace.
struct FallbackICStubSpace : public ICStubSpace
{
static const size_t STUB_DEFAULT_CHUNK_SIZE = 256;
public:
FallbackICStubSpace()
: ICStubSpace(STUB_DEFAULT_CHUNK_SIZE)
{}
// Take ownership of all allocations made in |other|'s allocator.
inline void adoptFrom(FallbackICStubSpace* other) {
allocator_.steal(&(other->allocator_));
}
};
} // namespace jit
} // namespace js
#endif /* jit_ICStubSpace_h */

View File

@ -898,8 +898,7 @@ IonScript::IonScript()
backedgeEntries_(0),
invalidationCount_(0),
recompileInfo_(),
osrPcMismatchCounter_(0),
fallbackStubSpace_()
osrPcMismatchCounter_(0)
{
}
@ -911,8 +910,7 @@ IonScript::New(JSContext* cx, RecompileInfo recompileInfo,
size_t constants, size_t safepointIndices,
size_t osiIndices, size_t cacheEntries,
size_t runtimeSize, size_t safepointsSize,
size_t backedgeEntries, size_t sharedStubEntries,
OptimizationLevel optimizationLevel)
size_t backedgeEntries, OptimizationLevel optimizationLevel)
{
static const int DataAlignment = sizeof(void*);
@ -936,8 +934,6 @@ IonScript::New(JSContext* cx, RecompileInfo recompileInfo,
size_t paddedRuntimeSize = AlignBytes(runtimeSize, DataAlignment);
size_t paddedSafepointSize = AlignBytes(safepointsSize, DataAlignment);
size_t paddedBackedgeSize = AlignBytes(backedgeEntries * sizeof(PatchableBackedge), DataAlignment);
size_t paddedSharedStubSize = AlignBytes(sharedStubEntries * sizeof(IonICEntry), DataAlignment);
size_t bytes = paddedSnapshotsSize +
paddedRecoversSize +
paddedBailoutSize +
@ -947,8 +943,7 @@ IonScript::New(JSContext* cx, RecompileInfo recompileInfo,
paddedCacheEntriesSize +
paddedRuntimeSize +
paddedSafepointSize +
paddedBackedgeSize +
paddedSharedStubSize;
paddedBackedgeSize;
IonScript* script = cx->zone()->pod_malloc_with_extra<IonScript, uint8_t>(bytes);
if (!script)
return nullptr;
@ -997,10 +992,6 @@ IonScript::New(JSContext* cx, RecompileInfo recompileInfo,
script->backedgeEntries_ = backedgeEntries;
offsetCursor += paddedBackedgeSize;
script->sharedStubList_ = offsetCursor;
script->sharedStubEntries_ = sharedStubEntries;
offsetCursor += paddedSharedStubSize;
script->frameSlots_ = frameSlots;
script->argumentSlots_ = argumentSlots;
@ -1012,13 +1003,6 @@ IonScript::New(JSContext* cx, RecompileInfo recompileInfo,
return script;
}
void
IonScript::adoptFallbackStubs(FallbackICStubSpace* stubSpace)
{
fallbackStubSpace()->adoptFrom(stubSpace);
}
void
IonScript::trace(JSTracer* trc)
{
@ -1030,12 +1014,6 @@ IonScript::trace(JSTracer* trc)
for (size_t i = 0; i < numConstants(); i++)
TraceEdge(trc, &getConstant(i), "constant");
// Mark all IC stub codes hanging off the IC stub entries.
for (size_t i = 0; i < numSharedStubs(); i++) {
ICEntry& ent = sharedStubList()[i];
ent.trace(trc);
}
}
/* static */ void
@ -1250,64 +1228,6 @@ IonScript::toggleBarriers(bool enabled)
method()->togglePreBarriers(enabled);
}
void
IonScript::purgeOptimizedStubs(Zone* zone)
{
for (size_t i = 0; i < numSharedStubs(); i++) {
ICEntry& entry = sharedStubList()[i];
if (!entry.hasStub())
continue;
ICStub* lastStub = entry.firstStub();
while (lastStub->next())
lastStub = lastStub->next();
if (lastStub->isFallback()) {
// Unlink all stubs allocated in the optimized space.
ICStub* stub = entry.firstStub();
ICStub* prev = nullptr;
while (stub->next()) {
if (!stub->allocatedInFallbackSpace()) {
lastStub->toFallbackStub()->unlinkStub(zone, prev, stub);
stub = stub->next();
continue;
}
prev = stub;
stub = stub->next();
}
if (lastStub->isMonitoredFallback()) {
// Monitor stubs can't make calls, so are always in the
// optimized stub space.
ICTypeMonitor_Fallback* lastMonStub =
lastStub->toMonitoredFallbackStub()->fallbackMonitorStub();
lastMonStub->resetMonitorStubChain(zone);
}
} else if (lastStub->isTypeMonitor_Fallback()) {
lastStub->toTypeMonitor_Fallback()->resetMonitorStubChain(zone);
} else {
MOZ_ASSERT(lastStub->isTableSwitch());
}
}
#ifdef DEBUG
// All remaining stubs must be allocated in the fallback space.
for (size_t i = 0; i < numSharedStubs(); i++) {
ICEntry& entry = sharedStubList()[i];
if (!entry.hasStub())
continue;
ICStub* stub = entry.firstStub();
while (stub->next()) {
MOZ_ASSERT(stub->allocatedInFallbackSpace());
stub = stub->next();
}
}
#endif
}
void
IonScript::purgeCaches()
{
@ -2756,7 +2676,6 @@ InvalidateActivation(FreeOp* fop, const JitActivationIterator& activations, bool
// prevent lastJump_ from appearing to be a bogus pointer, just
// in case anyone tries to read it.
ionScript->purgeCaches();
ionScript->purgeOptimizedStubs(script->zone());
// Clean up any pointers from elsewhere in the runtime to this IonScript
// which is about to become disconnected from its JSScript.

View File

@ -4671,36 +4671,6 @@ IonBuilder::binaryArithTrySpecializedOnBaselineInspector(bool* emitted, JSOp op,
return true;
}
bool
IonBuilder::binaryArithTrySharedStub(bool* emitted, JSOp op,
MDefinition* left, MDefinition* right)
{
MOZ_ASSERT(*emitted == false);
// Try to emit a shared stub cache.
if (js_JitOptions.disableSharedStubs)
return true;
// It is not possible for shared stubs to impersonate another op.
if (JSOp(*pc) != op)
return true;
MBinarySharedStub *stub = MBinarySharedStub::New(alloc(), left, right);
current->add(stub);
current->push(stub);
// Decrease type from 'any type' to 'empty type' when one of the operands
// is 'empty typed'.
maybeMarkEmpty(stub);
if (!resumeAfter(stub))
return false;
*emitted = true;
return true;
}
bool
IonBuilder::jsop_binary_arith(JSOp op, MDefinition* left, MDefinition* right)
{
@ -4717,9 +4687,6 @@ IonBuilder::jsop_binary_arith(JSOp op, MDefinition* left, MDefinition* right)
return emitted;
}
if (!binaryArithTrySharedStub(&emitted, op, left, right) || emitted)
return emitted;
// Not possible to optimize. Do a slow vm call.
MDefinition::Opcode def_op = JSOpToMDefinition(op);
MBinaryArithInstruction* ins = MBinaryArithInstruction::New(alloc(), def_op, left, right);

View File

@ -489,7 +489,6 @@ class IonBuilder
bool binaryArithTrySpecialized(bool* emitted, JSOp op, MDefinition* left, MDefinition* right);
bool binaryArithTrySpecializedOnBaselineInspector(bool* emitted, JSOp op, MDefinition* left,
MDefinition* right);
bool binaryArithTrySharedStub(bool* emitted, JSOp op, MDefinition* left, MDefinition* right);
// binary data lookup helpers.
TypedObjectPrediction typedObjectPrediction(MDefinition* typedObj);

View File

@ -15,7 +15,6 @@
#include "gc/Heap.h"
#include "jit/ExecutableAllocator.h"
#include "jit/ICStubSpace.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/IonTypes.h"
#include "js/UbiNode.h"
@ -28,7 +27,6 @@ namespace jit {
class MacroAssembler;
class PatchableBackedge;
class IonBuilder;
class IonICEntry;
typedef Vector<JSObject*, 4, JitAllocPolicy> ObjectVector;
@ -261,10 +259,6 @@ struct IonScript
uint32_t backedgeList_;
uint32_t backedgeEntries_;
// List of entries to the shared stub.
uint32_t sharedStubList_;
uint32_t sharedStubEntries_;
// Number of references from invalidation records.
uint32_t invalidationCount_;
@ -278,9 +272,6 @@ struct IonScript
// a LOOPENTRY pc other than osrPc_.
uint32_t osrPcMismatchCounter_;
// Allocated space for fallback stubs.
FallbackICStubSpace fallbackStubSpace_;
// The tracelogger event used to log the start/stop of this IonScript.
TraceLoggerEvent traceLoggerScriptEvent_;
@ -336,8 +327,7 @@ struct IonScript
size_t constants, size_t safepointIndexEntries,
size_t osiIndexEntries, size_t cacheEntries,
size_t runtimeSize, size_t safepointsSize,
size_t backedgeEntries, size_t sharedStubEntries,
OptimizationLevel optimizationLevel);
size_t backedgeEntries, OptimizationLevel optimizationLevel);
static void Trace(JSTracer* trc, IonScript* script);
static void Destroy(FreeOp* fop, IonScript* script);
@ -496,12 +486,6 @@ struct IonScript
size_t numCaches() const {
return cacheEntries_;
}
IonICEntry* sharedStubList() {
return (IonICEntry*) &bottomBuffer()[sharedStubList_];
}
size_t numSharedStubs() const {
return sharedStubEntries_;
}
size_t runtimeSize() const {
return runtimeSize_;
}
@ -572,12 +556,6 @@ struct IonScript
recompiling_ = false;
}
FallbackICStubSpace* fallbackStubSpace() {
return &fallbackStubSpace_;
}
void adoptFallbackStubs(FallbackICStubSpace* stubSpace);
void purgeOptimizedStubs(Zone* zone);
enum ShouldIncreaseAge {
IncreaseAge = true,
KeepAge = false

View File

@ -14,7 +14,6 @@
#include "builtin/TypedObject.h"
#include "jit/CompileInfo.h"
#include "jit/ICStubSpace.h"
#include "jit/IonCode.h"
#include "jit/JitFrames.h"
#include "jit/shared/Assembler-shared.h"
@ -60,6 +59,66 @@ typedef void (*EnterJitCode)(void* code, unsigned argc, Value* argv, Interpreter
class JitcodeGlobalTable;
// ICStubSpace is an abstraction for allocation policy and storage for stub data.
// There are two kinds of stubs: optimized stubs and fallback stubs (the latter
// also includes stubs that can make non-tail calls that can GC).
//
// Optimized stubs are allocated per-compartment and are always purged when
// JIT-code is discarded. Fallback stubs are allocated per BaselineScript and
// are only destroyed when the BaselineScript is destroyed.
class ICStubSpace
{
protected:
LifoAlloc allocator_;
explicit ICStubSpace(size_t chunkSize)
: allocator_(chunkSize)
{}
public:
inline void* alloc(size_t size) {
return allocator_.alloc(size);
}
JS_DECLARE_NEW_METHODS(allocate, alloc, inline)
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
return allocator_.sizeOfExcludingThis(mallocSizeOf);
}
};
// Space for optimized stubs. Every JitCompartment has a single
// OptimizedICStubSpace.
struct OptimizedICStubSpace : public ICStubSpace
{
static const size_t STUB_DEFAULT_CHUNK_SIZE = 4 * 1024;
public:
OptimizedICStubSpace()
: ICStubSpace(STUB_DEFAULT_CHUNK_SIZE)
{}
void free() {
allocator_.freeAll();
}
};
// Space for fallback stubs. Every BaselineScript has a
// FallbackICStubSpace.
struct FallbackICStubSpace : public ICStubSpace
{
static const size_t STUB_DEFAULT_CHUNK_SIZE = 256;
public:
FallbackICStubSpace()
: ICStubSpace(STUB_DEFAULT_CHUNK_SIZE)
{}
inline void adoptFrom(FallbackICStubSpace* other) {
allocator_.steal(&(other->allocator_));
}
};
// Information about a loop backedge in the runtime, which can be set to
// point to either the loop header or to an OOL interrupt checking stub,
// if signal handlers are being used to implement interrupts.

View File

@ -252,12 +252,12 @@ class LUse : public LAllocation
explicit LUse(FloatRegister reg, bool usedAtStart = false) {
set(FIXED, reg.code(), usedAtStart);
}
LUse(Register reg, uint32_t virtualRegister, bool usedAtStart = false) {
set(FIXED, reg.code(), usedAtStart);
LUse(Register reg, uint32_t virtualRegister) {
set(FIXED, reg.code(), false);
setVirtualRegister(virtualRegister);
}
LUse(FloatRegister reg, uint32_t virtualRegister, bool usedAtStart = false) {
set(FIXED, reg.code(), usedAtStart);
LUse(FloatRegister reg, uint32_t virtualRegister) {
set(FIXED, reg.code(), false);
setVirtualRegister(virtualRegister);
}

View File

@ -30,16 +30,6 @@ LIRGenerator::useBoxAtStart(LInstruction* lir, size_t n, MDefinition* mir, LUse:
return useBox(lir, n, mir, policy, true);
}
void
LIRGenerator::useBoxFixedAtStart(LInstruction* lir, size_t n, MDefinition* mir, ValueOperand op)
{
#if defined(JS_NUNBOX32)
return useBoxFixed(lir, n, mir, op.typeReg(), op.payloadReg(), true);
#elif defined(JS_PUNBOX64)
return useBoxFixed(lir, n, mir, op.valueReg(), op.scratchReg(), true);
#endif
}
void
LIRGenerator::visitCloneLiteral(MCloneLiteral* ins)
{
@ -2120,38 +2110,6 @@ LIRGenerator::visitStringReplace(MStringReplace* ins)
assignSafepoint(lir, ins);
}
void
LIRGenerator::visitBinarySharedStub(MBinarySharedStub* ins)
{
MDefinition* lhs = ins->getOperand(0);
MDefinition* rhs = ins->getOperand(1);
MOZ_ASSERT(ins->type() == MIRType_Value);
MOZ_ASSERT(ins->type() == MIRType_Value);
LBinarySharedStub* lir = new(alloc()) LBinarySharedStub();
useBoxFixedAtStart(lir, LBinarySharedStub::LhsInput, lhs, R0);
useBoxFixedAtStart(lir, LBinarySharedStub::RhsInput, rhs, R1);
defineSharedStubReturn(lir, ins);
assignSafepoint(lir, ins);
}
void
LIRGenerator::visitUnarySharedStub(MUnarySharedStub* ins)
{
MDefinition* input = ins->getOperand(0);
MOZ_ASSERT(ins->type() == MIRType_Value);
LUnarySharedStub* lir = new(alloc()) LUnarySharedStub();
useBoxFixedAtStart(lir, LUnarySharedStub::Input, input, R0);
defineSharedStubReturn(lir, ins);
assignSafepoint(lir, ins);
}
void
LIRGenerator::visitLambda(MLambda* ins)
{

View File

@ -50,7 +50,6 @@ class LIRGenerator : public LIRGeneratorSpecific
void useBoxAtStart(LInstruction* lir, size_t n, MDefinition* mir,
LUse::Policy policy = LUse::REGISTER);
void useBoxFixedAtStart(LInstruction* lir, size_t n, MDefinition* mir, ValueOperand op);
void lowerBitOp(JSOp op, MInstruction* ins);
void lowerShiftOp(JSOp op, MShiftInstruction* ins);
@ -161,8 +160,6 @@ class LIRGenerator : public LIRGeneratorSpecific
void visitRegExpTest(MRegExpTest* ins);
void visitRegExpReplace(MRegExpReplace* ins);
void visitStringReplace(MStringReplace* ins);
void visitBinarySharedStub(MBinarySharedStub* ins);
void visitUnarySharedStub(MUnarySharedStub* ins);
void visitLambda(MLambda* ins);
void visitLambdaArrow(MLambdaArrow* ins);
void visitKeepAliveObject(MKeepAliveObject* ins);

View File

@ -7062,45 +7062,6 @@ class MOsrReturnValue
}
};
class MBinarySharedStub
: public MBinaryInstruction,
public MixPolicy<BoxPolicy<0>, BoxPolicy<1> >::Data
{
explicit MBinarySharedStub(MDefinition* left, MDefinition* right)
: MBinaryInstruction(left, right)
{
setResultType(MIRType_Value);
}
public:
INSTRUCTION_HEADER(BinarySharedStub)
static MBinarySharedStub* New(TempAllocator& alloc, MDefinition* left, MDefinition* right)
{
return new(alloc) MBinarySharedStub(left, right);
}
};
class MUnarySharedStub
: public MUnaryInstruction,
public BoxPolicy<0>::Data
{
explicit MUnarySharedStub(MDefinition* input)
: MUnaryInstruction(input)
{
setResultType(MIRType_Value);
}
public:
INSTRUCTION_HEADER(UnarySharedStub)
static MUnarySharedStub* New(TempAllocator& alloc, MDefinition* input)
{
return new(alloc) MUnarySharedStub(input);
}
};
// Check the current frame for over-recursion past the global stack limit.
class MCheckOverRecursed
: public MNullaryInstruction

View File

@ -49,8 +49,6 @@ namespace jit {
_(OsrReturnValue) \
_(OsrArgumentsObject) \
_(ReturnFromCtor) \
_(BinarySharedStub) \
_(UnarySharedStub) \
_(CheckOverRecursed) \
_(DefVar) \
_(DefFun) \

View File

@ -7,10 +7,8 @@
#include "jit/SharedIC.h"
#include "mozilla/SizePrintfMacros.h"
#include "jslibmath.h"
#include "jstypes.h"
#include "jit/BaselineDebugModeOSR.h"
#include "jit/BaselineIC.h"
#include "jit/JitSpewer.h"
#include "jit/Linker.h"
@ -19,10 +17,8 @@
# include "jit/PerfSpewer.h"
#endif
#include "jit/VMFunctions.h"
#include "vm/Interpreter.h"
#include "jit/MacroAssembler-inl.h"
#include "vm/Interpreter-inl.h"
namespace js {
namespace jit {
@ -85,14 +81,6 @@ ICEntry::fallbackStub() const
return firstStub()->getChainFallback();
}
void
ICEntry::trace(JSTracer* trc)
{
if (!hasStub())
return;
for (ICStub* stub = firstStub(); stub; stub = stub->next())
stub->trace(trc);
}
ICStubConstIterator&
ICStubConstIterator::operator++()
@ -154,7 +142,7 @@ ICStub::updateCode(JitCode* code)
/* static */ void
ICStub::trace(JSTracer* trc)
{
markCode(trc, "shared-stub-jitcode");
markCode(trc, "baseline-stub-jitcode");
// If the stub is a monitored fallback stub, then mark the monitor ICs hanging
// off of that stub. We don't need to worry about the regular monitored stubs,
@ -832,582 +820,5 @@ ICStubCompiler::emitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, Val
return true;
}
static ICStubCompiler::Engine
SharedStubEngine(BaselineFrame* frame)
{
return frame ? ICStubCompiler::Engine::Baseline : ICStubCompiler::Engine::IonMonkey;
}
static JSScript*
SharedStubScript(BaselineFrame* frame, ICFallbackStub* stub)
{
ICStubCompiler::Engine engine = SharedStubEngine(frame);
if (engine == ICStubCompiler::Engine::Baseline)
return frame->script();
IonICEntry* entry = (IonICEntry*) stub->icEntry();
return entry->script();
}
//
// BinaryArith_Fallback
//
static bool
DoBinaryArithFallback(JSContext* cx, BaselineFrame* frame, ICBinaryArith_Fallback* stub_,
HandleValue lhs, HandleValue rhs, MutableHandleValue ret)
{
ICStubCompiler::Engine engine = SharedStubEngine(frame);
RootedScript script(cx, SharedStubScript(frame, stub_));
// This fallback stub may trigger debug mode toggling.
DebugModeOSRVolatileStub<ICBinaryArith_Fallback*> stub(engine, frame, stub_);
jsbytecode* pc = stub->icEntry()->pc(script);
JSOp op = JSOp(*pc);
FallbackICSpew(cx, stub, "BinaryArith(%s,%d,%d)", js_CodeName[op],
int(lhs.isDouble() ? JSVAL_TYPE_DOUBLE : lhs.extractNonDoubleType()),
int(rhs.isDouble() ? JSVAL_TYPE_DOUBLE : rhs.extractNonDoubleType()));
// Don't pass lhs/rhs directly, we need the original values when
// generating stubs.
RootedValue lhsCopy(cx, lhs);
RootedValue rhsCopy(cx, rhs);
// Perform the compare operation.
switch(op) {
case JSOP_ADD:
// Do an add.
if (!AddValues(cx, &lhsCopy, &rhsCopy, ret))
return false;
break;
case JSOP_SUB:
if (!SubValues(cx, &lhsCopy, &rhsCopy, ret))
return false;
break;
case JSOP_MUL:
if (!MulValues(cx, &lhsCopy, &rhsCopy, ret))
return false;
break;
case JSOP_DIV:
if (!DivValues(cx, &lhsCopy, &rhsCopy, ret))
return false;
break;
case JSOP_MOD:
if (!ModValues(cx, &lhsCopy, &rhsCopy, ret))
return false;
break;
case JSOP_POW:
if (!math_pow_handle(cx, lhsCopy, rhsCopy, ret))
return false;
break;
case JSOP_BITOR: {
int32_t result;
if (!BitOr(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_BITXOR: {
int32_t result;
if (!BitXor(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_BITAND: {
int32_t result;
if (!BitAnd(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_LSH: {
int32_t result;
if (!BitLsh(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_RSH: {
int32_t result;
if (!BitRsh(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_URSH: {
if (!UrshOperation(cx, lhs, rhs, ret))
return false;
break;
}
default:
MOZ_CRASH("Unhandled baseline arith op");
}
// Check if debug mode toggling made the stub invalid.
if (stub.invalid())
return true;
if (ret.isDouble())
stub->setSawDoubleResult();
// Check to see if a new stub should be generated.
if (stub->numOptimizedStubs() >= ICBinaryArith_Fallback::MAX_OPTIMIZED_STUBS) {
stub->noteUnoptimizableOperands();
return true;
}
// Handle string concat.
if (op == JSOP_ADD) {
if (lhs.isString() && rhs.isString()) {
JitSpew(JitSpew_BaselineIC, " Generating %s(String, String) stub", js_CodeName[op]);
MOZ_ASSERT(ret.isString());
ICBinaryArith_StringConcat::Compiler compiler(cx, engine);
ICStub* strcatStub = compiler.getStub(compiler.getStubSpace(script));
if (!strcatStub)
return false;
stub->addNewStub(strcatStub);
return true;
}
if ((lhs.isString() && rhs.isObject()) || (lhs.isObject() && rhs.isString())) {
JitSpew(JitSpew_BaselineIC, " Generating %s(%s, %s) stub", js_CodeName[op],
lhs.isString() ? "String" : "Object",
lhs.isString() ? "Object" : "String");
MOZ_ASSERT(ret.isString());
ICBinaryArith_StringObjectConcat::Compiler compiler(cx, engine, lhs.isString());
ICStub* strcatStub = compiler.getStub(compiler.getStubSpace(script));
if (!strcatStub)
return false;
stub->addNewStub(strcatStub);
return true;
}
}
if (((lhs.isBoolean() && (rhs.isBoolean() || rhs.isInt32())) ||
(rhs.isBoolean() && (lhs.isBoolean() || lhs.isInt32()))) &&
(op == JSOP_ADD || op == JSOP_SUB || op == JSOP_BITOR || op == JSOP_BITAND ||
op == JSOP_BITXOR))
{
JitSpew(JitSpew_BaselineIC, " Generating %s(%s, %s) stub", js_CodeName[op],
lhs.isBoolean() ? "Boolean" : "Int32", rhs.isBoolean() ? "Boolean" : "Int32");
ICBinaryArith_BooleanWithInt32::Compiler compiler(cx, op, engine,
lhs.isBoolean(), rhs.isBoolean());
ICStub* arithStub = compiler.getStub(compiler.getStubSpace(script));
if (!arithStub)
return false;
stub->addNewStub(arithStub);
return true;
}
// Handle only int32 or double.
if (!lhs.isNumber() || !rhs.isNumber()) {
stub->noteUnoptimizableOperands();
return true;
}
MOZ_ASSERT(ret.isNumber());
if (lhs.isDouble() || rhs.isDouble() || ret.isDouble()) {
if (!cx->runtime()->jitSupportsFloatingPoint)
return true;
switch (op) {
case JSOP_ADD:
case JSOP_SUB:
case JSOP_MUL:
case JSOP_DIV:
case JSOP_MOD: {
// Unlink int32 stubs, it's faster to always use the double stub.
stub->unlinkStubsWithKind(cx, ICStub::BinaryArith_Int32);
JitSpew(JitSpew_BaselineIC, " Generating %s(Double, Double) stub", js_CodeName[op]);
ICBinaryArith_Double::Compiler compiler(cx, op, engine);
ICStub* doubleStub = compiler.getStub(compiler.getStubSpace(script));
if (!doubleStub)
return false;
stub->addNewStub(doubleStub);
return true;
}
default:
break;
}
}
if (lhs.isInt32() && rhs.isInt32() && op != JSOP_POW) {
bool allowDouble = ret.isDouble();
if (allowDouble)
stub->unlinkStubsWithKind(cx, ICStub::BinaryArith_Int32);
JitSpew(JitSpew_BaselineIC, " Generating %s(Int32, Int32%s) stub", js_CodeName[op],
allowDouble ? " => Double" : "");
ICBinaryArith_Int32::Compiler compilerInt32(cx, op, engine, allowDouble);
ICStub* int32Stub = compilerInt32.getStub(compilerInt32.getStubSpace(script));
if (!int32Stub)
return false;
stub->addNewStub(int32Stub);
return true;
}
// Handle Double <BITOP> Int32 or Int32 <BITOP> Double case.
if (((lhs.isDouble() && rhs.isInt32()) || (lhs.isInt32() && rhs.isDouble())) &&
ret.isInt32())
{
switch(op) {
case JSOP_BITOR:
case JSOP_BITXOR:
case JSOP_BITAND: {
JitSpew(JitSpew_BaselineIC, " Generating %s(%s, %s) stub", js_CodeName[op],
lhs.isDouble() ? "Double" : "Int32",
lhs.isDouble() ? "Int32" : "Double");
ICBinaryArith_DoubleWithInt32::Compiler compiler(cx, op, engine, lhs.isDouble());
ICStub* optStub = compiler.getStub(compiler.getStubSpace(script));
if (!optStub)
return false;
stub->addNewStub(optStub);
return true;
}
default:
break;
}
}
stub->noteUnoptimizableOperands();
return true;
}
typedef bool (*DoBinaryArithFallbackFn)(JSContext*, BaselineFrame*, ICBinaryArith_Fallback*,
HandleValue, HandleValue, MutableHandleValue);
static const VMFunction DoBinaryArithFallbackInfo =
FunctionInfo<DoBinaryArithFallbackFn>(DoBinaryArithFallback, TailCall, PopValues(2));
bool
ICBinaryArith_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(R0 == JSReturnOperand);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
// Ensure stack is fully synced for the expression decompiler.
masm.pushValue(R0);
masm.pushValue(R1);
// Push arguments.
masm.pushValue(R1);
masm.pushValue(R0);
masm.push(ICStubReg);
pushFramePtr(masm, R0.scratchReg());
return tailCallVM(DoBinaryArithFallbackInfo, masm);
}
static bool
DoConcatStrings(JSContext* cx, HandleString lhs, HandleString rhs, MutableHandleValue res)
{
JSString* result = ConcatStrings<CanGC>(cx, lhs, rhs);
if (!result)
return false;
res.setString(result);
return true;
}
typedef bool (*DoConcatStringsFn)(JSContext*, HandleString, HandleString, MutableHandleValue);
static const VMFunction DoConcatStringsInfo = FunctionInfo<DoConcatStringsFn>(DoConcatStrings, TailCall);
bool
ICBinaryArith_StringConcat::Compiler::generateStubCode(MacroAssembler& masm)
{
Label failure;
masm.branchTestString(Assembler::NotEqual, R0, &failure);
masm.branchTestString(Assembler::NotEqual, R1, &failure);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
masm.unboxString(R0, R0.scratchReg());
masm.unboxString(R1, R1.scratchReg());
masm.push(R1.scratchReg());
masm.push(R0.scratchReg());
if (!tailCallVM(DoConcatStringsInfo, masm))
return false;
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
static JSString*
ConvertObjectToStringForConcat(JSContext* cx, HandleValue obj)
{
MOZ_ASSERT(obj.isObject());
RootedValue rootedObj(cx, obj);
if (!ToPrimitive(cx, &rootedObj))
return nullptr;
return ToString<CanGC>(cx, rootedObj);
}
static bool
DoConcatStringObject(JSContext* cx, bool lhsIsString, HandleValue lhs, HandleValue rhs,
MutableHandleValue res)
{
JSString* lstr = nullptr;
JSString* rstr = nullptr;
if (lhsIsString) {
// Convert rhs first.
MOZ_ASSERT(lhs.isString() && rhs.isObject());
rstr = ConvertObjectToStringForConcat(cx, rhs);
if (!rstr)
return false;
// lhs is already string.
lstr = lhs.toString();
} else {
MOZ_ASSERT(rhs.isString() && lhs.isObject());
// Convert lhs first.
lstr = ConvertObjectToStringForConcat(cx, lhs);
if (!lstr)
return false;
// rhs is already string.
rstr = rhs.toString();
}
JSString* str = ConcatStrings<NoGC>(cx, lstr, rstr);
if (!str) {
RootedString nlstr(cx, lstr), nrstr(cx, rstr);
str = ConcatStrings<CanGC>(cx, nlstr, nrstr);
if (!str)
return false;
}
// Technically, we need to call TypeScript::MonitorString for this PC, however
// it was called when this stub was attached so it's OK.
res.setString(str);
return true;
}
typedef bool (*DoConcatStringObjectFn)(JSContext*, bool lhsIsString, HandleValue, HandleValue,
MutableHandleValue);
static const VMFunction DoConcatStringObjectInfo =
FunctionInfo<DoConcatStringObjectFn>(DoConcatStringObject, TailCall, PopValues(2));
bool
ICBinaryArith_StringObjectConcat::Compiler::generateStubCode(MacroAssembler& masm)
{
Label failure;
if (lhsIsString_) {
masm.branchTestString(Assembler::NotEqual, R0, &failure);
masm.branchTestObject(Assembler::NotEqual, R1, &failure);
} else {
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
masm.branchTestString(Assembler::NotEqual, R1, &failure);
}
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
// Sync for the decompiler.
masm.pushValue(R0);
masm.pushValue(R1);
// Push arguments.
masm.pushValue(R1);
masm.pushValue(R0);
masm.push(Imm32(lhsIsString_));
if (!tailCallVM(DoConcatStringObjectInfo, masm))
return false;
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
bool
ICBinaryArith_Double::Compiler::generateStubCode(MacroAssembler& masm)
{
Label failure;
masm.ensureDouble(R0, FloatReg0, &failure);
masm.ensureDouble(R1, FloatReg1, &failure);
switch (op) {
case JSOP_ADD:
masm.addDouble(FloatReg1, FloatReg0);
break;
case JSOP_SUB:
masm.subDouble(FloatReg1, FloatReg0);
break;
case JSOP_MUL:
masm.mulDouble(FloatReg1, FloatReg0);
break;
case JSOP_DIV:
masm.divDouble(FloatReg1, FloatReg0);
break;
case JSOP_MOD:
masm.setupUnalignedABICall(R0.scratchReg());
masm.passABIArg(FloatReg0, MoveOp::DOUBLE);
masm.passABIArg(FloatReg1, MoveOp::DOUBLE);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NumberMod), MoveOp::DOUBLE);
MOZ_ASSERT(ReturnDoubleReg == FloatReg0);
break;
default:
MOZ_CRASH("Unexpected op");
}
masm.boxDouble(FloatReg0, R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
bool
ICBinaryArith_BooleanWithInt32::Compiler::generateStubCode(MacroAssembler& masm)
{
Label failure;
if (lhsIsBool_)
masm.branchTestBoolean(Assembler::NotEqual, R0, &failure);
else
masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
if (rhsIsBool_)
masm.branchTestBoolean(Assembler::NotEqual, R1, &failure);
else
masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
Register lhsReg = lhsIsBool_ ? masm.extractBoolean(R0, ExtractTemp0)
: masm.extractInt32(R0, ExtractTemp0);
Register rhsReg = rhsIsBool_ ? masm.extractBoolean(R1, ExtractTemp1)
: masm.extractInt32(R1, ExtractTemp1);
MOZ_ASSERT(op_ == JSOP_ADD || op_ == JSOP_SUB ||
op_ == JSOP_BITOR || op_ == JSOP_BITXOR || op_ == JSOP_BITAND);
switch(op_) {
case JSOP_ADD: {
Label fixOverflow;
masm.branchAdd32(Assembler::Overflow, rhsReg, lhsReg, &fixOverflow);
masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
EmitReturnFromIC(masm);
masm.bind(&fixOverflow);
masm.sub32(rhsReg, lhsReg);
// Proceed to failure below.
break;
}
case JSOP_SUB: {
Label fixOverflow;
masm.branchSub32(Assembler::Overflow, rhsReg, lhsReg, &fixOverflow);
masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
EmitReturnFromIC(masm);
masm.bind(&fixOverflow);
masm.add32(rhsReg, lhsReg);
// Proceed to failure below.
break;
}
case JSOP_BITOR: {
masm.orPtr(rhsReg, lhsReg);
masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
EmitReturnFromIC(masm);
break;
}
case JSOP_BITXOR: {
masm.xorPtr(rhsReg, lhsReg);
masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
EmitReturnFromIC(masm);
break;
}
case JSOP_BITAND: {
masm.andPtr(rhsReg, lhsReg);
masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
EmitReturnFromIC(masm);
break;
}
default:
MOZ_CRASH("Unhandled op for BinaryArith_BooleanWithInt32.");
}
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
bool
ICBinaryArith_DoubleWithInt32::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(op == JSOP_BITOR || op == JSOP_BITAND || op == JSOP_BITXOR);
Label failure;
Register intReg;
Register scratchReg;
if (lhsIsDouble_) {
masm.branchTestDouble(Assembler::NotEqual, R0, &failure);
masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
intReg = masm.extractInt32(R1, ExtractTemp0);
masm.unboxDouble(R0, FloatReg0);
scratchReg = R0.scratchReg();
} else {
masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
masm.branchTestDouble(Assembler::NotEqual, R1, &failure);
intReg = masm.extractInt32(R0, ExtractTemp0);
masm.unboxDouble(R1, FloatReg0);
scratchReg = R1.scratchReg();
}
// Truncate the double to an int32.
{
Label doneTruncate;
Label truncateABICall;
masm.branchTruncateDouble(FloatReg0, scratchReg, &truncateABICall);
masm.jump(&doneTruncate);
masm.bind(&truncateABICall);
masm.push(intReg);
masm.setupUnalignedABICall(scratchReg);
masm.passABIArg(FloatReg0, MoveOp::DOUBLE);
masm.callWithABI(mozilla::BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
masm.storeCallResult(scratchReg);
masm.pop(intReg);
masm.bind(&doneTruncate);
}
Register intReg2 = scratchReg;
// All handled ops commute, so no need to worry about ordering.
switch(op) {
case JSOP_BITOR:
masm.orPtr(intReg, intReg2);
break;
case JSOP_BITXOR:
masm.xorPtr(intReg, intReg2);
break;
case JSOP_BITAND:
masm.andPtr(intReg, intReg2);
break;
default:
MOZ_CRASH("Unhandled op for BinaryArith_DoubleWithInt32.");
}
masm.tagValue(JSVAL_TYPE_INT32, intReg2, R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
} // namespace jit
} // namespace js

View File

@ -206,12 +206,12 @@ void TypeFallbackICSpew(JSContext* cx, ICTypeMonitor_Fallback* stub, const char*
#endif
//
// An entry in the JIT IC descriptor table.
// An entry in the Baseline IC descriptor table.
//
class ICEntry
{
private:
// A pointer to the shared IC stub for this instruction.
// A pointer to the baseline IC stub for this instruction.
ICStub* firstStub_;
// Offset from the start of the JIT code where the IC
@ -335,27 +335,8 @@ class ICEntry
inline ICStub** addressOfFirstStub() {
return &firstStub_;
}
void trace(JSTracer* trc);
};
class IonICEntry : public ICEntry
{
JSScript* script_;
public:
IonICEntry(uint32_t pcOffset, Kind kind, JSScript* script)
: ICEntry(pcOffset, kind),
script_(script)
{ }
JSScript* script() {
return script_;
}
};
class ICMonitoredStub;
class ICMonitoredFallbackStub;
class ICUpdatedStub;
@ -801,7 +782,7 @@ class ICFallbackStub : public ICStub
// The icEntry and lastStubPtrAddr_ fields can't be initialized when the stub is
// created since the stub is created at compile time, and we won't know the IC entry
// address until after compile when the JitScript is created. This method
// address until after compile when the BaselineScript is created. This method
// allows these fields to be fixed up at that point.
void fixupICEntry(ICEntry* icEntry) {
MOZ_ASSERT(icEntry_ == nullptr);
@ -1115,280 +1096,6 @@ class ICMultiStubCompiler : public ICStubCompiler
: ICStubCompiler(cx, kind, engine), op(op) {}
};
// BinaryArith
// JSOP_ADD, JSOP_SUB, JSOP_MUL, JOP_DIV, JSOP_MOD
// JSOP_BITAND, JSOP_BITXOR, JSOP_BITOR
// JSOP_LSH, JSOP_RSH, JSOP_URSH
class ICBinaryArith_Fallback : public ICFallbackStub
{
friend class ICStubSpace;
explicit ICBinaryArith_Fallback(JitCode* stubCode)
: ICFallbackStub(BinaryArith_Fallback, stubCode)
{
extra_ = 0;
}
static const uint16_t SAW_DOUBLE_RESULT_BIT = 0x1;
static const uint16_t UNOPTIMIZABLE_OPERANDS_BIT = 0x2;
public:
static const uint32_t MAX_OPTIMIZED_STUBS = 8;
bool sawDoubleResult() const {
return extra_ & SAW_DOUBLE_RESULT_BIT;
}
void setSawDoubleResult() {
extra_ |= SAW_DOUBLE_RESULT_BIT;
}
bool hadUnoptimizableOperands() const {
return extra_ & UNOPTIMIZABLE_OPERANDS_BIT;
}
void noteUnoptimizableOperands() {
extra_ |= UNOPTIMIZABLE_OPERANDS_BIT;
}
// Compiler for this stub kind.
class Compiler : public ICStubCompiler {
protected:
bool generateStubCode(MacroAssembler& masm);
public:
explicit Compiler(JSContext* cx, Engine engine)
: ICStubCompiler(cx, ICStub::BinaryArith_Fallback, engine) {}
ICStub* getStub(ICStubSpace* space) {
return newStub<ICBinaryArith_Fallback>(space, getStubCode());
}
};
};
class ICBinaryArith_Int32 : public ICStub
{
friend class ICStubSpace;
ICBinaryArith_Int32(JitCode* stubCode, bool allowDouble)
: ICStub(BinaryArith_Int32, stubCode)
{
extra_ = allowDouble;
}
public:
bool allowDouble() const {
return extra_;
}
// Compiler for this stub kind.
class Compiler : public ICStubCompiler {
protected:
JSOp op_;
bool allowDouble_;
bool generateStubCode(MacroAssembler& masm);
// Stub keys shift-stubs need to encode the kind, the JSOp and if we allow doubles.
virtual int32_t getKey() const {
return static_cast<int32_t>(engine_) |
(static_cast<int32_t>(kind) << 1) |
(static_cast<int32_t>(op_) << 17) |
(static_cast<int32_t>(allowDouble_) << 25);
}
public:
Compiler(JSContext* cx, JSOp op, Engine engine, bool allowDouble)
: ICStubCompiler(cx, ICStub::BinaryArith_Int32, engine),
op_(op), allowDouble_(allowDouble) {}
ICStub* getStub(ICStubSpace* space) {
return newStub<ICBinaryArith_Int32>(space, getStubCode(), allowDouble_);
}
};
};
// Optimized stub for string + string concatenation.
class ICBinaryArith_StringConcat : public ICStub
{
    friend class ICStubSpace;

    explicit ICBinaryArith_StringConcat(JitCode* stubCode)
      : ICStub(BinaryArith_StringConcat, stubCode)
    {}

  public:
    // Compiler for this stub kind. No extra state: the default
    // ICStubCompiler key (engine + kind) suffices.
    class Compiler : public ICStubCompiler {
      protected:
        bool generateStubCode(MacroAssembler& masm);

      public:
        explicit Compiler(JSContext* cx, Engine engine)
          : ICStubCompiler(cx, ICStub::BinaryArith_StringConcat, engine)
        {}

        ICStub* getStub(ICStubSpace* space) {
            return newStub<ICBinaryArith_StringConcat>(space, getStubCode());
        }
    };
};
// Optimized stub for concatenation where one operand is a string and the
// other an object; extra_ records which side is the string.
class ICBinaryArith_StringObjectConcat : public ICStub
{
    friend class ICStubSpace;

    ICBinaryArith_StringObjectConcat(JitCode* stubCode, bool lhsIsString)
      : ICStub(BinaryArith_StringObjectConcat, stubCode)
    {
        extra_ = lhsIsString;
    }

  public:
    // True when the left operand is the string (right is the object).
    bool lhsIsString() const {
        return extra_;
    }

    class Compiler : public ICStubCompiler {
      protected:
        bool lhsIsString_;

        bool generateStubCode(MacroAssembler& masm);

        // The key must distinguish which operand is the string.
        virtual int32_t getKey() const {
            int32_t key = static_cast<int32_t>(engine_);
            key |= static_cast<int32_t>(kind) << 1;
            key |= static_cast<int32_t>(lhsIsString_) << 17;
            return key;
        }

      public:
        Compiler(JSContext* cx, Engine engine, bool lhsIsString)
          : ICStubCompiler(cx, ICStub::BinaryArith_StringObjectConcat, engine),
            lhsIsString_(lhsIsString)
        {}

        ICStub* getStub(ICStubSpace* space) {
            return newStub<ICBinaryArith_StringObjectConcat>(space, getStubCode(),
                                                             lhsIsString_);
        }
    };
};
// Optimized stub for double <op> double arithmetic.
class ICBinaryArith_Double : public ICStub
{
    friend class ICStubSpace;

    explicit ICBinaryArith_Double(JitCode* stubCode)
      : ICStub(BinaryArith_Double, stubCode)
    {}

  public:
    // Compiler for this stub kind. Uses ICMultiStubCompiler so the JSOp
    // participates in the stub-code cache key.
    class Compiler : public ICMultiStubCompiler {
      protected:
        bool generateStubCode(MacroAssembler& masm);

      public:
        Compiler(JSContext* cx, JSOp op, Engine engine)
          : ICMultiStubCompiler(cx, ICStub::BinaryArith_Double, op, engine)
        {}

        ICStub* getStub(ICStubSpace* space) {
            return newStub<ICBinaryArith_Double>(space, getStubCode());
        }
    };
};
// Optimized stub for arithmetic where one or both operands are booleans
// (mixed with int32). extra_ records which side(s) are booleans.
class ICBinaryArith_BooleanWithInt32 : public ICStub
{
    friend class ICStubSpace;

    // Named extra_ flag bits, matching the SAW_DOUBLE_RESULT_BIT /
    // UNOPTIMIZABLE_OPERANDS_BIT style used by ICBinaryArith_Fallback,
    // instead of bare magic numbers. The bit layout is unchanged.
    static const uint16_t LHS_IS_BOOL_BIT = 0x1;
    static const uint16_t RHS_IS_BOOL_BIT = 0x2;

    ICBinaryArith_BooleanWithInt32(JitCode* stubCode, bool lhsIsBool, bool rhsIsBool)
      : ICStub(BinaryArith_BooleanWithInt32, stubCode)
    {
        // At least one operand must be a boolean, otherwise a plain
        // int32 stub would apply.
        MOZ_ASSERT(lhsIsBool || rhsIsBool);
        extra_ = 0;
        if (lhsIsBool)
            extra_ |= LHS_IS_BOOL_BIT;
        if (rhsIsBool)
            extra_ |= RHS_IS_BOOL_BIT;
    }

  public:
    // True when the left operand is a boolean.
    bool lhsIsBoolean() const {
        return extra_ & LHS_IS_BOOL_BIT;
    }
    // True when the right operand is a boolean.
    bool rhsIsBoolean() const {
        return extra_ & RHS_IS_BOOL_BIT;
    }

    class Compiler : public ICStubCompiler {
      protected:
        JSOp op_;
        bool lhsIsBool_;
        bool rhsIsBool_;
        bool generateStubCode(MacroAssembler& masm);

        // The key must distinguish the op and which operand(s) are booleans.
        virtual int32_t getKey() const {
            return static_cast<int32_t>(engine_) |
                   (static_cast<int32_t>(kind) << 1) |
                   (static_cast<int32_t>(op_) << 17) |
                   (static_cast<int32_t>(lhsIsBool_) << 25) |
                   (static_cast<int32_t>(rhsIsBool_) << 26);
        }

      public:
        Compiler(JSContext* cx, JSOp op, Engine engine, bool lhsIsBool, bool rhsIsBool)
          : ICStubCompiler(cx, ICStub::BinaryArith_BooleanWithInt32, engine),
            op_(op), lhsIsBool_(lhsIsBool), rhsIsBool_(rhsIsBool)
        {
            // Only these ops have boolean-specialized codegen.
            MOZ_ASSERT(op_ == JSOP_ADD || op_ == JSOP_SUB || op_ == JSOP_BITOR ||
                       op_ == JSOP_BITAND || op_ == JSOP_BITXOR);
            MOZ_ASSERT(lhsIsBool_ || rhsIsBool_);
        }

        ICStub* getStub(ICStubSpace* space) {
            return newStub<ICBinaryArith_BooleanWithInt32>(space, getStubCode(),
                                                           lhsIsBool_, rhsIsBool_);
        }
    };
};
// Optimized stub for arithmetic mixing a double and an int32; extra_
// records which side is the double.
class ICBinaryArith_DoubleWithInt32 : public ICStub
{
    friend class ICStubSpace;

    ICBinaryArith_DoubleWithInt32(JitCode* stubCode, bool lhsIsDouble)
      : ICStub(BinaryArith_DoubleWithInt32, stubCode)
    {
        extra_ = lhsIsDouble;
    }

  public:
    // True when the left operand is the double (right is the int32).
    bool lhsIsDouble() const {
        return extra_;
    }

    class Compiler : public ICMultiStubCompiler {
      protected:
        bool lhsIsDouble_;

        bool generateStubCode(MacroAssembler& masm);

        // The key must encode the op (from ICMultiStubCompiler) and which
        // operand is the double.
        virtual int32_t getKey() const {
            int32_t key = static_cast<int32_t>(engine_);
            key |= static_cast<int32_t>(kind) << 1;
            key |= static_cast<int32_t>(op) << 17;
            key |= static_cast<int32_t>(lhsIsDouble_) << 25;
            return key;
        }

      public:
        Compiler(JSContext* cx, JSOp op, Engine engine, bool lhsIsDouble)
          : ICMultiStubCompiler(cx, ICStub::BinaryArith_DoubleWithInt32, op, engine),
            lhsIsDouble_(lhsIsDouble)
        {}

        ICStub* getStub(ICStubSpace* space) {
            return newStub<ICBinaryArith_DoubleWithInt32>(space, getStubCode(),
                                                          lhsIsDouble_);
        }
    };
};
} // namespace jit
} // namespace js

View File

@ -11,15 +11,7 @@ namespace js {
namespace jit {
// List of IC stub kinds that can run in Baseline and in IonMonkey
#define IC_SHARED_STUB_KIND_LIST(_) \
_(BinaryArith_Fallback) \
_(BinaryArith_Int32) \
_(BinaryArith_Double) \
_(BinaryArith_StringConcat) \
_(BinaryArith_StringObjectConcat) \
_(BinaryArith_BooleanWithInt32) \
_(BinaryArith_DoubleWithInt32) \
\
#define IC_SHARED_STUB_KIND_LIST(_)
} // namespace jit
} // namespace js

View File

@ -1195,7 +1195,6 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
_(MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >) \
_(MixPolicy<StringPolicy<0>, IntPolicy<1> >) \
_(MixPolicy<StringPolicy<0>, StringPolicy<1> >) \
_(MixPolicy<BoxPolicy<0>, BoxPolicy<1> >) \
_(NoFloatPolicy<0>) \
_(NoFloatPolicyAfter<1>) \
_(NoFloatPolicyAfter<2>) \

View File

@ -19,14 +19,14 @@ using mozilla::FloorLog2;
void
LIRGeneratorARM::useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1,
Register reg2, bool useAtStart)
Register reg2)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(reg1 != reg2);
ensureDefined(mir);
lir->setOperand(n, LUse(reg1, mir->virtualRegister(), useAtStart));
lir->setOperand(n + 1, LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
lir->setOperand(n, LUse(reg1, mir->virtualRegister()));
lir->setOperand(n + 1, LUse(reg2, VirtualRegisterOfPayload(mir)));
}
LAllocation

View File

@ -22,8 +22,7 @@ class LIRGeneratorARM : public LIRGeneratorShared
protected:
// Adds a box input to an instruction, setting operand |n| to the type and
// |n+1| to the payload.
void useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register reg2,
bool useAtStart = false);
void useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register reg2);
// x86 has constraints on what registers can be formatted for 1-byte
// stores and loads; on ARM all registers are okay.

View File

@ -18,7 +18,7 @@ using namespace js::jit;
using mozilla::FloorLog2;
void
LIRGeneratorARM64::useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register, bool useAtStart)
LIRGeneratorARM64::useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register)
{
MOZ_CRASH("useBoxFixed");
}

View File

@ -22,8 +22,7 @@ class LIRGeneratorARM64 : public LIRGeneratorShared
protected:
// Adds a box input to an instruction, setting operand |n| to the type and
// |n+1| to the payload.
void useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register reg2,
bool useAtStart = false);
void useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register reg2);
LAllocation useByteOpRegister(MDefinition* mir);
LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);

View File

@ -20,14 +20,14 @@ using mozilla::FloorLog2;
void
LIRGeneratorMIPS::useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1,
Register reg2, bool useAtStart)
Register reg2)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(reg1 != reg2);
ensureDefined(mir);
lir->setOperand(n, LUse(reg1, mir->virtualRegister(), useAtStart));
lir->setOperand(n + 1, LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
lir->setOperand(n, LUse(reg1, mir->virtualRegister()));
lir->setOperand(n + 1, LUse(reg2, VirtualRegisterOfPayload(mir)));
}
LAllocation

View File

@ -22,8 +22,7 @@ class LIRGeneratorMIPS : public LIRGeneratorShared
protected:
// Adds a box input to an instruction, setting operand |n| to the type and
// |n+1| to the payload.
void useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register reg2
bool useAtStart = false);
void useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register reg2);
// x86 has constraints on what registers can be formatted for 1-byte
// stores and loads; on MIPS all registers are okay.

View File

@ -21,7 +21,7 @@ class LIRGeneratorNone : public LIRGeneratorShared
MOZ_CRASH();
}
void useBoxFixed(LInstruction*, size_t, MDefinition*, Register, Register, bool useAtStart = false) { MOZ_CRASH(); }
void useBoxFixed(LInstruction*, size_t, MDefinition*, Register, Register) { MOZ_CRASH(); }
LAllocation useByteOpRegister(MDefinition*) { MOZ_CRASH(); }
LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition*) { MOZ_CRASH(); }

View File

@ -55,7 +55,6 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, Mac
lastOsiPointOffset_(0),
safepoints_(graph->totalSlotCount(), (gen->info().nargs() + 1) * sizeof(Value)),
returnLabel_(),
stubSpace_(),
nativeToBytecodeMap_(nullptr),
nativeToBytecodeMapSize_(0),
nativeToBytecodeTableOffset_(0),

View File

@ -85,11 +85,6 @@ class CodeGeneratorShared : public LElementVisitor
Label invalidate_;
CodeOffsetLabel invalidateEpilogueData_;
// Label for the common return path.
NonAssertingLabel returnLabel_;
FallbackICStubSpace stubSpace_;
js::Vector<SafepointIndex, 0, SystemAllocPolicy> safepointIndices_;
js::Vector<OsiIndex, 0, SystemAllocPolicy> osiIndices_;
@ -110,6 +105,9 @@ class CodeGeneratorShared : public LElementVisitor
js::Vector<CodeOffsetLabel, 0, SystemAllocPolicy> patchableTLScripts_;
#endif
// Label for the common return path.
NonAssertingLabel returnLabel_;
public:
struct NativeToBytecode {
CodeOffsetLabel nativeOffset;

View File

@ -3920,31 +3920,6 @@ class LStringReplace: public LStrReplace
}
};
class LBinarySharedStub : public LCallInstructionHelper<BOX_PIECES, 2 * BOX_PIECES, 0>
{
public:
LIR_HEADER(BinarySharedStub)
const MBinarySharedStub* mir() const {
return mir_->toBinarySharedStub();
}
static const size_t LhsInput = 0;
static const size_t RhsInput = BOX_PIECES;
};
class LUnarySharedStub : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
{
public:
LIR_HEADER(UnarySharedStub)
const MUnarySharedStub* mir() const {
return mir_->toUnarySharedStub();
}
static const size_t Input = 0;
};
class LLambdaForSingleton : public LCallInstructionHelper<1, 1, 0>
{
public:

View File

@ -193,8 +193,6 @@
_(RegExpReplace) \
_(StringReplace) \
_(Substr) \
_(BinarySharedStub) \
_(UnarySharedStub) \
_(Lambda) \
_(LambdaArrow) \
_(LambdaForSingleton) \

View File

@ -108,30 +108,6 @@ LIRGeneratorShared::defineBox(LInstructionHelper<BOX_PIECES, Ops, Temps>* lir, M
add(lir);
}
void
LIRGeneratorShared::defineSharedStubReturn(LInstruction* lir, MDefinition* mir)
{
lir->setMir(mir);
MOZ_ASSERT(lir->isBinarySharedStub() || lir->isUnarySharedStub());
MOZ_ASSERT(mir->type() == MIRType_Value);
uint32_t vreg = getVirtualRegister();
#if defined(JS_NUNBOX32)
lir->setDef(TYPE_INDEX, LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE,
LGeneralReg(JSReturnReg_Type)));
lir->setDef(PAYLOAD_INDEX, LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD,
LGeneralReg(JSReturnReg_Data)));
getVirtualRegister();
#elif defined(JS_PUNBOX64)
lir->setDef(0, LDefinition(vreg, LDefinition::BOX, LGeneralReg(JSReturnReg)));
#endif
mir->setVirtualRegister(vreg);
add(lir);
}
void
LIRGeneratorShared::defineReturn(LInstruction* lir, MDefinition* mir)
{

View File

@ -142,7 +142,6 @@ class LIRGeneratorShared : public MDefinitionVisitor
inline void defineBox(LInstructionHelper<BOX_PIECES, Ops, Temps>* lir, MDefinition* mir,
LDefinition::Policy policy = LDefinition::REGISTER);
inline void defineSharedStubReturn(LInstruction* lir, MDefinition* mir);
inline void defineReturn(LInstruction* lir, MDefinition* mir);
template <size_t X>

View File

@ -15,12 +15,12 @@ using namespace js;
using namespace js::jit;
void
LIRGeneratorX64::useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register, bool useAtStart)
LIRGeneratorX64::useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
ensureDefined(mir);
lir->setOperand(n, LUse(reg1, mir->virtualRegister(), useAtStart));
lir->setOperand(n, LUse(reg1, mir->virtualRegister()));
}
LAllocation

View File

@ -24,7 +24,7 @@ class LIRGeneratorX64 : public LIRGeneratorX86Shared
void defineUntypedPhi(MPhi* phi, size_t lirIndex);
// Adds a use at operand |n| of a value-typed insturction.
void useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register, bool useAtStart = false);
void useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register);
// x86 has constraints on what registers can be formatted for 1-byte
// stores and loads; on x64 all registers are okay.

View File

@ -16,14 +16,14 @@ using namespace js::jit;
void
LIRGeneratorX86::useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1,
Register reg2, bool useAtStart)
Register reg2)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(reg1 != reg2);
ensureDefined(mir);
lir->setOperand(n, LUse(reg1, mir->virtualRegister(), useAtStart));
lir->setOperand(n + 1, LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
lir->setOperand(n, LUse(reg1, mir->virtualRegister()));
lir->setOperand(n + 1, LUse(reg2, VirtualRegisterOfPayload(mir)));
}
LAllocation

View File

@ -22,8 +22,7 @@ class LIRGeneratorX86 : public LIRGeneratorX86Shared
protected:
// Adds a box input to an instruction, setting operand |n| to the type and
// |n+1| to the payload.
void useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register reg2,
bool useAtStart = false);
void useBoxFixed(LInstruction* lir, size_t n, MDefinition* mir, Register reg1, Register reg2);
// It's a trap! On x86, the 1-byte store can only use one of
// {al,bl,cl,dl,ah,bh,ch,dh}. That means if the register allocator