Bug 1166037 - Import ARM64 Baseline changes. r=djvj

This commit is contained in:
Sean Stangl 2015-06-10 15:08:35 -07:00
parent 25dbc7043c
commit a108009351
9 changed files with 694 additions and 19 deletions

View File

@ -372,8 +372,9 @@ struct BaselineStackBuilder
MOZ_ASSERT(BaselineFrameReg == FramePointer);
priorOffset -= sizeof(void*);
return virtualPointerAtStackOffset(priorOffset);
#elif defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
// On X64, ARM and MIPS, the frame pointer save location depends on
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_MIPS)
// On X64, ARM, ARM64, and MIPS, the frame pointer save location depends on
// the caller of the rectifier frame.
BufferPointer<RectifierFrameLayout> priorFrame =
pointerAtStackOffset<RectifierFrameLayout>(priorOffset);
@ -1569,10 +1570,10 @@ jit::BailoutIonToBaseline(JSContext* cx, JitActivation* activation, JitFrameIter
// Do stack check.
bool overRecursed = false;
BaselineBailoutInfo* info = builder.info();
BaselineBailoutInfo *info = builder.info();
uint8_t* newsp = info->incomingStack - (info->copyStackTop - info->copyStackBottom);
#if defined(JS_ARM_SIMULATOR) || defined(JS_MIPS_SIMULATOR)
if (Simulator::Current()->overRecursed(uintptr_t(newsp)))
#if defined(JS_ARM_SIMULATOR) || defined(JS_ARM64_SIMULATOR) || defined(JS_MIPS_SIMULATOR)
if (SimulatorType::Current()->overRecursed(uintptr_t(newsp)))
overRecursed = true;
#else
JS_CHECK_RECURSION_WITH_SP_DONT_REPORT(cx, newsp, overRecursed = true);

View File

@ -345,13 +345,11 @@ BaselineCompiler::emitPrologue()
emitProfilerEnterFrame();
masm.push(BaselineFrameReg);
masm.mov(BaselineStackReg, BaselineFrameReg);
masm.subPtr(Imm32(BaselineFrame::Size()), BaselineStackReg);
masm.moveStackPtrTo(BaselineFrameReg);
masm.subFromStackPtr(Imm32(BaselineFrame::Size()));
// Initialize BaselineFrame. For eval scripts, the scope chain
// is passed in R1, so we have to be careful not to clobber
// it.
// is passed in R1, so we have to be careful not to clobber it.
// Initialize BaselineFrame::flags.
uint32_t flags = 0;
@ -453,7 +451,7 @@ BaselineCompiler::emitEpilogue()
return false;
#endif
masm.mov(BaselineFrameReg, BaselineStackReg);
masm.moveToStackPtr(BaselineFrameReg);
masm.pop(BaselineFrameReg);
emitProfilerExitFrame();
@ -479,7 +477,7 @@ BaselineCompiler::emitOutOfLinePostBarrierSlot()
regs.take(objReg);
regs.take(BaselineFrameReg);
Register scratch = regs.takeAny();
#if defined(JS_CODEGEN_ARM)
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
// On ARM, save the link register before calling. It contains the return
// address. The |masm.ret()| later will pop this into |pc| to return.
masm.push(lr);
@ -527,7 +525,7 @@ BaselineCompiler::emitStackCheck(bool earlyCheck)
uint32_t slotsSize = script->nslots() * sizeof(Value);
uint32_t tolerance = earlyCheck ? slotsSize : 0;
masm.movePtr(BaselineStackReg, R1.scratchReg());
masm.moveStackPtrTo(R1.scratchReg());
// If this is the early stack check, locals haven't been pushed yet. Adjust the
// stack pointer to account for the locals that would be pushed before performing
@ -3710,7 +3708,7 @@ BaselineCompiler::emit_JSOP_RESUME()
// Update BaselineFrame frameSize field and create the frame descriptor.
masm.computeEffectiveAddress(Address(BaselineFrameReg, BaselineFrame::FramePointerOffset),
scratch2);
masm.subPtr(BaselineStackReg, scratch2);
masm.subStackPtrFrom(scratch2);
masm.store32(scratch2, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
masm.makeFrameDescriptor(scratch2, JitFrame_BaselineJS);
@ -3755,8 +3753,8 @@ BaselineCompiler::emit_JSOP_RESUME()
// Construct BaselineFrame.
masm.push(BaselineFrameReg);
masm.mov(BaselineStackReg, BaselineFrameReg);
masm.subPtr(Imm32(BaselineFrame::Size()), BaselineStackReg);
masm.moveStackPtrTo(BaselineFrameReg);
masm.subFromStackPtr(Imm32(BaselineFrame::Size()));
masm.checkStackAlignment();
// Store flags and scope chain.
@ -3823,7 +3821,7 @@ BaselineCompiler::emit_JSOP_RESUME()
masm.computeEffectiveAddress(Address(BaselineFrameReg, BaselineFrame::FramePointerOffset),
scratch2);
masm.movePtr(scratch2, scratch1);
masm.subPtr(BaselineStackReg, scratch2);
masm.subStackPtrFrom(scratch2);
masm.store32(scratch2, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
masm.loadBaselineFramePtr(BaselineFrameReg, scratch2);
@ -3838,14 +3836,18 @@ BaselineCompiler::emit_JSOP_RESUME()
return false;
// Create the frame descriptor.
masm.subPtr(BaselineStackReg, scratch1);
masm.subStackPtrFrom(scratch1);
masm.makeFrameDescriptor(scratch1, JitFrame_BaselineJS);
// Push the frame descriptor and a dummy return address (it doesn't
// matter what we push here, frame iterators will use the frame pc
// set in jit::GeneratorThrowOrClose).
masm.push(scratch1);
// On ARM64, the callee will push the return address.
#ifndef JS_CODEGEN_ARM64
masm.push(ImmWord(0));
#endif
masm.jump(code);
}
@ -3872,7 +3874,7 @@ BaselineCompiler::emit_JSOP_RESUME()
// After the generator returns, we restore the stack pointer, push the
// return value and we're done.
masm.bind(&returnTarget);
masm.computeEffectiveAddress(frame.addressOfStackValue(frame.peek(-1)), BaselineStackReg);
masm.computeEffectiveAddress(frame.addressOfStackValue(frame.peek(-1)), masm.getStackPointer());
frame.popn(2);
frame.push(R0);
return true;

View File

@ -14,6 +14,8 @@
# include "jit/x64/BaselineCompiler-x64.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/BaselineCompiler-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/BaselineCompiler-arm64.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/BaselineCompiler-mips.h"
#elif defined(JS_CODEGEN_NONE)

View File

@ -13,6 +13,8 @@
# include "jit/x64/SharedICHelpers-x64.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/SharedICHelpers-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/SharedICHelpers-arm64.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/SharedICHelpers-mips.h"
#elif defined(JS_CODEGEN_NONE)

View File

@ -13,6 +13,8 @@
# include "jit/x64/SharedICRegisters-x64.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/SharedICRegisters-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/SharedICRegisters-arm64.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/SharedICRegisters-mips.h"
#elif defined(JS_CODEGEN_NONE)

View File

@ -0,0 +1,28 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_arm64_BaselineCompiler_arm64_h
#define jit_arm64_BaselineCompiler_arm64_h
#include "jit/shared/BaselineCompiler-shared.h"
namespace js {
namespace jit {
// ARM64 specialization of the Baseline compiler. The ARM64 port needs no
// architecture-specific compilation helpers, so this class only forwards its
// constructor to the shared implementation.
class BaselineCompilerARM64 : public BaselineCompilerShared
{
  protected:
    // Constructed only by the platform-independent BaselineCompiler wrapper;
    // simply delegates to BaselineCompilerShared.
    BaselineCompilerARM64(JSContext* cx, TempAllocator& alloc, JSScript* script)
      : BaselineCompilerShared(cx, alloc, script)
    { }
};

// Name used by the platform-independent BaselineCompiler to refer to the
// per-architecture specialization.
typedef BaselineCompilerARM64 BaselineCompilerSpecific;
} // namespace jit
} // namespace js
#endif /* jit_arm64_BaselineCompiler_arm64_h */

View File

@ -0,0 +1,269 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/SharedIC.h"
#include "jit/SharedICHelpers.h"
#ifdef JS_ARM64_SIMULATOR
// TODO #include "jit/arm64/Assembler-arm64.h"
#include "jit/arm64/BaselineCompiler-arm64.h"
#include "jit/arm64/vixl/Debugger-vixl.h"
#endif
using namespace js;
using namespace js::jit;
namespace js {
namespace jit {
// ICCompare_Int32

// Emit the stub code for an Int32 comparison IC: guard that both operands are
// boxed int32s, compare their payloads, and return the result boxed as a
// Boolean in R0. On guard failure, fall through to the next stub in the chain.
bool
ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm)
{
    // Guard that R0 is an integer and R1 is an integer.
    Label failure;
    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);

    // Compare payload regs of R0 and R1.
    Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
    masm.cmp32(R0.valueReg(), R1.valueReg());
    // CSET materializes the condition as 0 or 1 in the 32-bit payload.
    masm.Cset(ARMRegister(R0.valueReg(), 32), cond);

    // Result is implicitly boxed already.
    masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.valueReg(), R0);
    EmitReturnFromIC(masm);

    // Failure case - jump to next stub.
    masm.bind(&failure);
    EmitStubGuardFailure(masm);
    return true;
}
// Emit the stub code for a Double comparison IC: coerce both operands to
// doubles (guard-failing if that is not possible), compare them, and return
// the result boxed as a Boolean in R0. On guard failure, fall through to the
// next stub in the chain.
bool
ICCompare_Double::Compiler::generateStubCode(MacroAssembler& masm)
{
    // Guard: both operands must be representable as doubles.
    Label failure;
    masm.ensureDouble(R0, FloatReg0, &failure);
    masm.ensureDouble(R1, FloatReg1, &failure);

    Register dest = R0.valueReg();

    Assembler::DoubleCondition doubleCond = JSOpToDoubleCondition(op);
    Assembler::Condition cond = Assembler::ConditionFromDoubleCondition(doubleCond);

    // Compare, materialize the condition as 0 or 1 in |dest|, and box it as
    // a Boolean. NaN operands are handled by the double condition encoding;
    // no separate NaN path is needed here.
    masm.compareDouble(doubleCond, FloatReg0, FloatReg1);
    masm.Cset(ARMRegister(dest, 32), cond);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
    EmitReturnFromIC(masm);

    // Failure case - jump to next stub.
    masm.bind(&failure);
    EmitStubGuardFailure(masm);
    return true;
}
// ICBinaryArith_Int32

// Emit the stub code for an Int32 binary-arithmetic IC. Both operands are
// guarded to be boxed int32s; the result is produced in R0. Operations whose
// int32 result would be inexact (overflow, negative zero, fractional
// division) bail out to |failure| so a later stub can produce a double.
bool
ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
{
    // Guard that R0 is an integer and R1 is an integer.
    Label failure;
    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);

    // Add R0 and R1. Don't need to explicitly unbox, just use R2.
    Register Rscratch = R2_;
    ARMRegister Wscratch = ARMRegister(Rscratch, 32);
#ifdef MERGE
    // DIV and MOD need an extra non-volatile ValueOperand to hold R0.
    AllocatableGeneralRegisterSet savedRegs(availableGeneralRegs(2));
    savedRegs.set() = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs);
#endif
    // get some more ARM-y names for the registers
    ARMRegister W0(R0_, 32);
    ARMRegister X0(R0_, 64);
    ARMRegister W1(R1_, 32);
    ARMRegister X1(R1_, 64);
    ARMRegister WTemp(ExtractTemp0, 32);
    ARMRegister XTemp(ExtractTemp0, 64);

    Label maybeNegZero, revertRegister;
    switch(op_) {
      case JSOP_ADD:
        masm.Adds(WTemp, W0, Operand(W1));

        // Just jump to failure on overflow. R0 and R1 are preserved, so we can
        // just jump to the next stub.
        masm.j(Assembler::Overflow, &failure);

        // Box the result and return. We know R0 already contains the
        // integer tag, so we just need to move the payload into place.
        masm.movePayload(ExtractTemp0, R0_);
        break;

      case JSOP_SUB:
        masm.Subs(WTemp, W0, Operand(W1));
        masm.j(Assembler::Overflow, &failure);
        masm.movePayload(ExtractTemp0, R0_);
        break;

      case JSOP_MUL:
        // mul32 branches to |failure| on overflow and to |maybeNegZero| when
        // the product is zero (which may need to be -0.0).
        masm.mul32(R0.valueReg(), R1.valueReg(), Rscratch, &failure, &maybeNegZero);
        masm.movePayload(Rscratch, R0_);
        break;

      case JSOP_DIV:
      case JSOP_MOD: {
        // Check for INT_MIN / -1, it results in a double.
        Label check2;
        masm.Cmp(W0, Operand(INT_MIN));
        masm.B(&check2, Assembler::NotEqual);
        masm.Cmp(W1, Operand(-1));
        masm.j(Assembler::Equal, &failure);
        masm.bind(&check2);

        Label no_fail;
        // Check for both division by zero and 0 / X with X < 0 (results in -0).
        masm.Cmp(W1, Operand(0));
        // If x > 0, then it can't be bad.
        masm.B(&no_fail, Assembler::GreaterThan);
        // if x == 0, then ignore any comparison, and force
        // it to fail, if x < 0 (the only other case)
        // then do the comparison, and fail if y == 0
        masm.Ccmp(W0, Operand(0), vixl::ZFlag, Assembler::NotEqual);
        masm.B(&failure, Assembler::Equal);
        masm.bind(&no_fail);
        masm.Sdiv(Wscratch, W0, W1);
        // Start calculating the remainder, x - (x / y) * y.
        masm.mul(WTemp, W1, Wscratch);
        if (op_ == JSOP_DIV) {
            // Result is a double if the remainder != 0, which happens
            // when (x/y)*y != x.
            masm.branch32(Assembler::NotEqual, R0.valueReg(), ExtractTemp0, &revertRegister);
            masm.movePayload(Rscratch, R0_);
        } else {
            // Calculate the actual mod. Set the condition code, so we can see if it is non-zero.
            masm.Subs(WTemp, W0, WTemp);

            // If X % Y == 0 and X < 0, the result is -0.
            masm.Ccmp(W0, Operand(0), vixl::NoFlag, Assembler::Equal);
            masm.branch(Assembler::LessThan, &revertRegister);
            masm.movePayload(ExtractTemp0, R0_);
        }
        break;
      }
        // ORR, EOR, AND can trivially be coerced int
        // working without affecting the tag of the dest..
      case JSOP_BITOR:
        masm.Orr(X0, X0, Operand(X1));
        break;

      case JSOP_BITXOR:
        masm.Eor(X0, X0, Operand(W1, vixl::UXTW));
        break;

      case JSOP_BITAND:
        masm.And(X0, X0, Operand(X1));
        break;

        // LSH, RSH and URSH can not.
      case JSOP_LSH:
        // ARM will happily try to shift by more than 0x1f.
        masm.Lsl(Wscratch, W0, W1);
        masm.movePayload(Rscratch, R0.valueReg());
        break;

      case JSOP_RSH:
        masm.Asr(Wscratch, W0, W1);
        masm.movePayload(Rscratch, R0.valueReg());
        break;

      case JSOP_URSH:
        masm.Lsr(Wscratch, W0, W1);
        if (allowDouble_) {
            Label toUint;
            // Testing for negative is equivalent to testing bit 31
            masm.Tbnz(Wscratch, 31, &toUint);
            // Move result and box for return.
            masm.movePayload(Rscratch, R0_);
            EmitReturnFromIC(masm);

            masm.bind(&toUint);
            // The unsigned result doesn't fit in an int32: return a double.
            masm.convertUInt32ToDouble(Rscratch, ScratchDoubleReg);
            masm.boxDouble(ScratchDoubleReg, R0);
        } else {
            // Testing for negative is equivalent to testing bit 31
            masm.Tbnz(Wscratch, 31, &failure);
            // Move result for return.
            masm.movePayload(Rscratch, R0_);
        }
        break;

      default:
        MOZ_CRASH("Unhandled op for BinaryArith_Int32.");
    }

    EmitReturnFromIC(masm);

    // Out-of-line paths for ops that needed them.
    switch (op_) {
      case JSOP_MUL:
        masm.bind(&maybeNegZero);

        // Result is -0 if exactly one of lhs or rhs is negative.
        masm.Cmn(W0, W1);
        masm.j(Assembler::Signed, &failure);

        // Result is +0, so use the zero register.
        masm.movePayload(rzr, R0_);
        EmitReturnFromIC(masm);
        break;

      case JSOP_DIV:
      case JSOP_MOD:
        // Inexact division result: fall through to the failure path so a
        // later stub can produce a double. R0 and R1 are still intact.
        masm.bind(&revertRegister);
        break;

      default:
        break;
    }

    // Failure case - jump to next stub.
    masm.bind(&failure);
    EmitStubGuardFailure(masm);
    return true;
}
// Emit the stub code for an Int32 unary-arithmetic IC (JSOP_BITNOT and
// JSOP_NEG). The operand is guarded to be a boxed int32; the result is
// returned in R0. Cases whose result cannot be an int32 fail over to the
// next stub.
bool
ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
{
    Label failure;
    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);

    switch (op) {
      case JSOP_BITNOT:
        masm.Mvn(ARMRegister(R1.valueReg(), 32), ARMRegister(R0.valueReg(), 32));
        masm.movePayload(R1.valueReg(), R0.valueReg());
        break;
      case JSOP_NEG:
        // Guard against 0 and MIN_INT, both result in a double.
        // (A value with no bits set in the low 31 is either 0 or INT_MIN.)
        masm.branchTest32(Assembler::Zero, R0.valueReg(), Imm32(0x7fffffff), &failure);

        // Compile -x as 0 - x.
        masm.Sub(ARMRegister(R1.valueReg(), 32), wzr, ARMRegister(R0.valueReg(), 32));
        masm.movePayload(R1.valueReg(), R0.valueReg());
        break;
      default:
        MOZ_CRASH("Unexpected op");
    }

    EmitReturnFromIC(masm);

    masm.bind(&failure);
    EmitStubGuardFailure(masm);
    return true;
}
} // namespace jit
} // namespace js

View File

@ -0,0 +1,309 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_arm64_SharedICHelpers_arm64_h
#define jit_arm64_SharedICHelpers_arm64_h
#include "jit/BaselineFrame.h"
#include "jit/BaselineIC.h"
#include "jit/MacroAssembler.h"
#include "jit/SharedICRegisters.h"
namespace js {
namespace jit {
// Distance from the stack pointer to the topmost Value inside an IC stub.
// On ARM64 the return address is kept in lr rather than pushed on the stack,
// so there is no return-address slot to skip over.
static const size_t ICStackValueOffset = 0;
inline void
EmitRestoreTailCallReg(MacroAssembler& masm)
{
    // No-op on ARM64 because the link register always holds the return
    // address; there is nothing on the stack to restore it from.
}
inline void
EmitRepushTailCallReg(MacroAssembler& masm)
{
    // No-op on ARM64 because the link register always holds the return
    // address; it never needs to be re-pushed.
}
// Emit a call into an IC chain. The movWithPatch immediate is later patched
// to locate the ICEntry; |patchOffset| receives the offset of that patchable
// move so the caller can record it.
inline void
EmitCallIC(CodeOffsetLabel* patchOffset, MacroAssembler& masm)
{
    // Move ICEntry offset into ICStubReg (ImmWord(-1) is a placeholder that
    // is patched after code generation).
    CodeOffsetLabel offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
    *patchOffset = offset;

    // Load stub pointer into ICStubReg
    masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);

    // Load stubcode pointer from BaselineStubEntry.
    // R2 won't be active when we call ICs, so we can use r0.
    MOZ_ASSERT(R2 == ValueOperand(r0));
    masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);

    // Call the stubcode via a direct branch-and-link.
    masm.Blr(x0);
}
// Tail-jump into the type-monitor chain of the current stub. Unlike
// EmitCallIC this does not link: the monitor stub returns directly to the
// original caller.
inline void
EmitEnterTypeMonitorIC(MacroAssembler& masm,
                       size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
{
    // This is expected to be called from within an IC, when ICStubReg is
    // properly initialized to point to the stub.
    masm.loadPtr(Address(ICStubReg, (uint32_t) monitorStubOffset), ICStubReg);

    // Load stubcode pointer from BaselineStubEntry.
    // R2 won't be active when we call ICs, so we can use r0.
    MOZ_ASSERT(R2 == ValueOperand(r0));
    masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);

    // Jump to the stubcode.
    masm.Br(x0);
}
// Return from an IC stub to the code that invoked it.
inline void
EmitReturnFromIC(MacroAssembler& masm)
{
    masm.abiret(); // Defaults to lr.
}
// Replace the IC return address. On ARM64 the return address lives in lr,
// so this is a register move rather than a stack store.
inline void
EmitChangeICReturnAddress(MacroAssembler& masm, Register reg)
{
    masm.movePtr(reg, lr);
}
// Tail-call a VM wrapper from within an IC stub. |argSize| is the size in
// bytes of the VMFunction arguments already pushed on the stack; it is
// excluded from the frame size stored for GC marking.
inline void
EmitTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize)
{
    // We assume that R0 has been pushed, and R2 is unused.
    MOZ_ASSERT(R2 == ValueOperand(r0));

    // Compute frame size into w0. Used below in makeFrameDescriptor().
    masm.Sub(x0, BaselineFrameReg64, masm.GetStackPointer64());
    masm.Add(w0, w0, Operand(BaselineFrame::FramePointerOffset));

    // Store frame size without VMFunction arguments for GC marking.
    {
        vixl::UseScratchRegisterScope temps(&masm.asVIXL());
        const ARMRegister scratch32 = temps.AcquireW();
        masm.Sub(scratch32, w0, Operand(argSize));
        masm.store32(scratch32.asUnsized(),
                     Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
    }

    // Push frame descriptor (minus the return address) and perform the tail call.
    MOZ_ASSERT(ICTailCallReg == lr);
    masm.makeFrameDescriptor(r0, JitFrame_BaselineJS);
    masm.push(r0);

    // The return address will be pushed by the VM wrapper, for compatibility
    // with direct calls. Refer to the top of generateVMWrapper().
    // ICTailCallReg (lr) already contains the return address (as we keep
    // it there through the stub calls).
    masm.branch(target);
}
// Build a BaselineStub frame descriptor in |reg|: the tagged size of the
// stub frame, measured from the baseline frame pointer down to the stack
// pointer, excluding two words (presumably the descriptor and return address
// pushed around the call — confirm against EmitCallVM).
inline void
EmitCreateStubFrameDescriptor(MacroAssembler& masm, Register reg)
{
    const ARMRegister dest64(reg, 64);

    // dest = BaselineFrameReg - (sp - 2 * sizeof(void*)).
    masm.Sub(dest64, masm.GetStackPointer64(), Operand(sizeof(void*) * 2));
    masm.Sub(dest64, BaselineFrameReg64, dest64);

    // Tag the computed size with the BaselineStub frame type.
    masm.makeFrameDescriptor(reg, JitFrame_BaselineStub);
}
// Call a VM wrapper from within a stub frame: push a stub frame descriptor
// (built in r0) and perform a linked call to |target|.
inline void
EmitCallVM(JitCode* target, MacroAssembler& masm)
{
    EmitCreateStubFrameDescriptor(masm, r0);
    masm.push(r0);
    masm.call(target);
}
// Size of values pushed by EmitEnterStubFrame: four words (frame descriptor,
// ICTailCallReg, ICStubReg, BaselineFrameReg — see the push there).
static const uint32_t STUB_FRAME_SIZE = 4 * sizeof(void*);
// Offset from the stack pointer to the saved ICStubReg within the stub frame
// — TODO confirm against the push order in EmitEnterStubFrame.
static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = sizeof(void*);
// Push a BaselineStub frame on top of the current baseline frame and make
// BaselineFrameReg point at it. |scratch| is clobbered and must not be the
// tail-call (link) register, which is saved here.
inline void
EmitEnterStubFrame(MacroAssembler& masm, Register scratch)
{
    MOZ_ASSERT(scratch != ICTailCallReg);

    // Compute frame size.
    masm.Add(ARMRegister(scratch, 64), BaselineFrameReg64, Operand(BaselineFrame::FramePointerOffset));
    masm.Sub(ARMRegister(scratch, 64), ARMRegister(scratch, 64), masm.GetStackPointer64());

    masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));

    // Note: when making changes here, don't forget to update STUB_FRAME_SIZE.

    // Push frame descriptor and return address.
    // Save old frame pointer, stack pointer, and stub reg.
    masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS);
    masm.push(scratch, ICTailCallReg, ICStubReg, BaselineFrameReg);

    // Update the frame register.
    masm.Mov(BaselineFrameReg64, masm.GetStackPointer64());

    // Stack should remain 16-byte aligned.
    masm.checkStackAlignment();
}
// Tear down the frame pushed by EmitEnterStubFrame, restoring the saved
// registers. |calledIntoIon| selects how the stack pointer is recovered.
inline void
EmitLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false)
{
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    const ARMRegister scratch64 = temps.AcquireX();

    // Ion frames do not save and restore the frame pointer. If we called
    // into Ion, we have to restore the stack pointer from the frame descriptor.
    // If we performed a VM call, the descriptor has been popped already so
    // in that case we use the frame pointer.
    if (calledIntoIon) {
        // Pop the descriptor, extract the frame size, and unwind by that much.
        masm.pop(scratch64.asUnsized());
        masm.Lsr(scratch64, scratch64, FRAMESIZE_SHIFT);
        masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(), scratch64);
    } else {
        masm.Mov(masm.GetStackPointer64(), BaselineFrameReg64);
    }

    // Pop values, discarding the frame descriptor.
    masm.pop(BaselineFrameReg, ICStubReg, ICTailCallReg, scratch64.asUnsized());

    // Stack should remain 16-byte aligned.
    masm.checkStackAlignment();
}
// Spill |values| (1 or 2) of the IC ValueOperands onto the stack so their
// registers are free for scratch use. Reversed by EmitUnstowICValues.
inline void
EmitStowICValues(MacroAssembler& masm, int values)
{
    if (values == 1) {
        // Stow R0.
        masm.pushValue(R0);
    } else if (values == 2) {
        // Stow R0 and R1 with a single paired push.
        masm.push(R0.valueReg(), R1.valueReg());
    } else {
        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Expected 1 or 2 values");
    }
}
// Restore the |values| (1 or 2) ValueOperands saved by EmitStowICValues,
// or simply drop them from the stack when |discard| is set.
inline void
EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
{
    MOZ_ASSERT(values >= 0 && values <= 2);
    if (values == 1) {
        // Unstow (or drop) R0.
        if (discard)
            masm.Drop(Operand(sizeof(Value)));
        else
            masm.popValue(R0);
    } else if (values == 2) {
        // Unstow (or drop) R0 and R1; pop order mirrors the paired push.
        if (discard)
            masm.Drop(Operand(sizeof(Value) * 2));
        else
            masm.pop(R1.valueReg(), R0.valueReg());
    } else {
        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Expected 1 or 2 values");
    }
}
// Call the type-update IC chain for the value in R0. If the inline chain
// rejects the value, fall back to the VM via |code|. The object being
// updated is a boxed Value at |objectOffset| from the stack top.
inline void
EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
{
    // R0 contains the value that needs to be typechecked.
    // The object we're updating is a boxed Value on the stack, at offset
    // objectOffset from stack top, excluding the return address.
    MOZ_ASSERT(R2 == ValueOperand(r0));

    // Save the current ICStubReg to stack, as well as the TailCallReg,
    // since on AArch64, the LR is live.
    masm.push(ICStubReg, ICTailCallReg);

    // This is expected to be called from within an IC, when ICStubReg
    // is properly initialized to point to the stub.
    masm.loadPtr(Address(ICStubReg, (int32_t)ICUpdatedStub::offsetOfFirstUpdateStub()),
                 ICStubReg);

    // Load stubcode pointer from ICStubReg into ICTailCallReg.
    masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), ICTailCallReg);

    // Call the stubcode.
    masm.Blr(ARMRegister(ICTailCallReg, 64));

    // Restore the old stub reg and tailcall reg.
    masm.pop(ICTailCallReg, ICStubReg);

    // The update IC will store 0 or 1 in R1.scratchReg() reflecting if the
    // value in R0 type-checked properly or not.
    Label success;
    masm.cmp32(R1.scratchReg(), Imm32(1));
    masm.j(Assembler::Equal, &success);

    // If the IC failed, then call the update fallback function.
    EmitEnterStubFrame(masm, R1.scratchReg());

    masm.loadValue(Address(masm.getStackPointer(), STUB_FRAME_SIZE + objectOffset), R1);

    masm.push(R0.valueReg(), R1.valueReg(), ICStubReg);

    // Load previous frame pointer, push BaselineFrame*.
    masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
    masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());

    EmitCallVM(code, masm);
    EmitLeaveStubFrame(masm);

    // Success at end.
    masm.bind(&success);
}
// Emit a pre-write barrier for the slot at |addr| holding a value of |type|.
template <typename AddrType>
inline void
EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
{
    // On AArch64, lr is clobbered by patchableCallPreBarrier. Save it first.
    masm.push(lr);
    masm.patchableCallPreBarrier(addr, type);
    masm.pop(lr);
}
// Advance to the next stub in the IC chain after a guard failure.
inline void
EmitStubGuardFailure(MacroAssembler& masm)
{
    // NOTE: This routine assumes that the stub guard code left the stack in the
    // same state it was in when it was entered.

    // BaselineStubEntry points to the current stub.

    // Load next stub into ICStubReg.
    masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg);

    // Load stubcode pointer from BaselineStubEntry into scratch register.
    masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);

    // Return address is already loaded, just jump to the next stubcode.
    masm.Br(x0);
}
} // namespace jit
} // namespace js
#endif // jit_arm64_SharedICHelpers_arm64_h

View File

@ -0,0 +1,60 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_arm64_SharedICRegisters_arm64_h
#define jit_arm64_SharedICRegisters_arm64_h
#include "jit/MacroAssembler.h"
namespace js {
namespace jit {
// Must be a callee-saved register for preservation around generateEnterJIT().
static constexpr Register BaselineFrameReg = r23;
static constexpr ARMRegister BaselineFrameReg64 = { BaselineFrameReg, 64 };

// The BaselineStackReg cannot be sp, because that register is treated
// as xzr/wzr during load/store operations.
static constexpr Register BaselineStackReg = PseudoStackPointer;

// ValueOperands R0, R1, and R2.
// R0 == JSReturnReg, and R2 uses registers not preserved across calls.
// R1 value should be preserved across calls.
static constexpr Register R0_ = r2;
static constexpr Register R1_ = r19;
static constexpr Register R2_ = r0;

static constexpr ValueOperand R0(R0_);
static constexpr ValueOperand R1(R1_);
static constexpr ValueOperand R2(R2_);

// ICTailCallReg and ICStubReg use registers that are not preserved across
// calls. ICTailCallReg is r30 — the AArch64 link register — so the return
// address for IC calls is kept in a register, not on the stack.
static constexpr Register ICTailCallReg = r30;
static constexpr Register ICStubReg = r9;

// ExtractTemps must be callee-save registers:
// ICSetProp_Native::Compiler::generateStubCode() stores the object
// in ExtractTemp0, but then calls callTypeUpdateIC(), which clobbers
// caller-save registers.
// They should also not be the scratch registers ip0 or ip1,
// since those get clobbered all the time.
static constexpr Register ExtractTemp0 = r24;
static constexpr Register ExtractTemp1 = r25;

// NOTE(review): the following comment appears carried over from the 32-bit
// ARM port ("R7 - R9", "BaselineTailCallReg"); on this ARM64 file the
// tail-call register is ICTailCallReg (r30) above — confirm and update.
// R7 - R9 are generally available for use within stubcode.
// Note that BaselineTailCallReg is actually just the link
// register. In ARM code emission, we do not clobber BaselineTailCallReg
// since we keep the return address for calls there.

// FloatReg0 must be equal to ReturnFloatReg.
static constexpr FloatRegister FloatReg0 = { FloatRegisters::v0 };
static constexpr FloatRegister FloatReg1 = { FloatRegisters::v1 };
} // namespace jit
} // namespace js
#endif // jit_arm64_SharedICRegisters_arm64_h