Bug 840285 - ARM support for Ion asm.js (r=luke,mjrosenb also landing some of doug crosher's patches)

This commit is contained in:
Douglas Crosher 2013-03-25 18:22:45 +11:00
parent 1d0fa80c9d
commit ee4c996d7c
37 changed files with 1375 additions and 236 deletions

View File

@ -4370,7 +4370,6 @@ if test "$ACCESSIBILITY" -a "$MOZ_ENABLE_GTK2" ; then
AC_DEFINE_UNQUOTED(ATK_REV_VERSION, $ATK_REV_VERSION)
fi
dnl ECMAScript Internationalization API Support (uses ICU)
dnl ========================================================

View File

@ -1051,9 +1051,7 @@ class ModuleCompiler
FuncPtrTableVector funcPtrTables_;
ExitMap exits_;
MathNameMap standardLibraryMathNames_;
GlobalAccessVector globalAccesses_;
Label stackOverflowLabel_;
Label operationCallbackLabel_;
@ -1273,9 +1271,13 @@ class ModuleCompiler
return globals_.putNew(varName, g);
}
bool collectAccesses(MIRGenerator &gen) {
#ifdef JS_CPU_ARM
if (!module_->addBoundsChecks(gen.asmBoundsChecks()))
return false;
#else
if (!module_->addHeapAccesses(gen.heapAccesses()))
return false;
#endif
for (unsigned i = 0; i < gen.globalAccesses().length(); i++) {
if (!globalAccesses_.append(gen.globalAccesses()[i]))
return false;
@ -1331,10 +1333,16 @@ class ModuleCompiler
void setExitOffset(unsigned exitIndex) {
JS_ASSERT(currentPass_ == 3);
#if defined(JS_CPU_ARM)
masm_.flush();
#endif
module_->exit(exitIndex).initCodeOffset(masm_.size());
}
void setEntryOffset(unsigned exportIndex) {
JS_ASSERT(currentPass_ == 3);
#if defined(JS_CPU_ARM)
masm_.flush();
#endif
module_->exportedFunction(exportIndex).initCodeOffset(masm_.size());
}
@ -1406,11 +1414,19 @@ class ModuleCompiler
JS_ASSERT(elemIndex == module_->numFuncPtrTableElems());
// Global accesses in function bodies
#ifdef JS_CPU_ARM
JS_ASSERT(globalAccesses_.length() == 0);
// The AsmJSHeapAccess offsets need to be updated to reflect the
// "actualOffset" (an ARM distinction).
module_->convertBoundsChecksToActualOffset(masm_);
#else
for (unsigned i = 0; i < globalAccesses_.length(); i++) {
AsmJSGlobalAccess access = globalAccesses_[i];
masm_.patchAsmJSGlobalAccess(access.offset, code, codeBytes, access.globalDataOffset);
}
#endif
// The AsmJSHeapAccess offsets need to be updated to reflect the
// "actualOffset" (an ARM distinction).
for (unsigned i = 0; i < module_->numHeapAccesses(); i++) {
@ -4378,8 +4394,6 @@ CheckFunctionBody(ModuleCompiler &m, ModuleCompiler::Func &func, LifoAlloc &lifo
return mirGen;
}
static const unsigned CodeAlignment = 8;
static bool
GenerateAsmJSCode(ModuleCompiler &m, ModuleCompiler::Func &func,
MIRGenerator &mirGen, LIRGraph &lir)
@ -4684,7 +4698,7 @@ StackDecrementForCall(MacroAssembler &masm, const MIRTypeVector &argTypes, unsig
static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * STACK_SLOT_SIZE +
NonVolatileRegs.fpus().size() * sizeof(double);
#ifndef JS_CPU_ARM
static bool
GenerateEntry(ModuleCompiler &m, const AsmJSModule::ExportedFunction &exportedFunc)
{
@ -4784,6 +4798,118 @@ GenerateEntry(ModuleCompiler &m, const AsmJSModule::ExportedFunction &exportedFu
masm.ret();
return true;
}
#else
static bool
GenerateEntry(ModuleCompiler &m, const AsmJSModule::ExportedFunction &exportedFunc)
{
const ModuleCompiler::Func &func = *m.lookupFunction(exportedFunc.name());
MacroAssembler &masm = m.masm();
// In contrast to the X64 system ABI, the Ion convention is that all
// registers are clobbered by calls. Thus, we must save the caller's
// non-volatile registers.
//
// NB: GenerateExits assumes that masm.framePushed() == 0 before
// PushRegsInMask(NonVolatileRegs).
masm.setFramePushed(0);
masm.PushRegsInMask(NonVolatileRegs);
JS_ASSERT(masm.framePushed() == FramePushedAfterSave);
JS_ASSERT(masm.framePushed() % 8 == 0);
// Remember the stack pointer in the current AsmJSActivation. This will be
// used by error exit paths to set the stack pointer back to what it was
// right after the (C++) caller's non-volatile registers were saved so that
// they can be restored.
LoadAsmJSActivationIntoRegister(masm, r9);
masm.ma_str(StackPointer, Address(r9, AsmJSActivation::offsetOfErrorRejoinSP()));
// masm.storeErrorRejoinSp();
// Move the parameters into non-argument registers since we are about to
// clobber these registers with the contents of argv.
Register argv = r9;
masm.movePtr(IntArgReg1, GlobalReg); // globalData
masm.movePtr(IntArgReg0, argv); // argv
masm.ma_ldr(Operand(GlobalReg, Imm32(m.module().heapOffset())), HeapReg);
// Remember argv so that we can load argv[0] after the call.
JS_ASSERT(masm.framePushed() % 8 == 0);
masm.Push(argv);
JS_ASSERT(masm.framePushed() % 8 == 4);
// Determine how many stack slots we need to hold arguments that don't fit
// in registers.
unsigned numStackArgs = 0;
for (ABIArgIter iter(func.argMIRTypes()); !iter.done(); iter++) {
if (iter->kind() == ABIArg::Stack)
numStackArgs++;
}
// Before calling, we must ensure sp % 8 == 0. Since (sp % 8) = 4 here
// (see the assert below), we need to push 4 (mod 8) bytes.
//JS_ASSERT(AlignmentAtPrologue == 8);
JS_ASSERT(masm.framePushed() % 8 == 4);
unsigned stackDec = numStackArgs * sizeof(double) + (masm.framePushed() >> 2) % 2 * sizeof(uint32_t);
masm.reserveStack(stackDec);
//JS_ASSERT(masm.framePushed() % 8 == 0);
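// A worked instance of the stackDec arithmetic above: before reserveStack,
// framePushed % 8 == 4, so (framePushed >> 2) % 2 == 1 and one extra word
// is reserved; every stack-argument slot is double-sized, so the stack is
// now 8-byte aligned as the EABI requires.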
if (getenv("GDB_BREAK")) {
masm.breakpoint(js::ion::Assembler::Always);
}
// Copy parameters out of argv into the registers/stack-slots specified by
// the system ABI.
for (ABIArgIter iter(func.argMIRTypes()); !iter.done(); iter++) {
unsigned argOffset = iter.index() * sizeof(uint64_t);
switch (iter->kind()) {
case ABIArg::GPR:
masm.ma_ldr(Operand(argv, argOffset), iter->gpr());
break;
case ABIArg::FPU:
#if defined(JS_CPU_ARM_HARDFP)
masm.ma_vldr(Operand(argv, argOffset), iter->fpu());
#else
// The ABI expects the double value in a pair of GPRs. Figure out which
// GPRs those are and use them explicitly.
masm.ma_dataTransferN(IsLoad, 64, true, argv, Imm32(argOffset), Register::FromCode(iter->fpu().code()*2));
#endif
break;
case ABIArg::Stack:
if (iter.mirType() == MIRType_Int32) {
masm.memMove32(Address(argv, argOffset), Address(StackPointer, iter->offsetFromArgBase()));
} else {
masm.memMove64(Address(argv, argOffset), Address(StackPointer, iter->offsetFromArgBase()));
}
break;
}
}
masm.ma_vimm(js_NaN, NANReg);
masm.call(func.codeLabel());
// Recover argv.
masm.freeStack(stackDec);
masm.Pop(argv);
// Store the result in argv[0].
switch (func.returnType().which()) {
case RetType::Void:
break;
case RetType::Signed:
masm.storeValue(JSVAL_TYPE_INT32, ReturnReg, Address(argv, 0));
break;
case RetType::Double:
masm.ma_vxfer(r0, r1, d0);
masm.canonicalizeDouble(ReturnFloatReg);
masm.storeDouble(ReturnFloatReg, Address(argv, 0));
break;
}
masm.PopRegsInMask(NonVolatileRegs);
masm.ma_mov(Imm32(true), ReturnReg);
masm.abiret();
return true;
}
#endif
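// Under the softfp convention assumed throughout the !JS_CPU_ARM_HARDFP
// paths above, a double modeled as living in dN is actually carried in the
// GPR pair (r[2N], r[2N+1]). A minimal sketch of that mapping (the helper
// name is hypothetical, not part of this patch):
static void SoftFPGPRPair(FloatRegister fr, Register *lo, Register *hi)
{
    int srcId = fr.code() * 2;            // dN <-> (r[2N], r[2N+1])
    *lo = Register::FromCode(srcId);      // low word of the double
    *hi = Register::FromCode(srcId + 1);  // high word of the double
}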
static bool
GenerateEntries(ModuleCompiler &m)
@ -4848,7 +4974,7 @@ GenerateFFIExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit, u
MacroAssembler &masm = m.masm();
masm.align(CodeAlignment);
m.setExitOffset(exitIndex);
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
MIRType typeArray[] = { MIRType_Pointer, // cx
MIRType_Pointer, // exitDatum
MIRType_Int32, // argc
@ -4968,6 +5094,90 @@ GenerateFFIExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit, u
// registers to restore.
masm.freeStack(stackDec);
masm.ret();
#else
const unsigned arrayLength = Max<size_t>(1, exit.argTypes().length());
const unsigned arraySize = arrayLength * sizeof(Value);
const unsigned reserveSize = AlignBytes(arraySize, StackAlignment) +
ShadowStackSpace;
const unsigned callerArgsOffset = reserveSize + NativeFrameSize + sizeof(int32_t);
masm.setFramePushed(0);
masm.Push(lr);
masm.reserveStack(reserveSize + sizeof(int32_t));
for (ABIArgIter i(exit.argTypes()); !i.done(); i++) {
Address dstAddr = Address(StackPointer, ShadowStackSpace + i.index() * sizeof(Value));
switch (i->kind()) {
case ABIArg::GPR:
masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dstAddr);
break;
case ABIArg::FPU: {
#ifndef JS_CPU_ARM_HARDFP
FloatRegister fr = i->fpu();
int srcId = fr.code() * 2;
masm.ma_vxfer(Register::FromCode(srcId), Register::FromCode(srcId+1), fr);
#endif
masm.canonicalizeDouble(i->fpu());
masm.storeDouble(i->fpu(), dstAddr);
break;
}
case ABIArg::Stack:
if (i.mirType() == MIRType_Int32) {
Address src(StackPointer, callerArgsOffset + i->offsetFromArgBase());
masm.memIntToValue(src, dstAddr);
} else {
JS_ASSERT(i.mirType() == MIRType_Double);
Address src(StackPointer, callerArgsOffset + i->offsetFromArgBase());
masm.loadDouble(src, ScratchFloatReg);
masm.canonicalizeDouble(ScratchFloatReg);
masm.storeDouble(ScratchFloatReg, dstAddr);
}
break;
}
}
// argument 0: cx
Register activation = IntArgReg3;
LoadAsmJSActivationIntoRegister(masm, activation);
LoadJSContextFromActivation(masm, activation, IntArgReg0);
// argument 1: exitDatum
masm.lea(Operand(GlobalReg, m.module().exitIndexToGlobalDataOffset(exitIndex)), IntArgReg1);
// argument 2: argc
masm.mov(Imm32(exit.argTypes().length()), IntArgReg2);
// argument 3: argv
Address argv(StackPointer, ShadowStackSpace);
masm.lea(Operand(argv), IntArgReg3);
AssertStackAlignment(masm);
switch (exit.use().which()) {
case Use::NoCoercion:
masm.call(ImmWord(JS_FUNC_TO_DATA_PTR(void*, &InvokeFromAsmJS_Ignore)));
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
break;
case Use::ToInt32:
masm.call(ImmWord(JS_FUNC_TO_DATA_PTR(void*, &InvokeFromAsmJS_ToInt32)));
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.unboxInt32(argv, ReturnReg);
break;
case Use::ToNumber:
masm.call(ImmWord(JS_FUNC_TO_DATA_PTR(void*, &InvokeFromAsmJS_ToNumber)));
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
#if defined(JS_CPU_ARM) && !defined(JS_CPU_ARM_HARDFP)
masm.loadValue(argv, softfpReturnOperand);
#else
masm.loadDouble(argv, ReturnFloatReg);
#endif
break;
case Use::AddOrSub:
JS_NOT_REACHED("Should have been a type error");
}
masm.freeStack(reserveSize + sizeof(int32_t));
masm.ret();
#endif
}
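// In effect, the ARM path of GenerateFFIExit above boxes each argument into
// a Value slot so the C++ invoke helpers see an ordinary argv (a sketch,
// not literal code from this patch):
//
//   Value argv[Max(1, argc)];
//   argv[i] = Int32Value(arg_i);    // int32 arguments
//   argv[i] = DoubleValue(arg_i);   // double arguments, canonicalized first
//   ok = InvokeFromAsmJS_Ignore(cx, exitDatum, argc, argv);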
// The stack-overflow exit is called when the stack limit has definitely been
@ -5001,12 +5211,16 @@ GenerateStackOverflowExit(ModuleCompiler &m, Label *throwLabel)
LoadAsmJSActivationIntoRegister(masm, IntArgReg0);
LoadJSContextFromActivation(masm, IntArgReg0, IntArgReg0);
#else
# error "ARM here"
#endif
// On ARM, we should always be aligned; just do the context manipulation
// and make the call.
LoadAsmJSActivationIntoRegister(masm, IntArgReg0);
LoadJSContextFromActivation(masm, IntArgReg0, IntArgReg0);
#endif
void (*pf)(JSContext*) = js_ReportOverRecursed;
masm.call(ImmWord(JS_FUNC_TO_DATA_PTR(void*, pf)));
masm.jmp(throwLabel);
masm.jump(throwLabel);
}
// The operation-callback exit is called from arbitrarily-interrupted asm.js
@ -5024,6 +5238,7 @@ GenerateOperationCallbackExit(ModuleCompiler &m, Label *throwLabel)
masm.align(CodeAlignment);
masm.bind(&m.operationCallbackLabel());
#ifndef JS_CPU_ARM
// Be very careful here not to perturb the machine state before saving it
// to the stack. In particular, add/sub instructions may set conditions in
// the flags register.
@ -5070,6 +5285,57 @@ GenerateOperationCallbackExit(ModuleCompiler &m, Label *throwLabel)
masm.PopRegsInMask(AllRegs); // restore all GP/FP registers
masm.popFlags(); // after this, nothing that sets conditions
masm.ret(); // pop resumePC into PC
#else
masm.setFramePushed(0); // set to zero so we can use masm.framePushed() below
masm.PushRegsInMask(RegisterSet(GeneralRegisterSet(Registers::AllMask & ~(1<<Registers::sp)), FloatRegisterSet(uint32_t(0)))); // save all GP registers, except sp
// Save both the APSR and FPSCR in non-volatile registers.
masm.as_mrs(r4);
masm.as_vmrs(r5);
// Save the stack pointer in a non-volatile register.
masm.mov(sp,r6);
// Align the stack.
masm.ma_and(Imm32(~7), sp, sp);
// Store resumePC into the return PC stack slot.
LoadAsmJSActivationIntoRegister(masm, IntArgReg0);
masm.loadPtr(Address(IntArgReg0, AsmJSActivation::offsetOfResumePC()), IntArgReg1);
masm.storePtr(IntArgReg1, Address(r6, 14 * sizeof(uint32_t*)));
// argument 0: cx
masm.loadPtr(Address(IntArgReg0, AsmJSActivation::offsetOfContext()), IntArgReg0);
masm.PushRegsInMask(RegisterSet(GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllMask))); // save all FP registers
JSBool (*pf)(JSContext*) = js_HandleExecutionInterrupt;
masm.call(ImmWord(JS_FUNC_TO_DATA_PTR(void*, pf)));
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
// Restore the machine state to before the interrupt. This will set the pc!
masm.PopRegsInMask(RegisterSet(GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllMask))); // restore all FP registers
masm.mov(r6,sp);
masm.as_vmsr(r5);
masm.as_msr(r4);
// Restore all GP registers
masm.startDataTransferM(IsLoad, sp, IA, WriteBack);
masm.transferReg(r0);
masm.transferReg(r1);
masm.transferReg(r2);
masm.transferReg(r3);
masm.transferReg(r4);
masm.transferReg(r5);
masm.transferReg(r6);
masm.transferReg(r7);
masm.transferReg(r8);
masm.transferReg(r9);
masm.transferReg(r10);
masm.transferReg(r11);
masm.transferReg(r12);
masm.transferReg(lr);
masm.finishDataTransfer();
masm.ret();
#endif
}
// If an exception is thrown, simply pop all frames (since asm.js does not
@ -5088,12 +5354,14 @@ GenerateThrowExit(ModuleCompiler &m, Label *throwLabel)
LoadAsmJSActivationIntoRegister(masm, activation);
masm.setFramePushed(FramePushedAfterSave);
masm.mov(Operand(activation, AsmJSActivation::offsetOfErrorRejoinSP()), StackPointer);
masm.loadPtr(Address(activation, AsmJSActivation::offsetOfErrorRejoinSP()), StackPointer);
masm.PopRegsInMask(NonVolatileRegs);
JS_ASSERT(masm.framePushed() == 0);
masm.mov(Imm32(0), ReturnReg);
masm.ret();
masm.abiret();
}
static bool

View File

@ -13,10 +13,7 @@
# include <mach/mach.h>
#endif
// asm.js compilation is only available on desktop x86/x64 at the moment.
// Don't panic, mobile support is coming soon.
#if defined(JS_ION) && \
(defined(JS_CPU_X86) || defined(JS_CPU_X64))
#if defined(JS_ION)
# define JS_ASMJS
#endif

View File

@ -14,6 +14,8 @@
#include "AsmJSModule.h"
#include "frontend/BytecodeCompiler.h"
#include "Ion.h"
using namespace js;
using namespace js::ion;
using namespace mozilla;
@ -200,6 +202,12 @@ DynamicallyLinkModule(JSContext *cx, CallArgs args, AsmJSModule &module)
JSC::X86Assembler::setPointer(access.patchLengthAt(code), heapLength);
JSC::X86Assembler::setPointer(access.patchOffsetAt(code), heapOffset);
}
#elif defined(JS_CPU_ARM)
// Now that the length of the array is known, patch all of the bounds
// check sites with the new length.
ion::IonContext ic(cx, NULL);
module.patchBoundsChecks(heap->byteLength());
#endif
}
@ -323,8 +331,13 @@ CallAsmJS(JSContext *cx, unsigned argc, Value *vp)
AsmJSActivation activation(cx, module);
// Call into generated code.
#ifdef JS_CPU_ARM
if (!func.code()(coercedArgs.begin(), module.globalData()))
return false;
#else
if (!func.code()(coercedArgs.begin()))
return false;
#endif
}
switch (func.returnType()) {

View File

@ -14,6 +14,8 @@
#include "jsscript.h"
#include "jstypedarrayinlines.h"
#include "IonMacroAssembler.h"
namespace js {
// The basis of the asm.js type system is the ECMAScript-defined coercions
@ -168,8 +170,11 @@ class AsmJSModule
return u.code_;
}
};
#ifdef JS_CPU_ARM
typedef int32_t (*CodePtr)(uint64_t *args, uint8_t *global);
#else
typedef int32_t (*CodePtr)(uint64_t *args);
#endif
typedef Vector<AsmJSCoercion, 0, SystemAllocPolicy> ArgCoercionVector;
@ -296,11 +301,17 @@ class AsmJSModule
typedef Vector<Global, 0, SystemAllocPolicy> GlobalVector;
typedef Vector<Exit, 0, SystemAllocPolicy> ExitVector;
typedef Vector<ion::AsmJSHeapAccess, 0, SystemAllocPolicy> HeapAccessVector;
#if defined(JS_CPU_ARM)
typedef Vector<ion::AsmJSBoundsCheck, 0, SystemAllocPolicy> BoundsCheckVector;
#endif
GlobalVector globals_;
ExitVector exits_;
ExportedFunctionVector exports_;
HeapAccessVector heapAccesses_;
#if defined(JS_CPU_ARM)
BoundsCheckVector boundsChecks_;
#endif
uint32_t numGlobalVars_;
uint32_t numFFIs_;
uint32_t numFuncPtrTableElems_;
@ -322,11 +333,6 @@ class AsmJSModule
PostLinkFailureInfo postLinkFailureInfo_;
uint8_t *globalData() const {
JS_ASSERT(code_);
return code_ + codeBytes_;
}
public:
AsmJSModule(JSContext *cx)
: numGlobalVars_(0),
@ -482,6 +488,11 @@ class AsmJSModule
//
// NB: The list of exits is extended while emitting function bodies and
// thus exits must be at the end of the list to avoid invalidating indices.
uint8_t *globalData() const {
JS_ASSERT(code_);
return code_ + codeBytes_;
}
size_t globalDataBytes() const {
return sizeof(void*) +
numGlobalVars_ * sizeof(uint64_t) +
@ -551,6 +562,41 @@ class AsmJSModule
const ion::AsmJSHeapAccess &heapAccess(unsigned i) const {
return heapAccesses_[i];
}
#if defined(JS_CPU_ARM)
bool addBoundsChecks(const ion::AsmJSBoundsCheckVector &checks) {
if (!boundsChecks_.reserve(boundsChecks_.length() + checks.length()))
return false;
for (size_t i = 0; i < checks.length(); i++)
boundsChecks_.infallibleAppend(checks[i]);
return true;
}
void convertBoundsChecksToActualOffset(ion::MacroAssembler &masm) {
for (unsigned i = 0; i < boundsChecks_.length(); i++)
boundsChecks_[i].setOffset(masm.actualOffset(boundsChecks_[i].offset()));
}
void patchBoundsChecks(unsigned heapSize) {
ion::AutoFlushCache afc("patchBoundsCheck");
int bits = -1;
JS_CEILING_LOG2(bits, heapSize);
if (bits == -1) {
// Tried to size the array to 0; that is bad, but not horrible.
return;
}
for (unsigned i = 0; i < boundsChecks_.length(); i++)
ion::Assembler::updateBoundsCheck(bits, (ion::Instruction*)(boundsChecks_[i].offset() + code_));
}
unsigned numBoundsChecks() const {
return boundsChecks_.length();
}
const ion::AsmJSBoundsCheck &boundsCheck(unsigned i) const {
return boundsChecks_[i];
}
#endif
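// The patched check relies on the heap length being a power of two (with
// JS_CEILING_LOG2 rounding up otherwise): index >> log2(heapSize) is zero
// exactly when the access is in bounds. A sketch of the predicate the
// rewritten instruction computes:
//
//   static bool InBounds(uint32_t index, uint32_t logHeapSize) {
//       return (index >> logHeapSize) == 0;  // index < (1u << logHeapSize)
//   }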
void takeOwnership(JSC::ExecutablePool *pool, uint8_t *code, size_t codeBytes, size_t totalBytes) {
JS_ASSERT(uintptr_t(code) % gc::PageSize == 0);

View File

@ -283,8 +283,13 @@ LookupHeapAccess(const AsmJSModule &module, uint8_t *pc)
//
// See: https://chromiumcodereview.appspot.com/10829122/
// See: http://code.google.com/p/android/issues/detail?id=34784
# if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
# if (defined(ANDROID)) && !defined(__BIONIC_HAVE_UCONTEXT_T)
# if defined(__arm__)
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
typedef struct sigcontext mcontext_t;

View File

@ -2973,26 +2973,6 @@ CodeGenerator::visitPowD(LPowD *ins)
return true;
}
bool
CodeGenerator::visitNegI(LNegI *ins)
{
Register input = ToRegister(ins->input());
JS_ASSERT(input == ToRegister(ins->output()));
masm.neg32(input);
return true;
}
bool
CodeGenerator::visitNegD(LNegD *ins)
{
FloatRegister input = ToFloatRegister(ins->input());
JS_ASSERT(input == ToFloatRegister(ins->output()));
masm.negateDouble(input);
return true;
}
bool
CodeGenerator::visitRandom(LRandom *ins)
{
@ -5922,10 +5902,21 @@ CodeGenerator::visitAsmJSCall(LAsmJSCall *ins)
{
MAsmJSCall *mir = ins->mir();
if (mir->spIncrement())
#if defined(JS_CPU_ARM) && !defined(JS_CPU_ARM_HARDFP)
for (unsigned i = 0; i < ins->numOperands(); i++) {
LAllocation *a = ins->getOperand(i);
if (a->isFloatReg()) {
FloatRegister fr = ToFloatRegister(a);
int srcId = fr.code() * 2;
masm.ma_vxfer(fr, Register::FromCode(srcId), Register::FromCode(srcId+1));
}
}
#endif
if (mir->spIncrement())
masm.freeStack(mir->spIncrement());
JS_ASSERT((AlignmentAtPrologue + masm.framePushed()) % StackAlignment == 0);
JS_ASSERT((AlignmentAtPrologue + masm.framePushed()) % StackAlignment == 0);
#ifdef DEBUG
Label ok;
JS_ASSERT(IsPowerOfTwo(StackAlignment));
@ -5957,6 +5948,16 @@ CodeGenerator::visitAsmJSCall(LAsmJSCall *ins)
bool
CodeGenerator::visitAsmJSParameter(LAsmJSParameter *lir)
{
#if defined(JS_CPU_ARM) && !defined(JS_CPU_ARM_HARDFP)
// Softfp transfers some double values in GPRs; undo this.
LAllocation *a = lir->getDef(0)->output();
if (a->isFloatReg()) {
FloatRegister fr = ToFloatRegister(a);
int srcId = fr.code() * 2;
masm.ma_vxfer(Register::FromCode(srcId), Register::FromCode(srcId+1), fr);
}
#endif
return true;
}
@ -5964,6 +5965,10 @@ bool
CodeGenerator::visitAsmJSReturn(LAsmJSReturn *lir)
{
// Don't emit a jump to the return label if this is the last block.
#if defined(JS_CPU_ARM) && !defined(JS_CPU_ARM_HARDFP)
if (lir->getOperand(0)->isFloatReg())
masm.ma_vxfer(d0, r0, r1);
#endif
if (current->mir() != *gen->graph().poBegin())
masm.jump(returnLabel_);
return true;

View File

@ -140,8 +140,6 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitAbsI(LAbsI *lir);
bool visitPowI(LPowI *lir);
bool visitPowD(LPowD *lir);
bool visitNegI(LNegI *lir);
bool visitNegD(LNegD *lir);
bool visitRandom(LRandom *lir);
bool visitMathFunctionD(LMathFunctionD *ins);
bool visitModD(LModD *ins);

View File

@ -19,7 +19,6 @@
namespace js {
namespace ion {
static const int CodeAlignment = 8;
class Linker
{
MacroAssembler &masm;

View File

@ -10,7 +10,7 @@
#include "LIR.h"
#include "IonSpewer.h"
#include "LIR-inl.h"
#include "shared/CodeGenerator-shared.h"
using namespace js;
using namespace js::ion;

View File

@ -111,12 +111,21 @@ class MIRGenerator
JS_ASSERT(compilingAsmJS());
return performsAsmJSCall_;
}
#ifndef JS_CPU_ARM
bool noteHeapAccess(AsmJSHeapAccess heapAccess) {
return asmJSHeapAccesses_.append(heapAccess);
}
const Vector<AsmJSHeapAccess, 0, IonAllocPolicy> &heapAccesses() const {
return asmJSHeapAccesses_;
}
#else
bool noteBoundsCheck(uint32_t offsetBefore) {
return asmJSBoundsChecks_.append(AsmJSBoundsCheck(offsetBefore));
}
const Vector<AsmJSBoundsCheck, 0, IonAllocPolicy> &asmBoundsChecks() const {
return asmJSBoundsChecks_;
}
#endif
bool noteGlobalAccess(unsigned offset, unsigned globalDataOffset) {
return asmJSGlobalAccesses_.append(AsmJSGlobalAccess(offset, globalDataOffset));
}
@ -138,7 +147,11 @@ class MIRGenerator
uint32_t maxAsmJSStackArgBytes_;
bool performsAsmJSCall_;
#ifdef JS_CPU_ARM
AsmJSBoundsCheckVector asmJSBoundsChecks_;
#else
AsmJSHeapAccessVector asmJSHeapAccesses_;
#endif
AsmJSGlobalAccessVector asmJSGlobalAccesses_;
};

View File

@ -309,9 +309,15 @@ class RegisterAllocator
{
if (FramePointer != InvalidReg && lir->mir()->instrumentedProfiling())
allRegisters_.take(AnyRegister(FramePointer));
#ifdef JS_CPU_X64
#if defined(JS_CPU_X64)
if (mir->compilingAsmJS())
allRegisters_.take(AnyRegister(HeapReg));
#elif defined(JS_CPU_ARM)
if (mir->compilingAsmJS()) {
allRegisters_.take(AnyRegister(HeapReg));
allRegisters_.take(AnyRegister(GlobalReg));
allRegisters_.take(AnyRegister(NANReg));
}
#endif
}

View File

@ -802,6 +802,20 @@ class AsmJSHeapAccess
typedef Vector<AsmJSHeapAccess, 0, IonAllocPolicy> AsmJSHeapAccessVector;
#ifdef JS_CPU_ARM
struct AsmJSBoundsCheck
{
unsigned offset_;
AsmJSBoundsCheck(unsigned offset)
: offset_(offset)
{}
void setOffset(uint32_t offset) { offset_ = offset; }
unsigned offset() {return offset_;}
};
typedef Vector<AsmJSBoundsCheck, 0, IonAllocPolicy> AsmJSBoundsCheckVector;
#endif
} // namespace ion
} // namespace js

View File

@ -33,6 +33,7 @@ static const int32_t INVALID_STACK_SLOT = -1;
static const int32_t NUNBOX32_TYPE_OFFSET = 4;
static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
static const uint32_t ShadowStackSpace = 0;
////
// These offsets are related to bailouts.
////

View File

@ -18,6 +18,84 @@
using namespace js;
using namespace js::ion;
ABIArgGenerator::ABIArgGenerator() :
#if defined(JS_CPU_ARM_HARDFP)
intRegIndex_(0),
floatRegIndex_(0),
#else
argRegIndex_(0),
#endif
stackOffset_(0),
current_()
{}
ABIArg
ABIArgGenerator::next(MIRType type)
{
#if defined(JS_CPU_ARM_HARDFP)
switch (type) {
case MIRType_Int32:
case MIRType_Pointer:
if (intRegIndex_ == NumIntArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint32_t);
break;
}
current_ = ABIArg(Register::FromCode(intRegIndex_));
intRegIndex_++;
break;
case MIRType_Double:
if (floatRegIndex_ == NumFloatArgRegs) {
static const int align = sizeof(double) - 1;
stackOffset_ = (stackOffset_ + align) & ~align;
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint64_t);
break;
}
current_ = ABIArg(FloatRegister::FromCode(floatRegIndex_));
floatRegIndex_++;
break;
default:
JS_NOT_REACHED("Unexpected argument type");
}
return current_;
#else
switch (type) {
case MIRType_Int32:
case MIRType_Pointer:
if (argRegIndex_ == NumIntArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint32_t);
break;
}
current_ = ABIArg(Register::FromCode(argRegIndex_));
argRegIndex_++;
break;
case MIRType_Double: {
unsigned alignedArgRegIndex_ = (argRegIndex_ + 1) & ~1;
if (alignedArgRegIndex_ + 1 > NumIntArgRegs) {
static const int align = sizeof(double) - 1;
stackOffset_ = (stackOffset_ + align) & ~align;
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint64_t);
argRegIndex_ = NumIntArgRegs;
break;
}
argRegIndex_ = alignedArgRegIndex_;
current_ = ABIArg(FloatRegister::FromCode(argRegIndex_ >> 1));
argRegIndex_+=2;
}
break;
default:
JS_NOT_REACHED("Unexpected argument type");
}
return current_;
#endif
}
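// A usage sketch of the softfp (#else) path above, assuming a hypothetical
// (int32, double, int32) signature: ints fill r0-r3, and a double consumes
// an aligned even/odd pair, after which arguments spill to the stack.
//
//   ABIArgGenerator gen;
//   gen.next(MIRType_Int32);   // r0
//   gen.next(MIRType_Double);  // d1, i.e. carried in the (r2, r3) pair
//   gen.next(MIRType_Int32);   // stack, offset 0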
const Register ABIArgGenerator::NonArgReturnVolatileReg0 = r4;
const Register ABIArgGenerator::NonArgReturnVolatileReg1 = r5;
// Encode a standard register when it is being used as src1, the dest, and
// an extra register. These should never be called with an InvalidReg.
uint32_t
@ -400,6 +478,11 @@ InstALU::checkOp1(Register rn)
{
return rn == toRN(*this);
}
Operand2
InstALU::extractOp2()
{
return Operand2(encode());
}
InstCMP *
InstCMP::asTHIS(const Instruction &i)
@ -412,7 +495,34 @@ InstCMP::asTHIS(const Instruction &i)
bool
InstCMP::isTHIS(const Instruction &i)
{
return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkDest(r0);
return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkDest(r0) && InstALU::asTHIS(i)->checkOp(op_cmp);
}
InstMOV *
InstMOV::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstMOV*) (&i);
return NULL;
}
bool
InstMOV::isTHIS(const Instruction &i)
{
return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkOp1(r0) && InstALU::asTHIS(i)->checkOp(op_mov);
}
Op2Reg
Operand2::toOp2Reg() {
return *(Op2Reg*)this;
}
O2RegImmShift
Op2Reg::toO2RegImmShift() {
return *(O2RegImmShift*)this;
}
O2RegRegShift
Op2Reg::toO2RegRegShift() {
return *(O2RegRegShift*)this;
}
Imm16::Imm16(Instruction &inst)
@ -1237,10 +1347,20 @@ BufferOffset
Assembler::align(int alignment)
{
BufferOffset ret;
while (!m_buffer.isAligned(alignment)) {
BufferOffset tmp = as_nop();
if (!ret.assigned())
ret = tmp;
if (alignment == 8) {
while (!m_buffer.isAligned(alignment)) {
BufferOffset tmp = as_nop();
if (!ret.assigned())
ret = tmp;
}
} else {
flush();
JS_ASSERT((alignment & (alignment - 1)) == 0);
while (size() & (alignment-1)) {
BufferOffset tmp = as_nop();
if (!ret.assigned())
ret = tmp;
}
}
return ret;
@ -1252,17 +1372,19 @@ Assembler::as_nop()
}
BufferOffset
Assembler::as_alu(Register dest, Register src1, Operand2 op2,
ALUOp op, SetCond_ sc, Condition c)
ALUOp op, SetCond_ sc, Condition c, Instruction *instdest)
{
return writeInst((int)op | (int)sc | (int) c | op2.encode() |
((dest == InvalidReg) ? 0 : RD(dest)) |
((src1 == InvalidReg) ? 0 : RN(src1)));
((src1 == InvalidReg) ? 0 : RN(src1)), (uint32_t*)instdest);
}
BufferOffset
Assembler::as_mov(Register dest, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_mov(Register dest, Operand2 op2, SetCond_ sc, Condition c, Instruction *instdest)
{
return as_alu(dest, InvalidReg, op2, op_mov, sc, c);
return as_alu(dest, InvalidReg, op2, op_mov, sc, c, instdest);
}
BufferOffset
Assembler::as_mvn(Register dest, Operand2 op2, SetCond_ sc, Condition c)
{
@ -1759,12 +1881,19 @@ Assembler::as_blx(Register r, Condition c)
BufferOffset
Assembler::as_bl(BOffImm off, Condition c)
{
m_buffer.markNextAsBranch();
return writeInst(((int)c) | op_bl | off.encode());
}
BufferOffset
Assembler::as_bl(Label *l, Condition c)
{
if (m_buffer.oom()) {
BufferOffset ret;
return ret;
}
//as_bkpt();
m_buffer.markNextAsBranch();
if (l->bound()) {
BufferOffset ret = as_nop();
as_bl(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
@ -1795,6 +1924,20 @@ Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst)
return inst;
}
BufferOffset
Assembler::as_mrs(Register r, Condition c)
{
return writeInst(0x010f0000 | int(c) | RD(r));
}
BufferOffset
Assembler::as_msr(Register r, Condition c)
{
// Hardcode the 'mask' field to 0b11 for now. It is bits 18 and 19, which
// are the two high bits of the 'c' in this constant.
JS_ASSERT((r.code() & ~0xf) == 0);
return writeInst(0x012cf000 | int(c) | r.code());
}
// VFP instructions!
enum vfp_tags {
vfp_tag = 0x0C000A00,
@ -2060,6 +2203,12 @@ Assembler::as_vmrs(Register r, Condition c)
return writeInst(c | 0x0ef10a10 | RT(r));
}
BufferOffset
Assembler::as_vmsr(Register r, Condition c)
{
return writeInst(c | 0x0ee10a10 | RT(r));
}
bool
Assembler::nextLink(BufferOffset b, BufferOffset *next)
{
@ -2244,8 +2393,11 @@ void
Assembler::retargetNearBranch(Instruction *i, int offset, Condition cond, bool final)
{
// Retargeting calls is totally unsupported!
JS_ASSERT_IF(i->is<InstBranchImm>(), i->is<InstBImm>());
new (i) InstBImm(BOffImm(offset), cond);
JS_ASSERT_IF(i->is<InstBranchImm>(), i->is<InstBImm>() || i->is<InstBLImm>());
if (i->is<InstBLImm>())
new (i) InstBLImm(BOffImm(offset), cond);
else
new (i) InstBImm(BOffImm(offset), cond);
// Flush the cache, since an instruction was overwritten
if (final)
@ -2558,10 +2710,34 @@ Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
AutoFlushCache::updateTop(uintptr_t(inst), 4);
}
void Assembler::updateBoundsCheck(uint32_t logHeapSize, Instruction *inst)
{
JS_ASSERT(inst->is<InstMOV>());
InstMOV *mov = inst->as<InstMOV>();
JS_ASSERT(mov->checkDest(ScratchRegister));
Operand2 op = mov->extractOp2();
JS_ASSERT(op.isO2Reg());
Op2Reg reg = op.toOp2Reg();
Register index;
reg.getRM(&index);
JS_ASSERT(reg.isO2RegImmShift());
// O2RegImmShift shift = reg.toO2RegImmShift();
*inst = InstALU(ScratchRegister, InvalidReg, lsr(index, logHeapSize), op_mov, SetCond, Always);
AutoFlushCache::updateTop(uintptr_t(inst), 4);
}
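// Before/after of the instruction rewritten by updateBoundsCheck (an
// assembly sketch; the original shift amount is whatever ma_BoundsCheck
// emitted as a placeholder):
//
//   before:  mov   scratch, index, LSR #placeholder
//   after:   movs  scratch, index, LSR #logHeapSize  ; Z set <=> in bounds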
void
AutoFlushCache::update(uintptr_t newStart, size_t len)
{
uintptr_t newStop = newStart + len;
if (this == NULL) {
// Just flush right here and now.
JSC::ExecutableAllocator::cacheFlush((void*)newStart, len);
return;
}
used_ = true;
if (!start_) {
IonSpewCont(IonSpew_CacheFlush, ".");

View File

@ -57,9 +57,36 @@ static const Register CallTempReg3 = r8;
static const Register CallTempReg4 = r0;
static const Register CallTempReg5 = r1;
static const Register IntArgReg0 = r0;
static const Register IntArgReg1 = r1;
static const Register IntArgReg2 = r2;
static const Register IntArgReg3 = r3;
static const Register GlobalReg = r10;
static const Register HeapReg = r11;
static const Register CallTempNonArgRegs[] = { r5, r6, r7, r8 };
static const uint32_t NumCallTempNonArgRegs =
mozilla::ArrayLength(CallTempNonArgRegs);
class ABIArgGenerator
{
#if defined(JS_CPU_ARM_HARDFP)
unsigned intRegIndex_;
unsigned floatRegIndex_;
#else
unsigned argRegIndex_;
#endif
uint32_t stackOffset_;
ABIArg current_;
public:
ABIArgGenerator();
ABIArg next(MIRType argType);
ABIArg &current() { return current_; }
uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
static const Register NonArgReturnVolatileReg0;
static const Register NonArgReturnVolatileReg1;
};
static const Register PreBarrierReg = r1;
@ -74,6 +101,8 @@ static const Register ReturnReg = r0;
static const FloatRegister ReturnFloatReg = { FloatRegisters::d0 };
static const FloatRegister ScratchFloatReg = { FloatRegisters::d1 };
static const FloatRegister NANReg = { FloatRegisters::d15 };
static const FloatRegister d0 = {FloatRegisters::d0};
static const FloatRegister d1 = {FloatRegisters::d1};
static const FloatRegister d2 = {FloatRegisters::d2};
@ -97,9 +126,12 @@ static const FloatRegister d15 = {FloatRegisters::d15};
// Also, the ARM ABI wants the stack to be 8-byte aligned at
// function boundaries. I'm trying to make sure this is always true.
static const uint32_t StackAlignment = 8;
static const uint32_t CodeAlignment = 8;
static const bool StackKeptAligned = true;
static const uint32_t NativeFrameSize = sizeof(void*);
static const uint32_t AlignmentAtPrologue = sizeof(void*);
static const uint32_t AlignmentAtPrologue = 0;
static const uint32_t AlignmentMidPrologue = 4;
static const Scale ScalePointer = TimesFour;
@ -372,7 +404,7 @@ bool condsAreSafe(ALUOp op);
ALUOp getDestVariant(ALUOp op);
static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
static const ValueOperand softfpReturnOperand = ValueOperand(r1, r0);
// All of these classes exist solely to shuffle data into the various operands.
// For example Operand2 can be an imm8, a register-shifted-by-a-constant or
// a register-shifted-by-a-register. I represent this in C++ by having a
@ -391,6 +423,9 @@ static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSRet
// but have all of them take up only a single word of storage.
// I also wanted to avoid passing around raw integers at all
// since they are error prone.
class Op2Reg;
class O2RegImmShift;
class O2RegRegShift;
namespace datastore {
struct Reg
{
@ -411,6 +446,9 @@ struct Reg
uint32_t encode() {
return RM | RRS << 4 | Type << 5 | ShiftAmount << 7;
}
explicit Reg(const Op2Reg &op) {
memcpy(this, &op, sizeof(*this));
}
};
// Op2 has a mode labelled "<imm8m>", which is arm's magical
@ -534,6 +572,7 @@ struct RIS
{
JS_ASSERT(ShiftAmount == imm);
}
// Initialize from the encoded Reg, not from our own (uninitialized) member.
explicit RIS(Reg r) : ShiftAmount(r.ShiftAmount) { }
};
struct RRS
@ -557,15 +596,21 @@ struct RRS
class MacroAssemblerARM;
class Operand;
class Operand2
{
friend class Operand;
friend class MacroAssemblerARM;
friend class InstALU;
public:
uint32_t oper : 31;
uint32_t invalid : 1;
bool isO2Reg() {
return !(oper & IsImmOp2);
}
Op2Reg toOp2Reg();
bool isImm8() {
return oper & IsImmOp2;
}
protected:
Operand2(datastore::Imm8mData base)
@ -651,6 +696,30 @@ class Op2Reg : public Operand2
Op2Reg(Register rm, ShiftType type, datastore::RRS shiftReg)
: Operand2(datastore::Reg(rm.code(), type, 1, shiftReg.encode()))
{ }
bool isO2RegImmShift() {
datastore::Reg r(*this);
return !r.RRS;
}
O2RegImmShift toO2RegImmShift();
bool isO2RegRegShift() {
datastore::Reg r(*this);
return r.RRS;
}
O2RegRegShift toO2RegRegShift();
bool checkType(ShiftType type) {
datastore::Reg r(*this);
return r.Type == type;
}
bool checkRM(Register rm) {
datastore::Reg r(*this);
return r.RM == rm.code();
}
bool getRM(Register *rm) {
datastore::Reg r(*this);
*rm = Register::FromCode(r.RM);
return true;
}
};
class O2RegImmShift : public Op2Reg
@ -659,6 +728,12 @@ class O2RegImmShift : public Op2Reg
O2RegImmShift(Register rn, ShiftType type, uint32_t shift)
: Op2Reg(rn, type, datastore::RIS(shift))
{ }
int getShift() {
datastore::Reg r(*this);
datastore::RIS ris(r);
return ris.ShiftAmount;
}
};
class O2RegRegShift : public Op2Reg
@ -1179,19 +1254,11 @@ class Assembler
// TODO: this should actually be a pool-like object
// It is currently a big hack, and probably shouldn't exist
class JumpPool;
js::Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
js::Vector<JumpPool *, 0, SystemAllocPolicy> jumpPools_;
js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpJumpRelocations_;
js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpDataRelocations_;
js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpPreBarriers_;
class JumpPool : TempObject
{
BufferOffset start;
uint32_t size;
bool fixup(IonCode *code, uint8_t *data);
};
CompactBufferWriter jumpRelocations_;
CompactBufferWriter dataRelocations_;
@ -1235,13 +1302,13 @@ class Assembler
m_buffer.initWithAllocator();
// Set up the backwards double region
new (&pools_[2]) Pool (1024, 8, 4, 8, 8, true);
new (&pools_[2]) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, true);
// Set up the backwards 32 bit region
new (&pools_[3]) Pool (4096, 4, 4, 8, 4, true, true);
new (&pools_[3]) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, true, true);
// Set up the forwards double region
new (doublePool) Pool (1024, 8, 4, 8, 8, false, false, &pools_[2]);
new (doublePool) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, false, false, &pools_[2]);
// Set up the forwards 32 bit region
new (int32Pool) Pool (4096, 4, 4, 8, 4, false, true, &pools_[3]);
new (int32Pool) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, false, true, &pools_[3]);
for (int i = 0; i < 4; i++) {
if (pools_[i].poolData == NULL) {
m_buffer.fail_oom();
@ -1301,7 +1368,6 @@ class Assembler
public:
void finish();
void executableCopy(void *buffer);
void processCodeLabels(uint8_t *rawCode);
void copyJumpRelocationTable(uint8_t *dest);
void copyDataRelocationTable(uint8_t *dest);
void copyPreBarrierTable(uint8_t *dest);
@ -1334,10 +1400,10 @@ class Assembler
BufferOffset align(int alignment);
BufferOffset as_nop();
BufferOffset as_alu(Register dest, Register src1, Operand2 op2,
ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always);
ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = NULL);
BufferOffset as_mov(Register dest,
Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = NULL);
BufferOffset as_mvn(Register dest, Operand2 op2,
SetCond_ sc = NoSetCond, Condition c = Always);
// logical operations
@ -1448,6 +1514,8 @@ class Assembler
BufferOffset as_bl(Label *l, Condition c);
BufferOffset as_bl(BOffImm off, Condition c, BufferOffset inst);
BufferOffset as_mrs(Register r, Condition c = Always);
BufferOffset as_msr(Register r, Condition c = Always);
// VFP instructions!
private:
@ -1539,6 +1607,7 @@ class Assembler
BufferOffset as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always);
BufferOffset as_vmrs(Register r, Condition c = Always);
BufferOffset as_vmsr(Register r, Condition c = Always);
// label operations
bool nextLink(BufferOffset b, BufferOffset *next);
void bind(Label *label, BufferOffset boff = BufferOffset());
@ -1549,6 +1618,7 @@ class Assembler
void retarget(Label *label, Label *target);
// I'm going to pretend this doesn't exist for now.
void retarget(Label *label, void *target, Relocation::Kind reloc);
// void Bind(IonCode *code, AbsoluteLabel *label, const void *address);
void Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address);
void call(Label *label);
@ -1718,6 +1788,10 @@ class Assembler
static void ToggleToCmp(CodeLocationLabel inst_);
static void ToggleCall(CodeLocationLabel inst_, bool enabled);
static void updateBoundsCheck(uint32_t logHeapSize, Instruction *inst);
void processCodeLabels(uint8_t *rawCode);
}; // Assembler
// An Instruction is a structure for both encoding and decoding any and all ARM instructions.
@ -1956,7 +2030,7 @@ class InstALU : public Instruction
static const int32_t ALUMask = 0xc << 24;
public:
InstALU (Register rd, Register rn, Operand2 op2, ALUOp op, SetCond_ sc, Assembler::Condition c)
: Instruction(RD(rd) | RN(rn) | op2.encode() | op | sc | c)
: Instruction(maybeRD(rd) | maybeRN(rn) | op2.encode() | op | sc, c)
{ }
static bool isTHIS (const Instruction &i);
static InstALU *asTHIS (const Instruction &i);
@ -1966,8 +2040,9 @@ class InstALU : public Instruction
bool checkDest(Register rd);
void extractOp1(Register *ret);
bool checkOp1(Register rn);
void extractOp2(Operand2 *ret);
Operand2 extractOp2();
};
class InstCMP : public InstALU
{
public:
@ -1975,6 +2050,13 @@ class InstCMP : public InstALU
static InstCMP *asTHIS (const Instruction &i);
};
class InstMOV : public InstALU
{
public:
static bool isTHIS (const Instruction &i);
static InstMOV *asTHIS (const Instruction &i);
};
class InstructionIterator {
private:

View File

@ -11,6 +11,7 @@
#include "jsnum.h"
#include "CodeGenerator-arm.h"
#include "ion/CodeGenerator.h"
#include "ion/IonCompartment.h"
#include "ion/IonFrames.h"
#include "ion/MIR.h"
@ -36,9 +37,16 @@ CodeGeneratorARM::CodeGeneratorARM(MIRGenerator *gen, LIRGraph *graph, MacroAsse
bool
CodeGeneratorARM::generatePrologue()
{
// Note that this automatically sets MacroAssembler::framePushed().
masm.reserveStack(frameSize());
masm.checkStackAlignment();
if (gen->compilingAsmJS()) {
masm.Push(lr);
// Note that this automatically sets MacroAssembler::framePushed().
masm.reserveStack(frameDepth_);
} else {
// Note that this automatically sets MacroAssembler::framePushed().
masm.reserveStack(frameSize());
masm.checkStackAlignment();
}
// Allocate returnLabel_ on the heap, so we don't run its destructor and
// assert-not-bound in debug mode on compilation failure.
returnLabel_ = new HeapLabel();
@ -49,13 +57,19 @@ CodeGeneratorARM::generatePrologue()
bool
CodeGeneratorARM::generateEpilogue()
{
masm.bind(returnLabel_);
// Pop the stack we allocated at the start of the function.
masm.freeStack(frameSize());
JS_ASSERT(masm.framePushed() == 0);
masm.ma_pop(pc);
masm.bind(returnLabel_);
if (gen->compilingAsmJS()) {
// Pop the stack we allocated at the start of the function.
masm.freeStack(frameDepth_);
masm.Pop(pc);
JS_ASSERT(masm.framePushed() == 0);
//masm.as_bkpt();
} else {
// Pop the stack we allocated at the start of the function.
masm.freeStack(frameSize());
JS_ASSERT(masm.framePushed() == 0);
masm.ma_pop(pc);
}
masm.dumpPool();
return true;
}
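// A sketch of the asm.js ARM frame that the prologue/epilogue pair above
// maintains (offsets grow downward from the caller's sp):
//
//   [ saved lr ]                                         <- masm.Push(lr)
//   [ frameDepth_ bytes of locals and outgoing stack args ]
//
// The epilogue frees frameDepth_ and pops the saved lr directly into pc,
// returning in a single instruction.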
@ -479,6 +493,7 @@ CodeGeneratorARM::visitMulI(LMulI *ins)
extern "C" {
extern int __aeabi_idivmod(int,int);
extern int __aeabi_uidivmod(int,int);
}
bool
@ -843,7 +858,15 @@ CodeGeneratorARM::toMoveOperand(const LAllocation *a) const
return MoveOperand(ToRegister(a));
if (a->isFloatReg())
return MoveOperand(ToFloatRegister(a));
return MoveOperand(StackPointer, ToStackOffset(a));
JS_ASSERT((ToStackOffset(a) & 3) == 0);
int32_t offset = ToStackOffset(a);
// The way the stack slots work, we assume that everything from depth == 0
// downwards is writable. However, since our frame is included in this,
// ensure that the frame gets skipped.
if (gen->compilingAsmJS())
offset -= AlignmentMidPrologue;
return MoveOperand(StackPointer, offset);
}
bool
@ -1178,19 +1201,9 @@ CodeGeneratorARM::visitDouble(LDouble *ins)
{
const LDefinition *out = ins->getDef(0);
const LConstantIndex *cindex = ins->getOperand(0)->toConstantIndex();
const Value &v = graph.getConstant(cindex->index());
masm.ma_vimm(v.toDouble(), ToFloatRegister(out));
masm.ma_vimm(ins->getDouble(), ToFloatRegister(out));
return true;
#if 0
DeferredDouble *d = new DeferredDouble(cindex->index());
if (!deferredDoubles_.append(d))
return false;
masm.movsd(d->label(), ToFloatRegister(out));
return true;
#endif
}
Register
@ -1616,3 +1629,224 @@ CodeGeneratorARM::generateInvalidateEpilogue()
masm.breakpoint();
return true;
}
template <class U>
Register
getBase(U *mir)
{
switch (mir->base()) {
case U::Heap: return HeapReg;
case U::Global: return GlobalReg;
}
return InvalidReg;
}
bool
CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
const MAsmJSLoadHeap *mir = ins->mir();
bool isSigned;
int size;
bool isFloat = false;
switch (mir->viewType()) {
case ArrayBufferView::TYPE_INT8: isSigned = true; size = 8; break;
case ArrayBufferView::TYPE_UINT8: isSigned = false; size = 8; break;
case ArrayBufferView::TYPE_INT16: isSigned = true; size = 16; break;
case ArrayBufferView::TYPE_UINT16: isSigned = false; size = 16; break;
case ArrayBufferView::TYPE_INT32:
case ArrayBufferView::TYPE_UINT32: isSigned = true; size = 32; break;
case ArrayBufferView::TYPE_FLOAT64: isFloat = true; size = 64; break;
case ArrayBufferView::TYPE_FLOAT32:
isFloat = true;
size = 32;
break;
default: JS_NOT_REACHED("unexpected array type");
}
Register index = ToRegister(ins->ptr());
BufferOffset bo = masm.ma_BoundsCheck(index);
if (isFloat) {
VFPRegister vd(ToFloatRegister(ins->output()));
if (size == 32) {
masm.ma_vldr(vd.singleOverlay(), HeapReg, index, 0, Assembler::Zero);
masm.as_vcvt(vd, vd.singleOverlay(), false, Assembler::Zero);
} else {
masm.ma_vldr(vd, HeapReg, index, 0, Assembler::Zero);
}
masm.ma_vmov(NANReg, ToFloatRegister(ins->output()), Assembler::NonZero);
} else {
masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, index,
ToRegister(ins->output()), Offset, Assembler::Zero);
masm.ma_mov(Imm32(0), ToRegister(ins->output()), NoSetCond, Assembler::NonZero);
}
return gen->noteBoundsCheck(bo.getOffset());
}
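// The load above executes only under Assembler::Zero, i.e. when the bounds
// check left the flags indicating an in-bounds index; otherwise the output
// is forced to NaN (floats) or 0 (ints). In effect (a C++ sketch):
//
//   if ((index >> logHeapSize) == 0)   // Assembler::Zero: in bounds
//       result = heap[index];
//   else                               // Assembler::NonZero
//       result = isFloat ? NaN : 0;    // asm.js out-of-bounds semantics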
bool
CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
{
const MAsmJSStoreHeap *mir = ins->mir();
bool isSigned;
int size;
bool isFloat = false;
switch (mir->viewType()) {
case ArrayBufferView::TYPE_INT8:
case ArrayBufferView::TYPE_UINT8: isSigned = false; size = 8; break;
case ArrayBufferView::TYPE_INT16:
case ArrayBufferView::TYPE_UINT16: isSigned = false; size = 16; break;
case ArrayBufferView::TYPE_INT32:
case ArrayBufferView::TYPE_UINT32: isSigned = true; size = 32; break;
case ArrayBufferView::TYPE_FLOAT64: isFloat = true; size = 64; break;
case ArrayBufferView::TYPE_FLOAT32:
isFloat = true;
size = 32;
break;
default: JS_NOT_REACHED("unexpected array type");
}
Register index = ToRegister(ins->ptr());
BufferOffset bo = masm.ma_BoundsCheck(index);
if (isFloat) {
VFPRegister vd(ToFloatRegister(ins->value()));
if (size == 32) {
masm.storeFloat(vd, HeapReg, index, Assembler::Zero);
} else {
masm.ma_vstr(vd, HeapReg, index, 0, Assembler::Zero);
}
} else {
masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, index,
ToRegister(ins->value()), Offset, Assembler::Zero);
}
return gen->noteBoundsCheck(bo.getOffset());
}
bool
CodeGeneratorARM::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
{
const MAsmJSPassStackArg *mir = ins->mir();
Operand dst(StackPointer, mir->spOffset());
if (ins->arg()->isConstant()) {
//masm.as_bkpt();
masm.ma_storeImm(Imm32(ToInt32(ins->arg())), dst);
} else {
if (ins->arg()->isGeneralReg())
masm.ma_str(ToRegister(ins->arg()), dst);
else
masm.ma_vstr(ToFloatRegister(ins->arg()), dst);
}
return true;
}
bool
CodeGeneratorARM::visitAsmJSDivOrMod(LAsmJSDivOrMod *ins)
{
//Register remainder = ToRegister(ins->remainder());
Register lhs = ToRegister(ins->lhs());
Register rhs = ToRegister(ins->rhs());
Register output = ToRegister(ins->output());
//JS_ASSERT(remainder == edx);
//JS_ASSERT(lhs == eax);
JS_ASSERT(ins->mirRaw()->isAsmJSUDiv() || ins->mirRaw()->isAsmJSUMod());
//JS_ASSERT_IF(ins->mirRaw()->isAsmUDiv(), output == eax);
//JS_ASSERT_IF(ins->mirRaw()->isAsmUMod(), output == edx);
Label afterDiv;
masm.ma_cmp(rhs, Imm32(0));
Label notzero;
masm.ma_b(&notzero, Assembler::NonZero);
masm.ma_mov(Imm32(0), output);
masm.ma_b(&afterDiv);
masm.bind(&notzero);
masm.setupAlignedABICall(2);
masm.passABIArg(lhs);
masm.passABIArg(rhs);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_uidivmod));
masm.bind(&afterDiv);
return true;
}
bool
CodeGeneratorARM::visitEffectiveAddress(LEffectiveAddress *ins)
{
const MEffectiveAddress *mir = ins->mir();
Register base = ToRegister(ins->base());
Register index = ToRegister(ins->index());
Register output = ToRegister(ins->output());
masm.as_add(output, base, lsl(index, mir->scale()));
masm.ma_add(Imm32(mir->displacement()), output);
return true;
}
bool
CodeGeneratorARM::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
{
const MAsmJSLoadGlobalVar *mir = ins->mir();
unsigned addr = mir->globalDataOffset();
if (mir->type() == MIRType_Int32)
masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output()));
else
masm.ma_vldr(Operand(GlobalReg, addr), ToFloatRegister(ins->output()));
return true;
}
bool
CodeGeneratorARM::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins)
{
const MAsmJSStoreGlobalVar *mir = ins->mir();
MIRType type = mir->value()->type();
JS_ASSERT(type == MIRType_Int32 || type == MIRType_Double);
unsigned addr = mir->globalDataOffset();
if (mir->value()->type() == MIRType_Int32)
masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value()));
else
masm.ma_vstr(ToFloatRegister(ins->value()), Operand(GlobalReg, addr));
return true;
}
bool
CodeGeneratorARM::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins)
{
const MAsmJSLoadFuncPtr *mir = ins->mir();
Register index = ToRegister(ins->index());
Register tmp = ToRegister(ins->temp());
Register out = ToRegister(ins->output());
unsigned addr = mir->globalDataOffset();
masm.ma_mov(Imm32(addr), tmp);
masm.as_add(tmp, tmp, lsl(index, 2));
masm.ma_ldr(DTRAddr(GlobalReg, DtrRegImmShift(tmp, LSL, 0)), out);
return true;
}
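// The three instructions above compute, in effect (a C++ sketch):
//
//   out = *(void **)(GlobalReg + mir->globalDataOffset() + index * 4);
//
// i.e. an index into the function-pointer table stored in the global data
// segment.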
bool
CodeGeneratorARM::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins)
{
const MAsmJSLoadFFIFunc *mir = ins->mir();
masm.ma_ldr(Operand(GlobalReg, mir->globalDataOffset()), ToRegister(ins->output()));
return true;
}
bool
CodeGeneratorARM::visitNegI(LNegI *ins)
{
Register input = ToRegister(ins->input());
masm.ma_neg(input, ToRegister(ins->output()));
return true;
}
bool
CodeGeneratorARM::visitNegD(LNegD *ins)
{
FloatRegister input = ToFloatRegister(ins->input());
masm.ma_vneg(input, ToFloatRegister(ins->output()));
return true;
}

View File

@ -141,9 +141,31 @@ class CodeGeneratorARM : public CodeGeneratorShared
bool visitInterruptCheck(LInterruptCheck *lir);
bool generateInvalidateEpilogue();
bool visitNegI(LNegI *lir);
bool visitNegD(LNegD *lir);
bool visitAsmJSLoadHeap(LAsmJSLoadHeap *ins);
bool visitAsmJSStoreHeap(LAsmJSStoreHeap *ins);
bool visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins);
bool visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins);
bool visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins);
bool visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins);
void postAsmJSCall(LAsmJSCall *lir) {}
bool visitAsmJSPassStackArg(LAsmJSPassStackArg *ins);
bool generateInvalidateEpilogue();
protected:
bool generateAsmJSPrologue(const MIRTypeVector &argTypes, MIRType returnType,
Label *internalEntry);
void postAsmJSCall(LAsmJSCall *lir) {
#if !defined(JS_CPU_ARM_HARDFP)
if (lir->mir()->type() == MIRType_Double) {
masm.ma_vxfer(r0, r1, d0);
}
#endif
}
bool visitEffectiveAddress(LEffectiveAddress *ins);
bool visitAsmJSDivOrMod(LAsmJSDivOrMod *ins);
};
typedef CodeGeneratorARM CodeGeneratorSpecific;

View File

@ -9,6 +9,7 @@
#define jsion_ionframes_arm_h__
#include "ion/shared/IonFrames-shared.h"
//#include "ion/arm/Assembler-arm.h"
namespace js {
namespace ion {

View File

@ -71,13 +71,16 @@ class LUnboxDouble : public LInstructionHelper<1, 2, 0>
};
// Constant double.
class LDouble : public LInstructionHelper<1, 1, 0>
class LDouble : public LInstructionHelper<1, 0, 0>
{
double d_;
public:
LIR_HEADER(Double);
LDouble(const LConstantIndex &cindex) {
setOperand(0, cindex);
LDouble(double d) : d_(d)
{ }
double getDouble() const {
return d_;
}
};
@ -294,6 +297,43 @@ class LMulI : public LBinaryMath<0>
}
};
// This class performs an unsigned integer division or modulus via the
// __aeabi_uidivmod runtime helper, yielding either a quotient (r0) or a
// remainder (r1) depending on which register this instruction is defined
// to output.
class LAsmJSDivOrMod : public LBinaryMath<2>
{
public:
LIR_HEADER(AsmJSDivOrMod);
LAsmJSDivOrMod(const LAllocation &lhs, const LAllocation &rhs, const LDefinition &temp1, const LDefinition &temp2) {
setOperand(0, lhs);
setOperand(1, rhs);
setTemp(0, temp1);
setTemp(1, temp2);
}
// This is incorrect: the remainder is returned in r1, but getTemp(0) is r2.
const LDefinition *remainder() {
return getTemp(0);
}
};
class LAsmJSLoadFuncPtr : public LInstructionHelper<1, 1, 1>
{
public:
LIR_HEADER(AsmJSLoadFuncPtr);
LAsmJSLoadFuncPtr(const LAllocation &index, const LDefinition &temp) {
setOperand(0, index);
setTemp(0, temp);
}
const MAsmJSLoadFuncPtr *mir() const {
return mir_->toAsmJSLoadFuncPtr();
}
const LAllocation *index() {
return getOperand(0);
}
const LDefinition *temp() {
return getTemp(0);
}
};
} // namespace ion
} // namespace js

View File

@ -18,7 +18,10 @@
_(ModPowTwoI) \
_(ModMaskI) \
_(PowHalfD) \
_(UInt32ToDouble)
_(UInt32ToDouble) \
_(AsmJSDivOrMod) \
_(AsmJSLoadFuncPtr)
#endif // jsion_lir_opcodes_arm_h__

View File

@ -43,10 +43,7 @@ bool
LIRGeneratorARM::lowerConstantDouble(double d, MInstruction *mir)
{
uint32_t index;
if (!lirGraph_.addConstantToPool(DoubleValue(d), &index))
return false;
LDouble *lir = new LDouble(LConstantIndex::FromIndex(index));
LDouble *lir = new LDouble(d);
return define(lir, mir);
}
@ -54,10 +51,7 @@ bool
LIRGeneratorARM::visitConstant(MConstant *ins)
{
if (ins->type() == MIRType_Double) {
uint32_t index;
if (!lirGraph_.addConstantToPool(ins->value(), &index))
return false;
LDouble *lir = new LDouble(LConstantIndex::FromIndex(index));
LDouble *lir = new LDouble(ins->value().toDouble());
return define(lir, ins);
}
@ -375,3 +369,68 @@ LIRGeneratorARM::lowerUrshD(MUrsh *mir)
LUrshD *lir = new LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
return define(lir, mir);
}
bool
LIRGeneratorARM::visitAsmJSNeg(MAsmJSNeg *ins)
{
if (ins->type() == MIRType_Int32)
return define(new LNegI(useRegisterAtStart(ins->input())), ins);
JS_ASSERT(ins->type() == MIRType_Double);
return define(new LNegD(useRegisterAtStart(ins->input())), ins);
}
bool
LIRGeneratorARM::visitAsmJSUDiv(MAsmJSUDiv *div)
{
LAsmJSDivOrMod *lir = new LAsmJSDivOrMod(useFixed(div->lhs(), r0),
useFixed(div->rhs(), r1),
tempFixed(r2), tempFixed(r3));
return defineFixed(lir, div, LAllocation(AnyRegister(r0)));
}
bool
LIRGeneratorARM::visitAsmJSUMod(MAsmJSUMod *mod)
{
LAsmJSDivOrMod *lir = new LAsmJSDivOrMod(useFixed(mod->lhs(), r0),
useFixed(mod->rhs(), r1),
tempFixed(r2), tempFixed(r3));
return defineFixed(lir, mod, LAllocation(AnyRegister(r1)));
}
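// Both lowerings pin their operands and outputs because __aeabi_uidivmod is
// the EABI runtime helper returning the quotient in r0 and the remainder in
// r1; its effective signature is roughly:
//
//   struct qr { uint32_t quot; /* r0 */ uint32_t rem; /* r1 */ };
//   qr __aeabi_uidivmod(uint32_t numerator /* r0 */, uint32_t denominator /* r1 */);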
bool
LIRGeneratorARM::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins)
{
JS_ASSERT(ins->input()->type() == MIRType_Int32);
LUInt32ToDouble *lir = new LUInt32ToDouble(useRegisterAtStart(ins->input()));
return define(lir, ins);
}
bool
LIRGeneratorARM::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
{
LAsmJSStoreHeap *lir;
switch (ins->viewType()) {
case ArrayBufferView::TYPE_INT8: case ArrayBufferView::TYPE_UINT8:
case ArrayBufferView::TYPE_INT16: case ArrayBufferView::TYPE_UINT16:
case ArrayBufferView::TYPE_INT32: case ArrayBufferView::TYPE_UINT32:
lir = new LAsmJSStoreHeap(useRegisterAtStart(ins->ptr()),
useRegisterAtStart(ins->value()));
break;
case ArrayBufferView::TYPE_FLOAT32:
case ArrayBufferView::TYPE_FLOAT64:
lir = new LAsmJSStoreHeap(useRegisterAtStart(ins->ptr()),
useRegisterAtStart(ins->value()));
break;
default: JS_NOT_REACHED("unexpected array type");
}
return add(lir, ins);
}
bool
LIRGeneratorARM::visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins)
{
return define(new LAsmJSLoadFuncPtr(useRegister(ins->index()), temp()), ins);
}
//__aeabi_uidiv

View File

@ -48,6 +48,9 @@ class LIRGeneratorARM : public LIRGeneratorShared
bool lowerModI(MMod *mod);
bool lowerMulI(MMul *mul, MDefinition *lhs, MDefinition *rhs);
bool visitPowHalf(MPowHalf *ins);
bool visitAsmJSNeg(MAsmJSNeg *ins);
bool visitAsmJSUDiv(MAsmJSUDiv *ins);
bool visitAsmJSUMod(MAsmJSUMod *ins);
LTableSwitch *newLTableSwitch(const LAllocation &in, const LDefinition &inputCopy,
MTableSwitch *ins);
@ -62,6 +65,9 @@ class LIRGeneratorARM : public LIRGeneratorShared
bool visitGuardShape(MGuardShape *ins);
bool visitStoreTypedArrayElement(MStoreTypedArrayElement *ins);
bool visitStoreTypedArrayElementHole(MStoreTypedArrayElementHole *ins);
bool visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins);
bool visitAsmJSStoreHeap(MAsmJSStoreHeap *ins);
bool visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins);
bool visitInterruptCheck(MInterruptCheck *ins);
};

View File

@ -371,6 +371,13 @@ MacroAssemblerARM::ma_mov(Imm32 imm, Register dest,
ma_alu(InvalidReg, imm, dest, op_mov, sc, c);
}
void
MacroAssemblerARM::ma_mov(ImmWord imm, Register dest,
SetCond_ sc, Assembler::Condition c)
{
ma_alu(InvalidReg, Imm32(imm.value), dest, op_mov, sc, c);
}
void
MacroAssemblerARM::ma_mov(const ImmGCPtr &ptr, Register dest)
{
@ -973,15 +980,25 @@ MacroAssemblerARM::ma_strb(Register rt, DTRAddr addr, Index mode, Condition cc)
}
// Specialty for moving N bits of data, where n == 8,16,32,64.
void
BufferOffset
MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
Register rn, Register rm, Register rt,
Index mode, Assembler::Condition cc)
Index mode, Assembler::Condition cc, unsigned shiftAmount)
{
JS_NOT_REACHED("Feature NYI");
if (size == 32 || (size == 8 && !IsSigned)) {
return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(rm, LSL, shiftAmount)), cc);
} else {
if (shiftAmount != 0) {
JS_ASSERT(rn != ScratchRegister);
JS_ASSERT(rt != ScratchRegister);
ma_lsl(Imm32(shiftAmount), rm, ScratchRegister);
rm = ScratchRegister;
}
return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(rm)), cc);
}
}
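// A usage sketch under assumed caller context (HeapReg, index and dest are
// stand-ins): load a signed halfword from heap[index << 1]; the shift is
// materialized through ScratchRegister because ARM's extended transfers
// accept no shifted register offset.
//
//   masm.ma_dataTransferN(IsLoad, 16, /* IsSigned = */ true,
//                         HeapReg, index, dest, Offset, Assembler::Always,
//                         /* shiftAmount = */ 1);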
void
BufferOffset
MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
Register rn, Imm32 offset, Register rt,
Index mode, Assembler::Condition cc)
@ -992,8 +1009,7 @@ MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
if (off < 4096 && off > -4096) {
// This encodes as a single instruction; emulating mode's behavior
// in a multi-instruction sequence is not necessary.
as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
return;
return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
}
// We cannot encode this offset in a single ldr. For mode == index,
@ -1031,8 +1047,7 @@ MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
if (rt == pc && mode == PostIndex && ls == IsLoad) {
ma_mov(rn, ScratchRegister);
ma_alu(rn, offset, rn, op_add);
as_dtr(IsLoad, size, Offset, pc, DTRAddr(ScratchRegister, DtrOffImm(0)), cc);
return;
return as_dtr(IsLoad, size, Offset, pc, DTRAddr(ScratchRegister, DtrOffImm(0)), cc);
}
int bottom = off & 0xfff;
@ -1051,37 +1066,32 @@ MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
if (!sub_off.invalid) {
as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom
as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
return;
return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
}
sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off
if (!sub_off.invalid) {
as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
return;
return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
}
} else {
Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
if (!sub_off.invalid) {
as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = off - bottom
as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
return;
return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
}
sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off
if (!sub_off.invalid) {
as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off
as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
return;
return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
}
}
ma_mov(offset, ScratchRegister);
as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(ScratchRegister, LSL, 0)));
return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(ScratchRegister, LSL, 0)));
} else {
// Should attempt to use the extended load/store instructions.
if (off < 256 && off > -256) {
as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffImm(off)), cc);
return;
}
if (off < 256 && off > -256)
return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffImm(off)), cc);
// We cannot encode this offset in a single extldr. Try to encode it as
// an add scratch, base, imm; extldr dest, [scratch, +offset].
int bottom = off & 0xff;
@ -1092,41 +1102,38 @@ MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
if (!sub_off.invalid) {
as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom
as_extdtr(ls, size, IsSigned, Offset, rt,
EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
cc);
return;
return as_extdtr(ls, size, IsSigned, Offset, rt,
EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
cc);
}
sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off
if (!sub_off.invalid) {
as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
as_extdtr(ls, size, IsSigned, Offset, rt,
EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
cc);
return;
return as_extdtr(ls, size, IsSigned, Offset, rt,
EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
cc);
}
} else {
Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
if (!sub_off.invalid) {
as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = off - bottom
as_extdtr(ls, size, IsSigned, Offset, rt,
EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
cc);
return;
return as_extdtr(ls, size, IsSigned, Offset, rt,
EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
cc);
}
sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off
if (!sub_off.invalid) {
as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off
as_extdtr(ls, size, IsSigned, Offset, rt,
EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
cc);
return;
return as_extdtr(ls, size, IsSigned, Offset, rt,
EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
cc);
}
}
ma_mov(offset, ScratchRegister);
as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(ScratchRegister)), cc);
return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(ScratchRegister)), cc);
}
}
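The bottom/neg_bottom splitting above is easiest to follow with concrete (illustrative) numbers: in the 12-bit case, off = 0x1234 gives bottom = 0x234 and off - bottom = 0x1000, which is a valid rotated Imm8, so a single add plus a normally encoded load suffice:

//   add ScratchRegister, rn, #0x1000    ; Imm8(off - bottom) is encodable
//   ldr rt, [ScratchRegister, #0x234]   ; bottom fits the 12-bit field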
void
MacroAssemblerARM::ma_pop(Register r)
{
@ -1350,6 +1357,12 @@ MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest1, Register dest2, C
as_vxfer(dest1, dest2, VFPRegister(src), FloatToCore, cc);
}
void
MacroAssemblerARM::ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc)
{
as_vxfer(src1, src2, VFPRegister(dest), CoreToFloat, cc);
}
void
MacroAssemblerARM::ma_vxfer(VFPRegister src, Register dest, Condition cc)
{
@ -1362,16 +1375,14 @@ MacroAssemblerARM::ma_vxfer(VFPRegister src, Register dest1, Register dest2, Con
as_vxfer(dest1, dest2, src, FloatToCore, cc);
}
void
BufferOffset
MacroAssemblerARM::ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister rt, Condition cc)
{
int off = addr.disp();
JS_ASSERT((off & 3) == 0);
Register base = Register::FromCode(addr.base());
if (off > -1024 && off < 1024) {
as_vdtr(ls, rt, addr.toVFPAddr(), cc);
return;
}
if (off > -1024 && off < 1024)
return as_vdtr(ls, rt, addr.toVFPAddr(), cc);
// We cannot encode this offset in a single ldr. Try to encode it as
// an add scratch, base, imm; ldr dest, [scratch, +offset].
@ -1383,60 +1394,62 @@ MacroAssemblerARM::ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister rt, Co
Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
if (!sub_off.invalid) {
as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = off - bottom
as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc);
return;
return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc);
}
sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off
if (!sub_off.invalid) {
as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc);
return;
return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc);
}
} else {
Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
if (!sub_off.invalid) {
as_add(ScratchRegister, base, sub_off, NoSetCond, cc); // sub_off = off - bottom
as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc);
return;
return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc);
}
sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off
if (!sub_off.invalid) {
as_add(ScratchRegister, base, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off
as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc);
return;
return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc);
}
}
ma_add(base, Imm32(off), ScratchRegister, NoSetCond, cc);
as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(0)), cc);
return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(0)), cc);
}
void
BufferOffset
MacroAssemblerARM::ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc)
{
as_vdtr(IsLoad, dest, addr, cc);
return as_vdtr(IsLoad, dest, addr, cc);
}
void
BufferOffset
MacroAssemblerARM::ma_vldr(const Operand &addr, VFPRegister dest, Condition cc)
{
ma_vdtr(IsLoad, addr, dest, cc);
return ma_vdtr(IsLoad, addr, dest, cc);
}
BufferOffset
MacroAssemblerARM::ma_vldr(VFPRegister src, Register base, Register index, int32_t shift, Condition cc)
{
as_add(ScratchRegister, base, lsl(index, shift), NoSetCond, cc);
return ma_vldr(Operand(ScratchRegister, 0), src, cc);
}
void
BufferOffset
MacroAssemblerARM::ma_vstr(VFPRegister src, VFPAddr addr, Condition cc)
{
as_vdtr(IsStore, src, addr, cc);
return as_vdtr(IsStore, src, addr, cc);
}
void
BufferOffset
MacroAssemblerARM::ma_vstr(VFPRegister src, const Operand &addr, Condition cc)
{
ma_vdtr(IsStore, addr, src, cc);
return ma_vdtr(IsStore, addr, src, cc);
}
void
BufferOffset
MacroAssemblerARM::ma_vstr(VFPRegister src, Register base, Register index, int32_t shift, Condition cc)
{
as_add(ScratchRegister, base, lsl(index, shift), NoSetCond, cc);
ma_vstr(src, Operand(ScratchRegister, 0), cc);
return ma_vstr(src, Operand(ScratchRegister, 0), cc);
}
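The reason all of these transfer helpers now return a BufferOffset is so a caller can remember exactly where an instruction landed and revisit it after assembly. A hedged sketch of the intended pattern (the bookkeeping step is assumed, not shown in this hunk):

// BufferOffset bo = masm.ma_vldr(addr, dest);  // remember this load
// ... once final offsets are known, bo can be translated through the
// assembler's actualOffset() and the instruction patched or registered
// for fault handling.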
bool
@ -3348,3 +3361,4 @@ MacroAssemblerARMCompat::jumpWithPatch(RepatchLabel *label, Condition cond)
CodeOffsetJump ret(bo.getOffset(), pe.encode());
return ret;
}

View File

@ -91,6 +91,8 @@ class MacroAssemblerARM : public Assembler
void ma_mov(Imm32 imm, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
void ma_mov(ImmWord imm, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
void ma_mov(const ImmGCPtr &ptr, Register dest);
@ -262,11 +264,11 @@ class MacroAssemblerARM : public Assembler
void ma_strh(Register rt, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
void ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
// Specialty for moving N bits of data, where N == 8, 16, 32, or 64.
void ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
Register rn, Register rm, Register rt,
Index mode = Offset, Condition cc = Always);
Index mode = Offset, Condition cc = Always, unsigned scale = TimesOne);
void ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
Register rn, Imm32 offset, Register rt,
Index mode = Offset, Condition cc = Always);
void ma_pop(Register r);
@ -319,15 +321,19 @@ class MacroAssemblerARM : public Assembler
void ma_vxfer(VFPRegister src, Register dest, Condition cc = Always);
void ma_vxfer(VFPRegister src, Register dest1, Register dest2, Condition cc = Always);
void ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister dest, Condition cc = Always);
void ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc = Always);
void ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc = Always);
void ma_vldr(const Operand &addr, VFPRegister dest, Condition cc = Always);
BufferOffset ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister dest, Condition cc = Always);
void ma_vstr(VFPRegister src, VFPAddr addr, Condition cc = Always);
void ma_vstr(VFPRegister src, const Operand &addr, Condition cc = Always);
void ma_vstr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);
BufferOffset ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc = Always);
BufferOffset ma_vldr(const Operand &addr, VFPRegister dest, Condition cc = Always);
BufferOffset ma_vldr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, VFPAddr addr, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, const Operand &addr, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);
// Calls an Ion function, assuming that the stack is untouched (8 byte aligned).
void ma_callIon(const Register reg);
// Calls an Ion function, assuming that sp has already been decremented.
@ -444,7 +450,6 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
enoughMemory_(true),
framePushed_(0)
{ }
bool oom() const {
return Assembler::oom() || !enoughMemory_;
}
@ -485,10 +490,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
}
void call(Label *label) {
JS_NOT_REACHED("Feature NYI");
/* We can blx to it if it is close by; otherwise, we need to
 * set up a branch + link node.
 */
// For now, assume that the target will be nearby.
as_bl(label, Always);
}
void call(ImmWord word) {
BufferOffset bo = m_buffer.nextOffset();
@ -594,6 +597,9 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void neg32(Register reg) {
ma_neg(reg, reg, SetCond);
}
void negl(Register reg) {
ma_neg(reg, reg, SetCond);
}
void test32(Register lhs, Register rhs) {
ma_tst(lhs, rhs);
}
@ -821,8 +827,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void branchTestPtr(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
branchTest32(cond, lhs, rhs, label);
}
void branchTestPtr(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
branchTest32(cond, lhs, imm, label);
void branchTestPtr(Condition cond, const Register &lhs, const Imm32 rhs, Label *label) {
branchTest32(cond, lhs, rhs, label);
}
void branchPtr(Condition cond, Register lhs, Register rhs, Label *label) {
branch32(cond, lhs, rhs, label);
@ -910,9 +916,9 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
storeValue(val, Operand(dest));
}
void storeValue(JSValueType type, Register reg, Address dest) {
ma_str(reg, dest);
ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), secondScratchReg_);
ma_str(secondScratchReg_, Address(dest.base, dest.offset + 4));
ma_str(reg, dest);
}
void storeValue(const Value &val, Address dest) {
jsval_layout jv = JSVAL_TO_IMPL(val);
@ -1148,6 +1154,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void cmp32(const Register &lhs, const Register &rhs);
void cmp32(const Operand &lhs, const Imm32 &rhs);
void cmp32(const Operand &lhs, const Register &rhs);
void cmpPtr(const Register &lhs, const ImmWord &rhs);
void cmpPtr(const Register &lhs, const Register &rhs);
void cmpPtr(const Register &lhs, const ImmGCPtr &rhs);
@ -1250,6 +1257,50 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
}
void enterOsr(Register calleeToken, Register code);
void memIntToValue(Address Source, Address Dest) {
load32(Source, lr);
storeValue(JSVAL_TYPE_INT32, lr, Dest);
}
void memMove32(Address Source, Address Dest) {
loadPtr(Source, lr);
storePtr(lr, Dest);
}
void memMove64(Address Source, Address Dest) {
loadPtr(Source, lr);
storePtr(lr, Dest);
loadPtr(Address(Source.base, Source.offset+4), lr);
storePtr(lr, Address(Dest.base, Dest.offset+4));
}
void lea(Operand addr, Register dest) {
ma_add(addr.baseReg(), Imm32(addr.disp()), dest);
}
void stackCheck(ImmWord limitAddr, Label *label) {
// Load the stack limit, compare it against sp, and branch out on
// overflow.
movePtr(limitAddr, ScratchRegister);
ma_ldr(Address(ScratchRegister, 0), ScratchRegister);
ma_cmp(ScratchRegister, StackPointer);
ma_b(label, Assembler::AboveOrEqual);
}
void abiret() {
as_bx(lr);
}
void ma_storeImm(Imm32 c, const Operand &dest) {
ma_mov(c, lr);
ma_str(lr, dest);
}
BufferOffset ma_BoundsCheck(Register bounded) {
return as_mov(ScratchRegister, lsl(bounded, 0), SetCond);
}
void storeFloat(VFPRegister src, Register base, Register index, Condition cond) {
as_vcvt(VFPRegister(ScratchFloatReg).singleOverlay(), src, false, cond);
ma_vstr(VFPRegister(ScratchFloatReg).singleOverlay(), base, index, 0, cond);
}
};
typedef MacroAssemblerARMCompat MacroAssemblerSpecific;
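ma_BoundsCheck above deserves a note: it emits a flag-setting mov of the index through ScratchRegister and hands back the instruction's BufferOffset, which the asm.js compiler can record and later rewrite once the heap length is known. A sketch under that assumption (the bookkeeping call is a hypothetical hook):

// BufferOffset bo = masm.ma_BoundsCheck(ptrReg);   // movs scratch, ptr
// module.addBoundsCheck(bo.getOffset());           // hypothetical bookkeeping
// // After assembly, the recorded offset is converted to its actualOffset
// // and the placeholder is patched into a real length comparison.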

View File

@ -86,7 +86,11 @@ MoveEmitterARM::tempReg()
// For now, just pick r12/ip as the eviction point. This is totally
// random, and if it ends up being bad, we can use actual heuristics later.
spilledReg_ = r12;
// r12 is actually a bad choice: it is the scratch register, which is
// frequently used for address computations, such as those needed to access
// values more than 4096 bytes off of the stack pointer.
// Instead, use lr, the link register.
spilledReg_ = r14;
if (pushedAtSpill_ == -1) {
masm.Push(spilledReg_);
pushedAtSpill_ = masm.framePushed();

View File

@ -65,8 +65,13 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, Mac
// An MAsmJSCall does not align the stack pointer at calls sites but instead
// relies on the a priori stack adjustment (in the prologue) on platforms
// (like x64) which require the stack to be aligned.
if (gen->performsAsmJSCall()) {
unsigned alignmentAtCall = AlignmentAtPrologue + frameDepth_;
#ifdef JS_CPU_ARM
bool forceAlign = true;
#else
bool forceAlign = false;
#endif
if (gen->performsAsmJSCall() || forceAlign) {
unsigned alignmentAtCall = AlignmentMidPrologue + frameDepth_;
if (unsigned rem = alignmentAtCall % StackAlignment)
frameDepth_ += StackAlignment - rem;
}
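A worked example of the padding arithmetic above (illustrative numbers): take StackAlignment == 8, AlignmentMidPrologue == 4, and frameDepth_ == 18:

// alignmentAtCall = 4 + 18 = 22
// rem             = 22 % 8 = 6
// frameDepth_    += 8 - 6           // frameDepth_ is now 20
// => frameDepth_ + AlignmentMidPrologue is a multiple of 8 at call sites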
@ -444,7 +449,6 @@ CodeGeneratorShared::callVM(const VMFunction &fun, LInstruction *ins, const Regi
// Pop arguments from framePushed.
masm.implicitPop(fun.explicitStackSlots() * sizeof(void *) + framePop);
// Stack is:
// ... frame ...
return true;

View File

@ -1455,5 +1455,26 @@ CodeGeneratorX86Shared::generateInvalidateEpilogue()
return true;
}
bool
CodeGeneratorX86Shared::visitNegI(LNegI *ins)
{
Register input = ToRegister(ins->input());
JS_ASSERT(input == ToRegister(ins->output()));
masm.neg32(input);
return true;
}
bool
CodeGeneratorX86Shared::visitNegD(LNegD *ins)
{
FloatRegister input = ToFloatRegister(ins->input());
JS_ASSERT(input == ToFloatRegister(ins->output()));
masm.negateDouble(input);
return true;
}
} // namespace ion
} // namespace js
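These two visitors pair with the negation nodes this patch lowers (see visitAsmJSNeg in the ARM header above); both assert that the register allocator reused the input as the output, since neg32/negateDouble operate in place. A hypothetical lowering that would satisfy those asserts, mirroring the reuse-input pattern used elsewhere in Ion:

// bool
// LIRGeneratorX86Shared::visitAsmJSNeg(MAsmJSNeg *ins)   // hypothetical
// {
//     if (ins->type() == MIRType_Int32)
//         return defineReuseInput(new LNegI(useRegisterAtStart(ins->input())), ins, 0);
//     return defineReuseInput(new LNegD(useRegisterAtStart(ins->input())), ins, 0);
// }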

View File

@ -111,6 +111,9 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
virtual bool visitAsmJSDivOrMod(LAsmJSDivOrMod *ins);
virtual bool visitAsmJSPassStackArg(LAsmJSPassStackArg *ins);
bool visitNegI(LNegI *lir);
bool visitNegD(LNegD *lir);
// Out of line visitors.
bool visitOutOfLineBailout(OutOfLineBailout *ool);
bool visitOutOfLineUndoALUOperation(OutOfLineUndoALUOperation *ool);

View File

@ -76,10 +76,9 @@ struct BufferSlice : public InlineForwardListNode<BufferSlice<SliceSize> > {
template<int SliceSize, class Inst>
struct AssemblerBuffer
: public IonAllocPolicy
{
public:
AssemblerBuffer() : head(NULL), tail(NULL), m_oom(false), m_bail(false), bufferSize(0) {}
AssemblerBuffer() : head(NULL), tail(NULL), m_oom(false), m_bail(false), bufferSize(0), LifoAlloc_(8192) {}
protected:
typedef BufferSlice<SliceSize> Slice;
typedef AssemblerBuffer<SliceSize, Inst> AssemblerBuffer_;
@ -96,8 +95,8 @@ struct AssemblerBuffer
JS_ASSERT((alignment & (alignment-1)) == 0);
return !(size() & (alignment - 1));
}
virtual Slice *newSlice() {
Slice *tmp = static_cast<Slice*>(malloc_(sizeof(Slice)));
virtual Slice *newSlice(LifoAlloc &a) {
Slice *tmp = static_cast<Slice*>(a.alloc(sizeof(Slice)));
if (!tmp) {
m_oom = true;
return NULL;
@ -108,7 +107,7 @@ struct AssemblerBuffer
bool ensureSpace(int size) {
if (tail != NULL && tail->size()+size <= SliceSize)
return true;
Slice *tmp = newSlice();
Slice *tmp = newSlice(LifoAlloc_);
if (tmp == NULL)
return false;
if (tail != NULL) {
@ -193,7 +192,7 @@ struct AssemblerBuffer
// Break the instruction stream so we can go back and edit it at this point
void perforate() {
Slice *tmp = newSlice();
Slice *tmp = newSlice(LifoAlloc_);
if (!tmp)
m_oom = true;
bufferSize += tail->size();
@ -216,7 +215,8 @@ struct AssemblerBuffer
return m_buffer->getInst(bo);
}
};
public:
LifoAlloc LifoAlloc_;
};
} // ion
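The upshot of routing slice storage through the member LifoAlloc_ is that individual slices are never freed piecemeal; everything is released at once when the buffer is destroyed. A minimal usage sketch (type and sizes are illustrative):

// AssemblerBuffer<1024, Instruction> buf;   // constructs LifoAlloc_(8192)
// buf.ensureSpace(4);                       // first use calls newSlice(LifoAlloc_)
// // ~AssemblerBuffer: the LifoAlloc tears down every slice in one shot;
// // no per-slice free is needed (or possible).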

View File

@ -47,18 +47,18 @@ struct Pool
BufferOffset limitingUser;
int limitingUsee;
Pool(int maxOffset_, int immSize_, int instSize_, int bias_, int alignment_,
Pool(int maxOffset_, int immSize_, int instSize_, int bias_, int alignment_, LifoAlloc &LifoAlloc_,
bool isBackref_ = false, bool canDedup_ = false, Pool *other_ = NULL)
: maxOffset(maxOffset_), immSize(immSize_), instSize(instSize_),
bias(bias_), alignment(alignment_),
isBackref(isBackref_), canDedup(canDedup_), other(other_),
poolData(static_cast<uint8_t *>(malloc_(8*immSize))), numEntries(0),
poolData(static_cast<uint8_t *>(LifoAlloc_.alloc(8*immSize))), numEntries(0),
buffSize(8), loadOffsets(), limitingUser(), limitingUsee(INT_MIN)
{
}
static const int garbage=0xa5a5a5a5;
Pool() : maxOffset(garbage), immSize(garbage), instSize(garbage), bias(garbage),
alignment(garbage), isBackref(garbage), canDedup(garbage)
alignment(garbage), isBackref(garbage), canDedup(garbage), other((Pool*)garbage)
{
}
// Sometimes, when we are adding large values to a pool, the limiting use may change.
@ -127,34 +127,35 @@ struct Pool
}
// By the time this function is called, we'd damn well better know that this is going to succeed.
uint32_t insertEntry(uint8_t *data, BufferOffset off) {
uint32_t insertEntry(uint8_t *data, BufferOffset off, LifoAlloc &LifoAlloc_) {
if (numEntries == buffSize) {
buffSize <<= 1;
poolData = static_cast<uint8_t*>(realloc_(poolData, immSize * numEntries,
immSize * buffSize));
uint8_t *tmp = static_cast<uint8_t*>(LifoAlloc_.alloc(immSize * buffSize));
if (tmp == NULL) {
buffSize = 0;
return -1;
}
memcpy(tmp, poolData, immSize * numEntries);
poolData = tmp;
}
memcpy(&poolData[numEntries * immSize], data, immSize);
loadOffsets.append(off.getOffset());
return numEntries++;
}
bool reset() {
bool reset(LifoAlloc &a) {
numEntries = 0;
buffSize = 8;
poolData = static_cast<uint8_t*>(malloc_(buffSize * immSize));
poolData = static_cast<uint8_t*>(a.alloc(buffSize * immSize));
if (poolData == NULL)
return false;
void *otherSpace = malloc_(sizeof(Pool));
void *otherSpace = a.alloc(sizeof(Pool));
if (otherSpace == NULL)
return false;
other = new (otherSpace) Pool(other->maxOffset, other->immSize, other->instSize,
other->bias, other->alignment, other->isBackref,
other->bias, other->alignment, a, other->isBackref,
other->canDedup);
new (&loadOffsets) LoadOffsets;
@ -215,6 +216,12 @@ struct BufferSliceTail : public BufferSlice<SliceSize> {
int idx = this->nodeSize / InstBaseSize;
isBranch[idx >> 3] |= 1 << (idx & 0x7);
}
bool isNextBranch() {
if (this->nodeSize == InstBaseSize)
return false;
int idx = this->nodeSize / InstBaseSize;
return (isBranch[idx >> 3] >> (idx & 0x7)) & 1;
}
};
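The isBranch bookkeeping behind markNextAsBranch/isNextBranch packs one bit per InstBaseSize-sized instruction; with an illustrative index:

// instruction k == 10 within the slice:
//   byte: isBranch[10 >> 3]   // isBranch[1]
//   bit:  1 << (10 & 0x7)     // 1 << 2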
#if 0
@ -351,8 +358,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
return (BufferSlice**)&this->tail;
}
virtual BufferSlice *newSlice() {
BufferSlice *tmp = static_cast<BufferSlice*>(this->malloc_(sizeof(BufferSlice)));
virtual BufferSlice *newSlice(LifoAlloc &a) {
BufferSlice *tmp = static_cast<BufferSlice*>(a.alloc(sizeof(BufferSlice)));
if (!tmp) {
this->m_oom = true;
return NULL;
@ -378,7 +385,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
// We need to wait until an AutoIonContextAlloc is created by the
// IonMacroAssembler before allocating any space.
void initWithAllocator() {
poolInfo = static_cast<PoolInfo*>(this->calloc_(sizeof(PoolInfo) * (1 << logBasePoolInfo)));
poolInfo = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * (1 << logBasePoolInfo)));
}
const PoolInfo & getInfo(int x) const {
@ -523,7 +530,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
poolOffset += tmp->immSize;
}
}
return p->numEntries + p->other->insertEntry(data, this->nextOffset());
return p->numEntries + p->other->insertEntry(data, this->nextOffset(), this->LifoAlloc_);
}
// Simultaneously insert an instSized instruction into the stream,
@ -580,7 +587,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
if (p == NULL) {
return INT_MIN;
}
return p->insertEntry(data, this->nextOffset());
return p->insertEntry(data, this->nextOffset(), this->LifoAlloc_);
}
BufferOffset putInt(uint32_t value) {
return insertEntry(sizeof(uint32_t) / sizeof(uint8_t), (uint8_t*)&value, NULL, NULL);
@ -675,13 +682,14 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
JS_ASSERT(perforatedNode != NULL);
if (numDumps >= (1<<logBasePoolInfo) && (numDumps & (numDumps-1)) == 0) {
// need to resize.
poolInfo = static_cast<PoolInfo*>(
this->realloc_(poolInfo, sizeof(PoolInfo) * numDumps,
sizeof(PoolInfo) * numDumps * 2));
if (poolInfo == NULL) {
PoolInfo *tmp = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * numDumps * 2));
if (tmp == NULL) {
this->fail_oom();
return;
}
memcpy(tmp, poolInfo, sizeof(PoolInfo) * numDumps);
poolInfo = tmp;
}
// In order to figure out how to fix up the loads for the second half of the pool
@ -774,7 +782,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
}
// bind the current pool to the perforation point.
Pool **tmp = &perforatedNode->data;
*tmp = static_cast<Pool*>(this->malloc_(sizeof(Pool) * numPoolKinds));
*tmp = static_cast<Pool*>(this->LifoAlloc_.alloc(sizeof(Pool) * numPoolKinds));
if (tmp == NULL) {
this->fail_oom();
return;
@ -790,7 +798,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
// reset everything to the state that it was in when we started
for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
if (!pools[poolIdx].reset()) {
if (!pools[poolIdx].reset(this->LifoAlloc_)) {
this->fail_oom();
return;
}
@ -814,7 +822,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
pools[poolIdx].updateLimiter(*iter);
Inst *inst = this->getInst(*iter);
Asm::insertTokenIntoTag(pools[poolIdx].instSize, (uint8_t*)inst, outcasts[poolIdx].end()-1-iter);
pools[poolIdx].insertEntry(&outcastEntries[poolIdx][idx*pools[poolIdx].immSize], *iter);
pools[poolIdx].insertEntry(&outcastEntries[poolIdx][idx*pools[poolIdx].immSize], *iter, this->LifoAlloc_);
}
delete[] outcastEntries[poolIdx];
}
@ -855,12 +863,15 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
IonSpew(IonSpew_Pools, "[%d] No Perforation point selected, generating a new one", id);
// There isn't a perforation here; we need to dump the pool with a guard.
BufferOffset branch = this->nextOffset();
bool shouldMarkAsBranch = this->isNextBranch();
this->markNextAsBranch();
this->putBlob(guardSize, NULL);
BufferOffset afterPool = this->nextOffset();
Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
markGuard();
perforatedNode->isNatural = false;
if (shouldMarkAsBranch)
this->markNextAsBranch();
}
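In instruction-stream terms, the forced dump above lays out roughly as follows (sketch):

//   b afterPool           ; the guard, filled in by writePoolGuard
//   <pool data>           ; spliced in at the perforation
// afterPool:
//   <next instruction>    ; re-marked as a branch iff it was one before the guard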
// We have a perforation. Time to cut the instruction stream, patch in the pool
@ -882,7 +893,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
IonSpew(IonSpew_Pools, "[%d] Pushing entry %d in pool %d into the backwards section.", id, idx, poolIdx);
// insert this into the rear part of the pool.
int offset = idx * p->immSize;
p->other->insertEntry(&p->poolData[offset], BufferOffset(*iter));
p->other->insertEntry(&p->poolData[offset], BufferOffset(*iter), this->LifoAlloc_);
// update the limiting entry for this pool.
p->other->updateLimiter(*iter);
@ -943,7 +954,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
return;
int destOffset = branch.getOffset() + offset;
if (offset > 0) {
while (poolInfo[curpool].offset <= destOffset && curpool < numDumps) {
while (curpool < numDumps && poolInfo[curpool].offset <= destOffset) {
offset += poolInfo[curpool].size;
curpool++;
}
@ -1016,13 +1028,18 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
return &pools[idx];
}
void markNextAsBranch() {
JS_ASSERT(*this->getTail() != NULL);
// If the previously inserted instruction was the last one in this
// node, then the mark belongs on the first instruction of the next
// node; ensureSpace() advances us there when necessary.
this->ensureSpace(InstBaseSize);
JS_ASSERT(*this->getTail() != NULL);
(*this->getTail())->markNextAsBranch();
}
bool isNextBranch() {
JS_ASSERT(*this->getTail() != NULL);
return (*this->getTail())->isNextBranch();
}
int uncheckedSize() const {
PoolInfo pi = getPoolData();
int codeEnd = this->nextOffset().getOffset();

View File

@ -276,6 +276,16 @@ LIRGeneratorShared::useAnyOrConstant(MDefinition *mir)
{
return useRegisterOrConstant(mir);
}
LAllocation
LIRGeneratorShared::useStorable(MDefinition *mir)
{
return useRegister(mir);
}
LAllocation
LIRGeneratorShared::useStorableAtStart(MDefinition *mir)
{
return useRegisterAtStart(mir);
}
LAllocation
LIRGeneratorShared::useAny(MDefinition *mir)
@ -294,6 +304,17 @@ LIRGeneratorShared::useAny(MDefinition *mir)
{
return use(mir);
}
LAllocation
LIRGeneratorShared::useStorable(MDefinition *mir)
{
return useRegisterOrConstant(mir);
}
LAllocation
LIRGeneratorShared::useStorableAtStart(MDefinition *mir)
{
return useRegisterOrConstantAtStart(mir);
}
#endif
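An illustrative call site for the new helpers (the lowering shown is hypothetical): on x86 the stored value may fold into the instruction as a constant, while on ARM it is forced into a register:

// LAsmJSStoreHeap *lir =
//     new LAsmJSStoreHeap(useRegisterAtStart(ins->ptr()),
//                         useStorableAtStart(ins->value()));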
LAllocation

View File

@ -79,6 +79,11 @@ class LIRGeneratorShared : public MInstructionVisitorWithDefaults
// and only registers on ARM.
inline LAllocation useAny(MDefinition *mir);
inline LAllocation useAnyOrConstant(MDefinition *mir);
// "Storable" is architecture dependend, and will include registers and constants on X86
// and only registers on ARM.
// this is a generic "things we can expect to write into memory in 1 instruction"
inline LAllocation useStorable(MDefinition *mir);
inline LAllocation useStorableAtStart(MDefinition *mir);
inline LAllocation useKeepaliveOrConstant(MDefinition *mir);
inline LAllocation useRegisterOrConstant(MDefinition *mir);
inline LAllocation useRegisterOrConstantAtStart(MDefinition *mir);

View File

@ -517,6 +517,10 @@ class MacroAssemblerX86Shared : public Assembler
CodeOffsetLabel labelForPatch() {
return CodeOffsetLabel(size());
}
void abiret() {
ret();
}
};
} // namespace ion

View File

@ -160,8 +160,10 @@ static const Register PreBarrierReg = rdx;
// jitted code.
static const uint32_t StackAlignment = 16;
static const bool StackKeptAligned = false;
static const uint32_t CodeAlignment = 8;
static const uint32_t NativeFrameSize = sizeof(void*);
static const uint32_t AlignmentAtPrologue = sizeof(void*);
static const uint32_t AlignmentMidPrologue = AlignmentAtPrologue;
static const Scale ScalePointer = TimesEight;

View File

@ -1013,6 +1013,11 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
uint8_t *target = code + codeBytes + globalDataOffset;
((int32_t *)nextInsn)[-1] = target - nextInsn;
}
void memIntToValue(Address Source, Address Dest) {
load32(Source, ScratchReg);
storeValue(JSVAL_TYPE_INT32, ScratchReg, Dest);
}
};
typedef MacroAssemblerX64 MacroAssemblerSpecific;

View File

@ -89,9 +89,10 @@ static const uint32_t StackAlignment = 16;
static const uint32_t StackAlignment = 4;
#endif
static const bool StackKeptAligned = false;
static const uint32_t CodeAlignment = 8;
static const uint32_t NativeFrameSize = sizeof(void*);
static const uint32_t AlignmentAtPrologue = sizeof(void*);
static const uint32_t AlignmentMidPrologue = AlignmentAtPrologue;
struct ImmTag : public Imm32
{
ImmTag(JSValueTag mask)