Mirror of https://gitlab.winehq.org/wine/wine-gecko.git, synced 2024-09-13 09:24:08 -07:00

Bug 992267: Ensure stack alignment requirements for asm.js code; r=bbouvier

--HG--
extra : rebase_source : 33ca16407cd8b0bd1ca84075dbbb0acbb4272bf6

This commit is contained in:
parent 157738edd8
commit ecea3f5f6c
@@ -356,11 +356,11 @@ js::GenerateAsmJSStackOverflowExit(MacroAssembler &masm, Label *overflowExit, La
     masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfFP()));
 
     // Prepare the stack for calling C++.
-    if (unsigned stackDec = StackDecrementForCall(sizeof(AsmJSFrame), ShadowStackSpace))
-        masm.subPtr(Imm32(stackDec), StackPointer);
+    if (uint32_t d = StackDecrementForCall(ABIStackAlignment, sizeof(AsmJSFrame), ShadowStackSpace))
+        masm.subPtr(Imm32(d), StackPointer);
 
     // No need to restore the stack; the throw stub pops everything.
-    masm.assertStackAlignment();
+    masm.assertStackAlignment(ABIStackAlignment);
     masm.call(AsmJSImmPtr(AsmJSImm_ReportOverRecursed));
     masm.jump(throwLabel);
 }
@@ -170,7 +170,6 @@ void
 GenerateAsmJSExitEpilogue(jit::MacroAssembler &masm, unsigned framePushed, AsmJSExit::Reason reason,
                           jit::Label *profilingReturn);
 
-
 } // namespace js
 
 #endif // asmjs_AsmJSFrameIterator_h
@@ -2283,7 +2283,7 @@ class FunctionCompiler
         uint32_t parentStackBytes = call->abi_.stackBytesConsumedSoFar();
         uint32_t newStackBytes;
         if (call->childClobbers_) {
-            call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, StackAlignment);
+            call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, AsmJSStackAlignment);
             for (unsigned i = 0; i < call->stackArgs_.length(); i++)
                 call->stackArgs_[i]->incrementOffset(call->spIncrement_);
             newStackBytes = Max(call->prevMaxStackBytes_,
@@ -5936,16 +5936,16 @@ CheckModuleReturn(ModuleCompiler &m)
 }
 
 static void
-AssertStackAlignment(MacroAssembler &masm)
+AssertStackAlignment(MacroAssembler &masm, uint32_t alignment)
 {
-    JS_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % StackAlignment == 0);
-    masm.assertStackAlignment();
+    JS_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % alignment == 0);
+    masm.assertStackAlignment(alignment);
 }
 
 static unsigned
-StackDecrementForCall(MacroAssembler &masm, unsigned bytesToPush)
+StackDecrementForCall(MacroAssembler &masm, uint32_t alignment, unsigned bytesToPush)
 {
-    return StackDecrementForCall(sizeof(AsmJSFrame) + masm.framePushed(), bytesToPush);
+    return StackDecrementForCall(alignment, sizeof(AsmJSFrame) + masm.framePushed(), bytesToPush);
 }
 
 template <class VectorT>
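A note on the new two-argument helper: it asserts that the frame header plus everything pushed since the prologue lands on a multiple of the requested alignment. A minimal standalone sketch with made-up sizes (8-byte frame header, 24 bytes pushed, 16-byte alignment; none of these are the real SpiderMonkey values):

#include <cassert>
#include <cstdint>

// Illustrative stand-in for the AssertStackAlignment(masm, alignment) above:
// the invariant is (sizeof(AsmJSFrame) + masm.framePushed()) % alignment == 0.
static void AssertStackAlignment(uint32_t frameHeaderBytes, uint32_t framePushed,
                                 uint32_t alignment)
{
    assert((frameHeaderBytes + framePushed) % alignment == 0);
}

int main()
{
    AssertStackAlignment(8, 24, 16); // 8 + 24 = 32, a multiple of 16: holds
    return 0;
}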
@@ -5960,9 +5960,10 @@ StackArgBytes(const VectorT &argTypes)
 
 template <class VectorT>
 static unsigned
-StackDecrementForCall(MacroAssembler &masm, const VectorT &argTypes, unsigned extraBytes = 0)
+StackDecrementForCall(MacroAssembler &masm, uint32_t alignment, const VectorT &argTypes,
+                      unsigned extraBytes = 0)
 {
-    return StackDecrementForCall(masm, StackArgBytes(argTypes) + extraBytes);
+    return StackDecrementForCall(masm, alignment, StackArgBytes(argTypes) + extraBytes);
 }
 
 #if defined(JS_CODEGEN_ARM)
@@ -5988,6 +5989,7 @@ static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * siz
 static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
                                              NonVolatileRegs.fpus().getPushSizeInBytes();
 #endif
+static const unsigned FramePushedForEntrySP = FramePushedAfterSave + sizeof(void*);
 
 static bool
 GenerateEntry(ModuleCompiler &m, unsigned exportIndex)
@@ -5998,17 +6000,18 @@ GenerateEntry(ModuleCompiler &m, unsigned exportIndex)
     masm.align(CodeAlignment);
     masm.bind(&begin);
 
+    // Save the return address if it wasn't already saved by the call insn.
 #if defined(JS_CODEGEN_ARM)
     masm.push(lr);
 #elif defined(JS_CODEGEN_MIPS)
     masm.push(ra);
+#elif defined(JS_CODEGEN_X86)
+    static const unsigned EntryFrameSize = sizeof(void*);
 #endif
-    masm.subPtr(Imm32(AsmJSFrameBytesAfterReturnAddress), StackPointer);
-    masm.setFramePushed(0);
 
-    // In constrast to the system ABI, the Ion convention is that all registers
-    // are clobbered by calls. Thus, we must save the caller's non-volatile
-    // registers.
+    // Save all caller non-volatile registers before we clobber them here and in
+    // the asm.js callee (which does not preserve non-volatile registers).
+    masm.setFramePushed(0);
     masm.PushRegsInMask(NonVolatileRegs);
     JS_ASSERT(masm.framePushed() == FramePushedAfterSave);
 
@@ -6026,29 +6029,36 @@ GenerateEntry(ModuleCompiler &m, unsigned exportIndex)
     masm.loadPtr(Address(IntArgReg1, AsmJSModule::heapGlobalDataOffset()), HeapReg);
 #endif
 
-    // Remember the stack pointer in the current AsmJSActivation. This will be
-    // used by error exit paths to set the stack pointer back to what it was
-    // right after the (C++) caller's non-volatile registers were saved so that
-    // they can be restored.
-    Register activation = ABIArgGenerator::NonArgReturnReg0;
-    masm.loadAsmJSActivation(activation);
-    masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfErrorRejoinSP()));
-
-    // Get 'argv' into a non-arg register and save it on the stack.
+    // Put the 'argv' argument into a non-argument/return register so that we
+    // can use 'argv' while we fill in the arguments for the asm.js callee.
+    // Also, save 'argv' on the stack so that we can recover it after the call.
+    // Use a second non-argument/return register as temporary scratch.
     Register argv = ABIArgGenerator::NonArgReturnReg0;
     Register scratch = ABIArgGenerator::NonArgReturnReg1;
 #if defined(JS_CODEGEN_X86)
-    masm.loadPtr(Address(StackPointer, sizeof(AsmJSFrame) + masm.framePushed()), argv);
+    masm.loadPtr(Address(StackPointer, EntryFrameSize + masm.framePushed()), argv);
 #else
     masm.movePtr(IntArgReg0, argv);
 #endif
     masm.Push(argv);
 
+    // Save the stack pointer to the saved non-volatile registers. We will use
+    // this on two paths: normal return and exceptional return. Since
+    // loadAsmJSActivation uses GlobalReg, we must do this after loading
+    // GlobalReg.
+    JS_ASSERT(masm.framePushed() == FramePushedForEntrySP);
+    masm.loadAsmJSActivation(scratch);
+    masm.storePtr(StackPointer, Address(scratch, AsmJSActivation::offsetOfEntrySP()));
+
+    // Dynamically align the stack since ABIStackAlignment is not necessarily
+    // AsmJSStackAlignment. We'll use entrySP to recover the original stack
+    // pointer on return.
+    masm.andPtr(Imm32(~(AsmJSStackAlignment - 1)), StackPointer);
+
     // Bump the stack for the call.
     PropertyName *funcName = m.module().exportedFunction(exportIndex).name();
     const ModuleCompiler::Func &func = *m.lookupFunction(funcName);
-    unsigned stackDec = StackDecrementForCall(masm, func.sig().args());
-    masm.reserveStack(stackDec);
+    masm.reserveStack(AlignBytes(StackArgBytes(func.sig().args()), AsmJSStackAlignment));
 
     // Copy parameters out of argv and into the registers/stack-slots specified by
     // the system ABI.
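The key new step here is the dynamic alignment: masking StackPointer with ~(AsmJSStackAlignment - 1) rounds it down to the next multiple of the (power-of-two) alignment, and the entrySP word saved just before makes the pre-alignment value recoverable on both the normal and exceptional return paths. A small sketch with a pretend stack pointer (values illustrative only, not real addresses):

#include <cassert>
#include <cstdint>
#include <cstdio>

int main()
{
    const uintptr_t AsmJSStackAlignment = 16; // illustrative power of two
    uintptr_t sp = 0x7fff0a2c;                // pretend unaligned stack pointer
    uintptr_t entrySP = sp;                   // saved to the activation first
    sp &= ~(AsmJSStackAlignment - 1);         // the andPtr(...) step above
    assert(sp % AsmJSStackAlignment == 0 && sp <= entrySP);
    printf("aligned %#lx down to %#lx\n",
           (unsigned long) entrySP, (unsigned long) sp);
    sp = entrySP;                             // epilogue: reload entrySP
    assert(sp == entrySP);
    return 0;
}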
@@ -6084,12 +6094,15 @@ GenerateEntry(ModuleCompiler &m, unsigned exportIndex)
     }
 
     // Call into the real function.
-    AssertStackAlignment(masm);
+    masm.assertStackAlignment(AsmJSStackAlignment);
     masm.call(CallSiteDesc(CallSiteDesc::Relative), &func.entry());
 
-    // Pop the stack and recover the original 'argv' argument passed to the
-    // trampoline (which was pushed on the stack).
-    masm.freeStack(stackDec);
+    // Recover the stack pointer value before dynamic alignment.
+    masm.loadAsmJSActivation(scratch);
+    masm.loadPtr(Address(scratch, AsmJSActivation::offsetOfEntrySP()), StackPointer);
+    masm.setFramePushed(FramePushedForEntrySP);
+
+    // Recover the 'argv' pointer which was saved before aligning the stack.
     masm.Pop(argv);
 
     // Store the return value in argv[0]
@@ -6113,7 +6126,6 @@ GenerateEntry(ModuleCompiler &m, unsigned exportIndex)
     JS_ASSERT(masm.framePushed() == 0);
 
     masm.move32(Imm32(true), ReturnReg);
-    masm.addPtr(Imm32(AsmJSFrameBytesAfterReturnAddress), StackPointer);
     masm.ret();
 
     return m.finishGeneratingEntry(exportIndex, &begin) && !masm.oom();
@@ -6177,7 +6189,7 @@ GenerateFFIInterpExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &e
     // padding between argv and retaddr ensures that sp is aligned.
     unsigned offsetToArgv = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
     unsigned argvBytes = Max<size_t>(1, exit.sig().args().length()) * sizeof(Value);
-    unsigned framePushed = StackDecrementForCall(masm, offsetToArgv + argvBytes);
+    unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, offsetToArgv + argvBytes);
 
     Label begin;
     GenerateAsmJSExitPrologue(masm, framePushed, AsmJSExit::SlowFFI, &begin);
@@ -6217,7 +6229,7 @@ GenerateFFIInterpExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &e
     JS_ASSERT(i.done());
 
     // Make the call, test whether it succeeded, and extract the return value.
-    AssertStackAlignment(masm);
+    AssertStackAlignment(masm, ABIStackAlignment);
     switch (exit.sig().retType().which()) {
       case RetType::Void:
         masm.call(AsmJSImmPtr(AsmJSImm_InvokeFromAsmJS_Ignore));
@@ -6279,7 +6291,7 @@ GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit
     unsigned offsetToIonArgs = MaybeRetAddr;
     unsigned ionArgBytes = 3 * sizeof(size_t) + (1 + exit.sig().args().length()) * sizeof(Value);
     unsigned totalIonBytes = offsetToIonArgs + ionArgBytes + savedRegBytes;
-    unsigned ionFrameSize = StackDecrementForCall(masm, totalIonBytes);
+    unsigned ionFrameSize = StackDecrementForCall(masm, AsmJSStackAlignment, totalIonBytes);
 
     // Coercion calls use the following stack layout (sp grows to the left):
     //   | stack args | padding | Value argv[1] | ...
@@ -6288,7 +6300,7 @@ GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit
     coerceArgTypes.infallibleAppend(MIRType_Pointer); // argv
     unsigned offsetToCoerceArgv = AlignBytes(StackArgBytes(coerceArgTypes), sizeof(double));
     unsigned totalCoerceBytes = offsetToCoerceArgv + sizeof(Value) + savedRegBytes;
-    unsigned coerceFrameSize = StackDecrementForCall(masm, totalCoerceBytes);
+    unsigned coerceFrameSize = StackDecrementForCall(masm, AsmJSStackAlignment, totalCoerceBytes);
 
     unsigned framePushed = Max(ionFrameSize, coerceFrameSize);
 
@@ -6389,9 +6401,9 @@ GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit
     }
 
     // 2. Call
-    AssertStackAlignment(masm);
+    AssertStackAlignment(masm, AsmJSStackAlignment);
     masm.callIonFromAsmJS(callee);
-    AssertStackAlignment(masm);
+    AssertStackAlignment(masm, AsmJSStackAlignment);
 
     {
         // Disable Activation.
@@ -6474,7 +6486,7 @@ GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit
     JS_ASSERT(i.done());
 
     // Call coercion function
-    AssertStackAlignment(masm);
+    AssertStackAlignment(masm, ABIStackAlignment);
     switch (exit.sig().retType().which()) {
       case RetType::Signed:
         masm.call(AsmJSImmPtr(AsmJSImm_CoerceInPlace_ToInt32));
@@ -6569,7 +6581,7 @@ GenerateBuiltinThunk(ModuleCompiler &m, AsmJSExit::BuiltinKind builtin)
         MOZ_CRASH("Bad builtin");
     }
 
-    uint32_t framePushed = StackDecrementForCall(masm, argTypes);
+    uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, argTypes);
 
     Label begin;
     GenerateAsmJSExitPrologue(masm, framePushed, AsmJSExit::Builtin(builtin), &begin);
@@ -6594,7 +6606,7 @@ GenerateBuiltinThunk(ModuleCompiler &m, AsmJSExit::BuiltinKind builtin)
 #endif
     }
 
-    AssertStackAlignment(masm);
+    AssertStackAlignment(masm, ABIStackAlignment);
     masm.call(BuiltinToImmKind(builtin));
 
     Label profilingReturn;
@@ -6649,11 +6661,11 @@ GenerateAsyncInterruptExit(ModuleCompiler &m, Label *throwLabel)
     // We know that StackPointer is word-aligned, but not necessarily
     // stack-aligned, so we need to align it dynamically.
     masm.mov(StackPointer, ABIArgGenerator::NonVolatileReg);
-    masm.andPtr(Imm32(~(StackAlignment - 1)), StackPointer);
+    masm.andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer);
     if (ShadowStackSpace)
         masm.subPtr(Imm32(ShadowStackSpace), StackPointer);
 
-    masm.assertStackAlignment();
+    masm.assertStackAlignment(ABIStackAlignment);
     masm.call(AsmJSImmPtr(AsmJSImm_HandleExecutionInterrupt));
 
     masm.branchIfFalseBool(ReturnReg, throwLabel);
@@ -6676,7 +6688,7 @@ GenerateAsyncInterruptExit(ModuleCompiler &m, Label *throwLabel)
     // Save the stack pointer in a non-volatile register.
     masm.movePtr(StackPointer, s0);
     // Align the stack.
-    masm.ma_and(StackPointer, StackPointer, Imm32(~(StackAlignment - 1)));
+    masm.ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
 
     // Store resumePC into the reserved space.
     masm.loadAsmJSActivation(IntArgReg0);
@@ -6686,7 +6698,7 @@ GenerateAsyncInterruptExit(ModuleCompiler &m, Label *throwLabel)
     // MIPS ABI requires rewserving stack for registes $a0 to $a3.
     masm.subPtr(Imm32(4 * sizeof(intptr_t)), StackPointer);
 
-    masm.assertStackAlignment();
+    masm.assertStackAlignment(ABIStackAlignment);
     masm.call(AsmJSImm_HandleExecutionInterrupt);
 
     masm.addPtr(Imm32(4 * sizeof(intptr_t)), StackPointer);
@@ -6723,7 +6735,7 @@ GenerateAsyncInterruptExit(ModuleCompiler &m, Label *throwLabel)
 
     masm.PushRegsInMask(RegisterSet(GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask)));   // save all FP registers
 
-    masm.assertStackAlignment();
+    masm.assertStackAlignment(ABIStackAlignment);
     masm.call(AsmJSImm_HandleExecutionInterrupt);
 
     masm.branchIfFalseBool(ReturnReg, throwLabel);
@@ -6767,11 +6779,11 @@ GenerateSyncInterruptExit(ModuleCompiler &m, Label *throwLabel)
     MacroAssembler &masm = m.masm();
     masm.setFramePushed(0);
 
-    unsigned framePushed = StackDecrementForCall(masm, ShadowStackSpace);
+    unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, ShadowStackSpace);
 
     GenerateAsmJSExitPrologue(masm, framePushed, AsmJSExit::Interrupt, &m.syncInterruptLabel());
 
-    AssertStackAlignment(masm);
+    AssertStackAlignment(masm, ABIStackAlignment);
     masm.call(AsmJSImmPtr(AsmJSImm_HandleExecutionInterrupt));
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
@@ -6795,17 +6807,17 @@ GenerateThrowStub(ModuleCompiler &m, Label *throwLabel)
     // We are about to pop all frames in this AsmJSActivation. Set fp to null to
     // maintain the invariant that fp is either null or pointing to a valid
     // frame.
-    Register activation = ABIArgGenerator::NonArgReturnReg0;
-    masm.loadAsmJSActivation(activation);
-    masm.storePtr(ImmWord(0), Address(activation, AsmJSActivation::offsetOfFP()));
+    Register scratch = ABIArgGenerator::NonArgReturnReg0;
+    masm.loadAsmJSActivation(scratch);
+    masm.storePtr(ImmWord(0), Address(scratch, AsmJSActivation::offsetOfFP()));
 
-    masm.setFramePushed(FramePushedAfterSave);
-    masm.loadPtr(Address(activation, AsmJSActivation::offsetOfErrorRejoinSP()), StackPointer);
+    masm.setFramePushed(FramePushedForEntrySP);
+    masm.loadPtr(Address(scratch, AsmJSActivation::offsetOfEntrySP()), StackPointer);
+    masm.Pop(scratch);
     masm.PopRegsInMask(NonVolatileRegs);
     JS_ASSERT(masm.framePushed() == 0);
 
     masm.mov(ImmWord(0), ReturnReg);
-    masm.addPtr(Imm32(AsmJSFrameBytesAfterReturnAddress), StackPointer);
     masm.ret();
 
     return m.finishGeneratingInlineStub(throwLabel) && !masm.oom();
@@ -144,7 +144,7 @@ NativeRegExpMacroAssembler::GenerateCode(JSContext *cx)
 #endif
 
     size_t frameSize = sizeof(FrameData) + num_registers_ * sizeof(void *);
-    frameSize = JS_ROUNDUP(frameSize + masm.framePushed(), StackAlignment) - masm.framePushed();
+    frameSize = JS_ROUNDUP(frameSize + masm.framePushed(), ABIStackAlignment) - masm.framePushed();
 
     // Actually emit code to start a new stack frame.
     masm.reserveStack(frameSize);
@@ -8822,12 +8822,15 @@ CodeGenerator::visitAsmJSCall(LAsmJSCall *ins)
     if (mir->spIncrement())
         masm.freeStack(mir->spIncrement());
 
-    JS_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % StackAlignment == 0);
+    JS_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % AsmJSStackAlignment == 0);
 
 #ifdef DEBUG
+    static_assert(AsmJSStackAlignment >= ABIStackAlignment,
+                  "The asm.js stack alignment should subsume the ABI-required alignment");
+    static_assert(AsmJSStackAlignment % ABIStackAlignment == 0,
+                  "The asm.js stack alignment should subsume the ABI-required alignment");
     Label ok;
-    JS_ASSERT(IsPowerOfTwo(StackAlignment));
-    masm.branchTestPtr(Assembler::Zero, StackPointer, Imm32(StackAlignment - 1), &ok);
+    masm.branchTestPtr(Assembler::Zero, StackPointer, Imm32(AsmJSStackAlignment - 1), &ok);
     masm.breakpoint();
     masm.bind(&ok);
 #endif
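The two static_asserts encode a subsumption property: because AsmJSStackAlignment is a power-of-two multiple of ABIStackAlignment, any stack pointer aligned for asm.js is automatically aligned for the native ABI, so a single prologue adjustment satisfies both requirements. A sketch with illustrative constants (not the values of any particular platform):

#include <cassert>
#include <cstdint>

static const uint32_t AsmJSStackAlignment = 16; // illustrative values only
static const uint32_t ABIStackAlignment = 4;
static_assert(AsmJSStackAlignment % ABIStackAlignment == 0,
              "the asm.js stack alignment should subsume the ABI-required alignment");

int main()
{
    for (uintptr_t sp = 0; sp <= 4096; sp += AsmJSStackAlignment)
        assert(sp % ABIStackAlignment == 0); // holds for every asm.js-aligned sp
    return 0;
}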
@@ -9066,7 +9069,7 @@ CodeGenerator::visitAsmJSInterruptCheck(LAsmJSInterruptCheck *lir)
     masm.branch32(Assembler::Equal, scratch, Imm32(0), &rejoin);
     {
         uint32_t stackFixup = ComputeByteAlignment(masm.framePushed() + sizeof(AsmJSFrame),
-                                                   StackAlignment);
+                                                   ABIStackAlignment);
         masm.reserveStack(stackFixup);
         masm.call(lir->funcDesc(), lir->interruptExit());
         masm.freeStack(stackFixup);
@@ -1075,7 +1075,7 @@ uint8_t *
 alignDoubleSpillWithOffset(uint8_t *pointer, int32_t offset)
 {
     uint32_t address = reinterpret_cast<uint32_t>(pointer);
-    address = (address - offset) & ~(StackAlignment - 1);
+    address = (address - offset) & ~(ABIStackAlignment - 1);
     return reinterpret_cast<uint8_t *>(address);
 }
 
@@ -1426,11 +1426,11 @@ class MacroAssembler : public MacroAssemblerSpecific
         PopRegsInMask(liveRegs);
     }
 
-    void assertStackAlignment() {
+    void assertStackAlignment(uint32_t alignment) {
 #ifdef DEBUG
         Label ok;
-        JS_ASSERT(IsPowerOfTwo(StackAlignment));
-        branchTestPtr(Assembler::Zero, StackPointer, Imm32(StackAlignment - 1), &ok);
+        JS_ASSERT(IsPowerOfTwo(alignment));
+        branchTestPtr(Assembler::Zero, StackPointer, Imm32(alignment - 1), &ok);
         breakpoint();
         bind(&ok);
 #endif
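The assertStackAlignment body relies on a standard bit trick: for a power-of-two alignment, the stack pointer is aligned exactly when its low bits, sp & (alignment - 1), are all zero, hence the JS_ASSERT(IsPowerOfTwo(alignment)) guard. In plain C++ (a standalone rendition, not SpiderMonkey code):

#include <cassert>
#include <cstdint>

static bool IsPowerOfTwo(uint32_t x)
{
    return x != 0 && (x & (x - 1)) == 0;
}

// Mirrors the branchTestPtr(Assembler::Zero, StackPointer, alignment - 1)
// check emitted above.
static bool IsStackAligned(uintptr_t sp, uint32_t alignment)
{
    assert(IsPowerOfTwo(alignment));
    return (sp & (alignment - 1)) == 0;
}

int main()
{
    assert(IsStackAligned(0x1000, 16));
    assert(!IsStackAligned(0x1008, 16));
    assert(IsStackAligned(0x1008, 8)); // still fine for a weaker requirement
    return 0;
}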
@@ -1508,10 +1508,10 @@ JSOpToCondition(JSOp op, bool isSigned)
 }
 
 static inline size_t
-StackDecrementForCall(size_t bytesAlreadyPushed, size_t bytesToPush)
+StackDecrementForCall(uint32_t alignment, size_t bytesAlreadyPushed, size_t bytesToPush)
 {
     return bytesToPush +
-           ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, StackAlignment);
+           ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
 }
 
 } // namespace jit
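For reference, a standalone rendition of this arithmetic (outside SpiderMonkey; ComputeByteAlignment is reproduced here under the same contract): the decrement is the bytes about to be pushed plus whatever padding makes the running total a multiple of the alignment.

#include <cassert>
#include <cstdint>
#include <cstdio>

static uint32_t ComputeByteAlignment(uint32_t bytes, uint32_t alignment)
{
    return (alignment - bytes % alignment) % alignment;
}

static uint32_t StackDecrementForCall(uint32_t alignment, uint32_t bytesAlreadyPushed,
                                      uint32_t bytesToPush)
{
    return bytesToPush + ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
}

int main()
{
    // 8 bytes already pushed, 20 bytes of outgoing arguments, 16-byte
    // alignment: decrement = 20 + 4 bytes of padding, and 8 + 24 = 32 is a
    // multiple of 16.
    uint32_t dec = StackDecrementForCall(16, 8, 20);
    printf("decrement = %u\n", dec);
    assert((8 + dec) % 16 == 0);
    return 0;
}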
@@ -1586,10 +1586,10 @@ class LIRGraph
     // platform stack alignment requirement, and so that it's a multiple of
     // the number of slots per Value.
     uint32_t paddedLocalSlotCount() const {
-        // Round to StackAlignment, but also round to at least sizeof(Value) in
-        // case that's greater, because StackOffsetOfPassedArg rounds argument
-        // slots to 8-byte boundaries.
-        size_t Alignment = Max(size_t(StackAlignment), sizeof(Value));
+        // Round to ABIStackAlignment, but also round to at least sizeof(Value)
+        // in case that's greater, because StackOffsetOfPassedArg rounds
+        // argument slots to 8-byte boundaries.
+        size_t Alignment = Max(size_t(ABIStackAlignment), sizeof(Value));
         return AlignBytes(localSlotCount(), Alignment);
     }
     size_t paddedLocalSlotsSize() const {
@@ -145,9 +145,8 @@ static MOZ_CONSTEXPR_VAR FloatRegister d15 = {FloatRegisters::d15, VFPRegister::
 // load/store) operate in a single cycle when the address they are dealing with
 // is 8 byte aligned. Also, the ARM abi wants the stack to be 8 byte aligned at
 // function boundaries. I'm trying to make sure this is always true.
-static const uint32_t StackAlignment = 8;
+static const uint32_t ABIStackAlignment = 8;
 static const uint32_t CodeAlignment = 8;
 static const bool StackKeptAligned = true;
 
 // This boolean indicates whether we support SIMD instructions flavoured for
 // this architecture or not. Rather than a method in the LIRGenerator, it is
@@ -156,6 +155,8 @@ static const bool StackKeptAligned = true;
 static const bool SupportsSimd = false;
 static const uint32_t SimdStackAlignment = 8;
 
+static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
+
 static const Scale ScalePointer = TimesFour;
 
 class Instruction;
@@ -3778,7 +3778,7 @@ MacroAssemblerARMCompat::setupUnalignedABICall(uint32_t args, Register scratch)
     ma_mov(sp, scratch);
 
     // Force sp to be aligned.
-    ma_and(Imm32(~(StackAlignment - 1)), sp, sp);
+    ma_and(Imm32(~(ABIStackAlignment - 1)), sp, sp);
     ma_push(scratch);
 }
 
@@ -3937,7 +3937,7 @@ MacroAssemblerARMCompat::passABIArg(FloatRegister freg, MoveOp::Type type)
 void MacroAssemblerARMCompat::checkStackAlignment()
 {
 #ifdef DEBUG
-    ma_tst(sp, Imm32(StackAlignment - 1));
+    ma_tst(sp, Imm32(ABIStackAlignment - 1));
     breakpoint(NonZero);
 #endif
 }
@@ -3956,11 +3956,11 @@ MacroAssemblerARMCompat::callWithABIPre(uint32_t *stackAdjust, bool callFromAsmJ
 
     if (!dynamicAlignment_) {
         *stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust + alignmentAtPrologue,
-                                             StackAlignment);
+                                             ABIStackAlignment);
     } else {
         // sizeof(intptr_t) accounts for the saved stack pointer pushed by
         // setupUnalignedABICall.
-        *stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), StackAlignment);
+        *stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), ABIStackAlignment);
     }
 
     reserveStack(*stackAdjust);
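These ARM hunks implement the dynamic-alignment protocol for unaligned ABI calls: save the old sp in a scratch register, mask sp down to ABIStackAlignment, push the saved value, then have callWithABIPre pad around that extra pushed word. A 32-bit sketch (not SpiderMonkey code; the address is illustrative):

#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t ABIStackAlignment = 8; // ARM ABI value from the hunk above
    const uint32_t WordSize = 4;          // one 32-bit register push
    uint32_t sp = 0x7ffe1234;             // pretend unaligned stack pointer
    uint32_t scratch = sp;                // ma_mov(sp, scratch)
    sp &= ~(ABIStackAlignment - 1);       // ma_and(~(ABIStackAlignment - 1), sp, sp)
    sp -= WordSize;                       // ma_push(scratch)
    // callWithABIPre's ComputeByteAlignment(stackAdjust + sizeof(intptr_t), ...)
    // term: with no argument bytes, the saved word alone costs 4 bytes of
    // padding here so the call site is aligned again.
    uint32_t pad = (ABIStackAlignment - WordSize % ABIStackAlignment) % ABIStackAlignment;
    sp -= pad;
    assert(sp % ABIStackAlignment == 0);
    (void) scratch;
    return 0;
}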
@@ -2117,7 +2117,7 @@ Simulator::softwareInterrupt(SimInstruction *instr)
     int32_t saved_lr = get_register(lr);
     intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
 
-    bool stack_aligned = (get_register(sp) & (StackAlignment - 1)) == 0;
+    bool stack_aligned = (get_register(sp) & (ABIStackAlignment - 1)) == 0;
     if (!stack_aligned) {
         fprintf(stderr, "Runtime call with unaligned stack!\n");
         MOZ_CRASH();
@@ -4258,7 +4258,7 @@ Simulator::call(uint8_t* entry, int argument_count, ...)
     if (argument_count >= 4)
         entry_stack -= (argument_count - 4) * sizeof(int32_t);
 
-    entry_stack &= ~StackAlignment;
+    entry_stack &= ~ABIStackAlignment;
 
     // Store remaining arguments on stack, from low to high memory.
     intptr_t *stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
@@ -158,9 +158,8 @@ static MOZ_CONSTEXPR_VAR FloatRegister f30 = { FloatRegisters::f30, FloatRegiste
 
 // MIPS CPUs can only load multibyte data that is "naturally"
 // four-byte-aligned, sp register should be eight-byte-aligned.
-static const uint32_t StackAlignment = 8;
+static const uint32_t ABIStackAlignment = 8;
 static const uint32_t CodeAlignment = 4;
 static const bool StackKeptAligned = true;
 
 // This boolean indicates whether we support SIMD instructions flavoured for
 // this architecture or not. Rather than a method in the LIRGenerator, it is
@@ -171,6 +170,8 @@ static const bool SupportsSimd = false;
 // alignment requirements still need to be explored.
 static const uint32_t SimdStackAlignment = 8;
 
+static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
+
 static const Scale ScalePointer = TimesFour;
 
 // MIPS instruction types
@@ -238,7 +239,6 @@ static const uint32_t RDMask = ((1 << RDBits) - 1) << RDShift;
 static const uint32_t SAMask = ((1 << SABits) - 1) << SAShift;
 static const uint32_t FunctionMask = ((1 << FunctionBits) - 1) << FunctionShift;
 static const uint32_t RegMask = Registers::Total - 1;
-static const uint32_t StackAlignmentMask = StackAlignment - 1;
 
 static const uint32_t MAX_BREAK_CODE = 1024 - 1;
 
@@ -1574,7 +1574,7 @@ MacroAssembler::PushRegsInMask(RegisterSet set, FloatRegisterSet simdSet)
     // Double values have to be aligned. We reserve extra space so that we can
     // start writing from the first aligned location.
     // We reserve a whole extra double so that the buffer has even size.
-    ma_and(SecondScratchReg, sp, Imm32(~(StackAlignment - 1)));
+    ma_and(SecondScratchReg, sp, Imm32(~(ABIStackAlignment - 1)));
     reserveStack(diffF + sizeof(double));
 
     for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
@@ -1596,7 +1596,7 @@ MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore, FloatRe
 
     // Read the buffer form the first aligned location.
     ma_addu(SecondScratchReg, sp, Imm32(reservedF + sizeof(double)));
-    ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(StackAlignment - 1)));
+    ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(ABIStackAlignment - 1)));
 
     for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
         if (!ignore.has(*iter) && ((*iter).code() % 2 == 0))
@@ -3158,7 +3158,7 @@ MacroAssemblerMIPSCompat::setupUnalignedABICall(uint32_t args, Register scratch)
 
     // Force sp to be aligned
     ma_subu(StackPointer, StackPointer, Imm32(sizeof(uint32_t)));
-    ma_and(StackPointer, StackPointer, Imm32(~(StackAlignment - 1)));
+    ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
     as_sw(scratch, StackPointer, 0);
 }
 
@@ -3259,7 +3259,7 @@ MacroAssemblerMIPSCompat::checkStackAlignment()
 {
 #ifdef DEBUG
     Label aligned;
-    as_andi(ScratchRegister, sp, StackAlignment - 1);
+    as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
     ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
     as_break(MAX_BREAK_CODE);
     bind(&aligned);
@@ -3271,7 +3271,7 @@ MacroAssemblerMIPSCompat::alignStackPointer()
 {
     movePtr(StackPointer, SecondScratchReg);
     subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
-    andPtr(Imm32(~(StackAlignment - 1)), StackPointer);
+    andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer);
     storePtr(SecondScratchReg, Address(StackPointer, 0));
 }
 
@@ -3284,13 +3284,13 @@ MacroAssemblerMIPSCompat::restoreStackPointer()
 void
 MacroAssembler::alignFrameForICArguments(AfterICSaveLive &aic)
 {
-    if (framePushed() % StackAlignment != 0) {
-        aic.alignmentPadding = StackAlignment - (framePushed() % StackAlignment);
+    if (framePushed() % ABIStackAlignment != 0) {
+        aic.alignmentPadding = ABIStackAlignment - (framePushed() % StackAlignment);
         reserveStack(aic.alignmentPadding);
     } else {
         aic.alignmentPadding = 0;
     }
-    MOZ_ASSERT(framePushed() % StackAlignment == 0);
+    MOZ_ASSERT(framePushed() % ABIStackAlignment == 0);
     checkStackAlignment();
 }
 
@@ -3316,10 +3316,10 @@ MacroAssemblerMIPSCompat::callWithABIPre(uint32_t *stackAdjust, bool callFromAsm
     uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
 
     if (dynamicAlignment_) {
-        *stackAdjust += ComputeByteAlignment(*stackAdjust, StackAlignment);
+        *stackAdjust += ComputeByteAlignment(*stackAdjust, ABIStackAlignment);
     } else {
         *stackAdjust += ComputeByteAlignment(framePushed_ + alignmentAtPrologue + *stackAdjust,
-                                             StackAlignment);
+                                             ABIStackAlignment);
     }
 
     reserveStack(*stackAdjust);
@@ -3444,7 +3444,7 @@ void
 MacroAssemblerMIPSCompat::handleFailureWithHandler(void *handler)
 {
     // Reserve space for exception information.
-    int size = (sizeof(ResumeFromException) + StackAlignment) & ~(StackAlignment - 1);
+    int size = (sizeof(ResumeFromException) + ABIStackAlignment) & ~(ABIStackAlignment - 1);
     ma_subu(StackPointer, StackPointer, Imm32(size));
     ma_move(a0, StackPointer); // Use a0 since it is a first function argument
 
@@ -1871,7 +1871,7 @@ Simulator::softwareInterrupt(SimInstruction *instr)
 
     intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
 
-    bool stack_aligned = (getRegister(sp) & (StackAlignment - 1)) == 0;
+    bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
     if (!stack_aligned) {
         fprintf(stderr, "Runtime call with unaligned stack!\n");
         MOZ_CRASH();
@@ -3405,7 +3405,7 @@ Simulator::call(uint8_t *entry, int argument_count, ...)
     else
        entry_stack = entry_stack - kCArgsSlotsSize;
 
-    entry_stack &= ~StackAlignment;
+    entry_stack &= ~ABIStackAlignment;
 
     intptr_t *stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
 
@@ -16,6 +16,7 @@ namespace jit {
 
 static const bool SupportsSimd = false;
 static const uint32_t SimdStackAlignment = 0;
+static const uint32_t AsmJSStackAlignment = 0;
 
 class Registers
 {
@@ -68,9 +68,8 @@ static MOZ_CONSTEXPR_VAR ValueOperand JSReturnOperand(InvalidReg);
 #error "Bad architecture"
 #endif
 
-static const uint32_t StackAlignment = 8;
+static const uint32_t ABIStackAlignment = 4;
 static const uint32_t CodeAlignment = 4;
 static const bool StackKeptAligned = false;
 
 static const Scale ScalePointer = TimesOne;
 
@@ -640,9 +640,9 @@ class CallSite : public CallSiteDesc
 typedef Vector<CallSite, 0, SystemAllocPolicy> CallSiteVector;
 
 // As an invariant across architectures, within asm.js code:
-//   $sp % StackAlignment = (sizeof(AsmJSFrame) + masm.framePushed) % StackAlignment
+//   $sp % AsmJSStackAlignment = (sizeof(AsmJSFrame) + masm.framePushed) % AsmJSStackAlignment
 // Thus, AsmJSFrame represents the bytes pushed after the call (which occurred
-// with a StackAlignment-aligned StackPointer) that are not included in
+// with a AsmJSStackAlignment-aligned StackPointer) that are not included in
 // masm.framePushed.
 struct AsmJSFrame
 {
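A worked instance of the restated invariant, with made-up sizes (sizeof(AsmJSFrame) is taken as 8 purely for illustration): calls occur on an AsmJSStackAlignment-aligned sp, so sp and sizeof(AsmJSFrame) + masm.framePushed stay congruent modulo the alignment as the body pushes more bytes.

#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t AsmJSStackAlignment = 16;
    const uint32_t FrameBytes = 8;  // illustrative stand-in for sizeof(AsmJSFrame)
    uint32_t sp = 0x1000;           // aligned at the call instruction
    sp -= FrameBytes;               // the call pushes the AsmJSFrame header
    uint32_t framePushed = 0;
    assert(sp % AsmJSStackAlignment ==
           (FrameBytes + framePushed) % AsmJSStackAlignment);
    sp -= 24;                       // the body pushes 24 more bytes...
    framePushed += 24;              // ...and masm.framePushed tracks them
    assert(sp % AsmJSStackAlignment ==
           (FrameBytes + framePushed) % AsmJSStackAlignment);
    return 0;
}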
@@ -69,26 +69,26 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, Mac
     if (!gen->compilingAsmJS())
         masm.setInstrumentation(&sps_);
 
-    // Since asm.js uses the system ABI which does not necessarily use a
-    // regular array where all slots are sizeof(Value), it maintains the max
-    // argument stack depth separately.
     if (gen->compilingAsmJS()) {
+        // Since asm.js uses the system ABI which does not necessarily use a
+        // regular array where all slots are sizeof(Value), it maintains the max
+        // argument stack depth separately.
         JS_ASSERT(graph->argumentSlotCount() == 0);
         frameDepth_ += gen->maxAsmJSStackArgBytes();
 
-        // An MAsmJSCall does not align the stack pointer at calls sites but instead
-        // relies on the a priori stack adjustment (in the prologue) on platforms
-        // (like x64) which require the stack to be aligned.
-        if (StackKeptAligned || gen->performsCall() || gen->usesSimd()) {
-            unsigned alignmentAtCall = sizeof(AsmJSFrame) + frameDepth_;
-            unsigned firstFixup = 0;
-            if (unsigned rem = alignmentAtCall % StackAlignment)
-                frameDepth_ += (firstFixup = StackAlignment - rem);
-
-            if (gen->usesSimd())
-                setupSimdAlignment(firstFixup);
+        // If the function uses any SIMD, we may need to insert padding so that
+        // local slots are aligned for SIMD.
+        if (gen->usesSimd()) {
+            frameInitialAdjustment_ = ComputeByteAlignment(sizeof(AsmJSFrame), AsmJSStackAlignment);
+            frameDepth_ += frameInitialAdjustment_;
         }
+
+        // An MAsmJSCall does not align the stack pointer at calls sites but instead
+        // relies on the a priori stack adjustment. This must be the last
+        // adjustment of frameDepth_.
+        if (gen->performsCall())
+            frameDepth_ += ComputeByteAlignment(sizeof(AsmJSFrame) + frameDepth_, AsmJSStackAlignment);
 
         // FrameSizeClass is only used for bailing, which cannot happen in
         // asm.js code.
         frameClass_ = FrameSizeClass::None();
@@ -97,38 +97,6 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, Mac
     }
 }
 
-void
-CodeGeneratorShared::setupSimdAlignment(unsigned fixup)
-{
-    JS_STATIC_ASSERT(SimdStackAlignment % StackAlignment == 0);
-    // At this point, we have:
-    //      (frameDepth_ + sizeof(AsmJSFrame)) % StackAlignment == 0
-    // which means we can add as many SimdStackAlignment as needed.
-
-    // The next constraint is to have all stack slots
-    // aligned for SIMD. That's done by having the first stack slot
-    // aligned. We need an offset such that:
-    //      (frameDepth_ - offset) % SimdStackAlignment == 0
-    frameInitialAdjustment_ = frameDepth_ % SimdStackAlignment;
-
-    // We need to ensure that the first stack slot is actually
-    // located in this frame and not beforehand, when taking this
-    // offset into account, i.e.:
-    //      frameDepth_ - initial adjustment >= frameDepth_ - fixup
-    //      <=> fixup >= initial adjustment
-    //
-    // For instance, on x86 with gcc, if the initial frameDepth
-    // % 16 is 8, then the fixup is 0, although the initial
-    // adjustment is 8. The first stack slot would be located at
-    // frameDepth - 8 in this case, which is obviously before
-    // frameDepth.
-    //
-    // If that's not the case, we add SimdStackAlignment to the
-    // fixup, which will keep on satisfying other constraints.
-    if (frameInitialAdjustment_ > int32_t(fixup))
-        frameDepth_ += SimdStackAlignment;
-}
-
 bool
 CodeGeneratorShared::generateOutOfLineCode()
 {
@@ -496,8 +496,6 @@ class CodeGeneratorShared : public LInstructionVisitor
   private:
     void generateInvalidateEpilogue();
 
-    void setupSimdAlignment(unsigned fixup);
-
   public:
     CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);
 
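The rewritten constructor makes its frameDepth_ adjustments in a fixed order: SIMD padding first, call-site padding last. A sketch of the arithmetic with illustrative constants (8-byte frame header, 36 bytes of locals and argument area, 16-byte AsmJSStackAlignment; ComputeByteAlignment reproduced under the same contract):

#include <cassert>
#include <cstdint>

static uint32_t ComputeByteAlignment(uint32_t bytes, uint32_t alignment)
{
    return (alignment - bytes % alignment) % alignment;
}

int main()
{
    const uint32_t AsmJSStackAlignment = 16;
    const uint32_t AsmJSFrameBytes = 8; // illustrative sizeof(AsmJSFrame)
    uint32_t frameDepth = 36;           // locals + outgoing argument bytes

    // SIMD padding first, so the first local slot is aligned relative to
    // the frame header: 8 bytes here.
    uint32_t frameInitialAdjustment =
        ComputeByteAlignment(AsmJSFrameBytes, AsmJSStackAlignment);
    frameDepth += frameInitialAdjustment;

    // Call-site padding last, as the comment in the hunk requires:
    // 8 + 44 = 52 needs 12 more bytes to reach 64.
    frameDepth += ComputeByteAlignment(AsmJSFrameBytes + frameDepth, AsmJSStackAlignment);
    assert((AsmJSFrameBytes + frameDepth) % AsmJSStackAlignment == 0);
    return 0;
}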
@@ -184,10 +184,7 @@ static MOZ_CONSTEXPR_VAR Register OsrFrameReg = IntArgReg3;
 
 static MOZ_CONSTEXPR_VAR Register PreBarrierReg = rdx;
 
-// GCC stack is aligned on 16 bytes, but we don't maintain the invariant in
-// jitted code.
-static const uint32_t StackAlignment = 16;
-static const bool StackKeptAligned = false;
+static const uint32_t ABIStackAlignment = 16;
 static const uint32_t CodeAlignment = 8;
 
 // This boolean indicates whether we support SIMD instructions flavoured for
@@ -197,6 +194,8 @@ static const uint32_t CodeAlignment = 8;
 static const bool SupportsSimd = true;
 static const uint32_t SimdStackAlignment = 16;
 
+static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
+
 static const Scale ScalePointer = TimesEight;
 
 } // namespace jit
@@ -200,7 +200,7 @@ MacroAssemblerX64::setupUnalignedABICall(uint32_t args, Register scratch)
     dynamicAlignment_ = true;
 
     movq(rsp, scratch);
-    andq(Imm32(~(StackAlignment - 1)), rsp);
+    andq(Imm32(~(ABIStackAlignment - 1)), rsp);
     push(scratch);
 }
 
@@ -270,11 +270,11 @@ MacroAssemblerX64::callWithABIPre(uint32_t *stackAdjust)
     if (dynamicAlignment_) {
         *stackAdjust = stackForCall_
                      + ComputeByteAlignment(stackForCall_ + sizeof(intptr_t),
-                                            StackAlignment);
+                                            ABIStackAlignment);
     } else {
         *stackAdjust = stackForCall_
                      + ComputeByteAlignment(stackForCall_ + framePushed_,
-                                            StackAlignment);
+                                            ABIStackAlignment);
     }
 
     reserveStack(*stackAdjust);
@@ -293,7 +293,7 @@ MacroAssemblerX64::callWithABIPre(uint32_t *stackAdjust)
 #ifdef DEBUG
     {
         Label good;
-        testq(rsp, Imm32(StackAlignment - 1));
+        testq(rsp, Imm32(ABIStackAlignment - 1));
         j(Equal, &good);
         breakpoint();
         bind(&good);
@@ -551,7 +551,6 @@ JitRuntime::generateBailoutHandler(JSContext *cx, ExecutionMode mode)
 JitCode *
 JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
 {
-    JS_ASSERT(!StackKeptAligned);
     JS_ASSERT(functionWrappers_);
     JS_ASSERT(functionWrappers_->initialized());
     VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
@@ -108,14 +108,13 @@ static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = edi;
 static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = eax;
 static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = esi;
 
-// GCC stack is aligned on 16 bytes, but we don't maintain the invariant in
-// jitted code.
+// GCC stack is aligned on 16 bytes. Ion does not maintain this for internal
+// calls. asm.js code does.
 #if defined(__GNUC__)
-static const uint32_t StackAlignment = 16;
+static const uint32_t ABIStackAlignment = 16;
 #else
-static const uint32_t StackAlignment = 4;
+static const uint32_t ABIStackAlignment = 4;
 #endif
-static const bool StackKeptAligned = false;
 static const uint32_t CodeAlignment = 8;
 
 // This boolean indicates whether we support SIMD instructions flavoured for
@@ -125,6 +124,8 @@ static const uint32_t CodeAlignment = 8;
 static const bool SupportsSimd = true;
 static const uint32_t SimdStackAlignment = 16;
 
+static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
+
 struct ImmTag : public Imm32
 {
     ImmTag(JSValueTag mask)
@@ -227,7 +227,7 @@ MacroAssemblerX86::setupUnalignedABICall(uint32_t args, Register scratch)
     dynamicAlignment_ = true;
 
     movl(esp, scratch);
-    andl(Imm32(~(StackAlignment - 1)), esp);
+    andl(Imm32(~(ABIStackAlignment - 1)), esp);
     push(scratch);
 }
 
@@ -267,11 +267,11 @@ MacroAssemblerX86::callWithABIPre(uint32_t *stackAdjust)
     if (dynamicAlignment_) {
         *stackAdjust = stackForCall_
                      + ComputeByteAlignment(stackForCall_ + sizeof(intptr_t),
-                                            StackAlignment);
+                                            ABIStackAlignment);
     } else {
         *stackAdjust = stackForCall_
                      + ComputeByteAlignment(stackForCall_ + framePushed_,
-                                            StackAlignment);
+                                            ABIStackAlignment);
    }
 
     reserveStack(*stackAdjust);
@@ -291,7 +291,7 @@ MacroAssemblerX86::callWithABIPre(uint32_t *stackAdjust)
     {
         // Check call alignment.
         Label good;
-        testl(esp, Imm32(StackAlignment - 1));
+        testl(esp, Imm32(ABIStackAlignment - 1));
         j(Equal, &good);
         breakpoint();
         bind(&good);
@@ -590,7 +590,6 @@ JitRuntime::generateBailoutHandler(JSContext *cx, ExecutionMode mode)
 JitCode *
 JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
 {
-    JS_ASSERT(!StackKeptAligned);
     JS_ASSERT(functionWrappers_);
     JS_ASSERT(functionWrappers_->initialized());
     VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
@@ -1550,7 +1550,7 @@ jit::JitActivation::markRematerializedFrames(JSTracer *trc)
 AsmJSActivation::AsmJSActivation(JSContext *cx, AsmJSModule &module)
   : Activation(cx, AsmJS),
     module_(module),
-    errorRejoinSP_(nullptr),
+    entrySP_(nullptr),
     profiler_(nullptr),
     resumePC_(nullptr),
     fp_(nullptr),
@@ -1573,7 +1573,7 @@ AsmJSActivation::AsmJSActivation(JSContext *cx, AsmJSModule &module)
     JSRuntime::AutoLockForInterrupt lock(cx->runtime());
     cx->mainThread().asmJSActivationStack_ = this;
 
-    (void) errorRejoinSP_; // squelch GCC warning
+    (void) entrySP_; // squelch GCC warning
 }
 
 AsmJSActivation::~AsmJSActivation()
@@ -1482,7 +1482,7 @@ class AsmJSActivation : public Activation
     AsmJSModule &module_;
     AsmJSActivation *prevAsmJS_;
     AsmJSActivation *prevAsmJSForModule_;
-    void *errorRejoinSP_;
+    void *entrySP_;
     SPSProfiler *profiler_;
     void *resumePC_;
     uint8_t *fp_;
@@ -1512,7 +1512,7 @@ class AsmJSActivation : public Activation
     static unsigned offsetOfResumePC() { return offsetof(AsmJSActivation, resumePC_); }
 
     // Written by JIT code:
-    static unsigned offsetOfErrorRejoinSP() { return offsetof(AsmJSActivation, errorRejoinSP_); }
+    static unsigned offsetOfEntrySP() { return offsetof(AsmJSActivation, entrySP_); }
     static unsigned offsetOfFP() { return offsetof(AsmJSActivation, fp_); }
     static unsigned offsetOfExitReason() { return offsetof(AsmJSActivation, exitReason_); }
 