Bug 959597 part 2 - Use JS_CODEGEN_* instead of JS_CPU_* for JIT backend code. r=nbp

Jan de Mooij 2014-01-28 15:33:56 +01:00
parent f2e8aa5690
commit d78ff7f593
43 changed files with 147 additions and 147 deletions
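
The change itself is mechanical: every preprocessor guard that selects a JIT backend via JS_CPU_X86, JS_CPU_X64, JS_CPU_ARM, or JS_CPU_ARM_HARDFP is respelled as the corresponding JS_CODEGEN_* macro, decoupling the choice of code generator from macros that describe the host CPU. The idiom the patch touches over and over is the per-backend include dispatch; a minimal sketch of its post-patch shape (the header names appear in the hunks below; the #error fallback mirrors the one in the BaselineHelpers hunk):

    // Exactly one JS_CODEGEN_* macro is defined by the build system;
    // the architecture-specific JIT headers are selected from it.
    #if defined(JS_CODEGEN_X86)
    # include "jit/x86/MacroAssembler-x86.h"
    #elif defined(JS_CODEGEN_X64)
    # include "jit/x64/MacroAssembler-x64.h"
    #elif defined(JS_CODEGEN_ARM)
    # include "jit/arm/MacroAssembler-arm.h"
    #else
    # error "Unknown architecture!"
    #endif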

View File

@@ -1489,19 +1489,19 @@ class MOZ_STACK_CLASS ModuleCompiler
}
void setInterpExitOffset(unsigned exitIndex) {
-#if defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_ARM)
masm_.flush();
#endif
module_->exit(exitIndex).initInterpOffset(masm_.size());
}
void setIonExitOffset(unsigned exitIndex) {
-#if defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_ARM)
masm_.flush();
#endif
module_->exit(exitIndex).initIonOffset(masm_.size());
}
void setEntryOffset(unsigned exportIndex) {
-#if defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_ARM)
masm_.flush();
#endif
module_->exportedFunction(exportIndex).initCodeOffset(masm_.size());
@@ -1543,7 +1543,7 @@ class MOZ_STACK_CLASS ModuleCompiler
if (masm_.oom())
return false;
-#if defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_ARM)
// Now that compilation has finished, we need to update offsets to
// reflect actual offsets (an ARM distinction).
for (unsigned i = 0; i < module_->numHeapAccesses(); i++) {
@@ -1624,7 +1624,7 @@ class MOZ_STACK_CLASS ModuleCompiler
}
}
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
// Global data accesses in x86 need to be patched with the absolute
// address of the global. Globals are allocated sequentially after the
// code section so we can just use a RelativeLink.
@@ -1638,7 +1638,7 @@ class MOZ_STACK_CLASS ModuleCompiler
}
#endif
-#if defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X64)
// Global data accesses on x64 use rip-relative addressing and thus do
// not need patching after deserialization.
uint8_t *code = module_->codeBase();
@@ -5836,21 +5836,21 @@ GenerateEntry(ModuleCompiler &m, const AsmJSModule::ExportedFunction &exportedFu
// ARM has a globally-pinned GlobalReg (x64 uses RIP-relative addressing,
// x86 uses immediates in effective addresses) and NaN register (used as
// part of the out-of-bounds handling in heap loads/stores).
-#if defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_ARM)
masm.movePtr(IntArgReg1, GlobalReg);
masm.ma_vimm(GenericNaN(), NANReg);
#endif
// ARM and x64 have a globally-pinned HeapReg (x86 uses immediates in
// effective addresses).
-#if defined(JS_CPU_X64) || defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
masm.loadPtr(Address(IntArgReg1, m.module().heapOffset()), HeapReg);
#endif
// Get 'argv' into a non-arg register and save it on the stack.
Register argv = ABIArgGenerator::NonArgReturnVolatileReg0;
Register scratch = ABIArgGenerator::NonArgReturnVolatileReg1;
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
masm.loadPtr(Address(StackPointer, NativeFrameSize + masm.framePushed()), argv);
#else
masm.movePtr(IntArgReg0, argv);
@@ -6046,7 +6046,7 @@ FillArgumentArray(ModuleCompiler &m, const VarTypeVector &argTypes,
case ABIArg::Stack:
if (i.mirType() == MIRType_Int32) {
Address src(StackPointer, offsetToCallerStackArgs + i->offsetFromArgBase());
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
masm.load32(src, scratch);
masm.storeValue(JSVAL_TYPE_INT32, scratch, dstAddr);
#else
@@ -6073,7 +6073,7 @@ GenerateFFIInterpreterExit(ModuleCompiler &m, const ModuleCompiler::ExitDescript
m.setInterpExitOffset(exitIndex);
masm.setFramePushed(0);
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
MIRType typeArray[] = { MIRType_Pointer, // cx
MIRType_Pointer, // exitDatum
MIRType_Int32, // argc
@@ -6291,7 +6291,7 @@ GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit
RegisterSet restoreSet = RegisterSet::Intersect(RegisterSet::All(),
RegisterSet::Not(RegisterSet::Volatile()));
-#if defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_ARM)
masm.Push(lr);
#endif
masm.PushRegsInMask(restoreSet);
@@ -6303,7 +6303,7 @@ GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit
MIRTypeVector emptyVector(m.cx());
unsigned argBytes = 3 * sizeof(size_t) + (1 + exit.sig().args().length()) * sizeof(Value);
unsigned extraBytes = 0;
-#if defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_ARM)
extraBytes += sizeof(size_t);
#endif
unsigned stackDec = StackDecrementForCall(masm, emptyVector, argBytes + extraBytes);
@@ -6319,10 +6319,10 @@ GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit
// 2.1. Get ExitDatum
unsigned globalDataOffset = m.module().exitIndexToGlobalDataOffset(exitIndex);
-#if defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X64)
CodeOffsetLabel label2 = masm.leaRipRelative(callee);
m.addGlobalAccess(AsmJSGlobalAccess(label2.offset(), globalDataOffset));
-#elif defined(JS_CPU_X86)
+#elif defined(JS_CODEGEN_X86)
CodeOffsetLabel label2 = masm.movlWithPatch(Imm32(0), callee);
m.addGlobalAccess(AsmJSGlobalAccess(label2.offset(), globalDataOffset));
#else
@@ -6345,7 +6345,7 @@ GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit
// 5. Fill the arguments
unsigned offsetToArgs = 3 * sizeof(size_t) + sizeof(Value);
unsigned offsetToCallerStackArgs = masm.framePushed();
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
offsetToCallerStackArgs += NativeFrameSize;
#else
offsetToCallerStackArgs += ShadowStackSpace;
@@ -6373,12 +6373,12 @@ GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit
masm.pop(scratch);
// 2. Call
-#if defined(JS_CPU_ARM) && defined(DEBUG)
+#if defined(JS_CODEGEN_ARM) && defined(DEBUG)
// ARM still needs to push before the stack is aligned
masm.Push(scratch);
#endif
AssertStackAlignment(masm);
-#if defined(JS_CPU_ARM) && defined(DEBUG)
+#if defined(JS_CODEGEN_ARM) && defined(DEBUG)
masm.freeStack(sizeof(size_t));
#endif
masm.callIon(scratch);
@@ -6457,7 +6457,7 @@ GenerateStackOverflowExit(ModuleCompiler &m, Label *throwLabel)
masm.align(CodeAlignment);
masm.bind(&m.stackOverflowLabel());
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
// Ensure that at least one slot is pushed for passing 'cx' below.
masm.push(Imm32(0));
#endif
@@ -6470,7 +6470,7 @@ GenerateStackOverflowExit(ModuleCompiler &m, Label *throwLabel)
masm.subPtr(Imm32(ShadowStackSpace), StackPointer);
// Prepare the arguments for the call to js_ReportOverRecursed.
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
LoadAsmJSActivationIntoRegister(masm, eax);
LoadJSContextFromActivation(masm, eax, eax);
masm.storePtr(eax, Address(StackPointer, 0));
@@ -6499,7 +6499,7 @@ GenerateOperationCallbackExit(ModuleCompiler &m, Label *throwLabel)
masm.align(CodeAlignment);
masm.bind(&m.operationCallbackLabel());
-#ifndef JS_CPU_ARM
+#ifndef JS_CODEGEN_ARM
// Be very careful here not to perturb the machine state before saving it
// to the stack. In particular, add/sub instructions may set conditions in
// the flags register.
@@ -6519,7 +6519,7 @@ GenerateOperationCallbackExit(ModuleCompiler &m, Label *throwLabel)
// We know that StackPointer is word-aligned, but not necessarily
// stack-aligned, so we need to align it dynamically.
masm.mov(StackPointer, ABIArgGenerator::NonVolatileReg);
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
// Ensure that at least one slot is pushed for passing 'cx' below.
masm.push(Imm32(0));
#endif
@@ -6528,10 +6528,10 @@ GenerateOperationCallbackExit(ModuleCompiler &m, Label *throwLabel)
masm.subPtr(Imm32(ShadowStackSpace), StackPointer);
// argument 0: cx
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
LoadJSContextFromActivation(masm, activation, scratch);
masm.storePtr(scratch, Address(StackPointer, 0));
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
LoadJSContextFromActivation(masm, activation, IntArgReg0);
#endif

View File

@@ -86,7 +86,7 @@ RoundUpToNextValidAsmJSHeapLength(uint32_t length);
extern bool
IsValidAsmJSHeapLength(uint32_t length);
-#ifdef JS_CPU_X64
+#ifdef JS_CODEGEN_X64
// On x64, the internal ArrayBuffer data array is inflated to 4GiB (only the
// byteLength portion of which is accessible) so that out-of-bounds accesses
// (made using a uint32 index) are guaranteed to raise a SIGSEGV.
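
A hedged sketch of the reservation trick the comment describes (POSIX mmap shown for illustration on a 64-bit host; this is not the allocator SpiderMonkey actually uses, and the names here are assumptions):

    #include <sys/mman.h>
    #include <stddef.h>

    // Reserve 4GiB of address space but make only the first byteLength
    // bytes accessible. Any (base + uint32 index) access past byteLength
    // then lands in PROT_NONE pages and raises SIGSEGV instead of silently
    // touching unrelated memory.
    static const size_t FourGiB = size_t(4) * 1024 * 1024 * 1024;

    void *ReserveAsmJSHeap(size_t byteLength) {
        void *p = mmap(nullptr, FourGiB, PROT_NONE,
                       MAP_PRIVATE | MAP_ANON, -1, 0);
        if (p == MAP_FAILED)
            return nullptr;
        // Commit only the accessible prefix.
        if (mprotect(p, byteLength, PROT_READ | PROT_WRITE) != 0) {
            munmap(p, FourGiB);
            return nullptr;
        }
        return p;
    }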

View File

@@ -44,7 +44,7 @@ AsmJSModule::initHeap(Handle<ArrayBufferObject*> heap, JSContext *cx)
heapDatum() = heap->dataPointer();
JS_ASSERT(IsValidAsmJSHeapLength(heap->byteLength()));
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
uint8_t *heapOffset = heap->dataPointer();
void *heapLength = (void*)heap->byteLength();
for (unsigned i = 0; i < heapAccesses_.length(); i++) {
@@ -56,7 +56,7 @@ AsmJSModule::initHeap(Handle<ArrayBufferObject*> heap, JSContext *cx)
JS_ASSERT(disp <= INT32_MAX);
JSC::X86Assembler::setPointer(addr, (void *)(heapOffset + disp));
}
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
uint32_t heapLength = heap->byteLength();
for (unsigned i = 0; i < heapAccesses_.length(); i++) {
jit::Assembler::updateBoundsCheck(heapLength,
@@ -180,7 +180,7 @@ InvokeFromAsmJS_ToNumber(JSContext *cx, int32_t exitIndex, int32_t argc, Value *
}
-#if defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_ARM)
extern "C" {
extern int
@@ -227,7 +227,7 @@ AddressOf(AsmJSImmKind kind, ExclusiveContext *cx)
return FuncCast(EnableActivationFromAsmJS);
case AsmJSImm_DisableActivationFromAsmJS:
return FuncCast(DisableActivationFromAsmJS);
-#if defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_ARM)
case AsmJSImm_aeabi_idivmod:
return FuncCast(__aeabi_idivmod);
case AsmJSImm_aeabi_uidivmod:
@@ -732,15 +732,15 @@ GetCPUID(uint32_t *cpuId)
ARCH_BITS = 2
};
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
JS_ASSERT(uint32_t(JSC::MacroAssembler::getSSEState()) <= (UINT32_MAX >> ARCH_BITS));
*cpuId = X86 | (JSC::MacroAssembler::getSSEState() << ARCH_BITS);
return true;
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
JS_ASSERT(uint32_t(JSC::MacroAssembler::getSSEState()) <= (UINT32_MAX >> ARCH_BITS));
*cpuId = X64 | (JSC::MacroAssembler::getSSEState() << ARCH_BITS);
return true;
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
JS_ASSERT(GetARMFlags() <= (UINT32_MAX >> ARCH_BITS));
*cpuId = ARM | (GetARMFlags() << ARCH_BITS);
return true;
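
The packing above keeps the architecture tag in the low ARCH_BITS bits and shifts the feature flags (SSE state or ARM flags) above them; the asserts guarantee the flags fit. A small decode sketch (the X86/X64/ARM constant values are assumptions — their definitions sit in an elided part of this hunk):

    enum Arch { X86 = 0x1, X64 = 0x2, ARM = 0x3, ARCH_BITS = 2 }; // assumed values

    static void UnpackCpuId(uint32_t cpuId, uint32_t *arch, uint32_t *flags) {
        *arch = cpuId & ((1 << ARCH_BITS) - 1); // low 2 bits: architecture tag
        *flags = cpuId >> ARCH_BITS;            // the rest: CPU feature flags
    }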

View File

@@ -187,7 +187,7 @@ class AutoSetHandlingSignal
}
};
-#if defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X64)
template <class T>
static void
SetXMMRegToNaN(bool isFloat32, T *xmm_reg)
@@ -250,7 +250,7 @@ LookupHeapAccess(const AsmJSModule &module, uint8_t *pc)
# include <sys/ucontext.h> // for ucontext_t, mcontext_t
#endif
-#if defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X64)
# if defined(__DragonFly__)
# include <machine/npx.h> // for union savefpu
# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
@@ -340,7 +340,7 @@ ContextToPC(CONTEXT *context)
return reinterpret_cast<uint8_t**>(&PC_sig(context));
}
-# if defined(JS_CPU_X64)
+# if defined(JS_CODEGEN_X64)
static void
SetRegisterToCoercedUndefined(CONTEXT *context, bool isFloat32, AnyRegister reg)
{
@@ -386,7 +386,7 @@ SetRegisterToCoercedUndefined(CONTEXT *context, bool isFloat32, AnyRegister reg)
}
}
}
-# endif // JS_CPU_X64
+# endif // JS_CODEGEN_X64
#endif // !XP_MACOSX
#if defined(XP_WIN)
@@ -441,7 +441,7 @@ HandleException(PEXCEPTION_POINTERS exception)
return true;
}
-# if defined(JS_CPU_X64)
+# if defined(JS_CODEGEN_X64)
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
if (!module.maybeHeap() ||
@@ -490,7 +490,7 @@ AsmJSExceptionHandler(LPEXCEPTION_POINTERS exception)
static uint8_t **
ContextToPC(x86_thread_state_t &state)
{
-# if defined(JS_CPU_X64)
+# if defined(JS_CODEGEN_X64)
JS_STATIC_ASSERT(sizeof(state.uts.ts64.__rip) == sizeof(void*));
return reinterpret_cast<uint8_t**>(&state.uts.ts64.__rip);
# else
@@ -499,7 +499,7 @@ ContextToPC(x86_thread_state_t &state)
# endif
}
-# if defined(JS_CPU_X64)
+# if defined(JS_CODEGEN_X64)
static bool
SetRegisterToCoercedUndefined(mach_port_t rtThread, x86_thread_state64_t &state,
const AsmJSHeapAccess &heapAccess)
@@ -639,7 +639,7 @@ HandleMachException(JSRuntime *rt, const ExceptionRequest &request)
return kret == KERN_SUCCESS;
}
-# if defined(JS_CPU_X64)
+# if defined(JS_CODEGEN_X64)
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
if (!module.maybeHeap() ||
@@ -878,7 +878,7 @@ HandleSignal(int signum, siginfo_t *info, void *ctx)
return true;
}
-# if defined(JS_CPU_X64)
+# if defined(JS_CODEGEN_X64)
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
if (!module.maybeHeap() ||

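The Windows, Mach, and POSIX handlers above all share the same recovery shape; a minimal sketch of that logic (illustrative only — this is not SpiderMonkey's actual handler, and every name in it is an assumption):

    #include <stdint.h>
    #include <stddef.h>

    // Returns true if the fault was a recoverable asm.js out-of-bounds heap
    // access: the faulting address must fall inside the 4GiB reservation and
    // the pc inside the module's jitted code. The real handler then sets the
    // destination register to a coerced undefined (NaN or 0) and advances pc
    // past the faulting load/store, as SetRegisterToCoercedUndefined does above.
    static bool IsAsmJSOutOfBounds(const uint8_t *faultAddr, const uint8_t *pc,
                                   const uint8_t *heapBase,
                                   const uint8_t *codeBase, size_t codeBytes)
    {
        const size_t reservation = size_t(4) * 1024 * 1024 * 1024; // 4GiB, x64
        if (faultAddr < heapBase || faultAddr >= heapBase + reservation)
            return false; // not a heap access
        if (pc < codeBase || pc >= codeBase + codeBytes)
            return false; // fault did not come from asm.js code
        return true;
    }
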
View File

@@ -337,12 +337,12 @@ struct BaselineStackBuilder
// so we can calculate it directly. For other archs, the previous frame pointer
// is stored on the stack in the frame that precedes the rectifier frame.
size_t priorOffset = IonJSFrameLayout::Size() + topFrame->prevFrameLocalSize();
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
// On X86, the FramePointer is pushed as the first value in the Rectifier frame.
JS_ASSERT(BaselineFrameReg == FramePointer);
priorOffset -= sizeof(void *);
return virtualPointerAtStackOffset(priorOffset);
-#elif defined(JS_CPU_X64) || defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
// On X64 and ARM, the frame pointer save location depends on the caller
// of the rectifier frame.
BufferPointer<IonRectifierFrameLayout> priorFrame =
@@ -1141,7 +1141,7 @@ InitFromBailout(JSContext *cx, HandleScript caller, jsbytecode *callerPC,
size_t startOfRectifierFrame = builder.framePushed();
// On x86-only, the frame pointer is saved again in the rectifier frame.
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
if (!builder.writePtr(prevFramePtr, "PrevFramePtr-X86Only"))
return false;
#endif

View File

@@ -414,7 +414,7 @@ BaselineCompiler::emitOutOfLinePostBarrierSlot()
regs.take(objReg);
regs.take(BaselineFrameReg);
Register scratch = regs.takeAny();
-#if defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_ARM)
// On ARM, save the link register before calling. It contains the return
// address. The |masm.ret()| later will pop this into |pc| to return.
masm.push(lr);

View File

@@ -10,9 +10,9 @@
#ifdef JS_ION
#include "jit/FixedList.h"
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
# include "jit/x86/BaselineCompiler-x86.h"
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
# include "jit/x64/BaselineCompiler-x64.h"
#else
# include "jit/arm/BaselineCompiler-arm.h"

View File

@@ -9,11 +9,11 @@
#ifdef JS_ION
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
# include "jit/x86/BaselineHelpers-x86.h"
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
# include "jit/x64/BaselineHelpers-x64.h"
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/BaselineHelpers-arm.h"
#else
# error "Unknown architecture!"

View File

@@ -581,7 +581,7 @@ ICStubCompiler::getStubCode()
// Compile new stubcode.
MacroAssembler masm;
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
masm.setSecondScratchReg(BaselineSecondScratchReg);
#endif
@@ -712,7 +712,7 @@ ICStubCompiler::emitPostWriteBarrierSlot(MacroAssembler &masm, Register obj, Reg
masm.bind(&isTenured);
// void PostWriteBarrier(JSRuntime *rt, JSObject *obj);
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
saveRegs.add(BaselineTailCallReg);
#endif
saveRegs = GeneralRegisterSet::Intersect(saveRegs, GeneralRegisterSet::Volatile());

View File

@@ -1049,13 +1049,13 @@ class ICStubCompiler
inline GeneralRegisterSet availableGeneralRegs(size_t numInputs) const {
GeneralRegisterSet regs(GeneralRegisterSet::All());
JS_ASSERT(!regs.has(BaselineStackReg));
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
JS_ASSERT(!regs.has(BaselineTailCallReg));
regs.take(BaselineSecondScratchReg);
#endif
regs.take(BaselineFrameReg);
regs.take(BaselineStubReg);
-#ifdef JS_CPU_X64
+#ifdef JS_CODEGEN_X64
regs.take(ExtractTemp0);
regs.take(ExtractTemp1);
#endif

View File

@@ -9,9 +9,9 @@
#ifdef JS_ION
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
# include "jit/x86/BaselineRegisters-x86.h"
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
# include "jit/x64/BaselineRegisters-x64.h"
#else
# include "jit/arm/BaselineRegisters-arm.h"

View File

@@ -7900,7 +7900,7 @@ CodeGenerator::visitAsmJSCall(LAsmJSCall *ins)
{
MAsmJSCall *mir = ins->mir();
-#if defined(JS_CPU_ARM) && !defined(JS_CPU_ARM_HARDFP)
+#if defined(JS_CODEGEN_ARM) && !defined(JS_CODEGEN_ARM_HARDFP)
if (mir->callee().which() == MAsmJSCall::Callee::Builtin) {
for (unsigned i = 0, e = ins->numOperands(); i < e; i++) {
LAllocation *a = ins->getOperand(i);

View File

@@ -12,11 +12,11 @@
# include "jit/PerfSpewer.h"
#endif
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
# include "jit/x86/CodeGenerator-x86.h"
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
# include "jit/x64/CodeGenerator-x64.h"
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/CodeGenerator-arm.h"
#else
#error "CPU Not Supported"

View File

@@ -43,7 +43,7 @@ CodeLocationJump::repoint(JitCode *code, MacroAssembler *masm)
size_t jumpTableEntryOffset = reinterpret_cast<size_t>(jumpTableEntry_);
#endif
if (masm != nullptr) {
-#ifdef JS_CPU_X64
+#ifdef JS_CODEGEN_X64
JS_ASSERT((uint64_t)raw_ <= UINT32_MAX);
#endif
new_off = masm->actualOffset((uintptr_t)raw_);
@@ -64,7 +64,7 @@ CodeLocationLabel::repoint(JitCode *code, MacroAssembler *masm)
JS_ASSERT(state_ == Relative);
size_t new_off = (size_t)raw_;
if (masm != nullptr) {
-#ifdef JS_CPU_X64
+#ifdef JS_CODEGEN_X64
JS_ASSERT((uint64_t)raw_ <= UINT32_MAX);
#endif
new_off = masm->actualOffset((uintptr_t)raw_);

View File

@@ -7,7 +7,7 @@
#ifndef jit_IonCaches_h
#define jit_IonCaches_h
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
# include "jit/arm/Assembler-arm.h"
#endif
#include "jit/Registers.h"
@@ -347,7 +347,7 @@ class RepatchIonCache : public IonCache
CodeLocationJump lastJump_;
// Offset from the initial jump to the rejoin label.
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
static const size_t REJOIN_LABEL_OFFSET = 4;
#else
static const size_t REJOIN_LABEL_OFFSET = 0;
@@ -355,7 +355,7 @@ class RepatchIonCache : public IonCache
CodeLocationLabel rejoinLabel() const {
uint8_t *ptr = initialJump_.raw();
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
uint32_t i = 0;
while (i < REJOIN_LABEL_OFFSET)
ptr = Assembler::nextInstruction(ptr, &i);
@@ -1042,7 +1042,7 @@ class GetPropertyParIC : public ParallelIonCache
CACHE_HEADER(GetPropertyPar)
-#ifdef JS_CPU_X86
+#ifdef JS_CODEGEN_X86
// x86 lacks a general purpose scratch register for dispatch caches and
// must be given one manually.
void initializeAddCacheState(LInstruction *ins, AddCacheState *addState);
@@ -1101,7 +1101,7 @@ class GetElementParIC : public ParallelIonCache
CACHE_HEADER(GetElementPar)
-#ifdef JS_CPU_X86
+#ifdef JS_CODEGEN_X86
// x86 lacks a general purpose scratch register for dispatch caches and
// must be given one manually.
void initializeAddCacheState(LInstruction *ins, AddCacheState *addState);
@@ -1162,7 +1162,7 @@ class SetPropertyParIC : public ParallelIonCache
CACHE_HEADER(SetPropertyPar)
-#ifdef JS_CPU_X86
+#ifdef JS_CODEGEN_X86
// x86 lacks a general purpose scratch register for dispatch caches and
// must be given one manually.
void initializeAddCacheState(LInstruction *ins, AddCacheState *addState);
@@ -1222,7 +1222,7 @@ class SetElementParIC : public ParallelIonCache
CACHE_HEADER(SetElementPar)
-#ifdef JS_CPU_X86
+#ifdef JS_CODEGEN_X86
// x86 lacks a general purpose scratch register for dispatch caches and
// must be given one manually.
void initializeAddCacheState(LInstruction *ins, AddCacheState *addState);

View File

@@ -82,7 +82,7 @@ class Linker
}
JitCode *newCodeForIonScript(JSContext *cx) {
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
// ARM does not yet use implicit interrupt checks, see bug 864220.
return newCode<CanGC>(cx, JSC::ION_CODE);
#else

View File

@@ -245,14 +245,14 @@ MacroAssembler::PushRegsInMask(RegisterSet set)
int32_t diffF = set.fpus().size() * sizeof(double);
int32_t diffG = set.gprs().size() * sizeof(intptr_t);
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// On x86, always use push to push the integer registers, as it's fast
// on modern hardware and it's a small instruction.
for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
diffG -= sizeof(intptr_t);
Push(*iter);
}
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
if (set.gprs().size() > 1) {
adjustFrame(diffG);
startDataTransferM(IsStore, StackPointer, DB, WriteBack);
@@ -277,7 +277,7 @@ MacroAssembler::PushRegsInMask(RegisterSet set)
#endif
JS_ASSERT(diffG == 0);
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
adjustFrame(diffF);
diffF += transferMultipleByRuns(set.fpus(), IsStore, StackPointer, DB);
#else
@@ -298,7 +298,7 @@ MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore)
const int32_t reservedG = diffG;
const int32_t reservedF = diffF;
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
// ARM can load multiple registers at once, but only if we want back all
// the registers we previously saved to the stack.
if (ignore.empty(true)) {
@@ -316,7 +316,7 @@ MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore)
}
JS_ASSERT(diffF == 0);
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// On x86, use pop to pop the integer registers, if we're not going to
// ignore any slots, as it's fast on modern hardware and it's a small
// instruction.
@ -327,7 +327,7 @@ MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore)
}
} else
#endif
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
if (set.gprs().size() > 1 && ignore.empty(false)) {
startDataTransferM(IsLoad, StackPointer, IA, WriteBack);
for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
@@ -538,7 +538,7 @@ void
MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
{
JS_ASSERT(input != ScratchFloatReg);
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
ma_vimm(0.5, ScratchFloatReg);
if (hasVFPv3()) {
Label notSplit;
@@ -993,7 +993,7 @@ MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
// Discard exit frame.
addPtr(Imm32(IonExitFrameLayout::SizeWithFooter()), StackPointer);
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
push(BaselineTailCallReg);
#endif
jump(Address(BaselineStubReg, ICStub::offsetOfStubCode()));

View File

@@ -11,11 +11,11 @@
#include "jscompartment.h"
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
# include "jit/x86/MacroAssembler-x86.h"
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
# include "jit/x64/MacroAssembler-x64.h"
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/MacroAssembler-arm.h"
#endif
#include "jit/IonInstrumentation.h"
@@ -204,7 +204,7 @@ class MacroAssembler : public MacroAssemblerSpecific
}
moveResolver_.setAllocator(*icx->temp);
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
initWithAllocator();
m_buffer.id = icx->getNextAssemblerId();
#endif
@@ -221,7 +221,7 @@ class MacroAssembler : public MacroAssemblerSpecific
ionContext_.construct(cx, (js::jit::TempAllocator *)nullptr);
alloc_.construct(cx);
moveResolver_.setAllocator(*ionContext_.ref().temp);
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
initWithAllocator();
m_buffer.id = GetIonContext()->getNextAssemblerId();
#endif
@@ -236,7 +236,7 @@ class MacroAssembler : public MacroAssemblerSpecific
embedsNurseryPointers_(false),
sps_(nullptr)
{
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
initWithAllocator();
m_buffer.id = 0;
#endif

View File

@@ -1541,14 +1541,14 @@ LAllocation::toRegister() const
#endif
#include "jit/LIR-Common.h"
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
-# if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+# if defined(JS_CODEGEN_X86)
# include "jit/x86/LIR-x86.h"
-# elif defined(JS_CPU_X64)
+# elif defined(JS_CODEGEN_X64)
# include "jit/x64/LIR-x64.h"
# endif
# include "jit/shared/LIR-x86-shared.h"
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/LIR-arm.h"
#endif

View File

@@ -290,11 +290,11 @@
_(AssertRangeF) \
_(AssertRangeV)
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
# include "jit/x86/LOpcodes-x86.h"
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
# include "jit/x64/LOpcodes-x64.h"
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/LOpcodes-arm.h"
#endif

View File

@@ -2106,7 +2106,7 @@ LIRGenerator::visitInterruptCheck(MInterruptCheck *ins)
// Implicit interrupt checks require asm.js signal handlers to be
// installed. ARM does not yet use implicit interrupt checks, see
// bug 864220.
-#ifndef JS_CPU_ARM
+#ifndef JS_CODEGEN_ARM
if (GetIonContext()->runtime->signalHandlersInstalled()) {
LInterruptCheckImplicit *lir = new(alloc()) LInterruptCheckImplicit();
return add(lir, ins) && assignSafepoint(lir, ins);

View File

@@ -11,11 +11,11 @@
// MIRGraph.
#include "jit/LIR.h"
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
# include "jit/x86/Lowering-x86.h"
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
# include "jit/x64/Lowering-x64.h"
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/Lowering-arm.h"
#else
# error "CPU!"

View File

@@ -7,9 +7,9 @@
#ifndef jit_MoveEmitter_h
#define jit_MoveEmitter_h
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
# include "jit/shared/MoveEmitter-x86-shared.h"
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/MoveEmitter-arm.h"
#else
# error "CPU Not Supported"

View File

@@ -308,10 +308,10 @@ class RegisterAllocator
{
if (FramePointer != InvalidReg && mir->instrumentedProfiling())
allRegisters_.take(AnyRegister(FramePointer));
-#if defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X64)
if (mir->compilingAsmJS())
allRegisters_.take(AnyRegister(HeapReg));
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
if (mir->compilingAsmJS()) {
allRegisters_.take(AnyRegister(HeapReg));
allRegisters_.take(AnyRegister(GlobalReg));

View File

@@ -809,10 +809,10 @@ class ABIArg
class AsmJSHeapAccess
{
uint32_t offset_;
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
uint8_t cmpDelta_; // the number of bytes from the cmp to the load/store instruction
#endif
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
uint8_t opLength_; // the length of the load/store instruction
uint8_t isFloat32Load_;
AnyRegister::Code loadedReg_ : 8;
@@ -822,13 +822,13 @@ class AsmJSHeapAccess
public:
AsmJSHeapAccess() {}
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// If 'cmp' equals 'offset' or if it is not supplied then the
// cmpDelta_ is zero indicating that there is no length to patch.
AsmJSHeapAccess(uint32_t offset, uint32_t after, ArrayBufferView::ViewType vt,
AnyRegister loadedReg, uint32_t cmp = UINT32_MAX)
: offset_(offset),
-# if defined(JS_CPU_X86)
+# if defined(JS_CODEGEN_X86)
cmpDelta_(cmp == UINT32_MAX ? 0 : offset - cmp),
# endif
opLength_(after - offset),
@@ -837,14 +837,14 @@ class AsmJSHeapAccess
{}
AsmJSHeapAccess(uint32_t offset, uint8_t after, uint32_t cmp = UINT32_MAX)
: offset_(offset),
-# if defined(JS_CPU_X86)
+# if defined(JS_CODEGEN_X86)
cmpDelta_(cmp == UINT32_MAX ? 0 : offset - cmp),
# endif
opLength_(after - offset),
isFloat32Load_(false),
loadedReg_(UINT8_MAX)
{}
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
explicit AsmJSHeapAccess(uint32_t offset)
: offset_(offset)
{}
@@ -852,12 +852,12 @@ class AsmJSHeapAccess
uint32_t offset() const { return offset_; }
void setOffset(uint32_t offset) { offset_ = offset; }
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
bool hasLengthCheck() const { return cmpDelta_ > 0; }
void *patchLengthAt(uint8_t *code) const { return code + (offset_ - cmpDelta_); }
void *patchOffsetAt(uint8_t *code) const { return code + (offset_ + opLength_); }
#endif
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
unsigned opLength() const { return opLength_; }
bool isLoad() const { return loadedReg_ != UINT8_MAX; }
bool isFloat32Load() const { return isFloat32Load_; }

View File

@@ -10,11 +10,11 @@
#include "mozilla/Array.h"
#include "jit/IonTypes.h"
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
# include "jit/x86/Architecture-x86.h"
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
# include "jit/x64/Architecture-x64.h"
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/Architecture-arm.h"
#endif

View File

@@ -12,7 +12,7 @@
// gcc appears to use __ARM_PCS_VFP to denote that the target is a hard-float target.
#ifdef __ARM_PCS_VFP
-#define JS_CPU_ARM_HARDFP
+#define JS_CODEGEN_ARM_HARDFP
#endif
namespace js {
namespace jit {

View File

@@ -2102,7 +2102,7 @@ class InstructionIterator {
static const uint32_t NumIntArgRegs = 4;
static const uint32_t NumFloatArgRegs = 8;
-#ifdef JS_CPU_ARM_HARDFP
+#ifdef JS_CODEGEN_ARM_HARDFP
static inline bool
GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
{

View File

@@ -176,7 +176,7 @@ class CodeGeneratorARM : public CodeGeneratorShared
bool generateInvalidateEpilogue();
protected:
void postAsmJSCall(LAsmJSCall *lir) {
-#ifndef JS_CPU_ARM_HARDFP
+#ifndef JS_CODEGEN_ARM_HARDFP
if (lir->mir()->callee().which() == MAsmJSCall::Callee::Builtin) {
switch (lir->mir()->type()) {
case MIRType_Double:

View File

@@ -3479,7 +3479,7 @@ MacroAssemblerARMCompat::setupABICall(uint32_t args)
inCall_ = true;
args_ = args;
passedArgs_ = 0;
-#ifdef JS_CPU_ARM_HARDFP
+#ifdef JS_CODEGEN_ARM_HARDFP
usedIntSlots_ = 0;
usedFloatSlots_ = 0;
padding_ = 0;
@@ -3512,7 +3512,7 @@ MacroAssemblerARMCompat::setupUnalignedABICall(uint32_t args, const Register &sc
ma_and(Imm32(~(StackAlignment - 1)), sp, sp);
ma_push(scratch);
}
-#ifdef JS_CPU_ARM_HARDFP
+#ifdef JS_CODEGEN_ARM_HARDFP
void
MacroAssemblerARMCompat::passABIArg(const MoveOperand &from, MoveOp::Type type)
{
@@ -3631,7 +3631,7 @@ void
MacroAssemblerARMCompat::callWithABIPre(uint32_t *stackAdjust)
{
JS_ASSERT(inCall_);
-#ifdef JS_CPU_ARM_HARDFP
+#ifdef JS_CODEGEN_ARM_HARDFP
*stackAdjust = ((usedIntSlots_ > NumIntArgRegs) ? usedIntSlots_ - NumIntArgRegs : 0) * sizeof(intptr_t);
*stackAdjust += 2*((usedFloatSlots_ > NumFloatArgRegs) ? usedFloatSlots_ - NumFloatArgRegs : 0) * sizeof(intptr_t);
#else
@@ -3693,13 +3693,13 @@ MacroAssemblerARMCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type resu
switch (result) {
case MoveOp::DOUBLE:
-#ifndef JS_CPU_ARM_HARDFP
+#ifndef JS_CODEGEN_ARM_HARDFP
// Move double from r0/r1 to ReturnFloatReg.
as_vxfer(r0, r1, ReturnFloatReg, CoreToFloat);
break;
#endif
case MoveOp::FLOAT32:
-#ifndef JS_CPU_ARM_HARDFP
+#ifndef JS_CODEGEN_ARM_HARDFP
// Move float32 from r0 to ReturnFloatReg.
as_vxfer(r0, InvalidReg, VFPRegister(d0).singleOverlay(), CoreToFloat);
break;

View File

@@ -444,7 +444,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
// the initial number of arguments declared was correct.
uint32_t passedArgs_;
-#ifdef JS_CPU_ARM_HARDFP
+#ifdef JS_CODEGEN_ARM_HARDFP
uint32_t usedIntSlots_;
uint32_t usedFloatSlots_;
uint32_t padding_;

View File

@@ -17,7 +17,7 @@
#include "jit/Registers.h"
#include "jit/RegisterSets.h"
-#if defined(JS_CPU_X64) || defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
// JS_SMALL_BRANCH means the range on a branch instruction
// is smaller than the whole address space
# define JS_SMALL_BRANCH
@@ -685,7 +685,7 @@ enum AsmJSImmKind
AsmJSImm_ToInt32,
AsmJSImm_EnableActivationFromAsmJS,
AsmJSImm_DisableActivationFromAsmJS,
-#if defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_ARM)
AsmJSImm_aeabi_idivmod,
AsmJSImm_aeabi_uidivmod,
#endif

View File

@@ -6,11 +6,11 @@
#include "gc/Marking.h"
#include "jit/JitCompartment.h"
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
# include "jit/x86/MacroAssembler-x86.h"
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
# include "jit/x64/MacroAssembler-x64.h"
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/MacroAssembler-arm.h"
#endif

View File

@@ -1165,7 +1165,7 @@ class AssemblerX86Shared
masm.pop_flags();
}
-#ifdef JS_CPU_X86
+#ifdef JS_CODEGEN_X86
void pushAllRegs() {
masm.pusha();
}

View File

@@ -64,7 +64,7 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, Mac
// An MAsmJSCall does not align the stack pointer at calls sites but instead
// relies on the a priori stack adjustment (in the prologue) on platforms
// (like x64) which require the stack to be aligned.
-#ifdef JS_CPU_ARM
+#ifdef JS_CODEGEN_ARM
bool forceAlign = true;
#else
bool forceAlign = false;

View File

@@ -155,7 +155,7 @@ CodeGeneratorX86Shared::visitBitAndAndBranch(LBitAndAndBranch *baab)
void
CodeGeneratorX86Shared::emitCompare(MCompare::CompareType type, const LAllocation *left, const LAllocation *right)
{
-#ifdef JS_CPU_X64
+#ifdef JS_CODEGEN_X64
if (type == MCompare::Compare_Object) {
masm.cmpq(ToRegister(left), ToOperand(right));
return;
@@ -339,7 +339,7 @@ class BailoutJump {
public:
BailoutJump(Assembler::Condition cond) : cond_(cond)
{ }
-#ifdef JS_CPU_X86
+#ifdef JS_CODEGEN_X86
void operator()(MacroAssembler &masm, uint8_t *code) const {
masm.j(cond_, ImmPtr(code), Relocation::HARDCODED);
}
@@ -355,7 +355,7 @@ class BailoutLabel {
public:
BailoutLabel(Label *label) : label_(label)
{ }
-#ifdef JS_CPU_X86
+#ifdef JS_CODEGEN_X86
void operator()(MacroAssembler &masm, uint8_t *code) const {
masm.retarget(label_, ImmPtr(code), Relocation::HARDCODED);
}
@@ -393,7 +393,7 @@ CodeGeneratorX86Shared::bailout(const T &binder, LSnapshot *snapshot)
JS_ASSERT_IF(frameClass_ != FrameSizeClass::None() && deoptTable_,
frameClass_.frameSize() == masm.framePushed());
-#ifdef JS_CPU_X86
+#ifdef JS_CODEGEN_X86
// On x64, bailout tables are pointless, because 16 extra bytes are
// reserved per external jump, whereas it takes only 10 bytes to encode a
// non-table based bailout.

View File

@@ -324,7 +324,7 @@ LIRGeneratorShared::useRegisterOrNonDoubleConstant(MDefinition *mir)
return useRegister(mir);
}
-#if defined(JS_CPU_ARM)
+#if defined(JS_CODEGEN_ARM)
LAllocation
LIRGeneratorShared::useAnyOrConstant(MDefinition *mir)
{

View File

@ -288,7 +288,7 @@ LIRGeneratorX86Shared::lowerUrshD(MUrsh *mir)
JS_ASSERT(rhs->type() == MIRType_Int32);
JS_ASSERT(mir->type() == MIRType_Double);
-#ifdef JS_CPU_X64
+#ifdef JS_CODEGEN_X64
JS_ASSERT(ecx == rcx);
#endif

View File

@@ -10,9 +10,9 @@
#include "mozilla/Casting.h"
#include "mozilla/DebugOnly.h"
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
# include "jit/x86/Assembler-x86.h"
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
# include "jit/x64/Assembler-x64.h"
#endif

View File

@@ -241,7 +241,7 @@ MoveEmitterX86::breakCycle(const MoveOperand &to, MoveOp::Type type)
masm.storeDouble(to.floatReg(), cycleSlot());
}
break;
-#ifdef JS_CPU_X64
+#ifdef JS_CODEGEN_X64
case MoveOp::INT32:
// x64 can't pop to a 32-bit destination, so don't push.
if (to.isMemory()) {
@@ -252,7 +252,7 @@ MoveEmitterX86::breakCycle(const MoveOperand &to, MoveOp::Type type)
}
break;
#endif
-#ifndef JS_CPU_X64
+#ifndef JS_CODEGEN_X64
case MoveOp::INT32:
#endif
case MoveOp::GENERAL:
@@ -293,7 +293,7 @@ MoveEmitterX86::completeCycle(const MoveOperand &to, MoveOp::Type type)
masm.loadDouble(cycleSlot(), to.floatReg());
}
break;
-#ifdef JS_CPU_X64
+#ifdef JS_CODEGEN_X64
case MoveOp::INT32:
JS_ASSERT(pushedAtCycle_ != -1);
JS_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(int32_t));
@@ -306,7 +306,7 @@ MoveEmitterX86::completeCycle(const MoveOperand &to, MoveOp::Type type)
}
break;
#endif
-#ifndef JS_CPU_X64
+#ifndef JS_CODEGEN_X64
case MoveOp::INT32:
#endif
case MoveOp::GENERAL:
@@ -329,7 +329,7 @@ MoveEmitterX86::emitInt32Move(const MoveOperand &from, const MoveOperand &to)
} else {
// Memory to memory gpr move.
JS_ASSERT(from.isMemory());
-#ifdef JS_CPU_X64
+#ifdef JS_CODEGEN_X64
// x64 has a ScratchReg. Use it.
masm.load32(toAddress(from), ScratchReg);
masm.move32(ScratchReg, toOperand(to));
@@ -354,7 +354,7 @@ MoveEmitterX86::emitGeneralMove(const MoveOperand &from, const MoveOperand &to)
masm.lea(toOperand(from), to.reg());
} else if (from.isMemory()) {
// Memory to memory gpr move.
-#ifdef JS_CPU_X64
+#ifdef JS_CODEGEN_X64
// x64 has a ScratchReg. Use it.
masm.loadPtr(toAddress(from), ScratchReg);
masm.mov(ScratchReg, toOperand(to));
@@ -366,7 +366,7 @@ MoveEmitterX86::emitGeneralMove(const MoveOperand &from, const MoveOperand &to)
} else {
// Effective address to memory move.
JS_ASSERT(from.isEffectiveAddress());
-#ifdef JS_CPU_X64
+#ifdef JS_CODEGEN_X64
// x64 has a ScratchReg. Use it.
masm.lea(toOperand(from), ScratchReg);
masm.mov(ScratchReg, toOperand(to));
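
The #else branches of these memory-to-memory hunks are elided by the viewer; on x86, which reserves no scratch GPR, the value presumably bounces through the machine stack instead, along these lines (a sketch of the elided branch, not the verbatim code):

    // Hypothetical x86 fallback: no ScratchReg, so stage the value on the stack.
    masm.Push(toOperand(from));
    masm.Pop(toOperand(to));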

View File

@@ -7,11 +7,11 @@
#ifndef jit_MoveEmitter_x86_shared_h
#define jit_MoveEmitter_x86_shared_h
-#if defined(JS_CPU_X86)
+#if defined(JS_CODEGEN_X86)
# include "jit/x86/MacroAssembler-x86.h"
-#elif defined(JS_CPU_X64)
+#elif defined(JS_CODEGEN_X64)
# include "jit/x64/MacroAssembler-x64.h"
-#elif defined(JS_CPU_ARM)
+#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/MacroAssembler-arm.h"
#endif
#include "jit/MoveResolver.h"

View File

@@ -364,7 +364,7 @@ JitRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void *
// NOTE: The fact that x86 ArgumentsRectifier saves the FramePointer is relied upon
// by the baseline bailout code. If this changes, fix that code! See
// BaselineJIT.cpp/BaselineStackBuilder::calculatePrevFramePtr, and
-// BaselineJIT.cpp/InitFromBailout. Check for the |#if defined(JS_CPU_X86)| portions.
+// BaselineJIT.cpp/InitFromBailout. Check for the |#if defined(JS_CODEGEN_X86)| portions.
masm.push(FramePointer);
masm.movl(esp, FramePointer); // Save %esp.

View File

@@ -5901,12 +5901,12 @@ main(int argc, char **argv, char **envp)
if (op.getBoolOption('O'))
OOM_printAllocationCount = true;
-#if defined(JS_CPU_X86) && defined(JS_ION)
+#if defined(JS_CODEGEN_X86) && defined(JS_ION)
if (op.getBoolOption("no-fpu"))
JSC::MacroAssembler::SetFloatingPointDisabled();
#endif
-#if (defined(JS_CPU_X86) || defined(JS_CPU_X64)) && defined(JS_ION)
+#if (defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)) && defined(JS_ION)
if (op.getBoolOption("no-sse3")) {
JSC::MacroAssembler::SetSSE3Disabled();
PropagateFlagToNestedShells("--no-sse3");