[JAEGER] 64-bit MethodJIT support. b=578245, r=dvander.

This commit is contained in:
Sean Stangl 2010-07-23 14:46:13 -07:00
parent b278d2d970
commit 6a4816128b
16 changed files with 506 additions and 49 deletions

View File

@ -379,6 +379,13 @@ public:
m_assembler.movzbl_rr(dest, dest);
}
/*
 * Compare two pointer-width (64-bit) registers and materialize the boolean
 * result of |cond| as 0 or 1 in |dest|.  Mirrors the 32-bit set32() pattern:
 * compare, set the low byte from the condition flags, then zero-extend.
 * NOTE(review): setCC_r writes a byte register, so |dest| must be a
 * byte-addressable register — confirm callers allocate from SingleByteRegs.
 */
void setPtr(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
{
    m_assembler.cmpq_rr(right, left);              // 64-bit compare: computes left - right, sets flags
    m_assembler.setCC_r(x86Condition(cond), dest); // dest.b = 1 if cond holds, else 0
    m_assembler.movzbl_rr(dest, dest);             // zero-extend the byte result
}
Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
{
m_assembler.cmpq_rr(right, left);

View File

@ -2582,9 +2582,11 @@ i?86-*)
AC_DEFINE(JS_32BIT)
;;
x86_64*-*)
ENABLE_TRACEJIT=1
dnl ENABLE_TRACEJIT=1
NANOJIT_ARCH=X64
ENABLE_METHODJIT=1
AC_DEFINE(JS_CPU_X64)
AC_DEFINE(JS_64BIT)
;;
arm*-*)
ENABLE_TRACEJIT=1

View File

@ -116,8 +116,8 @@ struct JSStackFrame
static jsbytecode *const sInvalidPC;
#endif
#if defined(JS_CPU_X86) || defined(JS_CPU_ARM)
void *ncode; /* jit return pc */
#if defined(JS_CPU_X86) || defined(JS_CPU_ARM)
/* Guh. Align. */
void *align_[3];
#endif

View File

@ -317,20 +317,22 @@
* Define JS_64BIT iff we are building in an environment with 64-bit
* addresses.
*/
#ifdef _MSC_VER
# if defined(_M_X64) || defined(_M_AMD64)
# define JS_64BIT
#ifndef JS_64BIT
# ifdef _MSC_VER
# if defined(_M_X64) || defined(_M_AMD64)
# define JS_64BIT
# endif
# elif defined(__GNUC__)
# ifdef __x86_64__
# define JS_64BIT
# endif
# elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
# ifdef __x86_64
# define JS_64BIT
# endif
# else
# error "Implement me"
# endif
#elif defined(__GNUC__)
# ifdef __x86_64__
# define JS_64BIT
# endif
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
# ifdef __x86_64
# define JS_64BIT
# endif
#else
# error "Implement me"
#endif

View File

@ -41,7 +41,13 @@
#if !defined jsjaeger_codegenincs_h__ && defined JS_METHODJIT
#define jsjaeger_codegenincs_h__
#include "nunbox/Assembler.h"
#if defined JS_32BIT
# include "nunbox/Assembler.h"
#elif defined JS_64BIT
# include "nunbox/Assembler64.h"
#else
# error "Neither JS_32BIT or JS_64BIT is defined."
#endif
#endif

View File

@ -68,7 +68,7 @@ JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse
NotCheckedSSE2;
#endif
#ifdef JS_CPU_X86
#if defined(JS_CPU_X86) or defined(JS_CPU_X64)
static const JSC::MacroAssembler::RegisterID JSReturnReg_Type = JSC::X86Registers::ecx;
static const JSC::MacroAssembler::RegisterID JSReturnReg_Data = JSC::X86Registers::edx;
#elif defined(JS_CPU_ARM)
@ -1513,14 +1513,21 @@ mjit::Compiler::emitReturn()
masm.loadPtr(FrameAddress(offsetof(VMFrame, cx)), Registers::ArgReg1);
masm.storePtr(Registers::ReturnReg, FrameAddress(offsetof(VMFrame, fp)));
masm.storePtr(Registers::ReturnReg, Address(Registers::ArgReg1, offsetof(JSContext, fp)));
#if defined(JS_CPU_X86) or defined(JS_CPU_ARM)
masm.subPtr(ImmIntPtr(1), FrameAddress(offsetof(VMFrame, inlineCallCount)));
#elif defined (JS_CPU_X64)
/* Register is clobbered later, so it's safe to use. */
masm.loadPtr(FrameAddress(offsetof(VMFrame, inlineCallCount)), JSReturnReg_Data);
masm.subPtr(ImmIntPtr(1), JSReturnReg_Data);
masm.storePtr(JSReturnReg_Data, FrameAddress(offsetof(VMFrame, inlineCallCount)));
#endif
JS_STATIC_ASSERT(Registers::ReturnReg != JSReturnReg_Data);
JS_STATIC_ASSERT(Registers::ReturnReg != JSReturnReg_Type);
Address rval(JSFrameReg, offsetof(JSStackFrame, rval));
masm.load32(masm.payloadOf(rval), JSReturnReg_Data);
masm.load32(masm.tagOf(rval), JSReturnReg_Type);
masm.loadPayload(rval, JSReturnReg_Data);
masm.loadTypeTag(rval, JSReturnReg_Type);
masm.move(Registers::ReturnReg, JSFrameReg);
masm.loadPtr(Address(JSFrameReg, offsetof(JSStackFrame, ncode)), Registers::ReturnReg);
#ifdef DEBUG
@ -2392,10 +2399,15 @@ mjit::Compiler::jsop_bindname(uint32 index)
pic.hotPathBegin = masm.label();
Address parent(pic.objReg, offsetof(JSObject, fslots) + JSSLOT_PARENT * sizeof(jsval));
masm.loadPayload(Address(JSFrameReg, offsetof(JSStackFrame, scopeChain)), pic.objReg);
masm.loadPtr(Address(JSFrameReg, offsetof(JSStackFrame, scopeChain)), pic.objReg);
pic.shapeGuard = masm.label();
#if defined JS_32BIT
Jump j = masm.branchPtr(Assembler::NotEqual, masm.payloadOf(parent), ImmPtr(0));
#elif defined JS_64BIT
masm.loadPayload(parent, Registers::ValueReg);
Jump j = masm.branchPtr(Assembler::NotEqual, Registers::ValueReg, ImmPtr(0));
#endif
{
pic.slowPathStart = stubcc.masm.label();
stubcc.linkExit(j, Uses(0));
@ -2446,11 +2458,16 @@ mjit::Compiler::jsop_bindname(uint32 index)
{
RegisterID reg = frame.allocReg();
Address scopeChain(JSFrameReg, offsetof(JSStackFrame, scopeChain));
masm.loadPayload(scopeChain, reg);
masm.loadPtr(scopeChain, reg);
Address address(reg, offsetof(JSObject, fslots) + JSSLOT_PARENT * sizeof(jsval));
#if defined JS_32BIT
Jump j = masm.branchPtr(Assembler::NotEqual, masm.payloadOf(address), ImmPtr(0));
#elif defined JS_64BIT
masm.loadPayload(address, Registers::ValueReg);
Jump j = masm.branchPtr(Assembler::NotEqual, Registers::ValueReg, ImmPtr(0));
#endif
stubcc.linkExit(j, Uses(0));
stubcc.leave();

View File

@ -72,15 +72,21 @@ class FrameEntry
return type.isConstant();
}
JSValueTag getKnownTag() const {
return v_.s.tag;
}
JSValueType getKnownType() const {
JS_ASSERT(isTypeKnown());
return knownType;
}
#if defined JS_32BIT
JSValueTag getKnownTag() const {
return v_.s.tag;
}
#elif defined JS_64BIT
JSValueShiftedTag getKnownShiftedTag() const {
return JSValueShiftedTag(v_.asBits & JSVAL_TAG_MASK);
}
#endif
// Return true iff the type of this value is definitely known to be type_.
bool isType(JSValueType type_) const {
return isTypeKnown() && getKnownType() == type_;
@ -91,10 +97,16 @@ class FrameEntry
return isTypeKnown() && getKnownType() != type_;
}
#if defined JS_32BIT
uint32 getPayload32() const {
//JS_ASSERT(!Valueify(v_.asBits).isDouble() || type.synced());
return v_.s.payload.u32;
}
#elif defined JS_64BIT
uint64 getPayload64() const {
return v_.asBits & JSVAL_PAYLOAD_MASK;
}
#endif
bool isCachedNumber() const {
return isNumber;
@ -103,7 +115,11 @@ class FrameEntry
private:
void setType(JSValueType type_) {
type.setConstant();
#if defined JS_32BIT
v_.s.tag = JSVAL_TYPE_TO_TAG(type_);
#elif defined JS_64BIT
v_.debugView.tag = JSVAL_TYPE_TO_TAG(type_);
#endif
knownType = type_;
JS_ASSERT(!isNumber);
}

View File

@ -473,10 +473,14 @@ FrameState::syncData(const FrameEntry *fe, Address to, Assembler &masm) const
JS_ASSERT(fe->data.inRegister() || fe->data.isConstant());
if (fe->data.isConstant()) {
if (!fe->type.synced())
masm.storeValue(fe->getValue(), to);
else
masm.storePayload(Imm32(fe->getPayload32()), to);
if (!fe->type.synced())
masm.storeValue(fe->getValue(), to);
else
#if defined JS_32BIT
masm.storePayload(Imm32(fe->getPayload32()), to);
#elif defined JS_64BIT
masm.storePayload(Imm64(fe->getPayload64()), to);
#endif
} else {
masm.storePayload(fe->data.reg(), to);
}

View File

@ -823,7 +823,7 @@ FrameState::uncopy(FrameEntry *original)
moveOwnership(fe->type.reg(), fe);
} else {
JS_ASSERT(fe->isTypeKnown());
JS_ASSERT(fe->getKnownTag() == original->getKnownTag());
JS_ASSERT(fe->getKnownType() == original->getKnownType());
}
if (original->data.inMemory() && !fe->data.synced())
tempRegForData(original);

View File

@ -51,6 +51,11 @@ struct Registers {
typedef JSC::MacroAssembler::RegisterID RegisterID;
// TODO: Eliminate scratch register (requires rewriting register allocation mechanism)
#if defined(JS_CPU_X64)
static const RegisterID ValueReg = JSC::X86Registers::r15;
#endif
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
static const RegisterID ReturnReg = JSC::X86Registers::eax;
# if defined(JS_CPU_X86) || defined(_MSC_VER)
@ -98,10 +103,11 @@ struct Registers {
# if defined(JS_CPU_X64)
static const uint32 SavedRegs =
/* r11 is scratchRegister, used by JSC. */
(1 << JSC::X86Registers::r12)
| (1 << JSC::X86Registers::r13)
| (1 << JSC::X86Registers::r14)
| (1 << JSC::X86Registers::r15)
// TODO: Remove ValueReg | (1 << JSC::X86Registers::r15)
# if defined(_MSC_VER)
| (1 << JSC::X86Registers::esi)
| (1 << JSC::X86Registers::edi)

View File

@ -104,8 +104,8 @@ JS_STATIC_ASSERT(sizeof(VMFrame) % 16 == 0);
* If these assertions break, update the constants below.
* *** DANGER ***
*/
JS_STATIC_ASSERT(offsetof(VMFrame, savedRBX) == 0x48);
JS_STATIC_ASSERT(offsetof(VMFrame, fp) == 0x30);
JS_STATIC_ASSERT(offsetof(VMFrame, savedRBX) == 0x58);
JS_STATIC_ASSERT(offsetof(VMFrame, fp) == 0x40);
asm volatile (
".text\n"
@ -133,7 +133,7 @@ SYMBOL_STRING(JaegerTrampoline) ":" "\n"
"movq %rsi, %rbx" "\n"
/* Space for the rest of the VMFrame. */
"subq $0x28, %rsp" "\n"
"subq $0x38, %rsp" "\n"
/* Set cx->regs (requires saving rdx). */
"pushq %rdx" "\n"
@ -149,7 +149,7 @@ SYMBOL_STRING(JaegerTrampoline) ":" "\n"
"leaq -8(%rsp), %rdi" "\n"
"call " SYMBOL_STRING_RELOC(UnsetVMFrameRegs) "\n"
"addq $0x40, %rsp" "\n"
"addq $0x50, %rsp" "\n"
"popq %rbx" "\n"
"popq %r15" "\n"
"popq %r14" "\n"
@ -170,7 +170,7 @@ SYMBOL_STRING(JaegerThrowpoline) ":" "\n"
"je throwpoline_exit" "\n"
"jmp *%rax" "\n"
"throwpoline_exit:" "\n"
"addq $0x48, %rsp" "\n"
"addq $0x58, %rsp" "\n"
"popq %rbx" "\n"
"popq %r15" "\n"
"popq %r14" "\n"
@ -185,7 +185,7 @@ asm volatile (
".globl " SYMBOL_STRING(JaegerFromTracer) "\n"
SYMBOL_STRING(JaegerFromTracer) ":" "\n"
/* Restore fp reg. */
"movq 0x30(%rsp), %rbx" "\n"
"movq 0x40(%rsp), %rbx" "\n"
"jmp *%rax" "\n"
);

View File

@ -64,7 +64,7 @@ struct VMFrame
/* This must be the first entry on CPUs which push return addresses. */
void *scriptedReturn;
#if defined(JS_CPU_X86)
#if defined(JS_CPU_X86) or defined(JS_CPU_X64)
uintptr_t padding[2];
#elif defined(JS_CPU_ARM)
uintptr_t padding;

View File

@ -37,7 +37,8 @@
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#if !defined jsjaeger_assembler_h__ && defined JS_METHODJIT
#if !defined jsjaeger_assembler_h__ && defined JS_METHODJIT && defined JS_32BIT
#define jsjaeger_assembler_h__
#include "methodjit/BaseAssembler.h"

View File

@ -0,0 +1,362 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=4 sw=4 et tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
* May 28, 2008.
*
* The Initial Developer of the Original Code is
* Brendan Eich <brendan@mozilla.org>
*
* Contributor(s):
* Sean Stangl <sstangl@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#if !defined jsjaeger_assembler64_h__ && defined JS_METHODJIT && defined JS_64BIT
#define jsjaeger_assembler64_h__
#include "methodjit/BaseAssembler.h"
#include "methodjit/MachineRegs.h"
namespace js {
namespace mjit {
/*
 * A raw 64-bit immediate.  The JSC macro assembler only accepts
 * pointer-width immediates via ImmPtr, so the integer is smuggled
 * through as a pointer value.
 */
class Imm64 : public JSC::MacroAssembler::ImmPtr
{
  public:
    Imm64(uint64 bits)
      : ImmPtr(reinterpret_cast<const void *>(bits))
    { }
};
/*
 * A type tag already shifted into its in-value bit position
 * (a JSValueShiftedTag), passed as a pointer-width immediate.
 */
class ImmShiftedTag : public JSC::MacroAssembler::ImmPtr
{
  public:
    ImmShiftedTag(JSValueShiftedTag shtag)
      : ImmPtr(reinterpret_cast<const void *>(shtag))
    { }
};
/*
 * Convenience wrapper: builds the shifted-tag immediate for an
 * unshifted JSValueType.
 */
class ImmType : public ImmShiftedTag
{
  public:
    ImmType(JSValueType type)
      : ImmShiftedTag(static_cast<JSValueShiftedTag>(JSVAL_TYPE_TO_SHIFTED_TAG(type)))
    { }
};
class Assembler : public BaseAssembler
{
static const uint32 PAYLOAD_OFFSET = 0;
public:
static const JSC::MacroAssembler::Scale JSVAL_SCALE = JSC::MacroAssembler::TimesEight;
Address payloadOf(Address address) {
return address;
}
BaseIndex payloadOf(BaseIndex address) {
return address;
}
Address valueOf(Address address) {
return address;
}
BaseIndex valueOf(BaseIndex address) {
return address;
}
#if 0
/* This does not work with 64-bit boxing format, since tag is not byte-aligned. */
Address tagOf(Address address) {
return Address(address.base, address.offset + TAG_OFFSET);
}
BaseIndex tagOf(BaseIndex address) {
return BaseIndex(address.base, address.index, address.scale, address.offset + TAG_OFFSET);
}
#endif /* 0 */
#if 0
/* type and data need to be unified. */
void loadSlot(RegisterID obj, RegisterID clobber, uint32 slot, RegisterID type, RegisterID data) {
JS_ASSERT(type != data);
Address address(obj, offsetof(JSObject, fslots) + slot * sizeof(Value));
if (slot >= JS_INITIAL_NSLOTS) {
loadPtr(Address(obj, offsetof(JSObject, dslots)), clobber);
address = Address(obj, (slot - JS_INITIAL_NSLOTS) * sizeof(Value));
}
if (obj == type) {
loadData32(address, data);
loadTypeTag(address, type);
} else {
loadTypeTag(address, type);
loadData32(address, data);
}
}
#endif
/* TODO: Don't really need this..? */
void loadValue(Address address, RegisterID dst) {
loadPtr(address, dst);
}
void loadValue(BaseIndex address, RegisterID dst) {
loadPtr(address, dst);
}
void loadValueThenType(Address address, RegisterID val, RegisterID type) {
loadValue(valueOf(address), val);
if (val != type)
move(val, type);
andPtr(Imm64(JSVAL_TAG_MASK), type);
}
void loadValueThenType(BaseIndex address, RegisterID val, RegisterID type) {
loadValue(valueOf(address), val);
if (val != type)
move(val, type);
andPtr(Imm64(JSVAL_TAG_MASK), type);
}
void loadValueThenPayload(Address address, RegisterID val, RegisterID payload) {
loadValue(valueOf(address), val);
if (val != payload)
move(val, payload);
andPtr(Imm64(JSVAL_PAYLOAD_MASK), payload);
}
void loadValueThenPayload(BaseIndex address, RegisterID val, RegisterID payload) {
loadValue(valueOf(address), val);
if (val != payload)
move(val, payload);
andPtr(Imm64(JSVAL_PAYLOAD_MASK), payload);
}
/*
* TODO: All this gets to go.
* This needs to be part of the FrameState, since it will
* be performing register allocation.
*/
void loadTypeTag(Address address, RegisterID reg) {
loadValueThenType(valueOf(address), Registers::ValueReg, reg);
}
void loadTypeTag(BaseIndex address, RegisterID reg) {
loadValueThenType(valueOf(address), Registers::ValueReg, reg);
}
void storeTypeTag(ImmShiftedTag imm, Address address) {
loadValue(valueOf(address), Registers::ValueReg);
andPtr(Imm64(JSVAL_PAYLOAD_MASK), Registers::ValueReg);
orPtr(imm, Registers::ValueReg);
storePtr(Registers::ValueReg, valueOf(address));
}
void storeTypeTag(ImmShiftedTag imm, BaseIndex address) {
loadValue(valueOf(address), Registers::ValueReg);
andPtr(Imm64(JSVAL_PAYLOAD_MASK), Registers::ValueReg);
orPtr(imm, Registers::ValueReg);
storePtr(Registers::ValueReg, valueOf(address));
}
void storeTypeTag(RegisterID reg, Address address) {
/* The type tag must be stored in shifted format. */
loadValue(valueOf(address), Registers::ValueReg);
andPtr(Imm64(JSVAL_PAYLOAD_MASK), Registers::ValueReg);
orPtr(reg, Registers::ValueReg);
storePtr(Registers::ValueReg, valueOf(address));
}
void storeTypeTag(RegisterID reg, BaseIndex address) {
/* The type tag must be stored in shifted format. */
loadValue(valueOf(address), Registers::ValueReg);
andPtr(Imm64(JSVAL_PAYLOAD_MASK), Registers::ValueReg);
orPtr(reg, Registers::ValueReg);
storePtr(Registers::ValueReg, valueOf(address));
}
void loadPayload(Address address, RegisterID reg) {
loadValueThenPayload(address, Registers::ValueReg, reg);
}
void loadPayload(BaseIndex address, RegisterID reg) {
loadValueThenPayload(address, Registers::ValueReg, reg);
}
void storePayload(RegisterID reg, Address address) {
/* Not for doubles. */
loadValue(valueOf(address), Registers::ValueReg);
andPtr(Imm64(JSVAL_TAG_MASK), Registers::ValueReg);
orPtr(reg, Registers::ValueReg);
storePtr(Registers::ValueReg, valueOf(address));
}
void storePayload(RegisterID reg, BaseIndex address) {
/* Not for doubles. */
loadValue(valueOf(address), Registers::ValueReg);
andPtr(Imm64(JSVAL_TAG_MASK), Registers::ValueReg);
orPtr(reg, Registers::ValueReg);
storePtr(Registers::ValueReg, valueOf(address));
}
void storePayload(Imm64 imm, Address address) {
/* Not for doubles. */
storePtr(imm, valueOf(address));
}
#if 0
void storePayload(Imm32 imm, Address address) {
}
#endif
void storeValue(const Value &v, Address address) {
jsval_layout jv;
jv.asBits = JSVAL_BITS(Jsvalify(v));
storePtr(Imm64(jv.asBits), valueOf(address));
}
void storeValue(const Value &v, BaseIndex address) {
jsval_layout jv;
jv.asBits = JSVAL_BITS(Jsvalify(v));
storePtr(Imm64(jv.asBits), valueOf(address));
}
/*
* FIXME: This is only used by slowLoadConstantDouble().
* It should disappear when that function can generate
* constants into the opstream.
*/
void storeLayout(const jsval_layout &jv, Address address) {
storePtr(Imm64(jv.asBits), valueOf(address));
}
void loadFunctionPrivate(RegisterID base, RegisterID to) {
Address privSlot(base, offsetof(JSObject, fslots) +
JSSLOT_PRIVATE * sizeof(Value));
loadPtr(privSlot, to);
lshiftPtr(Imm32(1), to);
}
Jump testNull(Assembler::Condition cond, RegisterID reg) {
return branchPtr(cond, reg, ImmShiftedTag(JSVAL_SHIFTED_TAG_NULL));
}
Jump testNull(Assembler::Condition cond, Address address) {
loadValueThenType(address, Registers::ValueReg, Registers::ValueReg);
return branchPtr(cond, Registers::ValueReg, ImmShiftedTag(JSVAL_SHIFTED_TAG_NULL));
}
Jump testInt32(Assembler::Condition cond, RegisterID reg) {
return branchPtr(cond, reg, ImmShiftedTag(JSVAL_SHIFTED_TAG_INT32));
}
Jump testInt32(Assembler::Condition cond, Address address) {
loadValueThenType(address, Registers::ValueReg, Registers::ValueReg);
return branchPtr(cond, Registers::ValueReg, ImmShiftedTag(JSVAL_SHIFTED_TAG_INT32));
}
Jump testNumber(Assembler::Condition cond, RegisterID reg) {
cond = (cond == Assembler::Equal) ? Assembler::BelowOrEqual : Assembler::Above;
return branchPtr(cond, reg, ImmShiftedTag(JSVAL_SHIFTED_TAG_INT32));
}
Jump testNumber(Assembler::Condition cond, Address address) {
cond = (cond == Assembler::Equal) ? Assembler::BelowOrEqual : Assembler::Above;
loadValueThenType(address, Registers::ValueReg, Registers::ValueReg);
return branchPtr(cond, Registers::ValueReg, ImmShiftedTag(JSVAL_SHIFTED_TAG_INT32));
}
Jump testPrimitive(Assembler::Condition cond, RegisterID reg) {
cond = (cond == Assembler::NotEqual) ? Assembler::AboveOrEqual : Assembler::Below;
return branchPtr(cond, reg, ImmShiftedTag(JSVAL_SHIFTED_TAG_OBJECT));
}
Jump testPrimitive(Assembler::Condition cond, Address address) {
cond = (cond == Assembler::NotEqual) ? Assembler::AboveOrEqual : Assembler::Below;
loadValueThenType(address, Registers::ValueReg, Registers::ValueReg);
return branchPtr(cond, Registers::ValueReg, ImmShiftedTag(JSVAL_SHIFTED_TAG_OBJECT));
}
Jump testObject(Assembler::Condition cond, RegisterID reg) {
return branchPtr(cond, reg, ImmShiftedTag(JSVAL_SHIFTED_TAG_OBJECT));
}
Jump testObject(Assembler::Condition cond, Address address) {
loadValueThenType(address, Registers::ValueReg, Registers::ValueReg);
return branchPtr(cond, Registers::ValueReg, ImmShiftedTag(JSVAL_SHIFTED_TAG_OBJECT));
}
Jump testDouble(Assembler::Condition cond, RegisterID reg) {
Assembler::Condition opcond;
if (cond == Assembler::Equal)
opcond = Assembler::Below;
else
opcond = Assembler::AboveOrEqual;
return branchPtr(opcond, reg, ImmShiftedTag(JSVAL_SHIFTED_TAG_MAX_DOUBLE));
}
Jump testDouble(Assembler::Condition cond, Address address) {
Assembler::Condition opcond;
if (cond == Assembler::Equal)
opcond = Assembler::Below;
else
opcond = Assembler::AboveOrEqual;
loadValueThenType(address, Registers::ValueReg, Registers::ValueReg);
return branchPtr(opcond, Registers::ValueReg, ImmShiftedTag(JSVAL_SHIFTED_TAG_MAX_DOUBLE));
}
Jump testBoolean(Assembler::Condition cond, RegisterID reg) {
return branchPtr(cond, reg, ImmShiftedTag(JSVAL_SHIFTED_TAG_BOOLEAN));
}
Jump testBoolean(Assembler::Condition cond, Address address) {
loadValueThenType(address, Registers::ValueReg, Registers::ValueReg);
return branchPtr(cond, Registers::ValueReg, ImmShiftedTag(JSVAL_SHIFTED_TAG_BOOLEAN));
}
Jump testString(Assembler::Condition cond, RegisterID reg) {
return branchPtr(cond, reg, ImmShiftedTag(JSVAL_SHIFTED_TAG_STRING));
}
Jump testString(Assembler::Condition cond, Address address) {
loadValueThenType(address, Registers::ValueReg, Registers::ValueReg);
return branchPtr(cond, Registers::ValueReg, ImmShiftedTag(JSVAL_SHIFTED_TAG_BOOLEAN));
}
};
} /* namespace mjit */
} /* namespace js */
#endif

View File

@ -710,11 +710,11 @@ mjit::Compiler::jsop_neg()
FPRegisterID fpreg = frame.copyEntryIntoFPReg(fe, FPRegisters::First);
#ifdef JS_CPU_X86
#if defined JS_CPU_X86 or defined JS_CPU_X64
masm.loadDouble(&DoubleNegMask, FPRegisters::Second);
masm.xorDouble(FPRegisters::Second, fpreg);
#else
masm.negDouble(fpreg, fpreg);
#elif defined JS_CPU_ARM
masm.negDouble(fpreg);
#endif
/* Overwrite pushed frame's memory (before push). */

View File

@ -387,19 +387,19 @@ mjit::Compiler::jsop_equality(JSOp op, BoolStub stub, jsbytecode *target, JSOp f
if ((op == JSOP_EQ && fused == JSOP_IFNE) ||
(op == JSOP_NE && fused == JSOP_IFEQ)) {
Jump j = masm.branch32(Assembler::Equal, reg, ImmType(JSVAL_TYPE_UNDEFINED));
Jump j = masm.branchPtr(Assembler::Equal, reg, ImmType(JSVAL_TYPE_UNDEFINED));
jumpAndTrace(j, target);
j = masm.branch32(Assembler::Equal, reg, ImmType(JSVAL_TYPE_NULL));
j = masm.branchPtr(Assembler::Equal, reg, ImmType(JSVAL_TYPE_NULL));
jumpAndTrace(j, target);
} else {
Jump j = masm.branch32(Assembler::Equal, reg, ImmType(JSVAL_TYPE_UNDEFINED));
Jump j2 = masm.branch32(Assembler::NotEqual, reg, ImmType(JSVAL_TYPE_NULL));
Jump j = masm.branchPtr(Assembler::Equal, reg, ImmType(JSVAL_TYPE_UNDEFINED));
Jump j2 = masm.branchPtr(Assembler::NotEqual, reg, ImmType(JSVAL_TYPE_NULL));
jumpAndTrace(j2, target);
j.linkTo(masm.label(), &masm);
}
} else {
Jump j = masm.branch32(Assembler::Equal, reg, ImmType(JSVAL_TYPE_UNDEFINED));
Jump j2 = masm.branch32(Assembler::Equal, reg, ImmType(JSVAL_TYPE_NULL));
Jump j = masm.branchPtr(Assembler::Equal, reg, ImmType(JSVAL_TYPE_UNDEFINED));
Jump j2 = masm.branchPtr(Assembler::Equal, reg, ImmType(JSVAL_TYPE_NULL));
masm.move(Imm32(op == JSOP_NE), reg);
Jump j3 = masm.jump();
j2.linkTo(masm.label(), &masm);
@ -1085,7 +1085,12 @@ mjit::Compiler::jsop_setelem()
/* guard not a hole */
Address slot(objReg, id->getValue().toInt32() * sizeof(Value));
#if defined JS_32BIT
Jump notHole = masm.branch32(Assembler::Equal, masm.tagOf(slot), ImmType(JSVAL_TYPE_MAGIC));
#elif defined JS_64BIT
masm.loadTypeTag(slot, Registers::ValueReg);
Jump notHole = masm.branchPtr(Assembler::Equal, Registers::ValueReg, ImmType(JSVAL_TYPE_MAGIC));
#endif
stubcc.linkExit(notHole, Uses(3));
stubcc.leave();
@ -1114,7 +1119,12 @@ mjit::Compiler::jsop_setelem()
/* guard not a hole */
BaseIndex slot(objReg, idReg, Assembler::JSVAL_SCALE);
#if defined JS_32BIT
Jump notHole = masm.branch32(Assembler::Equal, masm.tagOf(slot), ImmType(JSVAL_TYPE_MAGIC));
#elif defined JS_64BIT
masm.loadTypeTag(slot, Registers::ValueReg);
Jump notHole = masm.branchPtr(Assembler::Equal, Registers::ValueReg, ImmType(JSVAL_TYPE_MAGIC));
#endif
stubcc.linkExit(notHole, Uses(3));
stubcc.leave();
@ -1167,7 +1177,12 @@ mjit::Compiler::jsop_getelem_dense(FrameEntry *obj, FrameEntry *id, RegisterID o
/* guard not a hole */
Address slot(objReg, id->getValue().toInt32() * sizeof(Value));
#if defined JS_32BIT
Jump notHole = masm.branch32(Assembler::Equal, masm.tagOf(slot), ImmType(JSVAL_TYPE_MAGIC));
#elif defined JS_64BIT
masm.loadTypeTag(slot, Registers::ValueReg);
Jump notHole = masm.branchPtr(Assembler::Equal, Registers::ValueReg, ImmType(JSVAL_TYPE_MAGIC));
#endif
stubcc.linkExit(notHole, Uses(2));
/* Load slot address into regs. */
@ -1182,7 +1197,12 @@ mjit::Compiler::jsop_getelem_dense(FrameEntry *obj, FrameEntry *id, RegisterID o
/* guard not a hole */
BaseIndex slot(objReg, idReg.reg(), Assembler::JSVAL_SCALE);
#if defined JS_32BIT
Jump notHole = masm.branch32(Assembler::Equal, masm.tagOf(slot), ImmType(JSVAL_TYPE_MAGIC));
#elif defined JS_64BIT
masm.loadTypeTag(slot, Registers::ValueReg);
Jump notHole = masm.branchPtr(Assembler::Equal, Registers::ValueReg, ImmType(JSVAL_TYPE_MAGIC));
#endif
stubcc.linkExit(notHole, Uses(2));
masm.loadTypeTag(slot, tmpReg);
@ -1388,14 +1408,23 @@ mjit::Compiler::jsop_stricteq(JSOp op)
}
FrameEntry *known = lhsTest ? lhs : rhs;
JSValueTag mask = known->getKnownTag();
/* This is only true if the other side is |null|. */
RegisterID result = frame.allocReg(Registers::SingleByteRegs);
#if defined JS_CPU_X86 or defined JS_CPU_ARM
JSValueTag mask = known->getKnownTag();
if (frame.shouldAvoidTypeRemat(test))
masm.set32(cond, masm.tagOf(frame.addressOf(test)), Imm32(mask), result);
else
masm.set32(cond, frame.tempRegForType(test), Imm32(mask), result);
#elif defined JS_CPU_X64
RegisterID maskReg = frame.allocReg();
masm.move(Imm64(known->getKnownShiftedTag()), maskReg);
RegisterID r = frame.tempRegForType(test);
masm.setPtr(cond, r, maskReg, result);
frame.freeReg(maskReg);
#endif
frame.popn(2);
frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, result);
return;
@ -1428,10 +1457,15 @@ mjit::Compiler::jsop_stricteq(JSOp op)
/* Do a dynamic test. */
bool val = lhsTest ? lhs->getValue().toBoolean() : rhs->getValue().toBoolean();
#if defined JS_CPU_X86 or defined JS_CPU_ARM
if (frame.shouldAvoidDataRemat(test))
masm.set32(cond, masm.payloadOf(frame.addressOf(test)), Imm32(val), result);
else
masm.set32(cond, frame.tempRegForData(test), Imm32(val), result);
#elif defined JS_CPU_X64
RegisterID r = frame.tempRegForData(test);
masm.set32(cond, r, Imm32(val), result);
#endif
if (!test->isTypeKnown()) {
Jump done = masm.jump();