Bug 534313 - nanojit: split isQuad() into isI64() + isF64() + is64(). r=dvander,stejohns.

--HG--
extra : convert_revision : f24a70adec4c24dffd3a9c6c3572c5755938291a
Nicholas Nethercote 2010-01-25 08:25:04 +11:00
parent 37c967beaf
commit 43cc6ace57
12 changed files with 65 additions and 55 deletions
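
The gist of the change: callers that previously asked "is this a quad?" now ask for the exact type they care about (isI32/isI64/isF64), and places that genuinely mean "any 64-bit value" spell it out. A minimal standalone model of the predicate split, for orientation only; this is not the real nanojit classes, LTy_I32/LTy_I64 are assumed by symmetry with the isF64() shown in the LIR.h hunk below, and the is64() body is an assumption since the hunks here write it out longhand:

    // Toy model of the LIns type predicates after this patch. retType()
    // and the LTy names mirror those used in the hunks below.
    enum LTy { LTy_I32, LTy_I64, LTy_F64 };

    struct LInsModel {
        LTy ty;
        LTy retType() const { return ty; }
        bool isI32() const { return retType() == LTy_I32; }
        bool isI64() const { return retType() == LTy_I64; }
        bool isF64() const { return retType() == LTy_F64; }
        // is64() is named in the commit title but not defined in the
        // hunks shown here, so this body is an assumption:
        bool is64() const { return isI64() || isF64(); }
        // The old catch-all this patch removes:
        // bool isQuad() const { return isI64() || isF64(); }
    };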

@@ -319,7 +319,7 @@ namespace nanojit
 NanoAssert(arIndex == (uint32_t)n-1);
 i = n-1;
 }
-else if (ins->isQuad()) {
+else if (ins->isI64() || ins->isF64()) {
 NanoAssert(_entries[i + 1]==ins);
 i += 1; // skip high word
 }
@@ -664,7 +664,7 @@ namespace nanojit
 verbose_only( if (d && (_logc->lcbits & LC_Assembly)) {
 setOutputForEOL(" <= spill %s",
 _thisfrag->lirbuf->names->formatRef(ins)); } )
-asm_spill(r, d, pop, ins->isQuad());
+asm_spill(r, d, pop, ins->isI64() || ins->isF64());
 }
 // XXX: This function is error-prone and should be phased out; see bug 513615.
@@ -1793,8 +1793,7 @@ namespace nanojit
 continue;
 }
-const char* rname = ins->isQuad() ? fpn(r) : gpn(r);
-VMPI_sprintf(s, " %s(%s)", rname, n);
+VMPI_sprintf(s, " %s(%s)", gpn(r), n);
 s += VMPI_strlen(s);
 }
 }

@@ -151,7 +151,7 @@ namespace nanojit
 inline /*static*/ uint32_t AR::nStackSlotsFor(LIns* ins)
 {
-return ins->isop(LIR_alloc) ? (ins->size()>>2) : (ins->isQuad() ? 2 : 1);
+return ins->isop(LIR_alloc) ? (ins->size()>>2) : ((ins->isI64() || ins->isF64()) ? 2 : 1);
 }
 inline uint32_t AR::stackSlotsNeeded() const

@@ -907,8 +907,19 @@ namespace nanojit
 iffalse = tmp;
 }
-if (use_cmov)
-return ins3((iftrue->isQuad() || iffalse->isQuad()) ? LIR_qcmov : LIR_cmov, cond, iftrue, iffalse);
+if (use_cmov) {
+LOpcode op = LIR_cmov;
+if (iftrue->isI32() && iffalse->isI32()) {
+op = LIR_cmov;
+} else if (iftrue->isI64() && iffalse->isI64()) {
+op = LIR_qcmov;
+} else if (iftrue->isF64() && iffalse->isF64()) {
+NanoAssertMsg(0, "LIR_fcmov doesn't exist yet, sorry");
+} else {
+NanoAssert(0); // type error
+}
+return ins3(op, cond, iftrue, iffalse);
+}
 LInsp ncond = ins1(LIR_neg, cond); // cond ? -1 : 0
 return ins2(LIR_or,
@@ -964,7 +975,8 @@ namespace nanojit
 ignore = true;
 } else {
 d = top - d;
-if (ins->oprnd1()->isQuad()) {
+LTy ty = ins->oprnd1()->retType();
+if (ty == LTy_I64 || ty == LTy_F64) {
 // storing 8 bytes
 if (stk->get(d) && stk->get(d-1)) {
 ignore = true;
@@ -975,6 +987,7 @@ namespace nanojit
 }
 else {
 // storing 4 bytes
+NanoAssert(ty == LTy_I32);
 if (stk->get(d)) {
 ignore = true;
 } else {

@@ -577,9 +577,6 @@ namespace nanojit
 bool isF64() const {
 return retType() == LTy_F64;
 }
-bool isQuad() const {
-return isI64() || isF64();
-}
 bool isPtr() const {
 #ifdef NANOJIT_64BIT
 return isI64();

@@ -132,7 +132,6 @@ namespace nanojit {
 #ifdef NJ_NO_VARIADIC_MACROS
 static void asm_output(const char *f, ...) {}
 #define gpn(r) regNames[(r)]
-#define fpn(r) regNames[(r)]
 #elif defined(NJ_VERBOSE)
 // Used for printing native instructions. Like Assembler::outputf(),
 // but only outputs if LC_Assembly is set. Also prepends the output
@@ -147,11 +146,9 @@ namespace nanojit {
 } \
 } while (0) /* no semi */
 #define gpn(r) regNames[(r)]
-#define fpn(r) regNames[(r)]
 #else
 #define asm_output(...)
 #define gpn(r)
-#define fpn(r)
 #endif /* NJ_VERBOSE */
 #endif // __nanojit_Native__

@@ -769,13 +769,13 @@ Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
 void
 Assembler::asm_stkarg(LInsp arg, int stkd)
 {
-bool isQuad = arg->isQuad();
+bool isF64 = arg->isF64();
 Register rr;
 if (arg->isUsed() && (rr = arg->getReg(), isKnownReg(rr))) {
 // The argument resides somewhere in registers, so we simply need to
 // push it onto the stack.
-if (!ARM_VFP || !isQuad) {
+if (!ARM_VFP || !isF64) {
 NanoAssert(IsGpReg(rr));
 STR(rr, SP, stkd);
@@ -800,7 +800,7 @@ Assembler::asm_stkarg(LInsp arg, int stkd)
 // The argument does not reside in registers, so we need to get some
 // memory for it and then copy it onto the stack.
 int d = findMemFor(arg);
-if (!isQuad) {
+if (!isF64) {
 STR(IP, SP, stkd);
 if (arg->isop(LIR_alloc)) {
 asm_add_imm(IP, FP, d);
@@ -1323,7 +1323,7 @@ Assembler::asm_load64(LInsp ins)
 return;
 }
-NanoAssert(ins->isQuad());
+NanoAssert(ins->isF64());
 LIns* base = ins->oprnd1();
 int offset = ins->disp();
@@ -2195,8 +2195,7 @@ Assembler::asm_cmp(LIns *cond)
 LInsp lhs = cond->oprnd1();
 LInsp rhs = cond->oprnd2();
-// Not supported yet.
-NanoAssert(!lhs->isQuad() && !rhs->isQuad());
+NanoAssert(lhs->isI32() && rhs->isI32());
 // ready to issue the compare
 if (rhs->isconst()) {
@@ -2542,13 +2541,13 @@ Assembler::asm_load32(LInsp ins)
 void
 Assembler::asm_cmov(LInsp ins)
 {
-NanoAssert(ins->opcode() == LIR_cmov);
+LOpcode op = ins->opcode();
 LIns* condval = ins->oprnd1();
 LIns* iftrue = ins->oprnd2();
 LIns* iffalse = ins->oprnd3();
 NanoAssert(condval->isCmp());
-NanoAssert(!iftrue->isQuad() && !iffalse->isQuad());
+NanoAssert(op == LIR_cmov && iftrue->isI32() && iffalse->isI32());
 const Register rr = prepResultReg(ins, GpRegs);

@@ -319,7 +319,7 @@ namespace nanojit
 }
 void Assembler::asm_store64(LOpcode op, LIns *value, int32_t dr, LIns *base) {
-NanoAssert(value->isQuad());
+NanoAssert(value->isI64() || value->isF64());
 switch (op) {
 case LIR_stfi:
@@ -662,11 +662,14 @@ namespace nanojit
 else {
 d = findMemFor(i);
 if (IsFpReg(r)) {
-NanoAssert(i->isQuad());
+NanoAssert(i->isI64() || i->isF64());
 LFD(r, d, FP);
-} else if (i->isQuad()) {
+} else if (i->isI64() || i->isF64()) {
+NanoAssert(IsGpReg(r));
 LD(r, d, FP);
 } else {
+NanoAssert(i->isI32());
+NanoAssert(IsGpReg(r));
 LWZ(r, d, FP);
 }
 }
@@ -799,7 +802,7 @@ namespace nanojit
 if (p->isop(LIR_alloc)) {
 NanoAssert(isS16(d));
 ADDI(r, FP, d);
-} else if (p->isQuad()) {
+} else if (p->isI64() || p->isF64()) {
 LD(r, d, FP);
 } else {
 LWZ(r, d, FP);
@@ -1183,13 +1186,14 @@ namespace nanojit
 }
 void Assembler::asm_cmov(LIns *ins) {
-NanoAssert(ins->isop(LIR_cmov) || ins->isop(LIR_qcmov));
+LOpcode op = ins->opcode();
 LIns* cond = ins->oprnd1();
 LIns* iftrue = ins->oprnd2();
 LIns* iffalse = ins->oprnd3();
 NanoAssert(cond->isCmp());
-NanoAssert(iftrue->isQuad() == iffalse->isQuad());
+NanoAssert((op == LIR_cmov && iftrue->isI32() && iffalse->isI32()) ||
+(op == LIR_qcmov && iftrue->isI64() && iffalse->isI64()));
 // fixme: we could handle fpu registers here, too, since we're just branching
 Register rr = prepResultReg(ins, GpRegs);

@@ -259,6 +259,9 @@ namespace nanojit
 static const int NumSavedRegs = 18; // R13-R30
 #endif
+static inline bool IsGpReg(Register r) {
+return r <= R31;
+}
 static inline bool IsFpReg(Register r) {
 return r >= F0;
 }

@@ -577,9 +577,7 @@ namespace nanojit
 LInsp lhs = cond->oprnd1();
 LInsp rhs = cond->oprnd2();
-NanoAssert((!lhs->isQuad() && !rhs->isQuad()) || (lhs->isQuad() && rhs->isQuad()));
-NanoAssert(!lhs->isQuad() && !rhs->isQuad());
+NanoAssert(lhs->isI32() && rhs->isI32());
 // ready to issue the compare
 if (rhs->isconst())
@@ -805,7 +803,7 @@ namespace nanojit
 LIns* iffalse = ins->oprnd3();
 NanoAssert(condval->isCmp());
-NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));
+NanoAssert(op == LIR_cmov && iftrue->isI32() && iffalse->isI32());
 const Register rr = prepResultReg(ins, GpRegs);

@@ -932,7 +932,7 @@ namespace nanojit
 void Assembler::asm_regarg(ArgSize sz, LIns *p, Register r) {
 if (sz == ARGSIZE_I) {
-NanoAssert(!p->isQuad());
+NanoAssert(p->isI32());
 if (p->isconst()) {
 asm_quad(r, int64_t(p->imm32()));
 return;
@@ -940,7 +940,7 @@ namespace nanojit
 // sign extend int32 to int64
 MOVSXDR(r, r);
 } else if (sz == ARGSIZE_U) {
-NanoAssert(!p->isQuad());
+NanoAssert(p->isI32());
 if (p->isconst()) {
 asm_quad(r, uint64_t(uint32_t(p->imm32())));
 return;
@@ -965,11 +965,11 @@ namespace nanojit
 MOVQSPR(stk_off, r); // movq [rsp+d8], r
 if (sz == ARGSIZE_I) {
 // extend int32 to int64
-NanoAssert(!p->isQuad());
+NanoAssert(p->isI32());
 MOVSXDR(r, r);
 } else if (sz == ARGSIZE_U) {
 // extend uint32 to uint64
-NanoAssert(!p->isQuad());
+NanoAssert(p->isI32());
 MOVLR(r, r);
 }
 } else {
@@ -1003,7 +1003,7 @@ namespace nanojit
 void Assembler::asm_u2f(LIns *ins) {
 Register r = prepResultReg(ins, FpRegs);
 Register b = findRegFor(ins->oprnd1(), GpRegs);
-NanoAssert(!ins->oprnd1()->isQuad());
+NanoAssert(ins->oprnd1()->isI32());
 // since oprnd1 value is 32bit, its okay to zero-extend the value without worrying about clobbering.
 CVTSQ2SD(r, b); // convert int64 to double
 XORPS(r); // xorps xmmr,xmmr to break dependency chains
@@ -1013,7 +1013,7 @@ namespace nanojit
 void Assembler::asm_f2i(LIns *ins) {
 LIns *lhs = ins->oprnd1();
-NanoAssert(!ins->isQuad() && lhs->isQuad());
+NanoAssert(ins->isI32() && lhs->isF64());
 Register r = prepareResultReg(ins, GpRegs);
 Register b = findRegFor(lhs, FpRegs);
@@ -1027,8 +1027,8 @@ namespace nanojit
 LIns* iftrue = ins->oprnd2();
 LIns* iffalse = ins->oprnd3();
 NanoAssert(cond->isCmp());
-NanoAssert((ins->isop(LIR_qcmov) && iftrue->isQuad() && iffalse->isQuad()) ||
-(ins->isop(LIR_cmov) && !iftrue->isQuad() && !iffalse->isQuad()));
+NanoAssert((ins->isop(LIR_cmov) && iftrue->isI32() && iffalse->isI32()) ||
+(ins->isop(LIR_qcmov) && iftrue->isI64() && iffalse->isI64()));
 // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
 // (This is true on Intel, is it true on all architectures?)
@@ -1313,12 +1313,15 @@ namespace nanojit
 else {
 int d = findMemFor(ins);
 if (IsFpReg(r)) {
-NanoAssert(ins->isQuad());
+NanoAssert(ins->isI64() || ins->isF64());
 // load 64bits into XMM. don't know if double or int64, assume double.
 MOVSDRM(r, d, FP);
-} else if (ins->isQuad()) {
+} else if (ins->isI64() || ins->isF64()) {
+NanoAssert(IsGpReg(r));
 MOVQRM(r, d, FP);
 } else {
+NanoAssert(ins->isI32());
+NanoAssert(IsGpReg(r));
 MOVLRM(r, d, FP);
 }
 }
@@ -1428,7 +1431,7 @@ namespace nanojit
 }
 void Assembler::asm_load32(LIns *ins) {
-NanoAssert(!ins->isQuad());
+NanoAssert(ins->isI32());
 Register r, b;
 int32_t d;
 regalloc_load(ins, GpRegs, r, d, b);
@@ -1461,7 +1464,7 @@ namespace nanojit
 }
 void Assembler::asm_store64(LOpcode op, LIns *value, int d, LIns *base) {
-NanoAssert(value->isQuad());
+NanoAssert(value->isI64() || value->isF64());
 switch (op) {
 case LIR_stqi: {
@@ -1498,7 +1501,7 @@ namespace nanojit
 // single-byte stores with REX prefix.
 const RegisterMask SrcRegs = (op == LIR_stb) ? SingleByteStoreRegs : GpRegs;
-NanoAssert(!value->isQuad());
+NanoAssert(value->isI32());
 Register b = getBaseReg(base, d, BaseRegs);
 Register r = findRegFor(value, SrcRegs & ~rmask(b));

@@ -831,10 +831,7 @@ namespace nanojit
 LInsp lhs = cond->oprnd1();
 LInsp rhs = cond->oprnd2();
-NanoAssert((!lhs->isQuad() && !rhs->isQuad()) || (lhs->isQuad() && rhs->isQuad()));
-// Not supported yet.
-NanoAssert(!lhs->isQuad() && !rhs->isQuad());
+NanoAssert(lhs->isI32() && rhs->isI32());
 // Ready to issue the compare.
 if (rhs->isconst()) {
@@ -1255,7 +1252,7 @@ namespace nanojit
 LIns* iffalse = ins->oprnd3();
 NanoAssert(condval->isCmp());
-NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));
+NanoAssert(op == LIR_cmov && iftrue->isI32() && iffalse->isI32());
 const Register rr = prepResultReg(ins, GpRegs);
@@ -1609,7 +1606,7 @@ namespace nanojit
 void Assembler::asm_farg(LInsp ins, int32_t& stkd)
 {
-NanoAssert(ins->isQuad());
+NanoAssert(ins->isF64());
 Register r = findRegFor(ins, FpRegs);
 if (rmask(r) & XmmRegs) {
 SSE_STQ(stkd, SP, r);
@@ -1895,7 +1892,7 @@ namespace nanojit
 NanoAssert(condop >= LIR_feq && condop <= LIR_fge);
 LIns* lhs = cond->oprnd1();
 LIns* rhs = cond->oprnd2();
-NanoAssert(lhs->isQuad() && rhs->isQuad());
+NanoAssert(lhs->isF64() && rhs->isF64());
 if (config.sse2) {
 // First, we convert (a < b) into (b > a), and (a <= b) into (b >= a).

@@ -928,7 +928,7 @@ namespace nanojit
 #define FCHS() do { count_fpu(); FPUc(0xd9e0); asm_output("fchs"); } while(0)
 #define FLD1() do { count_fpu(); FPUc(0xd9e8); asm_output("fld1"); fpu_push(); } while(0)
 #define FLDZ() do { count_fpu(); FPUc(0xd9ee); asm_output("fldz"); fpu_push(); } while(0)
-#define FFREE(r) do { count_fpu(); FPU(0xddc0, r); asm_output("ffree %s",fpn(r)); } while(0)
+#define FFREE(r) do { count_fpu(); FPU(0xddc0, r); asm_output("ffree %s",gpn(r)); } while(0)
 #define FST32(p,d,b) do { count_stq(); FPUm(0xd902|(p), d, b); asm_output("fst%s32 %d(%s)",((p)?"p":""),d,gpn(b)); if (p) fpu_pop(); } while(0)
 #define FSTQ(p,d,b) do { count_stq(); FPUm(0xdd02|(p), d, b); asm_output("fst%sq %d(%s)",((p)?"p":""),d,gpn(b)); if (p) fpu_pop(); } while(0)
 #define FSTPQ(d,b) FSTQ(1,d,b)
@@ -957,10 +957,10 @@ namespace nanojit
 #define FDIVRdm(m) do { const double* const dm = m; \
 count_ldq(); FPUdm(0xdc07, dm); asm_output("fdivr (%p)",(void*)dm); } while(0)
 #define FINCSTP() do { count_fpu(); FPUc(0xd9f7); asm_output("fincstp"); } while(0)
-#define FSTP(r) do { count_fpu(); FPU(0xddd8, r&7); asm_output("fstp %s",fpn(r)); fpu_pop();} while(0)
+#define FSTP(r) do { count_fpu(); FPU(0xddd8, r&7); asm_output("fstp %s",gpn(r)); fpu_pop();} while(0)
 #define FCOMP() do { count_fpu(); FPUc(0xD8D9); asm_output("fcomp"); fpu_pop();} while(0)
 #define FCOMPP() do { count_fpu(); FPUc(0xDED9); asm_output("fcompp"); fpu_pop();fpu_pop();} while(0)
-#define FLDr(r) do { count_ldq(); FPU(0xd9c0,r); asm_output("fld %s",fpn(r)); fpu_push(); } while(0)
+#define FLDr(r) do { count_ldq(); FPU(0xd9c0,r); asm_output("fld %s",gpn(r)); fpu_push(); } while(0)
 #define EMMS() do { count_fpu(); FPUc(0x0f77); asm_output("emms"); } while (0)
 // standard direct call