Added LIR_ov and full cmov support to x64 backend (bug 514496, r=rreitmai).

David Anderson 2009-09-09 11:19:12 -07:00
parent 22a1120a3e
commit e0f904df58
3 changed files with 49 additions and 14 deletions

@@ -156,10 +156,11 @@ OPDEF(qlo, 50, 1, Op1) // get the low 32 bits of a 64-bit value
OPDEF(qhi, 51, 1, Op1) // get the high 32 bits of a 64-bit value
OPDEF(unused52, 52,-1, None)
OPDEF(unused53, 53,-1, None)
OPDEF(ov, 53, 1, Op1) // test for overflow; value must have just been computed
OPDEF(unused53, 54,-1, None)
// This must be right before LIR_eq, so ((op & ~LIR64) - LIR_ov) can be used
// to index into a convenient table.
OPDEF(ov, 54, 1, Op1) // test for overflow; value must have just been computed
// Integer (32 bit) relational operators. (op ^ 1) is the op which flips the
// left and right sides of the comparison, so (lt ^ 1) == gt, or the operator
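The placement constraint above is what makes the x64 backend's new table lookups work: with ov sitting immediately before the eq..uge block, ((op & ~LIR64) - LIR_ov) is a dense index from 0 to 9. A minimal sketch with hypothetical opcode values (only ov = 54 is taken from this hunk; the rest are assumed to follow consecutively):

// A minimal sketch (not the real LIR enum): hypothetical opcode values with
// ov placed immediately before the eq..uge block, as the new comment requires.
enum LOpcodeSketch { OV = 54, EQ, LT, GT, LE, GE, ULT, UGT, ULE, UGE };

// (op & ~LIR64) - LIR_ov then yields a dense index 0..9, so one 10-entry
// table per backend covers every condition: ov -> 0, eq -> 1, ..., uge -> 9.
constexpr int condIndex(int op) { return op - OV; }
static_assert(condIndex(OV) == 0 && condIndex(UGE) == 9,
              "ov..uge must occupy 10 consecutive opcode slots");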

@@ -670,13 +670,18 @@ namespace nanojit
// (This is true on Intel, is it true on all architectures?)
const Register rr = prepResultReg(ins, GpRegs);
const Register rf = findRegFor(iffalse, GpRegs & ~rmask(rr));
X64Opcode xop;
switch (cond->opcode()) {
default: TODO(asm_cmov);
case LIR_qeq:
xop = X64_cmovqne;
break;
}
int condop = (cond->opcode() & ~LIR64) - LIR_ov;
static const X64Opcode cmov[] = {
X64_cmovno, X64_cmovne, // ov, eq
X64_cmovge, X64_cmovle, X64_cmovg, X64_cmovl, // lt, gt, le, ge
X64_cmovae, X64_cmovbe, X64_cmova, X64_cmovb // ult, ugt, ule, uge
};
NanoAssert(condop >= 0 && condop < int(sizeof(cmov) / sizeof(cmov[0])));
NanoStaticAssert(sizeof(cmov) / sizeof(cmov[0]) == size_t(LIR_uge - LIR_ov + 1));
uint64_t xop = cmov[condop];
if (ins->opcode() == LIR_qcmov)
xop |= (uint64_t)X64_cmov_64;
emitrr(xop, rr, rf);
/*const Register rt =*/ findSpecificRegFor(iftrue, rr);
asm_cmp(cond);
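Note that the table stores the negated condition for each comparison (cmovno for ov, cmovne for eq, cmovge for lt, and so on): the assembler emits code backwards, so at run time the result register is first loaded with iftrue and the cmov then overwrites it with iffalse only when the condition fails. A minimal sketch of the intended run-time behaviour, assuming a 32-bit cmov guarded by lt(a, b); cmovModel is an illustrative name, not part of the patch:

#include <cstdint>

// Sketch of the run-time effect of ins = cmov(lt(a, b), iftrue, iffalse),
// shown in forward execution order (the assembler emits it back to front):
//
//   cmp    ra, rb        ; asm_cmp(cond) sets the flags
//   mov    rr, r_iftrue  ; findSpecificRegFor(iftrue, rr)
//   cmovge rr, rf        ; negated condition: overwrite with iffalse
//                        ; only when (a < b) does not hold
int32_t cmovModel(int32_t a, int32_t b, int32_t iftrue, int32_t iffalse) {
    int32_t rr = iftrue;            // mov rr, iftrue
    if (!(a < b)) rr = iffalse;     // cmovge rr, rf
    return rr;                      // rr = (a < b) ? iftrue : iffalse
}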
@@ -690,22 +695,26 @@ namespace nanojit
// We must ensure there's room for the instr before calculating
// the offset, and the offset determines the opcode (8-bit or 32-bit).
underrunProtect(8);
NanoAssert((condop & ~LIR64) >= LIR_ov);
NanoAssert((condop & ~LIR64) <= LIR_uge);
if (target && isS8(target - _nIns)) {
static const X64Opcode j8[] = {
X64_je8, // eq
X64_jo8, X64_je8, // ov, eq
X64_jl8, X64_jg8, X64_jle8, X64_jge8, // lt, gt, le, ge
X64_jb8, X64_ja8, X64_jbe8, X64_jae8 // ult, ugt, ule, uge
};
uint64_t xop = j8[(condop & ~LIR64) - LIR_eq];
NanoStaticAssert(sizeof(j8) / sizeof(j8[0]) == LIR_uge - LIR_ov + 1);
uint64_t xop = j8[(condop & ~LIR64) - LIR_ov];
xop ^= onFalse ? (uint64_t)X64_jneg8 : 0;
emit8(xop, target - _nIns);
} else {
static const X64Opcode j32[] = {
X64_je, // eq
X64_jo, X64_je, // ov, eq
X64_jl, X64_jg, X64_jle, X64_jge, // lt, gt, le, ge
X64_jb, X64_ja, X64_jbe, X64_jae // ult, ugt, ule, uge
};
uint64_t xop = j32[(condop & ~LIR64) - LIR_eq];
NanoStaticAssert(sizeof(j32) / sizeof(j32[0]) == LIR_uge - LIR_ov + 1);
uint64_t xop = j32[(condop & ~LIR64) - LIR_ov];
xop ^= onFalse ? (uint64_t)X64_jneg : 0;
emit32(xop, target ? target - _nIns : 0);
}
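For context on the 8-bit versus 32-bit choice: underrunProtect may move _nIns into a new code chunk, so the displacement can only be computed once space is reserved, and its size then picks the rel8 or rel32 encoding; a still-unknown target (to be patched later) always takes the rel32 form. A small model of that choice, with chooseForm and fitsRel8 as hypothetical helper names (isS8 is assumed to test for a signed 8-bit displacement):

#include <cstdint>

enum class JccForm { Rel8, Rel32 };

// Hypothetical helper: does the displacement fit a signed byte?
inline bool fitsRel8(int64_t disp) { return disp == (int64_t)(int8_t)disp; }

// Model of the encoding choice above (not the assembler itself): a known,
// nearby target gets the short form; a null target, which will be patched
// later with an arbitrary offset, must use the 32-bit form.
inline JccForm chooseForm(const uint8_t* target, const uint8_t* nextIns) {
    if (target && fitsRel8(target - nextIns))
        return JccForm::Rel8;
    return JccForm::Rel32;
}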
@@ -715,6 +724,9 @@ namespace nanojit
}
void Assembler::asm_cmp(LIns *cond) {
// LIR_ov recycles the flags set by arithmetic ops
if (cond->opcode() == LIR_ov)
return;
LIns *b = cond->oprnd2();
if (isImm32(b)) {
asm_cmp_imm(cond);
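The early return is safe because LIR_ov is only valid immediately after the instruction that computed its operand (see the opcode's comment), so the overflow flag that instruction set is still live when the jo/jno branch or cmovno above consumes it; emitting a compare here would clobber it. A plain C++ model of what the recycled flag reports for a 32-bit add (addOverflows is an illustrative name, not nanojit code):

#include <cstdint>

// Model of the condition a 32-bit LIR_ov observes after "sum = add a, b":
// the add sets OF exactly when the signed result is not representable in
// 32 bits, and the jo/jno or cmovno emitted above reads that flag directly,
// with no intervening compare.
inline bool addOverflows(int32_t a, int32_t b) {
    int64_t wide = (int64_t)a + (int64_t)b;
    return wide < INT32_MIN || wide > INT32_MAX;
}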

@@ -162,7 +162,27 @@ namespace nanojit
X64_andrr = 0xC023400000000003LL, // 32bit and r &= b
X64_call = 0x00000000E8000005LL, // near call
X64_callrax = 0xD0FF000000000002LL, // indirect call to addr in rax (no REX)
X64_cmovqno = 0xC0410F4800000004LL, // 64bit conditional mov if (no overflow) r = b
X64_cmovqb = 0xC0420F4800000004LL, // 64bit conditional mov if (uint <) r = b
X64_cmovqae = 0xC0430F4800000004LL, // 64bit conditional mov if (uint >=) r = b
X64_cmovqne = 0xC0450F4800000004LL, // 64bit conditional mov if (!=) r = b
X64_cmovqbe = 0xC0460F4800000004LL, // 64bit conditional mov if (uint <=) r = b
X64_cmovqa = 0xC0470F4800000004LL, // 64bit conditional mov if (uint >) r = b
X64_cmovql = 0xC04C0F4800000004LL, // 64bit conditional mov if (int <) r = b
X64_cmovqge = 0xC04D0F4800000004LL, // 64bit conditional mov if (int >=) r = b
X64_cmovqle = 0xC04E0F4800000004LL, // 64bit conditional mov if (int <=) r = b
X64_cmovqg = 0xC04F0F4800000004LL, // 64bit conditional mov if (int >) r = b
X64_cmovno = 0xC0410F4000000004LL, // 32bit conditional mov if (no overflow) r = b
X64_cmovb = 0xC0420F4000000004LL, // 32bit conditional mov if (uint <) r = b
X64_cmovae = 0xC0430F4000000004LL, // 32bit conditional mov if (uint >=) r = b
X64_cmovne = 0xC0450F4000000004LL, // 32bit conditional mov if (!=) r = b
X64_cmovbe = 0xC0460F4000000004LL, // 32bit conditional mov if (uint <=) r = b
X64_cmova = 0xC0470F4000000004LL, // 32bit conditional mov if (uint >) r = b
X64_cmovl = 0xC04C0F4000000004LL, // 32bit conditional mov if (int <) r = b
X64_cmovge = 0xC04D0F4000000004LL, // 32bit conditional mov if (int >=) r = b
X64_cmovle = 0xC04E0F4000000004LL, // 32bit conditional mov if (int <=) r = b
X64_cmovg = 0xC04F0F4000000004LL, // 32bit conditional mov if (int >) r = b
X64_cmov_64 = 0x0000000800000000LL, // OR with 32-bit cmov to promote to 64-bit
X64_cmplr = 0xC03B400000000003LL, // 32bit compare r,b
X64_cmpqr = 0xC03B480000000003LL, // 64bit compare r,b
X64_cmplri = 0xF881400000000003LL, // 32bit compare r,imm32
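The 32-bit cmov encodings above differ from their 64-bit counterparts only in the REX prefix byte (0x40 versus 0x48, i.e. with REX.W set), and X64_cmov_64 is exactly that bit in this packed-opcode scheme; OR-ing it in is how asm_cmov promotes a cmov to the 64-bit form for LIR_qcmov. A self-contained check, with the constants copied from this hunk:

#include <cstdint>

// Values copied from the opcode table above, for illustration only.
constexpr uint64_t cmovno_32 = 0xC0410F4000000004ULL;  // X64_cmovno
constexpr uint64_t cmovno_64 = 0xC0410F4800000004ULL;  // X64_cmovqno
constexpr uint64_t cmov_64   = 0x0000000800000000ULL;  // X64_cmov_64

// OR-ing the 32-bit encoding with the mask sets REX.W and yields the
// 64-bit encoding, as asm_cmov does for LIR_qcmov.
static_assert((cmovno_32 | cmov_64) == cmovno_64,
              "X64_cmov_64 is the REX.W bit of the packed cmov encoding");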
@@ -179,6 +199,7 @@ namespace nanojit
X64_imul8 = 0x00C06B4000000004LL, // 32bit signed mul r = b * imm8
X64_jmp = 0x00000000E9000005LL, // jump near rel32
X64_jmp8 = 0x00EB000000000002LL, // jump near rel8
X64_jo = 0x00000000800F0006LL, // jump near if overflow
X64_jb = 0x00000000820F0006LL, // jump near if below (uint <)
X64_jae = 0x00000000830F0006LL, // jump near if above or equal (uint >=)
X64_ja = 0x00000000870F0006LL, // jump near if above (uint >)
@@ -192,6 +213,7 @@ namespace nanojit
X64_jp = 0x000000008A0F0006LL, // jump near if parity (PF == 1)
X64_jnp = 0x000000008B0F0006LL, // jump near if not parity (PF == 0)
X64_jneg = 0x0000000001000000LL, // xor with this mask to negate the condition
X64_jo8 = 0x0070000000000002LL, // jump near if overflow
X64_jb8 = 0x0072000000000002LL, // jump near if below (uint <)
X64_jae8 = 0x0073000000000002LL, // jump near if above or equal (uint >=)
X64_ja8 = 0x0077000000000002LL, // jump near if above (uint >)
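x86 encodes each jcc condition next to its complement, so toggling the low bit of the condition byte negates the branch; X64_jneg (and X64_jneg8 for the short forms) is that bit in the packed encoding, which is how the onFalse path turns, for example, jo into jno. A small check using two constants from this listing:

#include <cstdint>

// Values copied from the listing above, for illustration only.
constexpr uint64_t jb_enc   = 0x00000000820F0006ULL;  // X64_jb  (uint <)
constexpr uint64_t jae_enc  = 0x00000000830F0006ULL;  // X64_jae (uint >=)
constexpr uint64_t jneg_bit = 0x0000000001000000ULL;  // X64_jneg

// Negating "below" gives its complement "above or equal": the XOR toggles
// the low bit of the 0F 8x condition byte.
static_assert((jb_enc ^ jneg_bit) == jae_enc,
              "X64_jneg flips a jcc encoding to its complement");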