Bug 560160 - nanojit: rename LOpcode-related names. r=edwsmith.
--HG-- extra : convert_revision : 234af92683dad306d03911975b0e9afc57936cf5
parent aa481cb81c
commit 0ca74ddded
@ -85,16 +85,16 @@ CL___( LALLOC, 1) // 2% LIR_alloc

CL___( LIMM_I, 4) // 6% LIR_imm
CL_64( LIMM_Q, 3) // 9% LIR_quad
CL___( LIMM_F, 3) // 12% LIR_float
CL___( LIMM_D, 3) // 12% LIR_float

CL___( LOP_I_I, 2) // 14% LIR_neg, LIR_not
CL_64( LOP_Q_Q, 0) // 14% (none)
CL___( LOP_F_F, 2) // 16% LIR_fneg
CL___( LOP_D_D, 2) // 16% LIR_fneg

CL___( LOP_I_II, 6) // 32% LIR_add, LIR_and, LIR_eq, etc.
CL_64( LOP_Q_QQ, 7) // 39% LIR_qiadd, LIR_qiand, LIR_qeq, etc.
CL_64( LOP_Q_QI, 2) // 41% LIR_qilsh, LIR_qirsh, LIR_qursh
CL___( LOP_F_FF, 0) // 51% LIR_fadd, etc.
CL___( LOP_D_DD, 0) // 51% LIR_fadd, etc.

// cmov has a low weight because is also used with LIR_div/LIR_mod.
CL___( LOP_I_BII, 1) // 52% LIR_cmov
@ -102,29 +102,29 @@ CL_64( LOP_Q_BQQ, 2) // 54% LIR_qcmov

CL___( LOP_B_II, 3) // 57% LIR_eq, LIR_lt, etc
CL_64( LOP_B_QQ, 3) // 60% LIR_qeq, LIR_qlt, etc
CL___( LOP_B_FF, 3) // 63% LIR_feq, LIR_flt, etc
CL___( LOP_B_DD, 3) // 63% LIR_feq, LIR_flt, etc

CL_64( LOP_Q_I, 2) // 65% LIR_i2q, LIR_u2q
CL___( LOP_F_I, 2) // 67% LIR_i2f, LIR_u2f
CL___( LOP_D_I, 2) // 67% LIR_i2f, LIR_u2f
CL_64( LOP_I_Q, 1) // 68% LIR_q2i
CL___( LOP_I_F, 1) // 69% LIR_qlo, LIR_qhi, LIR_f2i
CL___( LOP_F_II, 1) // 70% LIR_qjoin
CL___( LOP_I_D, 1) // 69% LIR_qlo, LIR_qhi, LIR_f2i
CL___( LOP_D_II, 1) // 70% LIR_qjoin

CL___( LLD_I, 3) // 73% LIR_ld, LIR_ldc, LIR_ld*b, LIR_ld*s
CL_64( LLD_Q, 2) // 75% LIR_ldq, LIR_ldqc
CL___( LLD_F, 3) // 78% LIR_ldf, LIR_ldfc
CL___( LLD_D, 3) // 78% LIR_ldf, LIR_ldfc

CL___( LST_I, 5) // 83% LIR_sti
CL_64( LST_Q, 4) // 87% LIR_stqi
CL___( LST_F, 5) // 92% LIR_stfi
CL___( LST_D, 5) // 92% LIR_stfi

CL___( LCALL_I_I1, 1) // 93% LIR_icall
CL___( LCALL_I_I6, 1) // 94% LIR_icall
CL_64( LCALL_Q_Q2, 1) // 95% LIR_qcall
CL_64( LCALL_Q_Q7, 1) // 96% LIR_qcall
CL___( LCALL_F_F3, 1) // 97% LIR_fcall
CL___( LCALL_F_F8, 1) // 98% LIR_fcall
CL_64( LCALL_V_IQF, 1) // 99% LIR_icall or LIR_qcall
CL___( LCALL_D_D3, 1) // 97% LIR_fcall
CL___( LCALL_D_D8, 1) // 98% LIR_fcall
CL_64( LCALL_V_IQD, 1) // 99% LIR_icall or LIR_qcall

CL___( LLABEL, 1) //100% LIR_label
@ -363,7 +363,7 @@ double sinFn(double d) {

Function functions[] = {
FN(puts, argMask(ARGTYPE_P, 1, 1) | retMask(ARGTYPE_I)),
FN(sin, argMask(ARGTYPE_F, 1, 1) | retMask(ARGTYPE_F)),
FN(sin, argMask(ARGTYPE_D, 1, 1) | retMask(ARGTYPE_D)),
FN(malloc, argMask(ARGTYPE_P, 1, 1) | retMask(ARGTYPE_P)),
FN(free, argMask(ARGTYPE_P, 1, 1) | retMask(ARGTYPE_V))
};
@ -693,9 +693,9 @@ FragmentAssembler::assemble_call(const string &op)
size_t argc = mTokens.size();
for (size_t i = 0; i < argc; ++i) {
args[i] = ref(mTokens[mTokens.size() - (i+1)]);
if (args[i]->isF64()) ty = ARGTYPE_F;
if (args[i]->isD()) ty = ARGTYPE_D;
#ifdef NANOJIT_64BIT
else if (args[i]->isI64()) ty = ARGTYPE_Q;
else if (args[i]->isQ()) ty = ARGTYPE_Q;
#endif
else ty = ARGTYPE_I;
// Nb: i+1 because argMask() uses 1-based arg counting.
@ -705,7 +705,7 @@ FragmentAssembler::assemble_call(const string &op)
// Select return type from opcode.
ty = 0;
if (mOpcode == LIR_icall) ty = ARGTYPE_LO;
else if (mOpcode == LIR_fcall) ty = ARGTYPE_F;
else if (mOpcode == LIR_fcall) ty = ARGTYPE_D;
#ifdef NANOJIT_64BIT
else if (mOpcode == LIR_qcall) ty = ARGTYPE_Q;
#endif
@ -1024,19 +1024,19 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons

case LIR_int:
need(1);
ins = mLir->insImm(imm(mTokens[0]));
ins = mLir->insImmI(imm(mTokens[0]));
break;

#ifdef NANOJIT_64BIT
case LIR_quad:
need(1);
ins = mLir->insImmq(lquad(mTokens[0]));
ins = mLir->insImmQ(lquad(mTokens[0]));
break;
#endif

case LIR_float:
need(1);
ins = mLir->insImmf(immf(mTokens[0]));
ins = mLir->insImmD(immf(mTokens[0]));
break;

#if NJ_EXPANDED_LOADSTORE_SUPPORTED
@ -1265,25 +1265,25 @@ const CallInfo ci_Q_Q7 = CI(f_Q_Q7, argMask(ARGTYPE_Q, 1, 7) |
retMask(ARGTYPE_Q));
#endif

const CallInfo ci_F_F3 = CI(f_F_F3, argMask(ARGTYPE_F, 1, 3) |
argMask(ARGTYPE_F, 2, 3) |
argMask(ARGTYPE_F, 3, 3) |
retMask(ARGTYPE_F));
const CallInfo ci_F_F3 = CI(f_F_F3, argMask(ARGTYPE_D, 1, 3) |
argMask(ARGTYPE_D, 2, 3) |
argMask(ARGTYPE_D, 3, 3) |
retMask(ARGTYPE_D));

const CallInfo ci_F_F8 = CI(f_F_F8, argMask(ARGTYPE_F, 1, 8) |
argMask(ARGTYPE_F, 2, 8) |
argMask(ARGTYPE_F, 3, 8) |
argMask(ARGTYPE_F, 4, 8) |
argMask(ARGTYPE_F, 5, 8) |
argMask(ARGTYPE_F, 6, 8) |
argMask(ARGTYPE_F, 7, 8) |
argMask(ARGTYPE_F, 8, 8) |
retMask(ARGTYPE_F));
const CallInfo ci_F_F8 = CI(f_F_F8, argMask(ARGTYPE_D, 1, 8) |
argMask(ARGTYPE_D, 2, 8) |
argMask(ARGTYPE_D, 3, 8) |
argMask(ARGTYPE_D, 4, 8) |
argMask(ARGTYPE_D, 5, 8) |
argMask(ARGTYPE_D, 6, 8) |
argMask(ARGTYPE_D, 7, 8) |
argMask(ARGTYPE_D, 8, 8) |
retMask(ARGTYPE_D));

#ifdef NANOJIT_64BIT
const CallInfo ci_V_IQF = CI(f_V_IQF, argMask(ARGTYPE_I, 1, 3) |
argMask(ARGTYPE_Q, 2, 3) |
argMask(ARGTYPE_F, 3, 3) |
argMask(ARGTYPE_D, 3, 3) |
retMask(ARGTYPE_V));
#endif
@ -1336,8 +1336,8 @@ FragmentAssembler::assembleRandomFragment(int nIns)

// Nb: there are no Q_Q_ops.

vector<LOpcode> F_F_ops;
F_F_ops.push_back(LIR_fneg);
vector<LOpcode> D_D_ops;
D_D_ops.push_back(LIR_fneg);

vector<LOpcode> I_II_ops;
I_II_ops.push_back(LIR_add);
@ -1367,11 +1367,11 @@ FragmentAssembler::assembleRandomFragment(int nIns)
Q_QI_ops.push_back(LIR_qursh);
#endif

vector<LOpcode> F_FF_ops;
F_FF_ops.push_back(LIR_fadd);
F_FF_ops.push_back(LIR_fsub);
F_FF_ops.push_back(LIR_fmul);
F_FF_ops.push_back(LIR_fdiv);
vector<LOpcode> D_DD_ops;
D_DD_ops.push_back(LIR_fadd);
D_DD_ops.push_back(LIR_fsub);
D_DD_ops.push_back(LIR_fmul);
D_DD_ops.push_back(LIR_fdiv);

vector<LOpcode> I_BII_ops;
I_BII_ops.push_back(LIR_cmov);
@ -1405,12 +1405,12 @@ FragmentAssembler::assembleRandomFragment(int nIns)
B_QQ_ops.push_back(LIR_quge);
#endif

vector<LOpcode> B_FF_ops;
B_FF_ops.push_back(LIR_feq);
B_FF_ops.push_back(LIR_flt);
B_FF_ops.push_back(LIR_fgt);
B_FF_ops.push_back(LIR_fle);
B_FF_ops.push_back(LIR_fge);
vector<LOpcode> B_DD_ops;
B_DD_ops.push_back(LIR_feq);
B_DD_ops.push_back(LIR_flt);
B_DD_ops.push_back(LIR_fgt);
B_DD_ops.push_back(LIR_fle);
B_DD_ops.push_back(LIR_fge);

#ifdef NANOJIT_64BIT
vector<LOpcode> Q_I_ops;
@ -1421,9 +1421,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)
I_Q_ops.push_back(LIR_q2i);
#endif

vector<LOpcode> F_I_ops;
F_I_ops.push_back(LIR_i2f);
F_I_ops.push_back(LIR_u2f);
vector<LOpcode> D_I_ops;
D_I_ops.push_back(LIR_i2f);
D_I_ops.push_back(LIR_u2f);

vector<LOpcode> I_F_ops;
#if NJ_SOFTFLOAT_SUPPORTED
@ -1432,9 +1432,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)
#endif
I_F_ops.push_back(LIR_f2i);

vector<LOpcode> F_II_ops;
vector<LOpcode> D_II_ops;
#if NJ_SOFTFLOAT_SUPPORTED
F_II_ops.push_back(LIR_qjoin);
D_II_ops.push_back(LIR_qjoin);
#endif

vector<LOpcode> I_loads;
@ -1453,11 +1453,11 @@ FragmentAssembler::assembleRandomFragment(int nIns)
Q_loads.push_back(LIR_ldq);
#endif

vector<LOpcode> F_loads;
F_loads.push_back(LIR_ldf);
vector<LOpcode> D_loads;
D_loads.push_back(LIR_ldf);
#if NJ_EXPANDED_LOADSTORE_SUPPORTED
// this loads a 32-bit float and expands it to 64-bit float
F_loads.push_back(LIR_ld32f);
D_loads.push_back(LIR_ld32f);
#endif

enum LInsClass {
@ -1552,15 +1552,15 @@ FragmentAssembler::assembleRandomFragment(int nIns)
// and 1 and small multiples of 4 which are common due to memory
// addressing. This puts some realistic stress on CseFilter.
case LIMM_I: {
int32_t imm32 = 0; // shut gcc up
int32_t immI = 0; // shut gcc up
switch (rnd(5)) {
case 0: imm32 = 0; break;
case 1: imm32 = 1; break;
case 2: imm32 = 4 * (rnd(256) + 1); break; // 4, 8, ..., 1024
case 3: imm32 = rnd(19999) - 9999; break; // -9999..9999
case 4: imm32 = rndI32(); break; // -RAND_MAX..RAND_MAX
case 0: immI = 0; break;
case 1: immI = 1; break;
case 2: immI = 4 * (rnd(256) + 1); break; // 4, 8, ..., 1024
case 3: immI = rnd(19999) - 9999; break; // -9999..9999
case 4: immI = rndI32(); break; // -RAND_MAX..RAND_MAX
}
ins = mLir->insImm(imm32);
ins = mLir->insImmI(immI);
addOrReplace(Is, ins);
n++;
break;
@ -1576,17 +1576,17 @@ FragmentAssembler::assembleRandomFragment(int nIns)
case 3: imm64 = rnd(19999) - 9999; break; // -9999..9999
case 4: imm64 = uint64_t(rndU32()) << 32 | rndU32(); break; // possibly big!
}
ins = mLir->insImmq(imm64);
ins = mLir->insImmQ(imm64);
addOrReplace(Qs, ins);
n++;
break;
}
#endif

case LIMM_F: {
case LIMM_D: {
// We don't explicitly generate infinities and NaNs here, but they
// end up occurring due to ExprFilter evaluating expressions like
// fdiv(1,0) and fdiv(Infinity,Infinity).
// divd(1,0) and divd(Infinity,Infinity).
double imm64f = 0;
switch (rnd(5)) {
case 0: imm64f = 0.0; break;
@ -1602,7 +1602,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
imm64f = u.d;
break;
}
ins = mLir->insImmf(imm64f);
ins = mLir->insImmD(imm64f);
addOrReplace(Fs, ins);
n++;
break;
@ -1618,9 +1618,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)

// case LOP_Q_Q: no instruction in this category

case LOP_F_F:
case LOP_D_D:
if (!Fs.empty()) {
ins = mLir->ins1(rndPick(F_F_ops), rndPick(Fs));
ins = mLir->ins1(rndPick(D_D_ops), rndPick(Fs));
addOrReplace(Fs, ins);
n++;
}
@ -1636,15 +1636,15 @@ FragmentAssembler::assembleRandomFragment(int nIns)
// XXX: ExprFilter can't fold a div/mod with constant
// args, due to the horrible semantics of LIR_mod. So we
// just don't generate anything if we hit that case.
if (!lhs->isconst() || !rhs->isconst()) {
if (!lhs->isImmI() || !rhs->isImmI()) {
// If the divisor is positive, no problems. If it's zero, we get an
// exception. If it's -1 and the dividend is -2147483648 (-2^31) we get
// an exception (and this has been encountered in practice). So we only
// allow positive divisors, ie. compute: lhs / (rhs > 0 ? rhs : -k),
// where k is a random number in the range 2..100 (this ensures we have
// some negative divisors).
LIns* gt0 = mLir->ins2i(LIR_gt, rhs, 0);
LIns* rhs2 = mLir->ins3(LIR_cmov, gt0, rhs, mLir->insImm(-((int32_t)rnd(99)) - 2));
LIns* gt0 = mLir->ins2ImmI(LIR_gt, rhs, 0);
LIns* rhs2 = mLir->ins3(LIR_cmov, gt0, rhs, mLir->insImmI(-((int32_t)rnd(99)) - 2));
LIns* div = mLir->ins2(LIR_div, lhs, rhs2);
if (op == LIR_div) {
ins = div;
@ -1688,9 +1688,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)
break;
#endif

case LOP_F_FF:
case LOP_D_DD:
if (!Fs.empty()) {
ins = mLir->ins2(rndPick(F_FF_ops), rndPick(Fs), rndPick(Fs));
ins = mLir->ins2(rndPick(D_DD_ops), rndPick(Fs), rndPick(Fs));
addOrReplace(Fs, ins);
n++;
}
@ -1732,9 +1732,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)
break;
#endif

case LOP_B_FF:
case LOP_B_DD:
if (!Fs.empty()) {
ins = mLir->ins2(rndPick(B_FF_ops), rndPick(Fs), rndPick(Fs));
ins = mLir->ins2(rndPick(B_DD_ops), rndPick(Fs), rndPick(Fs));
// XXX: we don't push the result, because most (all?) of the
// backends currently can't handle cmovs/qcmovs that take
// float comparisons for the test (see bug 520944). This means
@ -1754,9 +1754,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)
break;
#endif

case LOP_F_I:
case LOP_D_I:
if (!Is.empty()) {
ins = mLir->ins1(rndPick(F_I_ops), rndPick(Is));
ins = mLir->ins1(rndPick(D_I_ops), rndPick(Is));
addOrReplace(Fs, ins);
n++;
}
@ -1772,7 +1772,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
break;
#endif

case LOP_I_F:
case LOP_I_D:
// XXX: NativeX64 doesn't implement qhi yet (and it may not need to).
#if !defined NANOJIT_X64
if (!Fs.empty()) {
@ -1783,9 +1783,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)
#endif
break;

case LOP_F_II:
if (!Is.empty() && !F_II_ops.empty()) {
ins = mLir->ins2(rndPick(F_II_ops), rndPick(Is), rndPick(Is));
case LOP_D_II:
if (!Is.empty() && !D_II_ops.empty()) {
ins = mLir->ins2(rndPick(D_II_ops), rndPick(Is), rndPick(Is));
addOrReplace(Fs, ins);
n++;
}
@ -1813,10 +1813,10 @@ FragmentAssembler::assembleRandomFragment(int nIns)
break;
#endif

case LLD_F:
case LLD_D:
if (!M8ps.empty()) {
LIns* base = rndPick(M8ps);
ins = mLir->insLoad(rndPick(F_loads), base, rndOffset64(base->size()), ACC_LOAD_ANY);
ins = mLir->insLoad(rndPick(D_loads), base, rndOffset64(base->size()), ACC_LOAD_ANY);
addOrReplace(Fs, ins);
n++;
}
@ -1826,7 +1826,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
vector<LIns*> Ms = rnd(2) ? M4s : M8ps;
if (!Ms.empty() && !Is.empty()) {
LIns* base = rndPick(Ms);
mLir->insStorei(rndPick(Is), base, rndOffset32(base->size()), ACC_STORE_ANY);
mLir->insStore(rndPick(Is), base, rndOffset32(base->size()), ACC_STORE_ANY);
n++;
}
break;
@ -1836,16 +1836,16 @@ FragmentAssembler::assembleRandomFragment(int nIns)
case LST_Q:
if (!M8ps.empty() && !Qs.empty()) {
LIns* base = rndPick(M8ps);
mLir->insStorei(rndPick(Qs), base, rndOffset64(base->size()), ACC_STORE_ANY);
mLir->insStore(rndPick(Qs), base, rndOffset64(base->size()), ACC_STORE_ANY);
n++;
}
break;
#endif

case LST_F:
case LST_D:
if (!M8ps.empty() && !Fs.empty()) {
LIns* base = rndPick(M8ps);
mLir->insStorei(rndPick(Fs), base, rndOffset64(base->size()), ACC_STORE_ANY);
mLir->insStore(rndPick(Fs), base, rndOffset64(base->size()), ACC_STORE_ANY);
n++;
}
break;
@ -1890,7 +1890,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
break;
#endif

case LCALL_F_F3:
case LCALL_D_D3:
if (!Fs.empty()) {
LIns* args[3] = { rndPick(Fs), rndPick(Fs), rndPick(Fs) };
ins = mLir->insCall(&ci_F_F3, args);
@ -1899,7 +1899,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
}
break;

case LCALL_F_F8:
case LCALL_D_D8:
if (!Fs.empty()) {
LIns* args[8] = { rndPick(Fs), rndPick(Fs), rndPick(Fs), rndPick(Fs),
rndPick(Fs), rndPick(Fs), rndPick(Fs), rndPick(Fs) };
@ -1910,7 +1910,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
break;

#ifdef NANOJIT_64BIT
case LCALL_V_IQF:
case LCALL_V_IQD:
if (!Is.empty() && !Qs.empty() && !Fs.empty()) {
// Nb: args[] holds the args in reverse order... sigh.
LIns* args[3] = { rndPick(Fs), rndPick(Qs), rndPick(Is) };
@ -1940,7 +1940,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)

// Return 0.
mReturnTypeBits |= RT_INT32;
mLir->ins1(LIR_ret, mLir->insImm(0));
mLir->ins1(LIR_ret, mLir->insImmI(0));

endFragment();
}
@ -66,8 +66,8 @@ namespace nanojit
, _branchStateMap(alloc)
, _patches(alloc)
, _labels(alloc)
#if NJ_USES_QUAD_CONSTANTS
, _quadConstants(alloc)
#if NJ_USES_IMMD_POOL
, _immDPool(alloc)
#endif
, _epilogue(NULL)
, _err(None)
@ -168,8 +168,8 @@ namespace nanojit
_branchStateMap.clear();
_patches.clear();
_labels.clear();
#if NJ_USES_QUAD_CONSTANTS
_quadConstants.clear();
#if NJ_USES_IMMD_POOL
_immDPool.clear();
#endif
}
@ -321,7 +321,7 @@ namespace nanojit
NanoAssert(arIndex == (uint32_t)n-1);
i = n-1;
}
else if (ins->isN64()) {
else if (ins->isQorD()) {
NanoAssert(_entries[i + 1]==ins);
i += 1; // skip high word
}
@ -539,15 +539,15 @@ namespace nanojit
return r;
}

#if NJ_USES_QUAD_CONSTANTS
const uint64_t* Assembler::findQuadConstant(uint64_t q)
#if NJ_USES_IMMD_POOL
const uint64_t* Assembler::findImmDFromPool(uint64_t q)
{
uint64_t* p = _quadConstants.get(q);
uint64_t* p = _immDPool.get(q);
if (!p)
{
p = new (_dataAlloc) uint64_t;
*p = q;
_quadConstants.put(q, p);
_immDPool.put(q, p);
}
return p;
}
@ -555,8 +555,8 @@ namespace nanojit

int Assembler::findMemFor(LIns *ins)
{
#if NJ_USES_QUAD_CONSTANTS
NanoAssert(!ins->isconstf());
#if NJ_USES_IMMD_POOL
NanoAssert(!ins->isImmD());
#endif
if (!ins->isInAr()) {
uint32_t const arIndex = arReserve(ins);
@ -647,7 +647,7 @@ namespace nanojit
if (_logc->lcbits & LC_Assembly) {
setOutputForEOL(" <= spill %s",
_thisfrag->lirbuf->printer->formatRef(&b, ins)); } )
asm_spill(r, d, pop, ins->isN64());
asm_spill(r, d, pop, ins->isQorD());
}
}
@ -1178,8 +1178,8 @@ namespace nanojit
void Assembler::asm_jmp(LInsp ins, InsList& pending_lives)
{
NanoAssert((ins->isop(LIR_j) && !ins->oprnd1()) ||
(ins->isop(LIR_jf) && ins->oprnd1()->isconstval(0)) ||
(ins->isop(LIR_jt) && ins->oprnd1()->isconstval(1)));
(ins->isop(LIR_jf) && ins->oprnd1()->isImmI(0)) ||
(ins->isop(LIR_jt) && ins->oprnd1()->isImmI(1)));

countlir_jmp();
LInsp to = ins->getTarget();
@ -1213,8 +1213,8 @@ namespace nanojit
{
bool branchOnFalse = (ins->opcode() == LIR_jf);
LIns* cond = ins->oprnd1();
if (cond->isconst()) {
if ((!branchOnFalse && !cond->imm32()) || (branchOnFalse && cond->imm32())) {
if (cond->isImmI()) {
if ((!branchOnFalse && !cond->immI()) || (branchOnFalse && cond->immI())) {
// jmp never taken, not needed
} else {
asm_jmp(ins, pending_lives); // jmp always taken
@ -1259,8 +1259,8 @@ namespace nanojit
void Assembler::asm_xcc(LInsp ins)
{
LIns* cond = ins->oprnd1();
if (cond->isconst()) {
if ((ins->isop(LIR_xt) && !cond->imm32()) || (ins->isop(LIR_xf) && cond->imm32())) {
if (cond->isImmI()) {
if ((ins->isop(LIR_xt) && !cond->immI()) || (ins->isop(LIR_xf) && cond->immI())) {
// guard never taken, not needed
} else {
asm_x(ins); // guard always taken
@ -1792,7 +1792,7 @@ namespace nanojit
{
// we traverse backwards so we are now hitting the file
// that is associated with a bunch of LIR_lines we already have seen
uintptr_t currentFile = ins->oprnd1()->imm32();
uintptr_t currentFile = ins->oprnd1()->immI();
cgen->jitFilenameUpdate(currentFile);
break;
}
@ -1801,7 +1801,7 @@ namespace nanojit
// add a new table entry, we don't yet knwo which file it belongs
// to so we need to add it to the update table too
// note the alloc, actual act is delayed; see above
uint32_t currentLine = (uint32_t) ins->oprnd1()->imm32();
uint32_t currentLine = (uint32_t) ins->oprnd1()->immI();
cgen->jitLineNumUpdate(currentLine);
cgen->jitAddRecord((uintptr_t)_nIns, 0, currentLine, true);
break;
@ -1926,10 +1926,10 @@ namespace nanojit
// Must findMemFor even if we're going to findRegFor; loop-carried
// operands may spill on another edge, and we need them to always
// spill to the same place.
#if NJ_USES_QUAD_CONSTANTS
#if NJ_USES_IMMD_POOL
// Exception: if float constants are true constants, we should
// never call findMemFor on those ops.
if (!op1->isconstf())
if (!op1->isImmD())
#endif
{
findMemFor(op1);
@ -157,10 +157,10 @@ namespace nanojit
n = ins->size() >> 2;
} else {
switch (ins->retType()) {
case LTy_I32: n = 1; break;
CASE64(LTy_I64:)
case LTy_F64: n = 2; break;
case LTy_Void: NanoAssert(0); break;
case LTy_I: n = 1; break;
CASE64(LTy_Q:)
case LTy_D: n = 2; break;
case LTy_V: NanoAssert(0); break;
default: NanoAssert(0); break;
}
}
@ -192,8 +192,8 @@ namespace nanojit

typedef SeqBuilder<NIns*> NInsList;
typedef HashMap<NIns*, LIns*> NInsMap;
#if NJ_USES_QUAD_CONSTANTS
typedef HashMap<uint64_t, uint64_t*> QuadConstantMap;
#if NJ_USES_IMMD_POOL
typedef HashMap<uint64_t, uint64_t*> ImmDPoolMap;
#endif

#ifdef VTUNE
@ -326,9 +326,9 @@ namespace nanojit
Register getBaseReg(LIns *ins, int &d, RegisterMask allow);
void getBaseReg2(RegisterMask allowValue, LIns* value, Register& rv,
RegisterMask allowBase, LIns* base, Register& rb, int &d);
#if NJ_USES_QUAD_CONSTANTS
#if NJ_USES_IMMD_POOL
const uint64_t*
findQuadConstant(uint64_t q);
findImmDFromPool(uint64_t q);
#endif
int findMemFor(LIns* ins);
Register findRegFor(LIns* ins, RegisterMask allow);
@ -362,8 +362,8 @@ namespace nanojit
RegAllocMap _branchStateMap;
NInsMap _patches;
LabelStateMap _labels;
#if NJ_USES_QUAD_CONSTANTS
QuadConstantMap _quadConstants;
#if NJ_USES_IMMD_POOL
ImmDPoolMap _immDPool;
#endif

// We generate code into two places: normal code chunks, and exit
File diff suppressed because it is too large
@ -362,9 +362,9 @@ namespace nanojit
// All values must fit into three bits. See CallInfo for details.
enum ArgType {
ARGTYPE_V = 0, // void
ARGTYPE_F = 1, // double (64bit)
ARGTYPE_D = 1, // double (64bit)
ARGTYPE_I = 2, // int32_t
ARGTYPE_U = 3, // uint32_t
ARGTYPE_UI = 3, // uint32_t
#ifdef NANOJIT_64BIT
ARGTYPE_Q = 4, // uint64_t
#endif
@ -574,27 +574,27 @@ namespace nanojit
#endif
op == LIR_cmovi;
}
inline bool isICmpOpcode(LOpcode op) {
inline bool isCmpIOpcode(LOpcode op) {
return LIR_eqi <= op && op <= LIR_geui;
}
inline bool isSICmpOpcode(LOpcode op) {
inline bool isCmpSIOpcode(LOpcode op) {
return LIR_eqi <= op && op <= LIR_gei;
}
inline bool isUICmpOpcode(LOpcode op) {
inline bool isCmpUIOpcode(LOpcode op) {
return LIR_eqi == op || (LIR_ltui <= op && op <= LIR_geui);
}
#ifdef NANOJIT_64BIT
inline bool isQCmpOpcode(LOpcode op) {
inline bool isCmpQOpcode(LOpcode op) {
return LIR_eqq <= op && op <= LIR_geuq;
}
inline bool isSQCmpOpcode(LOpcode op) {
inline bool isCmpSQOpcode(LOpcode op) {
return LIR_eqq <= op && op <= LIR_geq;
}
inline bool isUQCmpOpcode(LOpcode op) {
inline bool isCmpUQOpcode(LOpcode op) {
return LIR_eqq == op || (LIR_ltuq <= op && op <= LIR_geuq);
}
#endif
inline bool isFCmpOpcode(LOpcode op) {
inline bool isCmpDOpcode(LOpcode op) {
return LIR_eqd <= op && op <= LIR_ged;
}

@ -606,18 +606,18 @@ namespace nanojit
NanoAssert(op == LIR_xt || op == LIR_xf);
return LOpcode(op ^ 1);
}
inline LOpcode invertICmpOpcode(LOpcode op) {
NanoAssert(isICmpOpcode(op));
inline LOpcode invertCmpIOpcode(LOpcode op) {
NanoAssert(isCmpIOpcode(op));
return LOpcode(op ^ 1);
}
#ifdef NANOJIT_64BIT
inline LOpcode invertQCmpOpcode(LOpcode op) {
NanoAssert(isQCmpOpcode(op));
inline LOpcode invertCmpQOpcode(LOpcode op) {
NanoAssert(isCmpQOpcode(op));
return LOpcode(op ^ 1);
}
#endif
inline LOpcode invertFCmpOpcode(LOpcode op) {
NanoAssert(isFCmpOpcode(op));
inline LOpcode invertCmpDOpcode(LOpcode op) {
NanoAssert(isCmpDOpcode(op));
return LOpcode(op ^ 1);
}

@ -626,8 +626,8 @@ namespace nanojit
switch (ci->returnType()) {
case ARGTYPE_V: op = LIR_callp; break;
case ARGTYPE_I:
case ARGTYPE_U: op = LIR_calli; break;
case ARGTYPE_F: op = LIR_calld; break;
case ARGTYPE_UI: op = LIR_calli; break;
case ARGTYPE_D: op = LIR_calld; break;
#ifdef NANOJIT_64BIT
case ARGTYPE_Q: op = LIR_callq; break;
#endif
@ -636,25 +636,25 @@ namespace nanojit
return op;
}

LOpcode f64arith_to_i32arith(LOpcode op);
LOpcode arithOpcodeD2I(LOpcode op);
#ifdef NANOJIT_64BIT
LOpcode i32cmp_to_i64cmp(LOpcode op);
LOpcode cmpOpcodeI2Q(LOpcode op);
#endif
LOpcode f64cmp_to_i32cmp(LOpcode op);
LOpcode f64cmp_to_u32cmp(LOpcode op);
LOpcode cmpOpcodeD2I(LOpcode op);
LOpcode cmpOpcodeD2UI(LOpcode op);

// Array holding the 'repKind' field from LIRopcode.tbl.
extern const uint8_t repKinds[];

enum LTy {
LTy_Void, // no value/no type
LTy_I32, // 32-bit integer
LTy_V, // no value/no type
LTy_I, // 32-bit integer
#ifdef NANOJIT_64BIT
LTy_I64, // 64-bit integer
LTy_Q, // 64-bit integer
#endif
LTy_F64, // 64-bit float
LTy_D, // 64-bit float

LTy_Ptr = PTR_SIZE(LTy_I32, LTy_I64) // word-sized integer
LTy_P = PTR_SIZE(LTy_I, LTy_Q) // word-sized integer
};

// Array holding the 'retType' field from LIRopcode.tbl.
@ -748,7 +748,7 @@ namespace nanojit
LRK_C,
LRK_P,
LRK_I,
LRK_N64,
LRK_QorD,
LRK_Jtbl,
LRK_None // this one is used for unused opcode numbers
};
@ -763,7 +763,7 @@ namespace nanojit
class LInsC;
class LInsP;
class LInsI;
class LInsN64;
class LInsQorD;
class LInsJtbl;

class LIns
@ -812,7 +812,7 @@ namespace nanojit
inline LInsC* toLInsC() const;
inline LInsP* toLInsP() const;
inline LInsI* toLInsI() const;
inline LInsN64* toLInsN64() const;
inline LInsQorD* toLInsQorD() const;
inline LInsJtbl*toLInsJtbl()const;

void staticSanityCheck();
@ -830,8 +830,8 @@ namespace nanojit
// initLInsC() just copies the pointer into the LInsC.
inline void initLInsC(LOpcode opcode, LIns** args, const CallInfo* ci);
inline void initLInsP(int32_t arg, int32_t kind);
inline void initLInsI(LOpcode opcode, int32_t imm32);
inline void initLInsN64(LOpcode opcode, int64_t imm64);
inline void initLInsI(LOpcode opcode, int32_t immI);
inline void initLInsQorD(LOpcode opcode, int64_t imm64);
inline void initLInsJtbl(LIns* index, uint32_t size, LIns** table);

LOpcode opcode() const { return sharedFields.opcode; }
@ -919,13 +919,13 @@ namespace nanojit
inline uint8_t paramKind() const;

// For LInsI.
inline int32_t imm32() const;
inline int32_t immI() const;

// For LInsN64.
inline int32_t imm64_0() const;
inline int32_t imm64_1() const;
inline uint64_t imm64() const;
inline double imm64f() const;
// For LInsQorD.
inline int32_t immQorDlo() const;
inline int32_t immQorDhi() const;
inline uint64_t immQ() const;
inline double immD() const;

// For LIR_allocp.
inline int32_t size() const;
@ -988,9 +988,9 @@ namespace nanojit
NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_I == repKinds[opcode()];
}
bool isLInsN64() const {
bool isLInsQorD() const {
NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_N64 == repKinds[opcode()];
return LRK_QorD == repKinds[opcode()];
}
bool isLInsJtbl() const {
NanoAssert(LRK_None != repKinds[opcode()]);
@ -1013,11 +1013,11 @@ namespace nanojit
}
bool isCmp() const {
LOpcode op = opcode();
return isICmpOpcode(op) ||
return isCmpIOpcode(op) ||
#if defined NANOJIT_64BIT
isQCmpOpcode(op) ||
isCmpQOpcode(op) ||
#endif
isFCmpOpcode(op);
isCmpDOpcode(op);
}
bool isCall() const {
return isop(LIR_calli) ||
@ -1041,44 +1041,44 @@ namespace nanojit
isop(LIR_addxovi) || isop(LIR_subxovi) || isop(LIR_mulxovi);
}
// True if the instruction is a 32-bit integer immediate.
bool isconst() const {
bool isImmI() const {
return isop(LIR_immi);
}
// True if the instruction is a 32-bit integer immediate and
// has the value 'val' when treated as a 32-bit signed integer.
bool isconstval(int32_t val) const {
return isconst() && imm32()==val;
bool isImmI(int32_t val) const {
return isImmI() && immI()==val;
}
#ifdef NANOJIT_64BIT
// True if the instruction is a 64-bit integer immediate.
bool isconstq() const {
bool isImmQ() const {
return isop(LIR_immq);
}
#endif
// True if the instruction is a pointer-sized integer immediate.
bool isconstp() const
bool isImmP() const
{
#ifdef NANOJIT_64BIT
return isconstq();
return isImmQ();
#else
return isconst();
return isImmI();
#endif
}
// True if the instruction is a 64-bit float immediate.
bool isconstf() const {
bool isImmD() const {
return isop(LIR_immd);
}
// True if the instruction is a 64-bit integer or float immediate.
bool isconstqf() const {
bool isImmQorD() const {
return
#ifdef NANOJIT_64BIT
isconstq() ||
isImmQ() ||
#endif
isconstf();
isImmD();
}
// True if the instruction an any type of immediate.
bool isImmAny() const {
return isconst() || isconstqf();
return isImmI() || isImmQorD();
}

bool isBranch() const {
@ -1088,32 +1088,32 @@ namespace nanojit
LTy retType() const {
return retTypes[opcode()];
}
bool isVoid() const {
return retType() == LTy_Void;
bool isV() const {
return retType() == LTy_V;
}
bool isI32() const {
return retType() == LTy_I32;
bool isI() const {
return retType() == LTy_I;
}
#ifdef NANOJIT_64BIT
bool isI64() const {
return retType() == LTy_I64;
bool isQ() const {
return retType() == LTy_Q;
}
#endif
bool isF64() const {
return retType() == LTy_F64;
bool isD() const {
return retType() == LTy_D;
}
bool isN64() const {
bool isQorD() const {
return
#ifdef NANOJIT_64BIT
isI64() ||
isQ() ||
#endif
isF64();
isD();
}
bool isPtr() const {
bool isP() const {
#ifdef NANOJIT_64BIT
return isI64();
return isQ();
#else
return isI32();
return isI();
#endif
}

@ -1130,15 +1130,15 @@ namespace nanojit
if (isCall())
return !callInfo()->_isPure;
else
return isVoid();
return isV();
}

inline void* constvalp() const
inline void* immP() const
{
#ifdef NANOJIT_64BIT
return (void*)imm64();
return (void*)immQ();
#else
return (void*)imm32();
return (void*)immI();
#endif
}
};
@ -1303,7 +1303,7 @@ namespace nanojit
private:
friend class LIns;

int32_t imm32;
int32_t immI;

LIns ins;

@ -1312,14 +1312,14 @@ namespace nanojit
};

// Used for LIR_immq and LIR_immd.
class LInsN64
class LInsQorD
{
private:
friend class LIns;

int32_t imm64_0;
int32_t immQorDlo;

int32_t imm64_1;
int32_t immQorDhi;

LIns ins;

@ -1361,7 +1361,7 @@ namespace nanojit
LInsC* LIns::toLInsC() const { return (LInsC* )( uintptr_t(this+1) - sizeof(LInsC ) ); }
LInsP* LIns::toLInsP() const { return (LInsP* )( uintptr_t(this+1) - sizeof(LInsP ) ); }
LInsI* LIns::toLInsI() const { return (LInsI* )( uintptr_t(this+1) - sizeof(LInsI ) ); }
LInsN64* LIns::toLInsN64() const { return (LInsN64*)( uintptr_t(this+1) - sizeof(LInsN64) ); }
LInsQorD* LIns::toLInsQorD() const { return (LInsQorD*)( uintptr_t(this+1) - sizeof(LInsQorD) ); }
LInsJtbl*LIns::toLInsJtbl()const { return (LInsJtbl*)(uintptr_t(this+1) - sizeof(LInsJtbl)); }

void LIns::initLInsOp0(LOpcode opcode) {
@ -1421,16 +1421,16 @@ namespace nanojit
toLInsP()->kind = kind;
NanoAssert(isLInsP());
}
void LIns::initLInsI(LOpcode opcode, int32_t imm32) {
void LIns::initLInsI(LOpcode opcode, int32_t immI) {
initSharedFields(opcode);
toLInsI()->imm32 = imm32;
toLInsI()->immI = immI;
NanoAssert(isLInsI());
}
void LIns::initLInsN64(LOpcode opcode, int64_t imm64) {
void LIns::initLInsQorD(LOpcode opcode, int64_t imm64) {
initSharedFields(opcode);
toLInsN64()->imm64_0 = int32_t(imm64);
toLInsN64()->imm64_1 = int32_t(imm64 >> 32);
NanoAssert(isLInsN64());
toLInsQorD()->immQorDlo = int32_t(imm64);
toLInsQorD()->immQorDhi = int32_t(imm64 >> 32);
NanoAssert(isLInsQorD());
}
void LIns::initLInsJtbl(LIns* index, uint32_t size, LIns** table) {
initSharedFields(LIR_jtbl);
@ -1524,33 +1524,33 @@ namespace nanojit
inline uint8_t LIns::paramArg() const { NanoAssert(isop(LIR_paramp)); return toLInsP()->arg; }
inline uint8_t LIns::paramKind() const { NanoAssert(isop(LIR_paramp)); return toLInsP()->kind; }

inline int32_t LIns::imm32() const { NanoAssert(isconst()); return toLInsI()->imm32; }
inline int32_t LIns::immI() const { NanoAssert(isImmI()); return toLInsI()->immI; }

inline int32_t LIns::imm64_0() const { NanoAssert(isconstqf()); return toLInsN64()->imm64_0; }
inline int32_t LIns::imm64_1() const { NanoAssert(isconstqf()); return toLInsN64()->imm64_1; }
uint64_t LIns::imm64() const {
NanoAssert(isconstqf());
return (uint64_t(toLInsN64()->imm64_1) << 32) | uint32_t(toLInsN64()->imm64_0);
inline int32_t LIns::immQorDlo() const { NanoAssert(isImmQorD()); return toLInsQorD()->immQorDlo; }
inline int32_t LIns::immQorDhi() const { NanoAssert(isImmQorD()); return toLInsQorD()->immQorDhi; }
uint64_t LIns::immQ() const {
NanoAssert(isImmQorD());
return (uint64_t(toLInsQorD()->immQorDhi) << 32) | uint32_t(toLInsQorD()->immQorDlo);
}
double LIns::imm64f() const {
NanoAssert(isconstf());
double LIns::immD() const {
NanoAssert(isImmD());
union {
double f;
uint64_t q;
} u;
u.q = imm64();
u.q = immQ();
return u.f;
}

int32_t LIns::size() const {
NanoAssert(isop(LIR_allocp));
return toLInsI()->imm32 << 2;
return toLInsI()->immI << 2;
}

void LIns::setSize(int32_t nbytes) {
NanoAssert(isop(LIR_allocp));
NanoAssert(nbytes > 0);
toLInsI()->imm32 = (nbytes+3)>>2; // # of required 32bit words
toLInsI()->immI = (nbytes+3)>>2; // # of required 32bit words
}

// Index args in reverse order, i.e. arg(0) returns the rightmost arg.
@ -1618,16 +1618,16 @@ namespace nanojit
virtual LInsp insParam(int32_t arg, int32_t kind) {
return out->insParam(arg, kind);
}
virtual LInsp insImm(int32_t imm) {
return out->insImm(imm);
virtual LInsp insImmI(int32_t imm) {
return out->insImmI(imm);
}
#ifdef NANOJIT_64BIT
virtual LInsp insImmq(uint64_t imm) {
return out->insImmq(imm);
virtual LInsp insImmQ(uint64_t imm) {
return out->insImmQ(imm);
}
#endif
virtual LInsp insImmf(double d) {
return out->insImmf(d);
virtual LInsp insImmD(double d) {
return out->insImmD(d);
}
virtual LInsp insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet) {
return out->insLoad(op, base, d, accSet);
@ -1651,22 +1651,22 @@ namespace nanojit

// Inserts a conditional to execute and branches to execute if
// the condition is true and false respectively.
LIns* ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse, bool use_cmov);
LIns* insChoose(LIns* cond, LIns* iftrue, LIns* iffalse, bool use_cmov);

// Inserts an integer comparison to 0
LIns* ins_eq0(LIns* oprnd1) {
return ins2i(LIR_eqi, oprnd1, 0);
LIns* insEqI_0(LIns* oprnd1) {
return ins2ImmI(LIR_eqi, oprnd1, 0);
}

// Inserts a pointer comparison to 0
LIns* ins_peq0(LIns* oprnd1) {
LIns* insEqP_0(LIns* oprnd1) {
return ins2(LIR_eqp, oprnd1, insImmWord(0));
}

// Inserts a binary operation where the second operand is an
// integer immediate.
LIns* ins2i(LOpcode v, LIns* oprnd1, int32_t imm) {
return ins2(v, oprnd1, insImm(imm));
LIns* ins2ImmI(LOpcode v, LIns* oprnd1, int32_t imm) {
return ins2(v, oprnd1, insImmI(imm));
}

#if NJ_SOFTFLOAT_SUPPORTED
@ -1674,24 +1674,24 @@ namespace nanojit
return ins2(LIR_ii2d, lo, hi);
}
#endif
LIns* insImmPtr(const void *ptr) {
LIns* insImmP(const void *ptr) {
#ifdef NANOJIT_64BIT
return insImmq((uint64_t)ptr);
return insImmQ((uint64_t)ptr);
#else
return insImm((int32_t)ptr);
return insImmI((int32_t)ptr);
#endif
}

LIns* insImmWord(intptr_t value) {
#ifdef NANOJIT_64BIT
return insImmq(value);
return insImmQ(value);
#else
return insImm(value);
return insImmI(value);
#endif
}

// Sign-extend integers to native integers. On 32-bit this is a no-op.
LIns* ins_i2p(LIns* intIns) {
LIns* insI2P(LIns* intIns) {
#ifdef NANOJIT_64BIT
return ins1(LIR_i2q, intIns);
#else
@ -1700,7 +1700,7 @@ namespace nanojit
}

// Zero-extend integers to native integers. On 32-bit this is a no-op.
LIns* ins_u2p(LIns* uintIns) {
LIns* insUI2P(LIns* uintIns) {
#ifdef NANOJIT_64BIT
return ins1(LIR_ui2uq, uintIns);
#else
@ -1709,7 +1709,7 @@ namespace nanojit
}

// Chooses LIR_sti or LIR_stq based on size of value.
LIns* insStorei(LIns* value, LIns* base, int32_t d, AccSet accSet);
LIns* insStore(LIns* value, LIns* base, int32_t d, AccSet accSet);
};
@ -1918,16 +1918,16 @@ namespace nanojit
LIns* insAlloc(int32_t size) {
return add(out->insAlloc(size));
}
LIns* insImm(int32_t imm) {
return add(out->insImm(imm));
LIns* insImmI(int32_t imm) {
return add(out->insImmI(imm));
}
#ifdef NANOJIT_64BIT
LIns* insImmq(uint64_t imm) {
return add(out->insImmq(imm));
LIns* insImmQ(uint64_t imm) {
return add(out->insImmQ(imm));
}
#endif
LIns* insImmf(double d) {
return add(out->insImmf(d));
LIns* insImmD(double d) {
return add(out->insImmD(d));
}
};
@ -1950,9 +1950,9 @@ namespace nanojit
// We divide instruction kinds into groups for the use of LInsHashSet.
// LIns0 isn't present because we don't need to record any 0-ary
// instructions.
LInsImm = 0,
LInsImmq = 1, // only occurs on 64-bit platforms
LInsImmf = 2,
LInsImmI = 0,
LInsImmQ = 1, // only occurs on 64-bit platforms
LInsImmD = 2,
LIns1 = 3,
LIns2 = 4,
LIns3 = 5,
@ -1998,8 +1998,8 @@ namespace nanojit

Allocator& alloc;

static uint32_t hashImm(int32_t);
static uint32_t hashImmq(uint64_t); // not NANOJIT_64BIT-only -- used by findImmf()
static uint32_t hashImmI(int32_t);
static uint32_t hashImmQorD(uint64_t); // not NANOJIT_64BIT-only -- used by findImmD()
static uint32_t hash1(LOpcode op, LInsp);
static uint32_t hash2(LOpcode op, LInsp, LInsp);
static uint32_t hash3(LOpcode op, LInsp, LInsp, LInsp);
@ -2008,11 +2008,11 @@ namespace nanojit

// These private versions are used after an LIns has been created;
// they are used for rehashing after growing.
uint32_t findImm(LInsp ins);
uint32_t findImmI(LInsp ins);
#ifdef NANOJIT_64BIT
uint32_t findImmq(LInsp ins);
uint32_t findImmQ(LInsp ins);
#endif
uint32_t findImmf(LInsp ins);
uint32_t findImmD(LInsp ins);
uint32_t find1(LInsp ins);
uint32_t find2(LInsp ins);
uint32_t find3(LInsp ins);
@ -2030,11 +2030,11 @@ namespace nanojit
LInsHashSet(Allocator&, uint32_t kInitialCaps[]);

// These public versions are used before an LIns has been created.
LInsp findImm(int32_t a, uint32_t &k);
LInsp findImmI(int32_t a, uint32_t &k);
#ifdef NANOJIT_64BIT
LInsp findImmq(uint64_t a, uint32_t &k);
LInsp findImmQ(uint64_t a, uint32_t &k);
#endif
LInsp findImmf(uint64_t d, uint32_t &k);
LInsp findImmD(uint64_t d, uint32_t &k);
LInsp find1(LOpcode v, LInsp a, uint32_t &k);
LInsp find2(LOpcode v, LInsp a, LInsp b, uint32_t &k);
LInsp find3(LOpcode v, LInsp a, LInsp b, LInsp c, uint32_t &k);
@ -2058,11 +2058,11 @@ namespace nanojit
public:
CseFilter(LirWriter *out, Allocator&);

LIns* insImm(int32_t imm);
LIns* insImmI(int32_t imm);
#ifdef NANOJIT_64BIT
LIns* insImmq(uint64_t q);
LIns* insImmQ(uint64_t q);
#endif
LIns* insImmf(double d);
LIns* insImmD(double d);
LIns* ins0(LOpcode v);
LIns* ins1(LOpcode v, LInsp);
LIns* ins2(LOpcode v, LInsp, LInsp);
@ -2133,11 +2133,11 @@ namespace nanojit
LInsp ins2(LOpcode op, LInsp o1, LInsp o2);
LInsp ins3(LOpcode op, LInsp o1, LInsp o2, LInsp o3);
LInsp insParam(int32_t i, int32_t kind);
LInsp insImm(int32_t imm);
LInsp insImmI(int32_t imm);
#ifdef NANOJIT_64BIT
LInsp insImmq(uint64_t imm);
LInsp insImmQ(uint64_t imm);
#endif
LInsp insImmf(double d);
LInsp insImmD(double d);
LInsp insCall(const CallInfo *call, LInsp args[]);
LInsp insGuard(LOpcode op, LInsp cond, GuardRecord *gr);
LInsp insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr);
@ -2228,9 +2228,9 @@ namespace nanojit
SoftFloatFilter(LirWriter *out);
LIns *split(LIns *a);
LIns *split(const CallInfo *call, LInsp args[]);
LIns *fcall1(const CallInfo *call, LIns *a);
LIns *fcall2(const CallInfo *call, LIns *a, LIns *b);
LIns *fcmp(const CallInfo *call, LIns *a, LIns *b);
LIns *callD1(const CallInfo *call, LIns *a);
LIns *callD2(const CallInfo *call, LIns *a, LIns *b);
LIns *cmpD(const CallInfo *call, LIns *a, LIns *b);
LIns *ins1(LOpcode op, LIns *a);
LIns *ins2(LOpcode op, LIns *a, LIns *b);
LIns *insCall(const CallInfo *ci, LInsp args[]);
@ -2280,11 +2280,11 @@ namespace nanojit
LIns* ins2(LOpcode v, LIns* a, LIns* b);
LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c);
LIns* insParam(int32_t arg, int32_t kind);
LIns* insImm(int32_t imm);
LIns* insImmI(int32_t imm);
#ifdef NANOJIT_64BIT
LIns* insImmq(uint64_t imm);
LIns* insImmQ(uint64_t imm);
#endif
LIns* insImmf(double d);
LIns* insImmD(double d);
LIns* insCall(const CallInfo *call, LIns* args[]);
LIns* insGuard(LOpcode v, LIns *c, GuardRecord *gr);
LIns* insGuardXov(LOpcode v, LIns* a, LIns* b, GuardRecord* gr);
@ -83,7 +83,7 @@
* OP_86: for opcodes supported only on i386/X64.
*/

#define OP_UN(n) OP___(__##n, n, None, Void, -1)
#define OP_UN(n) OP___(__##n, n, None, V, -1)

#ifdef NANOJIT_64BIT
# define OP_32(a, b, c, d, e) OP_UN(b)
@ -108,30 +108,30 @@
//---------------------------------------------------------------------------
// Miscellaneous operations
//---------------------------------------------------------------------------
OP___(start, 0, Op0, Void, 0) // start of a fragment
OP___(start, 0, Op0, V, 0) // start of a fragment

// A register fence causes no code to be generated, but it affects register
// allocation so that no registers are live when it is reached.
OP___(regfence, 1, Op0, Void, 0)
OP___(regfence, 1, Op0, V, 0)

OP___(skip, 2, Sk, Void, 0) // links code chunks
OP___(skip, 2, Sk, V, 0) // links code chunks

OP_32(parami, 3, P, I32, 0) // load an int parameter (register or stack location)
OP_64(paramq, 4, P, I64, 0) // load a quad parameter (register or stack location)
OP_32(parami, 3, P, I, 0) // load an int parameter (register or stack location)
OP_64(paramq, 4, P, Q, 0) // load a quad parameter (register or stack location)

OP_32(alloci, 5, I, I32, 0) // allocate stack space (result is an int address)
OP_64(allocq, 6, I, I64, 0) // allocate stack space (result is a quad address)
OP_32(alloci, 5, I, I, 0) // allocate stack space (result is an int address)
OP_64(allocq, 6, I, Q, 0) // allocate stack space (result is a quad address)

OP___(reti, 7, Op1, Void, 0) // return an int
OP_64(retq, 8, Op1, Void, 0) // return a quad
OP___(retd, 9, Op1, Void, 0) // return a double
OP___(reti, 7, Op1, V, 0) // return an int
OP_64(retq, 8, Op1, V, 0) // return a quad
OP___(retd, 9, Op1, V, 0) // return a double

OP___(livei, 10, Op1, Void, 0) // extend live range of an int
OP_64(liveq, 11, Op1, Void, 0) // extend live range of a quad
OP___(lived, 12, Op1, Void, 0) // extend live range of a double
OP___(livei, 10, Op1, V, 0) // extend live range of an int
OP_64(liveq, 11, Op1, V, 0) // extend live range of a quad
OP___(lived, 12, Op1, V, 0) // extend live range of a double

OP___(file, 13, Op1, Void, 0) // source filename for debug symbols
OP___(line, 14, Op1, Void, 0) // source line number for debug symbols
OP___(file, 13, Op1, V, 0) // source filename for debug symbols
OP___(line, 14, Op1, V, 0) // source line number for debug symbols

OP_UN(15)
OP_UN(16)
@ -139,21 +139,21 @@ OP_UN(16)
//---------------------------------------------------------------------------
// Loads and stores
//---------------------------------------------------------------------------
OP___(ldc2i, 17, Ld, I32, -1) // load char and sign-extend to an int
OP___(lds2i, 18, Ld, I32, -1) // load short and sign-extend to an int
OP___(lduc2ui, 19, Ld, I32, -1) // load unsigned char and zero-extend to an unsigned int
OP___(ldus2ui, 20, Ld, I32, -1) // load unsigned short and zero-extend to an unsigned int
OP___(ldi, 21, Ld, I32, -1) // load int
OP_64(ldq, 22, Ld, I64, -1) // load quad
OP___(ldd, 23, Ld, F64, -1) // load double
OP___(ldf2d, 24, Ld, F64, -1) // load float and extend to a double
OP___(ldc2i, 17, Ld, I, -1) // load char and sign-extend to an int
OP___(lds2i, 18, Ld, I, -1) // load short and sign-extend to an int
OP___(lduc2ui, 19, Ld, I, -1) // load unsigned char and zero-extend to an unsigned int
OP___(ldus2ui, 20, Ld, I, -1) // load unsigned short and zero-extend to an unsigned int
OP___(ldi, 21, Ld, I, -1) // load int
OP_64(ldq, 22, Ld, Q, -1) // load quad
OP___(ldd, 23, Ld, D, -1) // load double
OP___(ldf2d, 24, Ld, D, -1) // load float and extend to a double

OP___(sti2c, 25, Sti, Void, 0) // store int truncated to char
OP___(sti2s, 26, Sti, Void, 0) // store int truncated to short
OP___(sti, 27, Sti, Void, 0) // store int
OP_64(stq, 28, Sti, Void, 0) // store quad
OP___(std, 29, Sti, Void, 0) // store double
OP___(std2f, 30, Sti, Void, 0) // store double as a float (losing precision)
OP___(sti2c, 25, Sti, V, 0) // store int truncated to char
OP___(sti2s, 26, Sti, V, 0) // store int truncated to short
OP___(sti, 27, Sti, V, 0) // store int
OP_64(stq, 28, Sti, V, 0) // store quad
OP___(std, 29, Sti, V, 0) // store double
OP___(std2f, 30, Sti, V, 0) // store double as a float (losing precision)

OP_UN(31)
OP_UN(32)
@ -161,9 +161,9 @@ OP_UN(32)
//---------------------------------------------------------------------------
// Calls
//---------------------------------------------------------------------------
OP___(calli, 33, C, I32, -1) // call subroutine that returns an int
OP_64(callq, 34, C, I64, -1) // call subroutine that returns a quad
OP___(calld, 35, C, F64, -1) // call subroutine that returns a double
OP___(calli, 33, C, I, -1) // call subroutine that returns an int
OP_64(callq, 34, C, Q, -1) // call subroutine that returns a quad
OP___(calld, 35, C, D, -1) // call subroutine that returns a double

OP_UN(36)

@ -172,12 +172,12 @@ OP_UN(36)
//---------------------------------------------------------------------------
// 'jt' and 'jf' must be adjacent so that (op ^ 1) gives the opposite one.
// Static assertions in LIR.h check this requirement.
OP___(j, 37, Op2, Void, 0) // jump always
OP___(jt, 38, Op2, Void, 0) // jump if true
OP___(jf, 39, Op2, Void, 0) // jump if false
OP___(jtbl, 40, Jtbl, Void, 0) // jump to address in table
OP___(j, 37, Op2, V, 0) // jump always
OP___(jt, 38, Op2, V, 0) // jump if true
OP___(jf, 39, Op2, V, 0) // jump if false
OP___(jtbl, 40, Jtbl, V, 0) // jump to address in table

OP___(label, 41, Op0, Void, 0) // a jump target (no machine code is emitted for this)
OP___(label, 41, Op0, V, 0) // a jump target (no machine code is emitted for this)

OP_UN(42)
@ -186,23 +186,23 @@ OP_UN(42)
//---------------------------------------------------------------------------
// 'xt' and 'xf' must be adjacent so that (op ^ 1) gives the opposite one.
// Static assertions in LIR.h check this requirement.
OP___(x, 43, Op2, Void, 0) // exit always
OP___(xt, 44, Op2, Void, 1) // exit if true
OP___(xf, 45, Op2, Void, 1) // exit if false
OP___(xtbl, 46, Op2, Void, 0) // exit via indirect jump
OP___(x, 43, Op2, V, 0) // exit always
OP___(xt, 44, Op2, V, 1) // exit if true
OP___(xf, 45, Op2, V, 1) // exit if false
OP___(xtbl, 46, Op2, V, 0) // exit via indirect jump
// A LIR_xbarrier cause no code to be generated, but it acts like a never-taken
// guard in that it inhibits certain optimisations, such as dead stack store
// elimination.
OP___(xbarrier, 47, Op2, Void, 0)
OP___(xbarrier, 47, Op2, V, 0)

OP_UN(48)

//---------------------------------------------------------------------------
// Immediates
//---------------------------------------------------------------------------
OP___(immi, 49, I, I32, 1) // int immediate
OP_64(immq, 50, N64, I64, 1) // quad immediate
OP___(immd, 51, N64, F64, 1) // double immediate
OP___(immi, 49, I, I, 1) // int immediate
OP_64(immq, 50, QorD, Q, 1) // quad immediate
OP___(immd, 51, QorD, D, 1) // double immediate

OP_UN(52)
@ -215,83 +215,83 @@ OP_UN(52)
|
||||
// for this to work. They must also remain contiguous so that opcode range
|
||||
// checking works correctly. Static assertions in LIR.h check these
|
||||
// requirements.
|
||||
OP___(eqi, 53, Op2, I32, 1) // int equality
|
||||
OP___(lti, 54, Op2, I32, 1) // signed int less-than
|
||||
OP___(gti, 55, Op2, I32, 1) // signed int greater-than
|
||||
OP___(lei, 56, Op2, I32, 1) // signed int less-than-or-equal
|
||||
OP___(gei, 57, Op2, I32, 1) // signed int greater-than-or-equal
|
||||
OP___(ltui, 58, Op2, I32, 1) // unsigned int less-than
|
||||
OP___(gtui, 59, Op2, I32, 1) // unsigned int greater-than
|
||||
OP___(leui, 60, Op2, I32, 1) // unsigned int less-than-or-equal
|
||||
OP___(geui, 61, Op2, I32, 1) // unsigned int greater-than-or-equal
|
||||
OP___(eqi, 53, Op2, I, 1) // int equality
|
||||
OP___(lti, 54, Op2, I, 1) // signed int less-than
|
||||
OP___(gti, 55, Op2, I, 1) // signed int greater-than
|
||||
OP___(lei, 56, Op2, I, 1) // signed int less-than-or-equal
|
||||
OP___(gei, 57, Op2, I, 1) // signed int greater-than-or-equal
|
||||
OP___(ltui, 58, Op2, I, 1) // unsigned int less-than
|
||||
OP___(gtui, 59, Op2, I, 1) // unsigned int greater-than
|
||||
OP___(leui, 60, Op2, I, 1) // unsigned int less-than-or-equal
|
||||
OP___(geui, 61, Op2, I, 1) // unsigned int greater-than-or-equal

OP_UN(62)

OP_64(eqq, 63, Op2, I32, 1) // quad equality
OP_64(ltq, 64, Op2, I32, 1) // signed quad less-than
OP_64(gtq, 65, Op2, I32, 1) // signed quad greater-than
OP_64(leq, 66, Op2, I32, 1) // signed quad less-than-or-equal
OP_64(geq, 67, Op2, I32, 1) // signed quad greater-than-or-equal
OP_64(ltuq, 68, Op2, I32, 1) // unsigned quad less-than
OP_64(gtuq, 69, Op2, I32, 1) // unsigned quad greater-than
OP_64(leuq, 70, Op2, I32, 1) // unsigned quad less-than-or-equal
OP_64(geuq, 71, Op2, I32, 1) // unsigned quad greater-than-or-equal
OP_64(eqq, 63, Op2, I, 1) // quad equality
OP_64(ltq, 64, Op2, I, 1) // signed quad less-than
OP_64(gtq, 65, Op2, I, 1) // signed quad greater-than
OP_64(leq, 66, Op2, I, 1) // signed quad less-than-or-equal
OP_64(geq, 67, Op2, I, 1) // signed quad greater-than-or-equal
OP_64(ltuq, 68, Op2, I, 1) // unsigned quad less-than
OP_64(gtuq, 69, Op2, I, 1) // unsigned quad greater-than
OP_64(leuq, 70, Op2, I, 1) // unsigned quad less-than-or-equal
OP_64(geuq, 71, Op2, I, 1) // unsigned quad greater-than-or-equal

OP_UN(72)

OP___(eqd, 73, Op2, I32, 1) // double equality
OP___(ltd, 74, Op2, I32, 1) // double less-than
OP___(gtd, 75, Op2, I32, 1) // double greater-than
OP___(led, 76, Op2, I32, 1) // double less-than-or-equal
OP___(ged, 77, Op2, I32, 1) // double greater-than-or-equal
OP___(eqd, 73, Op2, I, 1) // double equality
OP___(ltd, 74, Op2, I, 1) // double less-than
OP___(gtd, 75, Op2, I, 1) // double greater-than
OP___(led, 76, Op2, I, 1) // double less-than-or-equal
OP___(ged, 77, Op2, I, 1) // double greater-than-or-equal

OP_UN(78)

//---------------------------------------------------------------------------
// Arithmetic
//---------------------------------------------------------------------------
OP___(negi, 79, Op1, I32, 1) // negate int
OP___(addi, 80, Op2, I32, 1) // add int
OP___(subi, 81, Op2, I32, 1) // subtract int
OP___(muli, 82, Op2, I32, 1) // multiply int
OP_86(divi, 83, Op2, I32, 1) // divide int
OP___(negi, 79, Op1, I, 1) // negate int
OP___(addi, 80, Op2, I, 1) // add int
OP___(subi, 81, Op2, I, 1) // subtract int
OP___(muli, 82, Op2, I, 1) // multiply int
OP_86(divi, 83, Op2, I, 1) // divide int
// LIR_modi is a hack. It's only used on i386/X64. The operand is the result
// of a LIR_divi because on i386/X64 div and mod results are computed by the
// same instruction.
OP_86(modi, 84, Op1, I32, 1) // modulo int
OP_86(modi, 84, Op1, I, 1) // modulo int
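On i386/X64 a single idiv produces the quotient in eax and the remainder in edx, which is why LIR_modi takes the LIR_divi result as its lone operand instead of repeating the two inputs. A front end would typically pair them along these lines (a sketch; the LirWriter method names are assumed, not quoted from this commit):

    // 'lir' is a nanojit::LirWriter*; a and b are previously emitted int operands.
    LIns* quot = lir->ins2(LIR_divi, a, b);  // quotient  (eax after the idiv)
    LIns* rem  = lir->ins1(LIR_modi, quot);  // remainder (edx of that same idiv)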

OP___(noti, 85, Op1, I32, 1) // bitwise-NOT int
OP___(andi, 86, Op2, I32, 1) // bitwise-AND int
OP___(ori, 87, Op2, I32, 1) // bitwise-OR int
OP___(xori, 88, Op2, I32, 1) // bitwise-XOR int
OP___(noti, 85, Op1, I, 1) // bitwise-NOT int
OP___(andi, 86, Op2, I, 1) // bitwise-AND int
OP___(ori, 87, Op2, I, 1) // bitwise-OR int
OP___(xori, 88, Op2, I, 1) // bitwise-XOR int

OP___(lshi, 89, Op2, I32, 1) // left shift int
OP___(rshi, 90, Op2, I32, 1) // right shift int (>>)
OP___(rshui, 91, Op2, I32, 1) // right shift unsigned int (>>>)
OP___(lshi, 89, Op2, I, 1) // left shift int
OP___(rshi, 90, Op2, I, 1) // right shift int (>>)
OP___(rshui, 91, Op2, I, 1) // right shift unsigned int (>>>)

OP_64(addq, 92, Op2, I64, 1) // add quad
OP_64(addq, 92, Op2, Q, 1) // add quad

OP_64(andq, 93, Op2, I64, 1) // bitwise-AND quad
OP_64(orq, 94, Op2, I64, 1) // bitwise-OR quad
OP_64(xorq, 95, Op2, I64, 1) // bitwise-XOR quad
OP_64(andq, 93, Op2, Q, 1) // bitwise-AND quad
OP_64(orq, 94, Op2, Q, 1) // bitwise-OR quad
OP_64(xorq, 95, Op2, Q, 1) // bitwise-XOR quad

OP_64(lshq, 96, Op2, I64, 1) // left shift quad; 2nd operand is an int
OP_64(rshq, 97, Op2, I64, 1) // right shift quad; 2nd operand is an int
OP_64(rshuq, 98, Op2, I64, 1) // right shift unsigned quad; 2nd operand is an int
OP_64(lshq, 96, Op2, Q, 1) // left shift quad; 2nd operand is an int
OP_64(rshq, 97, Op2, Q, 1) // right shift quad; 2nd operand is an int
OP_64(rshuq, 98, Op2, Q, 1) // right shift unsigned quad; 2nd operand is an int

OP___(negd, 99, Op1, F64, 1) // negate double
OP___(addd, 100, Op2, F64, 1) // add double
OP___(subd, 101, Op2, F64, 1) // subtract double
OP___(muld, 102, Op2, F64, 1) // multiply double
OP___(divd, 103, Op2, F64, 1) // divide double
OP___(negd, 99, Op1, D, 1) // negate double
OP___(addd, 100, Op2, D, 1) // add double
OP___(subd, 101, Op2, D, 1) // subtract double
OP___(muld, 102, Op2, D, 1) // multiply double
OP___(divd, 103, Op2, D, 1) // divide double
// LIR_modd is just a place-holder opcode, ie. the back-ends cannot generate
// code for it. It's used in TraceMonkey briefly but is always demoted to a
// LIR_modl or converted to a function call before Nanojit has to do anything
// serious with it.
OP___(modd, 104, Op2, F64, 1) // modulo double
OP___(modd, 104, Op2, D, 1) // modulo double
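Since no back end emits code for LIR_modd, a front end has to lower it itself, for instance into a call to a double-precision helper. A hedged sketch of such a lowering; the CallInfo, the helper, and the argument order are all assumptions for illustration, not details of this tree:

    // Hypothetical lowering: replace "a modd b" with a helper call.
    // 'modFn' would be a CallInfo describing double mod_helper(double, double).
    LIns* args[] = { b, a };               // argument order is an assumption here
    LIns* result = lir->insCall(&modFn, args);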

OP___(cmovi, 105, Op3, I32, 1) // conditional move int
OP_64(cmovq, 106, Op3, I64, 1) // conditional move quad
OP___(cmovi, 105, Op3, I, 1) // conditional move int
OP_64(cmovq, 106, Op3, Q, 1) // conditional move quad

OP_UN(107)
OP_UN(108)
@ -299,13 +299,13 @@ OP_UN(108)
//---------------------------------------------------------------------------
// Conversions
//---------------------------------------------------------------------------
OP_64(i2q, 109, Op1, I64, 1) // sign-extend int to quad
OP_64(ui2uq, 110, Op1, I64, 1) // zero-extend unsigned int to unsigned quad
OP_64(q2i, 111, Op1, I32, 1) // truncate quad to int (removes the high 32 bits)
OP_64(i2q, 109, Op1, Q, 1) // sign-extend int to quad
OP_64(ui2uq, 110, Op1, Q, 1) // zero-extend unsigned int to unsigned quad
OP_64(q2i, 111, Op1, I, 1) // truncate quad to int (removes the high 32 bits)

OP___(i2d, 112, Op1, F64, 1) // convert int to double
OP___(ui2d, 113, Op1, F64, 1) // convert unsigned int to double
OP___(d2i, 114, Op1, I32, 1) // convert double to int (no exceptions raised, platform rounding rules)
OP___(i2d, 112, Op1, D, 1) // convert int to double
OP___(ui2d, 113, Op1, D, 1) // convert unsigned int to double
OP___(d2i, 114, Op1, I, 1) // convert double to int (no exceptions raised, platform rounding rules)
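The conversion opcodes carry the same meaning as the corresponding C++ casts; a small self-contained illustration (plain C++, not nanojit code):

    #include <cstdint>
    #include <cstdio>

    int main() {
        int32_t  i  = -1;
        int64_t  sq = int64_t(i);                     // i2q:   sign-extend,  -1 -> -1
        uint64_t zq = uint64_t(uint32_t(i));          // ui2uq: zero-extend,  -1 -> 4294967295
        int32_t  t  = int32_t(0x1122334455667788LL);  // q2i:   keep the low 32 bits
        double   d  = double(uint32_t(i));            // ui2d:  4294967295.0
        int32_t  r  = int32_t(2.9);                   // d2i analogue; C++ truncates, LIR_d2i
                                                      // documents platform rounding rules
        printf("%lld %llu %x %.1f %d\n",
               (long long)sq, (unsigned long long)zq, t, d, r);
        return 0;
    }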

OP_UN(115)
OP_UN(116)
@ -314,25 +314,25 @@ OP_UN(116)
// Overflow arithmetic
//---------------------------------------------------------------------------
// These all exit if overflow occurred. The result is valid on either path.
OP___(addxovi, 117, Op3, I32, 1) // add int and exit on overflow
OP___(subxovi, 118, Op3, I32, 1) // sub int and exit on overflow
OP___(mulxovi, 119, Op3, I32, 1) // multiply int and exit on overflow
OP___(addxovi, 117, Op3, I, 1) // add int and exit on overflow
OP___(subxovi, 118, Op3, I, 1) // sub int and exit on overflow
OP___(mulxovi, 119, Op3, I, 1) // multiply int and exit on overflow
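Semantically each of these is an ordinary 32-bit operation plus a guard on the overflow flag; a compact model of LIR_addxovi using a compiler builtin (GCC/Clang __builtin_add_overflow, used here only to illustrate the semantics):

    #include <cstdint>

    // Model only: compute the wrapped 32-bit sum and report whether the
    // guarded side exit would be taken. The sum is usable on both paths.
    int32_t addxovi_model(int32_t a, int32_t b, bool* exitTaken) {
        int32_t sum;
        *exitTaken = __builtin_add_overflow(a, b, &sum);
        return sum;
    }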

OP_UN(120)

//---------------------------------------------------------------------------
// SoftFloat
//---------------------------------------------------------------------------
OP_SF(dlo2i, 121, Op1, I32, 1) // get the low 32 bits of a double as an int
OP_SF(dhi2i, 122, Op1, I32, 1) // get the high 32 bits of a double as an int
OP_SF(ii2d, 123, Op2, F64, 1) // join two ints (1st arg is low bits, 2nd is high)
OP_SF(dlo2i, 121, Op1, I, 1) // get the low 32 bits of a double as an int
OP_SF(dhi2i, 122, Op1, I, 1) // get the high 32 bits of a double as an int
OP_SF(ii2d, 123, Op2, D, 1) // join two ints (1st arg is low bits, 2nd is high)
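On SoftFloat targets a double is carried around as two 32-bit halves, so dlo2i/dhi2i/ii2d amount to the bit-level split and join below (plain C++, assuming the usual IEEE-754 double layout):

    #include <cstdint>
    #include <cstring>

    uint32_t dlo2i_model(double d) {               // low 32 bits of the double
        uint64_t bits; memcpy(&bits, &d, 8); return uint32_t(bits);
    }
    uint32_t dhi2i_model(double d) {               // high 32 bits of the double
        uint64_t bits; memcpy(&bits, &d, 8); return uint32_t(bits >> 32);
    }
    double ii2d_model(uint32_t lo, uint32_t hi) {  // 1st arg low bits, 2nd high bits
        uint64_t bits = (uint64_t(hi) << 32) | lo;
        double d; memcpy(&d, &bits, 8); return d;
    }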

// LIR_hcalli is a hack that's only used on 32-bit platforms that use
// SoftFloat. Its operand is always a LIR_calli, but one that specifies a
// function that returns a double. It indicates that the double result is
// returned via two 32-bit integer registers. The result is always used as the
// second operand of a LIR_ii2d.
OP_SF(hcalli, 124, Op1, I32, 1)
OP_SF(hcalli, 124, Op1, I, 1)

OP_UN(125)
OP_UN(126)

@ -70,8 +70,8 @@
|
||||
#error "unknown nanojit architecture"
|
||||
#endif
|
||||
|
||||
#ifndef NJ_USES_QUAD_CONSTANTS
|
||||
# define NJ_USES_QUAD_CONSTANTS 0
|
||||
#ifndef NJ_USES_IMMD_POOL
|
||||
# define NJ_USES_IMMD_POOL 0
|
||||
#endif
|
||||
|
||||
#ifndef NJ_JTBL_SUPPORTED
|
||||
|
@ -602,11 +602,11 @@ Assembler::asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd)
|
||||
// The stack pointer must always be at least aligned to 4 bytes.
|
||||
NanoAssert((stkd & 3) == 0);
|
||||
|
||||
if (ty == ARGTYPE_F) {
|
||||
if (ty == ARGTYPE_D) {
|
||||
// This task is fairly complex and so is delegated to asm_arg_64.
|
||||
asm_arg_64(arg, r, stkd);
|
||||
} else {
|
||||
NanoAssert(ty == ARGTYPE_I || ty == ARGTYPE_U);
|
||||
NanoAssert(ty == ARGTYPE_I || ty == ARGTYPE_UI);
|
||||
// pre-assign registers R0-R3 for arguments (if they fit)
|
||||
if (r < R4) {
|
||||
asm_regarg(ty, arg, r);
|
||||
@ -620,7 +620,7 @@ Assembler::asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd)
|
||||
|
||||
// Encode a 64-bit floating-point argument using the appropriate ABI.
|
||||
// This function operates in the same way as asm_arg, except that it will only
|
||||
// handle arguments where (ArgType)ty == ARGTYPE_F.
|
||||
// handle arguments where (ArgType)ty == ARGTYPE_D.
|
||||
void
|
||||
Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
|
||||
{
|
||||
@ -723,11 +723,11 @@ void
|
||||
Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
|
||||
{
|
||||
NanoAssert(deprecated_isKnownReg(r));
|
||||
if (ty == ARGTYPE_I || ty == ARGTYPE_U)
|
||||
if (ty == ARGTYPE_I || ty == ARGTYPE_UI)
|
||||
{
|
||||
// arg goes in specific register
|
||||
if (p->isconst()) {
|
||||
asm_ld_imm(r, p->imm32());
|
||||
if (p->isImmI()) {
|
||||
asm_ld_imm(r, p->immI());
|
||||
} else {
|
||||
if (p->isUsed()) {
|
||||
if (!p->deprecated_hasKnownReg()) {
|
||||
@ -752,7 +752,7 @@ Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
|
||||
}
|
||||
else
|
||||
{
|
||||
NanoAssert(ty == ARGTYPE_F);
|
||||
NanoAssert(ty == ARGTYPE_D);
|
||||
// fpu argument in register - should never happen since FPU
|
||||
// args are converted to two 32-bit ints on ARM
|
||||
NanoAssert(false);
|
||||
@ -762,7 +762,7 @@ Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
|
||||
void
|
||||
Assembler::asm_stkarg(LInsp arg, int stkd)
|
||||
{
|
||||
bool isF64 = arg->isF64();
|
||||
bool isF64 = arg->isD();
|
||||
|
||||
Register rr;
|
||||
if (arg->isUsed() && (rr = arg->deprecated_getReg(), deprecated_isKnownReg(rr))) {
|
||||
@ -864,7 +864,7 @@ Assembler::asm_call(LInsp ins)
|
||||
if (_config.arm_vfp && ins->isUsed()) {
|
||||
// If the result size is a floating-point value, treat the result
|
||||
// specially, as described previously.
|
||||
if (ci->returnType() == ARGTYPE_F) {
|
||||
if (ci->returnType() == ARGTYPE_D) {
|
||||
Register rr = ins->deprecated_getReg();
|
||||
|
||||
NanoAssert(ins->opcode() == LIR_fcall);
|
||||
@ -1241,8 +1241,8 @@ Assembler::asm_restore(LInsp i, Register r)
|
||||
{
|
||||
if (i->isop(LIR_alloc)) {
|
||||
asm_add_imm(r, FP, deprecated_disp(i));
|
||||
} else if (i->isconst()) {
|
||||
asm_ld_imm(r, i->imm32());
|
||||
} else if (i->isImmI()) {
|
||||
asm_ld_imm(r, i->immI());
|
||||
}
|
||||
else {
|
||||
// We can't easily load immediate values directly into FP registers, so
|
||||
@ -1311,7 +1311,7 @@ Assembler::asm_load64(LInsp ins)
|
||||
{
|
||||
//asm_output("<<< load64");
|
||||
|
||||
NanoAssert(ins->isF64());
|
||||
NanoAssert(ins->isD());
|
||||
|
||||
LIns* base = ins->oprnd1();
|
||||
int offset = ins->disp();
|
||||
@ -1400,14 +1400,14 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
|
||||
if (_config.arm_vfp) {
|
||||
Register rb = findRegFor(base, GpRegs);
|
||||
|
||||
if (value->isconstf()) {
|
||||
if (value->isImmD()) {
|
||||
underrunProtect(LD32_size*2 + 8);
|
||||
|
||||
// XXX use another reg, get rid of dependency
|
||||
STR(IP, rb, dr);
|
||||
asm_ld_imm(IP, value->imm64_0(), false);
|
||||
asm_ld_imm(IP, value->immQorDlo(), false);
|
||||
STR(IP, rb, dr+4);
|
||||
asm_ld_imm(IP, value->imm64_1(), false);
|
||||
asm_ld_imm(IP, value->immQorDhi(), false);
|
||||
|
||||
return;
|
||||
}
|
||||
@ -1433,9 +1433,9 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
|
||||
|
||||
// if it's a constant, make sure our baseReg/baseOffset location
|
||||
// has the right value
|
||||
if (value->isconstf()) {
|
||||
if (value->isImmD()) {
|
||||
underrunProtect(4*4);
|
||||
asm_immf_nochk(rv, value->imm64_0(), value->imm64_1());
|
||||
asm_immf_nochk(rv, value->immQorDlo(), value->immQorDhi());
|
||||
}
|
||||
} else {
|
||||
int da = findMemFor(value);
|
||||
@ -1449,14 +1449,14 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
|
||||
if (_config.arm_vfp) {
|
||||
Register rb = findRegFor(base, GpRegs);
|
||||
|
||||
if (value->isconstf()) {
|
||||
if (value->isImmD()) {
|
||||
underrunProtect(LD32_size*2 + 8);
|
||||
|
||||
// XXX use another reg, get rid of dependency
|
||||
STR(IP, rb, dr);
|
||||
asm_ld_imm(IP, value->imm64_0(), false);
|
||||
asm_ld_imm(IP, value->immQorDlo(), false);
|
||||
STR(IP, rb, dr+4);
|
||||
asm_ld_imm(IP, value->imm64_1(), false);
|
||||
asm_ld_imm(IP, value->immQorDhi(), false);
|
||||
|
||||
return;
|
||||
}
|
||||
@ -1484,9 +1484,9 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
|
||||
|
||||
// if it's a constant, make sure our baseReg/baseOffset location
|
||||
// has the right value
|
||||
if (value->isconstf()) {
|
||||
if (value->isImmD()) {
|
||||
underrunProtect(4*4);
|
||||
asm_immf_nochk(rv, value->imm64_0(), value->imm64_1());
|
||||
asm_immf_nochk(rv, value->immQorDlo(), value->immQorDhi());
|
||||
}
|
||||
} else {
|
||||
NanoAssertMsg(0, "st32f not supported with non-VFP, fix me");
|
||||
@ -1503,7 +1503,7 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
|
||||
// Stick a float into register rr, where p points to the two
|
||||
// 32-bit parts of the quad, optionally also storing at FP+d
|
||||
void
|
||||
Assembler::asm_immf_nochk(Register rr, int32_t imm64_0, int32_t imm64_1)
|
||||
Assembler::asm_immf_nochk(Register rr, int32_t immQorDlo, int32_t immQorDhi)
|
||||
{
|
||||
// We're not going to use a slot, because it might be too far
|
||||
// away. Instead, we're going to stick a branch in the stream to
|
||||
@ -1512,14 +1512,14 @@ Assembler::asm_immf_nochk(Register rr, int32_t imm64_0, int32_t imm64_1)
|
||||
|
||||
// stream should look like:
|
||||
// branch A
|
||||
// imm64_0
|
||||
// imm64_1
|
||||
// immQorDlo
|
||||
// immQorDhi
|
||||
// A: FLDD PC-16
|
||||
|
||||
FLDD(rr, PC, -16);
|
||||
|
||||
*(--_nIns) = (NIns) imm64_1;
|
||||
*(--_nIns) = (NIns) imm64_0;
|
||||
*(--_nIns) = (NIns) immQorDhi;
|
||||
*(--_nIns) = (NIns) immQorDlo;
|
||||
|
||||
B_nochk(_nIns+2);
|
||||
}
|
||||
@ -1539,7 +1539,7 @@ Assembler::asm_immf(LInsp ins)
|
||||
asm_spill(rr, d, false, true);
|
||||
|
||||
underrunProtect(4*4);
|
||||
asm_immf_nochk(rr, ins->imm64_0(), ins->imm64_1());
|
||||
asm_immf_nochk(rr, ins->immQorDlo(), ins->immQorDhi());
|
||||
} else {
|
||||
NanoAssert(d);
|
||||
// asm_mmq might spill a reg, so don't call it;
|
||||
@ -1547,9 +1547,9 @@ Assembler::asm_immf(LInsp ins)
|
||||
//asm_mmq(FP, d, PC, -16);
|
||||
|
||||
STR(IP, FP, d+4);
|
||||
asm_ld_imm(IP, ins->imm64_1());
|
||||
asm_ld_imm(IP, ins->immQorDhi());
|
||||
STR(IP, FP, d);
|
||||
asm_ld_imm(IP, ins->imm64_0());
|
||||
asm_ld_imm(IP, ins->immQorDlo());
|
||||
}
|
||||
|
||||
//asm_output("<<< asm_immf");
|
||||
@ -2160,7 +2160,7 @@ Assembler::asm_fcmp(LInsp ins)
|
||||
LInsp rhs = ins->oprnd2();
|
||||
LOpcode op = ins->opcode();
|
||||
|
||||
NanoAssert(isFCmpOpcode(op));
|
||||
NanoAssert(isCmpDOpcode(op));
|
||||
|
||||
Register ra, rb;
|
||||
findRegFor2(FpRegs, lhs, ra, FpRegs, rhs, rb);
|
||||
@ -2180,7 +2180,7 @@ Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
|
||||
{
|
||||
LOpcode condop = cond->opcode();
|
||||
NanoAssert(cond->isCmp());
|
||||
NanoAssert(_config.arm_vfp || !isFCmpOpcode(condop));
|
||||
NanoAssert(_config.arm_vfp || !isCmpDOpcode(condop));
|
||||
|
||||
// The old "never" condition code has special meaning on newer ARM cores,
|
||||
// so use "always" as a sensible default code.
|
||||
@ -2258,11 +2258,11 @@ Assembler::asm_cmp(LIns *cond)
|
||||
LInsp lhs = cond->oprnd1();
|
||||
LInsp rhs = cond->oprnd2();
|
||||
|
||||
NanoAssert(lhs->isI32() && rhs->isI32());
|
||||
NanoAssert(lhs->isI() && rhs->isI());
|
||||
|
||||
// ready to issue the compare
|
||||
if (rhs->isconst()) {
|
||||
int c = rhs->imm32();
|
||||
if (rhs->isImmI()) {
|
||||
int c = rhs->immI();
|
||||
Register r = findRegFor(lhs, GpRegs);
|
||||
if (c == 0 && cond->isop(LIR_eq)) {
|
||||
TST(r, r);
|
||||
@ -2373,32 +2373,32 @@ Assembler::asm_arith(LInsp ins)
|
||||
// basic arithmetic instructions to generate constant multiplications.
|
||||
// However, LIR_mul is never invoked with a constant during
|
||||
// trace-tests.js so it is very unlikely to be worthwhile implementing it.
|
||||
if (rhs->isconst() && op != LIR_mul && op != LIR_mulxov)
|
||||
if (rhs->isImmI() && op != LIR_mul && op != LIR_mulxov)
|
||||
{
|
||||
if ((op == LIR_add || op == LIR_addxov) && lhs->isop(LIR_ialloc)) {
|
||||
// Add alloc+const. The result should be the address of the
|
||||
// allocated space plus a constant.
|
||||
Register rs = deprecated_prepResultReg(ins, allow);
|
||||
int d = findMemFor(lhs) + rhs->imm32();
|
||||
int d = findMemFor(lhs) + rhs->immI();
|
||||
|
||||
NanoAssert(deprecated_isKnownReg(rs));
|
||||
asm_add_imm(rs, FP, d);
|
||||
}
|
||||
|
||||
int32_t imm32 = rhs->imm32();
|
||||
int32_t immI = rhs->immI();
|
||||
|
||||
switch (op)
|
||||
{
|
||||
case LIR_add: asm_add_imm(rr, ra, imm32); break;
|
||||
case LIR_addxov: asm_add_imm(rr, ra, imm32, 1); break;
|
||||
case LIR_sub: asm_sub_imm(rr, ra, imm32); break;
|
||||
case LIR_subxov: asm_sub_imm(rr, ra, imm32, 1); break;
|
||||
case LIR_and: asm_and_imm(rr, ra, imm32); break;
|
||||
case LIR_or: asm_orr_imm(rr, ra, imm32); break;
|
||||
case LIR_xor: asm_eor_imm(rr, ra, imm32); break;
|
||||
case LIR_lsh: LSLi(rr, ra, imm32); break;
|
||||
case LIR_rsh: ASRi(rr, ra, imm32); break;
|
||||
case LIR_ush: LSRi(rr, ra, imm32); break;
|
||||
case LIR_add: asm_add_imm(rr, ra, immI); break;
|
||||
case LIR_addxov: asm_add_imm(rr, ra, immI, 1); break;
|
||||
case LIR_sub: asm_sub_imm(rr, ra, immI); break;
|
||||
case LIR_subxov: asm_sub_imm(rr, ra, immI, 1); break;
|
||||
case LIR_and: asm_and_imm(rr, ra, immI); break;
|
||||
case LIR_or: asm_orr_imm(rr, ra, immI); break;
|
||||
case LIR_xor: asm_eor_imm(rr, ra, immI); break;
|
||||
case LIR_lsh: LSLi(rr, ra, immI); break;
|
||||
case LIR_rsh: ASRi(rr, ra, immI); break;
|
||||
case LIR_ush: LSRi(rr, ra, immI); break;
|
||||
|
||||
default:
|
||||
NanoAssertMsg(0, "Unsupported");
|
||||
@ -2609,7 +2609,7 @@ Assembler::asm_cmov(LInsp ins)
|
||||
LIns* iffalse = ins->oprnd3();
|
||||
|
||||
NanoAssert(condval->isCmp());
|
||||
NanoAssert(ins->opcode() == LIR_cmov && iftrue->isI32() && iffalse->isI32());
|
||||
NanoAssert(ins->opcode() == LIR_cmov && iftrue->isI() && iffalse->isI());
|
||||
|
||||
const Register rr = deprecated_prepResultReg(ins, GpRegs);
|
||||
|
||||
@ -2679,7 +2679,7 @@ void
|
||||
Assembler::asm_immi(LInsp ins)
|
||||
{
|
||||
Register rr = deprecated_prepResultReg(ins, GpRegs);
|
||||
asm_ld_imm(rr, ins->imm32());
|
||||
asm_ld_imm(rr, ins->immI());
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -62,7 +62,7 @@ namespace nanojit
|
||||
# define NJ_ARM_EABI 1
|
||||
#endif
|
||||
|
||||
// only d0-d6 are actually used; we'll use d7 as s14-s15 for i2f/u2f/etc.
|
||||
// only d0-d6 are actually used; we'll use d7 as s14-s15 for i2d/u2f/etc.
|
||||
#define NJ_VFP_MAX_REGISTERS 8
|
||||
#define NJ_MAX_REGISTERS (11 + NJ_VFP_MAX_REGISTERS)
|
||||
|
||||
@ -117,7 +117,7 @@ typedef enum {
|
||||
D4 = 20,
|
||||
D5 = 21,
|
||||
D6 = 22,
|
||||
// S14 overlaps with D7 and is hard-coded into i2f and u2f operations, but
|
||||
// S14 overlaps with D7 and is hard-coded into i2d and u2f operations, but
|
||||
// D7 is still listed here for completeness and to facilitate assertions.
|
||||
D7 = 23,
|
||||
|
||||
@ -175,7 +175,7 @@ static const RegisterMask SavedFpRegs = 0;
|
||||
static const RegisterMask SavedRegs = 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10;
|
||||
static const int NumSavedRegs = 7;
|
||||
|
||||
static const RegisterMask FpRegs = 1<<D0 | 1<<D1 | 1<<D2 | 1<<D3 | 1<<D4 | 1<<D5 | 1<<D6; // no D7; S14-S15 are used for i2f/u2f.
|
||||
static const RegisterMask FpRegs = 1<<D0 | 1<<D1 | 1<<D2 | 1<<D3 | 1<<D4 | 1<<D5 | 1<<D6; // no D7; S14-S15 are used for i2d/u2f.
|
||||
static const RegisterMask GpRegs = 0xFFFF;
|
||||
static const RegisterMask AllowableFlagRegs = 1<<R0 | 1<<R1 | 1<<R2 | 1<<R3 | 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10;
|
||||
|
||||
|
@ -361,9 +361,9 @@ namespace nanojit
|
||||
|
||||
void Assembler::asm_store_imm64(LIns *value, int dr, Register rbase)
|
||||
{
|
||||
NanoAssert(value->isconstf());
|
||||
int32_t msw = value->imm64_1();
|
||||
int32_t lsw = value->imm64_0();
|
||||
NanoAssert(value->isImmD());
|
||||
int32_t msw = value->immQorDhi();
|
||||
int32_t lsw = value->immQorDlo();
|
||||
|
||||
// li $at,lsw # iff lsw != 0
|
||||
// sw $at,off+LSWOFF($rbase) # may use $0 instead of $at
|
||||
@ -392,10 +392,10 @@ namespace nanojit
|
||||
void Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
|
||||
{
|
||||
NanoAssert(deprecated_isKnownReg(r));
|
||||
if (ty == ARGTYPE_I || ty == ARGTYPE_U) {
|
||||
if (ty == ARGTYPE_I || ty == ARGTYPE_UI) {
|
||||
// arg goes in specific register
|
||||
if (p->isconst())
|
||||
asm_li(r, p->imm32());
|
||||
if (p->isImmI())
|
||||
asm_li(r, p->immI());
|
||||
else {
|
||||
if (p->isUsed()) {
|
||||
if (!p->deprecated_hasKnownReg()) {
|
||||
@ -425,7 +425,7 @@ namespace nanojit
|
||||
|
||||
void Assembler::asm_stkarg(LInsp arg, int stkd)
|
||||
{
|
||||
bool isF64 = arg->isF64();
|
||||
bool isF64 = arg->isD();
|
||||
Register rr;
|
||||
if (arg->isUsed() && (rr = arg->deprecated_getReg(), deprecated_isKnownReg(rr))) {
|
||||
// The argument resides somewhere in registers, so we simply need to
|
||||
@ -464,7 +464,7 @@ namespace nanojit
|
||||
|
||||
// Encode a 64-bit floating-point argument using the appropriate ABI.
|
||||
// This function operates in the same way as asm_arg, except that it will only
|
||||
// handle arguments where (ArgType)ty == ARGTYPE_F.
|
||||
// handle arguments where (ArgType)ty == ARGTYPE_D.
|
||||
void
|
||||
Assembler::asm_arg_64(LInsp arg, Register& r, Register& fr, int& stkd)
|
||||
{
|
||||
@ -631,7 +631,7 @@ namespace nanojit
|
||||
if (cpu_has_fpu && deprecated_isKnownReg(rr)) {
|
||||
if (d)
|
||||
asm_spill(rr, d, false, true);
|
||||
asm_li_d(rr, ins->imm64_1(), ins->imm64_0());
|
||||
asm_li_d(rr, ins->immQorDhi(), ins->immQorDlo());
|
||||
}
|
||||
else {
|
||||
NanoAssert(d);
|
||||
@ -657,7 +657,7 @@ namespace nanojit
|
||||
|
||||
void Assembler::asm_load64(LIns *ins)
|
||||
{
|
||||
NanoAssert(ins->isF64());
|
||||
NanoAssert(ins->isD());
|
||||
|
||||
LIns* base = ins->oprnd1();
|
||||
int dr = ins->disp();
|
||||
@ -770,7 +770,7 @@ namespace nanojit
|
||||
void Assembler::asm_immi(LIns *ins)
|
||||
{
|
||||
Register rr = deprecated_prepResultReg(ins, GpRegs);
|
||||
asm_li(rr, ins->imm32());
|
||||
asm_li(rr, ins->immI());
|
||||
TAG("asm_immi(ins=%p{%s})", ins, lirNames[ins->opcode()]);
|
||||
}
|
||||
|
||||
@ -781,7 +781,7 @@ namespace nanojit
|
||||
LIns* iffalse = ins->oprnd3();
|
||||
|
||||
NanoAssert(condval->isCmp());
|
||||
NanoAssert(ins->opcode() == LIR_cmov && iftrue->isI32() && iffalse->isI32());
|
||||
NanoAssert(ins->opcode() == LIR_cmov && iftrue->isI() && iffalse->isI());
|
||||
|
||||
const Register rr = deprecated_prepResultReg(ins, GpRegs);
|
||||
|
||||
@ -948,8 +948,8 @@ namespace nanojit
|
||||
allow &= ~rmask(rr);
|
||||
allow &= ~rmask(ra);
|
||||
|
||||
if (rhs->isconst()) {
|
||||
int32_t rhsc = rhs->imm32();
|
||||
if (rhs->isImmI()) {
|
||||
int32_t rhsc = rhs->immI();
|
||||
if (isS16(rhsc)) {
|
||||
// MIPS arith immediate ops sign-extend the imm16 value
|
||||
switch (op) {
|
||||
@ -1098,7 +1098,7 @@ namespace nanojit
|
||||
if (cpu_has_fpu) {
|
||||
Register rbase = findRegFor(base, GpRegs);
|
||||
|
||||
if (value->isconstf())
|
||||
if (value->isImmD())
|
||||
asm_store_imm64(value, dr, rbase);
|
||||
else {
|
||||
Register fr = findRegFor(value, FpRegs);
|
||||
@ -1151,8 +1151,8 @@ namespace nanojit
|
||||
asm_li(AT, d);
|
||||
}
|
||||
}
|
||||
else if (i->isconst()) {
|
||||
asm_li(r, i->imm32());
|
||||
else if (i->isImmI()) {
|
||||
asm_li(r, i->immI());
|
||||
}
|
||||
else {
|
||||
d = findMemFor(i);
|
||||
@ -1168,7 +1168,7 @@ namespace nanojit
|
||||
|
||||
void Assembler::asm_cmp(LOpcode condop, LIns *a, LIns *b, Register cr)
|
||||
{
|
||||
RegisterMask allow = isFCmpOpcode(condop) ? FpRegs : GpRegs;
|
||||
RegisterMask allow = isCmpDOpcode(condop) ? FpRegs : GpRegs;
|
||||
Register ra = findRegFor(a, allow);
|
||||
Register rb = (b==a) ? ra : findRegFor(b, allow & ~rmask(ra));
|
||||
|
||||
@ -1331,7 +1331,7 @@ namespace nanojit
|
||||
NIns *patch = NULL;
|
||||
NIns *btarg = asm_branchtarget(targ);
|
||||
|
||||
if (cpu_has_fpu && isFCmpOpcode(condop)) {
|
||||
if (cpu_has_fpu && isCmpDOpcode(condop)) {
|
||||
// c.xx.d $ra,$rb
|
||||
// bc1x btarg
|
||||
switch (condop) {
|
||||
@ -1495,7 +1495,7 @@ namespace nanojit
|
||||
{
|
||||
NanoAssert(cond->isCmp());
|
||||
LOpcode condop = cond->opcode();
|
||||
RegisterMask allow = isFCmpOpcode(condop) ? FpRegs : GpRegs;
|
||||
RegisterMask allow = isCmpDOpcode(condop) ? FpRegs : GpRegs;
|
||||
LIns *a = cond->oprnd1();
|
||||
LIns *b = cond->oprnd2();
|
||||
Register ra = findRegFor(a, allow);
|
||||
@ -1564,11 +1564,11 @@ namespace nanojit
|
||||
// The stack offset must always be at least aligned to 4 bytes.
|
||||
NanoAssert((stkd & 3) == 0);
|
||||
|
||||
if (ty == ARGTYPE_F) {
|
||||
if (ty == ARGTYPE_D) {
|
||||
// This task is fairly complex and so is delegated to asm_arg_64.
|
||||
asm_arg_64(arg, r, fr, stkd);
|
||||
} else {
|
||||
NanoAssert(ty == ARGTYPE_I || ty == ARGTYPE_U);
|
||||
NanoAssert(ty == ARGTYPE_I || ty == ARGTYPE_UI);
|
||||
if (stkd < 16) {
|
||||
asm_regarg(ty, arg, r);
|
||||
fr = nextreg(fr);
|
||||
|
@ -310,7 +310,7 @@ namespace nanojit
|
||||
}
|
||||
|
||||
void Assembler::asm_store64(LOpcode op, LIns *value, int32_t dr, LIns *base) {
|
||||
NanoAssert(value->isN64());
|
||||
NanoAssert(value->isQorD());
|
||||
|
||||
switch (op) {
|
||||
case LIR_stfi:
|
||||
@ -546,19 +546,19 @@ namespace nanojit
|
||||
}
|
||||
|
||||
void Assembler::asm_cmp(LOpcode condop, LIns *a, LIns *b, ConditionRegister cr) {
|
||||
RegisterMask allow = isFCmpOpcode(condop) ? FpRegs : GpRegs;
|
||||
RegisterMask allow = isCmpDOpcode(condop) ? FpRegs : GpRegs;
|
||||
Register ra = findRegFor(a, allow);
|
||||
|
||||
#if !PEDANTIC
|
||||
if (b->isconst()) {
|
||||
int32_t d = b->imm32();
|
||||
if (b->isImmI()) {
|
||||
int32_t d = b->immI();
|
||||
if (isS16(d)) {
|
||||
if (isSICmpOpcode(condop)) {
|
||||
if (isCmpSIOpcode(condop)) {
|
||||
CMPWI(cr, ra, d);
|
||||
return;
|
||||
}
|
||||
#if defined NANOJIT_64BIT
|
||||
if (isSQCmpOpcode(condop)) {
|
||||
if (isCmpSQOpcode(condop)) {
|
||||
CMPDI(cr, ra, d);
|
||||
TODO(cmpdi);
|
||||
return;
|
||||
@ -566,12 +566,12 @@ namespace nanojit
|
||||
#endif
|
||||
}
|
||||
if (isU16(d)) {
|
||||
if (isUICmpOpcode(condop)) {
|
||||
if (isCmpUIOpcode(condop)) {
|
||||
CMPLWI(cr, ra, d);
|
||||
return;
|
||||
}
|
||||
#if defined NANOJIT_64BIT
|
||||
if (isUQCmpOpcode(condop)) {
|
||||
if (isCmpUQOpcode(condop)) {
|
||||
CMPLDI(cr, ra, d);
|
||||
TODO(cmpldi);
|
||||
return;
|
||||
@ -583,21 +583,21 @@ namespace nanojit
|
||||
|
||||
// general case
|
||||
Register rb = b==a ? ra : findRegFor(b, allow & ~rmask(ra));
|
||||
if (isSICmpOpcode(condop)) {
|
||||
if (isCmpSIOpcode(condop)) {
|
||||
CMPW(cr, ra, rb);
|
||||
}
|
||||
else if (isUICmpOpcode(condop)) {
|
||||
else if (isCmpUIOpcode(condop)) {
|
||||
CMPLW(cr, ra, rb);
|
||||
}
|
||||
#if defined NANOJIT_64BIT
|
||||
else if (isSQCmpOpcode(condop)) {
|
||||
else if (isCmpSQOpcode(condop)) {
|
||||
CMPD(cr, ra, rb);
|
||||
}
|
||||
else if (isUQCmpOpcode(condop)) {
|
||||
else if (isCmpUQOpcode(condop)) {
|
||||
CMPLD(cr, ra, rb);
|
||||
}
|
||||
#endif
|
||||
else if (isFCmpOpcode(condop)) {
|
||||
else if (isCmpDOpcode(condop)) {
// set the lt/gt bit for fle/fge. We don't do this for
// int/uint because in those cases we can invert the branch condition.
// for float, we can't because of unordered comparisons
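The constraint comes from NaN: with an unordered operand both a < b and a >= b are false, so flipping the branch sense is not a valid substitute for flipping the comparison. A two-line demonstration in plain C++:

    #include <cmath>
    #include <cstdio>

    int main() {
        double a = std::nan(""), b = 1.0;
        // With a NaN operand both comparisons are false (unordered),
        // so !(a < b) is NOT the same test as (a >= b).
        printf("%d %d\n", (int)(a < b), (int)(a >= b));   // prints: 0 0
        return 0;
    }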
|
||||
@ -638,21 +638,21 @@ namespace nanojit
|
||||
d = deprecated_disp(i);
|
||||
ADDI(r, FP, d);
|
||||
}
|
||||
else if (i->isconst()) {
|
||||
asm_li(r, i->imm32());
|
||||
else if (i->isImmI()) {
|
||||
asm_li(r, i->immI());
|
||||
}
|
||||
// XXX: should really rematerializable isconstf() and isconstq() cases
|
||||
// XXX: should really rematerializable isImmD() and isImmQ() cases
|
||||
// here; canRemat() assumes they will be rematerialized.
|
||||
else {
|
||||
d = findMemFor(i);
|
||||
if (IsFpReg(r)) {
|
||||
NanoAssert(i->isN64());
|
||||
NanoAssert(i->isQorD());
|
||||
LFD(r, d, FP);
|
||||
} else if (i->isN64()) {
|
||||
} else if (i->isQorD()) {
|
||||
NanoAssert(IsGpReg(r));
|
||||
LD(r, d, FP);
|
||||
} else {
|
||||
NanoAssert(i->isI32());
|
||||
NanoAssert(i->isI());
|
||||
NanoAssert(IsGpReg(r));
|
||||
LWZ(r, d, FP);
|
||||
}
|
||||
@ -661,7 +661,7 @@ namespace nanojit
|
||||
|
||||
void Assembler::asm_immi(LIns *ins) {
|
||||
Register rr = deprecated_prepResultReg(ins, GpRegs);
|
||||
asm_li(rr, ins->imm32());
|
||||
asm_li(rr, ins->immI());
|
||||
}
|
||||
|
||||
void Assembler::asm_fneg(LIns *ins) {
|
||||
@ -729,7 +729,7 @@ namespace nanojit
|
||||
ArgType ty = argTypes[j];
|
||||
LInsp arg = ins->arg(j);
|
||||
NanoAssert(ty != ARGTYPE_V);
|
||||
if (ty != ARGTYPE_F) {
|
||||
if (ty != ARGTYPE_D) {
|
||||
// GP arg
|
||||
if (r <= R10) {
|
||||
asm_regarg(ty, arg, r);
|
||||
@ -764,20 +764,20 @@ namespace nanojit
|
||||
{
|
||||
NanoAssert(r != deprecated_UnknownReg);
|
||||
NanoAssert(ty != ARGTYPE_V);
|
||||
if (ty != ARGTYPE_F)
|
||||
if (ty != ARGTYPE_D)
|
||||
{
|
||||
#ifdef NANOJIT_64BIT
|
||||
if (ty == ARGTYPE_I) {
|
||||
// sign extend 32->64
|
||||
EXTSW(r, r);
|
||||
} else if (ty == ARGTYPE_U) {
|
||||
} else if (ty == ARGTYPE_UI) {
|
||||
// zero extend 32->64
|
||||
CLRLDI(r, r, 32);
|
||||
}
|
||||
#endif
|
||||
// arg goes in specific register
|
||||
if (p->isconst()) {
|
||||
asm_li(r, p->imm32());
|
||||
if (p->isImmI()) {
|
||||
asm_li(r, p->immI());
|
||||
} else {
|
||||
if (p->isUsed()) {
|
||||
if (!p->deprecated_hasKnownReg()) {
|
||||
@ -786,7 +786,7 @@ namespace nanojit
|
||||
if (p->isop(LIR_alloc)) {
|
||||
NanoAssert(isS16(d));
|
||||
ADDI(r, FP, d);
|
||||
} else if (p->isN64()) {
|
||||
} else if (p->isQorD()) {
|
||||
LD(r, d, FP);
|
||||
} else {
|
||||
LWZ(r, d, FP);
|
||||
@ -850,8 +850,8 @@ namespace nanojit
|
||||
Register rr = deprecated_prepResultReg(ins, allow);
|
||||
Register ra = findRegFor(lhs, GpRegs);
|
||||
|
||||
if (rhs->isconst()) {
|
||||
int32_t rhsc = rhs->imm32();
|
||||
if (rhs->isImmI()) {
|
||||
int32_t rhsc = rhs->immI();
|
||||
if (isS16(rhsc)) {
|
||||
// ppc arith immediate ops sign-extend the imm16 value
|
||||
switch (op) {
|
||||
@ -1057,7 +1057,7 @@ namespace nanojit
|
||||
int32_t hi, lo;
|
||||
} w;
|
||||
};
|
||||
d = ins->imm64f();
|
||||
d = ins->immD();
|
||||
LFD(r, 8, SP);
|
||||
STW(R0, 12, SP);
|
||||
asm_li(R0, w.lo);
|
||||
@ -1065,7 +1065,7 @@ namespace nanojit
|
||||
asm_li(R0, w.hi);
|
||||
}
|
||||
else {
|
||||
int64_t q = ins->imm64();
|
||||
int64_t q = ins->immQ();
|
||||
if (isS32(q)) {
|
||||
asm_li(r, int32_t(q));
|
||||
return;
|
||||
@ -1099,7 +1099,7 @@ namespace nanojit
|
||||
int32_t hi, lo;
|
||||
} w;
|
||||
};
|
||||
d = ins->imm64f();
|
||||
d = ins->immD();
|
||||
LFD(r, 8, SP);
|
||||
STW(R0, 12, SP);
|
||||
asm_li(R0, w.lo);
|
||||
@ -1107,7 +1107,7 @@ namespace nanojit
|
||||
asm_li(R0, w.hi);
|
||||
}
|
||||
else {
|
||||
int64_t q = ins->imm64();
|
||||
int64_t q = ins->immQ();
|
||||
if (isS32(q)) {
|
||||
asm_li(r, int32_t(q));
|
||||
return;
|
||||
@ -1216,10 +1216,10 @@ namespace nanojit
|
||||
|
||||
NanoAssert(cond->isCmp());
|
||||
#ifdef NANOJIT_64BIT
|
||||
NanoAssert((ins->opcode() == LIR_cmov && iftrue->isI32() && iffalse->isI32()) ||
|
||||
(ins->opcode() == LIR_qcmov && iftrue->isI64() && iffalse->isI64()));
|
||||
NanoAssert((ins->opcode() == LIR_cmov && iftrue->isI() && iffalse->isI()) ||
|
||||
(ins->opcode() == LIR_qcmov && iftrue->isQ() && iffalse->isQ()));
|
||||
#else
|
||||
NanoAssert((ins->opcode() == LIR_cmov && iftrue->isI32() && iffalse->isI32()));
|
||||
NanoAssert((ins->opcode() == LIR_cmov && iftrue->isI() && iffalse->isI()));
|
||||
#endif
|
||||
|
||||
// fixme: we could handle fpu registers here, too, since we're just branching
|
||||
|
@ -391,14 +391,14 @@ namespace nanojit
|
||||
#define SRD(rd,rs,rb) BITALU2(srd, rd, rs, rb, 0)
|
||||
#define SRAD(rd,rs,rb) BITALU2(srad, rd, rs, rb, 0)
|
||||
|
||||
#define FADD(rd,ra,rb) FPUAB(fadd, rd, ra, rb, 0)
|
||||
#define FADD_(rd,ra,rb) FPUAB(fadd, rd, ra, rb, 1)
|
||||
#define FDIV(rd,ra,rb) FPUAB(fdiv, rd, ra, rb, 0)
|
||||
#define FDIV_(rd,ra,rb) FPUAB(fdiv, rd, ra, rb, 1)
|
||||
#define FMUL(rd,ra,rb) FPUAC(fmul, rd, ra, rb, 0)
|
||||
#define FMUL_(rd,ra,rb) FPUAC(fmul, rd, ra, rb, 1)
|
||||
#define FSUB(rd,ra,rb) FPUAB(fsub, rd, ra, rb, 0)
|
||||
#define FSUB_(rd,ra,rb) FPUAB(fsub, rd, ra, rb, 1)
|
||||
#define FADD(rd,ra,rb) FPUAB(addd, rd, ra, rb, 0)
|
||||
#define FADD_(rd,ra,rb) FPUAB(addd, rd, ra, rb, 1)
|
||||
#define FDIV(rd,ra,rb) FPUAB(divd, rd, ra, rb, 0)
|
||||
#define FDIV_(rd,ra,rb) FPUAB(divd, rd, ra, rb, 1)
|
||||
#define FMUL(rd,ra,rb) FPUAC(muld, rd, ra, rb, 0)
|
||||
#define FMUL_(rd,ra,rb) FPUAC(muld, rd, ra, rb, 1)
|
||||
#define FSUB(rd,ra,rb) FPUAB(subd, rd, ra, rb, 0)
|
||||
#define FSUB_(rd,ra,rb) FPUAB(subd, rd, ra, rb, 1)
|
||||
|
||||
#define MULLI(rd,ra,simm) EMIT1(PPC_mulli | GPR(rd)<<21 | GPR(ra)<<16 | uint16_t(simm),\
|
||||
"mulli %s,%s,%d", gpn(rd), gpn(ra), int16_t(simm))
|
||||
|
@ -190,7 +190,7 @@ namespace nanojit
|
||||
{
|
||||
uint32_t j = argc-i-1;
|
||||
ArgType ty = argTypes[j];
|
||||
if (ty == ARGTYPE_F) {
|
||||
if (ty == ARGTYPE_D) {
|
||||
Register r = findRegFor(ins->arg(j), FpRegs);
|
||||
GPRIndex += 2;
|
||||
offset += 8;
|
||||
@ -263,8 +263,8 @@ namespace nanojit
|
||||
int32_t d = deprecated_disp(i);
|
||||
SET32(d, L2);
|
||||
}
|
||||
else if (i->isconst()) {
|
||||
int v = i->imm32();
|
||||
else if (i->isImmI()) {
|
||||
int v = i->immI();
|
||||
SET32(v, r);
|
||||
} else {
|
||||
int d = findMemFor(i);
|
||||
@ -292,10 +292,10 @@ namespace nanojit
|
||||
}
|
||||
|
||||
underrunProtect(20);
|
||||
if (value->isconst())
|
||||
if (value->isImmI())
|
||||
{
|
||||
Register rb = getBaseReg(base, dr, GpRegs);
|
||||
int c = value->imm32();
|
||||
int c = value->immI();
|
||||
switch (op) {
|
||||
case LIR_sti:
|
||||
STW32(L2, dr, rb);
|
||||
@ -310,9 +310,9 @@ namespace nanojit
|
||||
{
|
||||
// make sure what is in a register
|
||||
Register ra, rb;
|
||||
if (base->isconst()) {
|
||||
if (base->isImmI()) {
|
||||
// absolute address
|
||||
dr += base->imm32();
|
||||
dr += base->immI();
|
||||
ra = findRegFor(value, GpRegs);
|
||||
rb = G0;
|
||||
} else {
|
||||
@ -399,15 +399,15 @@ namespace nanojit
|
||||
}
|
||||
|
||||
underrunProtect(48);
|
||||
if (value->isconstf())
|
||||
if (value->isImmD())
|
||||
{
|
||||
// if a constant 64-bit value just store it now rather than
|
||||
// generating a pointless store/load/store sequence
|
||||
Register rb = findRegFor(base, GpRegs);
|
||||
STW32(L2, dr+4, rb);
|
||||
SET32(value->imm64_0(), L2);
|
||||
SET32(value->immQorDlo(), L2);
|
||||
STW32(L2, dr, rb);
|
||||
SET32(value->imm64_1(), L2);
|
||||
SET32(value->immQorDhi(), L2);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -471,7 +471,7 @@ namespace nanojit
|
||||
NIns* at = 0;
|
||||
LOpcode condop = cond->opcode();
|
||||
NanoAssert(cond->isCmp());
|
||||
if (isFCmpOpcode(condop))
|
||||
if (isCmpDOpcode(condop))
|
||||
{
|
||||
return asm_fbranch(branchOnFalse, cond, targ);
|
||||
}
|
||||
@ -558,12 +558,12 @@ namespace nanojit
|
||||
LInsp lhs = cond->oprnd1();
|
||||
LInsp rhs = cond->oprnd2();
|
||||
|
||||
NanoAssert(lhs->isI32() && rhs->isI32());
|
||||
NanoAssert(lhs->isI() && rhs->isI());
|
||||
|
||||
// ready to issue the compare
|
||||
if (rhs->isconst())
|
||||
if (rhs->isImmI())
|
||||
{
|
||||
int c = rhs->imm32();
|
||||
int c = rhs->immI();
|
||||
Register r = findRegFor(lhs, GpRegs);
|
||||
if (c == 0 && cond->isop(LIR_eq)) {
|
||||
ANDCC(r, r, G0);
|
||||
@ -587,7 +587,7 @@ namespace nanojit
|
||||
Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
|
||||
underrunProtect(8);
|
||||
LOpcode condop = ins->opcode();
|
||||
NanoAssert(isFCmpOpcode(condop));
|
||||
NanoAssert(isCmpDOpcode(condop));
|
||||
if (condop == LIR_feq)
|
||||
MOVFEI(1, 0, 0, 0, r);
|
||||
else if (condop == LIR_fle)
|
||||
@ -640,7 +640,7 @@ namespace nanojit
|
||||
|
||||
Register rb = deprecated_UnknownReg;
|
||||
RegisterMask allow = GpRegs;
|
||||
bool forceReg = (op == LIR_mul || op == LIR_mulxov || !rhs->isconst());
|
||||
bool forceReg = (op == LIR_mul || op == LIR_mulxov || !rhs->isImmI());
|
||||
|
||||
if (lhs != rhs && forceReg)
|
||||
{
|
||||
@ -649,10 +649,10 @@ namespace nanojit
|
||||
}
|
||||
allow &= ~rmask(rb);
|
||||
}
|
||||
else if ((op == LIR_add || op == LIR_addxov) && lhs->isop(LIR_alloc) && rhs->isconst()) {
|
||||
else if ((op == LIR_add || op == LIR_addxov) && lhs->isop(LIR_alloc) && rhs->isImmI()) {
|
||||
// add alloc+const, use lea
|
||||
Register rr = deprecated_prepResultReg(ins, allow);
|
||||
int d = findMemFor(lhs) + rhs->imm32();
|
||||
int d = findMemFor(lhs) + rhs->immI();
|
||||
ADD(FP, L2, rr);
|
||||
SET32(d, L2);
|
||||
}
|
||||
@ -692,7 +692,7 @@ namespace nanojit
|
||||
}
|
||||
else
|
||||
{
|
||||
int c = rhs->imm32();
|
||||
int c = rhs->immI();
|
||||
if (op == LIR_add || op == LIR_addxov)
|
||||
ADDCC(rr, L2, rr);
|
||||
else if (op == LIR_sub || op == LIR_subxov)
|
||||
@ -777,7 +777,7 @@ namespace nanojit
|
||||
LIns* iffalse = ins->oprnd3();
|
||||
|
||||
NanoAssert(condval->isCmp());
|
||||
NanoAssert(op == LIR_cmov && iftrue->isI32() && iffalse->isI32());
|
||||
NanoAssert(op == LIR_cmov && iftrue->isI() && iffalse->isI());
|
||||
|
||||
const Register rr = deprecated_prepResultReg(ins, GpRegs);
|
||||
|
||||
@ -814,7 +814,7 @@ namespace nanojit
|
||||
{
|
||||
underrunProtect(8);
|
||||
Register rr = deprecated_prepResultReg(ins, GpRegs);
|
||||
int32_t val = ins->imm32();
|
||||
int32_t val = ins->immI();
|
||||
if (val == 0)
|
||||
XOR(rr, rr, rr);
|
||||
else
|
||||
@ -842,9 +842,9 @@ namespace nanojit
|
||||
if (d)
|
||||
{
|
||||
STW32(L2, d+4, FP);
|
||||
SET32(ins->imm64_0(), L2);
|
||||
SET32(ins->immQorDlo(), L2);
|
||||
STW32(L2, d, FP);
|
||||
SET32(ins->imm64_1(), L2);
|
||||
SET32(ins->immQorDhi(), L2);
|
||||
}
|
||||
}
|
||||
|
||||
@ -938,7 +938,7 @@ namespace nanojit
|
||||
{
|
||||
NIns *at = 0;
|
||||
LOpcode condop = cond->opcode();
|
||||
NanoAssert(isFCmpOpcode(condop));
|
||||
NanoAssert(isCmpDOpcode(condop));
|
||||
underrunProtect(32);
|
||||
intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
|
||||
// !targ means that it needs patch.
|
||||
|
@ -888,82 +888,82 @@ namespace nanojit
|
||||
#define isIMM22(imm) \
|
||||
(imm) <= 0x1fffff && (imm) >= -0x200000
|
||||
|
||||
#define SET32(imm32, rd) \
if(isIMM13(imm32)) { \
ORI(G0, imm32, rd); \
#define SET32(immI, rd) \
if(isIMM13(immI)) { \
ORI(G0, immI, rd); \
} else { \
ORI(rd, imm32 & 0x3FF, rd); \
SETHI(imm32, rd); \
ORI(rd, immI & 0x3FF, rd); \
SETHI(immI, rd); \
}
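SET32 is the standard SPARC recipe for materialising a 32-bit constant: a value that fits the signed 13-bit immediate goes through a single or-immediate, anything larger is split into the high 22 bits (sethi) plus the low 10 bits (the trailing or). A quick standalone check that the split is lossless (illustration only, not part of the macro):

    #include <cassert>
    #include <cstdint>

    int main() {
        uint32_t imm  = 0xDEADBEEF;
        uint32_t hi22 = imm & ~0x3FFu;   // contributed by SETHI (bits 31..10)
        uint32_t lo10 = imm &  0x3FFu;   // contributed by the final ORI
        assert((hi22 | lo10) == imm);    // the two pieces reassemble the constant
        return 0;
    }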
|
||||
|
||||
#define STDF32(rd, imm32, rs1) \
|
||||
if(isIMM13(imm32+4)) { \
|
||||
STFI(rd+1, imm32+4, rs1); \
|
||||
STFI(rd, imm32, rs1); \
|
||||
#define STDF32(rd, immI, rs1) \
|
||||
if(isIMM13(immI+4)) { \
|
||||
STFI(rd+1, immI+4, rs1); \
|
||||
STFI(rd, immI, rs1); \
|
||||
} else { \
|
||||
STF(rd+1, L0, rs1); \
|
||||
SET32(imm32+4, L0); \
|
||||
SET32(immI+4, L0); \
|
||||
STF(rd, L0, rs1); \
|
||||
SET32(imm32, L0); \
|
||||
SET32(immI, L0); \
|
||||
}
|
||||
|
||||
#define STF32(rd, imm32, rs1) \
|
||||
if(isIMM13(imm32+4)) { \
|
||||
STFI(rd, imm32, rs1); \
|
||||
#define STF32(rd, immI, rs1) \
|
||||
if(isIMM13(immI+4)) { \
|
||||
STFI(rd, immI, rs1); \
|
||||
} else { \
|
||||
STF(rd, L0, rs1); \
|
||||
SET32(imm32, L0); \
|
||||
SET32(immI, L0); \
|
||||
}
|
||||
|
||||
#define LDDF32(rs1, imm32, rd) \
|
||||
if(isIMM13(imm32+4)) { \
|
||||
LDFI(rs1, imm32+4, rd+1); \
|
||||
LDFI(rs1, imm32, rd); \
|
||||
#define LDDF32(rs1, immI, rd) \
|
||||
if(isIMM13(immI+4)) { \
|
||||
LDFI(rs1, immI+4, rd+1); \
|
||||
LDFI(rs1, immI, rd); \
|
||||
} else { \
|
||||
LDF(rs1, L0, rd+1); \
|
||||
SET32(imm32+4, L0); \
|
||||
SET32(immI+4, L0); \
|
||||
LDF(rs1, L0, rd); \
|
||||
SET32(imm32, L0); \
|
||||
SET32(immI, L0); \
|
||||
}
|
||||
|
||||
#define STW32(rd, imm32, rs1) \
|
||||
if(isIMM13(imm32)) { \
|
||||
STWI(rd, imm32, rs1); \
|
||||
#define STW32(rd, immI, rs1) \
|
||||
if(isIMM13(immI)) { \
|
||||
STWI(rd, immI, rs1); \
|
||||
} else { \
|
||||
STW(rd, L0, rs1); \
|
||||
SET32(imm32, L0); \
|
||||
SET32(immI, L0); \
|
||||
}
|
||||
|
||||
#define STB32(rd, imm32, rs1) \
|
||||
if(isIMM13(imm32)) { \
|
||||
STBI(rd, imm32, rs1); \
|
||||
#define STB32(rd, immI, rs1) \
|
||||
if(isIMM13(immI)) { \
|
||||
STBI(rd, immI, rs1); \
|
||||
} else { \
|
||||
STB(rd, L0, rs1); \
|
||||
SET32(imm32, L0); \
|
||||
SET32(immI, L0); \
|
||||
}
|
||||
|
||||
#define LDUB32(rs1, imm32, rd) \
|
||||
if(isIMM13(imm32)) { \
|
||||
LDUBI(rs1, imm32, rd); \
|
||||
#define LDUB32(rs1, immI, rd) \
|
||||
if(isIMM13(immI)) { \
|
||||
LDUBI(rs1, immI, rd); \
|
||||
} else { \
|
||||
LDUB(rs1, L0, rd); \
|
||||
SET32(imm32, L0); \
|
||||
SET32(immI, L0); \
|
||||
}
|
||||
|
||||
#define LDUH32(rs1, imm32, rd) \
|
||||
if(isIMM13(imm32)) { \
|
||||
LDUHI(rs1, imm32, rd); \
|
||||
#define LDUH32(rs1, immI, rd) \
|
||||
if(isIMM13(immI)) { \
|
||||
LDUHI(rs1, immI, rd); \
|
||||
} else { \
|
||||
LDUH(rs1, L0, rd); \
|
||||
SET32(imm32, L0); \
|
||||
SET32(immI, L0); \
|
||||
}
|
||||
|
||||
#define LDSW32(rs1, imm32, rd) \
|
||||
if(isIMM13(imm32)) { \
|
||||
LDSWI(rs1, imm32, rd); \
|
||||
#define LDSW32(rs1, immI, rd) \
|
||||
if(isIMM13(immI)) { \
|
||||
LDSWI(rs1, immI, rd); \
|
||||
} else { \
|
||||
LDSW(rs1, L0, rd); \
|
||||
SET32(imm32, L0); \
|
||||
SET32(immI, L0); \
|
||||
}
|
||||
|
||||
|
||||
|
@ -629,7 +629,7 @@ namespace nanojit
|
||||
// Shift requires rcx for shift count.
|
||||
LIns *a = ins->oprnd1();
|
||||
LIns *b = ins->oprnd2();
|
||||
if (b->isconst()) {
|
||||
if (b->isImmI()) {
|
||||
asm_shift_imm(ins);
|
||||
return;
|
||||
}
|
||||
@ -670,7 +670,7 @@ namespace nanojit
|
||||
Register rr, ra;
|
||||
beginOp1Regs(ins, GpRegs, rr, ra);
|
||||
|
||||
int shift = ins->oprnd2()->imm32() & 63;
|
||||
int shift = ins->oprnd2()->immI() & 63;
|
||||
switch (ins->opcode()) {
|
||||
default: TODO(shiftimm);
|
||||
case LIR_qursh: SHRQI(rr, shift); break;
|
||||
@ -687,10 +687,10 @@ namespace nanojit
|
||||
}
|
||||
|
||||
static bool isImm32(LIns *ins) {
|
||||
return ins->isconst() || (ins->isconstq() && isS32(ins->imm64()));
|
||||
return ins->isImmI() || (ins->isImmQ() && isS32(ins->immQ()));
|
||||
}
|
||||
static int32_t getImm32(LIns *ins) {
|
||||
return ins->isconst() ? ins->imm32() : int32_t(ins->imm64());
|
||||
return ins->isImmI() ? ins->immI() : int32_t(ins->immQ());
|
||||
}
|
||||
|
||||
// Binary op, integer regs, rhs is int32 constant.
|
||||
@ -932,19 +932,19 @@ namespace nanojit
|
||||
int j = argc - i - 1;
|
||||
ArgType ty = argTypes[j];
|
||||
LIns* arg = ins->arg(j);
|
||||
if ((ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) && arg_index < NumArgRegs) {
|
||||
if ((ty == ARGTYPE_I || ty == ARGTYPE_UI || ty == ARGTYPE_Q) && arg_index < NumArgRegs) {
|
||||
// gp arg
|
||||
asm_regarg(ty, arg, argRegs[arg_index]);
|
||||
arg_index++;
|
||||
}
|
||||
#ifdef _WIN64
|
||||
else if (ty == ARGTYPE_F && arg_index < NumArgRegs) {
|
||||
else if (ty == ARGTYPE_D && arg_index < NumArgRegs) {
|
||||
// double goes in XMM reg # based on overall arg_index
|
||||
asm_regarg(ty, arg, Register(XMM0+arg_index));
|
||||
arg_index++;
|
||||
}
|
||||
#else
|
||||
else if (ty == ARGTYPE_F && fr < XMM8) {
|
||||
else if (ty == ARGTYPE_D && fr < XMM8) {
|
||||
// double goes in next available XMM register
|
||||
asm_regarg(ty, arg, fr);
|
||||
fr = nextreg(fr);
|
||||
@ -962,17 +962,17 @@ namespace nanojit
|
||||
|
||||
void Assembler::asm_regarg(ArgType ty, LIns *p, Register r) {
|
||||
if (ty == ARGTYPE_I) {
|
||||
NanoAssert(p->isI32());
|
||||
if (p->isconst()) {
|
||||
asm_immq(r, int64_t(p->imm32()), /*canClobberCCs*/true);
|
||||
NanoAssert(p->isI());
|
||||
if (p->isImmI()) {
|
||||
asm_immq(r, int64_t(p->immI()), /*canClobberCCs*/true);
|
||||
return;
|
||||
}
|
||||
// sign extend int32 to int64
|
||||
MOVSXDR(r, r);
|
||||
} else if (ty == ARGTYPE_U) {
|
||||
NanoAssert(p->isI32());
|
||||
if (p->isconst()) {
|
||||
asm_immq(r, uint64_t(uint32_t(p->imm32())), /*canClobberCCs*/true);
|
||||
} else if (ty == ARGTYPE_UI) {
|
||||
NanoAssert(p->isI());
|
||||
if (p->isImmI()) {
|
||||
asm_immq(r, uint64_t(uint32_t(p->immI())), /*canClobberCCs*/true);
|
||||
return;
|
||||
}
|
||||
// zero extend with 32bit mov, auto-zeros upper 32bits
|
||||
@ -992,16 +992,16 @@ namespace nanojit
|
||||
|
||||
void Assembler::asm_stkarg(ArgType ty, LIns *p, int stk_off) {
|
||||
NanoAssert(isS8(stk_off));
|
||||
if (ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) {
|
||||
if (ty == ARGTYPE_I || ty == ARGTYPE_UI || ty == ARGTYPE_Q) {
|
||||
Register r = findRegFor(p, GpRegs);
|
||||
MOVQSPR(stk_off, r); // movq [rsp+d8], r
|
||||
if (ty == ARGTYPE_I) {
|
||||
// extend int32 to int64
|
||||
NanoAssert(p->isI32());
|
||||
NanoAssert(p->isI());
|
||||
MOVSXDR(r, r);
|
||||
} else if (ty == ARGTYPE_U) {
|
||||
} else if (ty == ARGTYPE_UI) {
|
||||
// extend uint32 to uint64
|
||||
NanoAssert(p->isI32());
|
||||
NanoAssert(p->isI());
|
||||
MOVLR(r, r);
|
||||
} else {
|
||||
NanoAssert(ty == ARGTYPE_Q);
|
||||
@ -1039,7 +1039,7 @@ namespace nanojit
|
||||
|
||||
void Assembler::asm_i2f(LIns *ins) {
|
||||
LIns *a = ins->oprnd1();
|
||||
NanoAssert(ins->isF64() && a->isI32());
|
||||
NanoAssert(ins->isD() && a->isI());
|
||||
|
||||
Register rr = prepareResultReg(ins, FpRegs);
|
||||
Register ra = findRegFor(a, GpRegs);
|
||||
@ -1050,7 +1050,7 @@ namespace nanojit
|
||||
|
||||
void Assembler::asm_u2f(LIns *ins) {
|
||||
LIns *a = ins->oprnd1();
|
||||
NanoAssert(ins->isF64() && a->isI32());
|
||||
NanoAssert(ins->isD() && a->isI());
|
||||
|
||||
Register rr = prepareResultReg(ins, FpRegs);
|
||||
Register ra = findRegFor(a, GpRegs);
|
||||
@ -1063,7 +1063,7 @@ namespace nanojit
|
||||
|
||||
void Assembler::asm_f2i(LIns *ins) {
|
||||
LIns *a = ins->oprnd1();
|
||||
NanoAssert(ins->isI32() && a->isF64());
|
||||
NanoAssert(ins->isI() && a->isD());
|
||||
|
||||
Register rr = prepareResultReg(ins, GpRegs);
|
||||
Register rb = findRegFor(a, FpRegs);
|
||||
@ -1076,8 +1076,8 @@ namespace nanojit
|
||||
LIns* iftrue = ins->oprnd2();
|
||||
LIns* iffalse = ins->oprnd3();
|
||||
NanoAssert(cond->isCmp());
|
||||
NanoAssert((ins->isop(LIR_cmov) && iftrue->isI32() && iffalse->isI32()) ||
|
||||
(ins->isop(LIR_qcmov) && iftrue->isI64() && iffalse->isI64()));
|
||||
NanoAssert((ins->isop(LIR_cmov) && iftrue->isI() && iffalse->isI()) ||
|
||||
(ins->isop(LIR_qcmov) && iftrue->isQ() && iffalse->isQ()));
|
||||
|
||||
Register rr = prepareResultReg(ins, GpRegs);
|
||||
|
||||
@ -1136,7 +1136,7 @@ namespace nanojit
|
||||
}
|
||||
NanoAssert(cond->isCmp());
|
||||
LOpcode condop = cond->opcode();
|
||||
if (isFCmpOpcode(condop))
|
||||
if (isCmpDOpcode(condop))
|
||||
return asm_fbranch(onFalse, cond, target);
|
||||
|
||||
// We must ensure there's room for the instruction before calculating
|
||||
@ -1235,10 +1235,10 @@ namespace nanojit
|
||||
}
|
||||
|
||||
LOpcode condop = cond->opcode();
|
||||
if (isQCmpOpcode(condop)) {
|
||||
if (isCmpQOpcode(condop)) {
|
||||
CMPQR(ra, rb);
|
||||
} else {
|
||||
NanoAssert(isICmpOpcode(condop));
|
||||
NanoAssert(isCmpIOpcode(condop));
|
||||
CMPLR(ra, rb);
|
||||
}
|
||||
}
|
||||
@ -1249,13 +1249,13 @@ namespace nanojit
|
||||
LIns *b = cond->oprnd2();
|
||||
Register ra = findRegFor(a, GpRegs);
|
||||
int32_t imm = getImm32(b);
|
||||
if (isQCmpOpcode(condop)) {
|
||||
if (isCmpQOpcode(condop)) {
|
||||
if (isS8(imm))
|
||||
CMPQR8(ra, imm);
|
||||
else
|
||||
CMPQRI(ra, imm);
|
||||
} else {
|
||||
NanoAssert(isICmpOpcode(condop));
|
||||
NanoAssert(isCmpIOpcode(condop));
|
||||
if (isS8(imm))
|
||||
CMPLR8(ra, imm);
|
||||
else
|
||||
@ -1381,25 +1381,25 @@ namespace nanojit
|
||||
int d = arDisp(ins);
|
||||
LEAQRM(r, d, FP);
|
||||
}
|
||||
else if (ins->isconst()) {
|
||||
asm_immi(r, ins->imm32(), /*canClobberCCs*/false);
|
||||
else if (ins->isImmI()) {
|
||||
asm_immi(r, ins->immI(), /*canClobberCCs*/false);
|
||||
}
|
||||
else if (ins->isconstq()) {
|
||||
asm_immq(r, ins->imm64(), /*canClobberCCs*/false);
|
||||
else if (ins->isImmQ()) {
|
||||
asm_immq(r, ins->immQ(), /*canClobberCCs*/false);
|
||||
}
|
||||
else if (ins->isconstf()) {
|
||||
asm_immf(r, ins->imm64(), /*canClobberCCs*/false);
|
||||
else if (ins->isImmD()) {
|
||||
asm_immf(r, ins->immQ(), /*canClobberCCs*/false);
|
||||
}
|
||||
else {
|
||||
int d = findMemFor(ins);
|
||||
if (ins->isF64()) {
|
||||
if (ins->isD()) {
|
||||
NanoAssert(IsFpReg(r));
|
||||
MOVSDRM(r, d, FP);
|
||||
} else if (ins->isI64()) {
|
||||
} else if (ins->isQ()) {
|
||||
NanoAssert(IsGpReg(r));
|
||||
MOVQRM(r, d, FP);
|
||||
} else {
|
||||
NanoAssert(ins->isI32());
|
||||
NanoAssert(ins->isI());
|
||||
MOVLRM(r, d, FP);
|
||||
}
|
||||
}
|
||||
@ -1508,7 +1508,7 @@ namespace nanojit
|
||||
}
|
||||
|
||||
void Assembler::asm_load32(LIns *ins) {
|
||||
NanoAssert(ins->isI32());
|
||||
NanoAssert(ins->isI());
|
||||
Register r, b;
|
||||
int32_t d;
|
||||
beginLoadRegs(ins, GpRegs, r, d, b);
|
||||
@ -1537,7 +1537,7 @@ namespace nanojit
|
||||
}
|
||||
|
||||
void Assembler::asm_store64(LOpcode op, LIns *value, int d, LIns *base) {
|
||||
NanoAssert(value->isN64());
|
||||
NanoAssert(value->isQorD());
|
||||
|
||||
switch (op) {
|
||||
case LIR_stqi: {
|
||||
@ -1574,7 +1574,7 @@ namespace nanojit
|
||||
// single-byte stores with REX prefix.
|
||||
const RegisterMask SrcRegs = (op == LIR_stb) ? SingleByteStoreRegs : GpRegs;
|
||||
|
||||
NanoAssert(value->isI32());
|
||||
NanoAssert(value->isI());
|
||||
Register b = getBaseReg(base, d, BaseRegs);
|
||||
Register r = findRegFor(value, SrcRegs & ~rmask(b));
|
||||
|
||||
@ -1596,19 +1596,19 @@ namespace nanojit
|
||||
|
||||
void Assembler::asm_immi(LIns *ins) {
|
||||
Register rr = prepareResultReg(ins, GpRegs);
|
||||
asm_immi(rr, ins->imm32(), /*canClobberCCs*/true);
|
||||
asm_immi(rr, ins->immI(), /*canClobberCCs*/true);
|
||||
freeResourcesOf(ins);
|
||||
}
|
||||
|
||||
void Assembler::asm_immq(LIns *ins) {
|
||||
Register rr = prepareResultReg(ins, GpRegs);
|
||||
asm_immq(rr, ins->imm64(), /*canClobberCCs*/true);
|
||||
asm_immq(rr, ins->immQ(), /*canClobberCCs*/true);
|
||||
freeResourcesOf(ins);
|
||||
}
|
||||
|
||||
void Assembler::asm_immf(LIns *ins) {
|
||||
Register r = prepareResultReg(ins, FpRegs);
|
||||
asm_immf(r, ins->imm64(), /*canClobberCCs*/true);
|
||||
asm_immf(r, ins->immQ(), /*canClobberCCs*/true);
|
||||
freeResourcesOf(ins);
|
||||
}
|
||||
|
||||
|
@ -132,7 +132,7 @@ namespace nanojit
* to emitrr() is 0. In a few cases, a whole instruction is encoded
* this way (eg callrax).
*
* when a disp32, imm32, or imm64 suffix can't fit in an 8-byte
* when a disp32, immI, or imm64 suffix can't fit in an 8-byte
* opcode, then it is written into the code separately and not counted
* in the opcode length.
*/
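Each X64_* constant below keeps its instruction bytes at the top of the 64-bit word, last byte first as the "msb lsb" column suggests, with the byte count in the low bits. A small sketch of unpacking such a constant into memory-order bytes; this is a reading of the table's layout, not the emitter nanojit actually uses:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t op  = 0xC003480000000003ULL;   // X64_addqrr; the trailing 3 is the length
        unsigned len = unsigned(op & 0xFF);     // number of instruction bytes
        for (unsigned i = 0; i < len; i++)      // read the top 'len' bytes back into
            printf("%02x ",                     // memory order: 48 03 c0 (REX.W add rax,rax)
                   unsigned((op >> (8 * (8 - len + i))) & 0xFF));
        printf("\n");
        return 0;
    }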
|
||||
@ -146,23 +146,23 @@ namespace nanojit
|
||||
// 64bit opcode constants
|
||||
// msb lsb len
|
||||
X64_addqrr = 0xC003480000000003LL, // 64bit add r += b
|
||||
X64_addqri = 0xC081480000000003LL, // 64bit add r += int64(imm32)
|
||||
X64_addqri = 0xC081480000000003LL, // 64bit add r += int64(immI)
|
||||
X64_addqr8 = 0x00C0834800000004LL, // 64bit add r += int64(imm8)
|
||||
X64_andqri = 0xE081480000000003LL, // 64bit and r &= int64(imm32)
|
||||
X64_andqri = 0xE081480000000003LL, // 64bit and r &= int64(immI)
X64_andqr8 = 0x00E0834800000004LL, // 64bit and r &= int64(imm8)
X64_orqri = 0xC881480000000003LL, // 64bit or r |= int64(imm32)
X64_orqri = 0xC881480000000003LL, // 64bit or r |= int64(immI)
X64_orqr8 = 0x00C8834800000004LL, // 64bit or r |= int64(imm8)
X64_xorqri = 0xF081480000000003LL, // 64bit xor r ^= int64(imm32)
X64_xorqri = 0xF081480000000003LL, // 64bit xor r ^= int64(immI)
X64_xorqr8 = 0x00F0834800000004LL, // 64bit xor r ^= int64(imm8)
X64_addlri = 0xC081400000000003LL, // 32bit add r += imm32
X64_addlri = 0xC081400000000003LL, // 32bit add r += immI
X64_addlr8 = 0x00C0834000000004LL, // 32bit add r += imm8
X64_andlri = 0xE081400000000003LL, // 32bit and r &= imm32
X64_andlri = 0xE081400000000003LL, // 32bit and r &= immI
X64_andlr8 = 0x00E0834000000004LL, // 32bit and r &= imm8
X64_orlri = 0xC881400000000003LL, // 32bit or r |= imm32
X64_orlri = 0xC881400000000003LL, // 32bit or r |= immI
X64_orlr8 = 0x00C8834000000004LL, // 32bit or r |= imm8
X64_sublri = 0xE881400000000003LL, // 32bit sub r -= imm32
X64_sublri = 0xE881400000000003LL, // 32bit sub r -= immI
X64_sublr8 = 0x00E8834000000004LL, // 32bit sub r -= imm8
X64_xorlri = 0xF081400000000003LL, // 32bit xor r ^= imm32
X64_xorlri = 0xF081400000000003LL, // 32bit xor r ^= immI
X64_xorlr8 = 0x00F0834000000004LL, // 32bit xor r ^= imm8
X64_addrr = 0xC003400000000003LL, // 32bit add r += b
X64_andqrr = 0xC023480000000003LL, // 64bit and r &= b
@ -191,8 +191,8 @@ namespace nanojit
X64_cmovnle = 0xC04F0F4000000004LL, // 32bit conditional mov if (int >) r = b
X64_cmplr = 0xC03B400000000003LL, // 32bit compare r,b
X64_cmpqr = 0xC03B480000000003LL, // 64bit compare r,b
X64_cmplri = 0xF881400000000003LL, // 32bit compare r,imm32
X64_cmpqri = 0xF881480000000003LL, // 64bit compare r,int64(imm32)
X64_cmplri = 0xF881400000000003LL, // 32bit compare r,immI
X64_cmpqri = 0xF881480000000003LL, // 64bit compare r,int64(immI)
X64_cmplr8 = 0x00F8834000000004LL, // 32bit compare r,imm8
X64_cmpqr8 = 0x00F8834800000004LL, // 64bit compare r,int64(imm8)
X64_cvtsi2sd= 0xC02A0F40F2000005LL, // convert int32 to double r = (double) b
@ -205,7 +205,7 @@ namespace nanojit
X64_addsd = 0xC0580F40F2000005LL, // add scalar double r += b
X64_idiv = 0xF8F7400000000003LL, // 32bit signed div (rax = rdx:rax/r, rdx=rdx:rax%r)
X64_imul = 0xC0AF0F4000000004LL, // 32bit signed mul r *= b
X64_imuli = 0xC069400000000003LL, // 32bit signed mul r = b * imm32
X64_imuli = 0xC069400000000003LL, // 32bit signed mul r = b * immI
X64_imul8 = 0x00C06B4000000004LL, // 32bit signed mul r = b * imm8
X64_jmpi = 0x0000000025FF0006LL, // jump *0(rip)
X64_jmp = 0x00000000E9000005LL, // jump near rel32
@ -248,8 +248,8 @@ namespace nanojit
X64_movqspr = 0x0024448948000005LL, // 64bit store gpr -> [rsp+d32] (sib required)
X64_movqr = 0xC08B480000000003LL, // 64bit mov r <- b
X64_movqi = 0xB848000000000002LL, // 64bit mov r <- imm64
X64_movi = 0xB840000000000002LL, // 32bit mov r <- imm32
X64_movqi32 = 0xC0C7480000000003LL, // 64bit mov r <- int64(imm32)
X64_movi = 0xB840000000000002LL, // 32bit mov r <- immI
X64_movqi32 = 0xC0C7480000000003LL, // 64bit mov r <- int64(immI)
X64_movapsr = 0xC0280F4000000004LL, // 128bit mov xmm <- xmm
X64_movqrx = 0xC07E0F4866000005LL, // 64bit mov b <- xmm-r (reverses the usual r/b order)
X64_movqxr = 0xC06E0F4866000005LL, // 64bit mov b -> xmm-r
@ -306,7 +306,7 @@ namespace nanojit
X64_shrqi = 0x00E8C14800000004LL, // 64bit uint right shift r >>= imm8
X64_subqrr = 0xC02B480000000003LL, // 64bit sub r -= b
X64_subrr = 0xC02B400000000003LL, // 32bit sub r -= b
X64_subqri = 0xE881480000000003LL, // 64bit sub r -= int64(imm32)
X64_subqri = 0xE881480000000003LL, // 64bit sub r -= int64(immI)
X64_subqr8 = 0x00E8834800000004LL, // 64bit sub r -= int64(imm8)
X64_ucomisd = 0xC02E0F4066000005LL, // unordered compare scalar double
X64_xorqrr = 0xC033480000000003LL, // 64bit xor r ^= b

@ -854,7 +854,7 @@ namespace nanojit
IMM32( (uint32_t)offset );
*(--_nIns) = 0xE8;
verbose_only(asm_output("call %s",(ci->_name));)
debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();)
debug_only(if (ci->returnType()==ARGTYPE_D) fpu_push();)
}

// indirect call thru register
@ -863,7 +863,7 @@ namespace nanojit
underrunProtect(2);
ALU(0xff, 2, (r));
verbose_only(asm_output("call %s",gpn(r));)
debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();) (void)ci;
debug_only(if (ci->returnType()==ARGTYPE_D) fpu_push();) (void)ci;
}

void Assembler::nInit(AvmCore*)
@ -1049,7 +1049,7 @@ namespace nanojit
uint32_t j = argc-i-1;
ArgType ty = argTypes[j];
Register r = UnspecifiedReg;
if (n < max_regs && ty != ARGTYPE_F) {
if (n < max_regs && ty != ARGTYPE_D) {
r = argRegs[n++]; // tell asm_arg what reg to use
}
asm_arg(ty, ins->arg(j), r, stkd);
@ -1142,7 +1142,7 @@ namespace nanojit
else if (ins->isCmp()) {
prefer = AllowableFlagRegs;
}
else if (ins->isconst()) {
else if (ins->isImmI()) {
prefer = ScratchRegs;
}

@ -1169,11 +1169,11 @@ namespace nanojit
NanoAssert(ins->isInAr()); // must have stack slots allocated
LEA(r, arDisp(ins), FP);

} else if (ins->isconst()) {
asm_immi(r, ins->imm32(), /*canClobberCCs*/false);
} else if (ins->isImmI()) {
asm_immi(r, ins->immI(), /*canClobberCCs*/false);

} else if (ins->isconstf()) {
asm_immf(r, ins->imm64(), ins->imm64f(), /*canClobberCCs*/false);
} else if (ins->isImmD()) {
asm_immf(r, ins->immQ(), ins->immD(), /*canClobberCCs*/false);

} else if (ins->isop(LIR_param) && ins->paramKind() == 0 &&
(arg = ins->paramArg()) >= (abi_regcount = max_abi_regs[_thisfrag->lirbuf->abi])) {
@ -1192,11 +1192,11 @@ namespace nanojit

} else {
int d = findMemFor(ins);
if (ins->isI32()) {
if (ins->isI()) {
NanoAssert(rmask(r) & GpRegs);
LD(r, d, FP);
} else {
NanoAssert(ins->isF64());
NanoAssert(ins->isD());
if (rmask(r) & XmmRegs) {
SSE_LDQ(r, d, FP);
} else {
@ -1209,9 +1209,9 @@ namespace nanojit

void Assembler::asm_store32(LOpcode op, LIns* value, int dr, LIns* base)
{
if (value->isconst()) {
if (value->isImmI()) {
Register rb = getBaseReg(base, dr, GpRegs);
int c = value->imm32();
int c = value->immI();
switch (op) {
case LIR_stb:
ST8i(rb, dr, c);
@ -1235,10 +1235,10 @@ namespace nanojit
GpRegs;

Register ra, rb;
if (base->isconst()) {
if (base->isImmI()) {
// absolute address
rb = UnspecifiedReg;
dr += base->imm32();
dr += base->immI();
ra = findRegFor(value, SrcRegs);
} else {
getBaseReg2(SrcRegs, value, ra, GpRegs, base, rb, dr);
@ -1367,9 +1367,9 @@ namespace nanojit
FST32(pop?1:0, dr, rb);
}

} else if (value->isconstf()) {
STi(rb, dr+4, value->imm64_1());
STi(rb, dr, value->imm64_0());
} else if (value->isImmD()) {
STi(rb, dr+4, value->immQorDhi());
STi(rb, dr, value->immQorDlo());

} else if (value->isop(LIR_ldf)) {
// value is 64bit struct or int64_t, or maybe a double.
@ -1431,7 +1431,7 @@ namespace nanojit
NanoAssert(cond->isCmp());

// Handle float conditions separately.
if (isFCmpOpcode(condop)) {
if (isCmpDOpcode(condop)) {
return asm_fbranch(branchOnFalse, cond, targ);
}

@ -1531,11 +1531,11 @@ namespace nanojit
LInsp lhs = cond->oprnd1();
LInsp rhs = cond->oprnd2();

NanoAssert(lhs->isI32() && rhs->isI32());
NanoAssert(lhs->isI() && rhs->isI());

// Ready to issue the compare.
if (rhs->isconst()) {
int c = rhs->imm32();
if (rhs->isImmI()) {
int c = rhs->immI();
// findRegFor() can call asm_restore() -- asm_restore() better not
// disturb the CCs!
Register r = findRegFor(lhs, GpRegs);
@ -1648,10 +1648,10 @@ namespace nanojit

// Second special case.
// XXX: bug 547125: don't need this once LEA is used for LIR_add in all cases below
if (op == LIR_add && lhs->isop(LIR_alloc) && rhs->isconst()) {
if (op == LIR_add && lhs->isop(LIR_alloc) && rhs->isImmI()) {
// LIR_add(LIR_alloc, LIR_int) -- use lea.
Register rr = prepareResultReg(ins, GpRegs);
int d = findMemFor(lhs) + rhs->imm32();
int d = findMemFor(lhs) + rhs->immI();

LEA(rr, d, FP);

@ -1684,14 +1684,14 @@ namespace nanojit
case LIR_lsh:
case LIR_rsh:
case LIR_ush:
isConstRhs = rhs->isconst();
isConstRhs = rhs->isImmI();
if (!isConstRhs) {
rb = findSpecificRegFor(rhs, ECX);
allow &= ~rmask(rb);
}
break;
default:
isConstRhs = rhs->isconst();
isConstRhs = rhs->isImmI();
if (!isConstRhs && lhs != rhs) {
rb = findRegFor(rhs, allow);
allow &= ~rmask(rb);
@ -1730,7 +1730,7 @@ namespace nanojit
}

} else {
int c = rhs->imm32();
int c = rhs->immI();
switch (op) {
case LIR_add:
// this doesn't set cc's, only use it when cc's not required.
@ -1841,8 +1841,8 @@ namespace nanojit

Register rr = prepareResultReg(ins, GpRegs);

if (base->isconst()) {
intptr_t addr = base->imm32();
if (base->isImmI()) {
intptr_t addr = base->immI();
addr += d;
switch (op) {
case LIR_ldzb:
@ -1889,8 +1889,8 @@ namespace nanojit
// W = ld (add(X, shl(Y, 0)))[d]
//
int scale;
if (rhs->opcode() == LIR_pilsh && rhs->oprnd2()->isconst()) {
scale = rhs->oprnd2()->imm32();
if (rhs->opcode() == LIR_pilsh && rhs->oprnd2()->isImmI()) {
scale = rhs->oprnd2()->immI();
if (scale >= 1 && scale <= 3)
rhs = rhs->oprnd1();
else
@ -1982,7 +1982,7 @@ namespace nanojit
LIns* iffalse = ins->oprnd3();

NanoAssert(condval->isCmp());
NanoAssert(ins->isop(LIR_cmov) && iftrue->isI32() && iffalse->isI32());
NanoAssert(ins->isop(LIR_cmov) && iftrue->isI() && iffalse->isI());

Register rr = prepareResultReg(ins, GpRegs);

@ -2055,7 +2055,7 @@ namespace nanojit
{
Register rr = prepareResultReg(ins, GpRegs);

asm_immi(rr, ins->imm32(), /*canClobberCCs*/true);
asm_immi(rr, ins->immI(), /*canClobberCCs*/true);

freeResourcesOf(ins);
}
@ -2090,7 +2090,7 @@ namespace nanojit
SSE_XORPDr(r, r); // zero r to ensure no dependency stalls
asm_immi(tr, (int)d, canClobberCCs);
} else {
const uint64_t* p = findQuadConstant(q);
const uint64_t* p = findImmDFromPool(q);
LDSDm(r, (const double*)p);
}
} else {
@ -2101,7 +2101,7 @@ namespace nanojit
} else if (d == 1.0) {
FLD1();
} else {
const uint64_t* p = findQuadConstant(q);
const uint64_t* p = findImmDFromPool(q);
FLDQdm((const double*)p);
}
}
@ -2109,11 +2109,11 @@ namespace nanojit

void Assembler::asm_immf(LInsp ins)
{
NanoAssert(ins->isconstf());
NanoAssert(ins->isImmD());
if (ins->isInReg()) {
Register rr = ins->getReg();
NanoAssert(rmask(rr) & FpRegs);
asm_immf(rr, ins->imm64(), ins->imm64f(), /*canClobberCCs*/true);
asm_immf(rr, ins->immQ(), ins->immD(), /*canClobberCCs*/true);
} else {
// Do nothing, will be rematerialized when necessary.
}
@ -2189,11 +2189,11 @@ namespace nanojit
// If 'r' is known, then that's the register we have to put 'ins'
// into.

if (ty == ARGTYPE_I || ty == ARGTYPE_U) {
if (ty == ARGTYPE_I || ty == ARGTYPE_UI) {
if (r != UnspecifiedReg) {
if (ins->isconst()) {
if (ins->isImmI()) {
// Rematerialize the constant.
asm_immi(r, ins->imm32(), /*canClobberCCs*/true);
asm_immi(r, ins->immI(), /*canClobberCCs*/true);
} else if (ins->isInReg()) {
if (r != ins->getReg())
MR(r, ins->getReg());
@ -2220,7 +2220,7 @@ namespace nanojit
}

} else {
NanoAssert(ty == ARGTYPE_F);
NanoAssert(ty == ARGTYPE_D);
asm_farg(ins, stkd);
}
}
@ -2228,9 +2228,9 @@ namespace nanojit
void Assembler::asm_pusharg(LInsp ins)
{
// arg goes on stack
if (!ins->isUsed() && ins->isconst())
if (!ins->isUsed() && ins->isImmI())
{
PUSHi(ins->imm32()); // small const we push directly
PUSHi(ins->immI()); // small const we push directly
}
else if (!ins->isUsed() || ins->isop(LIR_alloc))
{
@ -2251,10 +2251,10 @@ namespace nanojit
void Assembler::asm_stkarg(LInsp ins, int32_t& stkd)
{
// arg goes on stack
if (!ins->isUsed() && ins->isconst())
if (!ins->isUsed() && ins->isImmI())
{
// small const we push directly
STi(SP, stkd, ins->imm32());
STi(SP, stkd, ins->immI());
}
else {
Register ra;
@ -2270,7 +2270,7 @@ namespace nanojit

void Assembler::asm_farg(LInsp ins, int32_t& stkd)
{
NanoAssert(ins->isF64());
NanoAssert(ins->isD());
Register r = findRegFor(ins, FpRegs);
if (rmask(r) & XmmRegs) {
SSE_STQ(stkd, SP, r);
@ -2362,8 +2362,8 @@ namespace nanojit
NanoAssert(FST0 == rr);
NanoAssert(!lhs->isInReg() || FST0 == lhs->getReg());

if (rhs->isconstf()) {
const uint64_t* p = findQuadConstant(rhs->imm64());
if (rhs->isImmD()) {
const uint64_t* p = findImmDFromPool(rhs->immQ());

switch (op) {
case LIR_fadd: FADDdm( (const double*)p); break;
@ -2545,10 +2545,10 @@ namespace nanojit
void Assembler::asm_fcmp(LIns *cond)
{
LOpcode condop = cond->opcode();
NanoAssert(isFCmpOpcode(condop));
NanoAssert(isCmpDOpcode(condop));
LIns* lhs = cond->oprnd1();
LIns* rhs = cond->oprnd2();
NanoAssert(lhs->isF64() && rhs->isF64());
NanoAssert(lhs->isD() && rhs->isD());

if (_config.i386_sse2) {
// First, we convert (a < b) into (b > a), and (a <= b) into (b >= a).
@ -2689,9 +2689,9 @@ namespace nanojit
} else {
TEST_AH(mask);
FNSTSW_AX(); // requires EAX to be free
if (rhs->isconstf())
if (rhs->isImmD())
{
const uint64_t* p = findQuadConstant(rhs->imm64());
const uint64_t* p = findImmDFromPool(rhs->immQ());
FCOMdm((pop?1:0), (const double*)p);
}
else

@ -96,7 +96,7 @@ namespace nanojit
#define NJ_MAX_STACK_ENTRY 4096
#define NJ_MAX_PARAMETERS 1

#define NJ_USES_QUAD_CONSTANTS 1
#define NJ_USES_IMMD_POOL 1

#define NJ_JTBL_SUPPORTED 1
#define NJ_EXPANDED_LOADSTORE_SUPPORTED 1