Bug 564941 - Rename LOpcode enums in lirasm.cpp (r=nnethercote+)

--HG--
extra : convert_revision : e165192e6b4adc66ac4c395c8e8e80a66d7f1117
Edwin Smith 2010-05-10 21:00:15 -04:00
parent 4d87ad98e3
commit fd1b388c23


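Editorial note: the rename is mechanical throughout — the operand/result type moves from a prefix (or an implicit default) to a consistent one-letter suffix: i for int32, q for int64 (quad), d for double, with u marking the unsigned variants. A minimal illustrative sketch of that mapping, drawn from the hunks below; the OpcodeRename table is purely for illustration and is not part of the patch.

// Illustrative only (not part of lirasm.cpp): a sample of the renames in
// this patch, written as an old-name -> new-name table.
struct OpcodeRename { const char* oldName; const char* newName; };

static const OpcodeRename sampleRenames[] = {
    { "LIR_icall", "LIR_calli" },  // call returning int32
    { "LIR_qcall", "LIR_callq" },  // call returning int64 (quad)
    { "LIR_fcall", "LIR_calld" },  // call returning double
    { "LIR_add",   "LIR_addi"  },  // int32 add
    { "LIR_qiadd", "LIR_addq"  },  // int64 add
    { "LIR_fadd",  "LIR_addd"  },  // double add
    { "LIR_ult",   "LIR_ltui"  },  // unsigned int32 less-than
    { "LIR_qult",  "LIR_ltuq"  },  // unsigned int64 less-than
    { "LIR_int",   "LIR_immi"  },  // int32 immediate
    { "LIR_quad",  "LIR_immq"  },  // int64 immediate
    { "LIR_float", "LIR_immd"  },  // double immediate
};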
@@ -704,10 +704,10 @@ FragmentAssembler::assemble_call(const string &op)
// Select return type from opcode.
ty = 0;
if (mOpcode == LIR_icall) ty = ARGTYPE_LO;
else if (mOpcode == LIR_fcall) ty = ARGTYPE_D;
if (mOpcode == LIR_calli) ty = ARGTYPE_LO;
else if (mOpcode == LIR_calld) ty = ARGTYPE_D;
#ifdef NANOJIT_64BIT
else if (mOpcode == LIR_qcall) ty = ARGTYPE_Q;
else if (mOpcode == LIR_callq) ty = ARGTYPE_Q;
#endif
else nyi("callh");
ci->_typesig |= retMask(ty);
@@ -929,83 +929,83 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
ins = mLir->ins0(mOpcode);
break;
case LIR_live:
CASE64(LIR_qlive:)
case LIR_flive:
case LIR_neg:
case LIR_fneg:
case LIR_not:
CASESF(LIR_qlo:)
CASESF(LIR_qhi:)
case LIR_livei:
CASE64(LIR_liveq:)
case LIR_lived:
case LIR_negi:
case LIR_negd:
case LIR_noti:
CASESF(LIR_dlo2i:)
CASESF(LIR_dhi2i:)
CASE64(LIR_q2i:)
CASE64(LIR_i2q:)
CASE64(LIR_u2q:)
case LIR_i2f:
case LIR_u2f:
case LIR_f2i:
CASE64(LIR_ui2uq:)
case LIR_i2d:
case LIR_ui2d:
case LIR_d2i:
#if defined NANOJIT_IA32 || defined NANOJIT_X64
case LIR_mod:
case LIR_modi:
#endif
need(1);
ins = mLir->ins1(mOpcode,
ref(mTokens[0]));
break;
case LIR_add:
case LIR_sub:
case LIR_mul:
case LIR_addi:
case LIR_subi:
case LIR_muli:
#if defined NANOJIT_IA32 || defined NANOJIT_X64
case LIR_div:
case LIR_divi:
#endif
case LIR_fadd:
case LIR_fsub:
case LIR_fmul:
case LIR_fdiv:
CASE64(LIR_qiadd:)
case LIR_and:
case LIR_or:
case LIR_xor:
CASE64(LIR_qiand:)
CASE64(LIR_qior:)
CASE64(LIR_qxor:)
case LIR_lsh:
case LIR_rsh:
case LIR_ush:
CASE64(LIR_qilsh:)
CASE64(LIR_qirsh:)
CASE64(LIR_qursh:)
case LIR_eq:
case LIR_lt:
case LIR_gt:
case LIR_le:
case LIR_ge:
case LIR_ult:
case LIR_ugt:
case LIR_ule:
case LIR_uge:
case LIR_feq:
case LIR_flt:
case LIR_fgt:
case LIR_fle:
case LIR_fge:
CASE64(LIR_qeq:)
CASE64(LIR_qlt:)
CASE64(LIR_qgt:)
CASE64(LIR_qle:)
CASE64(LIR_qge:)
CASE64(LIR_qult:)
CASE64(LIR_qugt:)
CASE64(LIR_qule:)
CASE64(LIR_quge:)
CASESF(LIR_qjoin:)
case LIR_addd:
case LIR_subd:
case LIR_muld:
case LIR_divd:
CASE64(LIR_addq:)
case LIR_andi:
case LIR_ori:
case LIR_xori:
CASE64(LIR_andq:)
CASE64(LIR_orq:)
CASE64(LIR_xorq:)
case LIR_lshi:
case LIR_rshi:
case LIR_rshui:
CASE64(LIR_lshq:)
CASE64(LIR_rshq:)
CASE64(LIR_rshuq:)
case LIR_eqi:
case LIR_lti:
case LIR_gti:
case LIR_lei:
case LIR_gei:
case LIR_ltui:
case LIR_gtui:
case LIR_leui:
case LIR_geui:
case LIR_eqd:
case LIR_ltd:
case LIR_gtd:
case LIR_led:
case LIR_ged:
CASE64(LIR_eqq:)
CASE64(LIR_ltq:)
CASE64(LIR_gtq:)
CASE64(LIR_leq:)
CASE64(LIR_geq:)
CASE64(LIR_ltuq:)
CASE64(LIR_gtuq:)
CASE64(LIR_leuq:)
CASE64(LIR_geuq:)
CASESF(LIR_ii2d:)
need(2);
ins = mLir->ins2(mOpcode,
ref(mTokens[0]),
ref(mTokens[1]));
break;
case LIR_cmov:
CASE64(LIR_qcmov:)
case LIR_cmovi:
CASE64(LIR_cmovq:)
need(3);
ins = mLir->ins3(mOpcode,
ref(mTokens[0]),
@@ -1022,31 +1022,31 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
ins = assemble_jump(/*isCond*/true);
break;
case LIR_int:
case LIR_immi:
need(1);
ins = mLir->insImmI(imm(mTokens[0]));
break;
#ifdef NANOJIT_64BIT
case LIR_quad:
case LIR_immq:
need(1);
ins = mLir->insImmQ(lquad(mTokens[0]));
break;
#endif
case LIR_float:
case LIR_immd:
need(1);
ins = mLir->insImmD(immf(mTokens[0]));
break;
#if NJ_EXPANDED_LOADSTORE_SUPPORTED
case LIR_stb:
case LIR_sts:
case LIR_st32f:
case LIR_sti2c:
case LIR_sti2s:
case LIR_std2f:
#endif
case LIR_sti:
CASE64(LIR_stqi:)
case LIR_stfi:
CASE64(LIR_stq:)
case LIR_std:
need(3);
ins = mLir->insStore(mOpcode, ref(mTokens[0]),
ref(mTokens[1]),
@@ -1054,29 +1054,29 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
break;
#if NJ_EXPANDED_LOADSTORE_SUPPORTED
case LIR_ldsb:
case LIR_ldss:
case LIR_ld32f:
case LIR_ldc2i:
case LIR_lds2i:
case LIR_ldf2d:
#endif
case LIR_ldzb:
case LIR_ldzs:
case LIR_ld:
case LIR_lduc2ui:
case LIR_ldus2ui:
case LIR_ldi:
CASE64(LIR_ldq:)
case LIR_ldf:
case LIR_ldd:
ins = assemble_load();
break;
// XXX: insParam gives the one appropriate for the platform. Eg. if
// you specify qparam on x86 you'll end up with iparam anyway. Fix
// this.
case LIR_param:
case LIR_paramp:
need(2);
ins = mLir->insParam(imm(mTokens[0]),
imm(mTokens[1]));
break;
// XXX: similar to iparam/qparam above.
case LIR_alloc:
case LIR_allocp:
need(1);
ins = mLir->insAlloc(imm(mTokens[0]));
break;
@@ -1095,24 +1095,24 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
ins = assemble_guard(/*isCond*/true);
break;
case LIR_addxov:
case LIR_subxov:
case LIR_mulxov:
case LIR_addxovi:
case LIR_subxovi:
case LIR_mulxovi:
ins = assemble_guard_xov();
break;
case LIR_icall:
CASESF(LIR_callh:)
case LIR_fcall:
CASE64(LIR_qcall:)
case LIR_calli:
CASESF(LIR_hcalli:)
case LIR_calld:
CASE64(LIR_callq:)
ins = assemble_call(op);
break;
case LIR_ret:
case LIR_reti:
ins = assemble_ret(RT_INT32);
break;
case LIR_fret:
case LIR_retd:
ins = assemble_ret(RT_FLOAT);
break;
@@ -1121,7 +1121,7 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
case LIR_line:
case LIR_xtbl:
case LIR_jtbl:
CASE64(LIR_qret:)
CASE64(LIR_retq:)
nyi(op);
break;
@@ -1304,17 +1304,17 @@ const CallInfo ci_V_IQF = CI(f_V_IQF, argMask(ARGTYPE_I, 1, 3) |
// sufficiently big that it's spread across multiple chunks.
//
// The following instructions aren't generated yet:
// - LIR_iparam/LIR_qparam (hard to test beyond what is auto-generated in fragment
// - LIR_parami/LIR_paramq (hard to test beyond what is auto-generated in fragment
// prologues)
// - LIR_live/LIR_qlive/LIR_flive
// - LIR_callh
// - LIR_x/LIR_xt/LIR_xf/LIR_xtbl/LIR_addxov/LIR_subxov/LIR_mulxov (hard to
// - LIR_livei/LIR_liveq/LIR_lived
// - LIR_hcalli
// - LIR_x/LIR_xt/LIR_xf/LIR_xtbl/LIR_addxovi/LIR_subxovi/LIR_mulxovi (hard to
// test without having multiple fragments; when we only have one fragment
// we don't really want to leave it early)
// - LIR_ret/LIR_qret/LIR_fret (hard to test without having multiple fragments)
// - LIR_reti/LIR_retq/LIR_retd (hard to test without having multiple fragments)
// - LIR_j/LIR_jt/LIR_jf/LIR_jtbl/LIR_label
// - LIR_file/LIR_line (#ifdef VTUNE only)
// - LIR_fmod (not implemented in NJ backends)
// - LIR_modd (not implemented in NJ backends)
//
// Other limitations:
// - Loads always use accSet==ACC_LOAD_ANY
@@ -1331,121 +1331,121 @@ FragmentAssembler::assembleRandomFragment(int nIns)
vector<LIns*> M8ps; // 8+ byte allocs
vector<LOpcode> I_I_ops;
I_I_ops.push_back(LIR_neg);
I_I_ops.push_back(LIR_not);
I_I_ops.push_back(LIR_negi);
I_I_ops.push_back(LIR_noti);
// Nb: there are no Q_Q_ops.
vector<LOpcode> D_D_ops;
D_D_ops.push_back(LIR_fneg);
D_D_ops.push_back(LIR_negd);
vector<LOpcode> I_II_ops;
I_II_ops.push_back(LIR_add);
I_II_ops.push_back(LIR_sub);
I_II_ops.push_back(LIR_mul);
I_II_ops.push_back(LIR_addi);
I_II_ops.push_back(LIR_subi);
I_II_ops.push_back(LIR_muli);
#if defined NANOJIT_IA32 || defined NANOJIT_X64
I_II_ops.push_back(LIR_div);
I_II_ops.push_back(LIR_mod);
I_II_ops.push_back(LIR_divi);
I_II_ops.push_back(LIR_modi);
#endif
I_II_ops.push_back(LIR_and);
I_II_ops.push_back(LIR_or);
I_II_ops.push_back(LIR_xor);
I_II_ops.push_back(LIR_lsh);
I_II_ops.push_back(LIR_rsh);
I_II_ops.push_back(LIR_ush);
I_II_ops.push_back(LIR_andi);
I_II_ops.push_back(LIR_ori);
I_II_ops.push_back(LIR_xori);
I_II_ops.push_back(LIR_lshi);
I_II_ops.push_back(LIR_rshi);
I_II_ops.push_back(LIR_rshui);
#ifdef NANOJIT_64BIT
vector<LOpcode> Q_QQ_ops;
Q_QQ_ops.push_back(LIR_qiadd);
Q_QQ_ops.push_back(LIR_qiand);
Q_QQ_ops.push_back(LIR_qior);
Q_QQ_ops.push_back(LIR_qxor);
Q_QQ_ops.push_back(LIR_addq);
Q_QQ_ops.push_back(LIR_andq);
Q_QQ_ops.push_back(LIR_orq);
Q_QQ_ops.push_back(LIR_xorq);
vector<LOpcode> Q_QI_ops;
Q_QI_ops.push_back(LIR_qilsh);
Q_QI_ops.push_back(LIR_qirsh);
Q_QI_ops.push_back(LIR_qursh);
Q_QI_ops.push_back(LIR_lshq);
Q_QI_ops.push_back(LIR_rshq);
Q_QI_ops.push_back(LIR_rshuq);
#endif
vector<LOpcode> D_DD_ops;
D_DD_ops.push_back(LIR_fadd);
D_DD_ops.push_back(LIR_fsub);
D_DD_ops.push_back(LIR_fmul);
D_DD_ops.push_back(LIR_fdiv);
D_DD_ops.push_back(LIR_addd);
D_DD_ops.push_back(LIR_subd);
D_DD_ops.push_back(LIR_muld);
D_DD_ops.push_back(LIR_divd);
vector<LOpcode> I_BII_ops;
I_BII_ops.push_back(LIR_cmov);
I_BII_ops.push_back(LIR_cmovi);
#ifdef NANOJIT_64BIT
vector<LOpcode> Q_BQQ_ops;
Q_BQQ_ops.push_back(LIR_qcmov);
Q_BQQ_ops.push_back(LIR_cmovq);
#endif
vector<LOpcode> B_II_ops;
B_II_ops.push_back(LIR_eq);
B_II_ops.push_back(LIR_lt);
B_II_ops.push_back(LIR_gt);
B_II_ops.push_back(LIR_le);
B_II_ops.push_back(LIR_ge);
B_II_ops.push_back(LIR_ult);
B_II_ops.push_back(LIR_ugt);
B_II_ops.push_back(LIR_ule);
B_II_ops.push_back(LIR_uge);
B_II_ops.push_back(LIR_eqi);
B_II_ops.push_back(LIR_lti);
B_II_ops.push_back(LIR_gti);
B_II_ops.push_back(LIR_lei);
B_II_ops.push_back(LIR_gei);
B_II_ops.push_back(LIR_ltui);
B_II_ops.push_back(LIR_gtui);
B_II_ops.push_back(LIR_leui);
B_II_ops.push_back(LIR_geui);
#ifdef NANOJIT_64BIT
vector<LOpcode> B_QQ_ops;
B_QQ_ops.push_back(LIR_qeq);
B_QQ_ops.push_back(LIR_qlt);
B_QQ_ops.push_back(LIR_qgt);
B_QQ_ops.push_back(LIR_qle);
B_QQ_ops.push_back(LIR_qge);
B_QQ_ops.push_back(LIR_qult);
B_QQ_ops.push_back(LIR_qugt);
B_QQ_ops.push_back(LIR_qule);
B_QQ_ops.push_back(LIR_quge);
B_QQ_ops.push_back(LIR_eqq);
B_QQ_ops.push_back(LIR_ltq);
B_QQ_ops.push_back(LIR_gtq);
B_QQ_ops.push_back(LIR_leq);
B_QQ_ops.push_back(LIR_geq);
B_QQ_ops.push_back(LIR_ltuq);
B_QQ_ops.push_back(LIR_gtuq);
B_QQ_ops.push_back(LIR_leuq);
B_QQ_ops.push_back(LIR_geuq);
#endif
vector<LOpcode> B_DD_ops;
B_DD_ops.push_back(LIR_feq);
B_DD_ops.push_back(LIR_flt);
B_DD_ops.push_back(LIR_fgt);
B_DD_ops.push_back(LIR_fle);
B_DD_ops.push_back(LIR_fge);
B_DD_ops.push_back(LIR_eqd);
B_DD_ops.push_back(LIR_ltd);
B_DD_ops.push_back(LIR_gtd);
B_DD_ops.push_back(LIR_led);
B_DD_ops.push_back(LIR_ged);
#ifdef NANOJIT_64BIT
vector<LOpcode> Q_I_ops;
Q_I_ops.push_back(LIR_i2q);
Q_I_ops.push_back(LIR_u2q);
Q_I_ops.push_back(LIR_ui2uq);
vector<LOpcode> I_Q_ops;
I_Q_ops.push_back(LIR_q2i);
#endif
vector<LOpcode> D_I_ops;
D_I_ops.push_back(LIR_i2f);
D_I_ops.push_back(LIR_u2f);
D_I_ops.push_back(LIR_i2d);
D_I_ops.push_back(LIR_ui2d);
vector<LOpcode> I_F_ops;
#if NJ_SOFTFLOAT_SUPPORTED
I_F_ops.push_back(LIR_qlo);
I_F_ops.push_back(LIR_qhi);
I_F_ops.push_back(LIR_dlo2i);
I_F_ops.push_back(LIR_dhi2i);
#endif
I_F_ops.push_back(LIR_f2i);
I_F_ops.push_back(LIR_d2i);
vector<LOpcode> D_II_ops;
#if NJ_SOFTFLOAT_SUPPORTED
D_II_ops.push_back(LIR_qjoin);
D_II_ops.push_back(LIR_ii2d);
#endif
vector<LOpcode> I_loads;
I_loads.push_back(LIR_ld); // weight LIR_ld more heavily
I_loads.push_back(LIR_ld);
I_loads.push_back(LIR_ld);
I_loads.push_back(LIR_ldzb);
I_loads.push_back(LIR_ldzs);
I_loads.push_back(LIR_ldi); // weight LIR_ldi more heavily
I_loads.push_back(LIR_ldi);
I_loads.push_back(LIR_ldi);
I_loads.push_back(LIR_lduc2ui);
I_loads.push_back(LIR_ldus2ui);
#if NJ_EXPANDED_LOADSTORE_SUPPORTED
I_loads.push_back(LIR_ldsb);
I_loads.push_back(LIR_ldss);
I_loads.push_back(LIR_ldc2i);
I_loads.push_back(LIR_lds2i);
#endif
#ifdef NANOJIT_64BIT
@@ -1454,10 +1454,10 @@ FragmentAssembler::assembleRandomFragment(int nIns)
#endif
vector<LOpcode> D_loads;
D_loads.push_back(LIR_ldf);
D_loads.push_back(LIR_ldd);
#if NJ_EXPANDED_LOADSTORE_SUPPORTED
// this loads a 32-bit float and expands it to 64-bit float
D_loads.push_back(LIR_ld32f);
D_loads.push_back(LIR_ldf2d);
#endif
enum LInsClass {
@@ -1490,7 +1490,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
}
// Used to keep track of how much stack we've explicitly used via
// LIR_alloc. We then need to keep some reserve for spills as well.
// LIR_allocp. We then need to keep some reserve for spills as well.
const size_t stackSzB = NJ_MAX_STACK_ENTRY * 4;
const size_t spillStackSzB = 1024;
const size_t maxExplicitlyUsedStackSzB = stackSzB - spillStackSzB;
@@ -1632,9 +1632,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)
LIns* lhs = rndPick(Is);
LIns* rhs = rndPick(Is);
#if defined NANOJIT_IA32 || defined NANOJIT_X64
if (op == LIR_div || op == LIR_mod) {
if (op == LIR_divi || op == LIR_modi) {
// XXX: ExprFilter can't fold a div/mod with constant
// args, due to the horrible semantics of LIR_mod. So we
// args, due to the horrible semantics of LIR_modi. So we
// just don't generate anything if we hit that case.
if (!lhs->isImmI() || !rhs->isImmI()) {
// If the divisor is positive, no problems. If it's zero, we get an
@@ -1643,15 +1643,15 @@ FragmentAssembler::assembleRandomFragment(int nIns)
// allow positive divisors, ie. compute: lhs / (rhs > 0 ? rhs : -k),
// where k is a random number in the range 2..100 (this ensures we have
// some negative divisors).
LIns* gt0 = mLir->ins2ImmI(LIR_gt, rhs, 0);
LIns* rhs2 = mLir->ins3(LIR_cmov, gt0, rhs, mLir->insImmI(-((int32_t)rnd(99)) - 2));
LIns* div = mLir->ins2(LIR_div, lhs, rhs2);
if (op == LIR_div) {
LIns* gt0 = mLir->ins2ImmI(LIR_gti, rhs, 0);
LIns* rhs2 = mLir->ins3(LIR_cmovi, gt0, rhs, mLir->insImmI(-((int32_t)rnd(99)) - 2));
LIns* div = mLir->ins2(LIR_divi, lhs, rhs2);
if (op == LIR_divi) {
ins = div;
addOrReplace(Is, ins);
n += 5;
} else {
ins = mLir->ins1(LIR_mod, div);
ins = mLir->ins1(LIR_modi, div);
// Add 'div' to the operands too so it might be used again, because
// the code generated is different as compared to the case where 'div'
// isn't used again.
@@ -1940,7 +1940,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
// Return 0.
mReturnTypeBits |= RT_INT32;
mLir->ins1(LIR_ret, mLir->insImmI(0));
mLir->ins1(LIR_reti, mLir->insImmI(0));
endFragment();
}