Bug 562597 - nanojit: more LOpcode-related renamings. r=edwsmith.
--HG-- extra : convert_revision : 96391e53a0b48bd53fa98db68da86f76a55e9bd0
parent 6d5053e9df
commit 7a0c7aaa29
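The renamings continue the single-letter type convention used by LTy and ArgType (V = void, I = 32-bit int, Q = 64-bit int "quad", D = double): lirasm's immediate parsers imm/lquad/immf become immI/immQ/immD; the backend hooks asm_i2f/asm_u2f/asm_f2i/asm_fcmp/asm_fcond/asm_fbranch/asm_immf become asm_i2d/asm_ui2d/asm_d2i/asm_cmpd/asm_condd/asm_branchd/asm_immd; the store layout LInsSti (with LRK_Sti, isLInsSti, initLInsSti) is generalized to LInsSt now that it covers all store opcodes; and the double-immediate accessors split into immDlo/immDhi/immD/immDasQ, with immQlo/immQ reserved for 64-bit integer immediates.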
@@ -380,7 +380,7 @@ lexical_cast(in arg)
 }
 
 int32_t
-imm(const string &s)
+immI(const string &s)
 {
     stringstream tmp(s);
     int32_t ret;
@@ -392,7 +392,7 @@ imm(const string &s)
 }
 
 uint64_t
-lquad(const string &s)
+immQ(const string &s)
 {
     stringstream tmp(s);
     uint64_t ret;
@@ -404,7 +404,7 @@ lquad(const string &s)
 }
 
 double
-immf(const string &s)
+immD(const string &s)
 {
     return lexical_cast<double>(s);
 }
@@ -626,7 +626,7 @@ FragmentAssembler::assemble_load()
         mTokens[1].find_first_of("0123456789") == 0) {
         return mLir->insLoad(mOpcode,
                              ref(mTokens[0]),
-                             imm(mTokens[1]), ACC_LOAD_ANY);
+                             immI(mTokens[1]), ACC_LOAD_ANY);
     }
     bad("immediate offset required for load");
     return NULL;  // not reached
@@ -704,7 +704,7 @@ FragmentAssembler::assemble_call(const string &op)
 
         // Select return type from opcode.
         ty = 0;
-        if      (mOpcode == LIR_calli) ty = ARGTYPE_LO;
+        if      (mOpcode == LIR_calli) ty = ARGTYPE_I;
         else if (mOpcode == LIR_calld) ty = ARGTYPE_D;
 #ifdef NANOJIT_64BIT
         else if (mOpcode == LIR_callq) ty = ARGTYPE_Q;
@@ -1024,19 +1024,19 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
 
           case LIR_immi:
             need(1);
-            ins = mLir->insImmI(imm(mTokens[0]));
+            ins = mLir->insImmI(immI(mTokens[0]));
            break;
 
 #ifdef NANOJIT_64BIT
          case LIR_immq:
            need(1);
-            ins = mLir->insImmQ(lquad(mTokens[0]));
+            ins = mLir->insImmQ(immQ(mTokens[0]));
            break;
 #endif
 
          case LIR_immd:
            need(1);
-            ins = mLir->insImmD(immf(mTokens[0]));
+            ins = mLir->insImmD(immD(mTokens[0]));
            break;
 
 #if NJ_EXPANDED_LOADSTORE_SUPPORTED
@@ -1050,7 +1050,7 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
            need(3);
            ins = mLir->insStore(mOpcode, ref(mTokens[0]),
                                 ref(mTokens[1]),
-                                 imm(mTokens[2]), ACC_STORE_ANY);
+                                 immI(mTokens[2]), ACC_STORE_ANY);
            break;
 
 #if NJ_EXPANDED_LOADSTORE_SUPPORTED
@@ -1071,14 +1071,14 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
          // this.
          case LIR_paramp:
            need(2);
-            ins = mLir->insParam(imm(mTokens[0]),
-                                 imm(mTokens[1]));
+            ins = mLir->insParam(immI(mTokens[0]),
+                                 immI(mTokens[1]));
            break;
 
          // XXX: similar to iparam/qparam above.
          case LIR_allocp:
            need(1);
-            ins = mLir->insAlloc(imm(mTokens[0]));
+            ins = mLir->insAlloc(immI(mTokens[0]));
            break;
 
          case LIR_skip:
@@ -1326,7 +1326,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
     vector<LIns*> Bs;       // boolean values, ie. 32-bit int values produced by tests
     vector<LIns*> Is;       // 32-bit int values
     vector<LIns*> Qs;       // 64-bit int values
-    vector<LIns*> Fs;       // 64-bit float values
+    vector<LIns*> Ds;       // 64-bit double values
     vector<LIns*> M4s;      // 4 byte allocs
     vector<LIns*> M8ps;     // 8+ byte allocs
 
@@ -1425,12 +1425,12 @@ FragmentAssembler::assembleRandomFragment(int nIns)
     D_I_ops.push_back(LIR_i2d);
     D_I_ops.push_back(LIR_ui2d);
 
-    vector<LOpcode> I_F_ops;
+    vector<LOpcode> I_D_ops;
 #if NJ_SOFTFLOAT_SUPPORTED
-    I_F_ops.push_back(LIR_dlo2i);
-    I_F_ops.push_back(LIR_dhi2i);
+    I_D_ops.push_back(LIR_dlo2i);
+    I_D_ops.push_back(LIR_dhi2i);
 #endif
-    I_F_ops.push_back(LIR_d2i);
+    I_D_ops.push_back(LIR_d2i);
 
     vector<LOpcode> D_II_ops;
 #if NJ_SOFTFLOAT_SUPPORTED
@@ -1603,7 +1603,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
                 break;
            }
            ins = mLir->insImmD(imm64f);
-            addOrReplace(Fs, ins);
+            addOrReplace(Ds, ins);
            n++;
            break;
        }
@@ -1619,9 +1619,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)
        // case LOP_Q_Q:  no instruction in this category
 
        case LOP_D_D:
-            if (!Fs.empty()) {
-                ins = mLir->ins1(rndPick(D_D_ops), rndPick(Fs));
-                addOrReplace(Fs, ins);
+            if (!Ds.empty()) {
+                ins = mLir->ins1(rndPick(D_D_ops), rndPick(Ds));
+                addOrReplace(Ds, ins);
                n++;
            }
            break;
@@ -1689,9 +1689,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)
 #endif
 
        case LOP_D_DD:
-            if (!Fs.empty()) {
-                ins = mLir->ins2(rndPick(D_DD_ops), rndPick(Fs), rndPick(Fs));
-                addOrReplace(Fs, ins);
+            if (!Ds.empty()) {
+                ins = mLir->ins2(rndPick(D_DD_ops), rndPick(Ds), rndPick(Ds));
+                addOrReplace(Ds, ins);
                n++;
            }
            break;
@@ -1733,12 +1733,12 @@ FragmentAssembler::assembleRandomFragment(int nIns)
 #endif
 
        case LOP_B_DD:
-            if (!Fs.empty()) {
-                ins = mLir->ins2(rndPick(B_DD_ops), rndPick(Fs), rndPick(Fs));
+            if (!Ds.empty()) {
+                ins = mLir->ins2(rndPick(B_DD_ops), rndPick(Ds), rndPick(Ds));
                // XXX: we don't push the result, because most (all?) of the
                // backends currently can't handle cmovs/qcmovs that take
                // float comparisons for the test (see bug 520944).  This means
-                // that all B_FF values are dead, unfortunately.
+                // that all B_DD values are dead, unfortunately.
                //addOrReplace(Bs, ins);
                n++;
            }
@@ -1757,7 +1757,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
        case LOP_D_I:
            if (!Is.empty()) {
                ins = mLir->ins1(rndPick(D_I_ops), rndPick(Is));
-                addOrReplace(Fs, ins);
+                addOrReplace(Ds, ins);
                n++;
            }
            break;
@@ -1775,8 +1775,8 @@ FragmentAssembler::assembleRandomFragment(int nIns)
        case LOP_I_D:
 // XXX: NativeX64 doesn't implement qhi yet (and it may not need to).
 #if !defined NANOJIT_X64
-            if (!Fs.empty()) {
-                ins = mLir->ins1(rndPick(I_F_ops), rndPick(Fs));
+            if (!Ds.empty()) {
+                ins = mLir->ins1(rndPick(I_D_ops), rndPick(Ds));
                addOrReplace(Is, ins);
                n++;
            }
@@ -1786,7 +1786,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
        case LOP_D_II:
            if (!Is.empty() && !D_II_ops.empty()) {
                ins = mLir->ins2(rndPick(D_II_ops), rndPick(Is), rndPick(Is));
-                addOrReplace(Fs, ins);
+                addOrReplace(Ds, ins);
                n++;
            }
            break;
@@ -1817,7 +1817,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
            if (!M8ps.empty()) {
                LIns* base = rndPick(M8ps);
                ins = mLir->insLoad(rndPick(D_loads), base, rndOffset64(base->size()), ACC_LOAD_ANY);
-                addOrReplace(Fs, ins);
+                addOrReplace(Ds, ins);
                n++;
            }
            break;
@@ -1843,9 +1843,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)
 #endif
 
        case LST_D:
-            if (!M8ps.empty() && !Fs.empty()) {
+            if (!M8ps.empty() && !Ds.empty()) {
                LIns* base = rndPick(M8ps);
-                mLir->insStore(rndPick(Fs), base, rndOffset64(base->size()), ACC_STORE_ANY);
+                mLir->insStore(rndPick(Ds), base, rndOffset64(base->size()), ACC_STORE_ANY);
                n++;
            }
            break;
@@ -1891,29 +1891,29 @@ FragmentAssembler::assembleRandomFragment(int nIns)
 #endif
 
        case LCALL_D_D3:
-            if (!Fs.empty()) {
-                LIns* args[3] = { rndPick(Fs), rndPick(Fs), rndPick(Fs) };
+            if (!Ds.empty()) {
+                LIns* args[3] = { rndPick(Ds), rndPick(Ds), rndPick(Ds) };
                ins = mLir->insCall(&ci_F_F3, args);
-                addOrReplace(Fs, ins);
+                addOrReplace(Ds, ins);
                n++;
            }
            break;
 
        case LCALL_D_D8:
-            if (!Fs.empty()) {
-                LIns* args[8] = { rndPick(Fs), rndPick(Fs), rndPick(Fs), rndPick(Fs),
-                                  rndPick(Fs), rndPick(Fs), rndPick(Fs), rndPick(Fs) };
+            if (!Ds.empty()) {
+                LIns* args[8] = { rndPick(Ds), rndPick(Ds), rndPick(Ds), rndPick(Ds),
+                                  rndPick(Ds), rndPick(Ds), rndPick(Ds), rndPick(Ds) };
                ins = mLir->insCall(&ci_F_F8, args);
-                addOrReplace(Fs, ins);
+                addOrReplace(Ds, ins);
                n++;
            }
            break;
 
 #ifdef NANOJIT_64BIT
        case LCALL_V_IQD:
-            if (!Is.empty() && !Qs.empty() && !Fs.empty()) {
+            if (!Is.empty() && !Qs.empty() && !Ds.empty()) {
                // Nb: args[] holds the args in reverse order... sigh.
-                LIns* args[3] = { rndPick(Fs), rndPick(Qs), rndPick(Is) };
+                LIns* args[3] = { rndPick(Ds), rndPick(Qs), rndPick(Is) };
                ins = mLir->insCall(&ci_V_IQF, args);
                n++;
            }
@@ -1428,7 +1428,7 @@ namespace nanojit
            case LIR_immd:
                countlir_imm();
                if (ins->isExtant()) {
-                    asm_immf(ins);
+                    asm_immd(ins);
                }
                break;
 
@@ -1588,7 +1588,7 @@ namespace nanojit
                countlir_fpu();
                ins->oprnd1()->setResultLive();
                if (ins->isExtant()) {
-                    asm_i2f(ins);
+                    asm_i2d(ins);
                }
                break;
 
@@ -1596,7 +1596,7 @@ namespace nanojit
                countlir_fpu();
                ins->oprnd1()->setResultLive();
                if (ins->isExtant()) {
-                    asm_u2f(ins);
+                    asm_ui2d(ins);
                }
                break;
 
@@ -1604,7 +1604,7 @@ namespace nanojit
                countlir_fpu();
                ins->oprnd1()->setResultLive();
                if (ins->isExtant()) {
-                    asm_f2i(ins);
+                    asm_d2i(ins);
                }
                break;
 
@@ -1804,7 +1804,7 @@ namespace nanojit
                ins->oprnd1()->setResultLive();
                ins->oprnd2()->setResultLive();
                if (ins->isExtant()) {
-                    asm_fcond(ins);
+                    asm_condd(ins);
                }
                break;
 
@@ -435,8 +435,8 @@ namespace nanojit
 #ifdef NANOJIT_64BIT
            void asm_immq(LInsp ins);
 #endif
-            void asm_immf(LInsp ins);
-            void asm_fcond(LInsp ins);
+            void asm_immd(LInsp ins);
+            void asm_condd(LInsp ins);
            void asm_cond(LInsp ins);
            void asm_arith(LInsp ins);
            void asm_neg_not(LInsp ins);
@@ -451,9 +451,9 @@ namespace nanojit
 #endif
            void asm_fneg(LInsp ins);
            void asm_fop(LInsp ins);
-            void asm_i2f(LInsp ins);
-            void asm_u2f(LInsp ins);
-            void asm_f2i(LInsp ins);
+            void asm_i2d(LInsp ins);
+            void asm_ui2d(LInsp ins);
+            void asm_d2i(LInsp ins);
 #ifdef NANOJIT_64BIT
            void asm_q2i(LInsp ins);
            void asm_promote(LIns *ins);
@@ -212,7 +212,7 @@ namespace nanojit
    {
        // Make sure the size is ok
        NanoAssert(0 == szB % sizeof(void*));
-        NanoAssert(sizeof(LIns) <= szB && szB <= sizeof(LInsSti));  // LInsSti is the biggest one
+        NanoAssert(sizeof(LIns) <= szB && szB <= sizeof(LInsSt));   // LInsSt is the biggest one
        NanoAssert(_unused < _limit);
 
        debug_only( bool moved = false; )
@@ -252,9 +252,9 @@ namespace nanojit
    LInsp LirBufWriter::insStore(LOpcode op, LInsp val, LInsp base, int32_t d, AccSet accSet)
    {
        if (isS16(d)) {
-            LInsSti* insSti = (LInsSti*)_buf->makeRoom(sizeof(LInsSti));
-            LIns*    ins    = insSti->getLIns();
-            ins->initLInsSti(op, val, base, d, accSet);
+            LInsSt* insSt = (LInsSt*)_buf->makeRoom(sizeof(LInsSt));
+            LIns*   ins   = insSt->getLIns();
+            ins->initLInsSt(op, val, base, d, accSet);
            return ins;
        } else {
            // If the displacement is more than 16 bits, put it in a separate instruction.
@@ -369,8 +369,8 @@ namespace nanojit
 #ifdef NANOJIT_64BIT
    LInsp LirBufWriter::insImmQ(uint64_t imm)
    {
-        LInsQorD* insN64 = (LInsQorD*)_buf->makeRoom(sizeof(LInsQorD));
-        LIns*     ins    = insN64->getLIns();
+        LInsQorD* insQorD = (LInsQorD*)_buf->makeRoom(sizeof(LInsQorD));
+        LIns*     ins     = insQorD->getLIns();
        ins->initLInsQorD(LIR_immq, imm);
        return ins;
    }
@@ -378,8 +378,8 @@ namespace nanojit
 
    LInsp LirBufWriter::insImmD(double d)
    {
-        LInsQorD* insN64 = (LInsQorD*)_buf->makeRoom(sizeof(LInsQorD));
-        LIns*     ins    = insN64->getLIns();
+        LInsQorD* insQorD = (LInsQorD*)_buf->makeRoom(sizeof(LInsQorD));
+        LIns*     ins     = insQorD->getLIns();
        union {
            double d;
            uint64_t q;
@@ -486,7 +486,7 @@ namespace nanojit
        NanoStaticAssert(sizeof(LInsOp2)  == 3*sizeof(void*));
        NanoStaticAssert(sizeof(LInsOp3)  == 4*sizeof(void*));
        NanoStaticAssert(sizeof(LInsLd)   == 3*sizeof(void*));
-        NanoStaticAssert(sizeof(LInsSti)  == 4*sizeof(void*));
+        NanoStaticAssert(sizeof(LInsSt)   == 4*sizeof(void*));
        NanoStaticAssert(sizeof(LInsSk)   == 2*sizeof(void*));
        NanoStaticAssert(sizeof(LInsC)    == 3*sizeof(void*));
        NanoStaticAssert(sizeof(LInsP)    == 2*sizeof(void*));
@@ -496,24 +496,22 @@ namespace nanojit
 #else
        NanoStaticAssert(sizeof(LInsQorD) == 3*sizeof(void*));
 #endif
        NanoStaticAssert(sizeof(LInsJtbl) == 4*sizeof(void*));
 
-        // oprnd_1 must be in the same position in LIns{Op1,Op2,Op3,Ld,Sti}
+        // oprnd_1 must be in the same position in LIns{Op1,Op2,Op3,Ld,St,Jtbl}
        // because oprnd1() is used for all of them.
-        NanoStaticAssert( (offsetof(LInsOp1, ins) - offsetof(LInsOp1, oprnd_1)) ==
-                          (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_1)) );
-        NanoStaticAssert( (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_1)) ==
-                          (offsetof(LInsOp3, ins) - offsetof(LInsOp3, oprnd_1)) );
-        NanoStaticAssert( (offsetof(LInsOp3, ins) - offsetof(LInsOp3, oprnd_1)) ==
-                          (offsetof(LInsLd, ins) - offsetof(LInsLd, oprnd_1)) );
-        NanoStaticAssert( (offsetof(LInsLd, ins) - offsetof(LInsLd, oprnd_1)) ==
-                          (offsetof(LInsSti, ins) - offsetof(LInsSti, oprnd_1)) );
+        #define OP1OFFSET (offsetof(LInsOp1, ins) - offsetof(LInsOp1, oprnd_1))
+        NanoStaticAssert( OP1OFFSET == (offsetof(LInsOp2,  ins) - offsetof(LInsOp2,  oprnd_1)) );
+        NanoStaticAssert( OP1OFFSET == (offsetof(LInsOp3,  ins) - offsetof(LInsOp3,  oprnd_1)) );
+        NanoStaticAssert( OP1OFFSET == (offsetof(LInsLd,   ins) - offsetof(LInsLd,   oprnd_1)) );
+        NanoStaticAssert( OP1OFFSET == (offsetof(LInsSt,   ins) - offsetof(LInsSt,   oprnd_1)) );
+        NanoStaticAssert( OP1OFFSET == (offsetof(LInsJtbl, ins) - offsetof(LInsJtbl, oprnd_1)) );
 
-        // oprnd_2 must be in the same position in LIns{Op2,Op3,Sti}
-        // because oprnd2() is used for both of them.
-        NanoStaticAssert( (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_2)) ==
-                          (offsetof(LInsOp3, ins) - offsetof(LInsOp3, oprnd_2)) );
-        NanoStaticAssert( (offsetof(LInsOp3, ins) - offsetof(LInsOp3, oprnd_2)) ==
-                          (offsetof(LInsSti, ins) - offsetof(LInsSti, oprnd_2)) );
+        // oprnd_2 must be in the same position in LIns{Op2,Op3,St}
+        // because oprnd2() is used for all of them.
+        #define OP2OFFSET (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_2))
+        NanoStaticAssert( OP2OFFSET == (offsetof(LInsOp3, ins) - offsetof(LInsOp3, oprnd_2)) );
+        NanoStaticAssert( OP2OFFSET == (offsetof(LInsSt,  ins) - offsetof(LInsSt,  oprnd_2)) );
    }
 
    bool insIsS16(LInsp i)
@@ -537,19 +535,19 @@ namespace nanojit
 #ifdef NANOJIT_64BIT
            case LIR_q2i:
                if (oprnd->isImmQ())
-                    return insImmI(oprnd->immQorDlo());
+                    return insImmI(oprnd->immQlo());
                break;
 #endif
 #if NJ_SOFTFLOAT_SUPPORTED
            case LIR_dlo2i:
                if (oprnd->isImmD())
-                    return insImmI(oprnd->immQorDlo());
+                    return insImmI(oprnd->immDlo());
                if (oprnd->isop(LIR_ii2d))
                    return oprnd->oprnd1();
                break;
            case LIR_dhi2i:
                if (oprnd->isImmD())
-                    return insImmI(oprnd->immQorDhi());
+                    return insImmI(oprnd->immDhi());
                if (oprnd->isop(LIR_ii2d))
                    return oprnd->oprnd2();
                break;
@@ -2014,7 +2012,7 @@ namespace nanojit
            if (!ins)
                return NULL;
            NanoAssert(ins->isImmD());
-            if (ins->immQ() == a)
+            if (ins->immDasQ() == a)
                return ins;
            k = (k + n) & bitmask;
            n += 1;
@@ -2024,7 +2022,7 @@ namespace nanojit
    uint32_t CseFilter::findImmD(LInsp ins)
    {
        uint32_t k;
-        findImmD(ins->immQ(), k);
+        findImmD(ins->immDasQ(), k);
        return k;
    }
 
@@ -2234,7 +2232,7 @@ namespace nanojit
            ins = out->insImmD(d);
            add(LInsImmD, ins, k);
        }
-        NanoAssert(ins->isop(LIR_immd) && ins->immQ() == u.u64);
+        NanoAssert(ins->isop(LIR_immd) && ins->immDasQ() == u.u64);
        return ins;
    }
 
@@ -357,19 +357,22 @@ namespace nanojit
        ABI_CDECL
    };
 
    // This is much the same as LTy, but we need to distinguish signed and
    // unsigned 32-bit ints so that they will be extended to 64-bits correctly
    // on 64-bit platforms.
    //
    // All values must fit into three bits.  See CallInfo for details.
    enum ArgType {
        ARGTYPE_V  = 0,     // void
-        ARGTYPE_D  = 1,     // double (64bit)
-        ARGTYPE_I  = 2,     // int32_t
-        ARGTYPE_UI = 3,     // uint32_t
+        ARGTYPE_I  = 1,     // int32_t
+        ARGTYPE_UI = 2,     // uint32_t
 #ifdef NANOJIT_64BIT
-        ARGTYPE_Q  = 4,     // uint64_t
+        ARGTYPE_Q  = 3,     // uint64_t
 #endif
+        ARGTYPE_D  = 4,     // double
 
        // aliases
        ARGTYPE_P  = PTR_SIZE(ARGTYPE_I, ARGTYPE_Q),    // pointer
        ARGTYPE_LO = ARGTYPE_I, // int32_t
        ARGTYPE_B  = ARGTYPE_I  // bool
    };
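The "must fit into three bits" comment is what forces the renumbering above: CallInfo packs the return type and the argument types into a fixed-width type-signature word, three bits per slot, so the enum can never grow past 7. The standalone sketch below shows that style of packing; the helper names and the choice of a 32-bit signature word are illustrative assumptions, not nanojit's actual CallInfo layout.

    #include <cstdint>
    #include <cassert>

    enum ArgType { ARGTYPE_V = 0, ARGTYPE_I = 1, ARGTYPE_UI = 2,
                   ARGTYPE_Q = 3, ARGTYPE_D = 4 };  // every value fits in 3 bits

    // Pack up to 10 entries into a 32-bit word, 3 bits apiece (illustrative).
    static uint32_t packTypes(const ArgType* types, int n) {
        uint32_t sig = 0;
        for (int i = 0; i < n; i++)
            sig |= uint32_t(types[i] & 7) << (3 * i);
        return sig;
    }

    static ArgType unpackType(uint32_t sig, int i) {
        return ArgType((sig >> (3 * i)) & 7);
    }

    int main() {
        const ArgType ts[3] = { ARGTYPE_I, ARGTYPE_Q, ARGTYPE_D };
        uint32_t sig = packTypes(ts, 3);
        assert(unpackType(sig, 0) == ARGTYPE_I);
        assert(unpackType(sig, 2) == ARGTYPE_D);
        return 0;
    }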
@@ -652,12 +655,12 @@ namespace nanojit
    extern const uint8_t repKinds[];
 
    enum LTy {
-        LTy_V,  // no value/no type
-        LTy_I,  // 32-bit integer
+        LTy_V,  // void:   no value/no type
+        LTy_I,  // int:    32-bit integer
 #ifdef NANOJIT_64BIT
-        LTy_Q,  // 64-bit integer
+        LTy_Q,  // quad:   64-bit integer
 #endif
-        LTy_D,  // 64-bit float
+        LTy_D,  // double: 64-bit float
 
        LTy_P  = PTR_SIZE(LTy_I, LTy_Q)   // word-sized integer
    };
@@ -748,7 +751,7 @@ namespace nanojit
        LRK_Op2,
        LRK_Op3,
        LRK_Ld,
-        LRK_Sti,
+        LRK_St,
        LRK_Sk,
        LRK_C,
        LRK_P,
@@ -763,7 +766,7 @@ namespace nanojit
    class LInsOp2;
    class LInsOp3;
    class LInsLd;
-    class LInsSti;
+    class LInsSt;
    class LInsSk;
    class LInsC;
    class LInsP;
@@ -819,7 +822,7 @@ namespace nanojit
        inline LInsOp2* toLInsOp2() const;
        inline LInsOp3* toLInsOp3() const;
        inline LInsLd*  toLInsLd()  const;
-        inline LInsSti* toLInsSti() const;
+        inline LInsSt*  toLInsSt()  const;
        inline LInsSk*  toLInsSk()  const;
        inline LInsC*   toLInsC()   const;
        inline LInsP*   toLInsP()   const;
@@ -836,14 +839,14 @@ namespace nanojit
        inline void initLInsOp2(LOpcode opcode, LIns* oprnd1, LIns* oprnd2);
        inline void initLInsOp3(LOpcode opcode, LIns* oprnd1, LIns* oprnd2, LIns* oprnd3);
        inline void initLInsLd(LOpcode opcode, LIns* val, int32_t d, AccSet accSet);
-        inline void initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d, AccSet accSet);
+        inline void initLInsSt(LOpcode opcode, LIns* val, LIns* base, int32_t d, AccSet accSet);
        inline void initLInsSk(LIns* prevLIns);
        // Nb: args[] must be allocated and initialised before being passed in;
        // initLInsC() just copies the pointer into the LInsC.
        inline void initLInsC(LOpcode opcode, LIns** args, const CallInfo* ci);
        inline void initLInsP(int32_t arg, int32_t kind);
        inline void initLInsI(LOpcode opcode, int32_t immI);
-        inline void initLInsQorD(LOpcode opcode, int64_t imm64);
+        inline void initLInsQorD(LOpcode opcode, uint64_t immQorD);
        inline void initLInsJtbl(LIns* index, uint32_t size, LIns** table);
 
        LOpcode opcode() const { return sharedFields.opcode; }
@@ -953,10 +956,14 @@ namespace nanojit
        inline int32_t  immI() const;
 
        // For LInsQorD.
-        inline int32_t  immQorDlo() const;
-        inline int32_t  immQorDhi() const;
+#ifdef NANOJIT_64BIT
+        inline int32_t  immQlo() const;
        inline uint64_t immQ() const;
+#endif
+        inline int32_t  immDlo() const;
+        inline int32_t  immDhi() const;
        inline double   immD() const;
+        inline uint64_t immDasQ() const;
 
        // For LIR_allocp.
        inline int32_t  size() const;
@@ -975,7 +982,7 @@ namespace nanojit
 
        // isLInsXYZ() returns true if the instruction has the LInsXYZ form.
        // Note that there is some overlap with other predicates, eg.
-        // isStore()==isLInsSti(), isCall()==isLInsC(), but that's ok;  these
+        // isStore()==isLInsSt(), isCall()==isLInsC(), but that's ok;  these
        // ones are used mostly to check that opcodes are appropriate for
        // instruction layouts, the others are used for non-debugging
        // purposes.
@@ -999,9 +1006,9 @@ namespace nanojit
            NanoAssert(LRK_None != repKinds[opcode()]);
            return LRK_Ld == repKinds[opcode()];
        }
-        bool isLInsSti() const {
+        bool isLInsSt() const {
            NanoAssert(LRK_None != repKinds[opcode()]);
-            return LRK_Sti == repKinds[opcode()];
+            return LRK_St == repKinds[opcode()];
        }
        bool isLInsSk() const {
            NanoAssert(LRK_None != repKinds[opcode()]);
@@ -1054,7 +1061,7 @@ namespace nanojit
            return isCmovOpcode(opcode());
        }
        bool isStore() const {
-            return isLInsSti();
+            return isLInsSt();
        }
        bool isLoad() const {
            return isLInsLd();
@@ -1182,7 +1189,7 @@ namespace nanojit
        LIns* getLIns() { return &ins; };
    };
 
-    // 2-operand form.  Used for loads, guards, branches, comparisons, binary
+    // 2-operand form.  Used for guards, branches, comparisons, binary
    // arithmetic/logic ops, etc.
    class LInsOp2
    {
@@ -1238,8 +1245,8 @@ namespace nanojit
        LIns* getLIns() { return &ins; };
    };
 
-    // Used for LIR_sti and LIR_stq.
-    class LInsSti
+    // Used for all stores.
+    class LInsSt
    {
    private:
        friend class LIns;
@@ -1359,18 +1366,18 @@ namespace nanojit
    {
    };
 
-    LInsOp0* LIns::toLInsOp0() const { return (LInsOp0*)( uintptr_t(this+1) - sizeof(LInsOp0) ); }
-    LInsOp1* LIns::toLInsOp1() const { return (LInsOp1*)( uintptr_t(this+1) - sizeof(LInsOp1) ); }
-    LInsOp2* LIns::toLInsOp2() const { return (LInsOp2*)( uintptr_t(this+1) - sizeof(LInsOp2) ); }
-    LInsOp3* LIns::toLInsOp3() const { return (LInsOp3*)( uintptr_t(this+1) - sizeof(LInsOp3) ); }
-    LInsLd*  LIns::toLInsLd()  const { return (LInsLd* )( uintptr_t(this+1) - sizeof(LInsLd ) ); }
-    LInsSti* LIns::toLInsSti() const { return (LInsSti*)( uintptr_t(this+1) - sizeof(LInsSti) ); }
-    LInsSk*  LIns::toLInsSk()  const { return (LInsSk* )( uintptr_t(this+1) - sizeof(LInsSk ) ); }
-    LInsC*   LIns::toLInsC()   const { return (LInsC*  )( uintptr_t(this+1) - sizeof(LInsC  ) ); }
-    LInsP*   LIns::toLInsP()   const { return (LInsP*  )( uintptr_t(this+1) - sizeof(LInsP  ) ); }
-    LInsI*   LIns::toLInsI()   const { return (LInsI*  )( uintptr_t(this+1) - sizeof(LInsI  ) ); }
-    LInsQorD* LIns::toLInsQorD() const { return (LInsQorD*)( uintptr_t(this+1) - sizeof(LInsQorD) ); }
-    LInsJtbl*LIns::toLInsJtbl()const { return (LInsJtbl*)(uintptr_t(this+1) - sizeof(LInsJtbl)); }
+    LInsOp0*  LIns::toLInsOp0()  const { return (LInsOp0* )(uintptr_t(this+1) - sizeof(LInsOp0 )); }
+    LInsOp1*  LIns::toLInsOp1()  const { return (LInsOp1* )(uintptr_t(this+1) - sizeof(LInsOp1 )); }
+    LInsOp2*  LIns::toLInsOp2()  const { return (LInsOp2* )(uintptr_t(this+1) - sizeof(LInsOp2 )); }
+    LInsOp3*  LIns::toLInsOp3()  const { return (LInsOp3* )(uintptr_t(this+1) - sizeof(LInsOp3 )); }
+    LInsLd*   LIns::toLInsLd()   const { return (LInsLd*  )(uintptr_t(this+1) - sizeof(LInsLd  )); }
+    LInsSt*   LIns::toLInsSt()   const { return (LInsSt*  )(uintptr_t(this+1) - sizeof(LInsSt  )); }
+    LInsSk*   LIns::toLInsSk()   const { return (LInsSk*  )(uintptr_t(this+1) - sizeof(LInsSk  )); }
+    LInsC*    LIns::toLInsC()    const { return (LInsC*   )(uintptr_t(this+1) - sizeof(LInsC   )); }
+    LInsP*    LIns::toLInsP()    const { return (LInsP*   )(uintptr_t(this+1) - sizeof(LInsP   )); }
+    LInsI*    LIns::toLInsI()    const { return (LInsI*   )(uintptr_t(this+1) - sizeof(LInsI   )); }
+    LInsQorD* LIns::toLInsQorD() const { return (LInsQorD*)(uintptr_t(this+1) - sizeof(LInsQorD)); }
+    LInsJtbl* LIns::toLInsJtbl() const { return (LInsJtbl*)(uintptr_t(this+1) - sizeof(LInsJtbl)); }
 
    void LIns::initLInsOp0(LOpcode opcode) {
        initSharedFields(opcode);
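These casts rely on every LInsXYZ variant embedding the shared LIns as its last member: this+1 is then one-past-the-end of the enclosing variant, and subtracting the variant's size lands on its start. Together with the OP1OFFSET/OP2OFFSET asserts from LIR.cpp, that is what lets oprnd1() read through toLInsOp2() regardless of the actual layout. A compilable sketch of the idea, with made-up names and a word-sized stand-in for LIns:

    #include <cstdint>
    #include <cstddef>
    #include <cassert>

    // Word-sized stand-in for the shared LIns part.
    struct InsLike { uintptr_t word; };

    // Each variant keeps the shared part as its LAST member, operands
    // immediately before it (highest-numbered operand first).
    struct Op1Like {                void* oprnd_1; InsLike ins; };
    struct Op2Like { void* oprnd_2; void* oprnd_1; InsLike ins; };

    // oprnd_1 sits the same distance before 'ins' in both variants, which
    // is what the OP1OFFSET asserts check in LIR.cpp.
    static_assert(offsetof(Op1Like, ins) - offsetof(Op1Like, oprnd_1) ==
                  offsetof(Op2Like, ins) - offsetof(Op2Like, oprnd_1),
                  "oprnd_1 must line up across variants");

    // Recover the enclosing variant from a pointer to its embedded InsLike:
    // p+1 is one-past-the-end of the variant, so back up by its size.
    static Op2Like* toOp2(InsLike* p) {
        return (Op2Like*)(uintptr_t(p + 1) - sizeof(Op2Like));
    }

    int main() {
        Op2Like node = { (void*)0x1, (void*)0x2, { 42 } };
        InsLike* ins = &node.ins;
        assert(toOp2(ins) == &node);
        assert(toOp2(ins)->oprnd_1 == (void*)0x2);
        return 0;
    }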
@@ -1402,14 +1409,14 @@ namespace nanojit
        toLInsLd()->accSet = accSet;
        NanoAssert(isLInsLd());
    }
-    void LIns::initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d, AccSet accSet) {
+    void LIns::initLInsSt(LOpcode opcode, LIns* val, LIns* base, int32_t d, AccSet accSet) {
        initSharedFields(opcode);
-        toLInsSti()->oprnd_1 = val;
-        toLInsSti()->oprnd_2 = base;
+        toLInsSt()->oprnd_1 = val;
+        toLInsSt()->oprnd_2 = base;
        NanoAssert(d == int16_t(d));
-        toLInsSti()->disp = int16_t(d);
-        toLInsSti()->accSet = accSet;
-        NanoAssert(isLInsSti());
+        toLInsSt()->disp = int16_t(d);
+        toLInsSt()->accSet = accSet;
+        NanoAssert(isLInsSt());
    }
    void LIns::initLInsSk(LIns* prevLIns) {
        initSharedFields(LIR_skip);
@@ -1434,10 +1441,10 @@ namespace nanojit
        toLInsI()->immI = immI;
        NanoAssert(isLInsI());
    }
-    void LIns::initLInsQorD(LOpcode opcode, int64_t imm64) {
+    void LIns::initLInsQorD(LOpcode opcode, uint64_t immQorD) {
        initSharedFields(opcode);
-        toLInsQorD()->immQorDlo = int32_t(imm64);
-        toLInsQorD()->immQorDhi = int32_t(imm64 >> 32);
+        toLInsQorD()->immQorDlo = int32_t(immQorD);
+        toLInsQorD()->immQorDhi = int32_t(immQorD >> 32);
        NanoAssert(isLInsQorD());
    }
    void LIns::initLInsJtbl(LIns* index, uint32_t size, LIns** table) {
@@ -1449,11 +1456,11 @@ namespace nanojit
    }
 
    LIns* LIns::oprnd1() const {
-        NanoAssert(isLInsOp1() || isLInsOp2() || isLInsOp3() || isLInsLd() || isLInsSti() || isLInsJtbl());
+        NanoAssert(isLInsOp1() || isLInsOp2() || isLInsOp3() || isLInsLd() || isLInsSt() || isLInsJtbl());
        return toLInsOp2()->oprnd_1;
    }
    LIns* LIns::oprnd2() const {
-        NanoAssert(isLInsOp2() || isLInsOp3() || isLInsSti());
+        NanoAssert(isLInsOp2() || isLInsOp3() || isLInsSt());
        return toLInsOp2()->oprnd_2;
    }
    LIns* LIns::oprnd3() const {
@@ -1507,8 +1514,8 @@ namespace nanojit
    }
 
    int32_t LIns::disp() const {
-        if (isLInsSti()) {
-            return toLInsSti()->disp;
+        if (isLInsSt()) {
+            return toLInsSt()->disp;
        } else {
            NanoAssert(isLInsLd());
            return toLInsLd()->disp;
@@ -1516,8 +1523,8 @@ namespace nanojit
    }
 
    AccSet LIns::accSet() const {
-        if (isLInsSti()) {
-            return toLInsSti()->accSet;
+        if (isLInsSt()) {
+            return toLInsSt()->accSet;
        } else {
            NanoAssert(isLInsLd());
            return toLInsLd()->accSet;
@@ -1534,21 +1541,28 @@ namespace nanojit
 
    inline int32_t LIns::immI() const { NanoAssert(isImmI()); return toLInsI()->immI; }
 
-    inline int32_t LIns::immQorDlo() const { NanoAssert(isImmQorD()); return toLInsQorD()->immQorDlo; }
-    inline int32_t LIns::immQorDhi() const { NanoAssert(isImmQorD()); return toLInsQorD()->immQorDhi; }
+#ifdef NANOJIT_64BIT
+    inline int32_t LIns::immQlo() const { NanoAssert(isImmQ()); return toLInsQorD()->immQorDlo; }
    uint64_t LIns::immQ() const {
-        NanoAssert(isImmQorD());
+        NanoAssert(isImmQ());
        return (uint64_t(toLInsQorD()->immQorDhi) << 32) | uint32_t(toLInsQorD()->immQorDlo);
    }
+#endif
+    inline int32_t LIns::immDlo() const { NanoAssert(isImmD()); return toLInsQorD()->immQorDlo; }
+    inline int32_t LIns::immDhi() const { NanoAssert(isImmD()); return toLInsQorD()->immQorDhi; }
    double LIns::immD() const {
        NanoAssert(isImmD());
        union {
            double f;
            uint64_t q;
        } u;
-        u.q = immQ();
+        u.q = immDasQ();
        return u.f;
    }
+    uint64_t LIns::immDasQ() const {
+        NanoAssert(isImmD());
+        return (uint64_t(toLInsQorD()->immQorDhi) << 32) | uint32_t(toLInsQorD()->immQorDlo);
+    }
 
    int32_t LIns::size() const {
        NanoAssert(isop(LIR_allocp));
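The new accessor pair makes the two views of a double immediate explicit: immDasQ() reassembles the stored 32-bit halves into the raw IEEE-754 bits, and immD() reinterprets those bits as a double through a union, mirroring what insImmD() does when splitting the value on the way in. A standalone sketch of the round trip (same union idiom as the source):

    #include <cstdint>
    #include <cassert>

    // Store a double as two 32-bit halves (as LInsQorD does), then read it
    // back either as raw bits (immDasQ) or as a double (immD).
    struct QorD { int32_t lo, hi; };

    static QorD store(double d) {
        union { double d; uint64_t q; } u;
        u.d = d;
        QorD s = { int32_t(u.q), int32_t(u.q >> 32) };
        return s;
    }

    static uint64_t immDasQ(const QorD& s) {
        return (uint64_t(s.hi) << 32) | uint32_t(s.lo);
    }

    static double immD(const QorD& s) {
        union { double d; uint64_t q; } u;
        u.q = immDasQ(s);
        return u.d;
    }

    int main() {
        QorD s = store(3.5);
        assert(immD(s) == 3.5);
        assert(immDasQ(s) == 0x400C000000000000ULL);  // IEEE-754 bits of 3.5
        return 0;
    }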
@@ -1677,11 +1691,6 @@ namespace nanojit
            return ins2(v, oprnd1, insImmI(imm));
        }
 
-#if NJ_SOFTFLOAT_SUPPORTED
-        LIns* qjoin(LInsp lo, LInsp hi) {
-            return ins2(LIR_ii2d, lo, hi);
-        }
-#endif
        LIns* insImmP(const void *ptr) {
 #ifdef NANOJIT_64BIT
            return insImmQ((uint64_t)ptr);
@@ -1716,7 +1725,7 @@ namespace nanojit
 #endif
        }
 
-        // Chooses LIR_sti or LIR_stq based on size of value.
+        // Chooses LIR_sti, LIR_stq or LIR_std according to the type of 'value'.
        LIns* insStore(LIns* value, LIns* base, int32_t d, AccSet accSet);
    };
 
@@ -148,12 +148,12 @@ OP_64(ldq,     22, Ld,  Q, -1)  // load quad
 OP___(ldd,     23, Ld,  D, -1)  // load double
 OP___(ldf2d,   24, Ld,  D, -1)  // load float and extend to a double
 
-OP___(sti2c,   25, Sti, V,  0)  // store int truncated to char
-OP___(sti2s,   26, Sti, V,  0)  // store int truncated to short
-OP___(sti,     27, Sti, V,  0)  // store int
-OP_64(stq,     28, Sti, V,  0)  // store quad
-OP___(std,     29, Sti, V,  0)  // store double
-OP___(std2f,   30, Sti, V,  0)  // store double as a float (losing precision)
+OP___(sti2c,   25, St,  V,  0)  // store int truncated to char
+OP___(sti2s,   26, St,  V,  0)  // store int truncated to short
+OP___(sti,     27, St,  V,  0)  // store int
+OP_64(stq,     28, St,  V,  0)  // store quad
+OP___(std,     29, St,  V,  0)  // store double
+OP___(std2f,   30, St,  V,  0)  // store double as a float (losing precision)
 
 OP_UN(31)
 OP_UN(32)
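Rows in this opcode table are X-macros: each consumer defines OP___/OP_64/OP_UN to expand a row however it needs and then includes the table, which is why retagging the store rows from Sti to St is enough to update every generated enum and lookup table at once. A minimal self-contained sketch of the pattern (table inlined as a macro here instead of a separate .tbl file; names are illustrative):

    #include <cstdio>

    // A stand-in for LIRopcode.tbl: each row is opcode name, number,
    // representation kind, result type.
    #define OPCODE_TABLE(OP)        \
        OP(sti,  27, St, V)         \
        OP(std,  29, St, V)         \
        OP(ldd,  23, Ld, D)

    // One consumer: build an enum of opcode numbers.
    #define MAKE_ENUM(name, num, repkind, ty) LIR_##name = num,
    enum LOpcodeLike { OPCODE_TABLE(MAKE_ENUM) };
    #undef MAKE_ENUM

    // Another consumer: a name table for debugging output.
    #define MAKE_NAME(name, num, repkind, ty) #name,
    static const char* lirNamesLike[] = { OPCODE_TABLE(MAKE_NAME) };
    #undef MAKE_NAME

    int main() {
        printf("%s = %d\n", lirNamesLike[0], LIR_sti);  // prints: sti = 27
        return 0;
    }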
@@ -665,8 +665,8 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
        if (_config.arm_vfp) {
            FMRRD(ra, rb, fp_reg);
        } else {
-            asm_regarg(ARGTYPE_LO, arg->oprnd1(), ra);
-            asm_regarg(ARGTYPE_LO, arg->oprnd2(), rb);
+            asm_regarg(ARGTYPE_I, arg->oprnd1(), ra);
+            asm_regarg(ARGTYPE_I, arg->oprnd2(), rb);
        }
 
 #ifndef NJ_ARM_EABI
@@ -699,7 +699,7 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
            // Without VFP, we can simply use asm_regarg and asm_stkarg to
            // encode the two 32-bit words as we don't need to load from a VFP
            // register.
-            asm_regarg(ARGTYPE_LO, arg->oprnd1(), ra);
+            asm_regarg(ARGTYPE_I, arg->oprnd1(), ra);
            asm_stkarg(arg->oprnd2(), 0);
            stkd += 4;
        }
@@ -914,7 +914,7 @@ Assembler::asm_call(LInsp ins)
        } else {
            BLX(LR);
        }
-        asm_regarg(ARGTYPE_LO, ins->arg(--argc), LR);
+        asm_regarg(ARGTYPE_I, ins->arg(--argc), LR);
    }
 
    // Encode the arguments, starting at R0 and with an empty argument stack.
@@ -1433,9 +1433,9 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
 
            // XXX use another reg, get rid of dependency
            STR(IP, rb, dr);
-            asm_ld_imm(IP, value->immQorDlo(), false);
+            asm_ld_imm(IP, value->immDlo(), false);
            STR(IP, rb, dr+4);
-            asm_ld_imm(IP, value->immQorDhi(), false);
+            asm_ld_imm(IP, value->immDhi(), false);
 
            return;
        }
@@ -1463,7 +1463,7 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
                // has the right value
                if (value->isImmD()) {
                    underrunProtect(4*4);
-                    asm_immf_nochk(rv, value->immQorDlo(), value->immQorDhi());
+                    asm_immd_nochk(rv, value->immDlo(), value->immDhi());
                }
            } else {
                int da = findMemFor(value);
@@ -1482,9 +1482,9 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
 
            // XXX use another reg, get rid of dependency
            STR(IP, rb, dr);
-            asm_ld_imm(IP, value->immQorDlo(), false);
+            asm_ld_imm(IP, value->immDlo(), false);
            STR(IP, rb, dr+4);
-            asm_ld_imm(IP, value->immQorDhi(), false);
+            asm_ld_imm(IP, value->immDhi(), false);
 
            return;
        }
@@ -1514,7 +1514,7 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
                // has the right value
                if (value->isImmD()) {
                    underrunProtect(4*4);
-                    asm_immf_nochk(rv, value->immQorDlo(), value->immQorDhi());
+                    asm_immd_nochk(rv, value->immDlo(), value->immDhi());
                }
            } else {
                NanoAssertMsg(0, "st32f not supported with non-VFP, fix me");
@@ -1531,7 +1531,7 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
 // Stick a float into register rr, where p points to the two
 // 32-bit parts of the quad, optinally also storing at FP+d
 void
-Assembler::asm_immf_nochk(Register rr, int32_t immQorDlo, int32_t immQorDhi)
+Assembler::asm_immd_nochk(Register rr, int32_t immDlo, int32_t immDhi)
 {
    // We're not going to use a slot, because it might be too far
    // away.  Instead, we're going to stick a branch in the stream to
@@ -1540,22 +1540,22 @@ Assembler::asm_immf_nochk(Register rr, int32_t immQorDlo, int32_t immQorDhi)
 
    // stream should look like:
    //    branch A
-    //    immQorDlo
-    //    immQorDhi
+    //    immDlo
+    //    immDhi
    // A: FLDD PC-16
 
    FLDD(rr, PC, -16);
 
-    *(--_nIns) = (NIns) immQorDhi;
-    *(--_nIns) = (NIns) immQorDlo;
+    *(--_nIns) = (NIns) immDhi;
+    *(--_nIns) = (NIns) immDlo;
 
    B_nochk(_nIns+2);
 }
 
 void
-Assembler::asm_immf(LInsp ins)
+Assembler::asm_immd(LInsp ins)
 {
-    //asm_output(">>> asm_immf");
+    //asm_output(">>> asm_immd");
 
    int d = deprecated_disp(ins);
    Register rr = ins->deprecated_getReg();
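asm_immd_nochk() uses the classic ARM inline-literal trick: emit a branch, drop the two data words of the double straight into the code stream behind it, and let FLDD load them PC-relative, so no constant pool or stack slot is needed. The sketch below models the stream with a plain array and checks that the skipped words round-trip to the original double; the little-endian lo-then-hi word order is an assumption matching the comment above.

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    // Emitted layout:
    //     B  over          ; jump past the data
    //     .word immDlo
    //     .word immDhi
    //     FLDD rr, [PC,#-16]
    // Modeled here with a plain array standing in for the code stream.
    int main() {
        double d = 2.5;
        uint64_t q;
        std::memcpy(&q, &d, sizeof q);   // grab the raw IEEE-754 bits
        uint32_t lo = uint32_t(q), hi = uint32_t(q >> 32);

        uint32_t stream[4] = { /*B*/ 0, lo, hi, /*FLDD*/ 0 };

        // The "FLDD PC-16" reads back the pair the branch skipped over:
        uint64_t back = (uint64_t(stream[2]) << 32) | stream[1];
        std::memcpy(&d, &back, sizeof d);
        assert(d == 2.5);
        return 0;
    }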
@@ -1567,7 +1567,7 @@ Assembler::asm_immf(LInsp ins)
            asm_spill(rr, d, false, true);
 
        underrunProtect(4*4);
-        asm_immf_nochk(rr, ins->immQorDlo(), ins->immQorDhi());
+        asm_immd_nochk(rr, ins->immDlo(), ins->immDhi());
    } else {
        NanoAssert(d);
        // asm_mmq might spill a reg, so don't call it;
@@ -1575,12 +1575,12 @@ Assembler::asm_immf(LInsp ins)
        //asm_mmq(FP, d, PC, -16);
 
        STR(IP, FP, d+4);
-        asm_ld_imm(IP, ins->immQorDhi());
+        asm_ld_imm(IP, ins->immDhi());
        STR(IP, FP, d);
-        asm_ld_imm(IP, ins->immQorDlo());
+        asm_ld_imm(IP, ins->immDlo());
    }
 
-    //asm_output("<<< asm_immf");
+    //asm_output("<<< asm_immd");
 }
 
 void
@@ -2107,7 +2107,7 @@ Assembler::B_cond_chk(ConditionCode _c, NIns* _t, bool _chk)
 */
 
 void
-Assembler::asm_i2f(LInsp ins)
+Assembler::asm_i2d(LInsp ins)
 {
    Register rr = deprecated_prepResultReg(ins, FpRegs);
    Register srcr = findRegFor(ins->oprnd1(), GpRegs);
@@ -2120,7 +2120,7 @@
 }
 
 void
-Assembler::asm_u2f(LInsp ins)
+Assembler::asm_ui2d(LInsp ins)
 {
    Register rr = deprecated_prepResultReg(ins, FpRegs);
    Register sr = findRegFor(ins->oprnd1(), GpRegs);
@@ -2132,7 +2132,7 @@
    FMSR(S14, sr);
 }
 
-void Assembler::asm_f2i(LInsp ins)
+void Assembler::asm_d2i(LInsp ins)
 {
    // where our result goes
    Register rr = deprecated_prepResultReg(ins, GpRegs);
@@ -2182,7 +2182,7 @@ Assembler::asm_fop(LInsp ins)
 }
 
 void
-Assembler::asm_fcmp(LInsp ins)
+Assembler::asm_cmpd(LInsp ins)
 {
    LInsp lhs = ins->oprnd1();
    LInsp rhs = ins->oprnd2();
@@ -2262,7 +2262,7 @@ Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
    NIns *at = _nIns;
 
    if (_config.arm_vfp && fp_cond)
-        asm_fcmp(cond);
+        asm_cmpd(cond);
    else
        asm_cmp(cond);
 
@@ -2327,7 +2327,7 @@ Assembler::asm_cmpi(Register r, int32_t imm)
 }
 
 void
-Assembler::asm_fcond(LInsp ins)
+Assembler::asm_condd(LInsp ins)
 {
    // only want certain regs
    Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
@@ -2341,7 +2341,7 @@ Assembler::asm_fcond(LInsp ins)
        default: NanoAssert(0); break;
    }
 
-    asm_fcmp(ins);
+    asm_cmpd(ins);
 }
 
 void
@@ -224,13 +224,13 @@ verbose_only( extern const char* shiftNames[]; )
    void underrunProtect(int bytes); \
    void nativePageReset(); \
    void nativePageSetup(); \
-    void asm_immf_nochk(Register, int32_t, int32_t); \
+    void asm_immd_nochk(Register, int32_t, int32_t); \
    void asm_regarg(ArgType, LInsp, Register); \
    void asm_stkarg(LInsp p, int stkd); \
    void asm_cmpi(Register, int32_t imm); \
    void asm_ldr_chk(Register d, Register b, int32_t off, bool chk); \
    void asm_cmp(LIns *cond); \
-    void asm_fcmp(LIns *cond); \
+    void asm_cmpd(LIns *cond); \
    void asm_ld_imm(Register d, int32_t imm, bool chk = true); \
    void asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd); \
    void asm_arg_64(LInsp arg, Register& r, int& stkd); \
@@ -362,8 +362,8 @@ namespace nanojit
    void Assembler::asm_store_imm64(LIns *value, int dr, Register rbase)
    {
        NanoAssert(value->isImmD());
-        int32_t msw = value->immQorDhi();
-        int32_t lsw = value->immQorDlo();
+        int32_t msw = value->immDhi();
+        int32_t lsw = value->immDlo();
 
        // li $at,lsw                   # iff lsw != 0
        // sw $at,off+LSWOFF($rbase)    # may use $0 instead of $at
@@ -537,7 +537,7 @@ namespace nanojit
             value, lirNames[value->opcode()], dr, base, lirNames[base->opcode()]);
    }
 
-    void Assembler::asm_u2f(LIns *ins)
+    void Assembler::asm_ui2d(LIns *ins)
    {
        Register fr = deprecated_prepResultReg(ins, FpRegs);
        Register v = findRegFor(ins->oprnd1(), GpRegs);
@@ -565,10 +565,10 @@ namespace nanojit
        BGEZ(v,here);
        MTC1(v,ft);
 
-        TAG("asm_u2f(ins=%p{%s})", ins, lirNames[ins->opcode()]);
+        TAG("asm_ui2d(ins=%p{%s})", ins, lirNames[ins->opcode()]);
    }
 
-    void Assembler::asm_f2i(LInsp ins)
+    void Assembler::asm_d2i(LInsp ins)
    {
        NanoAssert(cpu_has_fpu);
 
@@ -578,7 +578,7 @@ namespace nanojit
        // mfc1 $rr,$sr
        MFC1(rr,sr);
        TRUNC_W_D(sr,sr);
-        TAG("asm_u2f(ins=%p{%s})", ins, lirNames[ins->opcode()]);
+        TAG("asm_d2i(ins=%p{%s})", ins, lirNames[ins->opcode()]);
    }
 
    void Assembler::asm_fop(LIns *ins)
@@ -621,7 +621,7 @@ namespace nanojit
        TAG("asm_fneg(ins=%p{%s})", ins, lirNames[ins->opcode()]);
    }
 
-    void Assembler::asm_immf(LIns *ins)
+    void Assembler::asm_immd(LIns *ins)
    {
        int d = deprecated_disp(ins);
        Register rr = ins->deprecated_getReg();
@@ -631,13 +631,13 @@ namespace nanojit
        if (cpu_has_fpu && deprecated_isKnownReg(rr)) {
            if (d)
                asm_spill(rr, d, false, true);
-            asm_li_d(rr, ins->immQorDhi(), ins->immQorDlo());
+            asm_li_d(rr, ins->immDhi(), ins->immDlo());
        }
        else {
            NanoAssert(d);
            asm_store_imm64(ins, d, FP);
        }
-        TAG("asm_immf(ins=%p{%s})", ins, lirNames[ins->opcode()]);
+        TAG("asm_immd(ins=%p{%s})", ins, lirNames[ins->opcode()]);
    }
 
 #ifdef NANOJIT_64BIT
@@ -792,7 +792,7 @@ namespace nanojit
        TAG("asm_cmov(ins=%p{%s})", ins, lirNames[ins->opcode()]);
    }
 
-    void Assembler::asm_fcond(LIns *ins)
+    void Assembler::asm_condd(LIns *ins)
    {
        NanoAssert(cpu_has_fpu);
        if (cpu_has_fpu) {
@@ -826,10 +826,10 @@ namespace nanojit
            }
            asm_cmp(op, a, b, r);
        }
-        TAG("asm_fcond(ins=%p{%s})", ins, lirNames[ins->opcode()]);
+        TAG("asm_condd(ins=%p{%s})", ins, lirNames[ins->opcode()]);
    }
 
-    void Assembler::asm_i2f(LIns *ins)
+    void Assembler::asm_i2d(LIns *ins)
    {
        NanoAssert(cpu_has_fpu);
        if (cpu_has_fpu) {
@@ -841,7 +841,7 @@ namespace nanojit
            CVT_D_W(fr,fr);
            MTC1(v,fr);
        }
-        TAG("asm_i2f(ins=%p{%s})", ins, lirNames[ins->opcode()]);
+        TAG("asm_i2d(ins=%p{%s})", ins, lirNames[ins->opcode()]);
    }
 
    void Assembler::asm_ret(LIns *ins)
@@ -409,7 +409,7 @@ namespace nanojit
        asm_cmp(op, a, b, cr);
    }
 
-    void Assembler::asm_fcond(LIns *ins) {
+    void Assembler::asm_condd(LIns *ins) {
        asm_cond(ins);
    }
 
@@ -965,7 +965,7 @@ namespace nanojit
        }
    }
 
-    void Assembler::asm_i2f(LIns *ins) {
+    void Assembler::asm_i2d(LIns *ins) {
        Register r = deprecated_prepResultReg(ins, FpRegs);
        Register v = findRegFor(ins->oprnd1(), GpRegs);
        const int d = 16; // natural aligned
@@ -988,7 +988,7 @@ namespace nanojit
 #endif
    }
 
-    void Assembler::asm_u2f(LIns *ins) {
+    void Assembler::asm_ui2d(LIns *ins) {
        Register r = deprecated_prepResultReg(ins, FpRegs);
        Register v = findRegFor(ins->oprnd1(), GpRegs);
        const int d = 16;
@@ -1010,7 +1010,7 @@ namespace nanojit
 #endif
    }
 
-    void Assembler::asm_f2i(LInsp) {
+    void Assembler::asm_d2i(LInsp) {
        NanoAssertMsg(0, "NJ_F2I_SUPPORTED not yet supported for this architecture");
    }
 
@@ -1079,7 +1079,7 @@ namespace nanojit
    }
 #endif
 
-    void Assembler::asm_immf(LIns *ins) {
+    void Assembler::asm_immd(LIns *ins) {
 #ifdef NANOJIT_64BIT
        Register r = ins->deprecated_getReg();
        if (deprecated_isKnownReg(r) && (rmask(r) & FpRegs)) {
@@ -1109,7 +1109,7 @@ namespace nanojit
            asm_li(R0, w.hi);
        }
        else {
-            int64_t q = ins->immQ();
+            int64_t q = ins->immDasQ();
            if (isS32(q)) {
                asm_li(r, int32_t(q));
                return;
@@ -405,9 +405,9 @@ namespace nanojit
            // generating a pointless store/load/store sequence
            Register rb = findRegFor(base, GpRegs);
            STW32(L2, dr+4, rb);
-            SET32(value->immQorDlo(), L2);
+            SET32(value->immDlo(), L2);
            STW32(L2, dr, rb);
-            SET32(value->immQorDhi(), L2);
+            SET32(value->immDhi(), L2);
            return;
        }
 
@@ -473,7 +473,7 @@ namespace nanojit
        NanoAssert(cond->isCmp());
        if (isCmpDOpcode(condop))
        {
-            return asm_fbranch(branchOnFalse, cond, targ);
+            return asm_branchd(branchOnFalse, cond, targ);
        }
 
        underrunProtect(32);
@@ -581,7 +581,7 @@ namespace nanojit
        }
    }
 
-    void Assembler::asm_fcond(LInsp ins)
+    void Assembler::asm_condd(LInsp ins)
    {
        // only want certain regs
        Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
@@ -599,7 +599,7 @@ namespace nanojit
        else // if (condop == LIR_gtd)
            MOVFGI(1, 0, 0, 0, r);
        ORI(G0, 0, r);
-        asm_fcmp(ins);
+        asm_cmpd(ins);
    }
 
    void Assembler::asm_cond(LInsp ins)
@@ -821,7 +821,7 @@ namespace nanojit
        SET32(val, rr);
    }
 
-    void Assembler::asm_immf(LInsp ins)
+    void Assembler::asm_immd(LInsp ins)
    {
        underrunProtect(64);
        Register rr = ins->deprecated_getReg();
@@ -842,9 +842,9 @@ namespace nanojit
        if (d)
        {
            STW32(L2, d+4, FP);
-            SET32(ins->immQorDlo(), L2);
+            SET32(ins->immDlo(), L2);
            STW32(L2, d, FP);
-            SET32(ins->immQorDhi(), L2);
+            SET32(ins->immDhi(), L2);
        }
    }
 
@@ -888,7 +888,7 @@ namespace nanojit
 
    }
 
-    void Assembler::asm_i2f(LInsp ins)
+    void Assembler::asm_i2d(LInsp ins)
    {
        underrunProtect(32);
        // where our result goes
@@ -898,7 +898,7 @@ namespace nanojit
        LDDF32(FP, d, rr);
    }
 
-    void Assembler::asm_u2f(LInsp ins)
+    void Assembler::asm_ui2d(LInsp ins)
    {
        underrunProtect(72);
        // where our result goes
@@ -917,7 +917,7 @@ namespace nanojit
        SETHI(0x43300000, G1);
    }
 
-    void Assembler::asm_f2i(LInsp ins) {
+    void Assembler::asm_d2i(LInsp ins) {
        LIns *lhs = ins->oprnd1();
        Register rr = prepareResultReg(ins, GpRegs);
        Register ra = findRegFor(lhs, FpRegs);
@@ -934,7 +934,7 @@ namespace nanojit
        FMOVD(s, r);
    }
 
-    NIns * Assembler::asm_fbranch(bool branchOnFalse, LIns *cond, NIns *targ)
+    NIns * Assembler::asm_branchd(bool branchOnFalse, LIns *cond, NIns *targ)
    {
        NIns *at = 0;
        LOpcode condop = cond->opcode();
@@ -978,11 +978,11 @@ namespace nanojit
            else //if (condop == LIR_gtd)
                FBG(0, tt);
        }
-        asm_fcmp(cond);
+        asm_cmpd(cond);
        return at;
    }
 
-    void Assembler::asm_fcmp(LIns *cond)
+    void Assembler::asm_cmpd(LIns *cond)
    {
        underrunProtect(4);
        LIns* lhs = cond->oprnd1();
@@ -208,8 +208,8 @@ namespace nanojit
    void underrunProtect(int bytes); \
    void asm_align_code(); \
    void asm_cmp(LIns *cond); \
-    void asm_fcmp(LIns *cond); \
-    NIns* asm_fbranch(bool, LIns*, NIns*);
+    void asm_cmpd(LIns *cond); \
+    NIns* asm_branchd(bool, LIns*, NIns*);
 
 #define IMM32(i) \
    --_nIns; \
@@ -1038,7 +1038,7 @@ namespace nanojit
    // XMM register, which hinders register renaming and makes dependence
    // chains longer.  So we precede with XORPS to clear the target register.
 
-    void Assembler::asm_i2f(LIns *ins) {
+    void Assembler::asm_i2d(LIns *ins) {
        LIns *a = ins->oprnd1();
        NanoAssert(ins->isD() && a->isI());
 
@@ -1049,7 +1049,7 @@ namespace nanojit
        freeResourcesOf(ins);
    }
 
-    void Assembler::asm_u2f(LIns *ins) {
+    void Assembler::asm_ui2d(LIns *ins) {
        LIns *a = ins->oprnd1();
        NanoAssert(ins->isD() && a->isI());
 
@@ -1062,7 +1062,7 @@ namespace nanojit
        freeResourcesOf(ins);
    }
 
-    void Assembler::asm_f2i(LIns *ins) {
+    void Assembler::asm_d2i(LIns *ins) {
        LIns *a = ins->oprnd1();
        NanoAssert(ins->isI() && a->isD());
 
@@ -1138,7 +1138,7 @@ namespace nanojit
        NanoAssert(cond->isCmp());
        LOpcode condop = cond->opcode();
        if (isCmpDOpcode(condop))
-            return asm_fbranch(onFalse, cond, target);
+            return asm_branchd(onFalse, cond, target);
 
        // We must ensure there's room for the instruction before calculating
        // the offset.  And the offset determines the opcode (8bit or 32bit).
@@ -1282,7 +1282,7 @@ namespace nanojit
    //  LIR_jt   jae ja   swap+jae swap+ja   jp over je
    //  LIR_jf   jb  jbe  swap+jb  swap+jbe  jne+jp
 
-    NIns* Assembler::asm_fbranch(bool onFalse, LIns *cond, NIns *target) {
+    NIns* Assembler::asm_branchd(bool onFalse, LIns *cond, NIns *target) {
        LOpcode condop = cond->opcode();
        NIns *patch;
        LIns *a = cond->oprnd1();
@@ -1325,11 +1325,11 @@ namespace nanojit
            }
            patch = _nIns;
        }
-        asm_fcmp(a, b);
+        asm_cmpd(a, b);
        return patch;
    }
 
-    void Assembler::asm_fcond(LIns *ins) {
+    void Assembler::asm_condd(LIns *ins) {
        LOpcode op = ins->opcode();
        LIns *a = ins->oprnd1();
        LIns *b = ins->oprnd2();
@@ -1359,13 +1359,13 @@ namespace nanojit
 
        freeResourcesOf(ins);
 
-        asm_fcmp(a, b);
+        asm_cmpd(a, b);
    }
 
    // WARNING: This function cannot generate any code that will affect the
    // condition codes prior to the generation of the ucomisd.  See asm_cmp()
    // for more details.
-    void Assembler::asm_fcmp(LIns *a, LIns *b) {
+    void Assembler::asm_cmpd(LIns *a, LIns *b) {
        Register ra, rb;
        findRegFor2(FpRegs, a, ra, FpRegs, b, rb);
        UCOMISD(ra, rb);
@@ -1413,7 +1413,7 @@ namespace nanojit
            asm_immq(r, ins->immQ(), /*canClobberCCs*/false);
        }
        else if (ins->isImmD()) {
-            asm_immf(r, ins->immQ(), /*canClobberCCs*/false);
+            asm_immd(r, ins->immDasQ(), /*canClobberCCs*/false);
        }
        else if (canRematLEA(ins)) {
            Register lhsReg = ins->oprnd1()->getReg();
@@ -1638,9 +1638,9 @@ namespace nanojit
        freeResourcesOf(ins);
    }
 
-    void Assembler::asm_immf(LIns *ins) {
+    void Assembler::asm_immd(LIns *ins) {
        Register r = prepareResultReg(ins, FpRegs);
-        asm_immf(r, ins->immQ(), /*canClobberCCs*/true);
+        asm_immd(r, ins->immDasQ(), /*canClobberCCs*/true);
        freeResourcesOf(ins);
    }
 
@@ -1669,7 +1669,7 @@ namespace nanojit
        }
    }
 
-    void Assembler::asm_immf(Register r, uint64_t v, bool canClobberCCs) {
+    void Assembler::asm_immd(Register r, uint64_t v, bool canClobberCCs) {
        NanoAssert(IsFpReg(r));
        if (v == 0 && canClobberCCs) {
            XORPS(r);
@@ -394,7 +394,7 @@ namespace nanojit
    bool isTargetWithinS32(NIns* target);\
    void asm_immi(Register r, int32_t v, bool canClobberCCs);\
    void asm_immq(Register r, uint64_t v, bool canClobberCCs);\
-    void asm_immf(Register r, uint64_t v, bool canClobberCCs);\
+    void asm_immd(Register r, uint64_t v, bool canClobberCCs);\
    void asm_regarg(ArgType, LIns*, Register);\
    void asm_stkarg(ArgType, LIns*, int);\
    void asm_shift(LIns*);\
@@ -408,8 +408,8 @@ namespace nanojit
    void dis(NIns *p, int bytes);\
    void asm_cmp(LIns*);\
    void asm_cmp_imm(LIns*);\
-    void asm_fcmp(LIns*, LIns*);\
-    NIns* asm_fbranch(bool, LIns*, NIns*);\
+    void asm_cmpd(LIns*, LIns*);\
+    NIns* asm_branchd(bool, LIns*, NIns*);\
    void asm_div(LIns *ins);\
    void asm_div_mod(LIns *ins);\
    int max_stk_used;\
@@ -1187,7 +1187,7 @@ namespace nanojit
            asm_immi(r, ins->immI(), /*canClobberCCs*/false);
 
        } else if (ins->isImmD()) {
-            asm_immf(r, ins->immQ(), ins->immD(), /*canClobberCCs*/false);
+            asm_immd(r, ins->immDasQ(), ins->immD(), /*canClobberCCs*/false);
 
        } else if (ins->isop(LIR_paramp) && ins->paramKind() == 0 &&
            (arg = ins->paramArg()) >= (abi_regcount = max_abi_regs[_thisfrag->lirbuf->abi])) {
@@ -1391,8 +1391,8 @@ namespace nanojit
        }
 
    } else if (value->isImmD()) {
-        STi(rb, dr+4, value->immQorDhi());
-        STi(rb, dr,   value->immQorDlo());
+        STi(rb, dr+4, value->immDhi());
+        STi(rb, dr,   value->immDlo());
 
    } else if (value->isop(LIR_ldd)) {
        // value is 64bit struct or int64_t, or maybe a double.
@@ -1455,7 +1455,7 @@ namespace nanojit
 
        // Handle float conditions separately.
        if (isCmpDOpcode(condop)) {
-            return asm_fbranch(branchOnFalse, cond, targ);
+            return asm_branchd(branchOnFalse, cond, targ);
        }
 
        if (branchOnFalse) {
@@ -1583,7 +1583,7 @@ namespace nanojit
        }
    }
 
-    void Assembler::asm_fcond(LInsp ins)
+    void Assembler::asm_condd(LInsp ins)
    {
        LOpcode opcode = ins->opcode();
        Register r = prepareResultReg(ins, AllowableFlagRegs);
@@ -1593,7 +1593,7 @@ namespace nanojit
 
        if (_config.i386_sse2) {
            // LIR_ltd and LIR_gtd are handled by the same case because
-            // asm_fcmp() converts LIR_ltd(a,b) to LIR_gtd(b,a).  Likewise
+            // asm_cmpd() converts LIR_ltd(a,b) to LIR_gtd(b,a).  Likewise
            // for LIR_led/LIR_ged.
            switch (opcode) {
            case LIR_eqd:   SETNP(r);       break;
@@ -1609,7 +1609,7 @@ namespace nanojit
 
        freeResourcesOf(ins);
 
-        asm_fcmp(ins);
+        asm_cmpd(ins);
    }
 
    void Assembler::asm_cond(LInsp ins)
@@ -2091,7 +2091,7 @@ namespace nanojit
        LDi(r, val);
    }
 
-    void Assembler::asm_immf(Register r, uint64_t q, double d, bool canClobberCCs)
+    void Assembler::asm_immd(Register r, uint64_t q, double d, bool canClobberCCs)
    {
        // Floats require non-standard handling. There is no load-64-bit-immediate
        // instruction on i386, so in the general case, we must load it from memory.
@@ -2130,13 +2130,13 @@ namespace nanojit
        }
    }
 
-    void Assembler::asm_immf(LInsp ins)
+    void Assembler::asm_immd(LInsp ins)
    {
        NanoAssert(ins->isImmD());
        if (ins->isInReg()) {
            Register rr = ins->getReg();
            NanoAssert(rmask(rr) & FpRegs);
-            asm_immf(rr, ins->immQ(), ins->immD(), /*canClobberCCs*/true);
+            asm_immd(rr, ins->immDasQ(), ins->immD(), /*canClobberCCs*/true);
        } else {
            // Do nothing, will be rematerialized when necessary.
        }
@@ -2386,7 +2386,7 @@ namespace nanojit
        NanoAssert(!lhs->isInReg() || FST0 == lhs->getReg());
 
        if (rhs->isImmD()) {
-            const uint64_t* p = findImmDFromPool(rhs->immQ());
+            const uint64_t* p = findImmDFromPool(rhs->immDasQ());
 
            switch (op) {
            case LIR_addd:  FADDdm( (const double*)p);  break;
@@ -2414,7 +2414,7 @@ namespace nanojit
        }
    }
 
-    void Assembler::asm_i2f(LInsp ins)
+    void Assembler::asm_i2d(LInsp ins)
    {
        LIns* lhs = ins->oprnd1();
 
@@ -2432,7 +2432,7 @@ namespace nanojit
        freeResourcesOf(ins);
    }
 
-    void Assembler::asm_u2f(LInsp ins)
+    void Assembler::asm_ui2d(LInsp ins)
    {
        LIns* lhs = ins->oprnd1();
 
@@ -2486,7 +2486,7 @@ namespace nanojit
        freeResourcesOf(ins);
    }
 
-    void Assembler::asm_f2i(LInsp ins)
+    void Assembler::asm_d2i(LInsp ins)
    {
        LIns *lhs = ins->oprnd1();
 
@@ -2519,14 +2519,14 @@ namespace nanojit
        }
    }
 
-    NIns* Assembler::asm_fbranch(bool branchOnFalse, LIns *cond, NIns *targ)
+    NIns* Assembler::asm_branchd(bool branchOnFalse, LIns *cond, NIns *targ)
    {
        NIns* at;
        LOpcode opcode = cond->opcode();
 
        if (_config.i386_sse2) {
            // LIR_ltd and LIR_gtd are handled by the same case because
-            // asm_fcmp() converts LIR_ltd(a,b) to LIR_gtd(b,a).  Likewise
+            // asm_cmpd() converts LIR_ltd(a,b) to LIR_gtd(b,a).  Likewise
            // for LIR_led/LIR_ged.
            if (branchOnFalse) {
                // op == LIR_xf
@@ -2557,7 +2557,7 @@ namespace nanojit
        }
 
        at = _nIns;
-        asm_fcmp(cond);
+        asm_cmpd(cond);
 
        return at;
    }
@@ -2565,7 +2565,7 @@ namespace nanojit
    // WARNING: This function cannot generate any code that will affect the
    // condition codes prior to the generation of the
    // ucomisd/fcompp/fcmop/fcom.  See asm_cmp() for more details.
-    void Assembler::asm_fcmp(LIns *cond)
+    void Assembler::asm_cmpd(LIns *cond)
    {
        LOpcode condop = cond->opcode();
        NanoAssert(isCmpDOpcode(condop));
@@ -2714,7 +2714,7 @@ namespace nanojit
        FNSTSW_AX();        // requires EAX to be free
        if (rhs->isImmD())
        {
-            const uint64_t* p = findImmDFromPool(rhs->immQ());
+            const uint64_t* p = findImmDFromPool(rhs->immDasQ());
            FCOMdm((pop?1:0), (const double*)p);
        }
        else
@@ -190,12 +190,12 @@ namespace nanojit
    void asm_farg(LInsp, int32_t& stkd);\
    void asm_arg(ArgType ty, LInsp p, Register r, int32_t& stkd);\
    void asm_pusharg(LInsp);\
-    void asm_fcmp(LIns *cond);\
-    NIns* asm_fbranch(bool, LIns*, NIns*);\
+    void asm_cmpd(LIns *cond);\
+    NIns* asm_branchd(bool, LIns*, NIns*);\
    void asm_cmp(LIns *cond); \
    void asm_div_mod(LIns *cond); \
    void asm_load(int d, Register r); \
-    void asm_immf(Register r, uint64_t q, double d, bool canClobberCCs); \
+    void asm_immd(Register r, uint64_t q, double d, bool canClobberCCs); \
    void IMM8(int32_t i) { \
        _nIns -= 1; \
        *((int8_t*)_nIns) = (int8_t)(i); \