diff --git a/js/src/nanojit/NativePPC.cpp b/js/src/nanojit/NativePPC.cpp
index 6897c64a14c..95a8dbfa051 100644
--- a/js/src/nanojit/NativePPC.cpp
+++ b/js/src/nanojit/NativePPC.cpp
@@ -214,8 +214,8 @@ namespace nanojit
         switch (ins->opcode()) {
             case LIR_ldf:
             case LIR_ldfc:
-            case LIR_ldq:
-            case LIR_ldqc:
+            CASE64(LIR_ldq:)
+            CASE64(LIR_ldqc:)
                 // handled by mainline code below for now
                 break;
             case LIR_ld32f:
@@ -308,7 +308,7 @@ namespace nanojit
 
         switch (op) {
             case LIR_stfi:
-            case LIR_stqi:
+            CASE64(LIR_stqi:)
                 // handled by mainline code below for now
                 break;
             case LIR_st32f:
@@ -321,18 +321,6 @@ namespace nanojit
 
         Register ra = getBaseReg(base, dr, GpRegs);
 
-    #if !PEDANTIC && !defined NANOJIT_64BIT
-        if (value->isop(LIR_quad) && isS16(dr) && isS16(dr+4)) {
-            // quad constant and short offset
-            uint64_t q = value->imm64();
-            STW(R0, dr, ra);   // hi
-            asm_li(R0, int32_t(q>>32)); // hi
-            STW(R0, dr+4, ra); // lo
-            asm_li(R0, int32_t(q));     // lo
-            return;
-        }
-    #endif // !PEDANTIC
-
         // general case for any value
     #if !defined NANOJIT_64BIT
         // on 32bit cpu's, we only use store64 for doubles
@@ -379,30 +367,30 @@ namespace nanojit
         Register r = deprecated_prepResultReg(ins, GpRegs);
         switch (op) {
         case LIR_eq: case LIR_feq:
-        case LIR_qeq:
+        CASE64(LIR_qeq:)
            EXTRWI(r, r, 1, 4*cr+COND_eq); // extract CR7.eq
            MFCR(r);
            break;
        case LIR_lt: case LIR_ult:
        case LIR_flt: case LIR_fle:
-        case LIR_qlt: case LIR_qult:
+        CASE64(LIR_qlt:) CASE64(LIR_qult:)
            EXTRWI(r, r, 1, 4*cr+COND_lt); // extract CR7.lt
            MFCR(r);
            break;
        case LIR_gt: case LIR_ugt:
        case LIR_fgt: case LIR_fge:
-        case LIR_qgt: case LIR_qugt:
+        CASE64(LIR_qgt:) CASE64(LIR_qugt:)
            EXTRWI(r, r, 1, 4*cr+COND_gt); // extract CR7.gt
            MFCR(r);
            break;
        case LIR_le: case LIR_ule:
-        case LIR_qle: case LIR_qule:
+        CASE64(LIR_qle:) CASE64(LIR_qule:)
            EXTRWI(r, r, 1, 4*cr+COND_eq); // extract CR7.eq
            MFCR(r);
            CROR(CR7, eq, lt, eq);
            break;
        case LIR_ge: case LIR_uge:
-        case LIR_qge: case LIR_quge:
+        CASE64(LIR_qge:) CASE64(LIR_quge:)
            EXTRWI(r, r, 1, 4*cr+COND_eq); // select CR7.eq
            MFCR(r);
            CROR(CR7, eq, gt, eq);
@@ -469,25 +457,25 @@ namespace nanojit
 
        switch (cond->opcode()) {
        case LIR_eq: case LIR_feq:
-        case LIR_qeq:
+        CASE64(LIR_qeq:)
            if (onfalse) BNE(cr,bd); else BEQ(cr,bd);
            break;
        case LIR_lt: case LIR_ult:
        case LIR_flt: case LIR_fle:
-        case LIR_qlt: case LIR_qult:
+        CASE64(LIR_qlt:) CASE64(LIR_qult:)
            if (onfalse) BNL(cr,bd); else BLT(cr,bd);
            break;
        case LIR_le: case LIR_ule:
-        case LIR_qle: case LIR_qule:
+        CASE64(LIR_qle:) CASE64(LIR_qule:)
            if (onfalse) BGT(cr,bd); else BLE(cr,bd);
            break;
        case LIR_gt: case LIR_ugt:
        case LIR_fgt: case LIR_fge:
-        case LIR_qgt: case LIR_qugt:
+        CASE64(LIR_qgt:) CASE64(LIR_qugt:)
            if (onfalse) BNG(cr,bd); else BGT(cr,bd);
            break;
        case LIR_ge: case LIR_uge:
-        case LIR_qge: case LIR_quge:
+        CASE64(LIR_qge:) CASE64(LIR_quge:)
            if (onfalse) BLT(cr,bd); else BGE(cr,bd);
            break;
        default:
@@ -507,25 +495,25 @@ namespace nanojit
 
        switch (condop) {
        case LIR_eq: case LIR_feq:
-        case LIR_qeq:
+        CASE64(LIR_qeq:)
            if (onfalse) BNECTR(cr); else BEQCTR(cr);
            break;
        case LIR_lt: case LIR_ult:
-        case LIR_qlt: case LIR_qult:
+        CASE64(LIR_qlt:) CASE64(LIR_qult:)
        case LIR_flt: case LIR_fle:
            if (onfalse) BNLCTR(cr); else BLTCTR(cr);
            break;
        case LIR_le: case LIR_ule:
-        case LIR_qle: case LIR_qule:
+        CASE64(LIR_qle:) CASE64(LIR_qule:)
            if (onfalse) BGTCTR(cr); else BLECTR(cr);
            break;
        case LIR_gt: case LIR_ugt:
-        case LIR_qgt: case LIR_qugt:
+        CASE64(LIR_qgt:) CASE64(LIR_qugt:)
        case LIR_fgt: case LIR_fge:
            if (onfalse) BNGCTR(cr); else BGTCTR(cr);
            break;
        case LIR_ge: case LIR_uge:
-        case LIR_qge: case LIR_quge:
+        CASE64(LIR_qge:) CASE64(LIR_quge:)
            if (onfalse) BLTCTR(cr); else BGECTR(cr);
            break;
        default:
@@ -559,22 +547,26 @@ namespace nanojit
                    CMPWI(cr, ra, d);
                    return;
                }
+    #if defined NANOJIT_64BIT
                if (condop >= LIR_qeq && condop <= LIR_qge) {
                    CMPDI(cr, ra, d);
                    TODO(cmpdi);
                    return;
                }
+    #endif
            }
            if (isU16(d)) {
                if ((condop == LIR_eq || condop >= LIR_ult && condop <= LIR_uge)) {
                    CMPLWI(cr, ra, d);
                    return;
                }
+    #if defined NANOJIT_64BIT
                if ((condop == LIR_qeq || condop >= LIR_qult && condop <= LIR_quge)) {
                    CMPLDI(cr, ra, d);
                    TODO(cmpldi);
                    return;
                }
+    #endif
            }
        }
    #endif
@@ -583,14 +575,18 @@ namespace nanojit
        Register rb = b==a ? ra : findRegFor(b, allow & ~rmask(ra));
        if (condop >= LIR_eq && condop <= LIR_ge) {
            CMPW(cr, ra, rb);
-        } else if (condop >= LIR_ult && condop <= LIR_uge) {
+        }
+        else if (condop >= LIR_ult && condop <= LIR_uge) {
            CMPLW(cr, ra, rb);
-        } else if (condop >= LIR_qeq && condop <= LIR_qge) {
+        }
+    #if defined NANOJIT_64BIT
+        else if (condop >= LIR_qeq && condop <= LIR_qge) {
            CMPD(cr, ra, rb);
        } else if (condop >= LIR_qult && condop <= LIR_quge) {
            CMPLD(cr, ra, rb);
        }
 
+    #endif
        else if (condop >= LIR_feq && condop <= LIR_fge) {
            // set the lt/gt bit for fle/fge. We don't do this for
            // int/uint because in those cases we can invert the branch condition.
@@ -850,7 +846,7 @@ namespace nanojit
            // ppc arith immediate ops sign-exted the imm16 value
            switch (op) {
            case LIR_add:
-            case LIR_iaddp:
+            CASE32(LIR_iaddp:)
            CASE64(LIR_qiadd:)
            CASE64(LIR_qaddp:)
                ADDI(rr, ra, rhsc);
@@ -901,7 +897,7 @@ namespace nanojit
        CASE64(LIR_qiadd:)
        CASE64(LIR_qaddp:)
        case LIR_add:
-        case LIR_iaddp:
+        CASE32(LIR_iaddp:)
            ADD(rr, ra, rb);
            break;
        CASE64(LIR_qiand:)
@@ -1010,6 +1006,7 @@ namespace nanojit
        NanoAssertMsg(0, "NJ_F2I_SUPPORTED not yet supported for this architecture");
    }
 
+    #if defined NANOJIT_64BIT
    // XXX: this is sub-optimal, see https://bugzilla.mozilla.org/show_bug.cgi?id=540368#c7.
    void Assembler::asm_q2i(LIns *ins) {
        Register rr = deprecated_prepResultReg(ins, GpRegs);
@@ -1033,7 +1030,8 @@ namespace nanojit
            break;
        }
    }
-
+    #endif
+
    void Assembler::asm_quad(LIns *ins) {
    #ifdef NANOJIT_64BIT
        Register r = ins->deprecated_getReg();
@@ -1172,9 +1170,13 @@ namespace nanojit
        LIns* iffalse = ins->oprnd3();
        NanoAssert(cond->isCmp());
 
+    #ifdef NANOJIT_64BIT
        NanoAssert((ins->opcode() == LIR_cmov && iftrue->isI32() && iffalse->isI32()) ||
                   (ins->opcode() == LIR_qcmov && iftrue->isI64() && iffalse->isI64()));
-
+    #else
+        NanoAssert((ins->opcode() == LIR_cmov && iftrue->isI32() && iffalse->isI32()));
+    #endif
+
        // fixme: we could handle fpu registers here, too, since we're just branching
        Register rr = deprecated_prepResultReg(ins, GpRegs);
        findSpecificRegFor(iftrue, rr);
@@ -1188,8 +1190,12 @@ namespace nanojit
    RegisterMask Assembler::hint(LIns* ins) {
        LOpcode op = ins->opcode();
        RegisterMask prefer = 0;
-        if (op == LIR_icall || op == LIR_qcall)
+        if (op == LIR_icall)
            prefer = rmask(R3);
+    #ifdef NANOJIT_64BIT
+        else if (op == LIR_qcall)
+            prefer = rmask(R3);
+    #endif
        else if (op == LIR_fcall)
            prefer = rmask(F1);
        else if (op == LIR_param) {
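
Note on the CASE64()/CASE32() labels introduced above: these are assumed to be the usual nanojit helper macros (the same ones already used for LIR_qiadd/LIR_qaddp in the surrounding context) that emit a case label only in 64-bit or only in 32-bit builds, so that LIR opcodes which do not exist for the target word size drop out of the switch statements. A minimal sketch of how such macros are typically defined, offered as an assumption rather than as part of this patch:

    #ifdef NANOJIT_64BIT
        #define CASE32(x)               // 32-bit-only opcode: no label emitted
        #define CASE64(x)   case x      // 64-bit-only opcode: emit the case label
    #else
        #define CASE32(x)   case x      // 32-bit-only opcode: emit the case label
        #define CASE64(x)               // 64-bit-only opcode: no label emitted
    #endif

With these definitions, CASE64(LIR_ldq:) expands to "case LIR_ldq:" on 64-bit builds and to nothing on 32-bit builds, which is why the patch can remove the explicit #ifdef-free case labels for the quad (LIR_q*) opcodes without breaking the 32-bit PPC build.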