Whitespace hygene (r=me)
--HG-- extra : convert_revision : b7a4852a92d3ceb74592c1da0e1cd0beace84eb5
parent 4015262424
commit 245eaeb9f7
@@ -99,7 +99,7 @@ namespace nanojit
NanoAssert(_entries[i] != BAD_ENTRY);
for (uint32_t i = _highWaterMark+1; i < NJ_MAX_STACK_ENTRY; ++i)
NanoAssert(_entries[i] == BAD_ENTRY);
}

#endif

@@ -111,13 +111,13 @@ namespace nanojit
_entries[i] = BAD_ENTRY;
#endif
}

bool AR::Iter::next(LIns*& ins, uint32_t& nStackSlots, int32_t& arIndex)
{
while (++_i <= _ar._highWaterMark)
{
if ((ins = _ar._entries[_i]) != NULL)
{
nStackSlots = nStackSlotsFor(ins);
_i += nStackSlots - 1;
arIndex = _i;
@@ -160,7 +160,7 @@ namespace nanojit
NanoAssert(ins->isUsed());

if (allowedAndFree) {
// At least one usable register is free -- no need to steal.
// Pick a preferred one if possible.
RegisterMask preferredAndFree = allowedAndFree & SavedRegs;
RegisterMask set = ( preferredAndFree ? preferredAndFree : allowedAndFree );
@@ -196,7 +196,7 @@ namespace nanojit
{
LIns dummyIns;
dummyIns.markAsUsed();
Register r = registerAlloc(&dummyIns, allow);

// Mark r as free, ready for use as a temporary value.
_allocator.removeActive(r);
@@ -278,7 +278,7 @@ namespace nanojit
if (ins->isop(LIR_alloc)) {
int const n = i + (ins->size()>>2);
for (int j=i+1; j < n; j++) {
NanoAssert(_entries[j]==ins);
}
NanoAssert(arIndex == (uint32_t)n-1);
i = n-1;
@@ -499,7 +499,7 @@ namespace nanojit

return r;
}

int Assembler::findMemFor(LIns *ins)
{
if (!ins->isUsed())
@@ -1427,8 +1427,8 @@ namespace nanojit
intersectRegisterState(label->regs);
label->addr = _nIns;
}
verbose_only( if (_logc->lcbits & LC_Assembly) {
asm_output("[%s]", _thisfrag->lirbuf->names->formatRef(ins));
})
break;
}
@@ -1709,7 +1709,7 @@ namespace nanojit
s += VMPI_strlen(s);
VMPI_sprintf(s, "AR");
s += VMPI_strlen(s);

LIns* ins = 0;
uint32_t nStackSlots = 0;
int32_t arIndex = 0;
@@ -1730,7 +1730,7 @@ namespace nanojit

inline bool AR::isEmptyRange(uint32_t start, uint32_t nStackSlots) const
{
for (uint32_t i=0; i < nStackSlots; i++)
{
if (_entries[start-i] != NULL)
return false;
@@ -1742,11 +1742,11 @@ namespace nanojit
{
uint32_t const nStackSlots = nStackSlotsFor(ins);

if (nStackSlots == 1)
{
for (uint32_t i = 1; i <= _highWaterMark; i++)
{
if (_entries[i] == NULL)
{
_entries[i] = ins;
return i;
@@ -1760,16 +1760,16 @@ namespace nanojit
return _highWaterMark;
}
}
else
{
// alloc larger block on 8byte boundary.
uint32_t const start = nStackSlots + (nStackSlots & 1);
for (uint32_t i = start; i <= _highWaterMark; i += 2)
{
if (isEmptyRange(i, nStackSlots))
{
// place the entry in the table and mark the instruction with it
for (uint32_t j=0; j < nStackSlots; j++)
{
NanoAssert(i-j <= _highWaterMark);
NanoAssert(_entries[i-j] == NULL);
@@ -1923,7 +1923,7 @@ namespace nanojit
}
}
}

/**
* Merge the current regstate with a previously stored version.
* current == saved skip
@@ -112,22 +112,22 @@ namespace nanojit

bool isEmptyRange(uint32_t start, uint32_t nStackSlots) const;
static uint32_t nStackSlotsFor(LIns* ins);

public:

uint32_t stackSlotsNeeded() const;

void clear();
void freeEntryAt(uint32_t i);
uint32_t reserveEntry(LIns* ins); /* return 0 if unable to reserve the entry */

#ifdef _DEBUG
void validate();
bool isValidEntry(uint32_t idx, LIns* ins) const; /* return true iff idx and ins are matched */
void checkForResourceConsistency(const RegAlloc& regs) const;
void checkForResourceLeaks() const;
#endif

class Iter
{
private:
@@ -144,19 +144,19 @@ namespace nanojit
return ins->isop(LIR_alloc) ? (ins->size()>>2) : (ins->isQuad() ? 2 : 1);
}

inline uint32_t AR::stackSlotsNeeded() const
{
// NB: _highWaterMark is an index, not a count
return _highWaterMark+1;
}

#ifndef AVMPLUS_ALIGN16
#ifdef AVMPLUS_WIN32
#define AVMPLUS_ALIGN16(type) __declspec(align(16)) type
#else
#define AVMPLUS_ALIGN16(type) type __attribute__ ((aligned (16)))
#endif
#endif

struct Stats
{
@@ -263,12 +263,12 @@ extern "C" void __clear_cache(char *BEG, char *END);
#ifdef __linux__ // bugzilla 502369
void sync_instruction_memory(caddr_t v, u_int len)
{
caddr_t end = v + len;
caddr_t p = v;
while (p < end) {
asm("flush %0" : : "r" (p));
p += 32;
}
}
#else
extern "C" void sync_instruction_memory(caddr_t v, u_int len);
@@ -324,12 +324,12 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
void CodeAlloc::flushICache(void *start, size_t len) {
cacheflush((int)start, (int)start + len, 0);
}
#else
// fixme: __clear_cache is a libgcc feature, test for libgcc or gcc
void CodeAlloc::flushICache(void *start, size_t len) {
__clear_cache((char*)start, (char*)start + len);
}
#endif
#endif // AVMPLUS_MAC && NANOJIT_PPC

void CodeAlloc::addBlock(CodeList* &blocks, CodeList* b) {
@@ -953,7 +953,7 @@ namespace nanojit
// Nb: this must be kept in sync with arg().
LInsp* args2 = (LInsp*)_buf->_allocator.alloc(argc * sizeof(LInsp));
memcpy(args2, args, argc * sizeof(LInsp));

// Allocate and write the call instruction.
LInsC* insC = (LInsC*)_buf->makeRoom(sizeof(LInsC));
LIns* ins = insC->getLIns();
@@ -1032,7 +1032,7 @@ namespace nanojit
spTop >>= 2;
rpTop >>= 2;
}

return i;
}
}
@@ -1407,7 +1407,7 @@ namespace nanojit
SeqBuilder<RetiredEntry*> retired;
int retiredCount;
int maxlive;
LiveTable(Allocator& alloc)
: alloc(alloc)
, live(alloc)
, retired(alloc)
@@ -2173,7 +2173,7 @@ namespace nanojit
logc->printf("=== -- Compile trunk %s: begin\n",
labels->format(frag));
})

// Used for debug printing, if needed
verbose_only(
ReverseLister *pp_init = NULL;
@@ -2205,16 +2205,16 @@ namespace nanojit
"After StackFilter");
prev = pp_after_sf;
})

assm->assemble(frag, prev);

// If we were accumulating debug info in the various ReverseListers,
// call finish() to emit whatever contents they have accumulated.
verbose_only(
if (pp_init) pp_init->finish();
if (pp_after_sf) pp_after_sf->finish();
)

verbose_only( if (anyVerb) {
logc->printf("=== -- Compile trunk %s: end\n",
labels->format(frag));
@@ -2267,7 +2267,7 @@ namespace nanojit

LInsp LoadFilter::insLoad(LOpcode v, LInsp base, int32_t disp)
{
if (base != sp && base != rp)
{
switch (v)
{
@@ -236,9 +236,9 @@ namespace nanojit
// oprnd_1
// opcode + resv ] <-- LIns* ins
//
// - LIR_skip instructions are used to link code chunks. If the first
// instruction on a chunk isn't a LIR_start, it will be a skip, and the
// skip's operand will point to the last LIns on the preceding chunk.
// LInsSk has the same layout as LInsOp1, but we represent it as a
// different class because there are some places where we treat
// skips specially and so having it separate seems like a good idea.
@@ -1464,7 +1464,7 @@ namespace nanojit

// Returns next instruction and advances to the prior instruction.
// Invariant: never returns a skip.
LInsp read();

// Returns next instruction. Invariant: never returns a skip.
LInsp pos() {
@@ -1506,7 +1506,7 @@ namespace nanojit
public:
LoadFilter(LirWriter *out, Allocator& alloc)
: LirWriter(out), sp(NULL), rp(NULL)
{
uint32_t kInitialCaps[LInsLast + 1];
kInitialCaps[LInsImm] = 1;
kInitialCaps[LInsImmq] = 1;
@@ -117,7 +117,7 @@ Assembler::CountLeadingZeroes(uint32_t data)
// (even though this is a legal instruction there). Since we currently only compile for ARMv5
// for emulation, we don't care too much (but we DO care for ARMv6+ since those are "real"
// devices).
#elif defined(__GNUC__) && !(defined(ANDROID) && __ARM_ARCH__ <= 5)
// GCC can use inline assembler to insert a CLZ instruction.
__asm (
" clz %0, %1 \n"
@@ -596,7 +596,7 @@ Assembler::genEpilogue()
* - both doubles and 32-bit arguments are placed on stack with 32-bit
* alignment.
*/
void
Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd)
{
// The stack pointer must always be at least aligned to 4 bytes.
@@ -722,7 +722,7 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
}
}

void
Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
{
NanoAssert(isKnownReg(r));
@@ -1047,7 +1047,7 @@ do_peep_2_1(/*OUT*/NIns* merged, NIns i1, NIns i2)
ld/str rY, [fp, #-4]
==>
ld/stmdb fp, {rX, rY}
when
X < Y and X != fp and Y != fp and X != 15 and Y != 15
*/
if (is_ldstr_reg_fp_minus_imm(&isLoadX, &rX, &immX, i1) &&
@@ -1264,7 +1264,7 @@ Assembler::asm_restore(LInsp i, Register r)
// See if we can merge this load into an immediately following
// one, by creating or extending an LDM instruction.
if (/* is it safe to poke _nIns[1] ? */
does_next_instruction_exist(_nIns, codeStart, codeEnd,
exitStart, exitEnd)
&& /* can we merge _nIns[0] into _nIns[1] ? */
do_peep_2_1(&merged, _nIns[0], _nIns[1])) {
@@ -1295,7 +1295,7 @@ Assembler::asm_spill(Register rr, int d, bool pop, bool quad)
// See if we can merge this store into an immediately following one,
// one, by creating or extending a STM instruction.
if (/* is it safe to poke _nIns[1] ? */
does_next_instruction_exist(_nIns, codeStart, codeEnd,
exitStart, exitEnd)
&& /* can we merge _nIns[0] into _nIns[1] ? */
do_peep_2_1(&merged, _nIns[0], _nIns[1])) {
@@ -2252,7 +2252,7 @@ Assembler::asm_cond(LInsp ins)
{
Register r = prepResultReg(ins, AllowableFlagRegs);
LOpcode op = ins->opcode();

switch(op)
{
case LIR_eq: SETEQ(r); break;
@@ -2379,7 +2379,7 @@ Assembler::asm_arith(LInsp ins)
// We try to use rb as the first operand by default because it is
// common for (rr == ra) and is thus likely to be the most
// efficient method.

if ((ARM_ARCH > 5) || (rr != rb)) {
// IP is used to temporarily store the high word of the result from
// SMULL, so we make use of this to perform an overflow check, as
@@ -2394,7 +2394,7 @@ Assembler::asm_arith(LInsp ins)
} else {
// ARM_ARCH is ARMv5 (or below) and rr == rb, so we must
// find a different way to encode the instruction.

// If possible, swap the arguments to avoid the restriction.
if (rr != ra) {
// We know that rr == rb, so this will be something like
@@ -2415,7 +2415,7 @@ Assembler::asm_arith(LInsp ins)
// bits are zero.
// - Any argument lower than (or equal to) 0xffff that
// also overflows is guaranteed to set output bit 31.
//
// Thus, we know we have _not_ overflowed if:
// abs(rX)&0xffff0000 == 0 AND result[31] == 0
//
@@ -2436,7 +2436,7 @@ Assembler::asm_arith(LInsp ins)
}
}
break;

// The shift operations need a mask to match the JavaScript
// specification because the ARM architecture allows a greater shift
// range than JavaScript.
@@ -209,9 +209,9 @@ verbose_only( extern const char* shiftNames[]; )
inline uint32_t decOp2Imm(uint32_t enc);
#else
// define stubs, for code that defines NJ_VERBOSE without DEBUG
# define DECLARE_PLATFORM_ASSEMBLER_DEBUG() \
inline bool isOp2Imm(uint32_t ) { return true; } \
inline uint32_t decOp2Imm(uint32_t ) { return 0; }
#endif

#define DECLARE_PLATFORM_ASSEMBLER() \
@@ -247,7 +247,7 @@ verbose_only( extern const char* shiftNames[]; )
int * _nSlot; \
int * _nExitSlot; \
bool blx_lr_bug; \
int max_out_args; /* bytes */

#define IMM32(imm) *(--_nIns) = (NIns)((imm));

@@ -1260,7 +1260,7 @@ namespace nanojit
}
Register r = prepResultReg(ins, GpRegs); // x64 can use any GPR as setcc target
MOVZX8(r, r);
if (op == LIR_fgt)
SETA(r);
else
SETAE(r);
@@ -1401,7 +1401,7 @@ namespace nanojit
regalloc_load(ins, FpRegs, rr, dr, rb);
NanoAssert(IsFpReg(rr));
CVTSS2SD(rr, rr);
MOVSSRM(rr, dr, rb);
break;
default:
NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
@@ -1508,9 +1508,9 @@ namespace nanojit

void Assembler::asm_store32(LOpcode op, LIns *value, int d, LIns *base) {

// quirk of x86-64: reg cannot appear to be ah/bh/ch/dh
// for single-byte stores with REX prefix
const RegisterMask SrcRegs =
(op == LIR_stb) ?
(GpRegs & ~(1<<RSP | 1<<RBP | 1<<RSI | 1<<RDI)) :
GpRegs;
@@ -279,11 +279,11 @@ namespace nanojit
ArgSize sizes[MAXARGS];
uint32_t argc = call->get_sizes(sizes);
int32_t stkd = 0;

if (indirect) {
argc--;
asm_arg(ARGSIZE_P, ins->arg(argc), EAX, stkd);
if (!config.fixed_esp)
stkd = 0;
}

@@ -296,7 +296,7 @@ namespace nanojit
r = argRegs[n++]; // tell asm_arg what reg to use
}
asm_arg(sz, ins->arg(j), r, stkd);
if (!config.fixed_esp)
stkd = 0;
}

@@ -572,7 +572,7 @@ namespace nanojit
case LIR_ldc32f:
SSE_CVTSS2SD(rr, rr);
SSE_LDSS(rr, db, rb);
SSE_XORPDr(rr,rr);
break;
default:
NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
@@ -581,7 +581,7 @@ namespace nanojit
}
else
{

int dr = disp(ins);
Register rb;
if (base->isop(LIR_alloc)) {
@@ -622,7 +622,7 @@ namespace nanojit
else
{
// We need to use fpu to expand 32->64, can't use asm_mmq...
// just load-and-store-with-pop.
NanoAssert(dr != 0);
FSTPQ(dr, FP);
FLD32(db, rb);
@@ -694,7 +694,7 @@ namespace nanojit
}
}

// Copy 64 bits: (rd+dd) <- (rs+ds).
//
void Assembler::asm_mmq(Register rd, int dd, Register rs, int ds)
{
@@ -799,7 +799,7 @@ namespace nanojit
// LIR: eq1 = eq a, 0
// LIR: test edx, edx
// asm: sete ebx
// asm: movzx ebx, ebx
//
// In this case we end up computing the condition twice, but that's ok, as
// it's just as short as testing eq1's value in the code generated for the
@@ -1434,7 +1434,7 @@ namespace nanojit
}
}

// negateMask is used by asm_fneg.
#if defined __SUNPRO_CC
// From Sun Studio C++ Readme: #pragma align inside namespace requires mangled names.
// Initialize here to avoid multithreading contention issues during initialization.
@@ -1609,7 +1609,7 @@ namespace nanojit
SSE_STQ(stkd, SP, r);
} else {
FSTPQ(stkd, SP);

//
// 22Jul09 rickr - Enabling the evict causes a 10% slowdown on primes
//
@@ -1617,7 +1617,7 @@ namespace nanojit
// We need to resolve the bug some other way.
//
// see https://bugzilla.mozilla.org/show_bug.cgi?id=491084

/* It's possible that the same LIns* with r=FST0 will appear in the argument list more
* than once. In this case FST0 will not have been evicted and the multiple pop
* actions will unbalance the FPU stack. A quick fix is to always evict FST0 manually.
@@ -1877,7 +1877,7 @@ namespace nanojit
// ------- --- --------- --- -------
// UNORDERED 111 0100_0100 001 SETNP/JNP fails
// EQUAL 100 0100_0000 000 SETNP/JNP succeeds
// GREATER_THAN 000 0000_0000 011 SETNP/JNP fails
// LESS_THAN 001 0000_0000 011 SETNP/JNP fails

evictIfActive(EAX);
@@ -1894,8 +1894,8 @@ namespace nanojit
// ------- --- -------
// UNORDERED 111 SETA/JA fails
// EQUAL 100 SETA/JA fails
// GREATER_THAN 000 SETA/JA succeeds
// LESS_THAN 001 SETA/JA fails
//
// LIR_fge:
// ucomisd ZPC outcome (SETAE/JAE succeeds if C==0)
@@ -1942,7 +1942,7 @@ namespace nanojit
// ------- -------- --------- --- -------
// UNORDERED 111 0100_0100 001 SETNP fails
// EQUAL 100 0100_0000 000 SETNP succeeds
// GREATER_THAN 000 0000_0000 011 SETNP fails
// LESS_THAN 001 0000_0000 011 SETNP fails
//
// LIR_flt:
@@ -1950,7 +1950,7 @@ namespace nanojit
// ------- -------- --------- --- -------
// UNORDERED 111 0000_0101 001 SETNP fails
// EQUAL 100 0000_0000 011 SETNP fails
// GREATER_THAN 000 0000_0000 011 SETNP fails
// LESS_THAN 001 0000_0001 000 SETNP succeeds
//
// LIR_fle:
@@ -1958,7 +1958,7 @@ namespace nanojit
// ------- --- --------- --- -------
// UNORDERED 111 0100_0001 001 SETNP fails
// EQUAL 100 0100_0000 000 SETNP succeeds
// GREATER_THAN 000 0000_0000 011 SETNP fails
// LESS_THAN 001 0000_0001 010 SETNP succeeds

int mask = 0; // init to avoid MSVC compile warnings
@@ -2053,6 +2053,6 @@ namespace nanojit
SWAP(NIns*, codeEnd, exitEnd);
verbose_only( SWAP(size_t, codeBytes, exitBytes); )
}

#endif /* FEATURE_NANOJIT */
}
@@ -189,11 +189,11 @@ namespace nanojit
#define IMM8(i) \
_nIns -= 1; \
*((int8_t*)_nIns) = (int8_t)(i)

#define IMM16(i) \
_nIns -= 2; \
*((int16_t*)_nIns) = (int16_t)(i)

#define IMM32(i) \
_nIns -= 4; \
*((int32_t*)_nIns) = (int32_t)(i)
@@ -451,8 +451,8 @@ namespace nanojit

// note: movzx/movsx are being output with an 8/16 suffix to indicate the size
// being loaded. this doesn't really match standard intel format (though is arguably
// terser and more obvious in this case) and would probably be nice to fix.
// (likewise, the 8/16 bit stores being output as "mov8" and "mov16" respectively.)

// load 16-bit, sign extend
#define LD16S(r,d,b) do { count_ld(); ALU2m(0x0fbf,r,d,b); asm_output("movsx16 %s,%d(%s)", gpn(r),d,gpn(b)); } while(0)
@@ -140,7 +140,7 @@ namespace nanojit
// and thus available for use. At the start of register
// allocation most registers are free; those that are not
// aren't available for general use, e.g. the stack pointer and
// frame pointer registers.
//
// - 'managed' is exactly this list of initially free registers,
// ie. the registers managed by the register allocator.
@@ -166,10 +166,10 @@ namespace nanojit
// * An LIns can appear at most once in 'active'.
//
// * An LIns named by 'active[R]' must have an in-use
// reservation that names R.
//
// * And vice versa: an LIns with an in-use reservation that
// names R must be named by 'active[R]'.
//
// * If an LIns's reservation names 'UnknownReg' then LIns
// should not be in 'active'.
@@ -98,10 +98,10 @@ extern void VMPI_setPageProtection(void *address,
// Keep this warning-set relatively in sync with platform/win32/win32-platform.h in tamarin.

#ifdef _MSC_VER
#pragma warning(disable:4201) // nonstandard extension used : nameless struct/union
#pragma warning(disable:4512) // assignment operator could not be generated
#pragma warning(disable:4511) // can't generate copy ctor
#pragma warning(disable:4127) // conditional expression is constant - appears to be compiler noise primarily
#pragma warning(disable:4611) // interaction between _setjmp and destruct
#pragma warning(disable:4725) // instruction may be inaccurate on some Pentiums
#pragma warning(disable:4611) // interaction between '_setjmp' and C++ object destruction is non-portable
@@ -152,10 +152,10 @@ namespace nanojit

#ifdef AVMPLUS_VERBOSE
#ifndef NJ_VERBOSE_DISABLED
#define NJ_VERBOSE 1
#endif
#ifndef NJ_PROFILE_DISABLED
#define NJ_PROFILE 1
#endif
#endif
