Bug 507089 - TM/nanojit: prepare to add get/set methods for CallInfo::_argtypes. r=edwsmith.

--HG--
extra : convert_revision : 55f02d7976752940a9f328d440fb6601ee2dc9f4
Author: Nicholas Nethercote 2010-03-21 19:47:02 -07:00
parent 8e0decc494
commit 070e390dac
16 changed files with 225 additions and 221 deletions

View File

@@ -124,7 +124,7 @@ CL_64( LCALL_Q_Q2, 1) // 95% LIR_qcall
 CL_64( LCALL_Q_Q7, 1) // 96% LIR_qcall
 CL___( LCALL_F_F3, 1) // 97% LIR_fcall
 CL___( LCALL_F_F8, 1) // 98% LIR_fcall
-CL_64( LCALL_N_IQF, 1) // 99% LIR_icall or LIR_qcall
+CL_64( LCALL_V_IQF, 1) // 99% LIR_icall or LIR_qcall
 CL___( LLABEL, 1) //100% LIR_label

View File

@@ -154,12 +154,15 @@ enum ReturnType {
 #define FN(name, args) \
 {#name, CI(name, args)}
-const int I32 = nanojit::ARGSIZE_LO;
+const ArgType I32 = nanojit::ARGTYPE_LO;
 #ifdef NANOJIT_64BIT
-const int I64 = nanojit::ARGSIZE_Q;
+const ArgType I64 = nanojit::ARGTYPE_Q;
 #endif
-const int F64 = nanojit::ARGSIZE_F;
-const int PTR = nanojit::ARGSIZE_P;
+const ArgType F64 = nanojit::ARGTYPE_F;
+const ArgType PTR = nanojit::ARGTYPE_P;
+const ArgType WRD = nanojit::ARGTYPE_P;
+const ArgType VOID = nanojit::ARGTYPE_V;
 enum LirTokenType {
 NAME, NUMBER, PUNCT, NEWLINE
@@ -342,8 +345,8 @@ private:
 void endFragment();
 };
-// Meaning: arg 'm' of 'n' has size 'sz'.
-static int argMask(int sz, int m, int n)
+// Meaning: arg 'm' of 'n' has type 'ty'.
+static int argMask(int ty, int m, int n)
 {
 // Order examples, from MSB to LSB:
 // - 3 args: 000 | 000 | 000 | 000 | 000 | arg1| arg2| arg3| ret
@@ -351,13 +354,13 @@ static int argMask(int sz, int m, int n)
 // If the mask encoding reversed the arg order the 'n' parameter wouldn't
 // be necessary, as argN would always be in the same place in the
 // bitfield.
-return sz << ((1 + n - m) * ARGSIZE_SHIFT);
+return ty << ((1 + n - m) * ARGTYPE_SHIFT);
 }
-// Return value has size 'sz'.
-static int retMask(int sz)
+// Return value has type 'ty'.
+static int retMask(int ty)
 {
-return sz;
+return ty;
 }
 // 'sin' is overloaded on some platforms, so taking its address
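[Aside, not part of the patch: a minimal, self-contained sketch of how argMask() and retMask() pack a signature into the 3-bit-per-field encoding described above. The ArgType values are assumed from this changeset.]

    // Sketch only: mirrors the encoding above; enum values assumed from this patch.
    #include <cstdio>

    enum ArgType { ARGTYPE_V = 0, ARGTYPE_F = 1, ARGTYPE_I = 2, ARGTYPE_U = 3 };
    static const int ARGTYPE_SHIFT = 3;

    // Arg 'm' of 'n' has type 'ty'; args sit above the return type, MSB-first.
    static int argMask(int ty, int m, int n) { return ty << ((1 + n - m) * ARGTYPE_SHIFT); }
    // The return type occupies the lowest three bits.
    static int retMask(int ty) { return ty; }

    int main() {
        // int32_t f(double, uint32_t): fields MSB-to-LSB are arg1=F, arg2=U, ret=I.
        int sig = argMask(ARGTYPE_F, 1, 2) | argMask(ARGTYPE_U, 2, 2) | retMask(ARGTYPE_I);
        printf("0%o\n", sig);   // prints 0132: three octal digits = F(1), U(3), I(2)
        return 0;
    }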
@@ -371,8 +374,8 @@ double sinFn(double d) {
 Function functions[] = {
 FN(puts, argMask(PTR, 1, 1) | retMask(I32)),
 FN(sin, argMask(F64, 1, 1) | retMask(F64)),
-FN(malloc, argMask(PTR, 1, 1) | retMask(PTR)),
-FN(free, argMask(PTR, 1, 1) | retMask(I32))
+FN(malloc, argMask(WRD, 1, 1) | retMask(PTR)),
+FN(free, argMask(PTR, 1, 1) | retMask(VOID))
 };
 template<typename out, typename in> out
@@ -694,28 +697,28 @@ FragmentAssembler::assemble_call(const string &op)
 ci->_abi = _abi;
-ci->_argtypes = 0;
+ci->_typesig = 0;
 size_t argc = mTokens.size();
 for (size_t i = 0; i < argc; ++i) {
 args[i] = ref(mTokens[mTokens.size() - (i+1)]);
-if (args[i]->isF64()) ty = ARGSIZE_F;
+if (args[i]->isF64()) ty = ARGTYPE_F;
 #ifdef NANOJIT_64BIT
-else if (args[i]->isI64()) ty = ARGSIZE_Q;
+else if (args[i]->isI64()) ty = ARGTYPE_Q;
 #endif
-else ty = ARGSIZE_I;
+else ty = ARGTYPE_I;
 // Nb: i+1 because argMask() uses 1-based arg counting.
-ci->_argtypes |= argMask(ty, i+1, argc);
+ci->_typesig |= argMask(ty, i+1, argc);
 }
 // Select return type from opcode.
 ty = 0;
-if (mOpcode == LIR_icall) ty = ARGSIZE_LO;
-else if (mOpcode == LIR_fcall) ty = ARGSIZE_F;
+if (mOpcode == LIR_icall) ty = ARGTYPE_LO;
+else if (mOpcode == LIR_fcall) ty = ARGTYPE_F;
 #ifdef NANOJIT_64BIT
-else if (mOpcode == LIR_qcall) ty = ARGSIZE_Q;
+else if (mOpcode == LIR_qcall) ty = ARGTYPE_Q;
 #endif
 else nyi("callh");
-ci->_argtypes |= retMask(ty);
+ci->_typesig |= retMask(ty);
 }
 return mLir->insCall(ci, args);
@@ -1239,7 +1242,7 @@ static double f_F_F8(double a, double b, double c, double d,
 }
 #ifdef NANOJIT_64BIT
-static void f_N_IQF(int32_t, uint64_t, double)
+static void f_V_IQF(int32_t, uint64_t, double)
 {
 return; // no need to do anything
 }
@@ -1287,10 +1290,10 @@ const CallInfo ci_F_F8 = CI(f_F_F8, argMask(F64, 1, 8) |
 retMask(F64));
 #ifdef NANOJIT_64BIT
-const CallInfo ci_N_IQF = CI(f_N_IQF, argMask(I32, 1, 3) |
+const CallInfo ci_V_IQF = CI(f_V_IQF, argMask(I32, 1, 3) |
 argMask(I64, 2, 3) |
 argMask(F64, 3, 3) |
-retMask(ARGSIZE_NONE));
+retMask(ARGTYPE_V));
 #endif
 // Generate a random block containing nIns instructions, plus a few more
@@ -1920,11 +1923,11 @@ FragmentAssembler::assembleRandomFragment(int nIns)
 break;
 #ifdef NANOJIT_64BIT
-case LCALL_N_IQF:
+case LCALL_V_IQF:
 if (!Is.empty() && !Qs.empty() && !Fs.empty()) {
 // Nb: args[] holds the args in reverse order... sigh.
 LIns* args[3] = { rndPick(Fs), rndPick(Qs), rndPick(Is) };
-ins = mLir->insCall(&ci_N_IQF, args);
+ins = mLir->insCall(&ci_V_IQF, args);
 n++;
 }
 break;

View File

@@ -2385,35 +2385,6 @@ namespace nanojit
 }
 #endif // NJ_VERBOSE
-uint32_t CallInfo::_count_args(uint32_t mask) const
-{
-    uint32_t argc = 0;
-    uint32_t argt = _argtypes;
-    for (uint32_t i = 0; i < MAXARGS; ++i) {
-        argt >>= ARGSIZE_SHIFT;
-        if (!argt)
-            break;
-        argc += (argt & mask) != 0;
-    }
-    return argc;
-}
-uint32_t CallInfo::get_sizes(ArgSize* sizes) const
-{
-    uint32_t argt = _argtypes;
-    uint32_t argc = 0;
-    for (uint32_t i = 0; i < MAXARGS; i++) {
-        argt >>= ARGSIZE_SHIFT;
-        ArgSize a = ArgSize(argt & ARGSIZE_MASK_ANY);
-        if (a != ARGSIZE_NONE) {
-            sizes[argc++] = a;
-        } else {
-            break;
-        }
-    }
-    return argc;
-}
 void LabelStateMap::add(LIns *label, NIns *addr, RegAlloc &regs) {
 LabelState *st = new (alloc) LabelState(addr, regs);
 labels.put(label, st);

View File

@@ -81,6 +81,46 @@ namespace nanojit
 #endif /* NANOJIT_VERBOSE */
+uint32_t CallInfo::count_args() const
+{
+    uint32_t argc = 0;
+    uint32_t argt = _typesig;
+    argt >>= ARGTYPE_SHIFT;     // remove retType
+    while (argt) {
+        argc++;
+        argt >>= ARGTYPE_SHIFT;
+    }
+    return argc;
+}
+uint32_t CallInfo::count_int32_args() const
+{
+    uint32_t argc = 0;
+    uint32_t argt = _typesig;
+    argt >>= ARGTYPE_SHIFT;     // remove retType
+    while (argt) {
+        ArgType a = ArgType(argt & ARGTYPE_MASK);
+        if (a == ARGTYPE_I || a == ARGTYPE_U)
+            argc++;
+        argt >>= ARGTYPE_SHIFT;
+    }
+    return argc;
+}
+uint32_t CallInfo::getArgTypes(ArgType* argTypes) const
+{
+    uint32_t argc = 0;
+    uint32_t argt = _typesig;
+    argt >>= ARGTYPE_SHIFT;     // remove retType
+    while (argt) {
+        ArgType a = ArgType(argt & ARGTYPE_MASK);
+        argTypes[argc] = a;
+        argc++;
+        argt >>= ARGTYPE_SHIFT;
+    }
+    return argc;
+}
 // implementation
 #ifdef NJ_VERBOSE
 void ReverseLister::finish()
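[Aside, not part of the patch: a standalone sketch of the decoding walk the three new methods above share — shift off the return type, then peel one 3-bit field per argument, right-to-left. Values assumed from this changeset.]

    // Sketch only; ArgType values and constants assumed from this patch.
    #include <cstdint>
    #include <cstdio>

    enum ArgType { ARGTYPE_V = 0, ARGTYPE_F = 1, ARGTYPE_I = 2, ARGTYPE_U = 3 };
    static const int ARGTYPE_SHIFT = 3;
    static const int ARGTYPE_MASK  = 0x7;

    int main() {
        // typesig for int32_t f(double, uint32_t): F | U | ret=I, MSB to LSB.
        uint32_t typesig = (ARGTYPE_F << 2*ARGTYPE_SHIFT)
                         | (ARGTYPE_U << 1*ARGTYPE_SHIFT)
                         |  ARGTYPE_I;
        uint32_t argt = typesig >> ARGTYPE_SHIFT;          // remove retType
        while (argt) {
            printf("arg type %u\n", argt & ARGTYPE_MASK);  // 3 (U), then 1 (F)
            argt >>= ARGTYPE_SHIFT;                        // right-to-left walk
        }
        return 0;
    }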
@@ -2324,11 +2364,11 @@ namespace nanojit
 static int32_t FASTCALL fle(double a, double b) { return a <= b; }
 static int32_t FASTCALL fge(double a, double b) { return a >= b; }
-#define SIG_F_I (ARGSIZE_F | ARGSIZE_I << ARGSIZE_SHIFT*1)
-#define SIG_F_U (ARGSIZE_F | ARGSIZE_U << ARGSIZE_SHIFT*1)
-#define SIG_F_F (ARGSIZE_F | ARGSIZE_F << ARGSIZE_SHIFT*1)
-#define SIG_F_FF (ARGSIZE_F | ARGSIZE_F << ARGSIZE_SHIFT*1 | ARGSIZE_F << ARGSIZE_SHIFT*2)
-#define SIG_B_FF (ARGSIZE_B | ARGSIZE_F << ARGSIZE_SHIFT*1 | ARGSIZE_F << ARGSIZE_SHIFT*2)
+#define SIG_F_I (ARGTYPE_F | ARGTYPE_I << ARGTYPE_SHIFT*1)
+#define SIG_F_U (ARGTYPE_F | ARGTYPE_U << ARGTYPE_SHIFT*1)
+#define SIG_F_F (ARGTYPE_F | ARGTYPE_F << ARGTYPE_SHIFT*1)
+#define SIG_F_FF (ARGTYPE_F | ARGTYPE_F << ARGTYPE_SHIFT*1 | ARGTYPE_F << ARGTYPE_SHIFT*2)
+#define SIG_B_FF (ARGTYPE_B | ARGTYPE_F << ARGTYPE_SHIFT*1 | ARGTYPE_F << ARGTYPE_SHIFT*2)
 #define SF_CALLINFO(name, typesig) \
 static const CallInfo name##_ci = \
@@ -2418,14 +2458,13 @@ namespace nanojit
 }
 LIns* SoftFloatFilter::insCall(const CallInfo *ci, LInsp args[]) {
-uint32_t argt = ci->_argtypes;
-for (uint32_t i = 0, argsizes = argt >> ARGSIZE_SHIFT; argsizes != 0; i++, argsizes >>= ARGSIZE_SHIFT)
+uint32_t nArgs = ci->count_args();
+for (uint32_t i = 0; i < nArgs; i++)
 args[i] = split(args[i]);
-if ((argt & ARGSIZE_MASK_ANY) == ARGSIZE_F) {
-// this function returns a double as two 32bit values, so replace
-// call with qjoin(qhi(call), call)
+if (ci->returnType() == ARGTYPE_F) {
+// This function returns a double as two 32bit values, so replace
+// call with qjoin(qhi(call), call).
 return split(ci, args);
 }
 return out->insCall(ci, args);
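[Aside, not part of the patch: the qjoin(qhi(call), call) rewrite relies on the soft-float convention that a double travels as two 32-bit halves. A plain C++ sketch of that view — illustrative, not nanojit code.]

    // Sketch only: a double reinterpreted as its two 32-bit halves.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
        double d = 3.14;
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);    // type-pun safely
        uint32_t lo = (uint32_t)bits;           // half carried by the call itself
        uint32_t hi = (uint32_t)(bits >> 32);   // half recovered via qhi(call)
        printf("hi=%08x lo=%08x\n", hi, lo);    // qjoin(hi, lo) rebuilds the double
        return 0;
    }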
@@ -2876,8 +2915,8 @@ namespace nanojit
 LIns* ValidateWriter::insCall(const CallInfo *ci, LIns* args0[])
 {
-ArgSize sizes[MAXARGS];
-uint32_t nArgs = ci->get_sizes(sizes);
+ArgType argTypes[MAXARGS];
+uint32_t nArgs = ci->getArgTypes(argTypes);
 LTy formals[MAXARGS];
 LIns* args[MAXARGS]; // in left-to-right order, unlike args0[]
@@ -2890,20 +2929,20 @@ namespace nanojit
 errorAccSetShould(lirNames[op], ci->_storeAccSet,
 "not contain bits that aren't in ACC_STORE_ANY");
-// This loop iterates over the args from right-to-left (because
-// arg() and get_sizes() use right-to-left order), but puts the
-// results into formals[] and args[] in left-to-right order so
-// that arg numbers in error messages make sense to the user.
+// This loop iterates over the args from right-to-left (because arg()
+// and getArgTypes() use right-to-left order), but puts the results
+// into formals[] and args[] in left-to-right order so that arg
+// numbers in error messages make sense to the user.
 for (uint32_t i = 0; i < nArgs; i++) {
 uint32_t i2 = nArgs - i - 1; // converts right-to-left to left-to-right
-switch (sizes[i]) {
-case ARGSIZE_I:
-case ARGSIZE_U: formals[i2] = LTy_I32; break;
+switch (argTypes[i]) {
+case ARGTYPE_I:
+case ARGTYPE_U: formals[i2] = LTy_I32; break;
 #ifdef NANOJIT_64BIT
-case ARGSIZE_Q: formals[i2] = LTy_I64; break;
+case ARGTYPE_Q: formals[i2] = LTy_I64; break;
 #endif
-case ARGSIZE_F: formals[i2] = LTy_F64; break;
-default: NanoAssert(0); formals[i2] = LTy_Void; break;
+case ARGTYPE_F: formals[i2] = LTy_F64; break;
+default: NanoAssertMsgf(0, "%d %s\n", argTypes[i], ci->_name); formals[i2] = LTy_Void; break;
 }
 args[i2] = args0[i];
 }

View File

@@ -146,25 +146,26 @@ namespace nanojit
 ABI_CDECL
 };
-enum ArgSize {
-ARGSIZE_NONE = 0,
-ARGSIZE_F = 1, // double (64bit)
-ARGSIZE_I = 2, // int32_t
+// All values must fit into three bits. See CallInfo for details.
+enum ArgType {
+ARGTYPE_V = 0, // void
+ARGTYPE_F = 1, // double (64bit)
+ARGTYPE_I = 2, // int32_t
+ARGTYPE_U = 3, // uint32_t
 #ifdef NANOJIT_64BIT
-ARGSIZE_Q = 3, // uint64_t
+ARGTYPE_Q = 4, // uint64_t
 #endif
-ARGSIZE_U = 6, // uint32_t
-ARGSIZE_MASK_ANY = 7,
-ARGSIZE_MASK_INT = 2,
-ARGSIZE_SHIFT = 3,
 // aliases
-ARGSIZE_P = PTR_SIZE(ARGSIZE_I, ARGSIZE_Q), // pointer
-ARGSIZE_LO = ARGSIZE_I, // int32_t
-ARGSIZE_B = ARGSIZE_I, // bool
-ARGSIZE_V = ARGSIZE_NONE // void
+ARGTYPE_P = PTR_SIZE(ARGTYPE_I, ARGTYPE_Q), // pointer
+ARGTYPE_LO = ARGTYPE_I, // int32_t
+ARGTYPE_B = ARGTYPE_I // bool
 };
+// In _typesig, each entry is three bits.
+static const int ARGTYPE_SHIFT = 3;
+static const int ARGTYPE_MASK = 0x7;
 enum IndirectCall {
 CALL_INDIRECT = 0
 };
@@ -290,38 +291,28 @@ namespace nanojit
 struct CallInfo
 {
+private:
+public:
 uintptr_t _address;
-uint32_t _argtypes:27; // 9 3-bit fields indicating arg type, by ARGSIZE above (including ret type): a1 a2 a3 a4 a5 ret
+uint32_t _typesig:27; // 9 3-bit fields indicating arg type, by ARGTYPE above (including ret type): a1 a2 a3 a4 a5 ret
 AbiKind _abi:3;
 uint8_t _isPure:1; // _isPure=1 means no side-effects, result only depends on args
 AccSet _storeAccSet; // access regions stored by the function
 verbose_only ( const char* _name; )
-uint32_t _count_args(uint32_t mask) const;
+uint32_t count_args() const;
+uint32_t count_int32_args() const;
 // Nb: uses right-to-left order, eg. sizes[0] is the size of the right-most arg.
-uint32_t get_sizes(ArgSize* sizes) const;
+uint32_t getArgTypes(ArgType* types) const;
-inline ArgSize returnType() const {
-return ArgSize(_argtypes & ARGSIZE_MASK_ANY);
-}
+inline ArgType returnType() const {
+return ArgType(_typesig & ARGTYPE_MASK);
+}
-// Note that this indexes arguments *backwards*, that is to
-// get the Nth arg, you have to ask for index (numargs - N).
-// See mozilla bug 525815 for fixing this.
-inline ArgSize argType(uint32_t arg) const {
-return ArgSize((_argtypes >> (ARGSIZE_SHIFT * (arg+1))) & ARGSIZE_MASK_ANY);
-}
 inline bool isIndirect() const {
 return _address < 256;
 }
-inline uint32_t count_args() const {
-return _count_args(ARGSIZE_MASK_ANY);
-}
-inline uint32_t count_iargs() const {
-return _count_args(ARGSIZE_MASK_INT);
-}
-// fargs = args - iargs
 };
 /*
@@ -408,14 +399,14 @@ namespace nanojit
 inline LOpcode getCallOpcode(const CallInfo* ci) {
 LOpcode op = LIR_pcall;
 switch (ci->returnType()) {
-case ARGSIZE_NONE: op = LIR_pcall; break;
-case ARGSIZE_I:
-case ARGSIZE_U: op = LIR_icall; break;
-case ARGSIZE_F: op = LIR_fcall; break;
+case ARGTYPE_V: op = LIR_pcall; break;
+case ARGTYPE_I:
+case ARGTYPE_U: op = LIR_icall; break;
+case ARGTYPE_F: op = LIR_fcall; break;
 #ifdef NANOJIT_64BIT
-case ARGSIZE_Q: op = LIR_qcall; break;
+case ARGTYPE_Q: op = LIR_qcall; break;
 #endif
 default: NanoAssert(0); break;
 }
 return op;
 }
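[Aside, not part of the patch: a compilable sketch of the same returnType-to-opcode dispatch, with nanojit's LOpcode and CallInfo stubbed out.]

    // Sketch only: stubs stand in for nanojit's real declarations.
    #include <cassert>

    enum ArgType { ARGTYPE_V = 0, ARGTYPE_F = 1, ARGTYPE_I = 2, ARGTYPE_U = 3, ARGTYPE_Q = 4 };
    enum LOpcode { LIR_pcall, LIR_icall, LIR_fcall, LIR_qcall };

    LOpcode getCallOpcode(ArgType retType) {
        switch (retType) {
            case ARGTYPE_V: return LIR_pcall;   // void result: pointer-call form
            case ARGTYPE_I:
            case ARGTYPE_U: return LIR_icall;   // 32-bit integer result
            case ARGTYPE_F: return LIR_fcall;   // double result
            case ARGTYPE_Q: return LIR_qcall;   // 64-bit integer result
        }
        assert(0);
        return LIR_pcall;
    }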

View File

@@ -597,19 +597,19 @@ Assembler::genEpilogue()
 * alignment.
 */
 void
-Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd)
+Assembler::asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd)
 {
 // The stack pointer must always be at least aligned to 4 bytes.
 NanoAssert((stkd & 3) == 0);
-if (sz == ARGSIZE_F) {
+if (ty == ARGTYPE_F) {
 // This task is fairly complex and so is delegated to asm_arg_64.
 asm_arg_64(arg, r, stkd);
 } else {
-NanoAssert(sz == ARGSIZE_I || sz == ARGSIZE_U);
+NanoAssert(ty == ARGTYPE_I || ty == ARGTYPE_U);
 // pre-assign registers R0-R3 for arguments (if they fit)
 if (r < R4) {
-asm_regarg(sz, arg, r);
+asm_regarg(ty, arg, r);
 r = nextreg(r);
 } else {
 asm_stkarg(arg, stkd);
@@ -620,7 +620,7 @@ Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd)
 // Encode a 64-bit floating-point argument using the appropriate ABI.
 // This function operates in the same way as asm_arg, except that it will only
-// handle arguments where (ArgSize)sz == ARGSIZE_F.
+// handle arguments where (ArgType)ty == ARGTYPE_F.
 void
 Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
 {
@@ -665,8 +665,8 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
 if (_config.arm_vfp) {
 FMRRD(ra, rb, fp_reg);
 } else {
-asm_regarg(ARGSIZE_LO, arg->oprnd1(), ra);
-asm_regarg(ARGSIZE_LO, arg->oprnd2(), rb);
+asm_regarg(ARGTYPE_LO, arg->oprnd1(), ra);
+asm_regarg(ARGTYPE_LO, arg->oprnd2(), rb);
 }
 #ifndef NJ_ARM_EABI
@@ -699,7 +699,7 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
 // Without VFP, we can simply use asm_regarg and asm_stkarg to
 // encode the two 32-bit words as we don't need to load from a VFP
 // register.
-asm_regarg(ARGSIZE_LO, arg->oprnd1(), ra);
+asm_regarg(ARGTYPE_LO, arg->oprnd1(), ra);
 asm_stkarg(arg->oprnd2(), 0);
 stkd += 4;
 }
@@ -720,10 +720,10 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
 }
 void
-Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
+Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
 {
 NanoAssert(deprecated_isKnownReg(r));
-if (sz & ARGSIZE_MASK_INT)
+if (ty == ARGTYPE_I || ty == ARGTYPE_U)
 {
 // arg goes in specific register
 if (p->isconst()) {
@@ -752,7 +752,7 @@ Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
 }
 else
 {
-NanoAssert(sz == ARGSIZE_F);
+NanoAssert(ty == ARGTYPE_F);
 // fpu argument in register - should never happen since FPU
 // args are converted to two 32-bit ints on ARM
 NanoAssert(false);
@@ -848,10 +848,10 @@ Assembler::asm_call(LInsp ins)
 evictScratchRegsExcept(0);
-const CallInfo* call = ins->callInfo();
-ArgSize sizes[MAXARGS];
-uint32_t argc = call->get_sizes(sizes);
-bool indirect = call->isIndirect();
+const CallInfo* ci = ins->callInfo();
+ArgType argTypes[MAXARGS];
+uint32_t argc = ci->getArgTypes(argTypes);
+bool indirect = ci->isIndirect();
 // If we aren't using VFP, assert that the LIR operation is an integer
 // function call.
@@ -863,11 +863,11 @@ Assembler::asm_call(LInsp ins)
 // for floating point calls, but not for integer calls.
 if (_config.arm_vfp && ins->isUsed()) {
 // Determine the size (and type) of the instruction result.
-ArgSize rsize = (ArgSize)(call->_argtypes & ARGSIZE_MASK_ANY);
+ArgType rsize = (ArgType)(ci->_typesig & ARGTYPE_MASK_ANY);
 // If the result size is a floating-point value, treat the result
 // specially, as described previously.
-if (rsize == ARGSIZE_F) {
+if (ci->returnType() == ARGTYPE_F) {
 Register rr = ins->deprecated_getReg();
 NanoAssert(ins->opcode() == LIR_fcall);
@@ -902,7 +902,7 @@ Assembler::asm_call(LInsp ins)
 // interlock in the "long" branch sequence by manually loading the
 // target address into LR ourselves before setting up the parameters
 // in other registers.
-BranchWithLink((NIns*)call->_address);
+BranchWithLink((NIns*)ci->_address);
 } else {
 // Indirect call: we assign the address arg to LR since it's not
 // used for regular arguments, and is otherwise scratch since it's
@@ -917,7 +917,7 @@ Assembler::asm_call(LInsp ins)
 } else {
 BLX(LR);
 }
-asm_regarg(ARGSIZE_LO, ins->arg(--argc), LR);
+asm_regarg(ARGTYPE_LO, ins->arg(--argc), LR);
 }
 // Encode the arguments, starting at R0 and with an empty argument stack.
@@ -930,7 +930,7 @@ Assembler::asm_call(LInsp ins)
 // in reverse order.
 uint32_t i = argc;
 while(i--) {
-asm_arg(sizes[i], ins->arg(i), r, stkd);
+asm_arg(argTypes[i], ins->arg(i), r, stkd);
 }
 if (stkd > max_out_args) {
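[Aside, not part of the patch: a deliberately simplified model of the placement policy asm_call/asm_arg implement above — integer args fill R0-R3 while they last, a double travels as two 32-bit words, and the overflow lands at 4-byte-aligned stack offsets. The "IIFI" signature is hypothetical, and EABI register-pairing rules are ignored.]

    // Simplified sketch of ARM argument placement; not the real assembler logic.
    #include <cstdio>

    int main() {
        int r = 0;                       // next core register (R0..R3)
        int stkd = 0;                    // next stack offset, kept 4-byte aligned
        const char* sig = "IIFI";        // I = 32-bit int, F = double (two words)
        for (const char* p = sig; *p; ++p) {
            int words = (*p == 'F') ? 2 : 1;   // doubles become two 32-bit words
            for (int w = 0; w < words; ++w) {
                if (r < 4) {
                    printf("%c word %d -> R%d\n", *p, w, r++);
                } else {
                    printf("%c word %d -> [sp+%d]\n", *p, w, stkd);
                    stkd += 4;
                }
            }
        }
        return 0;
    }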

View File

@@ -220,14 +220,14 @@ verbose_only( extern const char* shiftNames[]; )
 void nativePageReset(); \
 void nativePageSetup(); \
 void asm_immf_nochk(Register, int32_t, int32_t); \
-void asm_regarg(ArgSize, LInsp, Register); \
+void asm_regarg(ArgType, LInsp, Register); \
 void asm_stkarg(LInsp p, int stkd); \
 void asm_cmpi(Register, int32_t imm); \
 void asm_ldr_chk(Register d, Register b, int32_t off, bool chk); \
 void asm_cmp(LIns *cond); \
 void asm_fcmp(LIns *cond); \
 void asm_ld_imm(Register d, int32_t imm, bool chk = true); \
-void asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd); \
+void asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd); \
 void asm_arg_64(LInsp arg, Register& r, int& stkd); \
 void asm_add_imm(Register rd, Register rn, int32_t imm, int stat = 0); \
 void asm_sub_imm(Register rd, Register rn, int32_t imm, int stat = 0); \

View File

@@ -389,10 +389,10 @@ namespace nanojit
 }
 }
-void Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
+void Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
 {
 NanoAssert(deprecated_isKnownReg(r));
-if (sz & ARGSIZE_MASK_INT) {
+if (ty == ARGTYPE_I || ty == ARGTYPE_U) {
 // arg goes in specific register
 if (p->isconst())
 asm_li(r, p->imm32());
@@ -464,7 +464,7 @@ namespace nanojit
 // Encode a 64-bit floating-point argument using the appropriate ABI.
 // This function operates in the same way as asm_arg, except that it will only
-// handle arguments where (ArgSize)sz == ARGSIZE_F.
+// handle arguments where (ArgType)ty == ARGTYPE_F.
 void
 Assembler::asm_arg_64(LInsp arg, Register& r, Register& fr, int& stkd)
 {
@@ -1505,18 +1505,18 @@ namespace nanojit
 * on the stack.
 */
 void
-Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, Register& fr, int& stkd)
+Assembler::asm_arg(ArgType ty, LInsp arg, Register& r, Register& fr, int& stkd)
 {
 // The stack offset must always be at least aligned to 4 bytes.
 NanoAssert((stkd & 3) == 0);
-if (sz == ARGSIZE_F) {
+if (ty == ARGTYPE_F) {
 // This task is fairly complex and so is delegated to asm_arg_64.
 asm_arg_64(arg, r, fr, stkd);
-}
-else if (sz & ARGSIZE_MASK_INT) {
+} else {
+NanoAssert(ty == ARGTYPE_I || ty == ARGTYPE_U);
 if (stkd < 16) {
-asm_regarg(sz, arg, r);
+asm_regarg(ty, arg, r);
 fr = nextreg(fr);
 r = nextreg(r);
 }
@@ -1527,11 +1527,6 @@ namespace nanojit
 fr = r;
 stkd += 4;
 }
-else {
-NanoAssert(sz == ARGSIZE_Q);
-// shouldn't have 64 bit int params
-NanoAssert(false);
-}
 }
@@ -1560,10 +1555,10 @@ namespace nanojit
 evictScratchRegsExcept(0);
-const CallInfo* call = ins->callInfo();
-ArgSize sizes[MAXARGS];
-uint32_t argc = call->get_sizes(sizes);
-bool indirect = call->isIndirect();
+const CallInfo* ci = ins->callInfo();
+ArgType argTypes[MAXARGS];
+uint32_t argc = ci->getArgTypes(argTypes);
+bool indirect = ci->isIndirect();
 // FIXME: Put one of the argument moves into the BDS slot
@@ -1574,11 +1569,11 @@ namespace nanojit
 if (!indirect)
 // FIXME: If we can tell that we are calling non-PIC
 // (ie JIT) code, we could call direct instead of using t9
-asm_li(T9, call->_address);
+asm_li(T9, ci->_address);
 else
 // Indirect call: we assign the address arg to t9
 // which matches the o32 ABI for calling functions
-asm_regarg(ARGSIZE_P, ins->arg(--argc), T9);
+asm_regarg(ARGTYPE_P, ins->arg(--argc), T9);
 // Encode the arguments, starting at A0 and with an empty argument stack.
 Register r = A0, fr = FA0;
@@ -1589,7 +1584,7 @@ namespace nanojit
 // Note that we loop through the arguments backwards as LIR specifies them
 // in reverse order.
 while(argc--)
-asm_arg(sizes[argc], ins->arg(argc), r, fr, stkd);
+asm_arg(argTypes[argc], ins->arg(argc), r, fr, stkd);
 if (stkd > max_out_args)
 max_out_args = stkd;

View File

@@ -179,9 +179,9 @@ namespace nanojit
 NIns *asm_branch_near(bool, LIns*, NIns*); \
 void asm_cmp(LOpcode condop, LIns *a, LIns *b, Register cr); \
 void asm_move(Register d, Register s); \
-void asm_regarg(ArgSize sz, LInsp p, Register r); \
+void asm_regarg(ArgType ty, LInsp p, Register r); \
 void asm_stkarg(LInsp arg, int stkd); \
-void asm_arg(ArgSize sz, LInsp arg, Register& r, Register& fr, int& stkd); \
+void asm_arg(ArgType ty, LInsp arg, Register& r, Register& fr, int& stkd); \
 void asm_arg_64(LInsp arg, Register& r, Register& fr, int& stkd) ;

View File

@@ -683,8 +683,8 @@ namespace nanojit
 evictScratchRegsExcept(0);
 const CallInfo* call = ins->callInfo();
-ArgSize sizes[MAXARGS];
-uint32_t argc = call->get_sizes(sizes);
+ArgType argTypes[MAXARGS];
+uint32_t argc = call->getArgTypes(argTypes);
 bool indirect;
 if (!(indirect = call->isIndirect())) {
@@ -699,7 +699,7 @@ namespace nanojit
 underrunProtect(8); // underrunProtect might clobber CTR
 BCTRL();
 MTCTR(R11);
-asm_regarg(ARGSIZE_P, ins->arg(--argc), R11);
+asm_regarg(ARGTYPE_P, ins->arg(--argc), R11);
 }
 int param_size = 0;
@@ -708,22 +708,22 @@ namespace nanojit
 Register fr = F1;
 for(uint32_t i = 0; i < argc; i++) {
 uint32_t j = argc - i - 1;
-ArgSize sz = sizes[j];
+ArgType ty = argTypes[j];
 LInsp arg = ins->arg(j);
-if (sz & ARGSIZE_MASK_INT) {
+if (ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) {
 // GP arg
 if (r <= R10) {
-asm_regarg(sz, arg, r);
+asm_regarg(ty, arg, r);
 r = nextreg(r);
 param_size += sizeof(void*);
 } else {
 // put arg on stack
 TODO(stack_int32);
 }
-} else if (sz == ARGSIZE_F) {
+} else if (ty == ARGTYPE_F) {
 // double
 if (fr <= F13) {
-asm_regarg(sz, arg, fr);
+asm_regarg(ty, arg, fr);
 fr = nextreg(fr);
 #ifdef NANOJIT_64BIT
 r = nextreg(r);
@@ -736,23 +736,23 @@ namespace nanojit
 TODO(stack_double);
 }
 } else {
-TODO(ARGSIZE_UNK);
+TODO(ARGTYPE_UNK);
 }
 }
 if (param_size > max_param_size)
 max_param_size = param_size;
 }
-void Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
+void Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
 {
 NanoAssert(r != deprecated_UnknownReg);
-if (sz & ARGSIZE_MASK_INT)
+if (ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q)
 {
 #ifdef NANOJIT_64BIT
-if (sz == ARGSIZE_I) {
+if (ty == ARGTYPE_I) {
 // sign extend 32->64
 EXTSW(r, r);
-} else if (sz == ARGSIZE_U) {
+} else if (ty == ARGTYPE_U) {
 // zero extend 32->64
 CLRLDI(r, r, 32);
 }
@@ -785,7 +785,7 @@ namespace nanojit
 }
 }
 }
-else if (sz == ARGSIZE_F) {
+else if (ty == ARGTYPE_F) {
 if (p->isUsed()) {
 Register rp = p->deprecated_getReg();
 if (!deprecated_isKnownReg(rp) || !IsFpReg(rp)) {
@@ -805,7 +805,7 @@ namespace nanojit
 }
 }
 else {
-TODO(ARGSIZE_UNK);
+TODO(ARGTYPE_UNK);
 }
 }

View File

@@ -287,7 +287,7 @@ namespace nanojit
 void nativePageSetup(); \
 void br(NIns *addr, int link); \
 void br_far(NIns *addr, int link); \
-void asm_regarg(ArgSize, LIns*, Register); \
+void asm_regarg(ArgType, LIns*, Register); \
 void asm_li(Register r, int32_t imm); \
 void asm_li32(Register r, int32_t imm); \
 void asm_li64(Register r, uint64_t imm); \

View File

@@ -161,21 +161,21 @@ namespace nanojit
 evictScratchRegsExcept(0);
-const CallInfo* call = ins->callInfo();
+const CallInfo* ci = ins->callInfo();
 underrunProtect(8);
 NOP();
-ArgSize sizes[MAXARGS];
-uint32_t argc = call->get_sizes(sizes);
+ArgType argTypes[MAXARGS];
+uint32_t argc = ci->getArgTypes(argTypes);
 NanoAssert(ins->isop(LIR_pcall) || ins->isop(LIR_fcall));
 verbose_only(if (_logc->lcbits & LC_Assembly)
 outputf(" %p:", _nIns);
 )
-bool indirect = call->isIndirect();
+bool indirect = ci->isIndirect();
 if (!indirect) {
-CALL(call);
+CALL(ci);
 }
 else {
 argc--;
@@ -189,8 +189,8 @@ namespace nanojit
 for(int i=0; i<argc; i++)
 {
 uint32_t j = argc-i-1;
-ArgSize sz = sizes[j];
-if (sz == ARGSIZE_F) {
+ArgType ty = argTypes[j];
+if (ty == ARGTYPE_F) {
 Register r = findRegFor(ins->arg(j), FpRegs);
 GPRIndex += 2;
 offset += 8;

View File

@@ -893,8 +893,8 @@ namespace nanojit
 evictScratchRegsExcept(rmask(rr));
 const CallInfo *call = ins->callInfo();
-ArgSize sizes[MAXARGS];
-int argc = call->get_sizes(sizes);
+ArgType argTypes[MAXARGS];
+int argc = call->getArgTypes(argTypes);
 if (!call->isIndirect()) {
 verbose_only(if (_logc->lcbits & LC_Assembly)
@@ -921,7 +921,7 @@ namespace nanojit
 // Assign the call address to RAX. Must happen after freeResourcesOf()
 // since RAX is usually the return value and will be allocated until that point.
-asm_regarg(ARGSIZE_P, ins->arg(--argc), RAX);
+asm_regarg(ARGTYPE_P, ins->arg(--argc), RAX);
 }
 #ifdef _WIN64
@@ -933,28 +933,28 @@ namespace nanojit
 int arg_index = 0;
 for (int i = 0; i < argc; i++) {
 int j = argc - i - 1;
-ArgSize sz = sizes[j];
+ArgType ty = argTypes[j];
 LIns* arg = ins->arg(j);
-if ((sz & ARGSIZE_MASK_INT) && arg_index < NumArgRegs) {
+if ((ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) && arg_index < NumArgRegs) {
 // gp arg
-asm_regarg(sz, arg, argRegs[arg_index]);
+asm_regarg(ty, arg, argRegs[arg_index]);
 arg_index++;
 }
 #ifdef _WIN64
-else if (sz == ARGSIZE_F && arg_index < NumArgRegs) {
+else if (ty == ARGTYPE_F && arg_index < NumArgRegs) {
 // double goes in XMM reg # based on overall arg_index
-asm_regarg(sz, arg, Register(XMM0+arg_index));
+asm_regarg(ty, arg, Register(XMM0+arg_index));
 arg_index++;
 }
 #else
-else if (sz == ARGSIZE_F && fr < XMM8) {
+else if (ty == ARGTYPE_F && fr < XMM8) {
 // double goes in next available XMM register
-asm_regarg(sz, arg, fr);
+asm_regarg(ty, arg, fr);
 fr = nextreg(fr);
 }
 #endif
 else {
-asm_stkarg(sz, arg, stk_used);
+asm_stkarg(ty, arg, stk_used);
 stk_used += sizeof(void*);
 }
 }
@@ -963,8 +963,8 @@ namespace nanojit
 max_stk_used = stk_used;
 }
-void Assembler::asm_regarg(ArgSize sz, LIns *p, Register r) {
-if (sz == ARGSIZE_I) {
+void Assembler::asm_regarg(ArgType ty, LIns *p, Register r) {
+if (ty == ARGTYPE_I) {
 NanoAssert(p->isI32());
 if (p->isconst()) {
 asm_immq(r, int64_t(p->imm32()), /*canClobberCCs*/true);
@@ -972,7 +972,7 @@ namespace nanojit
 }
 // sign extend int32 to int64
 MOVSXDR(r, r);
-} else if (sz == ARGSIZE_U) {
+} else if (ty == ARGTYPE_U) {
 NanoAssert(p->isI32());
 if (p->isconst()) {
 asm_immq(r, uint64_t(uint32_t(p->imm32())), /*canClobberCCs*/true);
@@ -980,6 +980,8 @@ namespace nanojit
 }
 // zero extend with 32bit mov, auto-zeros upper 32bits
 MOVLR(r, r);
+} else {
+// Do nothing.
 }
 /* there is no point in folding an immediate here, because
 * the argument register must be a scratch register and we're
@@ -991,19 +993,22 @@ namespace nanojit
 findSpecificRegFor(p, r);
 }
-void Assembler::asm_stkarg(ArgSize sz, LIns *p, int stk_off) {
+void Assembler::asm_stkarg(ArgType ty, LIns *p, int stk_off) {
 NanoAssert(isS8(stk_off));
-if (sz & ARGSIZE_MASK_INT) {
+if (ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) {
 Register r = findRegFor(p, GpRegs);
 MOVQSPR(stk_off, r); // movq [rsp+d8], r
-if (sz == ARGSIZE_I) {
+if (ty == ARGTYPE_I) {
 // extend int32 to int64
 NanoAssert(p->isI32());
 MOVSXDR(r, r);
-} else if (sz == ARGSIZE_U) {
+} else if (ty == ARGTYPE_U) {
 // extend uint32 to uint64
 NanoAssert(p->isI32());
 MOVLR(r, r);
+} else {
+NanoAssert(ty == ARGTYPE_Q);
+// Do nothing.
 }
 } else {
 TODO(asm_stkarg_non_int);
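[Aside, not part of the patch: the ARGTYPE_I/ARGTYPE_U split matters on x64 because 32-bit arguments must be widened differently — sign-extended via MOVSXDR versus zero-extended via a 32-bit MOV, as in the code above. A plain C++ sketch of the two widenings:]

    // Sketch only: sign- vs zero-extension of a 32-bit argument to 64 bits.
    #include <cstdint>
    #include <cstdio>

    int main() {
        int32_t  i = -1;
        uint32_t u = 0xFFFFFFFFu;
        int64_t  wi = i;    // sign extend  (what MOVSXDR does)
        uint64_t wu = u;    // zero extend  (what a 32-bit MOV does)
        printf("%016llx %016llx\n", (unsigned long long)wi, (unsigned long long)wu);
        return 0;           // prints ffffffffffffffff 00000000ffffffff
    }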

View File

@@ -395,8 +395,8 @@ namespace nanojit
 void asm_immi(Register r, int32_t v, bool canClobberCCs);\
 void asm_immq(Register r, uint64_t v, bool canClobberCCs);\
 void asm_immf(Register r, uint64_t v, bool canClobberCCs);\
-void asm_regarg(ArgSize, LIns*, Register);\
-void asm_stkarg(ArgSize, LIns*, int);\
+void asm_regarg(ArgType, LIns*, Register);\
+void asm_stkarg(ArgType, LIns*, int);\
 void asm_shift(LIns*);\
 void asm_shift_imm(LIns*);\
 void asm_arith_imm(LIns*);\

View File

@@ -168,7 +168,7 @@ namespace nanojit
 const CallInfo* call = ins->callInfo();
 // must be signed, not unsigned
-uint32_t iargs = call->count_iargs();
+uint32_t iargs = call->count_int32_args();
 int32_t fargs = call->count_args() - iargs;
 bool indirect = call->isIndirect();
@@ -237,13 +237,13 @@ namespace nanojit
 // Pre-assign registers to the first N 4B args based on the calling convention.
 uint32_t n = 0;
-ArgSize sizes[MAXARGS];
-uint32_t argc = call->get_sizes(sizes);
+ArgType argTypes[MAXARGS];
+uint32_t argc = call->getArgTypes(argTypes);
 int32_t stkd = 0;
 if (indirect) {
 argc--;
-asm_arg(ARGSIZE_P, ins->arg(argc), EAX, stkd);
+asm_arg(ARGTYPE_P, ins->arg(argc), EAX, stkd);
 if (!_config.i386_fixed_esp)
 stkd = 0;
 }
@@ -251,12 +251,12 @@ namespace nanojit
 for (uint32_t i = 0; i < argc; i++)
 {
 uint32_t j = argc-i-1;
-ArgSize sz = sizes[j];
+ArgType ty = argTypes[j];
 Register r = UnspecifiedReg;
-if (n < max_regs && sz != ARGSIZE_F) {
+if (n < max_regs && ty != ARGTYPE_F) {
 r = argRegs[n++]; // tell asm_arg what reg to use
 }
-asm_arg(sz, ins->arg(j), r, stkd);
+asm_arg(ty, ins->arg(j), r, stkd);
 if (!_config.i386_fixed_esp)
 stkd = 0;
 }
@@ -1377,12 +1377,12 @@ namespace nanojit
 }
 }
-void Assembler::asm_arg(ArgSize sz, LInsp ins, Register r, int32_t& stkd)
+void Assembler::asm_arg(ArgType ty, LInsp ins, Register r, int32_t& stkd)
 {
 // If 'r' is known, then that's the register we have to put 'ins'
 // into.
-if (sz == ARGSIZE_I || sz == ARGSIZE_U) {
+if (ty == ARGTYPE_I || ty == ARGTYPE_U) {
 if (r != UnspecifiedReg) {
 if (ins->isconst()) {
 // Rematerialize the constant.
@@ -1413,7 +1413,7 @@ namespace nanojit
 }
 } else {
-NanoAssert(sz == ARGSIZE_F);
+NanoAssert(ty == ARGTYPE_F);
 asm_farg(ins, stkd);
 }
 }

View File

@@ -184,7 +184,7 @@ namespace nanojit
 void asm_immi(Register r, int32_t val, bool canClobberCCs);\
 void asm_stkarg(LInsp p, int32_t& stkd);\
 void asm_farg(LInsp, int32_t& stkd);\
-void asm_arg(ArgSize sz, LInsp p, Register r, int32_t& stkd);\
+void asm_arg(ArgType ty, LInsp p, Register r, int32_t& stkd);\
 void asm_pusharg(LInsp);\
 void asm_fcmp(LIns *cond);\
 NIns* asm_fbranch(bool, LIns*, NIns*);\
@@ -968,23 +968,23 @@ namespace nanojit
 #define EMMS() do { count_fpu(); FPUc(0x0f77); asm_output("emms"); } while (0)
 // standard direct call
-#define CALL(c) do { \
+#define CALL(ci) do { \
 count_call();\
 underrunProtect(5); \
-int offset = (c->_address) - ((int)_nIns); \
+int offset = (ci->_address) - ((int)_nIns); \
 IMM32( (uint32_t)offset ); \
 *(--_nIns) = 0xE8; \
-verbose_only(asm_output("call %s",(c->_name));) \
-debug_only(if ((c->_argtypes & ARGSIZE_MASK_ANY)==ARGSIZE_F) fpu_push();)\
+verbose_only(asm_output("call %s",(ci->_name));) \
+debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();)\
 } while (0)
 // indirect call thru register
-#define CALLr(c,r) do { \
+#define CALLr(ci,r) do { \
 count_calli();\
 underrunProtect(2);\
 ALU(0xff, 2, (r));\
 verbose_only(asm_output("call %s",gpn(r));) \
-debug_only(if ((c->_argtypes & ARGSIZE_MASK_ANY)==ARGSIZE_F) fpu_push();)\
+debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();)\
 } while (0)
 }
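[Aside, not part of the patch: for context on the CALL macro above, 0xE8 is the x86 near-call opcode followed by a 32-bit displacement measured from the end of the instruction — which is where _nIns points here, since nanojit emits code backwards. A standalone sketch of that encoding, with a made-up target address:]

    // Sketch only: building the 5-byte E8 rel32 form the CALL macro emits.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
        uint8_t buf[5];
        uintptr_t end_of_call = (uintptr_t)buf + sizeof buf;  // next instruction
        uintptr_t target      = end_of_call + 0x1234;         // hypothetical callee
        int32_t   disp        = (int32_t)(target - end_of_call);
        buf[0] = 0xE8;                       // near call, relative
        std::memcpy(&buf[1], &disp, 4);      // little-endian rel32
        printf("E8 rel32=%#x\n", disp);
        return 0;
    }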