Bug 507089 - TM/nanojit: prepare to add get/set methods for CallInfo::_argtypes. r=edwsmith.

--HG--
extra : convert_revision : 55f02d7976752940a9f328d440fb6601ee2dc9f4
Nicholas Nethercote 2010-03-21 19:47:02 -07:00
parent 8e0decc494
commit 070e390dac
16 changed files with 225 additions and 221 deletions


@@ -124,7 +124,7 @@ CL_64( LCALL_Q_Q2, 1) // 95% LIR_qcall
 CL_64( LCALL_Q_Q7, 1) // 96% LIR_qcall
 CL___( LCALL_F_F3, 1) // 97% LIR_fcall
 CL___( LCALL_F_F8, 1) // 98% LIR_fcall
-CL_64( LCALL_N_IQF, 1) // 99% LIR_icall or LIR_qcall
+CL_64( LCALL_V_IQF, 1) // 99% LIR_icall or LIR_qcall
 CL___( LLABEL, 1) //100% LIR_label


@@ -154,12 +154,15 @@ enum ReturnType {
 #define FN(name, args) \
 {#name, CI(name, args)}
-const int I32 = nanojit::ARGSIZE_LO;
+const ArgType I32 = nanojit::ARGTYPE_LO;
 #ifdef NANOJIT_64BIT
-const int I64 = nanojit::ARGSIZE_Q;
+const ArgType I64 = nanojit::ARGTYPE_Q;
 #endif
-const int F64 = nanojit::ARGSIZE_F;
-const int PTR = nanojit::ARGSIZE_P;
+const ArgType F64 = nanojit::ARGTYPE_F;
+const ArgType PTR = nanojit::ARGTYPE_P;
+const ArgType WRD = nanojit::ARGTYPE_P;
+const ArgType VOID = nanojit::ARGTYPE_V;
 enum LirTokenType {
 NAME, NUMBER, PUNCT, NEWLINE
@@ -342,8 +345,8 @@ private:
 void endFragment();
 };
-// Meaning: arg 'm' of 'n' has size 'sz'.
-static int argMask(int sz, int m, int n)
+// Meaning: arg 'm' of 'n' has type 'ty'.
+static int argMask(int ty, int m, int n)
 {
 // Order examples, from MSB to LSB:
 // - 3 args: 000 | 000 | 000 | 000 | 000 | arg1| arg2| arg3| ret
@@ -351,13 +354,13 @@ static int argMask(int ty, int m, int n)
 // If the mask encoding reversed the arg order the 'n' parameter wouldn't
 // be necessary, as argN would always be in the same place in the
 // bitfield.
-return sz << ((1 + n - m) * ARGSIZE_SHIFT);
+return ty << ((1 + n - m) * ARGTYPE_SHIFT);
 }
-// Return value has size 'sz'.
-static int retMask(int sz)
+// Return value has type 'ty'.
+static int retMask(int ty)
 {
-return sz;
+return ty;
 }
 // 'sin' is overloaded on some platforms, so taking its address
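For concreteness, a worked example of the encoding (not part of the patch), assuming a 32-bit build where PTR and I32 both alias ARGTYPE_I (2) and ARGTYPE_SHIFT is 3:

// argMask(PTR, 1, 1) == 2 << ((1 + 1 - 1) * 3) == 0b010000   (arg1 in bits 3..5)
// retMask(I32)       == 2                      == 0b000010   (ret  in bits 0..2)
// so FN(puts, argMask(PTR, 1, 1) | retMask(I32)) packs the typesig
// 0b010010 (0x12), read MSB-to-LSB as arg1 | ret.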
@@ -371,8 +374,8 @@ double sinFn(double d) {
 Function functions[] = {
 FN(puts, argMask(PTR, 1, 1) | retMask(I32)),
 FN(sin, argMask(F64, 1, 1) | retMask(F64)),
-FN(malloc, argMask(PTR, 1, 1) | retMask(PTR)),
-FN(free, argMask(PTR, 1, 1) | retMask(I32))
+FN(malloc, argMask(WRD, 1, 1) | retMask(PTR)),
+FN(free, argMask(PTR, 1, 1) | retMask(VOID))
 };
 template<typename out, typename in> out
@@ -694,28 +697,28 @@ FragmentAssembler::assemble_call(const string &op)
 ci->_abi = _abi;
-ci->_argtypes = 0;
+ci->_typesig = 0;
 size_t argc = mTokens.size();
 for (size_t i = 0; i < argc; ++i) {
 args[i] = ref(mTokens[mTokens.size() - (i+1)]);
-if (args[i]->isF64()) ty = ARGSIZE_F;
+if (args[i]->isF64()) ty = ARGTYPE_F;
 #ifdef NANOJIT_64BIT
-else if (args[i]->isI64()) ty = ARGSIZE_Q;
+else if (args[i]->isI64()) ty = ARGTYPE_Q;
 #endif
-else ty = ARGSIZE_I;
+else ty = ARGTYPE_I;
 // Nb: i+1 because argMask() uses 1-based arg counting.
-ci->_argtypes |= argMask(ty, i+1, argc);
+ci->_typesig |= argMask(ty, i+1, argc);
 }
 // Select return type from opcode.
 ty = 0;
-if (mOpcode == LIR_icall) ty = ARGSIZE_LO;
-else if (mOpcode == LIR_fcall) ty = ARGSIZE_F;
+if (mOpcode == LIR_icall) ty = ARGTYPE_LO;
+else if (mOpcode == LIR_fcall) ty = ARGTYPE_F;
 #ifdef NANOJIT_64BIT
-else if (mOpcode == LIR_qcall) ty = ARGSIZE_Q;
+else if (mOpcode == LIR_qcall) ty = ARGTYPE_Q;
 #endif
 else nyi("callh");
-ci->_argtypes |= retMask(ty);
+ci->_typesig |= retMask(ty);
 }
 return mLir->insCall(ci, args);
@@ -1239,7 +1242,7 @@ static double f_F_F8(double a, double b, double c, double d,
 }
 #ifdef NANOJIT_64BIT
-static void f_N_IQF(int32_t, uint64_t, double)
+static void f_V_IQF(int32_t, uint64_t, double)
 {
 return; // no need to do anything
 }
@@ -1287,10 +1290,10 @@ const CallInfo ci_F_F8 = CI(f_F_F8, argMask(F64, 1, 8) |
 retMask(F64));
 #ifdef NANOJIT_64BIT
-const CallInfo ci_N_IQF = CI(f_N_IQF, argMask(I32, 1, 3) |
+const CallInfo ci_V_IQF = CI(f_V_IQF, argMask(I32, 1, 3) |
 argMask(I64, 2, 3) |
 argMask(F64, 3, 3) |
-retMask(ARGSIZE_NONE));
+retMask(ARGTYPE_V));
 #endif
 // Generate a random block containing nIns instructions, plus a few more
@@ -1920,11 +1923,11 @@ FragmentAssembler::assembleRandomFragment(int nIns)
 break;
 #ifdef NANOJIT_64BIT
-case LCALL_N_IQF:
+case LCALL_V_IQF:
 if (!Is.empty() && !Qs.empty() && !Fs.empty()) {
 // Nb: args[] holds the args in reverse order... sigh.
 LIns* args[3] = { rndPick(Fs), rndPick(Qs), rndPick(Is) };
-ins = mLir->insCall(&ci_N_IQF, args);
+ins = mLir->insCall(&ci_V_IQF, args);
 n++;
 }
 break;


@@ -2385,35 +2385,6 @@ namespace nanojit
 }
 #endif // NJ_VERBOSE
-uint32_t CallInfo::_count_args(uint32_t mask) const
-{
-uint32_t argc = 0;
-uint32_t argt = _argtypes;
-for (uint32_t i = 0; i < MAXARGS; ++i) {
-argt >>= ARGSIZE_SHIFT;
-if (!argt)
-break;
-argc += (argt & mask) != 0;
-}
-return argc;
-}
-uint32_t CallInfo::get_sizes(ArgSize* sizes) const
-{
-uint32_t argt = _argtypes;
-uint32_t argc = 0;
-for (uint32_t i = 0; i < MAXARGS; i++) {
-argt >>= ARGSIZE_SHIFT;
-ArgSize a = ArgSize(argt & ARGSIZE_MASK_ANY);
-if (a != ARGSIZE_NONE) {
-sizes[argc++] = a;
-} else {
-break;
-}
-}
-return argc;
-}
 void LabelStateMap::add(LIns *label, NIns *addr, RegAlloc &regs) {
 LabelState *st = new (alloc) LabelState(addr, regs);
 labels.put(label, st);


@@ -81,6 +81,46 @@ namespace nanojit
 #endif /* NANOJIT_VERBOSE */
+uint32_t CallInfo::count_args() const
+{
+uint32_t argc = 0;
+uint32_t argt = _typesig;
+argt >>= ARGTYPE_SHIFT; // remove retType
+while (argt) {
+argc++;
+argt >>= ARGTYPE_SHIFT;
+}
+return argc;
+}
+uint32_t CallInfo::count_int32_args() const
+{
+uint32_t argc = 0;
+uint32_t argt = _typesig;
+argt >>= ARGTYPE_SHIFT; // remove retType
+while (argt) {
+ArgType a = ArgType(argt & ARGTYPE_MASK);
+if (a == ARGTYPE_I || a == ARGTYPE_U)
+argc++;
+argt >>= ARGTYPE_SHIFT;
+}
+return argc;
+}
+uint32_t CallInfo::getArgTypes(ArgType* argTypes) const
+{
+uint32_t argc = 0;
+uint32_t argt = _typesig;
+argt >>= ARGTYPE_SHIFT; // remove retType
+while (argt) {
+ArgType a = ArgType(argt & ARGTYPE_MASK);
+argTypes[argc] = a;
+argc++;
+argt >>= ARGTYPE_SHIFT;
+}
+return argc;
+}
 // implementation
 #ifdef NJ_VERBOSE
 void ReverseLister::finish()
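As an illustration of the new accessors (a sketch, not part of the patch; 'f' and 'types' are hypothetical), take a signature built as argMask(F64, 1, 2) | argMask(I32, 2, 2) | retMask(I32), i.e. int32_t f(double a, int32_t b):

// _typesig == 0b001010010  (a1 = ARGTYPE_F, a2 = ARGTYPE_I, ret = ARGTYPE_I)
// ci->returnType()        == ARGTYPE_I
// ci->count_args()        == 2
// ci->count_int32_args()  == 1   (the double is not counted)
// ArgType types[MAXARGS];
// ci->getArgTypes(types)  == 2, leaving types[0] == ARGTYPE_I (right-most arg 'b')
//                                   and types[1] == ARGTYPE_F ('a')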
@@ -2324,11 +2364,11 @@ namespace nanojit
 static int32_t FASTCALL fle(double a, double b) { return a <= b; }
 static int32_t FASTCALL fge(double a, double b) { return a >= b; }
-#define SIG_F_I (ARGSIZE_F | ARGSIZE_I << ARGSIZE_SHIFT*1)
-#define SIG_F_U (ARGSIZE_F | ARGSIZE_U << ARGSIZE_SHIFT*1)
-#define SIG_F_F (ARGSIZE_F | ARGSIZE_F << ARGSIZE_SHIFT*1)
-#define SIG_F_FF (ARGSIZE_F | ARGSIZE_F << ARGSIZE_SHIFT*1 | ARGSIZE_F << ARGSIZE_SHIFT*2)
-#define SIG_B_FF (ARGSIZE_B | ARGSIZE_F << ARGSIZE_SHIFT*1 | ARGSIZE_F << ARGSIZE_SHIFT*2)
+#define SIG_F_I (ARGTYPE_F | ARGTYPE_I << ARGTYPE_SHIFT*1)
+#define SIG_F_U (ARGTYPE_F | ARGTYPE_U << ARGTYPE_SHIFT*1)
+#define SIG_F_F (ARGTYPE_F | ARGTYPE_F << ARGTYPE_SHIFT*1)
+#define SIG_F_FF (ARGTYPE_F | ARGTYPE_F << ARGTYPE_SHIFT*1 | ARGTYPE_F << ARGTYPE_SHIFT*2)
+#define SIG_B_FF (ARGTYPE_B | ARGTYPE_F << ARGTYPE_SHIFT*1 | ARGTYPE_F << ARGTYPE_SHIFT*2)
 #define SF_CALLINFO(name, typesig) \
 static const CallInfo name##_ci = \
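Numerically (a reference sketch, not part of the patch), with ARGTYPE_F == 1 and ARGTYPE_B == ARGTYPE_I == 2, these macros expand to small constants:

// SIG_F_FF == 1 | 1 << 3 | 1 << 6 == 73   (double ret, two double args)
// SIG_B_FF == 2 | 1 << 3 | 1 << 6 == 74   (bool/int32_t ret, two double args)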
@@ -2418,14 +2458,13 @@ namespace nanojit
 }
 LIns* SoftFloatFilter::insCall(const CallInfo *ci, LInsp args[]) {
-uint32_t argt = ci->_argtypes;
-for (uint32_t i = 0, argsizes = argt >> ARGSIZE_SHIFT; argsizes != 0; i++, argsizes >>= ARGSIZE_SHIFT)
+uint32_t nArgs = ci->count_args();
+for (uint32_t i = 0; i < nArgs; i++)
 args[i] = split(args[i]);
-if ((argt & ARGSIZE_MASK_ANY) == ARGSIZE_F) {
-// this function returns a double as two 32bit values, so replace
-// call with qjoin(qhi(call), call)
+if (ci->returnType() == ARGTYPE_F) {
+// This function returns a double as two 32bit values, so replace
+// call with qjoin(qhi(call), call).
 return split(ci, args);
 }
 return out->insCall(ci, args);
@@ -2876,8 +2915,8 @@ namespace nanojit
 LIns* ValidateWriter::insCall(const CallInfo *ci, LIns* args0[])
 {
-ArgSize sizes[MAXARGS];
-uint32_t nArgs = ci->get_sizes(sizes);
+ArgType argTypes[MAXARGS];
+uint32_t nArgs = ci->getArgTypes(argTypes);
 LTy formals[MAXARGS];
 LIns* args[MAXARGS]; // in left-to-right order, unlike args0[]
@@ -2890,20 +2929,20 @@ namespace nanojit
 errorAccSetShould(lirNames[op], ci->_storeAccSet,
 "not contain bits that aren't in ACC_STORE_ANY");
-// This loop iterates over the args from right-to-left (because
-// arg() and get_sizes() use right-to-left order), but puts the
-// results into formals[] and args[] in left-to-right order so
-// that arg numbers in error messages make sense to the user.
+// This loop iterates over the args from right-to-left (because arg()
+// and getArgTypes() use right-to-left order), but puts the results
+// into formals[] and args[] in left-to-right order so that arg
+// numbers in error messages make sense to the user.
 for (uint32_t i = 0; i < nArgs; i++) {
 uint32_t i2 = nArgs - i - 1; // converts right-to-left to left-to-right
-switch (sizes[i]) {
-case ARGSIZE_I:
-case ARGSIZE_U: formals[i2] = LTy_I32; break;
+switch (argTypes[i]) {
+case ARGTYPE_I:
+case ARGTYPE_U: formals[i2] = LTy_I32; break;
 #ifdef NANOJIT_64BIT
-case ARGSIZE_Q: formals[i2] = LTy_I64; break;
+case ARGTYPE_Q: formals[i2] = LTy_I64; break;
 #endif
-case ARGSIZE_F: formals[i2] = LTy_F64; break;
-default: NanoAssert(0); formals[i2] = LTy_Void; break;
+case ARGTYPE_F: formals[i2] = LTy_F64; break;
+default: NanoAssertMsgf(0, "%d %s\n", argTypes[i],ci->_name); formals[i2] = LTy_Void; break;
 }
 args[i2] = args0[i];
 }


@@ -146,25 +146,26 @@ namespace nanojit
 ABI_CDECL
 };
-enum ArgSize {
-ARGSIZE_NONE = 0,
-ARGSIZE_F = 1, // double (64bit)
-ARGSIZE_I = 2, // int32_t
+// All values must fit into three bits. See CallInfo for details.
+enum ArgType {
+ARGTYPE_V = 0, // void
+ARGTYPE_F = 1, // double (64bit)
+ARGTYPE_I = 2, // int32_t
+ARGTYPE_U = 3, // uint32_t
 #ifdef NANOJIT_64BIT
-ARGSIZE_Q = 3, // uint64_t
+ARGTYPE_Q = 4, // uint64_t
 #endif
-ARGSIZE_U = 6, // uint32_t
-ARGSIZE_MASK_ANY = 7,
-ARGSIZE_MASK_INT = 2,
-ARGSIZE_SHIFT = 3,
 // aliases
-ARGSIZE_P = PTR_SIZE(ARGSIZE_I, ARGSIZE_Q), // pointer
-ARGSIZE_LO = ARGSIZE_I, // int32_t
-ARGSIZE_B = ARGSIZE_I, // bool
-ARGSIZE_V = ARGSIZE_NONE // void
+ARGTYPE_P = PTR_SIZE(ARGTYPE_I, ARGTYPE_Q), // pointer
+ARGTYPE_LO = ARGTYPE_I, // int32_t
+ARGTYPE_B = ARGTYPE_I // bool
 };
+// In _typesig, each entry is three bits.
+static const int ARGTYPE_SHIFT = 3;
+static const int ARGTYPE_MASK = 0x7;
 enum IndirectCall {
 CALL_INDIRECT = 0
 };
@@ -290,38 +291,28 @@ namespace nanojit
 struct CallInfo
 {
-private:
+public:
 uintptr_t _address;
-uint32_t _argtypes:27; // 9 3-bit fields indicating arg type, by ARGSIZE above (including ret type): a1 a2 a3 a4 a5 ret
+uint32_t _typesig:27; // 9 3-bit fields indicating arg type, by ARGTYPE above (including ret type): a1 a2 a3 a4 a5 ret
 AbiKind _abi:3;
 uint8_t _isPure:1; // _isPure=1 means no side-effects, result only depends on args
 AccSet _storeAccSet; // access regions stored by the function
 verbose_only ( const char* _name; )
-uint32_t _count_args(uint32_t mask) const;
+uint32_t count_args() const;
+uint32_t count_int32_args() const;
 // Nb: uses right-to-left order, eg. sizes[0] is the size of the right-most arg.
-uint32_t get_sizes(ArgSize* sizes) const;
+uint32_t getArgTypes(ArgType* types) const;
-inline ArgSize returnType() const {
-return ArgSize(_argtypes & ARGSIZE_MASK_ANY);
-}
-// Note that this indexes arguments *backwards*, that is to
-// get the Nth arg, you have to ask for index (numargs - N).
-// See mozilla bug 525815 for fixing this.
-inline ArgSize argType(uint32_t arg) const {
-return ArgSize((_argtypes >> (ARGSIZE_SHIFT * (arg+1))) & ARGSIZE_MASK_ANY);
+inline ArgType returnType() const {
+return ArgType(_typesig & ARGTYPE_MASK);
 }
 inline bool isIndirect() const {
 return _address < 256;
 }
-inline uint32_t count_args() const {
-return _count_args(ARGSIZE_MASK_ANY);
-}
-inline uint32_t count_iargs() const {
-return _count_args(ARGSIZE_MASK_INT);
-}
 // fargs = args - iargs
 };
 /*
@@ -408,12 +399,12 @@ namespace nanojit
 inline LOpcode getCallOpcode(const CallInfo* ci) {
 LOpcode op = LIR_pcall;
 switch (ci->returnType()) {
-case ARGSIZE_NONE: op = LIR_pcall; break;
-case ARGSIZE_I:
-case ARGSIZE_U: op = LIR_icall; break;
-case ARGSIZE_F: op = LIR_fcall; break;
+case ARGTYPE_V: op = LIR_pcall; break;
+case ARGTYPE_I:
+case ARGTYPE_U: op = LIR_icall; break;
+case ARGTYPE_F: op = LIR_fcall; break;
 #ifdef NANOJIT_64BIT
-case ARGSIZE_Q: op = LIR_qcall; break;
+case ARGTYPE_Q: op = LIR_qcall; break;
 #endif
 default: NanoAssert(0); break;
 }


@@ -597,19 +597,19 @@ Assembler::genEpilogue()
 * alignment.
 */
 void
-Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd)
+Assembler::asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd)
 {
 // The stack pointer must always be at least aligned to 4 bytes.
 NanoAssert((stkd & 3) == 0);
-if (sz == ARGSIZE_F) {
+if (ty == ARGTYPE_F) {
 // This task is fairly complex and so is delegated to asm_arg_64.
 asm_arg_64(arg, r, stkd);
 } else {
-NanoAssert(sz == ARGSIZE_I || sz == ARGSIZE_U);
+NanoAssert(ty == ARGTYPE_I || ty == ARGTYPE_U);
 // pre-assign registers R0-R3 for arguments (if they fit)
 if (r < R4) {
-asm_regarg(sz, arg, r);
+asm_regarg(ty, arg, r);
 r = nextreg(r);
 } else {
 asm_stkarg(arg, stkd);
@@ -620,7 +620,7 @@ Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd)
 // Encode a 64-bit floating-point argument using the appropriate ABI.
 // This function operates in the same way as asm_arg, except that it will only
-// handle arguments where (ArgSize)sz == ARGSIZE_F.
+// handle arguments where (ArgType)ty == ARGTYPE_F.
 void
 Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
 {
@@ -665,8 +665,8 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
 if (_config.arm_vfp) {
 FMRRD(ra, rb, fp_reg);
 } else {
-asm_regarg(ARGSIZE_LO, arg->oprnd1(), ra);
-asm_regarg(ARGSIZE_LO, arg->oprnd2(), rb);
+asm_regarg(ARGTYPE_LO, arg->oprnd1(), ra);
+asm_regarg(ARGTYPE_LO, arg->oprnd2(), rb);
 }
 #ifndef NJ_ARM_EABI
@@ -699,7 +699,7 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
 // Without VFP, we can simply use asm_regarg and asm_stkarg to
 // encode the two 32-bit words as we don't need to load from a VFP
 // register.
-asm_regarg(ARGSIZE_LO, arg->oprnd1(), ra);
+asm_regarg(ARGTYPE_LO, arg->oprnd1(), ra);
 asm_stkarg(arg->oprnd2(), 0);
 stkd += 4;
 }
@@ -720,10 +720,10 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
 }
 void
-Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
+Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
 {
 NanoAssert(deprecated_isKnownReg(r));
-if (sz & ARGSIZE_MASK_INT)
+if (ty == ARGTYPE_I || ty == ARGTYPE_U)
 {
 // arg goes in specific register
 if (p->isconst()) {
@@ -752,7 +752,7 @@ Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
 }
 else
 {
-NanoAssert(sz == ARGSIZE_F);
+NanoAssert(ty == ARGTYPE_F);
 // fpu argument in register - should never happen since FPU
 // args are converted to two 32-bit ints on ARM
 NanoAssert(false);
@@ -848,10 +848,10 @@ Assembler::asm_call(LInsp ins)
 evictScratchRegsExcept(0);
-const CallInfo* call = ins->callInfo();
-ArgSize sizes[MAXARGS];
-uint32_t argc = call->get_sizes(sizes);
-bool indirect = call->isIndirect();
+const CallInfo* ci = ins->callInfo();
+ArgType argTypes[MAXARGS];
+uint32_t argc = ci->getArgTypes(argTypes);
+bool indirect = ci->isIndirect();
 // If we aren't using VFP, assert that the LIR operation is an integer
 // function call.
@@ -863,11 +863,11 @@ Assembler::asm_call(LInsp ins)
 // for floating point calls, but not for integer calls.
 if (_config.arm_vfp && ins->isUsed()) {
 // Determine the size (and type) of the instruction result.
-ArgSize rsize = (ArgSize)(call->_argtypes & ARGSIZE_MASK_ANY);
+ArgType rsize = (ArgType)(ci->_typesig & ARGTYPE_MASK_ANY);
 // If the result size is a floating-point value, treat the result
 // specially, as described previously.
-if (rsize == ARGSIZE_F) {
+if (ci->returnType() == ARGTYPE_F) {
 Register rr = ins->deprecated_getReg();
 NanoAssert(ins->opcode() == LIR_fcall);
@@ -902,7 +902,7 @@ Assembler::asm_call(LInsp ins)
 // interlock in the "long" branch sequence by manually loading the
 // target address into LR ourselves before setting up the parameters
 // in other registers.
-BranchWithLink((NIns*)call->_address);
+BranchWithLink((NIns*)ci->_address);
 } else {
 // Indirect call: we assign the address arg to LR since it's not
 // used for regular arguments, and is otherwise scratch since it's
@@ -917,7 +917,7 @@ Assembler::asm_call(LInsp ins)
 } else {
 BLX(LR);
 }
-asm_regarg(ARGSIZE_LO, ins->arg(--argc), LR);
+asm_regarg(ARGTYPE_LO, ins->arg(--argc), LR);
 }
 // Encode the arguments, starting at R0 and with an empty argument stack.
@@ -930,7 +930,7 @@ Assembler::asm_call(LInsp ins)
 // in reverse order.
 uint32_t i = argc;
 while(i--) {
-asm_arg(sizes[i], ins->arg(i), r, stkd);
+asm_arg(argTypes[i], ins->arg(i), r, stkd);
 }
 if (stkd > max_out_args) {


@@ -220,14 +220,14 @@ verbose_only( extern const char* shiftNames[]; )
 void nativePageReset(); \
 void nativePageSetup(); \
 void asm_immf_nochk(Register, int32_t, int32_t); \
-void asm_regarg(ArgSize, LInsp, Register); \
+void asm_regarg(ArgType, LInsp, Register); \
 void asm_stkarg(LInsp p, int stkd); \
 void asm_cmpi(Register, int32_t imm); \
 void asm_ldr_chk(Register d, Register b, int32_t off, bool chk); \
 void asm_cmp(LIns *cond); \
 void asm_fcmp(LIns *cond); \
 void asm_ld_imm(Register d, int32_t imm, bool chk = true); \
-void asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd); \
+void asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd); \
 void asm_arg_64(LInsp arg, Register& r, int& stkd); \
 void asm_add_imm(Register rd, Register rn, int32_t imm, int stat = 0); \
 void asm_sub_imm(Register rd, Register rn, int32_t imm, int stat = 0); \


@@ -389,10 +389,10 @@ namespace nanojit
 }
 }
-void Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
+void Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
 {
 NanoAssert(deprecated_isKnownReg(r));
-if (sz & ARGSIZE_MASK_INT) {
+if (ty == ARGTYPE_I || ty == ARGTYPE_U) {
 // arg goes in specific register
 if (p->isconst())
 asm_li(r, p->imm32());
@@ -464,7 +464,7 @@ namespace nanojit
 // Encode a 64-bit floating-point argument using the appropriate ABI.
 // This function operates in the same way as asm_arg, except that it will only
-// handle arguments where (ArgSize)sz == ARGSIZE_F.
+// handle arguments where (ArgType)ty == ARGTYPE_F.
 void
 Assembler::asm_arg_64(LInsp arg, Register& r, Register& fr, int& stkd)
 {
@@ -1505,18 +1505,18 @@ namespace nanojit
 * on the stack.
 */
 void
-Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, Register& fr, int& stkd)
+Assembler::asm_arg(ArgType ty, LInsp arg, Register& r, Register& fr, int& stkd)
 {
 // The stack offset must always be at least aligned to 4 bytes.
 NanoAssert((stkd & 3) == 0);
-if (sz == ARGSIZE_F) {
+if (ty == ARGTYPE_F) {
 // This task is fairly complex and so is delegated to asm_arg_64.
 asm_arg_64(arg, r, fr, stkd);
-}
-else if (sz & ARGSIZE_MASK_INT) {
+} else {
+NanoAssert(ty == ARGTYPE_I || ty == ARGTYPE_U);
 if (stkd < 16) {
-asm_regarg(sz, arg, r);
+asm_regarg(ty, arg, r);
 fr = nextreg(fr);
 r = nextreg(r);
 }
@@ -1527,11 +1527,6 @@ namespace nanojit
 fr = r;
 stkd += 4;
 }
-else {
-NanoAssert(sz == ARGSIZE_Q);
-// shouldn't have 64 bit int params
-NanoAssert(false);
-}
 }
 void
@@ -1560,10 +1555,10 @@ namespace nanojit
 evictScratchRegsExcept(0);
-const CallInfo* call = ins->callInfo();
-ArgSize sizes[MAXARGS];
-uint32_t argc = call->get_sizes(sizes);
-bool indirect = call->isIndirect();
+const CallInfo* ci = ins->callInfo();
+ArgType argTypes[MAXARGS];
+uint32_t argc = ci->getArgTypes(argTypes);
+bool indirect = ci->isIndirect();
 // FIXME: Put one of the argument moves into the BDS slot
@@ -1574,11 +1569,11 @@ namespace nanojit
 if (!indirect)
 // FIXME: If we can tell that we are calling non-PIC
 // (ie JIT) code, we could call direct instead of using t9
-asm_li(T9, call->_address);
+asm_li(T9, ci->_address);
 else
 // Indirect call: we assign the address arg to t9
 // which matches the o32 ABI for calling functions
-asm_regarg(ARGSIZE_P, ins->arg(--argc), T9);
+asm_regarg(ARGTYPE_P, ins->arg(--argc), T9);
 // Encode the arguments, starting at A0 and with an empty argument stack.
 Register r = A0, fr = FA0;
@@ -1589,7 +1584,7 @@ namespace nanojit
 // Note that we loop through the arguments backwards as LIR specifies them
 // in reverse order.
 while(argc--)
-asm_arg(sizes[argc], ins->arg(argc), r, fr, stkd);
+asm_arg(argTypes[argc], ins->arg(argc), r, fr, stkd);
 if (stkd > max_out_args)
 max_out_args = stkd;


@@ -179,9 +179,9 @@ namespace nanojit
 NIns *asm_branch_near(bool, LIns*, NIns*); \
 void asm_cmp(LOpcode condop, LIns *a, LIns *b, Register cr); \
 void asm_move(Register d, Register s); \
-void asm_regarg(ArgSize sz, LInsp p, Register r); \
+void asm_regarg(ArgType ty, LInsp p, Register r); \
 void asm_stkarg(LInsp arg, int stkd); \
-void asm_arg(ArgSize sz, LInsp arg, Register& r, Register& fr, int& stkd); \
+void asm_arg(ArgType ty, LInsp arg, Register& r, Register& fr, int& stkd); \
 void asm_arg_64(LInsp arg, Register& r, Register& fr, int& stkd) ;


@@ -683,8 +683,8 @@ namespace nanojit
 evictScratchRegsExcept(0);
 const CallInfo* call = ins->callInfo();
-ArgSize sizes[MAXARGS];
-uint32_t argc = call->get_sizes(sizes);
+ArgType argTypes[MAXARGS];
+uint32_t argc = call->getArgTypes(argTypes);
 bool indirect;
 if (!(indirect = call->isIndirect())) {
@@ -699,7 +699,7 @@ namespace nanojit
 underrunProtect(8); // underrunProtect might clobber CTR
 BCTRL();
 MTCTR(R11);
-asm_regarg(ARGSIZE_P, ins->arg(--argc), R11);
+asm_regarg(ARGTYPE_P, ins->arg(--argc), R11);
 }
 int param_size = 0;
@@ -708,22 +708,22 @@ namespace nanojit
 Register fr = F1;
 for(uint32_t i = 0; i < argc; i++) {
 uint32_t j = argc - i - 1;
-ArgSize sz = sizes[j];
+ArgType ty = argTypes[j];
 LInsp arg = ins->arg(j);
-if (sz & ARGSIZE_MASK_INT) {
+if (ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) {
 // GP arg
 if (r <= R10) {
-asm_regarg(sz, arg, r);
+asm_regarg(ty, arg, r);
 r = nextreg(r);
 param_size += sizeof(void*);
 } else {
 // put arg on stack
 TODO(stack_int32);
 }
-} else if (sz == ARGSIZE_F) {
+} else if (ty == ARGTYPE_F) {
 // double
 if (fr <= F13) {
-asm_regarg(sz, arg, fr);
+asm_regarg(ty, arg, fr);
 fr = nextreg(fr);
 #ifdef NANOJIT_64BIT
 r = nextreg(r);
@@ -736,23 +736,23 @@ namespace nanojit
 TODO(stack_double);
 }
 } else {
-TODO(ARGSIZE_UNK);
+TODO(ARGTYPE_UNK);
 }
 }
 if (param_size > max_param_size)
 max_param_size = param_size;
 }
-void Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
+void Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
 {
 NanoAssert(r != deprecated_UnknownReg);
-if (sz & ARGSIZE_MASK_INT)
+if (ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q)
 {
 #ifdef NANOJIT_64BIT
-if (sz == ARGSIZE_I) {
+if (ty == ARGTYPE_I) {
 // sign extend 32->64
 EXTSW(r, r);
-} else if (sz == ARGSIZE_U) {
+} else if (ty == ARGTYPE_U) {
 // zero extend 32->64
 CLRLDI(r, r, 32);
 }
@@ -785,7 +785,7 @@ namespace nanojit
 }
 }
 }
-else if (sz == ARGSIZE_F) {
+else if (ty == ARGTYPE_F) {
 if (p->isUsed()) {
 Register rp = p->deprecated_getReg();
 if (!deprecated_isKnownReg(rp) || !IsFpReg(rp)) {
@@ -805,7 +805,7 @@ namespace nanojit
 }
 }
 else {
-TODO(ARGSIZE_UNK);
+TODO(ARGTYPE_UNK);
 }
 }


@@ -287,7 +287,7 @@ namespace nanojit
 void nativePageSetup(); \
 void br(NIns *addr, int link); \
 void br_far(NIns *addr, int link); \
-void asm_regarg(ArgSize, LIns*, Register); \
+void asm_regarg(ArgType, LIns*, Register); \
 void asm_li(Register r, int32_t imm); \
 void asm_li32(Register r, int32_t imm); \
 void asm_li64(Register r, uint64_t imm); \


@@ -161,21 +161,21 @@ namespace nanojit
 evictScratchRegsExcept(0);
-const CallInfo* call = ins->callInfo();
+const CallInfo* ci = ins->callInfo();
 underrunProtect(8);
 NOP();
-ArgSize sizes[MAXARGS];
-uint32_t argc = call->get_sizes(sizes);
+ArgType argTypes[MAXARGS];
+uint32_t argc = ci->getArgTypes(argTypes);
 NanoAssert(ins->isop(LIR_pcall) || ins->isop(LIR_fcall));
 verbose_only(if (_logc->lcbits & LC_Assembly)
 outputf(" %p:", _nIns);
 )
-bool indirect = call->isIndirect();
+bool indirect = ci->isIndirect();
 if (!indirect) {
-CALL(call);
+CALL(ci);
 }
 else {
 argc--;
@@ -189,8 +189,8 @@ namespace nanojit
 for(int i=0; i<argc; i++)
 {
 uint32_t j = argc-i-1;
-ArgSize sz = sizes[j];
-if (sz == ARGSIZE_F) {
+ArgType ty = argTypes[j];
+if (ty == ARGTYPE_F) {
 Register r = findRegFor(ins->arg(j), FpRegs);
 GPRIndex += 2;
 offset += 8;


@@ -893,8 +893,8 @@ namespace nanojit
 evictScratchRegsExcept(rmask(rr));
 const CallInfo *call = ins->callInfo();
-ArgSize sizes[MAXARGS];
-int argc = call->get_sizes(sizes);
+ArgType argTypes[MAXARGS];
+int argc = call->getArgTypes(argTypes);
 if (!call->isIndirect()) {
 verbose_only(if (_logc->lcbits & LC_Assembly)
@@ -921,7 +921,7 @@ namespace nanojit
 // Assign the call address to RAX. Must happen after freeResourcesOf()
 // since RAX is usually the return value and will be allocated until that point.
-asm_regarg(ARGSIZE_P, ins->arg(--argc), RAX);
+asm_regarg(ARGTYPE_P, ins->arg(--argc), RAX);
 }
 #ifdef _WIN64
@@ -933,28 +933,28 @@ namespace nanojit
 int arg_index = 0;
 for (int i = 0; i < argc; i++) {
 int j = argc - i - 1;
-ArgSize sz = sizes[j];
+ArgType ty = argTypes[j];
 LIns* arg = ins->arg(j);
-if ((sz & ARGSIZE_MASK_INT) && arg_index < NumArgRegs) {
+if ((ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) && arg_index < NumArgRegs) {
 // gp arg
-asm_regarg(sz, arg, argRegs[arg_index]);
+asm_regarg(ty, arg, argRegs[arg_index]);
 arg_index++;
 }
 #ifdef _WIN64
-else if (sz == ARGSIZE_F && arg_index < NumArgRegs) {
+else if (ty == ARGTYPE_F && arg_index < NumArgRegs) {
 // double goes in XMM reg # based on overall arg_index
-asm_regarg(sz, arg, Register(XMM0+arg_index));
+asm_regarg(ty, arg, Register(XMM0+arg_index));
 arg_index++;
 }
 #else
-else if (sz == ARGSIZE_F && fr < XMM8) {
+else if (ty == ARGTYPE_F && fr < XMM8) {
 // double goes in next available XMM register
-asm_regarg(sz, arg, fr);
+asm_regarg(ty, arg, fr);
 fr = nextreg(fr);
 }
 #endif
 else {
-asm_stkarg(sz, arg, stk_used);
+asm_stkarg(ty, arg, stk_used);
 stk_used += sizeof(void*);
 }
 }
@@ -963,8 +963,8 @@ namespace nanojit
 max_stk_used = stk_used;
 }
-void Assembler::asm_regarg(ArgSize sz, LIns *p, Register r) {
-if (sz == ARGSIZE_I) {
+void Assembler::asm_regarg(ArgType ty, LIns *p, Register r) {
+if (ty == ARGTYPE_I) {
 NanoAssert(p->isI32());
 if (p->isconst()) {
 asm_immq(r, int64_t(p->imm32()), /*canClobberCCs*/true);
@@ -972,7 +972,7 @@ namespace nanojit
 }
 // sign extend int32 to int64
 MOVSXDR(r, r);
-} else if (sz == ARGSIZE_U) {
+} else if (ty == ARGTYPE_U) {
 NanoAssert(p->isI32());
 if (p->isconst()) {
 asm_immq(r, uint64_t(uint32_t(p->imm32())), /*canClobberCCs*/true);
@@ -980,6 +980,8 @@ namespace nanojit
 }
 // zero extend with 32bit mov, auto-zeros upper 32bits
 MOVLR(r, r);
+} else {
+// Do nothing.
 }
 /* there is no point in folding an immediate here, because
 * the argument register must be a scratch register and we're
@@ -991,19 +993,22 @@ namespace nanojit
 findSpecificRegFor(p, r);
 }
-void Assembler::asm_stkarg(ArgSize sz, LIns *p, int stk_off) {
+void Assembler::asm_stkarg(ArgType ty, LIns *p, int stk_off) {
 NanoAssert(isS8(stk_off));
-if (sz & ARGSIZE_MASK_INT) {
+if (ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) {
 Register r = findRegFor(p, GpRegs);
 MOVQSPR(stk_off, r); // movq [rsp+d8], r
-if (sz == ARGSIZE_I) {
+if (ty == ARGTYPE_I) {
 // extend int32 to int64
 NanoAssert(p->isI32());
 MOVSXDR(r, r);
-} else if (sz == ARGSIZE_U) {
+} else if (ty == ARGTYPE_U) {
 // extend uint32 to uint64
 NanoAssert(p->isI32());
 MOVLR(r, r);
+} else {
+NanoAssert(ty == ARGTYPE_Q);
+// Do nothing.
 }
 } else {
 TODO(asm_stkarg_non_int);


@@ -395,8 +395,8 @@ namespace nanojit
 void asm_immi(Register r, int32_t v, bool canClobberCCs);\
 void asm_immq(Register r, uint64_t v, bool canClobberCCs);\
 void asm_immf(Register r, uint64_t v, bool canClobberCCs);\
-void asm_regarg(ArgSize, LIns*, Register);\
-void asm_stkarg(ArgSize, LIns*, int);\
+void asm_regarg(ArgType, LIns*, Register);\
+void asm_stkarg(ArgType, LIns*, int);\
 void asm_shift(LIns*);\
 void asm_shift_imm(LIns*);\
 void asm_arith_imm(LIns*);\


@@ -168,7 +168,7 @@ namespace nanojit
 const CallInfo* call = ins->callInfo();
 // must be signed, not unsigned
-uint32_t iargs = call->count_iargs();
+uint32_t iargs = call->count_int32_args();
 int32_t fargs = call->count_args() - iargs;
 bool indirect = call->isIndirect();
@@ -237,13 +237,13 @@ namespace nanojit
 // Pre-assign registers to the first N 4B args based on the calling convention.
 uint32_t n = 0;
-ArgSize sizes[MAXARGS];
-uint32_t argc = call->get_sizes(sizes);
+ArgType argTypes[MAXARGS];
+uint32_t argc = call->getArgTypes(argTypes);
 int32_t stkd = 0;
 if (indirect) {
 argc--;
-asm_arg(ARGSIZE_P, ins->arg(argc), EAX, stkd);
+asm_arg(ARGTYPE_P, ins->arg(argc), EAX, stkd);
 if (!_config.i386_fixed_esp)
 stkd = 0;
 }
@@ -251,12 +251,12 @@ namespace nanojit
 for (uint32_t i = 0; i < argc; i++)
 {
 uint32_t j = argc-i-1;
-ArgSize sz = sizes[j];
+ArgType ty = argTypes[j];
 Register r = UnspecifiedReg;
-if (n < max_regs && sz != ARGSIZE_F) {
+if (n < max_regs && ty != ARGTYPE_F) {
 r = argRegs[n++]; // tell asm_arg what reg to use
 }
-asm_arg(sz, ins->arg(j), r, stkd);
+asm_arg(ty, ins->arg(j), r, stkd);
 if (!_config.i386_fixed_esp)
 stkd = 0;
 }
@@ -1377,12 +1377,12 @@ namespace nanojit
 }
 }
-void Assembler::asm_arg(ArgSize sz, LInsp ins, Register r, int32_t& stkd)
+void Assembler::asm_arg(ArgType ty, LInsp ins, Register r, int32_t& stkd)
 {
 // If 'r' is known, then that's the register we have to put 'ins'
 // into.
-if (sz == ARGSIZE_I || sz == ARGSIZE_U) {
+if (ty == ARGTYPE_I || ty == ARGTYPE_U) {
 if (r != UnspecifiedReg) {
 if (ins->isconst()) {
 // Rematerialize the constant.
@@ -1413,7 +1413,7 @@ namespace nanojit
 }
 } else {
-NanoAssert(sz == ARGSIZE_F);
+NanoAssert(ty == ARGTYPE_F);
 asm_farg(ins, stkd);
 }
 }


@@ -184,7 +184,7 @@ namespace nanojit
 void asm_immi(Register r, int32_t val, bool canClobberCCs);\
 void asm_stkarg(LInsp p, int32_t& stkd);\
 void asm_farg(LInsp, int32_t& stkd);\
-void asm_arg(ArgSize sz, LInsp p, Register r, int32_t& stkd);\
+void asm_arg(ArgType ty, LInsp p, Register r, int32_t& stkd);\
 void asm_pusharg(LInsp);\
 void asm_fcmp(LIns *cond);\
 NIns* asm_fbranch(bool, LIns*, NIns*);\
@@ -968,23 +968,23 @@ namespace nanojit
 #define EMMS() do { count_fpu(); FPUc(0x0f77); asm_output("emms"); } while (0)
 // standard direct call
-#define CALL(c) do { \
+#define CALL(ci) do { \
 count_call();\
 underrunProtect(5); \
-int offset = (c->_address) - ((int)_nIns); \
+int offset = (ci->_address) - ((int)_nIns); \
 IMM32( (uint32_t)offset ); \
 *(--_nIns) = 0xE8; \
-verbose_only(asm_output("call %s",(c->_name));) \
-debug_only(if ((c->_argtypes & ARGSIZE_MASK_ANY)==ARGSIZE_F) fpu_push();)\
+verbose_only(asm_output("call %s",(ci->_name));) \
+debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();)\
 } while (0)
 // indirect call thru register
-#define CALLr(c,r) do { \
+#define CALLr(ci,r) do { \
 count_calli();\
 underrunProtect(2);\
 ALU(0xff, 2, (r));\
 verbose_only(asm_output("call %s",gpn(r));) \
-debug_only(if ((c->_argtypes & ARGSIZE_MASK_ANY)==ARGSIZE_F) fpu_push();)\
+debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();)\
 } while (0)
 }