Bug 572798 - add LIR_callv (r=nnethercote)

Adds LIR_callv for calls to helper functions that return void.

Added a ValidateWriter check that LIR_callv must be paired with ARGTYPE_V,
plus checks for the other obvious pairings, plus a check that LIR_callv must
not be used to call a _pure=1 function.

getCallOpcode() returns LIR_callv for ARGTYPE_V, as expected.  This means that
some calls will return LTy_V from LIns::retType(), as expected, but unlike
before.  This in turn can cause a ValidateWriter error if an instruction uses
the result of a void call.  (After all, that's the point.)

Each backend was modified to not assign a register or save the result of a void
call.

--HG--
extra : convert_revision : f1076b3fa633922ce95c24ac622934be4815376d
This commit is contained in:
Edwin Smith 2010-09-23 15:08:15 -04:00
parent 64beede2e2
commit 52ea68fee4
14 changed files with 124 additions and 63 deletions

View File

@ -394,18 +394,24 @@ double callid3(int i, int j, double x, int k, double y, double z) {
return (x + y + z) / (double)(i + j + k);
}
// Test helper for void calls: writes an integer and a newline to stdout.
void printi(int x) {
    std::cout << x << std::endl;
}
Function functions[] = {
FN(puts, CallInfo::typeSig1(ARGTYPE_I, ARGTYPE_P)),
FN(sin, CallInfo::typeSig1(ARGTYPE_D, ARGTYPE_D)),
FN(malloc, CallInfo::typeSig1(ARGTYPE_P, ARGTYPE_P)),
FN(free, CallInfo::typeSig1(ARGTYPE_V, ARGTYPE_P)),
FN(calld1, CallInfo::typeSig8(ARGTYPE_D, ARGTYPE_D, ARGTYPE_D, ARGTYPE_D,
ARGTYPE_D, ARGTYPE_D, ARGTYPE_D, ARGTYPE_D, ARGTYPE_D)),
FN(puts, CallInfo::typeSig1(ARGTYPE_I, ARGTYPE_P)),
FN(sin, CallInfo::typeSig1(ARGTYPE_D, ARGTYPE_D)),
FN(malloc, CallInfo::typeSig1(ARGTYPE_P, ARGTYPE_P)),
FN(free, CallInfo::typeSig1(ARGTYPE_V, ARGTYPE_P)),
FN(calld1, CallInfo::typeSig8(ARGTYPE_D, ARGTYPE_D, ARGTYPE_D, ARGTYPE_D,
ARGTYPE_D, ARGTYPE_D, ARGTYPE_D, ARGTYPE_D, ARGTYPE_D)),
FN(callid1, CallInfo::typeSig6(ARGTYPE_D, ARGTYPE_I, ARGTYPE_D, ARGTYPE_D,
ARGTYPE_I, ARGTYPE_I, ARGTYPE_D)),
FN(callid2, CallInfo::typeSig4(ARGTYPE_D, ARGTYPE_I, ARGTYPE_I, ARGTYPE_I, ARGTYPE_D)),
FN(callid3, CallInfo::typeSig6(ARGTYPE_D, ARGTYPE_I, ARGTYPE_I, ARGTYPE_D,
ARGTYPE_I, ARGTYPE_D, ARGTYPE_D)),
FN(printi, CallInfo::typeSig1(ARGTYPE_V, ARGTYPE_I)),
};
template<typename out, typename in> out
@ -739,12 +745,13 @@ FragmentAssembler::assemble_call(const string &op)
}
// Select return type from opcode.
ArgType retType = ARGTYPE_V;
if (mOpcode == LIR_calli) retType = ARGTYPE_I;
else if (mOpcode == LIR_calld) retType = ARGTYPE_D;
ArgType retType = ARGTYPE_P;
if (mOpcode == LIR_callv) retType = ARGTYPE_V;
else if (mOpcode == LIR_calli) retType = ARGTYPE_I;
#ifdef NANOJIT_64BIT
else if (mOpcode == LIR_callq) retType = ARGTYPE_Q;
#endif
else if (mOpcode == LIR_calld) retType = ARGTYPE_D;
else nyi("callh");
ci->_typesig = CallInfo::typeSigN(retType, argc, argTypes);
}
@ -1191,10 +1198,11 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
ins = assemble_jump_jov();
break;
case LIR_callv:
case LIR_calli:
CASESF(LIR_hcalli:)
case LIR_calld:
CASE64(LIR_callq:)
case LIR_calld:
ins = assemble_call(op);
break;

View File

@ -0,0 +1,5 @@
; test call to void function
forty_two = immi 42
callv printi cdecl forty_two
reti forty_two

View File

@ -0,0 +1,2 @@
42
Output is: 42

View File

@ -1956,6 +1956,7 @@ namespace nanojit
}
break;
case LIR_callv:
case LIR_calli:
CASE64(LIR_callq:)
case LIR_calld:

View File

@ -1483,9 +1483,10 @@ namespace nanojit
live.add(ins->oprnd3(), 0);
break;
case LIR_callv:
case LIR_calli:
case LIR_calld:
CASE64(LIR_callq:)
case LIR_calld:
for (int i = 0, argc = ins->argc(); i < argc; i++)
live.add(ins->arg(i), 0);
break;
@ -1739,9 +1740,10 @@ namespace nanojit
VMPI_snprintf(s, n, "%s", lirNames[op]);
break;
case LIR_callv:
case LIR_calli:
case LIR_calld:
CASE64(LIR_callq:) {
CASE64(LIR_callq:)
case LIR_calld: {
const CallInfo* call = i->callInfo();
int32_t argc = i->argc();
int32_t m = int32_t(n); // Windows doesn't have 'ssize_t'
@ -3394,6 +3396,14 @@ namespace nanojit
return out->insImmD(d);
}
static const char* argtypeNames[] = {
"void", // ARGTYPE_V = 0
"int32_t", // ARGTYPE_I = 1
"uint32_t", // ARGTYPE_UI = 2
"uint64_t", // ARGTYPE_Q = 3
"double" // ARGTYPE_D = 4
};
LIns* ValidateWriter::insCall(const CallInfo *ci, LIns* args0[])
{
ArgType argTypes[MAXARGS];
@ -3402,6 +3412,27 @@ namespace nanojit
LIns* args[MAXARGS]; // in left-to-right order, unlike args0[]
LOpcode op = getCallOpcode(ci);
ArgType retType = ci->returnType();
if ((op == LIR_callv) != (retType == ARGTYPE_V) ||
(op == LIR_calli) != (retType == ARGTYPE_UI ||
retType == ARGTYPE_I) ||
#ifdef NANOJIT_64BIT
(op == LIR_callq) != (retType == ARGTYPE_Q) ||
#endif
(op == LIR_calld) != (retType == ARGTYPE_D)) {
NanoAssertMsgf(0,
"LIR structure error (%s): return type mismatch: opcode %s with %s return type",
whereInPipeline, lirNames[op], argtypeNames[retType]);
}
if (op == LIR_callv && ci->_isPure) {
// Since nobody can use the result of a void call, any pure call
// would just be dead. This is probably a mistake.
NanoAssertMsgf(0,
"LIR structure error (%s): LIR_callv must only be used with nonpure functions.",
whereInPipeline);
}
if (ci->_isPure && ci->_storeAccSet != ACCSET_NONE)
errorAccSet(ci->_name, ci->_storeAccSet, "it should be ACCSET_NONE for pure functions");

View File

@ -509,13 +509,13 @@ namespace nanojit
inline LOpcode getCallOpcode(const CallInfo* ci) {
LOpcode op = LIR_callp;
switch (ci->returnType()) {
case ARGTYPE_V: op = LIR_callp; break;
case ARGTYPE_V: op = LIR_callv; break;
case ARGTYPE_I:
case ARGTYPE_UI: op = LIR_calli; break;
case ARGTYPE_D: op = LIR_calld; break;
#ifdef NANOJIT_64BIT
case ARGTYPE_Q: op = LIR_callq; break;
#endif
case ARGTYPE_D: op = LIR_calld; break;
default: NanoAssert(0); break;
}
return op;
@ -927,7 +927,8 @@ namespace nanojit
return isCmpOpcode(opcode());
}
bool isCall() const {
return isop(LIR_calli) ||
return isop(LIR_callv) ||
isop(LIR_calli) ||
#if defined NANOJIT_64BIT
isop(LIR_callq) ||
#endif

View File

@ -161,11 +161,10 @@ OP_UN(32)
//---------------------------------------------------------------------------
// Calls
//---------------------------------------------------------------------------
OP___(calli, 33, C, I, -1) // call subroutine that returns an int
OP_64(callq, 34, C, Q, -1) // call subroutine that returns a quad
OP___(calld, 35, C, D, -1) // call subroutine that returns a double
OP_UN(36)
OP___(callv, 33, C, V, -1) // call subroutine that returns void
OP___(calli, 34, C, I, -1) // call subroutine that returns an int
OP_64(callq, 35, C, Q, -1) // call subroutine that returns a quad
OP___(calld, 36, C, D, -1) // call subroutine that returns a double
//---------------------------------------------------------------------------
// Branches and labels

View File

@ -818,7 +818,7 @@ Assembler::asm_call(LIns* ins)
* used here with the ultimate VFP register, and not R0/R1, which
* potentially allows for R0/R1 to get corrupted as described.
*/
} else {
} else if (!ins->isop(LIR_callv)) {
prepareResultReg(ins, rmask(retRegs[0]));
// Immediately free the resources as we need to re-use the register for
// the arguments.
@ -837,7 +837,7 @@ Assembler::asm_call(LIns* ins)
// If we aren't using VFP, assert that the LIR operation is an integer
// function call.
NanoAssert(ARM_VFP || ins->isop(LIR_calli));
NanoAssert(ARM_VFP || ins->isop(LIR_callv) || ins->isop(LIR_calli));
// If we're using VFP, and the return type is a double, it'll come back in
// R0/R1. We need to either place it in the result fp reg, or store it.

View File

@ -1712,27 +1712,28 @@ namespace nanojit
void
Assembler::asm_call(LIns* ins)
{
Register rr;
LOpcode op = ins->opcode();
if (!ins->isop(LIR_callv)) {
Register rr;
LOpcode op = ins->opcode();
switch (op) {
case LIR_calld:
NanoAssert(cpu_has_fpu);
rr = FV0;
break;
case LIR_calli:
rr = retRegs[0];
break;
default:
BADOPCODE(op);
return;
switch (op) {
case LIR_calli:
rr = retRegs[0];
break;
case LIR_calld:
NanoAssert(cpu_has_fpu);
rr = FV0;
break;
default:
BADOPCODE(op);
return;
}
deprecated_prepResultReg(ins, rmask(rr));
}
deprecated_prepResultReg(ins, rmask(rr));
// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.
evictScratchRegsExcept(0);
const CallInfo* ci = ins->callInfo();

View File

@ -695,12 +695,13 @@ namespace nanojit
}
void Assembler::asm_call(LIns *ins) {
Register retReg = ( ins->isop(LIR_calld) ? F1 : retRegs[0] );
deprecated_prepResultReg(ins, rmask(retReg));
if (!ins->isop(LIR_callv)) {
Register retReg = ( ins->isop(LIR_calld) ? F1 : retRegs[0] );
deprecated_prepResultReg(ins, rmask(retReg));
}
// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.
evictScratchRegsExcept(0);
const CallInfo* call = ins->callInfo();

View File

@ -39,6 +39,8 @@
#include "nanojit.h"
#if defined FEATURE_NANOJIT && defined NANOJIT_SH4
namespace nanojit
{
const int Assembler::NumArgRegs = 4;
@ -2034,14 +2036,16 @@ namespace nanojit
}
void Assembler::asm_call(LIns *inst) {
Register result_reg = inst->isop(LIR_calld) ? retDregs[0] : retRegs[0];
prepareResultReg(inst, rmask(result_reg));
// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.
evictScratchRegsExcept(rmask(result_reg));
if (!inst->isop(LIR_callv)) {
Register result_reg = inst->isop(LIR_calld) ? retDregs[0] : retRegs[0];
prepareResultReg(inst, rmask(result_reg));
// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.
evictScratchRegsExcept(rmask(result_reg));
} else {
evictScratchRegsExcept(0);
}
ArgType types[MAXARGS];
const CallInfo* call = inst->callInfo();
uint32_t argc = call->getArgTypes(types);
@ -3231,3 +3235,4 @@ namespace nanojit
}
}
}
#endif // FEATURE_NANOJIT && NANOJIT_SH4

View File

@ -153,12 +153,13 @@ namespace nanojit
void Assembler::asm_call(LIns* ins)
{
Register retReg = ( ins->isop(LIR_calld) ? F0 : retRegs[0] );
deprecated_prepResultReg(ins, rmask(retReg));
if (!ins->isop(LIR_callv)) {
Register retReg = ( ins->isop(LIR_calld) ? F0 : retRegs[0] );
deprecated_prepResultReg(ins, rmask(retReg));
}
// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.
evictScratchRegsExcept(0);
const CallInfo* ci = ins->callInfo();
@ -169,7 +170,8 @@ namespace nanojit
ArgType argTypes[MAXARGS];
uint32_t argc = ci->getArgTypes(argTypes);
NanoAssert(ins->isop(LIR_callp) || ins->isop(LIR_calld));
NanoAssert(ins->isop(LIR_callv) || ins->isop(LIR_callp) ||
ins->isop(LIR_calld));
verbose_only(if (_logc->lcbits & LC_Native)
outputf(" %p:", _nIns);
)

View File

@ -903,10 +903,13 @@ namespace nanojit
}
void Assembler::asm_call(LIns *ins) {
Register rr = ( ins->isop(LIR_calld) ? XMM0 : retRegs[0] );
prepareResultReg(ins, rmask(rr));
evictScratchRegsExcept(rmask(rr));
if (!ins->isop(LIR_callv)) {
Register rr = ( ins->isop(LIR_calld) ? XMM0 : retRegs[0] );
prepareResultReg(ins, rmask(rr));
evictScratchRegsExcept(rmask(rr));
} else {
evictScratchRegsExcept(0);
}
const CallInfo *call = ins->callInfo();
ArgType argTypes[MAXARGS];

View File

@ -966,11 +966,13 @@ namespace nanojit
void Assembler::asm_call(LIns* ins)
{
Register rr = ( ins->isop(LIR_calld) ? FST0 : retRegs[0] );
prepareResultReg(ins, rmask(rr));
evictScratchRegsExcept(rmask(rr));
if (!ins->isop(LIR_callv)) {
Register rr = ( ins->isop(LIR_calld) ? FST0 : retRegs[0] );
prepareResultReg(ins, rmask(rr));
evictScratchRegsExcept(rmask(rr));
} else {
evictScratchRegsExcept(0);
}
const CallInfo* call = ins->callInfo();
// must be signed, not unsigned
uint32_t iargs = call->count_int32_args();
@ -1021,7 +1023,7 @@ namespace nanojit
}
}
NanoAssert(ins->isop(LIR_callp) || ins->isop(LIR_calld));
NanoAssert(ins->isop(LIR_callv) || ins->isop(LIR_callp) || ins->isop(LIR_calld));
if (!indirect) {
CALL(call);
}