2009-07-10 12:58:34 -07:00
|
|
|
/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
|
|
|
|
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
|
2008-06-19 10:47:58 -07:00
|
|
|
/* ***** BEGIN LICENSE BLOCK *****
|
|
|
|
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
|
|
|
|
*
|
|
|
|
* The contents of this file are subject to the Mozilla Public License Version
|
|
|
|
* 1.1 (the "License"); you may not use this file except in compliance with
|
|
|
|
* the License. You may obtain a copy of the License at
|
|
|
|
* http://www.mozilla.org/MPL/
|
|
|
|
*
|
|
|
|
* Software distributed under the License is distributed on an "AS IS" basis,
|
|
|
|
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
|
|
|
* for the specific language governing rights and limitations under the
|
|
|
|
* License.
|
|
|
|
*
|
|
|
|
* The Original Code is [Open Source Virtual Machine].
|
|
|
|
*
|
|
|
|
* The Initial Developer of the Original Code is
|
|
|
|
* Adobe System Incorporated.
|
|
|
|
* Portions created by the Initial Developer are Copyright (C) 2004-2007
|
|
|
|
* the Initial Developer. All Rights Reserved.
|
|
|
|
*
|
|
|
|
* Contributor(s):
|
|
|
|
* Adobe AS3 Team
|
|
|
|
*
|
|
|
|
* Alternatively, the contents of this file may be used under the terms of
|
|
|
|
* either the GNU General Public License Version 2 or later (the "GPL"), or
|
|
|
|
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
|
|
|
|
* in which case the provisions of the GPL or the LGPL are applicable instead
|
|
|
|
* of those above. If you wish to allow use of your version of this file only
|
|
|
|
* under the terms of either the GPL or the LGPL, and not to allow others to
|
|
|
|
* use your version of this file under the terms of the MPL, indicate your
|
|
|
|
* decision by deleting the provisions above and replace them with the notice
|
|
|
|
* and other provisions required by the GPL or the LGPL. If you do not delete
|
|
|
|
* the provisions above, a recipient may use your version of this file under
|
|
|
|
* the terms of any one of the MPL, the GPL or the LGPL.
|
|
|
|
*
|
|
|
|
* ***** END LICENSE BLOCK ***** */
|
|
|
|
|
|
|
|
#include "nanojit.h"
|
|
|
|
|
2008-10-13 13:29:18 -07:00
|
|
|
#ifdef FEATURE_NANOJIT
|
|
|
|
|
2008-06-19 10:47:58 -07:00
|
|
|
namespace nanojit
|
|
|
|
{
|
|
|
|
#ifdef NJ_VERBOSE
|
2009-07-10 12:58:34 -07:00
|
|
|
    /* A listing filter for LIR, going through backwards.  It merely
       passes its input to its output, but notes it down too.  When
       destructed, prints out what went through.  Is intended to be
       used to print arbitrary intermediate transformation stages of
       LIR. */
    class ReverseLister : public LirFilter
    {
        Allocator&   _alloc;    // arena used for the saved listing strings
        LirNameMap*  _names;    // formats each LIns into readable text
        const char*  _title;    // heading printed around the dump
        StringList   _strs;     // accumulated formatted instructions, in read order
        LogControl*  _logc;     // output sink
    public:
        ReverseLister(LirFilter* in, Allocator& alloc,
                      LirNameMap* names, LogControl* logc, const char* title)
            : LirFilter(in)
            , _alloc(alloc)
            , _names(names)
            , _title(title)
            , _strs(alloc)
            , _logc(logc)
        { }

        // Dump everything that has passed through this filter so far,
        // framed by BEGIN/END banners and numbered sequentially.
        void finish()
        {
            _logc->printf("\n");
            _logc->printf("=== BEGIN %s ===\n", _title);
            int j = 0;
            for (Seq<char*>* p = _strs.get(); p != NULL; p = p->tail)
                _logc->printf(" %02d: %s\n", j++, p->head);
            _logc->printf("=== END %s ===\n", _title);
            _logc->printf("\n");
        }

        // Pass the next instruction through unchanged, recording a
        // formatted copy of it for the eventual dump in finish().
        LInsp read()
        {
            LInsp i = in->read();
            const char* str = _names->formatIns(i);
            // copy into arena storage: formatIns's buffer may be reused
            char* cpy = new (_alloc) char[strlen(str)+1];
            strcpy(cpy, str);
            _strs.insert(cpy);
            return i;
        }
    };
|
2008-06-19 10:47:58 -07:00
|
|
|
#endif
|
2009-07-10 12:58:34 -07:00
|
|
|
|
|
|
|
    /**
     * Need the following:
     *
     *  - merging paths ( build a graph? ), possibly use external rep to drive codegen
     */
    // Construct an Assembler bound to its code allocator, arena allocator and
    // host VM core; initializes the platform backend (nInit), the verbose-mode
    // output state, and then puts all codegen state into its pristine form
    // via reset().
    Assembler::Assembler(CodeAlloc& codeAlloc, Allocator& alloc, AvmCore *core, LogControl* logc)
        : codeList(0)
        , alloc(alloc)
        , _codeAlloc(codeAlloc)
        , _branchStateMap(alloc)
        , _patches(alloc)
        , _labels(alloc)
        , config(core->config)
    {
        nInit(core);
        verbose_only( _logc = logc; )
        verbose_only( _outputCache = 0; )
        verbose_only( outlineEOL[0] = '\0'; )
        verbose_only( outputAddr = false; )

        reset();
    }
|
2008-06-19 10:47:58 -07:00
|
|
|
|
|
|
|
void Assembler::arReset()
|
2009-07-10 12:58:34 -07:00
|
|
|
{
|
|
|
|
_activation.highwatermark = 0;
|
|
|
|
_activation.lowwatermark = 0;
|
|
|
|
_activation.tos = 0;
|
|
|
|
|
|
|
|
for(uint32_t i=0; i<NJ_MAX_STACK_ENTRY; i++)
|
|
|
|
_activation.entry[i] = 0;
|
2009-09-01 15:27:34 -07:00
|
|
|
|
|
|
|
_branchStateMap.clear();
|
|
|
|
_patches.clear();
|
|
|
|
_labels.clear();
|
2009-07-10 12:58:34 -07:00
|
|
|
}
|
|
|
|
|
2009-07-29 16:21:40 -07:00
|
|
|
    // Return every register to the free pool via the platform backend, and
    // reset the debug-only counters used to cross-check the allocator.
    void Assembler::registerResetAll()
    {
        nRegisterResetAll(_allocator);

        // keep a tally of the registers to check that our allocator works correctly
        debug_only(_allocator.count = _allocator.countFree(); )
        debug_only(_allocator.checkCount(); )
        debug_only(_fpuStkDepth = 0; )
    }
|
|
|
|
|
|
|
|
    // Allocate one register from the set `allow`.  If any allowed register is
    // free, take one (preferring callee-saved registers).  Otherwise steal the
    // allowed register whose value is needed furthest away, emitting restore
    // code for the evicted value (codegen runs backwards, so the "restore"
    // executes before the stolen register's new use).
    Register Assembler::registerAlloc(RegisterMask allow)
    {
        RegAlloc &regs = _allocator;
        RegisterMask allowedAndFree = allow & regs.free;

        if (allowedAndFree)
        {
            // At least one usable register is free -- no need to steal.
            // Pick a preferred one if possible.
            RegisterMask preferredAndFree = allowedAndFree & SavedRegs;
            RegisterMask set = ( preferredAndFree ? preferredAndFree : allowedAndFree );
            Register r = nRegisterAllocFromSet(set);
            return r;
        }

        // nothing free, steal one
        // LSRA says pick the one with the furthest use
        counter_increment(steals);
        LIns* vic = findVictim(regs, allow);
        NanoAssert(vic);

        Reservation* resv = vic->resvUsed();

        // restore vic: detach the victim's value from the register so the
        // register can be handed to the caller
        Register r = resv->reg;
        regs.removeActive(r);
        resv->reg = UnknownReg;

        asm_restore(vic, resv, r);
        return r;
    }
|
2008-06-19 10:47:58 -07:00
|
|
|
|
2008-10-13 13:29:18 -07:00
|
|
|
/**
|
|
|
|
* these instructions don't have to be saved & reloaded to spill,
|
|
|
|
* they can just be recalculated w/out any inputs.
|
|
|
|
*/
|
|
|
|
bool Assembler::canRemat(LIns *i) {
|
2009-08-27 13:22:30 -07:00
|
|
|
return i->isconst() || i->isconstq() || i->isop(LIR_alloc);
|
2008-10-13 13:29:18 -07:00
|
|
|
}
|
|
|
|
|
2009-07-15 16:50:01 -07:00
|
|
|
    // Retire the code block [start,end) we just filled onto this fragment's
    // code list (if any), then acquire a fresh block and point the emission
    // cursor `eip` at its high end (code is emitted downwards).
    void Assembler::codeAlloc(NIns *&start, NIns *&end, NIns *&eip)
    {
        // save the block we just filled
        if (start)
            CodeAlloc::add(codeList, start, end);

        // CodeAlloc contract: allocations never fail
        _codeAlloc.alloc(start, end);
        NanoAssert(uintptr_t(end) - uintptr_t(start) >= (size_t)LARGEST_UNDERRUN_PROT);
        eip = end;
    }
|
|
|
|
|
2009-07-15 16:50:01 -07:00
|
|
|
    // Put the assembler back into its initial state: no emission cursors, no
    // code blocks, and fresh register and stack-frame bookkeeping.
    void Assembler::reset()
    {
        _nIns = 0;
        _nExitIns = 0;
        codeStart = codeEnd = 0;
        exitStart = exitEnd = 0;
        _stats.pages = 0;
        codeList = 0;

        nativePageReset();
        registerResetAll();
        arReset();
    }
|
|
|
|
|
2009-07-15 16:50:01 -07:00
|
|
|
#ifdef _DEBUG
|
|
|
|
void Assembler::pageValidate()
|
2009-07-30 14:28:26 -07:00
|
|
|
{
|
2009-07-15 16:50:01 -07:00
|
|
|
if (error()) return;
|
|
|
|
// _nIns needs to be at least on one of these pages
|
|
|
|
NanoAssertMsg(_inExit ? containsPtr(exitStart, exitEnd, _nIns) : containsPtr(codeStart, codeEnd, _nIns),
|
|
|
|
"Native instruction pointer overstep paging bounds; check overrideProtect for last instruction");
|
2009-07-30 14:28:26 -07:00
|
|
|
}
|
2009-07-15 16:50:01 -07:00
|
|
|
#endif
|
2009-07-10 12:58:34 -07:00
|
|
|
|
2009-07-15 16:50:01 -07:00
|
|
|
#endif
|
2009-07-30 14:28:26 -07:00
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
|
|
|
|
#ifdef _DEBUG
|
|
|
|
|
|
|
|
    // Debug check: walk every live activation-record entry and verify that
    // each instruction's Reservation agrees with the stack slot(s) it
    // occupies and with the register allocator's records.  Multi-slot
    // entries (LIR_alloc blocks, quads) are skipped over as units.
    void Assembler::resourceConsistencyCheck()
    {
        if (error()) return;

#ifdef NANOJIT_IA32
        // x87 stack discipline: FST0 active iff one value is on the FPU stack
        NanoAssert((_allocator.active[FST0] && _fpuStkDepth == -1) ||
            (!_allocator.active[FST0] && _fpuStkDepth == 0));
#endif

        AR &ar = _activation;
        // check AR entries
        NanoAssert(ar.highwatermark < NJ_MAX_STACK_ENTRY);
        LIns* ins = 0;
        RegAlloc* regs = &_allocator;
        for(uint32_t i = ar.lowwatermark; i < ar.tos; i++)
        {
            ins = ar.entry[i];
            if ( !ins )
                continue;
            Reservation *r = ins->resvUsed();
            if (r->arIndex) {
                if (ins->isop(LIR_alloc)) {
                    // alloc spans size()/4 consecutive slots, all pointing at ins;
                    // arIndex records the last of them
                    int j=i+1;
                    for (int n = i + (ins->size()>>2); j < n; j++) {
                        NanoAssert(ar.entry[j]==ins);
                    }
                    NanoAssert(r->arIndex == (uint32_t)j-1);
                    i = j-1;
                }
                else if (ins->isQuad()) {
                    // quads occupy two slots; the adjacent one must match too
                    NanoAssert(ar.entry[i - stack_direction(1)]==ins);
                    i += 1; // skip high word
                }
                else {
                    NanoAssertMsg(r->arIndex == i, "Stack record index mismatch");
                }
            }
            NanoAssertMsg( r->reg==UnknownReg || regs->isConsistent(r->reg,ins), "Register record mismatch");
        }

        registerConsistencyCheck();
    }
|
|
|
|
|
|
|
|
    // Debug check: for every register this platform manages, a free register
    // must have no active instruction, and an active register's instruction
    // must have a Reservation pointing back at that same register.
    void Assembler::registerConsistencyCheck()
    {
        // check registers
        RegAlloc *regs = &_allocator;
        uint32_t managed = regs->managed;   // bitmask of managed registers
        Register r = FirstReg;
        while(managed)
        {
            if (managed&1)
            {
                if (regs->isFree(r))
                {
                    NanoAssertMsgf(regs->getActive(r)==0, "register %s is free but assigned to ins", gpn(r));
                }
                else
                {
                    LIns* ins = regs->getActive(r);
                    // @todo we should be able to check across RegAlloc's somehow (to include savedGP...)
                    Reservation *v = ins->resvUsed();
                    NanoAssertMsg( regs->getActive(v->reg)==ins, "Register record mismatch");
                }
            }

            // next register in bitfield
            r = nextreg(r);
            managed >>= 1;
        }
    }
|
|
|
|
#endif /* _DEBUG */
|
|
|
|
|
|
|
|
    // Allocate registers for two operands at once, guaranteeing they end up
    // in distinct registers (unless ia == ib, in which case they share one).
    // On return resva/resvb point at the operands' reservations.  If ib is
    // already sitting in an allowed register it is kept there and that
    // register is withheld from ia's allocation; otherwise ia is allocated
    // first and its register withheld from ib's.
    void Assembler::findRegFor2(RegisterMask allow, LIns* ia, Reservation* &resva, LIns* ib, Reservation* &resvb)
    {
        if (ia == ib)
        {
            findRegFor(ia, allow);
            resva = resvb = ia->resvUsed();
        }
        else
        {
            resvb = ib->resv();
            bool rbDone = (resvb->used && resvb->reg != UnknownReg && (allow & rmask(resvb->reg)));
            if (rbDone) {
                // ib already assigned to an allowable reg, keep that one
                allow &= ~rmask(resvb->reg);
            }
            Register ra = findRegFor(ia, allow);
            resva = ia->resv();
            NanoAssert(error() || (resva->used && ra != UnknownReg));
            if (!rbDone) {
                // exclude ia's register so the pair never aliases
                allow &= ~rmask(ra);
                findRegFor(ib, allow);
                resvb = ib->resvUsed();
            }
        }
    }
|
2008-06-19 10:47:58 -07:00
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
Register Assembler::findSpecificRegFor(LIns* i, Register w)
|
|
|
|
{
|
|
|
|
return findRegFor(i, rmask(w));
|
|
|
|
}
|
2008-10-20 10:15:07 -07:00
|
|
|
|
|
|
|
Register Assembler::getBaseReg(LIns *i, int &d, RegisterMask allow)
|
|
|
|
{
|
2009-08-27 13:22:30 -07:00
|
|
|
if (i->isop(LIR_alloc)) {
|
2008-10-20 10:15:07 -07:00
|
|
|
d += findMemFor(i);
|
|
|
|
return FP;
|
|
|
|
}
|
2009-08-27 17:52:46 -07:00
|
|
|
return findRegFor(i, allow);
|
2008-10-20 10:15:07 -07:00
|
|
|
}
|
2009-07-10 12:58:34 -07:00
|
|
|
|
|
|
|
    // Ensure instruction `i` has a register drawn from `allow` and return it.
    // Four cases: no reservation yet (create one + allocate); reservation but
    // no register (allocate); register already allowed (reuse); register
    // disallowed (allocate a new one and emit a copy — or on IA32, evict and
    // reallocate when an x87<->XMM class change makes a plain copy impossible).
    Register Assembler::findRegFor(LIns* i, RegisterMask allow)
    {
        if (i->isop(LIR_alloc)) {
            // never allocate a reg for this w/out stack space too
            findMemFor(i);
        }

        Reservation* resv = i->resv();
        Register r = resv->reg;

        if (!resv->used) {
            // No reservation. Create one, and do a fresh allocation.
            RegisterMask prefer = hint(i, allow);
            resv->init();
            r = resv->reg = registerAlloc(prefer);
            _allocator.addActive(r, i);

        } else if (r == UnknownReg) {
            // Existing reservation with an unknown register. Do a fresh
            // allocation.
            RegisterMask prefer = hint(i, allow);
            r = resv->reg = registerAlloc(prefer);
            _allocator.addActive(r, i);

        } else if (rmask(r) & allow) {
            // Existing reservation with a known register allocated, and
            // that register is allowed. Use it.
            _allocator.useActive(r);

        } else {
            // Existing reservation with a known register allocated, but
            // the register is not allowed.
            RegisterMask prefer = hint(i, allow);
#ifdef AVMPLUS_IA32
            if (((rmask(r)&XmmRegs) && !(allow&XmmRegs)) ||
                ((rmask(r)&x87Regs) && !(allow&x87Regs)))
            {
                // x87 <-> xmm copy required
                //_nvprof("fpu-evict",1);
                evict(r, i);
                r = resv->reg = registerAlloc(prefer);
                _allocator.addActive(r, i);
            } else
#endif
            {
                // Grab a new register and copy the old contents to the new.
                _allocator.retire(r);
                Register s = r;
                r = resv->reg = registerAlloc(prefer);
                _allocator.addActive(r, i);
                if ((rmask(s) & GpRegs) && (rmask(r) & GpRegs)) {
#ifdef NANOJIT_ARM
                    MOV(s, r);
#else
                    MR(s, r);
#endif
                }
                else {
                    // cross-class (non-GP) copy handled by the backend
                    asm_nongp_copy(s, r);
                }
            }
        }
        return r;
    }
|
|
|
|
|
2009-08-27 21:59:52 -07:00
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
    // Ensure instruction `i` has a stack slot in the activation record,
    // reserving one on first use, and return its frame displacement.
    int Assembler::findMemFor(LIns *i)
    {
        Reservation* resv = i->resv();
        if (!resv->used)
            resv->init();
        if (!resv->arIndex) {
            resv->arIndex = arReserve(i);
            NanoAssert(resv->arIndex <= _activation.highwatermark);
        }
        return disp(resv);
    }
|
|
|
|
|
|
|
|
Register Assembler::prepResultReg(LIns *i, RegisterMask allow)
|
|
|
|
{
|
2009-09-02 16:36:51 -07:00
|
|
|
Reservation* resv = i->resv();
|
|
|
|
const bool pop = !resv->used || resv->reg == UnknownReg;
|
2009-07-10 12:58:34 -07:00
|
|
|
Register rr = findRegFor(i, allow);
|
|
|
|
freeRsrcOf(i, pop);
|
|
|
|
return rr;
|
|
|
|
}
|
|
|
|
|
|
|
|
    // Emit a spill of i's register to its stack slot (if it has one; a zero
    // displacement means no slot and asm_spill handles that).  `quad` selects
    // a 64-bit store; `pop` lets IA32 pop the x87 stack while spilling.
    void Assembler::asm_spilli(LInsp i, Reservation *resv, bool pop)
    {
        int d = disp(resv);
        Register rr = resv->reg;
        bool quad = i->opcode() == LIR_iparam || i->isQuad();
        verbose_only( if (d && (_logc->lcbits & LC_RegAlloc)) {
                         outputForEOL(" <= spill %s",
                         _thisfrag->lirbuf->names->formatRef(i)); } )
        asm_spill(rr, d, pop, quad);
    }
|
2008-10-20 10:15:07 -07:00
|
|
|
|
2009-06-23 01:36:53 -07:00
|
|
|
    // NOTE: Because this function frees slots on the stack, it is not safe to
    // follow a call to this with a call to anything which might spill a
    // register, as the stack can be corrupted. Refer to bug 495239 for a more
    // detailed description.
    //
    // Release everything instruction `i` holds: emit its spill (if it has a
    // register), return the register to the pool, free its stack slot, and
    // clear its reservation.
    void Assembler::freeRsrcOf(LIns *i, bool pop)
    {
        Reservation* resv = i->resvUsed();
        int index = resv->arIndex;
        Register rr = resv->reg;

        if (rr != UnknownReg)
        {
            asm_spilli(i, resv, pop);
            _allocator.retire(rr);  // free any register associated with entry
        }
        if (index) {
            NanoAssert(_activation.entry[index] == i);
            arFree(index);          // free any stack space associated with entry
        }
        i->resv()->clear();
    }
|
2008-06-19 10:47:58 -07:00
|
|
|
|
2009-08-30 18:48:21 -07:00
|
|
|
void Assembler::evictIfActive(Register r)
|
2009-07-10 12:58:34 -07:00
|
|
|
{
|
2009-08-30 22:02:08 -07:00
|
|
|
if (LIns* vic = _allocator.getActive(r)) {
|
2009-08-30 18:48:21 -07:00
|
|
|
evict(r, vic);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
    // Forcibly take register `r` away from its current occupant `vic`:
    // detach the register from vic's reservation and emit the code that
    // restores vic's value (which, running backwards, executes before the
    // register's new use).
    void Assembler::evict(Register r, LIns* vic)
    {
        // Not free, need to steal.
        counter_increment(steals);

        // Get vic's resv, check r matches.
        NanoAssert(!_allocator.isFree(r));
        NanoAssert(vic == _allocator.getActive(r));
        Reservation* resv = vic->resvUsed();
        NanoAssert(r == resv->reg);

        // Free r.
        _allocator.retire(r);
        resv->reg = UnknownReg;

        // Restore vic.
        asm_restore(vic, resv, r);
    }
|
2008-06-19 10:47:58 -07:00
|
|
|
|
|
|
|
    // Rewrite the guard's recorded jump so it lands on its target fragment's
    // entry point.  A null jmp means the guard was optimized away and there
    // is nothing to patch.
    void Assembler::patch(GuardRecord *lr)
    {
        if (!lr->jmp) // the guard might have been eliminated as redundant
            return;
        Fragment *frag = lr->exit->target;
        NanoAssert(frag->fragEntry != 0);
        nPatchBranch((NIns*)lr->jmp, frag->fragEntry);
        verbose_only(verbose_outputf("patching jump at %p to target %p\n",
            lr->jmp, frag->fragEntry);)
    }
|
|
|
|
|
2008-10-21 17:50:32 -07:00
|
|
|
void Assembler::patch(SideExit *exit)
|
2008-06-19 10:47:58 -07:00
|
|
|
{
|
2008-10-21 17:50:32 -07:00
|
|
|
GuardRecord *rec = exit->guards;
|
2009-08-04 08:33:14 -07:00
|
|
|
NanoAssert(rec);
|
2008-10-21 17:50:32 -07:00
|
|
|
while (rec) {
|
|
|
|
patch(rec);
|
2008-10-22 16:00:08 -07:00
|
|
|
rec = rec->next;
|
2008-10-21 17:50:32 -07:00
|
|
|
}
|
2008-06-19 10:47:58 -07:00
|
|
|
}
|
2008-11-04 14:20:19 -08:00
|
|
|
|
2009-02-11 17:40:27 -08:00
|
|
|
#ifdef NANOJIT_IA32
|
|
|
|
void Assembler::patch(SideExit* exit, SwitchInfo* si)
|
|
|
|
{
|
2009-07-10 12:58:34 -07:00
|
|
|
for (GuardRecord* lr = exit->guards; lr; lr = lr->next) {
|
|
|
|
Fragment *frag = lr->exit->target;
|
|
|
|
NanoAssert(frag->fragEntry != 0);
|
|
|
|
si->table[si->index] = frag->fragEntry;
|
|
|
|
}
|
2009-02-11 17:40:27 -08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2008-06-24 15:57:33 -07:00
|
|
|
    // Produce the native target address for a guard's side exit.  If no
    // register state was captured for this exit, generate a fresh exit stub;
    // otherwise reconcile the current register state with the captured one
    // and jump straight to the already-compiled target fragment.
    NIns* Assembler::asm_exit(LInsp guard)
    {
        SideExit *exit = guard->record()->exit;
        NIns* at = 0;
        if (!_branchStateMap.get(exit))
        {
            at = asm_leave_trace(guard);
        }
        else
        {
            RegAlloc* captured = _branchStateMap.get(exit);
            intersectRegisterState(*captured);
            at = exit->target->fragEntry;
            NanoAssert(at != 0);
            // each captured state is consumed exactly once
            _branchStateMap.remove(exit);
        }
        return at;
    }
|
|
|
|
|
|
|
|
    // Generate the exit-block code for a guard in the exit code region and
    // return the address the mainline conditional jump should target.  The
    // swapptrs() calls bracket the switch between main and exit emission
    // cursors; the captured register state is reconciled on re-entry.
    NIns* Assembler::asm_leave_trace(LInsp guard)
    {
        verbose_only( int32_t nativeSave = _stats.native );
        verbose_only( verbose_outputf("----------------------------------- ## END exit block %p", guard);)

        RegAlloc capture = _allocator;

        // this point is unreachable.  so free all the registers.
        // if an instruction has a stack entry we will leave it alone,
        // otherwise we free it entirely.  intersectRegisterState will restore.
        releaseRegisters();

        swapptrs();
        _inExit = true;

        debug_only( _sv_fpuStkDepth = _fpuStkDepth; _fpuStkDepth = 0; )

        nFragExit(guard);

        // restore the callee-saved register and parameters
        assignSavedRegs();
        assignParamRegs();

        intersectRegisterState(capture);

        // this can be useful for breaking whenever an exit is taken
        //INT3();
        //NOP();

        // we are done producing the exit logic for the guard so demark where our exit block code begins
        NIns* jmpTarget = _nIns;     // target in exit path for our mainline conditional jump

        // swap back pointers, effectively storing the last location used in the exit path
        swapptrs();
        _inExit = false;

        //verbose_only( verbose_outputf("         LIR_xt/xf swapptrs, _nIns is now %08X(%08X), _nExitIns is now %08X(%08X)",_nIns, *_nIns,_nExitIns,*_nExitIns) );
        verbose_only( verbose_outputf("%010lx:", (unsigned long)jmpTarget);)
        verbose_only( verbose_outputf("----------------------------------- ## BEGIN exit block (LIR_xt|LIR_xf)") );

#ifdef NANOJIT_IA32
        NanoAssertMsgf(_fpuStkDepth == _sv_fpuStkDepth, "LIR_xtf, _fpuStkDepth=%d, expect %d",_fpuStkDepth, _sv_fpuStkDepth);
        debug_only( _fpuStkDepth = _sv_fpuStkDepth; _sv_fpuStkDepth = 9999; )
#endif

        verbose_only(_stats.exitnative += (_stats.native-nativeSave));

        return jmpTarget;
    }
|
2009-07-10 12:58:34 -07:00
|
|
|
|
2009-09-01 15:27:34 -07:00
|
|
|
    // Prepare the assembler to compile `frag`: clear all codegen state,
    // initialize the activation record watermarks, reset statistics, set up
    // the native emission buffers, and let the platform backend do its own
    // per-fragment setup.
    void Assembler::beginAssembly(Fragment *frag)
    {
        reset();

        // reset() must have left everything pristine
        NanoAssert(codeList == 0);
        NanoAssert(codeStart == 0);
        NanoAssert(codeEnd == 0);
        NanoAssert(exitStart == 0);
        NanoAssert(exitEnd == 0);
        NanoAssert(_nIns == 0);
        NanoAssert(_nExitIns == 0);

        _thisfrag = frag;
        // slot 0 is unused; live entries start at index 1
        _activation.lowwatermark = 1;
        _activation.tos = _activation.lowwatermark;
        _activation.highwatermark = _activation.tos;
        _inExit = false;

        counter_reset(native);
        counter_reset(exitnative);
        counter_reset(steals);
        counter_reset(spills);
        counter_reset(remats);

        setError(None);

        // native code gen buffer setup
        nativePageSetup();

        // make sure we got memory at least one page
        if (error()) return;

#ifdef PERFM
        _stats.pages = 0;
        _stats.codeStart = _nIns-1;
        _stats.codeExitStart = _nExitIns-1;
#endif /* PERFM */

        _epilogue = NULL;

        nBeginAssembly();
    }
|
|
|
|
|
2009-08-29 23:26:54 -07:00
|
|
|
    // Compile the fragment's LIR into native code.  Builds the backwards
    // reader pipeline (LirReader -> StackFilters -> optional verbose
    // listers), drives code generation with gen(), then resolves every
    // recorded forward-branch patch against the label table.
    void Assembler::assemble(Fragment* frag)
    {
        if (error()) return;
        _thisfrag = frag;

        // Used for debug printing, if needed
        verbose_only(
        ReverseLister *pp_init = NULL;
        ReverseLister *pp_after_sf1 = NULL;
        ReverseLister *pp_after_sf2 = NULL;
        )

        // set up backwards pipeline: assembler -> StackFilter -> LirReader
        LirReader bufreader(frag->lastIns);

        // Used to construct the pipeline
        LirFilter* prev = &bufreader;

        // The LIR passes through these filters as listed in this
        // function, viz, top to bottom.

        // INITIAL PRINTING
        verbose_only( if (_logc->lcbits & LC_ReadLIR) {
        pp_init = new (alloc) ReverseLister(prev, alloc, frag->lirbuf->names, _logc,
                                    "Initial LIR");
        prev = pp_init;
        })

        // STOREFILTER for sp
        StackFilter storefilter1(prev, alloc, frag->lirbuf, frag->lirbuf->sp);
        prev = &storefilter1;

        verbose_only( if (_logc->lcbits & LC_AfterSF_SP) {
        pp_after_sf1 = new (alloc) ReverseLister(prev, alloc, frag->lirbuf->names, _logc,
                                         "After Storefilter(sp)");
        prev = pp_after_sf1;
        })

        // STOREFILTER for rp
        StackFilter storefilter2(prev, alloc, frag->lirbuf, frag->lirbuf->rp);
        prev = &storefilter2;

        verbose_only( if (_logc->lcbits & LC_AfterSF_RP) {
        pp_after_sf2 = new (alloc) ReverseLister(prev, alloc, frag->lirbuf->names, _logc,
                                         "After StoreFilter(rp) (final LIR)");
        prev = pp_after_sf2;
        })

        _inExit = false;

        gen(prev);

        if (!error()) {
            // patch all branches
            NInsMap::Iter iter(_patches);
            while (iter.next()) {
                NIns* where = iter.key();
                LIns* targ = iter.value();
                LabelState *label = _labels.get(targ);
                NIns* ntarg = label->addr;
                if (ntarg) {
                    nPatchBranch(where,ntarg);
                }
                else {
                    // branch to a label that was never emitted
                    setError(UnknownBranch);
                    break;
                }
            }
        }

        // If we were accumulating debug info in the various ReverseListers,
        // call finish() to emit whatever contents they have accumulated.
        verbose_only(
        if (pp_init)        pp_init->finish();
        if (pp_after_sf1)   pp_after_sf1->finish();
        if (pp_after_sf2)   pp_after_sf2->finish();
        )
    }
|
|
|
|
|
2009-08-29 23:26:54 -07:00
|
|
|
    // Finish compiling `frag`: emit the prologue (generated last, since code
    // is emitted backwards), hand the used code regions to the fragment's
    // code list, flush the instruction cache, and record the fragment's
    // entry points.  On error all allocated code memory is released instead.
    void Assembler::endAssembly(Fragment* frag)
    {
        // don't try to patch code if we are in an error state since we might have partially
        // overwritten the code cache already
        if (error()) {
            // something went wrong, release all allocated code memory
            _codeAlloc.freeAll(codeList);
            _codeAlloc.free(exitStart, exitEnd);
            _codeAlloc.free(codeStart, codeEnd);
            return;
        }

        NIns* fragEntry = genPrologue();
        verbose_only( outputAddr=true; )
        verbose_only( asm_output("[prologue]"); )

        // check for resource leaks
        debug_only(
            for(uint32_t i=_activation.lowwatermark;i<_activation.highwatermark; i++) {
                NanoAssertMsgf(_activation.entry[i] == 0, "frame entry %d wasn't freed\n",-4*i);
            }
        )

        // save used parts of current block on fragment's code list, free the rest
#ifdef NANOJIT_ARM
        // ARM tracks a separate literal-pool slot cursor
        _codeAlloc.addRemainder(codeList, exitStart, exitEnd, _nExitSlot, _nExitIns);
        _codeAlloc.addRemainder(codeList, codeStart, codeEnd, _nSlot, _nIns);
#else
        _codeAlloc.addRemainder(codeList, exitStart, exitEnd, exitStart, _nExitIns);
        _codeAlloc.addRemainder(codeList, codeStart, codeEnd, codeStart, _nIns);
#endif

        // at this point all our new code is in the d-cache and not the i-cache,
        // so flush the i-cache on cpu's that need it.
        _codeAlloc.flushICache(codeList);

        // save entry point pointers
        frag->fragEntry = fragEntry;
        frag->setCode(_nIns);
        // PERFM_NVPROF("code", CodeAlloc::size(codeList));

        NanoAssertMsgf(_fpuStkDepth == 0,"_fpuStkDepth %d\n",_fpuStkDepth);

        debug_only( pageValidate(); )
        NanoAssert(_branchStateMap.isEmpty());
    }
|
|
|
|
|
|
|
|
void Assembler::releaseRegisters()
|
|
|
|
{
|
|
|
|
for (Register r = FirstReg; r <= LastReg; r = nextreg(r))
|
|
|
|
{
|
|
|
|
LIns *i = _allocator.getActive(r);
|
|
|
|
if (i)
|
|
|
|
{
|
|
|
|
// clear reg allocation, preserve stack allocation.
|
2009-09-02 16:36:51 -07:00
|
|
|
Reservation* resv = i->resvUsed();
|
2009-07-10 12:58:34 -07:00
|
|
|
_allocator.retire(r);
|
|
|
|
if (r == resv->reg)
|
|
|
|
resv->reg = UnknownReg;
|
|
|
|
|
|
|
|
if (!resv->arIndex && resv->reg == UnknownReg)
|
|
|
|
{
|
2009-06-23 14:51:15 -07:00
|
|
|
i->resv()->clear();
|
2009-07-10 12:58:34 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2008-10-13 13:29:18 -07:00
|
|
|
|
|
|
|
// Per-opcode instrumentation counters used by Assembler::gen().  When PERFM
// is enabled, each countlir_*() bumps a named profiling counter via
// _nvprof(); in non-PERFM builds they all expand to nothing.
#ifdef PERFM
#define countlir_live() _nvprof("lir-live",1)
#define countlir_ret() _nvprof("lir-ret",1)
#define countlir_alloc() _nvprof("lir-alloc",1)
#define countlir_var() _nvprof("lir-var",1)
#define countlir_use() _nvprof("lir-use",1)
#define countlir_def() _nvprof("lir-def",1)
#define countlir_imm() _nvprof("lir-imm",1)
#define countlir_param() _nvprof("lir-param",1)
#define countlir_cmov() _nvprof("lir-cmov",1)
#define countlir_ld() _nvprof("lir-ld",1)
#define countlir_ldq() _nvprof("lir-ldq",1)
#define countlir_alu() _nvprof("lir-alu",1)
#define countlir_qjoin() _nvprof("lir-qjoin",1)
#define countlir_qlo() _nvprof("lir-qlo",1)
#define countlir_qhi() _nvprof("lir-qhi",1)
#define countlir_fpu() _nvprof("lir-fpu",1)
#define countlir_st() _nvprof("lir-st",1)
#define countlir_stq() _nvprof("lir-stq",1)
#define countlir_jmp() _nvprof("lir-jmp",1)
#define countlir_jcc() _nvprof("lir-jcc",1)
#define countlir_label() _nvprof("lir-label",1)
#define countlir_xcc() _nvprof("lir-xcc",1)
#define countlir_x() _nvprof("lir-x",1)
#define countlir_loop() _nvprof("lir-loop",1)
#define countlir_call() _nvprof("lir-call",1)
#else
#define countlir_live()
#define countlir_ret()
#define countlir_alloc()
#define countlir_var()
#define countlir_use()
#define countlir_def()
#define countlir_imm()
#define countlir_param()
#define countlir_cmov()
#define countlir_ld()
#define countlir_ldq()
#define countlir_alu()
#define countlir_qjoin()
#define countlir_qlo()
#define countlir_qhi()
#define countlir_fpu()
#define countlir_st()
#define countlir_stq()
#define countlir_jmp()
#define countlir_jcc()
#define countlir_label()
#define countlir_xcc()
#define countlir_x()
#define countlir_loop()
#define countlir_call()
#endif
|
|
|
|
|
2009-09-01 15:27:34 -07:00
|
|
|
// Core code-generation loop: walks the filtered LIR stream strictly
// backwards (from the trace end toward LIR_start), skipping dead value
// nodes and dispatching each live node to a target-specific asm_* routine.
// Stops early if an assembly error is flagged.
void Assembler::gen(LirFilter* reader)
{
    // trace must end with LIR_x, LIR_loop, LIR_[f]ret, LIR_xtbl, or LIR_[f]live
    NanoAssert(reader->pos()->isop(LIR_x) ||
               reader->pos()->isop(LIR_ret) ||
               reader->pos()->isop(LIR_fret) ||
               reader->pos()->isop(LIR_xtbl) ||
               reader->pos()->isop(LIR_live) ||
               reader->pos()->isop(LIR_flive));

    // LIR_[f]live nodes whose treatment is deferred until a loop back-edge
    // is reached (see handleLoopCarriedExprs).
    InsList pending_lives(alloc);

    for (LInsp ins = reader->read(); !ins->isop(LIR_start) && !error();
                                     ins = reader->read())
    {
        /* What's going on here: we're visiting all the LIR nodes
           in the buffer, working strictly backwards in
           buffer-order, and generating machine instructions for
           them as we go.

           But we're not visiting all of them, only the ones that
           made it through the filter pipeline that we're reading
           from.  For each visited node, we first determine
           whether it's actually necessary, and if not skip it.
           Otherwise we fall into the big switch, which calls a
           target-specific routine to generate the required
           instructions.

           For each node, we need to decide whether we need to
           generate any code.  This is a rather subtle part of the
           generation algorithm.

           There are two categories:

           "statement" nodes -- ones with side effects.  Anything
           that could change control flow or the state of memory.
           These we must absolutely retain.  That accounts for the
           first part of the following disjunction for 'required'.

           The rest are "value" nodes, which compute a value based
           only on the operands to the node (and, in the case of
           loads, the state of memory).  It's safe to omit these
           if the value(s) computed are not used later.  Since
           we're visiting nodes in reverse order, if some
           previously visited (viz, later in the buffer ordering)
           node uses the value computed by this node, then this
           node will already have a register assigned to hold that
           value.  Hence we can consult the reservation to detect
           whether the value is in fact used.  That's the second
           part of the disjunction.
        */
        bool required = ins->isStmt() || ins->resv()->used;
        if (!required)
            continue;

        LOpcode op = ins->opcode();
        switch(op)
        {
            default:
                NanoAssertMsgf(false, "unsupported LIR instruction: %d (~0x40: %d)\n", op, op&~LIR64);
                break;

            case LIR_regfence:
                // Force everything out of registers at this point.
                evictRegs(~_allocator.free);
                break;

            case LIR_flive:
            case LIR_live: {
                countlir_live();
                LInsp op1 = ins->oprnd1();
                // alloca's are meant to live until the point of the LIR_live instruction, marking
                // other expressions as live ensures that they remain so at loop bottoms.
                // alloca areas require special treatment because they are accessed indirectly and
                // the indirect accesses are invisible to the assembler, other than via LIR_live.
                // other expression results are only accessed directly in ways that are visible to
                // the assembler, so extending those expression's lifetimes past the last loop edge
                // isn't necessary.
                if (op1->isop(LIR_alloc)) {
                    findMemFor(op1);
                } else {
                    pending_lives.add(ins);
                }
                break;
            }

            case LIR_ret:
            case LIR_fret:
                countlir_ret();
                asm_ret(ins);
                break;

            // allocate some stack space.  the value of this instruction
            // is the address of the stack space.
            case LIR_alloc: {
                countlir_alloc();
                Reservation *resv = ins->resvUsed();
                NanoAssert(resv->arIndex != 0);
                Register r = resv->reg;
                if (r != UnknownReg) {
                    // Free any register holding the alloc address; the AR
                    // slot itself is released below.
                    _allocator.retire(r);
                    resv->reg = UnknownReg;
                    asm_restore(ins, resv, r);
                }
                freeRsrcOf(ins, 0);
                break;
            }
            case LIR_int:
            {
                countlir_imm();
                asm_int(ins);
                break;
            }
            case LIR_float:
            case LIR_quad:
            {
                countlir_imm();
                asm_quad(ins);
                break;
            }
#if !defined NANOJIT_64BIT
            case LIR_callh:
            {
                // return result of quad-call in register
                prepResultReg(ins, rmask(retRegs[1]));
                // if hi half was used, we must use the call to ensure it happens
                findSpecificRegFor(ins->oprnd1(), retRegs[0]);
                break;
            }
#endif
            case LIR_param:
            {
                countlir_param();
                asm_param(ins);
                break;
            }
            case LIR_qlo:
            {
                countlir_qlo();
                asm_qlo(ins);
                break;
            }
            case LIR_qhi:
            {
                countlir_qhi();
                asm_qhi(ins);
                break;
            }
            case LIR_qcmov:
            case LIR_cmov:
            {
                countlir_cmov();
                asm_cmov(ins);
                break;
            }
            case LIR_ld:
            case LIR_ldc:
            case LIR_ldcb:
            case LIR_ldcs:
            {
                countlir_ld();
                asm_ld(ins);
                break;
            }
            case LIR_ldq:
            case LIR_ldqc:
            {
                countlir_ldq();
                asm_load64(ins);
                break;
            }
            case LIR_neg:
            case LIR_not:
            {
                countlir_alu();
                asm_neg_not(ins);
                break;
            }
            case LIR_qjoin:
            {
                countlir_qjoin();
                asm_qjoin(ins);
                break;
            }

#if defined NANOJIT_64BIT
            case LIR_qiadd:
            case LIR_qiand:
            case LIR_qilsh:
            case LIR_qursh:
            case LIR_qirsh:
            case LIR_qior:
            case LIR_qaddp:
            case LIR_qxor:
            {
                asm_qbinop(ins);
                break;
            }
#endif

            case LIR_add:
            case LIR_iaddp:
            case LIR_sub:
            case LIR_mul:
            case LIR_and:
            case LIR_or:
            case LIR_xor:
            case LIR_lsh:
            case LIR_rsh:
            case LIR_ush:
            case LIR_div:
            case LIR_mod:
            {
                countlir_alu();
                asm_arith(ins);
                break;
            }
#ifndef NJ_SOFTFLOAT
            case LIR_fneg:
            {
                countlir_fpu();
                asm_fneg(ins);
                break;
            }
            case LIR_fadd:
            case LIR_fsub:
            case LIR_fmul:
            case LIR_fdiv:
            {
                countlir_fpu();
                asm_fop(ins);
                break;
            }
            case LIR_i2f:
            {
                countlir_fpu();
                asm_i2f(ins);
                break;
            }
            case LIR_u2f:
            {
                countlir_fpu();
                asm_u2f(ins);
                break;
            }
            case LIR_i2q:
            case LIR_u2q:
            {
                countlir_alu();
                asm_promote(ins);
                break;
            }
#endif // NJ_SOFTFLOAT
            case LIR_sti:
            {
                countlir_st();
                asm_store32(ins->oprnd1(), ins->disp(), ins->oprnd2());
                break;
            }
            case LIR_stqi:
            {
                countlir_stq();
                LIns* value = ins->oprnd1();
                LIns* base = ins->oprnd2();
                int dr = ins->disp();
                if (value->isop(LIR_qjoin))
                {
                    // this is correct for little-endian only
                    asm_store32(value->oprnd1(), dr, base);
                    asm_store32(value->oprnd2(), dr+4, base);
                }
                else
                {
                    asm_store64(value, dr, base);
                }
                break;
            }

            case LIR_j:
            {
                countlir_jmp();
                LInsp to = ins->getTarget();
                LabelState *label = _labels.get(to);
                // the jump is always taken so whatever register state we
                // have from downstream code, is irrelevant to code before
                // this jump.  so clear it out.  we will pick up register
                // state from the jump target, if we have seen that label.
                releaseRegisters();
                if (label && label->addr) {
                    // forward jump - pick up register state from target.
                    unionRegisterState(label->regs);
                    JMP(label->addr);
                }
                else {
                    // backwards jump
                    handleLoopCarriedExprs(pending_lives);
                    if (!label) {
                        // save empty register state at loop header
                        _labels.add(to, 0, _allocator);
                    }
                    else {
                        intersectRegisterState(label->regs);
                    }
                    // Emit a placeholder jump; the real target is patched in
                    // once the label is reached (recorded in _patches).
                    JMP(0);
                    _patches.put(_nIns, to);
                }
                break;
            }

            case LIR_jt:
            case LIR_jf:
            {
                countlir_jcc();
                LInsp to = ins->getTarget();
                LIns* cond = ins->oprnd1();
                LabelState *label = _labels.get(to);
                if (label && label->addr) {
                    // forward jump to known label.  need to merge with label's register state.
                    unionRegisterState(label->regs);
                    asm_branch(op == LIR_jf, cond, label->addr);
                }
                else {
                    // back edge.
                    handleLoopCarriedExprs(pending_lives);
                    if (!label) {
                        // evict all registers, most conservative approach.
                        evictRegs(~_allocator.free);
                        _labels.add(to, 0, _allocator);
                    }
                    else {
                        // evict all registers, most conservative approach.
                        intersectRegisterState(label->regs);
                    }
                    // Placeholder branch, patched later via _patches.
                    NIns *branch = asm_branch(op == LIR_jf, cond, 0);
                    _patches.put(branch,to);
                }
                break;
            }
            case LIR_label:
            {
                countlir_label();
                LabelState *label = _labels.get(ins);
                if (!label) {
                    // label seen first, normal target of forward jump, save addr & allocator
                    _labels.add(ins, _nIns, _allocator);
                }
                else {
                    // we're at the top of a loop
                    NanoAssert(label->addr == 0);
                    //evictRegs(~_allocator.free);
                    intersectRegisterState(label->regs);
                    label->addr = _nIns;
                }
                verbose_only( if (_logc->lcbits & LC_Assembly) { outputAddr=true; asm_output("[%s]", _thisfrag->lirbuf->names->formatRef(ins)); } )
                break;
            }
            case LIR_xbarrier: {
                // optimization barrier only; emits no code.
                break;
            }
#ifdef NANOJIT_IA32
            case LIR_xtbl: {
                NIns* exit = asm_exit(ins); // does intersectRegisterState()
                asm_switch(ins, exit);
                break;
            }
#else
            case LIR_xtbl:
                NanoAssertMsg(0, "Not supported for this architecture");
                break;
#endif
            case LIR_xt:
            case LIR_xf:
            {
                countlir_xcc();
                // we only support cmp with guard right now, also assume it is 'close' and only emit the branch
                NIns* exit = asm_exit(ins); // does intersectRegisterState()
                LIns* cond = ins->oprnd1();
                asm_branch(op == LIR_xf, cond, exit);
                break;
            }
            case LIR_x:
            {
                countlir_x();
                // generate the side exit branch on the main trace.
                NIns *exit = asm_exit(ins);
                JMP( exit );
                break;
            }

#ifndef NJ_SOFTFLOAT
            case LIR_feq:
            case LIR_fle:
            case LIR_flt:
            case LIR_fgt:
            case LIR_fge:
            {
                countlir_fpu();
                asm_fcond(ins);
                break;
            }
#endif
            case LIR_eq:
            case LIR_ov:
            case LIR_le:
            case LIR_lt:
            case LIR_gt:
            case LIR_ge:
            case LIR_ult:
            case LIR_ule:
            case LIR_ugt:
            case LIR_uge:
#ifdef NANOJIT_64BIT
            case LIR_qeq:
            case LIR_qle:
            case LIR_qlt:
            case LIR_qgt:
            case LIR_qge:
            case LIR_qult:
            case LIR_qule:
            case LIR_qugt:
            case LIR_quge:
#endif
            {
                countlir_alu();
                asm_cond(ins);
                break;
            }

#ifndef NJ_SOFTFLOAT
            case LIR_fcall:
#endif
#ifdef NANOJIT_64BIT
            case LIR_qcall:
#endif
            case LIR_icall:
            {
                countlir_call();
                Register rr = UnknownReg;
#ifndef NJ_SOFTFLOAT
                if (op == LIR_fcall)
                {
                    // fcall
                    Reservation* rR = getresv(ins);
                    rr = asm_prep_fcall(rR, ins);
                }
                else
#endif
                {
                    rr = retRegs[0];
                    prepResultReg(ins, rmask(rr));
                }

                // do this after we've handled the call result, so we dont
                // force the call result to be spilled unnecessarily.

                evictScratchRegs();

                asm_call(ins);
            }
        }

#ifdef NJ_VERBOSE
        // We have to do final LIR printing inside this loop.  If we do it
        // before this loop, we we end up printing a lot of dead LIR
        // instructions.
        //
        // We print the LIns after generating the code.  This ensures that
        // the LIns will appear in debug output *before* the generated
        // code, because Assembler::outputf() prints everything in reverse.
        //
        // Note that some live LIR instructions won't be printed.  Eg. an
        // immediate won't be printed unless it is explicitly loaded into
        // a register (as opposed to being incorporated into an immediate
        // field in another machine instruction).
        //
        if (_logc->lcbits & LC_Assembly) {
            outputf(" %s", _thisfrag->lirbuf->names->formatIns(ins));
            // Special case: a guard condition won't get printed next time
            // around the loop, so do it now.
            if (ins->isGuard() && ins->oprnd1()) {
                outputf(" %s # handled by the guard",
                        _thisfrag->lirbuf->names->formatIns(ins->oprnd1()));
            }
        }
#endif

        if (error())
            return;

        // check that all is well (don't check in exit paths since its more complicated)
        debug_only( pageValidate(); )
        debug_only( resourceConsistencyCheck(); )
    }
}
|
|
|
|
|
|
|
|
/*
 * Write a jump table for the given SwitchInfo and store the table
 * address in the SwitchInfo. Every entry will initially point to
 * target.
 */
void Assembler::emitJumpTable(SwitchInfo* si, NIns* target)
{
    underrunProtect(si->count * sizeof(NIns*) + 20);
    // Align the (downward-growing) emission pointer to pointer size.
    _nIns = reinterpret_cast<NIns*>(uintptr_t(_nIns) & ~(sizeof(NIns*) - 1));
    // Emit entries backwards, one pointer-sized slot at a time; each slot
    // starts out pointing at 'target' and is patched later.
    for (uint32_t i = 0; i < si->count; ++i) {
        _nIns = (NIns*) (((uint8*) _nIns) - sizeof(NIns*));
        *(NIns**) _nIns = target;
    }
    // _nIns now points at the first (lowest-address) entry.
    si->table = (NIns**) _nIns;
}
|
2009-02-11 17:40:27 -08:00
|
|
|
|
2008-10-31 16:48:14 -07:00
|
|
|
void Assembler::assignSavedRegs()
|
2008-10-13 13:29:18 -07:00
|
|
|
{
|
|
|
|
// restore saved regs
|
2009-07-10 12:58:34 -07:00
|
|
|
releaseRegisters();
|
2008-10-13 13:29:18 -07:00
|
|
|
LirBuffer *b = _thisfrag->lirbuf;
|
|
|
|
for (int i=0, n = NumSavedRegs; i < n; i++) {
|
2008-10-31 16:48:14 -07:00
|
|
|
LIns *p = b->savedRegs[i];
|
2008-10-13 13:29:18 -07:00
|
|
|
if (p)
|
2009-06-30 21:18:55 -07:00
|
|
|
findSpecificRegFor(p, savedRegs[p->paramArg()]);
|
2008-10-13 13:29:18 -07:00
|
|
|
}
|
2008-07-16 14:21:31 -07:00
|
|
|
}
|
|
|
|
|
2008-10-31 16:48:14 -07:00
|
|
|
void Assembler::reserveSavedRegs()
|
2008-10-13 13:29:18 -07:00
|
|
|
{
|
|
|
|
LirBuffer *b = _thisfrag->lirbuf;
|
|
|
|
for (int i=0, n = NumSavedRegs; i < n; i++) {
|
2008-10-31 16:48:14 -07:00
|
|
|
LIns *p = b->savedRegs[i];
|
2008-10-13 13:29:18 -07:00
|
|
|
if (p)
|
|
|
|
findMemFor(p);
|
|
|
|
}
|
|
|
|
}
|
2008-09-02 22:29:23 -07:00
|
|
|
|
2008-10-31 16:48:14 -07:00
|
|
|
// restore parameter registers
|
|
|
|
void Assembler::assignParamRegs()
|
|
|
|
{
|
|
|
|
LInsp state = _thisfrag->lirbuf->state;
|
|
|
|
if (state)
|
2009-07-10 12:58:34 -07:00
|
|
|
findSpecificRegFor(state, argRegs[state->paramArg()]);
|
2008-10-31 16:48:14 -07:00
|
|
|
LInsp param1 = _thisfrag->lirbuf->param1;
|
|
|
|
if (param1)
|
2009-06-30 21:18:55 -07:00
|
|
|
findSpecificRegFor(param1, argRegs[param1->paramArg()]);
|
2008-10-31 16:48:14 -07:00
|
|
|
}
|
2009-07-10 12:58:34 -07:00
|
|
|
|
2009-07-08 15:54:31 -07:00
|
|
|
// Called at loop back-edges: force every deferred LIR_[f]live operand to a
// location (stack for constants, a register otherwise) so values that span
// the loop remain available at the bottom of the loop.  Empties the list.
void Assembler::handleLoopCarriedExprs(InsList& pending_lives)
{
    // ensure that exprs spanning the loop are marked live at the end of the loop
    reserveSavedRegs();
    for (Seq<LIns*> *p = pending_lives.get(); p != NULL; p = p->tail) {
        LIns *i = p->head;
        NanoAssert(i->isop(LIR_live) || i->isop(LIR_flive));
        LIns *op1 = i->oprnd1();
        // Constants go to memory; non-constants get a register of the
        // appropriate class (FP for LIR_flive, GP otherwise).
        if (op1->isconst() || op1->isconstf() || op1->isconstq())
            findMemFor(op1);
        else
            findRegFor(op1, i->isop(LIR_flive) ? FpRegs : GpRegs);
    }
    pending_lives.clear();
}
|
2008-09-02 22:29:23 -07:00
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
// Free the activation-record entry at 'idx', including any additional
// adjacent slots occupied by the same multi-word instruction (quads and
// allocs occupy several consecutive slots; see arReserve).
void Assembler::arFree(uint32_t idx)
{
    AR &ar = _activation;
    LIns *i = ar.entry[idx];
    // Walk downwards clearing every slot that still holds this instruction.
    // NOTE(review): assumes idx > 0 and that a slot below the entry's run
    // does not also hold 'i' -- guaranteed by arReserve's layout; confirm
    // that lowwatermark > 0 so idx cannot underflow.
    do {
        ar.entry[idx] = 0;
        idx--;
    } while (ar.entry[idx] == i);
}
|
2008-06-19 10:47:58 -07:00
|
|
|
|
|
|
|
#ifdef NJ_VERBOSE
|
2009-07-10 12:58:34 -07:00
|
|
|
void Assembler::printActivationState()
|
|
|
|
{
|
|
|
|
bool verbose_activation = false;
|
|
|
|
if (!verbose_activation)
|
|
|
|
return;
|
|
|
|
|
2008-06-19 10:47:58 -07:00
|
|
|
#ifdef NANOJIT_ARM
|
2009-07-10 12:58:34 -07:00
|
|
|
// @todo Why is there here?!? This routine should be indep. of platform
|
|
|
|
verbose_only(
|
|
|
|
if (_logc->lcbits & LC_Assembly) {
|
|
|
|
char* s = &outline[0];
|
|
|
|
memset(s, ' ', 51); s[51] = '\0';
|
|
|
|
s += strlen(s);
|
|
|
|
sprintf(s, " SP ");
|
|
|
|
s += strlen(s);
|
|
|
|
for(uint32_t i=_activation.lowwatermark; i<_activation.tos;i++) {
|
|
|
|
LInsp ins = _activation.entry[i];
|
|
|
|
if (ins && ins !=_activation.entry[i+1]) {
|
|
|
|
sprintf(s, "%d(%s) ", 4*i, _thisfrag->lirbuf->names->formatRef(ins));
|
|
|
|
s += strlen(s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
output(&outline[0]);
|
|
|
|
}
|
|
|
|
)
|
2008-06-19 10:47:58 -07:00
|
|
|
#else
|
2009-07-10 12:58:34 -07:00
|
|
|
verbose_only(
|
|
|
|
char* s = &outline[0];
|
|
|
|
if (_logc->lcbits & LC_Assembly) {
|
|
|
|
memset(s, ' ', 51); s[51] = '\0';
|
|
|
|
s += strlen(s);
|
|
|
|
sprintf(s, " ebp ");
|
|
|
|
s += strlen(s);
|
|
|
|
|
|
|
|
for(uint32_t i=_activation.lowwatermark; i<_activation.tos;i++) {
|
|
|
|
LInsp ins = _activation.entry[i];
|
|
|
|
if (ins) {
|
|
|
|
sprintf(s, "%d(%s) ", -4*i,_thisfrag->lirbuf->names->formatRef(ins));
|
|
|
|
s += strlen(s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
output(&outline[0]);
|
|
|
|
}
|
|
|
|
)
|
2008-06-19 10:47:58 -07:00
|
|
|
#endif
|
2009-07-10 12:58:34 -07:00
|
|
|
}
|
2008-06-19 10:47:58 -07:00
|
|
|
#endif
|
2008-10-13 13:29:18 -07:00
|
|
|
|
|
|
|
// Return true iff 'size' consecutive activation-record slots starting at
// 'loc' (stepping in the platform's stack-growth direction) are all free.
bool canfit(int32_t size, int32_t loc, AR &ar) {
    int32_t n = 0;
    while (n < size) {
        if (ar.entry[loc + stack_direction(n)] != 0)
            return false;
        n++;
    }
    return true;
}
|
2009-07-10 12:58:34 -07:00
|
|
|
|
|
|
|
// Reserve activation-record (stack frame) slots for instruction 'l' and
// return the slot index.  Size is in 4-byte slots: alloc instructions use
// their declared byte size, quads use 2 slots, everything else 1.  Multi-
// slot entries are placed on 8-byte boundaries.  Sets StackFull on
// overflow.
uint32_t Assembler::arReserve(LIns* l)
{
    int32_t size = l->isop(LIR_alloc) ? (l->size()>>2) : l->isQuad() ? 2 : 1;
    AR &ar = _activation;
    const int32_t tos = ar.tos;
    int32_t start = ar.lowwatermark;
    int32_t i = 0;
    NanoAssert(start>0);

    if (size == 1) {
        // easy most common case -- find a hole, or make the frame bigger
        for (i=start; i < NJ_MAX_STACK_ENTRY; i++) {
            if (ar.entry[i] == 0) {
                // found a hole
                ar.entry[i] = l;
                break;
            }
        }
    }
    else if (size == 2) {
        if ( (start&1)==1 ) start++;  // even 8 boundary
        for (i=start; i < NJ_MAX_STACK_ENTRY; i+=2) {
            // Both slots must be free (slots beyond tos are implicitly free).
            if ( (ar.entry[i+stack_direction(1)] == 0) && (i==tos || (ar.entry[i] == 0)) ) {
                // found 2 adjacent aligned slots
                NanoAssert(ar.entry[i] == 0);
                NanoAssert(ar.entry[i+stack_direction(1)] == 0);
                ar.entry[i] = l;
                ar.entry[i+stack_direction(1)] = l;
                break;
            }
        }
    }
    else {
        // alloc larger block on 8byte boundary.
        if (start < size) start = size;
        if ((start&1)==1) start++;
        for (i=start; i < NJ_MAX_STACK_ENTRY; i+=2) {
            if (canfit(size, i, ar)) {
                // place the entry in the table and mark the instruction with it
                for (int32_t j=0; j < size; j++) {
                    NanoAssert(ar.entry[i+stack_direction(j)] == 0);
                    ar.entry[i+stack_direction(j)] = l;
                }
                break;
            }
        }
    }
    // Grow the frame high-water mark if we extended past the old top.
    if (i >= (int32_t)ar.tos) {
        ar.tos = ar.highwatermark = i+1;
    }
    if (tos+size >= NJ_MAX_STACK_ENTRY) {
        setError(StackFull);
    }
    return i;
}
|
2008-06-19 10:47:58 -07:00
|
|
|
|
2008-10-13 13:29:18 -07:00
|
|
|
/**
 * move regs around so the SavedRegs contains the highest priority regs.
 * Uses a max-heap keyed on allocator priority to pick which live values
 * deserve a callee-saved register, then evicts everything else.
 */
void Assembler::evictScratchRegs()
{
    // find the top GpRegs that are candidates to put in SavedRegs

    // tosave is a binary heap stored in an array.  the root is tosave[0],
    // left child is at i+1, right child is at i+2.

    Register tosave[LastReg-FirstReg+1];
    int len=0;
    RegAlloc *regs = &_allocator;
    for (Register r = FirstReg; r <= LastReg; r = nextreg(r)) {
        if (rmask(r) & GpRegs) {
            LIns *i = regs->getActive(r);
            if (i) {
                if (canRemat(i)) {
                    // cheap to recompute -- just evict, don't bother saving.
                    evict(r, i);
                }
                else {
                    int32_t pri = regs->getPriority(r);
                    // add to heap by adding to end and bubbling up
                    int j = len++;
                    while (j > 0 && pri > regs->getPriority(tosave[j/2])) {
                        tosave[j] = tosave[j/2];
                        j /= 2;
                    }
                    NanoAssert(size_t(j) < sizeof(tosave)/sizeof(tosave[0]));
                    tosave[j] = r;
                }
            }
        }
    }

    // now primap has the live exprs in priority order.
    // allocate each of the top priority exprs to a SavedReg

    RegisterMask allow = SavedRegs;
    while (allow && len > 0) {
        // get the highest priority var
        Register hi = tosave[0];
        if (!(rmask(hi) & SavedRegs)) {
            // move it into one of the remaining saved registers.
            LIns *i = regs->getActive(hi);
            Register r = findRegFor(i, allow);
            allow &= ~rmask(r);
        }
        else {
            // hi is already in a saved reg, leave it alone.
            allow &= ~rmask(hi);
        }

        // remove from heap by replacing root with end element and bubbling down.
        if (allow && --len > 0) {
            Register last = tosave[len];
            int j = 0;
            while (j+1 < len) {
                int child = j+1;
                if (j+2 < len && regs->getPriority(tosave[j+2]) > regs->getPriority(tosave[j+1]))
                    child++;
                if (regs->getPriority(last) > regs->getPriority(tosave[child]))
                    break;
                tosave[j] = tosave[child];
                j = child;
            }
            tosave[j] = last;
        }
    }

    // now evict everything else.
    evictRegs(~SavedRegs);
}
|
2008-06-19 10:47:58 -07:00
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
/// Evict (generate restore code for) every value currently occupying a
/// register named in `regs`.
/// @param regs  mask of registers to vacate.
/// @todo speed this up
void Assembler::evictRegs(RegisterMask regs)
{
    for (Register r = FirstReg; r <= LastReg; r = nextreg(r)) {
        if (!(rmask(r) & regs))
            continue;           // register not requested; leave it alone
        LIns* ins = _allocator.getActive(r);
        if (ins)
            evict(r, ins);      // occupied: emit the restore and free it
    }
}
|
|
|
|
|
|
|
|
/**
 * Merge the current state of the registers with a previously stored version.
 * Keeps only assignments present in BOTH states ("intersect"):
 *
 * current == saved    skip
 * current & saved     evict current, keep saved
 * current & !saved    evict current (unionRegisterState would keep)
 * !current & saved    keep saved
 */
void Assembler::intersectRegisterState(RegAlloc& saved)
{
    // evictions and pops first
    RegisterMask skip = 0;      // registers already agreeing with 'saved'
    verbose_only(bool shouldMention=false; )
    for (Register r=FirstReg; r <= LastReg; r = nextreg(r))
    {
        LIns * curins = _allocator.getActive(r);
        LIns * savedins = saved.getActive(r);
        if (curins == savedins)
        {
            // Same value (or both empty) in both states: nothing to do;
            // tell assignSaved to leave this register untouched.
            //verbose_only( if (curins) verbose_outputf("        skip %s", regNames[r]); )
            skip |= rmask(r);
        }
        else
        {
            if (curins) {
                // Current state disagrees with saved: vacate the register
                // so assignSaved can re-establish the saved assignment.
                //_nvprof("intersect-evict",1);
                verbose_only( shouldMention=true; )
                evict(r, curins);
            }

            #ifdef NANOJIT_IA32
            if (savedins && (rmask(r) & x87Regs)) {
                // x87 registers form a stack; pop to keep it balanced
                // with the saved state.
                verbose_only( shouldMention=true; )
                FSTP(r);
            }
            #endif
        }
    }
    // Re-establish the saved assignments for every register not in 'skip'.
    assignSaved(saved, skip);
    verbose_only(
        if (shouldMention)
            verbose_outputf("## merging registers (intersect) "
                            "with existing edge");
    )
}
|
2008-06-19 10:47:58 -07:00
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
/**
 * Merge the current state of the registers with a previously stored version.
 * Keeps assignments present in EITHER state ("union"):
 *
 * current == saved    skip
 * current & saved     evict current, keep saved
 * current & !saved    keep current (intersectRegisterState would evict)
 * !current & saved    keep saved
 */
void Assembler::unionRegisterState(RegAlloc& saved)
{
    // evictions and pops first
    verbose_only(bool shouldMention=false; )
    RegisterMask skip = 0;      // registers already agreeing with 'saved'
    for (Register r=FirstReg; r <= LastReg; r = nextreg(r))
    {
        LIns * curins = _allocator.getActive(r);
        LIns * savedins = saved.getActive(r);
        if (curins == savedins)
        {
            // Same value (or both empty) in both states: nothing to do;
            // tell assignSaved to leave this register untouched.
            //verbose_only( if (curins) verbose_outputf("        skip %s", regNames[r]); )
            skip |= rmask(r);
        }
        else
        {
            if (curins && savedins) {
                // Both states use the register but for different values:
                // saved wins, so vacate the current occupant.
                //_nvprof("union-evict",1);
                verbose_only( shouldMention=true; )
                evict(r, curins);
            }

            #ifdef NANOJIT_IA32
            if (rmask(r) & x87Regs) {
                if (savedins) {
                    // x87 registers form a stack; pop to match the saved
                    // state's stack depth.
                    FSTP(r);
                }
                else {
                    // saved state did not have fpu reg allocated,
                    // so we must evict here to keep x87 stack balanced.
                    evictIfActive(r);
                }
                verbose_only( shouldMention=true; )
            }
            #endif
        }
    }
    // Re-establish the saved assignments for every register not in 'skip'.
    assignSaved(saved, skip);
    verbose_only( if (shouldMention) verbose_outputf("  merging registers (union) with existing edge"); )
}
|
|
|
|
|
|
|
|
/// Reassign the mainline registers: for every register active in `saved`
/// and not marked in `skip`, force its value back into that register.
/// @param saved  previously captured register state to restore.
/// @param skip   mask of registers the caller has already reconciled.
void Assembler::assignSaved(RegAlloc &saved, RegisterMask skip)
{
    for (Register r=FirstReg; r <= LastReg; r = nextreg(r))
    {
        if (skip & rmask(r))
            continue;                       // already in agreement; leave as-is
        LIns *ins = saved.getActive(r);
        if (ins)
            findSpecificRegFor(ins, r);     // pin the value back into r
    }
}
|
|
|
|
|
2009-05-17 23:20:59 -07:00
|
|
|
// scan table for instruction with the lowest priority, meaning it is used
|
|
|
|
// furthest in the future.
|
|
|
|
LIns* Assembler::findVictim(RegAlloc ®s, RegisterMask allow)
|
|
|
|
{
|
|
|
|
NanoAssert(allow != 0);
|
|
|
|
LIns *i, *a=0;
|
|
|
|
int allow_pri = 0x7fffffff;
|
|
|
|
for (Register r=FirstReg; r <= LastReg; r = nextreg(r))
|
|
|
|
{
|
|
|
|
if ((allow & rmask(r)) && (i = regs.getActive(r)) != 0)
|
|
|
|
{
|
|
|
|
int pri = canRemat(i) ? 0 : regs.getPriority(r);
|
|
|
|
if (!a || pri < allow_pri) {
|
|
|
|
a = i;
|
|
|
|
allow_pri = pri;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
NanoAssert(a != 0);
|
|
|
|
return a;
|
|
|
|
}
|
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
#ifdef NJ_VERBOSE
|
2009-06-23 01:36:53 -07:00
|
|
|
// "outline" must be able to hold the output line in addition to the
// outlineEOL buffer, which is concatenated onto outline just before it
// is printed.
char Assembler::outline[8192];      // the output line currently being built
char Assembler::outlineEOL[512];    // pending end-of-line annotation text
|
2008-10-20 15:52:11 -07:00
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
void Assembler::outputForEOL(const char* format, ...)
|
|
|
|
{
|
|
|
|
va_list args;
|
|
|
|
va_start(args, format);
|
|
|
|
outlineEOL[0] = '\0';
|
|
|
|
vsprintf(outlineEOL, format, args);
|
|
|
|
}
|
2008-06-19 10:47:58 -07:00
|
|
|
|
2009-06-23 01:36:53 -07:00
|
|
|
void Assembler::outputf(const char* format, ...)
|
|
|
|
{
|
|
|
|
va_list args;
|
|
|
|
va_start(args, format);
|
|
|
|
outline[0] = '\0';
|
2008-06-19 10:47:58 -07:00
|
|
|
|
2009-06-23 01:36:53 -07:00
|
|
|
// Format the output string and remember the number of characters
|
|
|
|
// that were written.
|
|
|
|
uint32_t outline_len = vsprintf(outline, format, args);
|
|
|
|
|
|
|
|
// Add the EOL string to the output, ensuring that we leave enough
|
|
|
|
// space for the terminating NULL character, then reset it so it
|
|
|
|
// doesn't repeat on the next outputf.
|
|
|
|
strncat(outline, outlineEOL, sizeof(outline)-(outline_len+1));
|
|
|
|
outlineEOL[0] = '\0';
|
|
|
|
|
|
|
|
output(outline);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Assembler::output(const char* s)
|
|
|
|
{
|
|
|
|
if (_outputCache)
|
|
|
|
{
|
2009-08-04 06:54:47 -07:00
|
|
|
char* str = new (alloc) char[VMPI_strlen(s)+1];
|
2009-06-23 01:36:53 -07:00
|
|
|
strcpy(str, s);
|
2009-08-04 06:54:47 -07:00
|
|
|
_outputCache->insert(str);
|
2009-06-23 01:36:53 -07:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2009-06-24 20:32:00 -07:00
|
|
|
_logc->printf("%s\n", s);
|
2009-06-23 01:36:53 -07:00
|
|
|
}
|
|
|
|
}
|
2008-06-19 10:47:58 -07:00
|
|
|
|
2009-06-23 01:36:53 -07:00
|
|
|
void Assembler::output_asm(const char* s)
|
|
|
|
{
|
2009-06-24 20:32:00 -07:00
|
|
|
if (!(_logc->lcbits & LC_Assembly))
|
2009-06-23 01:36:53 -07:00
|
|
|
return;
|
|
|
|
|
|
|
|
// Add the EOL string to the output, ensuring that we leave enough
|
|
|
|
// space for the terminating NULL character, then reset it so it
|
|
|
|
// doesn't repeat on the next outputf.
|
|
|
|
strncat(outline, outlineEOL, sizeof(outline)-(strlen(outline)+1));
|
|
|
|
outlineEOL[0] = '\0';
|
|
|
|
|
|
|
|
output(s);
|
|
|
|
}
|
2008-06-19 10:47:58 -07:00
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
/// Pad string `s` with spaces out to column `col` (or by a single space
/// if it is already at/past that column) and return a pointer to the new
/// end of the string, ready for further appending.
/// @param s    NUL-terminated buffer with room for the padding.
/// @param col  target column.
/// @return pointer to the NUL terminator after padding.
char* Assembler::outputAlign(char *s, int col)
{
    int len = (int)strlen(s);
    int add = ((col-len)>0) ? col-len : 1;
    memset(&s[len], ' ', add);
    // Terminate after the padding.  The original wrote s[col] = '\0',
    // which truncated the string (and left a stray space past the
    // terminator) whenever len >= col.
    s[len+add] = '\0';
    return &s[len+add];
}
|
|
|
|
#endif // verbose
|
2008-06-19 10:47:58 -07:00
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
uint32_t CallInfo::_count_args(uint32_t mask) const
|
|
|
|
{
|
|
|
|
uint32_t argc = 0;
|
|
|
|
uint32_t argt = _argtypes;
|
|
|
|
for (uint32_t i = 0; i < MAXARGS; ++i) {
|
2009-07-29 17:44:34 -07:00
|
|
|
argt >>= ARGSIZE_SHIFT;
|
2008-11-13 09:52:26 -08:00
|
|
|
if (!argt)
|
|
|
|
break;
|
2009-07-10 12:58:34 -07:00
|
|
|
argc += (argt & mask) != 0;
|
|
|
|
}
|
|
|
|
return argc;
|
|
|
|
}
|
2008-06-19 10:47:58 -07:00
|
|
|
|
2008-07-16 14:21:31 -07:00
|
|
|
/// Decode the packed _argtypes descriptor into an array of per-argument
/// ArgSize values (slot 0 is the return type, so the first shift skips it).
/// @param sizes  out-array with room for MAXARGS entries.
/// @return number of arguments written into `sizes`.
uint32_t CallInfo::get_sizes(ArgSize* sizes) const
{
    uint32_t remaining = _argtypes;
    uint32_t count = 0;
    for (uint32_t slot = 0; slot < MAXARGS; slot++) {
        remaining >>= ARGSIZE_SHIFT;
        ArgSize sz = ArgSize(remaining & ARGSIZE_MASK_ANY);
        if (sz == ARGSIZE_NONE)
            break;                      // end of the encoded argument list
        sizes[count++] = sz;
    }
    return count;
}
|
2008-10-13 13:29:18 -07:00
|
|
|
|
|
|
|
void LabelStateMap::add(LIns *label, NIns *addr, RegAlloc ®s) {
|
2009-08-04 07:48:48 -07:00
|
|
|
LabelState *st = new (alloc) LabelState(addr, regs);
|
2008-10-13 13:29:18 -07:00
|
|
|
labels.put(label, st);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Look up the state recorded for `label`; NULL if none was added.
LabelState* LabelStateMap::get(LIns *label) {
    LabelState* state = labels.get(label);
    return state;
}
|
2008-06-19 10:47:58 -07:00
|
|
|
}
|