Fix Greedy register allocation bug in backing stack computation (bug 680432, r=sstangl).

David Anderson 2011-11-04 13:50:56 -07:00
parent f7cad0c5d8
commit a82b0818f2
8 changed files with 182 additions and 105 deletions
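In brief, judging from the diffs below: GreedyAllocator's VirtualRegister now samples the "preset to a memory location" condition once, in init(), caching it in a hasBackingStack_ flag instead of recomputing it from def->output() on every hasBackingStack() query. One plausible reading of the bug, consistent with this patch: the allocator can rewrite a definition's output mid-allocation (prepareBackedge() calls setOutput() on loop phis), so a late recomputation could disagree with the answer at init time. The rest of the patch adds detailed Greedy spew, folds the LSRA spew channel into a shared RegAlloc channel, switches MoveEmitterX86's double moves to ScratchFloatReg, and adds a regression test.

A minimal self-contained model of the caching fix (illustrative names, not SpiderMonkey's types):

#include <cassert>

struct Output { bool memory; };

struct Definition {
    bool preset;
    Output out;
    bool isPreset() const { return preset; }
    const Output *output() const { return &out; }
    void setOutput(Output o) { out = o; }
};

struct VirtualRegister {
    Definition *def;
    bool hasBackingStack_;

    void init(Definition *d) {
        def = d;
        // Fixed behavior: sample the condition while output() is pristine.
        hasBackingStack_ = d->isPreset() && d->output()->memory;
    }
    // Buggy behavior: recompute from def->output() on every query.
    bool hasBackingStackRecomputed() const {
        return def->isPreset() && def->output()->memory;
    }
};

int main() {
    Definition d{true, Output{true}};   // preset to a memory slot
    VirtualRegister vr;
    vr.init(&d);

    d.setOutput(Output{false});         // later rewritten, as a phi's output
                                        // might be by prepareBackedge()

    assert(vr.hasBackingStack_);             // cached answer stays stable
    assert(!vr.hasBackingStackRecomputed()); // the late query disagrees
    return 0;
}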

js/src/ion/GreedyAllocator.cpp

@ -41,6 +41,7 @@
#include "GreedyAllocator.h"
#include "IonAnalysis.h"
#include "IonSpewer.h"
using namespace js;
using namespace js::ion;
@ -61,10 +62,8 @@ GreedyAllocator::findDefinitionsInLIR(LInstruction *ins)
if (def->policy() == LDefinition::REDEFINED)
continue;
vars[def->virtualRegister()].def = def;
#ifdef DEBUG
vars[def->virtualRegister()].ins = ins;
#endif
VirtualRegister *vr = &vars[def->virtualRegister()];
vr->init(def, ins);
}
}
@ -203,6 +202,8 @@ GreedyAllocator::allocateStack(VirtualRegister *vr)
return false;
}
IonSpew(IonSpew_RegAlloc, " assign vr%d := stack%d", vr->def->virtualRegister(), index);
vr->setStackSlot(index);
return true;
}
@ -255,10 +256,15 @@ GreedyAllocator::kill(VirtualRegister *vr)
AnyRegister reg = vr->reg();
JS_ASSERT(state[reg] == vr);
IonSpew(IonSpew_RegAlloc, " kill vr%d (reg:%s)",
vr->def->virtualRegister(), reg.name());
freeReg(reg);
}
if (vr->hasStackSlot())
if (vr->hasStackSlot()) {
IonSpew(IonSpew_RegAlloc, " kill vr%d (stack:%d)",
vr->def->virtualRegister(), vr->stackSlot_);
freeStack(vr);
}
return true;
}
@ -286,6 +292,7 @@ void
GreedyAllocator::assign(VirtualRegister *vr, AnyRegister reg)
{
JS_ASSERT(!state[reg]);
IonSpew(IonSpew_RegAlloc, " assign vr%d := %s", vr->def->virtualRegister(), reg.name());
state[reg] = vr;
vr->setRegister(reg);
state.free.take(reg);
@ -672,6 +679,8 @@ GreedyAllocator::allocateInstruction(LBlock *block, LInstruction *ins)
bool
GreedyAllocator::allocateRegistersInBlock(LBlock *block)
{
IonSpew(IonSpew_RegAlloc, " Allocating instructions.");
LInstructionReverseIterator ri = block->instructions().rbegin();
// Control instructions need to be handled specially. Since they have no
@ -730,6 +739,8 @@ GreedyAllocator::allocateRegistersInBlock(LBlock *block)
assertValidRegisterState();
}
IonSpew(IonSpew_RegAlloc, " Done allocating instructions.");
return true;
}
@ -775,11 +786,17 @@ GreedyAllocator::mergeRegisterState(const AnyRegister &reg, LBlock *left, LBlock
if (vright->hasRegister()) {
JS_ASSERT(vright->reg() != reg);
IonSpew(IonSpew_RegAlloc, " merging vr%d %s to %s",
vright->def->virtualRegister(), vright->reg().name(), reg.name());
if (!rinfo->restores.move(vright->reg(), reg))
return false;
} else {
if (!allocateStack(vright))
return false;
IonSpew(IonSpew_RegAlloc, " merging vr%d stack to %s",
vright->def->virtualRegister(), reg.name());
if (!rinfo->restores.move(vright->backingStack(), reg))
return false;
}
@ -799,6 +816,8 @@ GreedyAllocator::findLoopCarriedUses(LBlock *backedge)
uint32 upperBound = backedge->lastId();
uint32 lowerBound = mheader->lir()->firstId();
IonSpew(IonSpew_RegAlloc, " Finding loop-carried uses.");
for (size_t i = 0; i < mheader->numContainedInLoop(); i++) {
LBlock *block = mheader->getContainedInLoop(i)->lir();
@ -819,6 +838,8 @@ GreedyAllocator::findLoopCarriedUses(LBlock *backedge)
}
}
IonSpew(IonSpew_RegAlloc, " Done finding loop-carried uses.");
return true;
}
@ -829,6 +850,8 @@ GreedyAllocator::prepareBackedge(LBlock *block)
if (!msuccessor)
return true;
IonSpew(IonSpew_RegAlloc, " Preparing backedge.");
LBlock *successor = msuccessor->lir();
uint32 pos = block->mir()->positionInPhiSuccessor();
@ -850,7 +873,9 @@ GreedyAllocator::prepareBackedge(LBlock *block)
// to the phi's final storage.
phi->getDef(0)->setOutput(result);
}
IonSpew(IonSpew_RegAlloc, " Done preparing backedge.");
if (!findLoopCarriedUses(block))
return false;
@ -862,6 +887,8 @@ GreedyAllocator::mergeBackedgeState(LBlock *header, LBlock *backedge)
{
BlockInfo *info = blockInfo(backedge);
IonSpew(IonSpew_RegAlloc, " Merging backedge state.");
// Handle loop-carried registers, making sure anything live at the
// backedge is also properly held live at the top of the loop.
Mover carried;
@ -884,6 +911,10 @@ GreedyAllocator::mergeBackedgeState(LBlock *header, LBlock *backedge)
// preserved across the loop.
if (!allocateStack(inVr))
return false;
IonSpew(IonSpew_RegAlloc, " loop carried vr%d (stack:%d) -> %s",
inVr->def->virtualRegister(), inVr->stackSlot_, reg.name());
if (!carried.move(inVr->backingStack(), reg))
return false;
}
@ -892,6 +923,9 @@ GreedyAllocator::mergeBackedgeState(LBlock *header, LBlock *backedge)
header->insertBefore(ins, carried.moves);
}
IonSpew(IonSpew_RegAlloc, " Done merging backedge state.");
IonSpew(IonSpew_RegAlloc, " Handling loop phis.");
Mover phis;
// Handle loop phis.
@ -917,6 +951,8 @@ GreedyAllocator::mergeBackedgeState(LBlock *header, LBlock *backedge)
backedge->insertBefore(ins, phis.moves);
}
IonSpew(IonSpew_RegAlloc, " Done handling loop phis.");
return true;
}
@ -927,6 +963,8 @@ GreedyAllocator::mergePhiState(LBlock *block)
if (!mblock->successorWithPhis())
return true;
IonSpew(IonSpew_RegAlloc, " Merging phi state.");
// Reset state so evictions will work.
reset();
@ -939,8 +977,10 @@ GreedyAllocator::mergePhiState(LBlock *block)
VirtualRegister *def = getVirtualRegister(phi->getDef(0));
// Ignore non-loop phis with no uses.
if (!def->hasRegister() && !def->hasStackSlot())
if (!def->hasRegister() && !def->hasStackSlot()) {
IonSpew(IonSpew_RegAlloc, " ignoring unused phi vr%d", def->def->virtualRegister());
continue;
}
LAllocation *a = phi->getOperand(pos);
VirtualRegister *use = getVirtualRegister(a->toUse());
@ -959,9 +999,19 @@ GreedyAllocator::mergePhiState(LBlock *block)
// Emit a move from the use to a def register.
if (def->hasRegister()) {
if (use->hasRegister()) {
if (use->reg() != def->reg() && !phis.move(use->reg(), def->reg()))
return false;
if (use->reg() != def->reg()) {
IonSpew(IonSpew_RegAlloc, " vr%d (%s) -> phi vr%d (%s)",
use->def->virtualRegister(), use->reg().name(),
def->def->virtualRegister(), def->reg().name());
if (!phis.move(use->reg(), def->reg()))
return false;
}
} else {
IonSpew(IonSpew_RegAlloc, " vr%d (stack:%d) -> phi vr%d (%s)",
use->def->virtualRegister(), use->stackSlot_,
def->def->virtualRegister(), def->reg().name());
if (!phis.move(use->backingStack(), def->reg()))
return false;
}
@ -970,9 +1020,17 @@ GreedyAllocator::mergePhiState(LBlock *block)
// Emit a move from the use to a def stack slot.
if (def->hasStackSlot()) {
if (use->hasRegister()) {
IonSpew(IonSpew_RegAlloc, " vr%d (%s) -> phi vr%d (stack:%d)",
use->def->virtualRegister(), use->reg().name(),
def->def->virtualRegister(), def->stackSlot_);
if (!phis.move(use->reg(), def->backingStack()))
return false;
} else if (use->backingStack() != def->backingStack()) {
IonSpew(IonSpew_RegAlloc, " vr%d (stack:%d) -> phi vr%d (stack:%d)",
use->def->virtualRegister(), use->stackSlot_,
def->def->virtualRegister(), def->stackSlot_);
if (!phis.move(use->backingStack(), def->backingStack()))
return false;
}
@ -988,6 +1046,8 @@ GreedyAllocator::mergePhiState(LBlock *block)
if (phis.moves)
block->insertBefore(before, phis.moves);
IonSpew(IonSpew_RegAlloc, " Done merging phi state.");
return true;
}
@ -1001,6 +1061,8 @@ GreedyAllocator::mergeAllocationState(LBlock *block)
return true;
}
IonSpew(IonSpew_RegAlloc, " Merging allocation state.");
// Prefer the successor with phis as the baseline state
LBlock *leftblock = mblock->getSuccessor(0)->lir();
state = blockInfo(leftblock)->in;
@ -1009,8 +1071,11 @@ GreedyAllocator::mergeAllocationState(LBlock *block)
// register is applied to the def for which it was intended.
for (AnyRegisterIterator iter; iter.more(); iter++) {
AnyRegister reg = *iter;
if (VirtualRegister *vr = state[reg])
if (VirtualRegister *vr = state[reg]) {
vr->setRegister(reg);
IonSpew(IonSpew_RegAlloc, " vr%d inherits %s",
vr->def->virtualRegister(), reg.name());
}
}
// Merge state from each additional successor.
@ -1032,6 +1097,8 @@ GreedyAllocator::mergeAllocationState(LBlock *block)
if (!mergePhiState(block))
return false;
IonSpew(IonSpew_RegAlloc, " Done merging allocation state.");
return true;
}
@ -1043,6 +1110,8 @@ GreedyAllocator::allocateRegisters()
for (size_t i = graph.numBlocks() - 1; i < graph.numBlocks(); i--) {
LBlock *block = graph.getBlock(i);
IonSpew(IonSpew_RegAlloc, "Allocating block %d", (uint32)i);
// Merge allocation state from our successors.
if (!mergeAllocationState(block))
return false;

js/src/ion/GreedyAllocator.h

@ -75,12 +75,20 @@ class GreedyAllocator
};
bool hasRegister_;
bool hasStackSlot_;
bool hasBackingStack_;
mutable bool backingStackUsed_;
#ifdef DEBUG
LInstruction *ins;
#endif
void init(LDefinition *def, LInstruction *ins) {
this->def = def;
this->hasBackingStack_ = (def->isPreset() && def->output()->isMemory());
#ifdef DEBUG
this->ins = ins;
#endif
}
LDefinition::Type type() const {
return def->type();
}
@ -120,8 +128,7 @@ class GreedyAllocator
return stackSlot_;
}
bool hasBackingStack() const {
return hasStackSlot() ||
(def->isPreset() && def->output()->isMemory());
return hasStackSlot() || hasBackingStack_;
}
bool backingStackUsed() const {
return backingStackUsed_;

js/src/ion/Ion.cpp

@ -213,8 +213,8 @@ ion::CheckLogging()
LoggingBits |= (1 << uint32(IonSpew_GVN));
if (ContainsFlag(env, "licm"))
LoggingBits |= (1 << uint32(IonSpew_LICM));
if (ContainsFlag(env, "lsra"))
LoggingBits |= (1 << uint32(IonSpew_LSRA));
if (ContainsFlag(env, "regalloc"))
LoggingBits |= (1 << uint32(IonSpew_RegAlloc));
if (ContainsFlag(env, "snapshots"))
LoggingBits |= (1 << uint32(IonSpew_Snapshots));
if (ContainsFlag(env, "all"))
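Usage note, assuming the IONFLAGS environment variable that CheckLogging() parses: spew from both the greedy and linear-scan allocators is now enabled with IONFLAGS=regalloc; the old IONFLAGS=lsra spelling no longer matches anything.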

js/src/ion/IonSpewer.h

@ -62,7 +62,7 @@ namespace ion {
/* Information during LICM */ \
_(LICM) \
/* Information during register allocation */ \
_(LSRA) \
_(RegAlloc) \
/* Debug info about snapshots */ \
_(Snapshots)
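The list above is an X-macro: each channel name is expanded wherever the list is instantiated, which is why renaming _(LSRA) to _(RegAlloc) renames the IonSpew_LSRA enumerator used throughout LinearScan.cpp. A hedged sketch of the usual expansion pattern (the exact macro machinery in IonSpewer.h is not shown in this diff, and the names below are assumed):

#define IONSPEW_CHANNEL_LIST(_) \
    _(LICM)                     \
    _(RegAlloc)                 \
    _(Snapshots)

enum IonSpewChannel {
#define SPEW_CHANNEL(name) IonSpew_##name,
    IONSPEW_CHANNEL_LIST(SPEW_CHANNEL)
#undef SPEW_CHANNEL
    IonSpew_Terminate
};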

js/src/ion/LinearScan.cpp

@ -640,7 +640,7 @@ LinearScanAllocator::allocateRegisters()
Requirement *req = current->requirement();
Requirement *hint = current->hint();
IonSpew(IonSpew_LSRA, "Processing %d = [%u, %u] (pri=%d)",
IonSpew(IonSpew_RegAlloc, "Processing %d = [%u, %u] (pri=%d)",
current->reg() ? current->reg()->reg() : 0, current->start().pos(),
current->end().pos(), current->requirement()->priority());
@ -695,7 +695,7 @@ LinearScanAllocator::allocateRegisters()
// If we don't really need this in a register, don't allocate one
if (req->kind() != Requirement::REGISTER && hint->kind() == Requirement::NONE) {
// FIXME: Eager spill ok? Check for canonical spill location?
IonSpew(IonSpew_LSRA, " Eagerly spilling virtual register %d",
IonSpew(IonSpew_RegAlloc, " Eagerly spilling virtual register %d",
current->reg() ? current->reg()->reg() : 0);
if (!spill())
return false;
@ -703,12 +703,12 @@ LinearScanAllocator::allocateRegisters()
}
// Try to allocate a free register
IonSpew(IonSpew_LSRA, " Attempting free register allocation");
IonSpew(IonSpew_RegAlloc, " Attempting free register allocation");
CodePosition bestFreeUntil;
AnyRegister::Code bestCode = findBestFreeRegister(&bestFreeUntil);
if (bestCode != AnyRegister::Invalid) {
AnyRegister best = AnyRegister::FromCode(bestCode);
IonSpew(IonSpew_LSRA, " Decided best register was %s", best.name());
IonSpew(IonSpew_RegAlloc, " Decided best register was %s", best.name());
// Split when the register is next needed if necessary
if (bestFreeUntil <= current->end()) {
@ -721,17 +721,17 @@ LinearScanAllocator::allocateRegisters()
continue;
}
IonSpew(IonSpew_LSRA, " Unable to allocate free register");
IonSpew(IonSpew_RegAlloc, " Unable to allocate free register");
// Phis can't spill other intervals at their definition
if (!current->index() && current->reg() && current->reg()->ins()->isPhi()) {
IonSpew(IonSpew_LSRA, " Can't split at phi, spilling this interval");
IonSpew(IonSpew_RegAlloc, " Can't split at phi, spilling this interval");
if (!spill())
return false;
continue;
}
IonSpew(IonSpew_LSRA, " Attempting blocked register allocation");
IonSpew(IonSpew_RegAlloc, " Attempting blocked register allocation");
// If we absolutely need a register or our next use is closer than the
// selected blocking register then we spill the blocker. Otherwise, we
@ -742,7 +742,7 @@ LinearScanAllocator::allocateRegisters()
(req->kind() == Requirement::REGISTER || hint->pos() < bestNextUsed))
{
AnyRegister best = AnyRegister::FromCode(bestCode);
IonSpew(IonSpew_LSRA, " Decided best register was %s", best.name());
IonSpew(IonSpew_RegAlloc, " Decided best register was %s", best.name());
if (!assign(LAllocation(best)))
return false;
@ -750,7 +750,7 @@ LinearScanAllocator::allocateRegisters()
continue;
}
IonSpew(IonSpew_LSRA, " No registers available to spill");
IonSpew(IonSpew_RegAlloc, " No registers available to spill");
JS_ASSERT(req->kind() == Requirement::NONE);
if (!spill())
@ -915,7 +915,7 @@ LinearScanAllocator::splitInterval(LiveInterval *interval, CodePosition pos)
if (!getMoveGroupBefore(pos))
return false;
IonSpew(IonSpew_LSRA, " Split interval to %u = [%u, %u]/[%u, %u]",
IonSpew(IonSpew_RegAlloc, " Split interval to %u = [%u, %u]/[%u, %u]",
interval->reg()->reg(), interval->start().pos(),
interval->end().pos(), newInterval->start().pos(),
newInterval->end().pos());
@ -937,7 +937,7 @@ bool
LinearScanAllocator::assign(LAllocation allocation)
{
if (allocation.isRegister())
IonSpew(IonSpew_LSRA, "Assigning register %s", allocation.toRegister().name());
IonSpew(IonSpew_RegAlloc, "Assigning register %s", allocation.toRegister().name());
current->setAllocation(allocation);
// Split this interval at the next incompatible one
@ -954,7 +954,7 @@ LinearScanAllocator::assign(LAllocation allocation)
// Split the blocking interval if it exists
for (IntervalIterator i(active.begin()); i != active.end(); i++) {
if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
IonSpew(IonSpew_LSRA, " Splitting active interval %u = [%u, %u]",
IonSpew(IonSpew_RegAlloc, " Splitting active interval %u = [%u, %u]",
i->reg()->ins()->id(), i->start().pos(), i->end().pos());
JS_ASSERT(i->start() != current->start());
@ -974,7 +974,7 @@ LinearScanAllocator::assign(LAllocation allocation)
// Split any inactive intervals at the next live point
for (IntervalIterator i(inactive.begin()); i != inactive.end(); ) {
if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
IonSpew(IonSpew_LSRA, " Splitting inactive interval %u = [%u, %u]",
IonSpew(IonSpew_RegAlloc, " Splitting inactive interval %u = [%u, %u]",
i->reg()->ins()->id(), i->start().pos(), i->end().pos());
LiveInterval *it = *i;
@ -1005,13 +1005,13 @@ LinearScanAllocator::assign(LAllocation allocation)
bool
LinearScanAllocator::spill()
{
IonSpew(IonSpew_LSRA, " Decided to spill current interval");
IonSpew(IonSpew_RegAlloc, " Decided to spill current interval");
// We can't spill bogus intervals
JS_ASSERT(current->reg());
if (current->reg()->canonicalSpill()) {
IonSpew(IonSpew_LSRA, " Allocating canonical spill location");
IonSpew(IonSpew_RegAlloc, " Allocating canonical spill location");
return assign(*current->reg()->canonicalSpill());
}
@ -1058,7 +1058,7 @@ LinearScanAllocator::finishInterval(LiveInterval *interval)
AnyRegister::Code
LinearScanAllocator::findBestFreeRegister(CodePosition *freeUntil)
{
IonSpew(IonSpew_LSRA, " Computing freeUntilPos");
IonSpew(IonSpew_RegAlloc, " Computing freeUntilPos");
// Compute free-until positions for all registers
CodePosition freeUntilPos[AnyRegister::Total];
@ -1071,7 +1071,7 @@ LinearScanAllocator::findBestFreeRegister(CodePosition *freeUntil)
for (IntervalIterator i(active.begin()); i != active.end(); i++) {
if (i->getAllocation()->isRegister()) {
AnyRegister reg = i->getAllocation()->toRegister();
IonSpew(IonSpew_LSRA, " Register %s not free", reg.name());
IonSpew(IonSpew_RegAlloc, " Register %s not free", reg.name());
freeUntilPos[reg.code()] = CodePosition::MIN;
}
}
@ -1081,7 +1081,7 @@ LinearScanAllocator::findBestFreeRegister(CodePosition *freeUntil)
CodePosition pos = current->intersect(*i);
if (pos != CodePosition::MIN && pos < freeUntilPos[reg.code()]) {
freeUntilPos[reg.code()] = pos;
IonSpew(IonSpew_LSRA, " Register %s free until %u", reg.name(), pos.pos());
IonSpew(IonSpew_RegAlloc, " Register %s free until %u", reg.name(), pos.pos());
}
}
}
@ -1131,7 +1131,7 @@ LinearScanAllocator::findBestFreeRegister(CodePosition *freeUntil)
AnyRegister::Code
LinearScanAllocator::findBestBlockedRegister(CodePosition *nextUsed)
{
IonSpew(IonSpew_LSRA, " Computing nextUsePos");
IonSpew(IonSpew_RegAlloc, " Computing nextUsePos");
// Compute next-used positions for all registers
CodePosition nextUsePos[AnyRegister::Total];
@ -1146,10 +1146,10 @@ LinearScanAllocator::findBestBlockedRegister(CodePosition *nextUsed)
AnyRegister reg = i->getAllocation()->toRegister();
if (i->start().ins() == current->start().ins()) {
nextUsePos[reg.code()] = CodePosition::MIN;
IonSpew(IonSpew_LSRA, " Disqualifying %s due to recency", reg.name());
IonSpew(IonSpew_RegAlloc, " Disqualifying %s due to recency", reg.name());
} else if (nextUsePos[reg.code()] != CodePosition::MIN) {
nextUsePos[reg.code()] = i->reg()->nextUsePosAfter(current->start());
IonSpew(IonSpew_LSRA, " Register %s next used %u", reg.name(),
IonSpew(IonSpew_RegAlloc, " Register %s next used %u", reg.name(),
nextUsePos[reg.code()].pos());
}
}
@ -1161,7 +1161,7 @@ LinearScanAllocator::findBestBlockedRegister(CodePosition *nextUsed)
JS_ASSERT(i->covers(pos) || pos == CodePosition::MAX);
if (pos < nextUsePos[reg.code()]) {
nextUsePos[reg.code()] = pos;
IonSpew(IonSpew_LSRA, " Register %s next used %u", reg.name(), pos.pos());
IonSpew(IonSpew_RegAlloc, " Register %s next used %u", reg.name(), pos.pos());
}
}
}
@ -1320,34 +1320,34 @@ LinearScanAllocator::validateAllocations()
bool
LinearScanAllocator::go()
{
IonSpew(IonSpew_LSRA, "Beginning register allocation");
IonSpew(IonSpew_RegAlloc, "Beginning register allocation");
IonSpew(IonSpew_LSRA, "Beginning creation of initial data structures");
IonSpew(IonSpew_RegAlloc, "Beginning creation of initial data structures");
if (!createDataStructures())
return false;
IonSpew(IonSpew_LSRA, "Creation of initial data structures completed");
IonSpew(IonSpew_RegAlloc, "Creation of initial data structures completed");
IonSpew(IonSpew_LSRA, "Beginning liveness analysis");
IonSpew(IonSpew_RegAlloc, "Beginning liveness analysis");
if (!buildLivenessInfo())
return false;
IonSpew(IonSpew_LSRA, "Liveness analysis complete");
IonSpew(IonSpew_RegAlloc, "Liveness analysis complete");
IonSpew(IonSpew_LSRA, "Beginning preliminary register allocation");
IonSpew(IonSpew_RegAlloc, "Beginning preliminary register allocation");
if (!allocateRegisters())
return false;
IonSpew(IonSpew_LSRA, "Preliminary register allocation complete");
IonSpew(IonSpew_RegAlloc, "Preliminary register allocation complete");
IonSpew(IonSpew_LSRA, "Beginning control flow resolution");
IonSpew(IonSpew_RegAlloc, "Beginning control flow resolution");
if (!resolveControlFlow())
return false;
IonSpew(IonSpew_LSRA, "Control flow resolution complete");
IonSpew(IonSpew_RegAlloc, "Control flow resolution complete");
IonSpew(IonSpew_LSRA, "Beginning register allocation reification");
IonSpew(IonSpew_RegAlloc, "Beginning register allocation reification");
if (!reifyAllocations())
return false;
IonSpew(IonSpew_LSRA, "Register allocation reification complete");
IonSpew(IonSpew_RegAlloc, "Register allocation reification complete");
IonSpew(IonSpew_LSRA, "Register allocation complete");
IonSpew(IonSpew_RegAlloc, "Register allocation complete");
return true;
}

js/src/ion/shared/MoveEmitter-x86-shared.cpp

@ -49,9 +49,7 @@ MoveEmitterX86::MoveEmitterX86(MacroAssembler &masm)
masm(masm),
pushedAtCycle_(-1),
pushedAtSpill_(-1),
pushedAtDoubleSpill_(-1),
spilledReg_(InvalidReg),
spilledFloatReg_(InvalidFloatReg)
spilledReg_(InvalidReg)
{
pushedAtStart_ = masm.framePushed();
}
@ -86,12 +84,6 @@ MoveEmitterX86::spillSlot() const
return Operand(StackPointer, masm.framePushed() - pushedAtSpill_);
}
Operand
MoveEmitterX86::doubleSpillSlot() const
{
return Operand(StackPointer, masm.framePushed() - pushedAtDoubleSpill_);
}
Operand
MoveEmitterX86::toOperand(const MoveOperand &operand) const
{
@ -129,23 +121,6 @@ MoveEmitterX86::tempReg()
return spilledReg_;
}
FloatRegister
MoveEmitterX86::tempFloatReg()
{
if (spilledFloatReg_ != InvalidFloatReg)
return spilledFloatReg_;
// For now, just pick xmm7 as the eviction point. This is totally random,
// and if it ends up being bad, we can use actual heuristics later.
spilledFloatReg_ = FloatRegister::FromCode(7);
if (pushedAtDoubleSpill_ == -1) {
masm.reserveStack(sizeof(double));
pushedAtDoubleSpill_ = masm.framePushed();
}
masm.movsd(spilledFloatReg_, doubleSpillSlot());
return spilledFloatReg_;
}
void
MoveEmitterX86::breakCycle(const MoveOperand &from, const MoveOperand &to, Move::Kind kind)
{
@ -155,11 +130,10 @@ MoveEmitterX86::breakCycle(const MoveOperand &from, const MoveOperand &to, Move:
//
// This case handles (A -> B), which we reach first. We save B, then allow
// the original move to continue.
if (to.isDouble()) {
if (kind == Move::DOUBLE) {
if (to.isMemory()) {
FloatRegister temp = tempFloatReg();
masm.movsd(toOperand(to), temp);
masm.movsd(temp, cycleSlot());
masm.movsd(toOperand(to), ScratchFloatReg);
masm.movsd(ScratchFloatReg, cycleSlot());
} else {
masm.movsd(to.floatReg(), cycleSlot());
}
@ -185,9 +159,8 @@ MoveEmitterX86::completeCycle(const MoveOperand &from, const MoveOperand &to, Mo
// saved value of B, to A.
if (kind == Move::DOUBLE) {
if (to.isMemory()) {
FloatRegister temp = tempFloatReg();
masm.movsd(cycleSlot(), temp);
masm.movsd(temp, toOperand(to));
masm.movsd(cycleSlot(), ScratchFloatReg);
masm.movsd(ScratchFloatReg, toOperand(to));
} else {
masm.movsd(cycleSlot(), to.floatReg());
}
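For context, a standalone illustration of the cycle these two hooks resolve (emitter-independent; plain doubles stand in for registers and stack slots): a parallel move group (A -> B, B -> A) cannot be serialized directly, so breakCycle() saves one endpoint to the cycle slot before it is clobbered and completeCycle() restores it at the end.

#include <cassert>

int main() {
    // Parallel-move semantics: B' = A and A' = B, "simultaneously".
    double A = 1.0, B = 2.0;

    double cycleSlot = B;  // breakCycle(): save B before the move below

    B = A;                 // the ordinary move that clobbers B
    A = cycleSlot;         // completeCycle(): restore the saved value into A

    assert(A == 2.0 && B == 1.0);
    return 0;
}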
@ -232,25 +205,13 @@ void
MoveEmitterX86::emitDoubleMove(const MoveOperand &from, const MoveOperand &to)
{
if (from.isFloatReg()) {
if (from.floatReg() == spilledFloatReg_) {
// If the source is a register that has been spilled, make
// sure to load the source back into that register.
masm.movsd(doubleSpillSlot(), spilledFloatReg_);
spilledFloatReg_ = InvalidFloatReg;
}
masm.movsd(from.floatReg(), toOperand(to));
} else if (to.isFloatReg()) {
if (to.floatReg() == spilledFloatReg_) {
// If the destination is the spilled register, make sure we
// don't re-clobber its value.
spilledFloatReg_ = InvalidFloatReg;
}
masm.movsd(toOperand(from), to.floatReg());
} else {
// Memory to memory float move.
FloatRegister reg = tempFloatReg();
masm.movsd(toOperand(from), reg);
masm.movsd(reg, toOperand(to));
masm.movsd(toOperand(from), ScratchFloatReg);
masm.movsd(ScratchFloatReg, toOperand(to));
}
}
@ -288,8 +249,6 @@ MoveEmitterX86::finish()
{
assertDone();
if (pushedAtDoubleSpill_ != -1 && spilledFloatReg_ != InvalidFloatReg)
masm.movsd(doubleSpillSlot(), spilledFloatReg_);
if (pushedAtSpill_ != -1 && spilledReg_ != InvalidReg)
masm.mov(spillSlot(), spilledReg_);
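Design note, going only by this diff: double moves now always route through the dedicated ScratchFloatReg rather than lazily evicting xmm7 into a reserved spill slot, which deletes the pushedAtDoubleSpill_/spilledFloatReg_ bookkeeping, the tempFloatReg() helper, and the restore in finish(). The general-purpose path keeps its lazy spilledReg_ scheme.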

js/src/ion/shared/MoveEmitter-x86-shared.h

@ -66,20 +66,16 @@ class MoveEmitterX86
// stack space has been allocated for that particular spill.
int32 pushedAtCycle_;
int32 pushedAtSpill_;
int32 pushedAtDoubleSpill_;
// These are registers that are available for temporary use. They may be
// assigned InvalidReg. If no corresponding spill space has been assigned,
// then these registers do not need to be spilled.
// Register that is available for temporary use. It may be assigned
// InvalidReg. If no corresponding spill space has been assigned,
// then this register does not need to be spilled.
Register spilledReg_;
FloatRegister spilledFloatReg_;
void assertDone();
Register tempReg();
FloatRegister tempFloatReg();
Operand cycleSlot() const;
Operand spillSlot() const;
Operand doubleSpillSlot() const;
Operand toOperand(const MoveOperand &operand) const;
void emitMove(const MoveOperand &from, const MoveOperand &to);

js/src/jit-test/tests/ion/bug680432.js

@ -0,0 +1,46 @@
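// Regression test for bug 680432. Presumably the pressure point: seventeen
// doubles are live simultaneously across the loop backedge, more than any
// x86/x64 float register file holds, so the greedy allocator must give some
// of them backing stack slots and merge register/stack state correctly at
// the backedge and phi joins.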
function f0(p0) {
var v0 = 0.5;
var v1 = 1.5;
var v2 = 2.5;
var v3 = 3.5;
var v4 = 4.5;
var v5 = 5.5;
var v6 = 6.5;
var v7 = 7.5;
var v8 = 8.5;
var v9 = 9.5;
var v10 = 10.5;
var v11 = 11.5;
var v12 = 12.5;
var v13 = 13.5;
var v14 = 14.5;
var v15 = 15.5;
var v16 = 16.5;
// 0.125 keeps every value fractional so the type oracle doesn't choose int32.
while (0) {
// p0 = false;
var tmp = v0;
v0 = 0.125 + v0 + v1;
v1 = 0.125 + v1 + v2;
v2 = 0.125 + v2 + v3;
v3 = 0.125 + v3 + v4;
v4 = 0.125 + v4 + v5;
v5 = 0.125 + v5 + v6;
v6 = 0.125 + v6 + v7;
v7 = 0.125 + v7 + v8;
v8 = 0.125 + v8 + v9;
v9 = 0.125 + v9 + v10;
v10 = 0.125 + v10 + v11;
v11 = 0.125 + v11 + v12;
v12 = 0.125 + v12 + v13;
v13 = 0.125 + v13 + v14;
v14 = 0.125 + v14 + v15;
v15 = 0.125 + v15 + v16;
v16 = 0.125 + v16 + tmp;
}
return 0.5 + v0 + v1 + v2 + v3 + v4 + v5 + v6 + v7 + v8 + v9 + v10 + v11 + v12 + v13 + v14 + v15 + v16;
}
// expect 145: the loop never runs, so the result is 0.5 plus the sum of
// (n + 0.5) for n = 0..16, i.e. 0.5 + 136 + 8.5 = 145.
assertEq(f0(false), 145);