Backout 70f1b2db9f5f (bug 822116) for M2 crashes

Ed Morley 2012-12-19 09:16:26 +00:00
parent 22b5286ab6
commit 85776abd2f
5 changed files with 135 additions and 465 deletions


@@ -136,12 +136,6 @@ class SplayTree
checkCoherency(root, NULL);
}
template <class Op>
void forEach(Op op)
{
forEachInner(op, root);
}
private:
Node *lookup(const T &v)
@@ -240,17 +234,6 @@ class SplayTree
}
}
template <class Op>
void forEachInner(Op op, Node *node)
{
if (!node)
return;
forEachInner(op, node->left);
op(node->item);
forEachInner(op, node->right);
}
Node *checkCoherency(Node *node, Node *minimum)
{
#ifdef DEBUG
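For context, a minimal standalone sketch of the forEach/forEachInner pattern removed from SplayTree above: an in-order traversal that applies a caller-supplied functor to every item. Node, Tree and PrintOp below are simplified stand-ins invented for the sketch, not the actual js::SplayTree types.

#include <cstdio>

template <class T>
struct Node {
    T item;
    Node *left;
    Node *right;
    explicit Node(const T &v) : item(v), left(nullptr), right(nullptr) {}
};

template <class T>
struct Tree {
    Node<T> *root;
    Tree() : root(nullptr) {}

    // Public entry point: apply op to every item, smallest to largest.
    template <class Op>
    void forEach(Op op) {
        forEachInner(op, root);
    }

  private:
    template <class Op>
    void forEachInner(Op op, Node<T> *node) {
        if (!node)
            return;
        forEachInner(op, node->left);   // smaller items first
        op(node->item);                 // then this item
        forEachInner(op, node->right);  // then larger items
    }
};

struct PrintOp {
    void operator()(int v) { printf("%d\n", v); }
};

int main() {
    Tree<int> t;
    Node<int> a(2), b(1), c(3);
    t.root = &a;
    a.left = &b;
    a.right = &c;
    t.forEach(PrintOp());   // prints 1, 2, 3
    return 0;
}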


@@ -118,7 +118,7 @@ BacktrackingAllocator::go()
if (!init())
return false;
if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2))
if (!queuedIntervals.reserve(graph.numVirtualRegisters() * 3 / 2))
return false;
if (!groupAndQueueRegisters())
@@ -128,12 +128,12 @@ BacktrackingAllocator::go()
dumpRegisterGroups();
// Allocate, spill and split register intervals until finished.
while (!allocationQueue.empty()) {
while (!queuedIntervals.empty()) {
if (mir->shouldCancel("Backtracking Allocation"))
return false;
QueueItem item = allocationQueue.removeHighest();
if (item.interval ? !processInterval(item.interval) : !processGroup(item.group))
LiveInterval *interval = queuedIntervals.removeHighest().interval;
if (!processInterval(interval))
return false;
}
@@ -144,37 +144,21 @@ BacktrackingAllocator::go()
}
static bool
LifetimesOverlap(BacktrackingVirtualRegister *reg0, BacktrackingVirtualRegister *reg1)
LifetimesMightOverlap(BacktrackingVirtualRegister *reg0, BacktrackingVirtualRegister *reg1)
{
// Registers may have been eagerly split in two, see tryGroupReusedRegister.
// In such cases, only consider the first interval.
JS_ASSERT(reg0->numIntervals() <= 2 && reg1->numIntervals() <= 2);
LiveInterval *interval0 = reg0->getInterval(0), *interval1 = reg1->getInterval(0);
// Interval ranges are sorted in reverse order. The lifetimes overlap if
// any of their ranges overlap.
size_t index0 = 0, index1 = 0;
while (index0 < interval0->numRanges() && index1 < interval1->numRanges()) {
const LiveInterval::Range
*range0 = interval0->getRange(index0),
*range1 = interval1->getRange(index1);
if (range0->from >= range1->to)
index0++;
else if (range1->from >= range0->to)
index1++;
else
return true;
}
return false;
// No fine grained testing, just see if there is a possibility of overlap.
CodePosition start0 = reg0->getFirstInterval()->start();
CodePosition start1 = reg1->getFirstInterval()->start();
CodePosition end0 = reg0->lastInterval()->end();
CodePosition end1 = reg1->lastInterval()->end();
return (end0 > start1) && (end1 > start0);
}
bool
BacktrackingAllocator::canAddToGroup(VirtualRegisterGroup *group, BacktrackingVirtualRegister *reg)
{
for (size_t i = 0; i < group->registers.length(); i++) {
if (LifetimesOverlap(reg, &vregs[group->registers[i]]))
if (LifetimesMightOverlap(reg, &vregs[group->registers[i]]))
return false;
}
return true;
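For context, a standalone sketch contrasting the two tests above: the exact per-range walk that the backout removes (LifetimesOverlap) and the coarse start/end envelope check it restores (LifetimesMightOverlap). Range, RangesOverlap and MightOverlap are simplified stand-ins with ranges kept in ascending order, not the actual LiveInterval API.

#include <cstdio>
#include <vector>

struct Range { unsigned from, to; };  // half-open [from, to), ascending order

// Exact test: walk both sorted range lists and report overlap only if some
// pair of ranges actually intersects.
static bool RangesOverlap(const std::vector<Range> &a, const std::vector<Range> &b) {
    size_t i = 0, j = 0;
    while (i < a.size() && j < b.size()) {
        if (a[i].to <= b[j].from)
            i++;                      // a[i] ends before b[j] starts
        else if (b[j].to <= a[i].from)
            j++;                      // b[j] ends before a[i] starts
        else
            return true;              // the two ranges intersect
    }
    return false;
}

// Coarse test: compare only the overall start/end envelopes, which may report
// overlap where the exact walk would not.
static bool MightOverlap(const std::vector<Range> &a, const std::vector<Range> &b) {
    unsigned start0 = a.front().from, end0 = a.back().to;
    unsigned start1 = b.front().from, end1 = b.back().to;
    return end0 > start1 && end1 > start0;
}

int main() {
    std::vector<Range> r0 = { {0, 4}, {10, 12} };
    std::vector<Range> r1 = { {5, 8} };           // fits entirely in r0's gap
    printf("exact: %d  coarse: %d\n", RangesOverlap(r0, r1), MightOverlap(r0, r1));
    // Prints "exact: 0  coarse: 1": the coarse test refuses some groupings the
    // exact test would allow, which is safe but less precise.
    return 0;
}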
@@ -223,7 +207,7 @@ BacktrackingAllocator::tryGroupRegisters(uint32_t vreg0, uint32_t vreg1)
return true;
}
if (LifetimesOverlap(reg0, reg1))
if (LifetimesMightOverlap(reg0, reg1))
return true;
VirtualRegisterGroup *group = new VirtualRegisterGroup();
@@ -235,171 +219,61 @@ BacktrackingAllocator::tryGroupRegisters(uint32_t vreg0, uint32_t vreg1)
return true;
}
bool
BacktrackingAllocator::tryGroupReusedRegister(uint32_t def, uint32_t use)
{
BacktrackingVirtualRegister &reg = vregs[def], &usedReg = vregs[use];
// reg is a vreg which reuses its input usedReg for its output physical
// register. Try to group reg with usedReg if at all possible, as avoiding
// copies before reg's instruction is crucial for the quality of the
// generated code (MUST_REUSE_INPUT is used by all arithmetic instructions
// on x86/x64).
if (reg.intervalFor(inputOf(reg.ins()))) {
JS_ASSERT(reg.isTemp());
reg.setMustCopyInput();
return true;
}
if (!usedReg.intervalFor(outputOf(reg.ins()))) {
// The input is not live after the instruction, either in a safepoint
// for the instruction or in subsequent code. The input and output
// can thus be in the same group.
return tryGroupRegisters(use, def);
}
// The input is live afterwards, either in future instructions or in a
// safepoint for the reusing instruction. This is impossible to satisfy
// without copying the input.
//
// It may or may not be better to split the interval at the point of the
// definition, which may permit grouping. One case where it is definitely
// better to split is if the input never has any register uses after the
// instruction. Handle this splitting eagerly.
if (usedReg.numIntervals() != 1 ||
(usedReg.def()->isPreset() && !usedReg.def()->output()->isRegister())) {
reg.setMustCopyInput();
return true;
}
LiveInterval *interval = usedReg.getInterval(0);
LBlock *block = insData[reg.ins()].block();
// The input's lifetime must end within the same block as the definition,
// otherwise it could live on in phis elsewhere.
if (interval->end() > outputOf(block->lastId())) {
reg.setMustCopyInput();
return true;
}
for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
if (iter->pos <= inputOf(reg.ins()))
continue;
LUse *use = iter->use;
if (FindReusingDefinition(insData[iter->pos].ins(), use)) {
reg.setMustCopyInput();
return true;
}
if (use->policy() != LUse::ANY && use->policy() != LUse::KEEPALIVE) {
reg.setMustCopyInput();
return true;
}
}
LiveInterval *preInterval = new LiveInterval(interval->vreg(), 0);
for (size_t i = 0; i < interval->numRanges(); i++) {
const LiveInterval::Range *range = interval->getRange(i);
JS_ASSERT(range->from <= inputOf(reg.ins()));
CodePosition to = (range->to <= outputOf(reg.ins())) ? range->to : outputOf(reg.ins());
if (!preInterval->addRange(range->from, to))
return false;
}
LiveInterval *postInterval = new LiveInterval(interval->vreg(), 0);
if (!postInterval->addRange(inputOf(reg.ins()), interval->end()))
return false;
LiveIntervalVector newIntervals;
if (!newIntervals.append(preInterval) || !newIntervals.append(postInterval))
return false;
if (!split(interval, newIntervals))
return false;
JS_ASSERT(usedReg.numIntervals() == 2);
usedReg.setCanonicalSpillExclude(inputOf(reg.ins()));
return tryGroupRegisters(use, def);
}
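For context, a minimal standalone sketch of the eager split performed by the removed tryGroupReusedRegister above: the reused input's interval is cut at the reusing instruction into a "pre" piece clamped to the instruction's output position and a "post" piece running from its input position to the original end. Range, SplitAtReuse and the plain integer positions are simplified stand-ins for LiveInterval::Range and CodePosition.

#include <cstdio>
#include <vector>

struct Range { unsigned from, to; };

static void SplitAtReuse(const std::vector<Range> &interval,
                         unsigned inputPos, unsigned outputPos,
                         std::vector<Range> *pre, std::vector<Range> *post)
{
    // Pre-interval: the original ranges, clamped so nothing extends past the
    // reusing instruction's output position.
    for (size_t i = 0; i < interval.size(); i++) {
        const Range &range = interval[i];
        unsigned to = (range.to <= outputPos) ? range.to : outputPos;
        pre->push_back(Range{range.from, to});
    }
    // Post-interval: a single range from the instruction's input position to
    // the original end of the interval.
    post->push_back(Range{inputPos, interval.back().to});
}

int main() {
    std::vector<Range> interval = { {0, 20} };
    std::vector<Range> pre, post;
    SplitAtReuse(interval, /* inputOf(ins) = */ 8, /* outputOf(ins) = */ 9, &pre, &post);
    printf("pre: [%u,%u)  post: [%u,%u)\n", pre[0].from, pre[0].to, post[0].from, post[0].to);
    return 0;
}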
bool
BacktrackingAllocator::groupAndQueueRegisters()
{
// Try to group registers with their reused inputs.
for (size_t i = 0; i < graph.numVirtualRegisters(); i++) {
BacktrackingVirtualRegister &reg = vregs[i];
if (!reg.numIntervals())
continue;
if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
LUse *use = reg.ins()->getOperand(reg.def()->getReusedInput())->toUse();
if (!tryGroupReusedRegister(i, use->virtualRegister()))
return false;
}
}
// Try to group phis with their inputs.
for (size_t i = 0; i < graph.numBlocks(); i++) {
LBlock *block = graph.getBlock(i);
for (size_t j = 0; j < block->numPhis(); j++) {
LPhi *phi = block->getPhi(j);
uint32_t output = phi->getDef(0)->virtualRegister();
for (size_t k = 0; k < phi->numOperands(); k++) {
uint32_t input = phi->getOperand(k)->toUse()->virtualRegister();
if (!tryGroupRegisters(input, output))
return false;
}
}
}
for (size_t i = 0; i < graph.numVirtualRegisters(); i++) {
if (mir->shouldCancel("Backtracking Enqueue Registers"))
if (mir->shouldCancel("Backtracking Group Registers"))
return false;
BacktrackingVirtualRegister &reg = vregs[i];
JS_ASSERT(reg.numIntervals() <= 2);
JS_ASSERT(!reg.canonicalSpill());
if (!reg.numIntervals())
continue;
// Eagerly set the canonical spill slot for registers which are preset
// for that slot, and reuse it for other registers in the group.
LDefinition *def = reg.def();
if (def->policy() == LDefinition::PRESET && !def->output()->isRegister()) {
reg.setCanonicalSpill(*def->output());
if (reg.group() && reg.group()->spill.isUse())
reg.group()->spill = *def->output();
}
// Place all intervals for this register on the allocation queue.
// During initial queueing use single queue items for groups of
// registers, so that they will be allocated together and reduce the
// risk of unnecessary conflicts. This is in keeping with the idea that
// register groups are effectively single registers whose value changes
// during execution. If any intervals in the group are evicted later
// then they will be reallocated individually.
size_t start = 0;
if (VirtualRegisterGroup *group = reg.group()) {
if (i == group->canonicalReg()) {
size_t priority = computePriority(group);
if (!allocationQueue.insert(QueueItem(group, priority)))
return false;
}
start++;
}
for (; start < reg.numIntervals(); start++) {
LiveInterval *interval = reg.getInterval(start);
for (size_t j = 0; j < reg.numIntervals(); j++) {
LiveInterval *interval = reg.getInterval(j);
if (interval->numRanges() > 0) {
size_t priority = computePriority(interval);
if (!allocationQueue.insert(QueueItem(interval, priority)))
if (!queuedIntervals.insert(QueuedInterval(interval, priority)))
return false;
}
}
LDefinition *def = reg.def();
if (def && def->policy() == LDefinition::MUST_REUSE_INPUT) {
LUse *use = reg.ins()->getOperand(def->getReusedInput())->toUse();
VirtualRegister &usedReg = vregs[use->virtualRegister()];
if (usedReg.intervalFor(outputOf(reg.ins())) || reg.intervalFor(inputOf(reg.ins()))) {
// This definition reuses an input that is live afterwards
// (either in future instructions or a safepoint for the
// definition). This is impossible to satisfy without first
// copying the input, and rather than encoding this by
// splitting intervals (which may require even more copying
// later) mark the register as needing this copy during
// reification and relax the MUST_REUSE_INPUT constraint.
IonSpew(IonSpew_RegAlloc, "Relaxing reuse-input constraint on v%u", i);
reg.setMustCopyInput();
} else {
// This definition reuses an input that is not live afterwards.
// The input and output can use the same allocation, and it is
// desirable to do this to avoid unnecessary copies.
if (!tryGroupRegisters(use->virtualRegister(), def->virtualRegister()))
return false;
}
}
// Try to group phis with their inputs.
for (size_t i = 0; i < graph.numBlocks(); i++) {
LBlock *block = graph.getBlock(i);
for (size_t j = 0; j < block->numPhis(); j++) {
LPhi *phi = block->getPhi(j);
uint32_t output = phi->getDef(0)->virtualRegister();
for (size_t k = 0; k < phi->numOperands(); k++) {
uint32_t input = phi->getOperand(k)->toUse()->virtualRegister();
if (!tryGroupRegisters(input, output))
return false;
}
}
}
}
return true;
@@ -509,48 +383,6 @@ BacktrackingAllocator::processInterval(LiveInterval *interval)
}
}
bool
BacktrackingAllocator::processGroup(VirtualRegisterGroup *group)
{
if (IonSpewEnabled(IonSpew_RegAlloc)) {
IonSpew(IonSpew_RegAlloc, "Allocating group v%u [priority %lu] [weight %lu]",
group->registers[0], computePriority(group), computeSpillWeight(group));
}
LiveInterval *conflict;
for (size_t attempt = 0;; attempt++) {
// Search for any available register which the group can be allocated to.
conflict = NULL;
for (size_t i = 0; i < AnyRegister::Total; i++) {
bool success;
if (!tryAllocateGroupRegister(registers[i], group, &success, &conflict))
return false;
if (success) {
conflict = NULL;
break;
}
}
if (attempt < MAX_ATTEMPTS &&
conflict &&
computeSpillWeight(conflict) < computeSpillWeight(group))
{
if (!evictInterval(conflict))
return false;
continue;
}
for (size_t i = 0; i < group->registers.length(); i++) {
VirtualRegister &reg = vregs[group->registers[i]];
JS_ASSERT(reg.numIntervals() <= 2);
if (!processInterval(reg.getInterval(0)))
return false;
}
return true;
}
}
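For context, a standalone sketch of the allocate-or-evict policy that the removed processGroup above shares with processInterval: try every register, remember the cheapest conflicting holder, evict it only if its spill weight is lower than what is being allocated, and otherwise fall back to splitting or spilling. Holder, Allocate, MAX_ATTEMPTS and the flat "one owner per register" model are simplifications invented for the sketch; the real allocator tracks per-range conflicts.

#include <cstdio>
#include <cstddef>

struct Holder { const char *name; size_t weight; };   // current owner of a register

static const size_t MAX_ATTEMPTS = 2;

// Try to place 'weight' into one of 'numRegs' registers. On conflict, evict
// the cheapest current holder only if it is cheaper than what we are placing;
// otherwise report failure so the caller can split or spill instead.
static bool Allocate(Holder *regs, size_t numRegs, const char *name, size_t weight) {
    for (size_t attempt = 0;; attempt++) {
        Holder *conflict = nullptr;
        for (size_t i = 0; i < numRegs; i++) {
            if (!regs[i].name) {                       // free register: take it
                regs[i].name = name;
                regs[i].weight = weight;
                return true;
            }
            if (!conflict || regs[i].weight < conflict->weight)
                conflict = &regs[i];                   // remember the cheapest holder
        }
        if (attempt < MAX_ATTEMPTS && conflict && conflict->weight < weight) {
            printf("evicting %s (weight %zu)\n", conflict->name, conflict->weight);
            conflict->name = nullptr;                  // the evicted holder would be requeued
            continue;
        }
        return false;                                  // caller splits or spills
    }
}

int main() {
    Holder regs[2] = { { "v1", 5 }, { "v2", 9 } };
    if (!Allocate(regs, 2, "v3", 7))
        printf("v3 must be split or spilled\n");
    // v1 (weight 5) is evicted and v3 takes its register.
    return 0;
}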
bool
BacktrackingAllocator::setIntervalRequirement(LiveInterval *interval)
{
@@ -608,53 +440,6 @@ BacktrackingAllocator::setIntervalRequirement(LiveInterval *interval)
return true;
}
bool
BacktrackingAllocator::tryAllocateGroupRegister(PhysicalRegister &r, VirtualRegisterGroup *group,
bool *psuccess, LiveInterval **pconflicting)
{
*psuccess = false;
if (!r.allocatable)
return true;
if (r.reg.isFloat() != vregs[group->registers[0]].isDouble())
return true;
bool allocatable = true;
LiveInterval *conflicting = NULL;
for (size_t i = 0; i < group->registers.length(); i++) {
VirtualRegister &reg = vregs[group->registers[i]];
JS_ASSERT(reg.numIntervals() <= 2);
LiveInterval *interval = reg.getInterval(0);
for (size_t j = 0; j < interval->numRanges(); j++) {
AllocatedRange range(interval, interval->getRange(j)), existing;
if (r.allocations.contains(range, &existing)) {
if (conflicting) {
if (conflicting != existing.interval)
return true;
} else {
conflicting = existing.interval;
}
allocatable = false;
}
}
}
if (!allocatable) {
JS_ASSERT(conflicting);
if (!*pconflicting || computeSpillWeight(conflicting) < computeSpillWeight(*pconflicting))
*pconflicting = conflicting;
return true;
}
*psuccess = true;
group->allocation = LAllocation(r.reg);
return true;
}
bool
BacktrackingAllocator::tryAllocateRegister(PhysicalRegister &r, LiveInterval *interval,
bool *success, LiveInterval **pconflicting)
@@ -738,18 +523,17 @@ BacktrackingAllocator::evictInterval(LiveInterval *interval)
interval->setAllocation(LAllocation());
size_t priority = computePriority(interval);
return allocationQueue.insert(QueueItem(interval, priority));
return queuedIntervals.insert(QueuedInterval(interval, priority));
}
bool
BacktrackingAllocator::split(LiveInterval *interval,
const LiveIntervalVector &newIntervals)
BacktrackingAllocator::splitAndRequeueInterval(LiveInterval *interval,
const LiveIntervalVector &newIntervals)
{
JS_ASSERT(newIntervals.length() >= 2);
if (IonSpewEnabled(IonSpew_RegAlloc)) {
IonSpew(IonSpew_RegAlloc, "splitting interval v%u %s:",
interval->vreg(), IntervalString(interval));
IonSpew(IonSpew_RegAlloc, "splitting interval %s:", IntervalString(interval));
for (size_t i = 0; i < newIntervals.length(); i++)
IonSpew(IonSpew_RegAlloc, " %s", IntervalString(newIntervals[i]));
}
@@ -770,41 +554,31 @@ BacktrackingAllocator::split(LiveInterval *interval,
}
// Redistribute uses from the old interval to the new intervals. Intervals
// are permitted to overlap. In such cases, assign the use to either any
// minimal interval containing it, otherwise the interval with the latest
// start position.
// are permitted to overlap. In such cases, assign the use to the interval
// with the latest start position.
for (UsePositionIterator iter(interval->usesBegin());
iter != interval->usesEnd();
iter++)
{
CodePosition pos = iter->pos;
LiveInterval *addInterval = NULL;
LiveInterval *maxInterval = NULL;
for (size_t i = 0; i < newIntervals.length(); i++) {
LiveInterval *newInterval = newIntervals[i];
if (newInterval->covers(pos)) {
if (minimalUse(newInterval, insData[pos].ins())) {
addInterval = newInterval;
break;
}
if (!addInterval || newInterval->start() < addInterval->start())
addInterval = newInterval;
if (newIntervals[i]->covers(pos)) {
if (!maxInterval || newIntervals[i]->start() > maxInterval->start())
maxInterval = newIntervals[i];
}
}
addInterval->addUse(new UsePosition(iter->use, iter->pos));
maxInterval->addUse(new UsePosition(iter->use, iter->pos));
}
return true;
}
bool BacktrackingAllocator::requeueIntervals(const LiveIntervalVector &newIntervals)
{
// Queue the new intervals for register assignment.
for (size_t i = 0; i < newIntervals.length(); i++) {
LiveInterval *newInterval = newIntervals[i];
size_t priority = computePriority(newInterval);
if (!allocationQueue.insert(QueueItem(newInterval, priority)))
if (!queuedIntervals.insert(QueuedInterval(newInterval, priority)))
return false;
}
return true;
}
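For context, a standalone sketch of the use-redistribution rule restored in splitAndRequeueInterval above: each use of the original interval moves to whichever new interval covers its position and has the latest start (the removed variant preferred a minimal covering interval when one existed). Interval, Covers and RedistributeUses are simplified stand-ins, not the LiveInterval API.

#include <cstdio>
#include <vector>

struct Interval { unsigned start, end; std::vector<unsigned> uses; };

static bool Covers(const Interval &i, unsigned pos) {
    return pos >= i.start && pos < i.end;
}

static void RedistributeUses(const std::vector<unsigned> &uses,
                             std::vector<Interval> &newIntervals)
{
    for (size_t u = 0; u < uses.size(); u++) {
        Interval *maxInterval = nullptr;
        for (size_t i = 0; i < newIntervals.size(); i++) {
            if (Covers(newIntervals[i], uses[u])) {
                if (!maxInterval || newIntervals[i].start > maxInterval->start)
                    maxInterval = &newIntervals[i];   // latest covering start wins
            }
        }
        // Splitting guarantees every original use is covered by some new piece.
        maxInterval->uses.push_back(uses[u]);
    }
}

int main() {
    // Two overlapping pieces of a split interval; the use at position 6 is
    // covered by both and lands in the piece that starts later.
    std::vector<Interval> pieces = { {0, 8, {}}, {5, 12, {}} };
    std::vector<unsigned> uses = { 2, 6, 10 };
    RedistributeUses(uses, pieces);
    for (size_t i = 0; i < pieces.size(); i++) {
        printf("piece [%u,%u):", pieces[i].start, pieces[i].end);
        for (size_t j = 0; j < pieces[i].uses.size(); j++)
            printf(" %u", pieces[i].uses[j]);
        printf("\n");
    }
    return 0;
}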
@@ -820,42 +594,36 @@ BacktrackingAllocator::spill(LiveInterval *interval)
BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
bool useCanonical = !reg->hasCanonicalSpillExclude()
|| interval->start() < reg->canonicalSpillExclude();
if (reg->canonicalSpill()) {
IonSpew(IonSpew_RegAlloc, " Picked canonical spill location %u",
reg->canonicalSpill()->toStackSlot()->slot());
interval->setAllocation(*reg->canonicalSpill());
return;
}
if (useCanonical) {
if (reg->canonicalSpill()) {
IonSpew(IonSpew_RegAlloc, " Picked canonical spill location %s",
reg->canonicalSpill()->toString());
interval->setAllocation(*reg->canonicalSpill());
return;
}
if (reg->group() && !reg->group()->spill.isUse()) {
IonSpew(IonSpew_RegAlloc, " Reusing group spill location %s",
reg->group()->spill.toString());
interval->setAllocation(reg->group()->spill);
reg->setCanonicalSpill(reg->group()->spill);
return;
}
if (reg->group() && reg->group()->spill.isStackSlot()) {
IonSpew(IonSpew_RegAlloc, " Reusing group spill location %u",
reg->group()->spill.toStackSlot()->slot());
interval->setAllocation(reg->group()->spill);
reg->setCanonicalSpill(reg->group()->spill);
return;
}
uint32_t stackSlot;
if (reg->isDouble())
if (reg->isDouble()) {
stackSlot = stackSlotAllocator.allocateDoubleSlot();
else
} else {
stackSlot = stackSlotAllocator.allocateSlot();
}
JS_ASSERT(stackSlot <= stackSlotAllocator.stackHeight());
LStackSlot alloc(stackSlot, reg->isDouble());
interval->setAllocation(alloc);
IonSpew(IonSpew_RegAlloc, " Allocating canonical spill location %u", stackSlot);
interval->setAllocation(LStackSlot(stackSlot, reg->isDouble()));
reg->setCanonicalSpill(*interval->getAllocation());
IonSpew(IonSpew_RegAlloc, " Allocating spill location %s", alloc.toString());
if (useCanonical) {
reg->setCanonicalSpill(alloc);
if (reg->group())
reg->group()->spill = alloc;
if (reg->group()) {
JS_ASSERT(!reg->group()->spill.isStackSlot());
reg->group()->spill = *interval->getAllocation();
}
}
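For context, a standalone sketch of the spill-location reuse in spill() above, ignoring the canonicalSpillExclude and double-slot details being backed out: an interval first reuses the register's canonical spill location, then any slot already chosen for its group, and only otherwise allocates a fresh stack slot, which then becomes canonical for both the register and its group. Group, Register and SpillSlotFor are invented stand-ins for the sketch.

#include <cstdio>

struct Group { int spillSlot; Group() : spillSlot(-1) {} };

struct Register {
    int canonicalSpill;      // -1 means "no canonical spill slot yet"
    Group *group;            // may be null
    Register() : canonicalSpill(-1), group(nullptr) {}
};

static int nextSlot = 0;

static int SpillSlotFor(Register *reg) {
    if (reg->canonicalSpill >= 0)
        return reg->canonicalSpill;            // reuse the canonical location
    if (reg->group && reg->group->spillSlot >= 0) {
        reg->canonicalSpill = reg->group->spillSlot;
        return reg->canonicalSpill;            // reuse the group's slot
    }
    int slot = nextSlot++;                      // allocate a fresh slot
    reg->canonicalSpill = slot;
    if (reg->group)
        reg->group->spillSlot = slot;           // share it with the group
    return slot;
}

int main() {
    Group g;
    Register a, b;
    a.group = &g;
    b.group = &g;
    printf("a -> slot %d\n", SpillSlotFor(&a));  // allocates slot 0
    printf("b -> slot %d\n", SpillSlotFor(&b));  // reuses the group's slot 0
    return 0;
}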
@@ -964,6 +732,24 @@ BacktrackingAllocator::resolveControlFlow()
return true;
}
static LDefinition *
FindReusingDefinition(LInstruction *ins, LAllocation *alloc)
{
for (size_t i = 0; i < ins->numDefs(); i++) {
LDefinition *def = ins->getDef(i);
if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(def->getReusedInput()) == alloc)
return def;
}
for (size_t i = 0; i < ins->numTemps(); i++) {
LDefinition *def = ins->getTemp(i);
if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(def->getReusedInput()) == alloc)
return def;
}
return NULL;
}
bool
BacktrackingAllocator::isReusedInput(LUse *use, LInstruction *ins, bool considerCopy)
{
@@ -1033,11 +819,19 @@ BacktrackingAllocator::dumpRegisterGroups()
{
printf("Register groups:\n");
for (size_t i = 0; i < graph.numVirtualRegisters(); i++) {
VirtualRegisterGroup *group = vregs[i].group();
if (group && i == group->canonicalReg()) {
for (size_t j = 0; j < group->registers.length(); j++)
printf(" v%u", group->registers[j]);
printf("\n");
if (VirtualRegisterGroup *group = vregs[i].group()) {
bool minimum = true;
for (size_t j = 0; j < group->registers.length(); j++) {
if (group->registers[j] < i) {
minimum = false;
break;
}
}
if (minimum) {
for (size_t j = 0; j < group->registers.length(); j++)
printf(" v%u", group->registers[j]);
printf("\n");
}
}
}
}
@@ -1111,20 +905,6 @@ BacktrackingAllocator::dumpLiveness()
#endif // DEBUG
}
#ifdef DEBUG
struct BacktrackingAllocator::PrintLiveIntervalRange
{
void operator()(const AllocatedRange &item)
{
if (item.range == item.interval->getRange(0)) {
printf(" v%u: %s\n",
item.interval->hasVreg() ? item.interval->vreg() : 0,
IntervalString(item.interval));
}
}
};
#endif
void
BacktrackingAllocator::dumpAllocations()
{
@@ -1144,13 +924,6 @@ BacktrackingAllocator::dumpAllocations()
}
printf("\n");
for (size_t i = 0; i < AnyRegister::Total; i++) {
printf("reg %s:\n", AnyRegister::FromCode(i).name());
registers[i].allocations.forEach(PrintLiveIntervalRange());
}
printf("\n");
#endif // DEBUG
}
@@ -1182,17 +955,6 @@ BacktrackingAllocator::computePriority(const LiveInterval *interval)
return lifetimeTotal;
}
size_t
BacktrackingAllocator::computePriority(const VirtualRegisterGroup *group)
{
size_t priority = 0;
for (size_t j = 0; j < group->registers.length(); j++) {
uint32_t vreg = group->registers[j];
priority += computePriority(vregs[vreg].getInterval(0));
}
return priority;
}
CodePosition
BacktrackingAllocator::minimalDefEnd(LInstruction *ins)
{
@@ -1229,11 +991,6 @@ BacktrackingAllocator::minimalUse(const LiveInterval *interval, LInstruction *in
bool
BacktrackingAllocator::minimalInterval(const LiveInterval *interval, bool *pfixed)
{
if (!interval->hasVreg()) {
*pfixed = true;
return true;
}
if (interval->index() == 0) {
VirtualRegister &reg = vregs[interval->vreg()];
if (pfixed)
@@ -1314,17 +1071,6 @@ BacktrackingAllocator::computeSpillWeight(const LiveInterval *interval)
return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
}
size_t
BacktrackingAllocator::computeSpillWeight(const VirtualRegisterGroup *group)
{
size_t maxWeight = 0;
for (size_t j = 0; j < group->registers.length(); j++) {
uint32_t vreg = group->registers[j];
maxWeight = Max(maxWeight, computeSpillWeight(vregs[vreg].getInterval(0)));
}
return maxWeight;
}
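For context, a standalone sketch of the two group heuristics removed above: a group is queued with the sum of its members' interval priorities, but competes for eviction with the weight of its costliest member. Member, GroupPriority and GroupSpillWeight are stand-ins invented for the sketch.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Member { size_t priority; size_t spillWeight; };

// A group is queued with the sum of its members' priorities...
static size_t GroupPriority(const std::vector<Member> &group) {
    size_t total = 0;
    for (size_t i = 0; i < group.size(); i++)
        total += group[i].priority;
    return total;
}

// ...but competes for eviction with the weight of its costliest member.
static size_t GroupSpillWeight(const std::vector<Member> &group) {
    size_t maxWeight = 0;
    for (size_t i = 0; i < group.size(); i++)
        maxWeight = std::max(maxWeight, group[i].spillWeight);
    return maxWeight;
}

int main() {
    std::vector<Member> group = { {4, 10}, {7, 3} };
    printf("priority %zu, spill weight %zu\n", GroupPriority(group), GroupSpillWeight(group));
    // priority 11, spill weight 10
    return 0;
}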
bool
BacktrackingAllocator::trySplitAcrossHotcode(LiveInterval *interval, bool *success)
{
@@ -1397,7 +1143,7 @@ BacktrackingAllocator::trySplitAcrossHotcode(LiveInterval *interval, bool *succe
return false;
*success = true;
return split(interval, newIntervals) && requeueIntervals(newIntervals);
return splitAndRequeueInterval(interval, newIntervals);
}
bool
@@ -1467,7 +1213,7 @@ BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveInterval *interval, bool
return false;
*success = true;
return split(interval, newIntervals) && requeueIntervals(newIntervals);
return splitAndRequeueInterval(interval, newIntervals);
}
bool
@@ -1542,17 +1288,10 @@ BacktrackingAllocator::splitAtAllRegisterUses(LiveInterval *interval)
return false;
}
LiveInterval *spillInterval = new LiveInterval(vreg, 0);
for (size_t i = 0; i < interval->numRanges(); i++) {
const LiveInterval::Range *range = interval->getRange(i);
CodePosition from = range->from < spillStart ? spillStart : range->from;
if (!spillInterval->addRange(from, range->to))
return false;
}
if (!newIntervals.append(spillInterval))
if (!addLiveInterval(newIntervals, vreg, spillStart, interval->end()))
return false;
return split(interval, newIntervals) && requeueIntervals(newIntervals);
return splitAndRequeueInterval(interval, newIntervals);
}
bool


@@ -39,13 +39,6 @@ struct VirtualRegisterGroup : public TempObject
VirtualRegisterGroup()
: allocation(LUse(0, LUse::ANY)), spill(LUse(0, LUse::ANY))
{}
uint32_t canonicalReg() {
uint32_t minimum = registers[0];
for (size_t i = 1; i < registers.length(); i++)
minimum = Min(minimum, registers[i]);
return minimum;
}
};
class BacktrackingVirtualRegister : public VirtualRegister
@@ -57,10 +50,6 @@ class BacktrackingVirtualRegister : public VirtualRegister
// Spill location to use for this register.
LAllocation canonicalSpill_;
// Code position above which the canonical spill cannot be used; such
// intervals may overlap other registers in the same group.
CodePosition canonicalSpillExclude_;
// If this register is associated with a group of other registers,
// information about the group. This structure is shared between all
// registers in the group.
@@ -78,18 +67,10 @@ class BacktrackingVirtualRegister : public VirtualRegister
canonicalSpill_ = alloc;
}
const LAllocation *canonicalSpill() const {
return canonicalSpill_.isUse() ? NULL : &canonicalSpill_;
return canonicalSpill_.isStackSlot() ? &canonicalSpill_ : NULL;
}
void setCanonicalSpillExclude(CodePosition pos) {
canonicalSpillExclude_ = pos;
}
bool hasCanonicalSpillExclude() const {
return canonicalSpillExclude_.pos() != 0;
}
CodePosition canonicalSpillExclude() const {
JS_ASSERT(hasCanonicalSpillExclude());
return canonicalSpillExclude_;
unsigned canonicalSpillSlot() const {
return canonicalSpill_.toStackSlot()->slot();
}
void setGroup(VirtualRegisterGroup *group) {
@@ -102,22 +83,16 @@ class BacktrackingVirtualRegister : public VirtualRegister
class BacktrackingAllocator : public LiveRangeAllocator<BacktrackingVirtualRegister>
{
// Priority queue element: either an interval or group of intervals and the
// associated priority.
struct QueueItem
// Priority queue element: an interval and its immutable priority.
struct QueuedInterval
{
LiveInterval *interval;
VirtualRegisterGroup *group;
QueueItem(LiveInterval *interval, size_t priority)
: interval(interval), group(NULL), priority_(priority)
QueuedInterval(LiveInterval *interval, size_t priority)
: interval(interval), priority_(priority)
{}
QueueItem(VirtualRegisterGroup *group, size_t priority)
: interval(NULL), group(group), priority_(priority)
{}
static size_t priority(const QueueItem &v) {
static size_t priority(const QueuedInterval &v) {
return v.priority_;
}
@@ -125,7 +100,7 @@ class BacktrackingAllocator : public LiveRangeAllocator<BacktrackingVirtualRegis
size_t priority_;
};
PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;
PriorityQueue<QueuedInterval, QueuedInterval, 0, SystemAllocPolicy> queuedIntervals;
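For context, a minimal standalone sketch of how the restored queuedIntervals queue behaves: each element carries an immutable priority and removeHighest() (top()/pop() here) always yields the pending interval with the largest priority. std::priority_queue, Interval and LowerPriority stand in for the js PriorityQueue, LiveInterval and the static QueuedInterval::priority() accessor.

#include <cstdio>
#include <queue>
#include <vector>

struct Interval { unsigned vreg; };

struct QueuedInterval {
    Interval *interval;
    size_t priority;
    QueuedInterval(Interval *interval, size_t priority)
      : interval(interval), priority(priority)
    {}
};

struct LowerPriority {
    bool operator()(const QueuedInterval &a, const QueuedInterval &b) const {
        return a.priority < b.priority;   // max-heap ordered by priority
    }
};

int main() {
    std::priority_queue<QueuedInterval, std::vector<QueuedInterval>, LowerPriority> queue;
    Interval v1 = {1}, v2 = {2};
    queue.push(QueuedInterval(&v1, 10));
    queue.push(QueuedInterval(&v2, 25));
    while (!queue.empty()) {
        QueuedInterval item = queue.top();   // removeHighest()
        queue.pop();
        printf("allocating v%u (priority %zu)\n", item.interval->vreg, item.priority);
    }
    return 0;
}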
// A subrange over which a physical register is allocated.
struct AllocatedRange {
@@ -181,18 +156,14 @@ class BacktrackingAllocator : public LiveRangeAllocator<BacktrackingVirtualRegis
bool init();
bool canAddToGroup(VirtualRegisterGroup *group, BacktrackingVirtualRegister *reg);
bool tryGroupRegisters(uint32_t vreg0, uint32_t vreg1);
bool tryGroupReusedRegister(uint32_t def, uint32_t use);
bool groupAndQueueRegisters();
bool processInterval(LiveInterval *interval);
bool processGroup(VirtualRegisterGroup *group);
bool setIntervalRequirement(LiveInterval *interval);
bool tryAllocateRegister(PhysicalRegister &r, LiveInterval *interval,
bool *success, LiveInterval **pconflicting);
bool tryAllocateGroupRegister(PhysicalRegister &r, VirtualRegisterGroup *group,
bool *psuccess, LiveInterval **pconflicting);
bool evictInterval(LiveInterval *interval);
bool split(LiveInterval *interval, const LiveIntervalVector &newIntervals);
bool requeueIntervals(const LiveIntervalVector &newIntervals);
bool splitAndRequeueInterval(LiveInterval *interval,
const LiveIntervalVector &newIntervals);
void spill(LiveInterval *interval);
bool isReusedInput(LUse *use, LInstruction *ins, bool considerCopy = false);
@@ -206,8 +177,6 @@ class BacktrackingAllocator : public LiveRangeAllocator<BacktrackingVirtualRegis
void dumpLiveness();
void dumpAllocations();
struct PrintLiveIntervalRange;
CodePosition minimalDefEnd(LInstruction *ins);
bool minimalDef(const LiveInterval *interval, LInstruction *ins);
bool minimalUse(const LiveInterval *interval, LInstruction *ins);
@@ -218,9 +187,6 @@ class BacktrackingAllocator : public LiveRangeAllocator<BacktrackingVirtualRegis
size_t computePriority(const LiveInterval *interval);
size_t computeSpillWeight(const LiveInterval *interval);
size_t computePriority(const VirtualRegisterGroup *group);
size_t computeSpillWeight(const VirtualRegisterGroup *group);
bool chooseIntervalSplit(LiveInterval *interval);
bool trySplitAcrossHotcode(LiveInterval *interval, bool *success);
bool trySplitAfterLastRegisterUse(LiveInterval *interval, bool *success);


@@ -155,24 +155,6 @@ DefinitionCompatibleWith(LInstruction *ins, const LDefinition *def, LAllocation
#endif // DEBUG
static inline LDefinition *
FindReusingDefinition(LInstruction *ins, LAllocation *alloc)
{
for (size_t i = 0; i < ins->numDefs(); i++) {
LDefinition *def = ins->getDef(i);
if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(def->getReusedInput()) == alloc)
return def;
}
for (size_t i = 0; i < ins->numTemps(); i++) {
LDefinition *def = ins->getTemp(i);
if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(def->getReusedInput()) == alloc)
return def;
}
return NULL;
}
/*
* A live interval is a set of disjoint ranges of code positions where a
* virtual register is live. Register allocation operates on these intervals,


@@ -466,7 +466,7 @@ LIRGenerator::visitTest(MTest *test)
LIsNullOrLikeUndefinedAndBranch *lir =
new LIsNullOrLikeUndefinedAndBranch(ifTrue, ifFalse, temp0, temp1);
if (!useBoxAtStart(lir, LIsNullOrLikeUndefinedAndBranch::Value, left))
if (!useBox(lir, LIsNullOrLikeUndefinedAndBranch::Value, left))
return false;
return add(lir, comp);
}