Bug 814966 - Add backtracking register allocator, r=jandem.

Brian Hackett 2012-12-14 11:57:30 -07:00
parent 6715b2cff1
commit c6a01859e2
19 changed files with 1833 additions and 184 deletions

View File

@ -277,6 +277,7 @@ VPATH += $(srcdir)/ion
VPATH += $(srcdir)/ion/shared
CPPSRCS += MIR.cpp \
BacktrackingAllocator.cpp \
Bailouts.cpp \
BitSet.cpp \
C1Spewer.cpp \

File diff suppressed because it is too large.

View File

@ -0,0 +1,199 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=4 sw=4 et tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef js_ion_backtrackingallocator_h__
#define js_ion_backtrackingallocator_h__
#include "LiveRangeAllocator.h"
#include "ds/PriorityQueue.h"
#include "ds/SplayTree.h"
// Backtracking, priority-queue-based register allocator, based on the one described
// in the following blog post:
//
// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
namespace js {
namespace ion {
// Information about a group of registers. Registers may be grouped together
// when (a) all of their lifetimes are disjoint, (b) they are of the same type
// (double / non-double) and (c) it is desirable that they have the same
// allocation.
struct VirtualRegisterGroup : public TempObject
{
// All virtual registers in the group.
Vector<uint32_t, 2, IonAllocPolicy> registers;
// Desired physical register to use for registers in the group.
LAllocation allocation;
// Spill location to be shared by registers in the group.
LAllocation spill;
VirtualRegisterGroup()
: allocation(LUse(0, LUse::ANY)), spill(LUse(0, LUse::ANY))
{}
};
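// Illustrative note (a reading of the structure above, not text from the
// patch): a natural candidate group is a MUST_REUSE_INPUT definition together
// with the input vreg it reuses -- the input's lifetime ends where the
// output's begins, both have the same type, and sharing one allocation lets
// the allocator avoid inserting a copy between them.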
class BacktrackingVirtualRegister : public VirtualRegister
{
// If this register's definition is MUST_REUSE_INPUT, whether a copy must
// be introduced before the definition that relaxes the policy.
bool mustCopyInput_;
// Spill location to use for this register.
LAllocation canonicalSpill_;
// If this register is associated with a group of other registers,
// information about the group. This structure is shared between all
// registers in the group.
VirtualRegisterGroup *group_;
public:
void setMustCopyInput() {
mustCopyInput_ = true;
}
bool mustCopyInput() {
return mustCopyInput_;
}
void setCanonicalSpill(LAllocation alloc) {
canonicalSpill_ = alloc;
}
const LAllocation *canonicalSpill() const {
return canonicalSpill_.isStackSlot() ? &canonicalSpill_ : NULL;
}
unsigned canonicalSpillSlot() const {
return canonicalSpill_.toStackSlot()->slot();
}
void setGroup(VirtualRegisterGroup *group) {
group_ = group;
}
VirtualRegisterGroup *group() {
return group_;
}
};
class BacktrackingAllocator : public LiveRangeAllocator<BacktrackingVirtualRegister>
{
// Priority queue element: an interval and its immutable priority.
struct QueuedInterval
{
LiveInterval *interval;
QueuedInterval(LiveInterval *interval, size_t priority)
: interval(interval), priority_(priority)
{}
static size_t priority(const QueuedInterval &v) {
return v.priority_;
}
private:
size_t priority_;
};
PriorityQueue<QueuedInterval, QueuedInterval, 0, SystemAllocPolicy> queuedIntervals;
// A subrange over which a physical register is allocated.
struct AllocatedRange {
LiveInterval *interval;
const LiveInterval::Range *range;
AllocatedRange()
: interval(NULL), range(NULL)
{}
AllocatedRange(LiveInterval *interval, const LiveInterval::Range *range)
: interval(interval), range(range)
{}
static int compare(const AllocatedRange &v0, const AllocatedRange &v1) {
// LiveInterval::Range includes 'from' but excludes 'to'.
if (v0.range->to.pos() <= v1.range->from.pos())
return -1;
if (v0.range->from.pos() >= v1.range->to.pos())
return 1;
return 0;
}
};
typedef SplayTree<AllocatedRange, AllocatedRange> AllocatedRangeSet;
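// Worked example (illustrative, assuming the half-open semantics above):
//   compare([4,8), [8,12)) == -1   // disjoint, merely touching endpoints
//   compare([4,8), [6,10)) ==  0   // overlapping ranges compare "equal"
// Probing the splay tree with a candidate range therefore finds any existing
// allocation that intersects it, which is the query the allocator needs.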
// Each physical register is associated with the set of ranges over which
// that register is currently allocated.
struct PhysicalRegister {
bool allocatable;
AnyRegister reg;
AllocatedRangeSet allocations;
PhysicalRegister() : allocatable(false) {}
};
FixedArityList<PhysicalRegister, AnyRegister::Total> registers;
// Ranges of code which are considered to be hot, for which good allocation
// should be prioritized.
AllocatedRangeSet hotcode;
public:
BacktrackingAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
: LiveRangeAllocator<BacktrackingVirtualRegister>(mir, lir, graph, /* forLSRA = */ false)
{ }
bool go();
private:
typedef Vector<LiveInterval *, 4, SystemAllocPolicy> LiveIntervalVector;
bool init();
bool canAddToGroup(VirtualRegisterGroup *group, BacktrackingVirtualRegister *reg);
bool tryGroupRegisters(uint32_t vreg0, uint32_t vreg1);
bool groupAndQueueRegisters();
bool processInterval(LiveInterval *interval);
bool setIntervalRequirement(LiveInterval *interval);
bool tryAllocateRegister(PhysicalRegister &r, LiveInterval *interval,
bool *success, LiveInterval **pconflicting);
bool evictInterval(LiveInterval *interval);
bool splitAndRequeueInterval(LiveInterval *interval,
const LiveIntervalVector &newIntervals);
void spill(LiveInterval *interval);
bool isReusedInput(LUse *use, LInstruction *ins, bool considerCopy = false);
bool addLiveInterval(LiveIntervalVector &intervals, uint32_t vreg,
CodePosition from, CodePosition to);
bool resolveControlFlow();
bool reifyAllocations();
void dumpRegisterGroups();
void dumpLiveness();
void dumpAllocations();
CodePosition minimalDefEnd(LInstruction *ins);
bool minimalDef(const LiveInterval *interval, LInstruction *ins);
bool minimalUse(const LiveInterval *interval, LInstruction *ins);
bool minimalInterval(const LiveInterval *interval, bool *pfixed = NULL);
// Heuristic methods.
size_t computePriority(const LiveInterval *interval);
size_t computeSpillWeight(const LiveInterval *interval);
bool chooseIntervalSplit(LiveInterval *interval);
bool trySplitAcrossHotcode(LiveInterval *interval, bool *success);
bool trySplitAfterLastRegisterUse(LiveInterval *interval, bool *success);
bool splitAtAllRegisterUses(LiveInterval *interval);
};
} // namespace ion
} // namespace js
#endif
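To make the shape of the algorithm concrete, here is a minimal sketch of the allocate / evict / requeue loop the class above declares, under deliberately simplified assumptions: plain structs stand in for LiveInterval, a linear scan stands in for the per-register AllocatedRangeSet splay tree, plain spilling stands in for chooseIntervalSplit, and the toy priority/spillWeight fields mirror computePriority and computeSpillWeight in spirit only, not in their actual formulas.

    #include <cstddef>
    #include <cstdio>
    #include <queue>
    #include <vector>

    // Toy stand-in for LiveInterval: one half-open range [from, to) plus the
    // two heuristics the real allocator computes per interval.
    struct Interval {
        int id;
        int from, to;
        int priority;     // processed highest-first, cf. computePriority
        int spillWeight;  // cheapness of eviction, cf. computeSpillWeight
    };

    struct ByPriority {
        bool operator()(const Interval &a, const Interval &b) const {
            return a.priority < b.priority;
        }
    };

    static bool overlaps(const Interval &a, const Interval &b) {
        return a.from < b.to && b.from < a.to;
    }

    int main() {
        std::priority_queue<Interval, std::vector<Interval>, ByPriority> queued;
        queued.push(Interval{0, 0, 10, 10, 5});
        queued.push(Interval{1, 2, 6, 4, 1});
        queued.push(Interval{2, 6, 9, 3, 9});

        std::vector<std::vector<Interval>> regs(1); // one physical register

        while (!queued.empty()) {
            Interval cur = queued.top();
            queued.pop();
            bool allocated = false;
            for (size_t r = 0; r < regs.size() && !allocated; r++) {
                // Find conflicting allocations (a splay-tree lookup in the
                // real allocator) and check whether all are cheaper to evict.
                std::vector<size_t> conflicts;
                bool allCheaper = true;
                for (size_t i = 0; i < regs[r].size(); i++) {
                    if (overlaps(cur, regs[r][i])) {
                        conflicts.push_back(i);
                        if (regs[r][i].spillWeight >= cur.spillWeight)
                            allCheaper = false;
                    }
                }
                if (conflicts.empty()) {
                    regs[r].push_back(cur);
                    std::printf("v%d -> r%zu\n", cur.id, r);
                    allocated = true;
                } else if (allCheaper) {
                    // Backtrack: evict the cheaper intervals and requeue them.
                    for (size_t k = conflicts.size(); k-- > 0;) {
                        std::printf("v%d evicts v%d\n", cur.id, regs[r][conflicts[k]].id);
                        queued.push(regs[r][conflicts[k]]);
                        regs[r].erase(regs[r].begin() + (std::ptrdiff_t)conflicts[k]);
                    }
                    regs[r].push_back(cur);
                    allocated = true;
                }
            }
            // The real allocator would split the interval here and requeue
            // the pieces (chooseIntervalSplit); the toy simply spills.
            if (!allocated)
                std::printf("v%d spilled\n", cur.id);
        }
        return 0;
    }

The point of the sketch is the backtracking step: a high-priority interval may evict already-allocated, cheaper-to-spill intervals, which go back onto the queue and are processed again.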

View File

@ -117,7 +117,7 @@ C1Spewer::spewIntervals(FILE *fp, LinearScanAllocator *regalloc, LInstruction *i
LiveInterval *live = vreg->getInterval(i);
if (live->numRanges()) {
fprintf(fp, "%d object \"", (i == 0) ? vreg->id() : int32_t(nextId++));
LAllocation::PrintAllocation(fp, live->getAllocation());
fprintf(fp, "%s", live->getAllocation()->toString());
fprintf(fp, "\" %d -1", vreg->id());
for (size_t j = 0; j < live->numRanges(); j++) {
fprintf(fp, " [%d, %d[", live->getRange(j)->from.pos(),

View File

@ -21,6 +21,7 @@
#include "jsworkers.h"
#include "IonCompartment.h"
#include "CodeGenerator.h"
#include "BacktrackingAllocator.h"
#include "StupidAllocator.h"
#include "UnreachableCodeElimination.h"
@ -999,6 +1000,19 @@ CompileBackEnd(MIRGenerator *mir)
break;
}
case RegisterAllocator_Backtracking: {
integrity.record();
BacktrackingAllocator regalloc(mir, &lirgen, *lir);
if (!regalloc.go())
return NULL;
if (!integrity.check(true))
return NULL;
IonSpewPass("Allocate Registers [Backtracking]");
break;
}
case RegisterAllocator_Stupid: {
// Use the integrity checker to populate safepoint information, so
// it is run in all builds.

View File

@ -22,6 +22,7 @@ class TempAllocator;
// Possible register allocators which may be used.
enum IonRegisterAllocator {
RegisterAllocator_LSRA,
RegisterAllocator_Backtracking,
RegisterAllocator_Stupid
};

View File

@ -405,9 +405,7 @@ JSONSpewer::spewIntervals(LinearScanAllocator *regalloc)
if (live->numRanges()) {
beginObject();
property("allocation");
fprintf(fp_, "\"");
LAllocation::PrintAllocation(fp_, live->getAllocation());
fprintf(fp_, "\"");
fprintf(fp_, "\"%s\"", live->getAllocation()->toString());
beginListProperty("ranges");
for (size_t j = 0; j < live->numRanges(); j++) {

View File

@ -836,7 +836,7 @@ class LTestIAndBranch : public LInstructionHelper<0, 1, 0>
};
// Takes in a double input and tests it for truthiness.
class LTestDAndBranch : public LInstructionHelper<0, 1, 1>
class LTestDAndBranch : public LInstructionHelper<0, 1, 0>
{
MBasicBlock *ifTrue_;
MBasicBlock *ifFalse_;

View File

@ -203,9 +203,7 @@ PrintDefinition(FILE *fp, const LDefinition &def)
if (def.virtualRegister())
fprintf(fp, ":%d", def.virtualRegister());
if (def.policy() == LDefinition::PRESET) {
fprintf(fp, " (");
LAllocation::PrintAllocation(fp, def.output());
fprintf(fp, ")");
fprintf(fp, " (%s)", def.output()->toString());
} else if (def.policy() == LDefinition::MUST_REUSE_INPUT) {
fprintf(fp, " (!)");
} else if (def.policy() == LDefinition::PASSTHROUGH) {
@ -215,60 +213,62 @@ PrintDefinition(FILE *fp, const LDefinition &def)
}
static void
PrintUse(FILE *fp, const LUse *use)
PrintUse(char *buf, size_t size, const LUse *use)
{
fprintf(fp, "v%d:", use->virtualRegister());
if (use->policy() == LUse::ANY) {
fprintf(fp, "*");
JS_snprintf(buf, size, "v%d:*", use->virtualRegister());
} else if (use->policy() == LUse::REGISTER) {
fprintf(fp, "r");
JS_snprintf(buf, size, "v%d:r", use->virtualRegister());
} else {
// Unfortunately, we don't know here whether the virtual register is a
// float or a double. Should we steal a bit in LUse for help? For now,
// nothing defines any fixed xmm registers.
fprintf(fp, "%s", Registers::GetName(Registers::Code(use->registerCode())));
JS_snprintf(buf, size, "v%d:%s", use->virtualRegister(),
Registers::GetName(Registers::Code(use->registerCode())));
}
}
void
LAllocation::PrintAllocation(FILE *fp, const LAllocation *a)
#ifdef DEBUG
const char *
LAllocation::toString() const
{
switch (a->kind()) {
// Not reentrant!
static char buf[40];
switch (kind()) {
case LAllocation::CONSTANT_VALUE:
case LAllocation::CONSTANT_INDEX:
fprintf(fp, "c");
break;
return "c";
case LAllocation::GPR:
fprintf(fp, "=%s", a->toGeneralReg()->reg().name());
break;
JS_snprintf(buf, sizeof(buf), "=%s", toGeneralReg()->reg().name());
return buf;
case LAllocation::FPU:
fprintf(fp, "=%s", a->toFloatReg()->reg().name());
break;
JS_snprintf(buf, sizeof(buf), "=%s", toFloatReg()->reg().name());
return buf;
case LAllocation::STACK_SLOT:
fprintf(fp, "stack:i%d", a->toStackSlot()->slot());
break;
JS_snprintf(buf, sizeof(buf), "stack:i%d", toStackSlot()->slot());
return buf;
case LAllocation::DOUBLE_SLOT:
fprintf(fp, "stack:d%d", a->toStackSlot()->slot());
break;
JS_snprintf(buf, sizeof(buf), "stack:d%d", toStackSlot()->slot());
return buf;
case LAllocation::ARGUMENT:
fprintf(fp, "arg:%d", a->toArgument()->index());
break;
JS_snprintf(buf, sizeof(buf), "arg:%d", toArgument()->index());
return buf;
case LAllocation::USE:
PrintUse(fp, a->toUse());
break;
PrintUse(buf, sizeof(buf), toUse());
return buf;
default:
JS_NOT_REACHED("what?");
break;
return "???";
}
}
#endif // DEBUG
void
LInstruction::printOperands(FILE *fp)
{
for (size_t i = 0; i < numOperands(); i++) {
fprintf(fp, " (");
LAllocation::PrintAllocation(fp, getOperand(i));
fprintf(fp, ")");
fprintf(fp, " (%s)", getOperand(i)->toString());
if (i != numOperands() - 1)
fprintf(fp, ",");
}
@ -368,11 +368,9 @@ LMoveGroup::printOperands(FILE *fp)
{
for (size_t i = 0; i < numMoves(); i++) {
const LMove &move = getMove(i);
fprintf(fp, "[");
LAllocation::PrintAllocation(fp, move.from());
fprintf(fp, " -> ");
LAllocation::PrintAllocation(fp, move.to());
fprintf(fp, "]");
// Use two printfs, as LAllocation::toString is not reentrant.
fprintf(fp, "[%s", move.from()->toString());
fprintf(fp, " -> %s]", move.to()->toString());
if (i != numMoves() - 1)
fprintf(fp, ", ");
}

View File

@ -198,7 +198,11 @@ class LAllocation : public TempObject
return bits_;
}
static void PrintAllocation(FILE *fp, const LAllocation *a);
#ifdef DEBUG
const char *toString() const;
#else
const char *toString() const { return "???"; }
#endif
};
class LUse : public LAllocation
@ -1038,7 +1042,7 @@ class LSafepoint : public TempObject
for (size_t i = 0; i < nunboxParts_.length(); i++) {
if (nunboxParts_[i].type == type)
return true;
if (nunboxParts_[i].type == LUse(LUse::ANY, typeVreg)) {
if (nunboxParts_[i].type == LUse(typeVreg, LUse::ANY)) {
nunboxParts_[i].type = type;
partialNunboxes_--;
return true;
@ -1067,7 +1071,7 @@ class LSafepoint : public TempObject
for (size_t i = 0; i < nunboxParts_.length(); i++) {
if (nunboxParts_[i].payload == payload)
return true;
if (nunboxParts_[i].payload == LUse(LUse::ANY, payloadVreg)) {
if (nunboxParts_[i].payload == LUse(payloadVreg, LUse::ANY)) {
partialNunboxes_--;
nunboxParts_[i].payload = payload;
return true;

View File

@ -92,8 +92,8 @@ LinearScanAllocator::allocateRegisters()
return false;
CodePosition position = current->start();
Requirement *req = current->requirement();
Requirement *hint = current->hint();
const Requirement *req = current->requirement();
const Requirement *hint = current->hint();
IonSpew(IonSpew_RegAlloc, "Processing %d = [%u, %u] (pri=%d)",
current->hasVreg() ? current->vreg() : 0, current->start().pos(),
@ -775,18 +775,6 @@ LinearScanAllocator::assign(LAllocation allocation)
return true;
}
#ifdef JS_NUNBOX32
LinearScanVirtualRegister *
LinearScanAllocator::otherHalfOfNunbox(VirtualRegister *vreg)
{
signed offset = OffsetToOtherHalfOfNunbox(vreg->type());
LinearScanVirtualRegister *other = &vregs[vreg->def()->virtualRegister() + offset];
AssertTypesFormANunbox(vreg->type(), other->type());
return other;
}
#endif
uint32_t
LinearScanAllocator::allocateSlotFor(const LiveInterval *interval)
{
@ -991,7 +979,7 @@ LinearScanAllocator::findBestFreeRegister(CodePosition *freeUntil)
}
// Assign the register suggested by the hint if it's free.
Requirement *hint = current->hint();
const Requirement *hint = current->hint();
if (hint->kind() == Requirement::FIXED && hint->allocation().isRegister()) {
AnyRegister hintReg = hint->allocation().toRegister();
if (freeUntilPos[hintReg.code()] > hint->pos())
@ -1112,28 +1100,6 @@ LinearScanAllocator::canCoexist(LiveInterval *a, LiveInterval *b)
return true;
}
bool
LinearScanAllocator::addMove(LMoveGroup *moves, LiveInterval *from, LiveInterval *to)
{
if (*from->getAllocation() == *to->getAllocation())
return true;
return moves->add(from->getAllocation(), to->getAllocation());
}
bool
LinearScanAllocator::moveInput(CodePosition pos, LiveInterval *from, LiveInterval *to)
{
LMoveGroup *moves = getInputMoveGroup(pos);
return addMove(moves, from, to);
}
bool
LinearScanAllocator::moveAfter(CodePosition pos, LiveInterval *from, LiveInterval *to)
{
LMoveGroup *moves = getMoveGroupAfter(pos);
return addMove(moves, from, to);
}
#ifdef DEBUG
/*
* Ensure intervals appear in exactly the appropriate one of {active,inactive,

View File

@ -10,7 +10,6 @@
#include "LiveRangeAllocator.h"
#include "BitSet.h"
#include "StackSlotAllocator.h"
#include "js/Vector.h"
@ -79,9 +78,6 @@ class LinearScanAllocator : public LiveRangeAllocator<LinearScanVirtualRegister>
LiveInterval *dequeue();
};
// Allocation state
StackSlotAllocator stackSlotAllocator;
typedef Vector<LiveInterval *, 0, SystemAllocPolicy> SlotList;
SlotList finishedSlots_;
SlotList finishedDoubleSlots_;
@ -112,10 +108,7 @@ class LinearScanAllocator : public LiveRangeAllocator<LinearScanVirtualRegister>
AnyRegister::Code findBestFreeRegister(CodePosition *freeUntil);
AnyRegister::Code findBestBlockedRegister(CodePosition *nextUsed);
bool canCoexist(LiveInterval *a, LiveInterval *b);
bool addMove(LMoveGroup *moves, LiveInterval *from, LiveInterval *to);
bool moveInput(CodePosition pos, LiveInterval *from, LiveInterval *to);
bool moveInputAlloc(CodePosition pos, LAllocation *from, LAllocation *to);
bool moveAfter(CodePosition pos, LiveInterval *from, LiveInterval *to);
void setIntervalRequirement(LiveInterval *interval);
size_t findFirstSafepoint(LiveInterval *interval, size_t firstSafepoint);
size_t findFirstNonCallSafepoint(CodePosition from);
@ -129,13 +122,9 @@ class LinearScanAllocator : public LiveRangeAllocator<LinearScanVirtualRegister>
inline void validateAllocations() { }
#endif
#ifdef JS_NUNBOX32
LinearScanVirtualRegister *otherHalfOfNunbox(VirtualRegister *vreg);
#endif
public:
LinearScanAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
: LiveRangeAllocator<LinearScanVirtualRegister>(mir, lir, graph)
: LiveRangeAllocator<LinearScanVirtualRegister>(mir, lir, graph, /* forLSRA = */ true)
{
}

View File

@ -7,6 +7,7 @@
#include "LiveRangeAllocator.h"
#include "BacktrackingAllocator.h"
#include "LinearScan.h"
using namespace js;
@ -33,6 +34,43 @@ Requirement::priority() const
}
}
bool
LiveInterval::Range::contains(const Range *other) const
{
Range pre, inside, post;
intersect(other, &pre, &inside, &post);
return inside.from == other->from && inside.to == other->to;
}
void
LiveInterval::Range::intersect(const Range *other, Range *pre, Range *inside, Range *post) const
{
JS_ASSERT(pre->empty() && inside->empty() && post->empty());
CodePosition innerFrom = from;
if (from < other->from) {
if (to < other->from) {
*pre = Range(from, to);
return;
}
*pre = Range(from, other->from);
innerFrom = other->from;
}
CodePosition innerTo = to;
if (to > other->to) {
if (from >= other->to) {
*post = Range(from, to);
return;
}
*post = Range(other->to, to);
innerTo = other->to;
}
if (innerFrom != innerTo)
*inside = Range(innerFrom, innerTo);
}
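// Worked examples (illustrative, using the half-open [from, to) convention):
//   [4,10).intersect([6,8))  ->  pre=[4,6), inside=[6,8), post=[8,10)
//   [4,5).intersect([6,8))   ->  pre=[4,5) only (disjoint, early return)
// contains(other) then holds exactly when 'inside' reproduces 'other' whole.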
bool
LiveInterval::addRangeAtHead(CodePosition from, CodePosition to)
{
@ -316,8 +354,11 @@ VirtualRegister::getFirstInterval()
void
EnsureLiveRangeAllocatorInstantiation(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
{
LiveRangeAllocator<LinearScanVirtualRegister> allocator(mir, lir, graph);
allocator.buildLivenessInfo();
LiveRangeAllocator<LinearScanVirtualRegister> lsra(mir, lir, graph, true);
lsra.buildLivenessInfo();
LiveRangeAllocator<BacktrackingVirtualRegister> backtracking(mir, lir, graph, false);
backtracking.buildLivenessInfo();
}
#ifdef DEBUG
@ -499,8 +540,21 @@ LiveRangeAllocator<VREG>::buildLivenessInfo()
// Calls may clobber registers, so force a spill and reload around the callsite.
if (ins->isCall()) {
for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
return false;
if (forLSRA) {
if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
return false;
} else {
bool found = false;
for (size_t i = 0; i < ins->numDefs(); i++) {
if (ins->getDef(i)->isPreset() &&
*ins->getDef(i)->output() == LAllocation(*iter)) {
found = true;
break;
}
}
if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
return false;
}
}
}
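// (Note on the non-LSRA branch above: a register is exempted from the
// clobber range only when it is a preset output of the call itself, so the
// call's return register stays usable for its definition while every other
// physical register gets a one-position fixed range at the call.)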
@ -509,7 +563,7 @@ LiveRangeAllocator<VREG>::buildLivenessInfo()
LDefinition *def = ins->getDef(i);
CodePosition from;
if (def->policy() == LDefinition::PRESET && def->output()->isRegister()) {
if (def->policy() == LDefinition::PRESET && def->output()->isRegister() && forLSRA) {
// The fixed range covers the current instruction so the
// interval for the virtual register starts at the next
// instruction. If the next instruction has a fixed use,
@ -523,7 +577,7 @@ LiveRangeAllocator<VREG>::buildLivenessInfo()
return false;
from = outputOf(*ins).next();
} else {
from = inputOf(*ins);
from = forLSRA ? inputOf(*ins) : outputOf(*ins);
}
if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
@ -555,17 +609,22 @@ LiveRangeAllocator<VREG>::buildLivenessInfo()
LDefinition *temp = ins->getTemp(i);
if (temp->isBogusTemp())
continue;
if (ins->isCall()) {
JS_ASSERT(temp->isPreset());
continue;
}
if (temp->policy() == LDefinition::PRESET) {
AnyRegister reg = temp->output()->toRegister();
if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
return false;
if (forLSRA) {
if (temp->policy() == LDefinition::PRESET) {
if (ins->isCall())
continue;
AnyRegister reg = temp->output()->toRegister();
if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
return false;
} else {
JS_ASSERT(!ins->isCall());
if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
return false;
}
} else {
if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
CodePosition to = ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), to))
return false;
}
}
@ -613,18 +672,30 @@ LiveRangeAllocator<VREG>::buildLivenessInfo()
continue;
CodePosition to;
if (use->isFixedRegister()) {
JS_ASSERT(!use->usedAtStart());
AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
return false;
to = inputOf(*ins);
if (forLSRA) {
if (use->isFixedRegister()) {
JS_ASSERT(!use->usedAtStart());
AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
return false;
to = inputOf(*ins);
} else {
to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
}
} else {
to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
to = (use->usedAtStart() || ins->isCall()) ? inputOf(*ins) : outputOf(*ins);
if (use->isFixedRegister()) {
LAllocation reg(AnyRegister::FromCode(use->registerCode()));
for (size_t i = 0; i < ins->numDefs(); i++) {
LDefinition *def = ins->getDef(i);
if (def->policy() == LDefinition::PRESET && *def->output() == reg)
to = inputOf(*ins);
}
}
}
LiveInterval *interval = vregs[use].getInterval(0);
if (!interval->addRangeAtHead(inputOf(block->firstId()), to))
if (!interval->addRangeAtHead(inputOf(block->firstId()), forLSRA ? to : to.next()))
return false;
interval->addUse(new UsePosition(use, to));

View File

@ -9,6 +9,7 @@
#define js_ion_liverangeallocator_h__
#include "RegisterAllocator.h"
#include "StackSlotAllocator.h"
// Common structures and functions used by register allocators that operate on
// virtual register live ranges.
@ -167,6 +168,10 @@ class LiveInterval
* register associated with this interval is live.
*/
struct Range {
Range()
: from(),
to()
{ }
Range(CodePosition f, CodePosition t)
: from(f),
to(t)
@ -177,6 +182,17 @@ class LiveInterval
// The end of this range, exclusive.
CodePosition to;
bool empty() const {
return from >= to;
}
// Whether this range wholly contains other.
bool contains(const Range *other) const;
// Intersect this range with other, returning the subranges of this
// that are before, inside, or after other.
void intersect(const Range *other, Range *pre, Range *inside, Range *post) const;
};
private:
@ -257,22 +273,35 @@ class LiveInterval
void setIndex(uint32_t index) {
index_ = index;
}
Requirement *requirement() {
const Requirement *requirement() const {
return &requirement_;
}
void setRequirement(const Requirement &requirement) {
// A SAME_AS_OTHER requirement complicates regalloc too much; it
// should only be used as a hint.
JS_ASSERT(requirement.kind() != Requirement::SAME_AS_OTHER);
// Fixed registers are handled with fixed intervals, so fixed requirements
// are only valid for non-register allocations.
JS_ASSERT_IF(requirement.kind() == Requirement::FIXED,
!requirement.allocation().isRegister());
requirement_ = requirement;
}
Requirement *hint() {
bool addRequirement(const Requirement &newRequirement) {
// Merge newRequirement with any existing requirement, returning false
// if the new and old requirements conflict.
JS_ASSERT(newRequirement.kind() != Requirement::SAME_AS_OTHER);
if (newRequirement.kind() == Requirement::FIXED) {
if (requirement_.kind() == Requirement::FIXED)
return newRequirement.allocation() == requirement_.allocation();
requirement_ = newRequirement;
return true;
}
JS_ASSERT(newRequirement.kind() == Requirement::REGISTER);
if (requirement_.kind() == Requirement::FIXED)
return requirement_.allocation().isRegister();
requirement_ = newRequirement;
return true;
}
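// Merge outcomes at a glance (a summary of addRequirement above):
//   existing FIXED + new FIXED    -> succeeds iff the allocations match
//   existing FIXED + new REGISTER -> succeeds iff the fixed allocation is a register
//   anything else  + new FIXED    -> adopt the FIXED requirement
//   anything else  + new REGISTER -> adopt the REGISTER requirement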
const Requirement *hint() const {
return &hint_;
}
void setHint(const Requirement &hint) {
@ -319,6 +348,7 @@ class VirtualRegister
public:
bool init(uint32_t id, LBlock *block, LInstruction *ins, LDefinition *def, bool isTemp) {
JS_ASSERT(block && !block_);
id_ = id;
block_ = block;
ins_ = ins;
@ -357,6 +387,11 @@ class VirtualRegister
JS_ASSERT(numIntervals() > 0);
return getInterval(numIntervals() - 1);
}
void replaceInterval(LiveInterval *old, LiveInterval *interval) {
JS_ASSERT(intervals_[old->index()] == old);
interval->setIndex(old->index());
intervals_[old->index()] = interval;
}
bool addInterval(LiveInterval *interval) {
JS_ASSERT(interval->numRanges());
@ -371,6 +406,7 @@ class VirtualRegister
}
if (!found)
found = intervals_.end();
interval->setIndex(found - intervals_.begin());
return intervals_.insert(found, interval);
}
bool isDouble() const {
@ -469,11 +505,22 @@ class LiveRangeAllocator : public RegisterAllocator
// whether an interval intersects with a fixed register.
LiveInterval *fixedIntervalsUnion;
// Whether the underlying allocator is LSRA. This changes the generated
// live ranges in various ways: inserting additional fixed uses of
// registers, and shifting the boundaries of live ranges by small amounts.
// This exists because different allocators handle live ranges differently;
// ideally, they would all treat live ranges in the same way.
bool forLSRA;
// Allocation state
StackSlotAllocator stackSlotAllocator;
public:
LiveRangeAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
LiveRangeAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph, bool forLSRA)
: RegisterAllocator(mir, lir, graph),
liveIn(NULL),
fixedIntervalsUnion(NULL)
fixedIntervalsUnion(NULL),
forLSRA(forLSRA)
{
}
@ -511,6 +558,31 @@ class LiveRangeAllocator : public RegisterAllocator
}
#endif
}
#ifdef JS_NUNBOX32
VREG *otherHalfOfNunbox(VirtualRegister *vreg) {
signed offset = OffsetToOtherHalfOfNunbox(vreg->type());
VREG *other = &vregs[vreg->def()->virtualRegister() + offset];
AssertTypesFormANunbox(vreg->type(), other->type());
return other;
}
#endif
bool addMove(LMoveGroup *moves, LiveInterval *from, LiveInterval *to) {
if (*from->getAllocation() == *to->getAllocation())
return true;
return moves->add(from->getAllocation(), to->getAllocation());
}
bool moveInput(CodePosition pos, LiveInterval *from, LiveInterval *to) {
LMoveGroup *moves = getInputMoveGroup(pos);
return addMove(moves, from, to);
}
bool moveAfter(CodePosition pos, LiveInterval *from, LiveInterval *to) {
LMoveGroup *moves = getMoveGroupAfter(pos);
return addMove(moves, from, to);
}
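// (addMove/moveInput/moveAfter and otherHalfOfNunbox were hoisted into this
// shared base class from LinearScanAllocator -- see the corresponding
// deletions in LinearScan.cpp and LinearScan.h above -- so LSRA and the new
// backtracking allocator can both use them.)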
};
} // namespace ion

View File

@ -63,10 +63,19 @@ AllocationIntegrityState::record()
virtualRegisters[vreg] = ins->getDef(k);
if (!info.outputs.append(vreg))
return false;
if (ins->getDef(k)->policy() == LDefinition::MUST_REUSE_INPUT) {
JS_ASSERT(k == 0);
info.reusedInput = ins->getDef(k)->getReusedInput();
}
}
for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
if (!info.inputs.append(alloc->isUse() ? alloc->toUse()->virtualRegister() : UINT32_MAX))
return false;
if (alloc->isUse() && alloc->toUse()->policy() != LUse::RECOVERED_INPUT) {
if (!info.inputs.append(alloc->toUse()->virtualRegister()))
return false;
} else {
if (!info.inputs.append(UINT32_MAX))
return false;
}
}
}
}
@ -80,6 +89,9 @@ AllocationIntegrityState::check(bool populateSafepoints)
JS_ASSERT(!instructions.empty());
#ifdef DEBUG
if (IonSpewEnabled(IonSpew_RegAlloc))
dump();
for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
LBlock *block = graph.getBlock(blockIndex);
@ -100,6 +112,15 @@ AllocationIntegrityState::check(bool populateSafepoints)
LSafepoint *safepoint = ins->safepoint();
JS_ASSERT_IF(safepoint, !safepoint->liveRegs().has(def->output()->toRegister()));
}
uint32_t reusedInput = instructions[ins->id()].reusedInput;
JS_ASSERT_IF(reusedInput != UINT32_MAX,
*def->output() == *ins->getOperand(reusedInput));
}
for (size_t i = 0; i < ins->numTemps(); i++) {
LDefinition *temp = ins->getTemp(i);
JS_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister());
}
}
}
@ -141,9 +162,6 @@ AllocationIntegrityState::check(bool populateSafepoints)
}
}
if (IonSpewEnabled(IonSpew_RegAlloc))
dump();
return true;
}
@ -178,23 +196,19 @@ AllocationIntegrityState::checkIntegrity(LBlock *block, LInstruction *ins,
if (def->policy() == LDefinition::PASSTHROUGH)
continue;
if (info.outputs[i] == vreg) {
check(*def->output() == alloc,
"Found vreg definition, but tracked value does not match");
JS_ASSERT(*def->output() == alloc);
// Found the original definition, done scanning.
return true;
} else {
check(*def->output() != alloc,
"Tracked value clobbered by intermediate definition");
JS_ASSERT(*def->output() != alloc);
}
}
for (size_t i = 0; i < ins->numTemps(); i++) {
LDefinition *temp = ins->getTemp(i);
if (!temp->isBogusTemp()) {
check(*temp->output() != alloc,
"Tracked value clobbered by intermediate temporary");
}
if (!temp->isBogusTemp())
JS_ASSERT(*temp->output() != alloc);
}
LSafepoint *safepoint = ins->safepoint();
@ -206,7 +220,7 @@ AllocationIntegrityState::checkIntegrity(LBlock *block, LInstruction *ins,
if (populateSafepoints)
safepoint->addLiveRegister(reg);
else
check(safepoint->liveRegs().has(reg), "Register not marked in safepoint");
JS_ASSERT(safepoint->liveRegs().has(reg));
}
LDefinition::Type type = virtualRegisters[vreg]
@ -215,29 +229,42 @@ AllocationIntegrityState::checkIntegrity(LBlock *block, LInstruction *ins,
switch (type) {
case LDefinition::OBJECT:
if (populateSafepoints)
if (populateSafepoints) {
IonSpew(IonSpew_RegAlloc, "Safepoint object v%u i%u %s",
vreg, ins->id(), alloc.toString());
safepoint->addGcPointer(alloc);
else
check(safepoint->hasGcPointer(alloc), "GC register not marked in safepoint");
} else {
JS_ASSERT(safepoint->hasGcPointer(alloc));
}
break;
#ifdef JS_NUNBOX32
// If a vreg for a value's components are copied in multiple places
// Do not assert that safepoint information for nunboxes is complete,
// as, if the vregs for a value's components are copied in multiple places,
// then the safepoint information may be incomplete and not reflect
// all copies. See SafepointWriter::writeNunboxParts.
case LDefinition::TYPE:
if (populateSafepoints)
if (populateSafepoints) {
IonSpew(IonSpew_RegAlloc, "Safepoint type v%u i%u %s",
vreg, ins->id(), alloc.toString());
safepoint->addNunboxType(vreg, alloc);
}
break;
case LDefinition::PAYLOAD:
if (populateSafepoints)
if (populateSafepoints) {
IonSpew(IonSpew_RegAlloc, "Safepoint payload v%u i%u %s",
vreg, ins->id(), alloc.toString());
safepoint->addNunboxPayload(vreg, alloc);
}
break;
#else
case LDefinition::BOX:
if (populateSafepoints)
if (populateSafepoints) {
IonSpew(IonSpew_RegAlloc, "Safepoint boxed value v%u i%u %s",
vreg, ins->id(), alloc.toString());
safepoint->addBoxedValue(alloc);
else
check(safepoint->hasBoxedValue(alloc), "Boxed value not marked in safepoint");
} else {
JS_ASSERT(safepoint->hasBoxedValue(alloc));
}
break;
#endif
default:
@ -295,17 +322,6 @@ AllocationIntegrityState::addPredecessor(LBlock *block, uint32_t vreg, LAllocati
return worklist.append(item);
}
void
AllocationIntegrityState::check(bool cond, const char *msg)
{
if (!cond) {
if (IonSpewEnabled(IonSpew_RegAlloc))
dump();
printf("%s\n", msg);
JS_NOT_REACHED("Regalloc integrity failure");
}
}
void
AllocationIntegrityState::dump()
{
@ -335,16 +351,17 @@ AllocationIntegrityState::dump()
LInstruction *ins = *iter;
InstructionInfo &info = instructions[ins->id()];
printf("[%s]", ins->opName());
CodePosition input(ins->id(), CodePosition::INPUT);
CodePosition output(ins->id(), CodePosition::OUTPUT);
printf("[%u,%u %s]", input.pos(), output.pos(), ins->opName());
if (ins->isMoveGroup()) {
LMoveGroup *group = ins->toMoveGroup();
for (int i = group->numMoves() - 1; i >= 0; i--) {
printf(" [");
LAllocation::PrintAllocation(stdout, group->getMove(i).from());
printf(" -> ");
LAllocation::PrintAllocation(stdout, group->getMove(i).to());
printf("]");
// Use two printfs, as LAllocation::toString is not reentrant.
printf(" [%s", group->getMove(i).from()->toString());
printf(" -> %s]", group->getMove(i).to()->toString());
}
printf("\n");
continue;
@ -352,18 +369,13 @@ AllocationIntegrityState::dump()
for (size_t i = 0; i < ins->numTemps(); i++) {
LDefinition *temp = ins->getTemp(i);
if (!temp->isBogusTemp()) {
printf(" [temp ");
LAllocation::PrintAllocation(stdout, temp->output());
printf("]");
}
if (!temp->isBogusTemp())
printf(" [temp %s]", temp->output()->toString());
}
for (size_t i = 0; i < ins->numDefs(); i++) {
LDefinition *def = ins->getDef(i);
printf(" [def v%u ", info.outputs[i]);
LAllocation::PrintAllocation(stdout, def->output());
printf("]");
printf(" [def v%u %s]", info.outputs[i], def->output()->toString());
}
size_t index = 0;
@ -371,9 +383,7 @@ AllocationIntegrityState::dump()
uint32_t vreg = info.inputs[index++];
if (vreg == UINT32_MAX)
continue;
printf(" [use v%u ", vreg);
LAllocation::PrintAllocation(stdout, *alloc);
printf("]");
printf(" [use v%u %s]", vreg, alloc->toString());
}
printf("\n");
@ -396,9 +406,8 @@ AllocationIntegrityState::dump()
for (size_t i = 0; i < seenOrdered.length(); i++) {
IntegrityItem item = seenOrdered[i];
printf("block %u reg v%u alloc ", item.block->mir()->id(), item.vreg);
LAllocation::PrintAllocation(stdout, &item.alloc);
printf("\n");
printf("block %u reg v%u alloc %s\n",
item.block->mir()->id(), item.vreg, item.alloc.toString());
}
printf("\n");

View File

@ -57,8 +57,15 @@ struct AllocationIntegrityState
struct InstructionInfo {
Vector<uint32_t, 2, SystemAllocPolicy> inputs;
Vector<uint32_t, 1, SystemAllocPolicy> outputs;
InstructionInfo() {}
InstructionInfo(const InstructionInfo &o) {
uint32_t reusedInput;
InstructionInfo()
: reusedInput(UINT32_MAX)
{}
InstructionInfo(const InstructionInfo &o)
: reusedInput(o.reusedInput)
{
for (size_t i = 0; i < o.inputs.length(); i++)
inputs.append(o.inputs[i]);
for (size_t i = 0; i < o.outputs.length(); i++)
@ -116,7 +123,6 @@ struct AllocationIntegrityState
bool populateSafepoints);
bool addPredecessor(LBlock *block, uint32_t vreg, LAllocation alloc);
void check(bool cond, const char *msg);
void dump();
};

View File

@ -216,11 +216,14 @@ SafepointWriter::writeNunboxParts(LSafepoint *safepoint)
# ifdef DEBUG
if (IonSpewEnabled(IonSpew_Safepoints)) {
for (uint32_t i = 0; i < entries.length(); i++) {
SafepointNunboxEntry &entry = entries[i];
if (entry.type.isUse() || entry.payload.isUse())
continue;
IonSpewHeader(IonSpew_Safepoints);
fprintf(IonSpewFile, " nunbox (type in ");
DumpNunboxPart(entries[i].type);
DumpNunboxPart(entry.type);
fprintf(IonSpewFile, ", payload in ");
DumpNunboxPart(entries[i].payload);
DumpNunboxPart(entry.payload);
fprintf(IonSpewFile, ")\n");
}
}

View File

@ -2848,6 +2848,8 @@ ScriptAnalysis::addSingletonTypeBarrier(JSContext *cx, const jsbytecode *pc, Typ
void
TypeCompartment::print(JSContext *cx, bool force)
{
gc::AutoSuppressGC suppressGC(cx);
JSCompartment *compartment = this->compartment();
AutoEnterAnalysis enter(compartment);

View File

@ -4849,6 +4849,8 @@ ProcessArgs(JSContext *cx, JSObject *obj_, OptionParser *op)
if (const char *str = op->getStringOption("ion-regalloc")) {
if (strcmp(str, "lsra") == 0)
ion::js_IonOptions.registerAllocator = ion::RegisterAllocator_LSRA;
else if (strcmp(str, "backtracking") == 0)
ion::js_IonOptions.registerAllocator = ion::RegisterAllocator_Backtracking;
else if (strcmp(str, "stupid") == 0)
ion::js_IonOptions.registerAllocator = ion::RegisterAllocator_Stupid;
else
@ -5080,7 +5082,8 @@ main(int argc, char **argv, char **envp)
|| !op.addStringOption('\0', "ion-regalloc", "[mode]",
"Specify Ion register allocation:\n"
" lsra: Linear Scan register allocation (default)\n"
" stupid: Simple greedy register allocation")
" backtracking: Priority based backtracking register allocation\n"
" stupid: Simple block local register allocation")
|| !op.addBoolOption('\0', "ion-eager", "Always ion-compile methods")
#ifdef JS_THREADSAFE
|| !op.addStringOption('\0', "ion-parallel-compile", "on/off",
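With the shell option above in place, the new allocator can be selected at the command line. Assuming an ordinary js shell build (script.js is a placeholder for any test input), the invocation would look like:

    js --ion-regalloc=backtracking script.js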