Bug 1067610 - Refactor backtracking allocator to handle grouped registers better, r=sunfish.

Brian Hackett 2015-05-18 20:20:14 -06:00
parent 58834fa8d7
commit 462ad33e02
26 changed files with 2565 additions and 3279 deletions


@@ -13,7 +13,14 @@ function recur(n, limit) {
function checkRecursion(n, limit) {
print("checkRecursion(" + uneval(n) + ", " + uneval(limit) + ")");
var stack = recur(n, limit);
try {
var stack = recur(n, limit);
} catch (e) {
// Some platforms, like ASAN builds, can end up overrecursing. Tolerate
// these failures.
assertEq(/too much recursion/.test("" + e), true);
return;
}
// Async stacks are limited even if we didn't ask for a limit. There is a
// default limit on frames attached on top of any synchronous frames. In this

File diff suppressed because it is too large


@@ -11,7 +11,8 @@
#include "ds/PriorityQueue.h"
#include "ds/SplayTree.h"
#include "jit/LiveRangeAllocator.h"
#include "jit/RegisterAllocator.h"
#include "jit/StackSlotAllocator.h"
// Backtracking priority queue based register allocator based on that described
// in the following blog post:
@@ -21,55 +22,489 @@
namespace js {
namespace jit {
// Information about a group of registers. Registers may be grouped together
// when (a) all of their lifetimes are disjoint, (b) they are of the same type
// (double / non-double) and (c) it is desirable that they have the same
// allocation.
struct VirtualRegisterGroup : public TempObject
class Requirement
{
// All virtual registers in the group.
Vector<uint32_t, 2, JitAllocPolicy> registers;
public:
enum Kind {
NONE,
REGISTER,
FIXED,
MUST_REUSE_INPUT
};
// Desired physical register to use for registers in the group.
LAllocation allocation;
Requirement()
: kind_(NONE)
{ }
// Spill location to be shared by registers in the group.
LAllocation spill;
explicit Requirement(Kind kind)
: kind_(kind)
{
// These have dedicated constructors.
MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
}
explicit VirtualRegisterGroup(TempAllocator& alloc)
: registers(alloc), allocation(LUse(0, LUse::ANY)), spill(LUse(0, LUse::ANY))
{}
Requirement(Kind kind, CodePosition at)
: kind_(kind),
position_(at)
{
// These have dedicated constructors.
MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
}
uint32_t canonicalReg() {
uint32_t minimum = registers[0];
for (size_t i = 1; i < registers.length(); i++)
minimum = Min(minimum, registers[i]);
return minimum;
explicit Requirement(LAllocation fixed)
: kind_(FIXED),
allocation_(fixed)
{
MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
}
// Only useful as a hint, encodes where the fixed requirement is used to
// avoid allocating a fixed register too early.
Requirement(LAllocation fixed, CodePosition at)
: kind_(FIXED),
allocation_(fixed),
position_(at)
{
MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
}
Requirement(uint32_t vreg, CodePosition at)
: kind_(MUST_REUSE_INPUT),
allocation_(LUse(vreg, LUse::ANY)),
position_(at)
{ }
Kind kind() const {
return kind_;
}
LAllocation allocation() const {
MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
return allocation_;
}
uint32_t virtualRegister() const {
MOZ_ASSERT(allocation_.isUse());
MOZ_ASSERT(kind() == MUST_REUSE_INPUT);
return allocation_.toUse()->virtualRegister();
}
CodePosition pos() const {
return position_;
}
int priority() const;
bool merge(const Requirement& newRequirement) {
// Merge newRequirement with any existing requirement, returning false
// if the new and old requirements conflict.
MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT);
if (newRequirement.kind() == Requirement::FIXED) {
if (kind() == Requirement::FIXED)
return newRequirement.allocation() == allocation();
*this = newRequirement;
return true;
}
MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
if (kind() == Requirement::FIXED)
return allocation().isRegister();
*this = newRequirement;
return true;
}
void dump() const;
private:
Kind kind_;
LAllocation allocation_;
CodePosition position_;
};
struct UsePosition : public TempObject,
public InlineForwardListNode<UsePosition>
{
LUse* use;
CodePosition pos;
UsePosition(LUse* use, CodePosition pos) :
use(use),
pos(pos)
{
// Verify that the usedAtStart() flag is consistent with the
// subposition. For now ignore fixed registers, because they
// are handled specially around calls.
MOZ_ASSERT_IF(!use->isFixedRegister(),
pos.subpos() == (use->usedAtStart()
? CodePosition::INPUT
: CodePosition::OUTPUT));
}
};
class BacktrackingVirtualRegister : public VirtualRegister
typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
// Backtracking allocator data structures overview.
//
// LiveRange: A continuous range of positions where a virtual register is live.
// LiveBundle: A set of LiveRanges which do not overlap.
// VirtualRegister: A set of all LiveRanges used for some LDefinition.
//
// The allocator first performs a liveness analysis on the LIR graph which
// constructs LiveRanges for each VirtualRegister, determining where the
// registers are live.
//
// The ranges are then bundled together according to heuristics, and placed on
// the allocation queue.
//
// As bundles are removed from the allocation queue, we attempt to find a
// physical register or stack slot allocation for all ranges in the removed
// bundle, possibly evicting already-allocated bundles. See processBundle()
// for details.
//
// If we are not able to allocate a bundle, it is split according to heuristics
// into two or more smaller bundles which cover all the ranges of the original.
// These smaller bundles are then allocated independently.
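To make the queue-driven flow described above concrete, here is a minimal, self-contained sketch of the same allocate/split loop. It is illustrative only and not part of this patch; Bundle, tryAllocate and split are hypothetical stand-ins for LiveBundle, processBundle() and the real splitting heuristics, and eviction of already-allocated bundles is omitted.

#include <queue>
#include <vector>

// Illustrative sketch, not part of this patch.
struct Bundle { int priority; };

static bool tryAllocate(const Bundle&) { return false; }   // pretend every attempt fails
static std::vector<Bundle> split(const Bundle& b) {        // break a bundle into smaller pieces
    return { Bundle{ b.priority / 2 }, Bundle{ b.priority / 2 } };
}

int main() {
    auto higherPriorityFirst = [](const Bundle& a, const Bundle& b) { return a.priority < b.priority; };
    std::priority_queue<Bundle, std::vector<Bundle>, decltype(higherPriorityFirst)>
        queue(higherPriorityFirst);
    queue.push(Bundle{ 8 });

    while (!queue.empty()) {
        Bundle bundle = queue.top();
        queue.pop();
        if (tryAllocate(bundle))
            continue;                    // found a register or stack slot
        if (bundle.priority == 0)
            continue;                    // minimal bundle: would be spilled, not split further
        for (const Bundle& piece : split(bundle))
            queue.push(piece);           // allocate the smaller pieces independently
    }
}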
class LiveBundle;
class LiveRange : public TempObject
{
public:
// Linked lists are used to keep track of the ranges in each LiveBundle and
// VirtualRegister. Since a LiveRange may be in two lists simultaneously, use
// these auxiliary classes to keep things straight.
class BundleLink : public InlineForwardListNode<BundleLink> {};
class RegisterLink : public InlineForwardListNode<RegisterLink> {};
typedef InlineForwardListIterator<BundleLink> BundleLinkIterator;
typedef InlineForwardListIterator<RegisterLink> RegisterLinkIterator;
// Links in the lists in LiveBundle and VirtualRegister.
BundleLink bundleLink;
RegisterLink registerLink;
static LiveRange* get(BundleLink* link) {
return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
offsetof(LiveRange, bundleLink));
}
static LiveRange* get(RegisterLink* link) {
return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
offsetof(LiveRange, registerLink));
}
struct Range
{
// The beginning of this range, inclusive.
CodePosition from;
// The end of this range, exclusive.
CodePosition to;
Range() {}
Range(CodePosition from, CodePosition to)
: from(from), to(to)
{
MOZ_ASSERT(!empty());
}
bool empty() {
MOZ_ASSERT(from <= to);
return from == to;
}
};
private:
// The virtual register this range is for, or zero if this does not have a
// virtual register (for example, it is in the callRanges bundle).
uint32_t vreg_;
// The bundle containing this range, null if liveness information is being
// constructed and we haven't started allocating bundles yet.
LiveBundle* bundle_;
// The code positions in this range.
Range range_;
// All uses of the virtual register in this range, ordered by location.
InlineForwardList<UsePosition> uses_;
// Whether this range contains the virtual register's definition.
bool hasDefinition_;
LiveRange(uint32_t vreg, Range range)
: vreg_(vreg), bundle_(nullptr), range_(range), hasDefinition_(false)
{
MOZ_ASSERT(!range.empty());
}
public:
static LiveRange* New(TempAllocator& alloc, uint32_t vreg,
CodePosition from, CodePosition to) {
return new(alloc) LiveRange(vreg, Range(from, to));
}
uint32_t vreg() const {
MOZ_ASSERT(hasVreg());
return vreg_;
}
bool hasVreg() const {
return vreg_ != 0;
}
LiveBundle* bundle() const {
return bundle_;
}
CodePosition from() const {
return range_.from;
}
CodePosition to() const {
return range_.to;
}
bool covers(CodePosition pos) const {
return pos >= from() && pos < to();
}
// Whether this range wholly contains other.
bool contains(LiveRange* other) const;
// Intersect this range with other, returning the subranges of this
// that are before, inside, or after other.
void intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const;
// Whether this range has any intersection with other.
bool intersects(LiveRange* other) const;
UsePositionIterator usesBegin() const {
return uses_.begin();
}
UsePosition* lastUse() const {
return uses_.back();
}
bool hasUses() const {
return !!usesBegin();
}
UsePosition* popUse() {
return uses_.popFront();
}
bool hasDefinition() const {
return hasDefinition_;
}
void setFrom(CodePosition from) {
range_.from = from;
MOZ_ASSERT(!range_.empty());
}
void setTo(CodePosition to) {
range_.to = to;
MOZ_ASSERT(!range_.empty());
}
void setBundle(LiveBundle* bundle) {
bundle_ = bundle;
}
void addUse(UsePosition* use);
void distributeUses(LiveRange* other);
void setHasDefinition() {
MOZ_ASSERT(!hasDefinition_);
hasDefinition_ = true;
}
// Return a string describing this range. This is not re-entrant!
#ifdef DEBUG
const char* toString() const;
#else
const char* toString() const { return "???"; }
#endif
// Comparator for use in range splay trees.
static int compare(LiveRange* v0, LiveRange* v1) {
// LiveRange includes 'from' but excludes 'to'.
if (v0->to() <= v1->from())
return -1;
if (v0->from() >= v1->to())
return 1;
return 0;
}
};
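A note on the comparator above: because any two overlapping ranges compare as equal, a splay tree ordered by LiveRange::compare behaves as an interval set, and looking up a probe range finds whichever stored range intersects it. A tiny stand-alone illustration of that ordering rule, using plain integers instead of CodePosition (ProbeRange and CompareRanges are hypothetical names):

#include <cassert>

// Illustrative sketch, not part of this patch.
struct ProbeRange { int from, to; };   // [from, to), like LiveRange

// Same ordering rule as LiveRange::compare: disjoint ranges order by position,
// overlapping ranges compare as equal.
static int CompareRanges(const ProbeRange& a, const ProbeRange& b) {
    if (a.to <= b.from)
        return -1;
    if (a.from >= b.to)
        return 1;
    return 0;
}

int main() {
    ProbeRange allocated{ 10, 20 };
    ProbeRange inside{ 15, 16 };       // overlaps [10, 20), so "equal" to it
    ProbeRange after{ 20, 25 };        // 'to' is exclusive, so no overlap
    assert(CompareRanges(allocated, inside) == 0);
    assert(CompareRanges(allocated, after) == -1);
    return 0;
}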
// Tracks information about bundles that should all be spilled to the same
// physical location. At the beginning of allocation, each bundle has its own
// spill set. As bundles are split, the new smaller bundles continue to use the
// same spill set.
class SpillSet : public TempObject
{
// All bundles with this spill set which have been spilled. All bundles in
// this list will be given the same physical slot.
Vector<LiveBundle*, 1, JitAllocPolicy> list_;
explicit SpillSet(TempAllocator& alloc)
: list_(alloc)
{ }
public:
static SpillSet* New(TempAllocator& alloc) {
return new(alloc) SpillSet(alloc);
}
bool addSpilledBundle(LiveBundle* bundle) {
return list_.append(bundle);
}
size_t numSpilledBundles() const {
return list_.length();
}
LiveBundle* spilledBundle(size_t i) const {
return list_[i];
}
void setAllocation(LAllocation alloc);
};
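As a sketch of the invariant described above (every bundle split off from an original keeps pointing at the same SpillSet, so all spilled pieces end up sharing one stack slot), here is a simplified, self-contained illustration. SketchSpillSet, SketchBundle and Slot are hypothetical stand-ins; the real code records the slot via setAllocation() on the whole set rather than through a shared_ptr.

#include <cassert>
#include <memory>

// Illustrative sketch, not part of this patch.
struct Slot { int index; };
struct SketchSpillSet { Slot slot{ -1 }; };
struct SketchBundle { std::shared_ptr<SketchSpillSet> spill; };

int main() {
    auto spill = std::make_shared<SketchSpillSet>();
    SketchBundle original{ spill };

    // Splitting produces new, smaller bundles that keep the original spill set.
    SketchBundle left{ original.spill };
    SketchBundle right{ original.spill };

    // When any spilled piece is finally given a stack slot, the whole set gets it.
    left.spill->slot = Slot{ 4 };
    assert(right.spill->slot.index == 4);
    return 0;
}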
// A set of live ranges which are all pairwise disjoint. The register allocator
// attempts to find allocations for an entire bundle, and if it fails the
// bundle will be broken into smaller ones which are allocated independently.
class LiveBundle : public TempObject
{
// Set to use if this bundle or one it is split into is spilled.
SpillSet* spill_;
// All the ranges in this set, ordered by location.
InlineForwardList<LiveRange::BundleLink> ranges_;
// Allocation to use for ranges in this set, bogus if unallocated or spilled
// and not yet given a physical stack slot.
LAllocation alloc_;
// Bundle which entirely contains this one and has no register uses. This
// may or may not be spilled by the allocator, but it can be spilled and
// will not be split.
LiveBundle* spillParent_;
LiveBundle(SpillSet* spill, LiveBundle* spillParent)
: spill_(spill), spillParent_(spillParent)
{ }
public:
static LiveBundle* New(TempAllocator& alloc, SpillSet* spill, LiveBundle* spillParent) {
return new(alloc) LiveBundle(spill, spillParent);
}
SpillSet* spillSet() const {
return spill_;
}
void setSpillSet(SpillSet* spill) {
spill_ = spill;
}
LiveRange::BundleLinkIterator rangesBegin() const {
return ranges_.begin();
}
bool hasRanges() const {
return !!rangesBegin();
}
LiveRange* firstRange() const {
return LiveRange::get(*rangesBegin());
}
LiveRange* lastRange() const {
return LiveRange::get(ranges_.back());
}
LiveRange* rangeFor(CodePosition pos) const;
void removeRange(LiveRange* range);
void removeRangeAndIncrementIterator(LiveRange::BundleLinkIterator& iter) {
ranges_.removeAndIncrement(iter);
}
void addRange(LiveRange* range);
bool addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to);
bool addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange,
CodePosition from, CodePosition to);
LiveRange* popFirstRange();
#ifdef DEBUG
size_t numRanges() const;
#endif
LAllocation allocation() const {
return alloc_;
}
void setAllocation(LAllocation alloc) {
alloc_ = alloc;
}
LiveBundle* spillParent() const {
return spillParent_;
}
// Return a string describing this bundle. This is not re-entrant!
#ifdef DEBUG
const char* toString() const;
#else
const char* toString() const { return "???"; }
#endif
};
// Information about the allocation for a virtual register.
class VirtualRegister
{
// Instruction which defines this register.
LNode* ins_;
// Definition in the instruction for this register.
LDefinition* def_;
// All live ranges for this register. These may overlap each other, and are
// ordered by their start position.
InlineForwardList<LiveRange::RegisterLink> ranges_;
// Whether def_ is a temp or an output.
bool isTemp_;
// If this register's definition is MUST_REUSE_INPUT, whether a copy must
// be introduced before the definition that relaxes the policy.
bool mustCopyInput_;
// Spill location to use for this register.
LAllocation canonicalSpill_;
// Code position above which the canonical spill cannot be used; such
// intervals may overlap other registers in the same group.
CodePosition canonicalSpillExclude_;
// If this register is associated with a group of other registers,
// information about the group. This structure is shared between all
// registers in the group.
VirtualRegisterGroup* group_;
void operator=(const VirtualRegister&) = delete;
VirtualRegister(const VirtualRegister&) = delete;
public:
explicit BacktrackingVirtualRegister(TempAllocator& alloc)
: VirtualRegister(alloc)
{}
explicit VirtualRegister()
{
// Note: This class is zeroed before it is constructed.
}
void init(LNode* ins, LDefinition* def, bool isTemp) {
MOZ_ASSERT(!ins_);
ins_ = ins;
def_ = def;
isTemp_ = isTemp;
}
LNode* ins() const {
return ins_;
}
LDefinition* def() const {
return def_;
}
LDefinition::Type type() const {
return def()->type();
}
uint32_t vreg() const {
return def()->virtualRegister();
}
bool isCompatible(const AnyRegister& r) const {
return def_->isCompatibleReg(r);
}
bool isCompatible(const VirtualRegister& vr) const {
return def_->isCompatibleDef(*vr.def_);
}
bool isTemp() const {
return isTemp_;
}
void setMustCopyInput() {
mustCopyInput_ = true;
}
@@ -77,56 +512,56 @@ class BacktrackingVirtualRegister : public VirtualRegister
return mustCopyInput_;
}
void setCanonicalSpill(LAllocation alloc) {
MOZ_ASSERT(!alloc.isUse());
canonicalSpill_ = alloc;
LiveRange::RegisterLinkIterator rangesBegin() const {
return ranges_.begin();
}
const LAllocation* canonicalSpill() const {
return canonicalSpill_.isBogus() ? nullptr : &canonicalSpill_;
bool hasRanges() const {
return !!rangesBegin();
}
LiveRange* firstRange() const {
return LiveRange::get(*rangesBegin());
}
LiveRange* lastRange() const {
return LiveRange::get(ranges_.back());
}
LiveRange* rangeFor(CodePosition pos) const;
void removeRange(LiveRange* range);
void addRange(LiveRange* range);
LiveBundle* firstBundle() const {
return firstRange()->bundle();
}
void setCanonicalSpillExclude(CodePosition pos) {
canonicalSpillExclude_ = pos;
}
bool hasCanonicalSpillExclude() const {
return canonicalSpillExclude_.bits() != 0;
}
CodePosition canonicalSpillExclude() const {
MOZ_ASSERT(hasCanonicalSpillExclude());
return canonicalSpillExclude_;
}
void setGroup(VirtualRegisterGroup* group) {
group_ = group;
}
VirtualRegisterGroup* group() {
return group_;
}
bool addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to);
void addInitialUse(UsePosition* use);
void setInitialDefinition(CodePosition from);
};
// A sequence of code positions, for telling BacktrackingAllocator::splitAt
// where to split.
typedef js::Vector<CodePosition, 4, SystemAllocPolicy> SplitPositionVector;
class BacktrackingAllocator
: private LiveRangeAllocator<BacktrackingVirtualRegister>
class BacktrackingAllocator : protected RegisterAllocator
{
friend class C1Spewer;
friend class JSONSpewer;
// Priority queue element: either an interval or group of intervals and the
// associated priority.
BitSet* liveIn;
FixedList<VirtualRegister> vregs;
// Ranges where all registers must be spilled due to call instructions.
LiveBundle* callRanges;
// Allocation state.
StackSlotAllocator stackSlotAllocator;
// Priority queue element: a bundle and the associated priority.
struct QueueItem
{
LiveInterval* interval;
VirtualRegisterGroup* group;
LiveBundle* bundle;
QueueItem(LiveInterval* interval, size_t priority)
: interval(interval), group(nullptr), priority_(priority)
{}
QueueItem(VirtualRegisterGroup* group, size_t priority)
: interval(nullptr), group(group), priority_(priority)
QueueItem(LiveBundle* bundle, size_t priority)
: bundle(bundle), priority_(priority)
{}
static size_t priority(const QueueItem& v) {
@@ -139,37 +574,14 @@ class BacktrackingAllocator
PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;
// A subrange over which a physical register is allocated.
struct AllocatedRange {
LiveInterval* interval;
const LiveInterval::Range* range;
AllocatedRange()
: interval(nullptr), range(nullptr)
{}
AllocatedRange(LiveInterval* interval, const LiveInterval::Range* range)
: interval(interval), range(range)
{}
static int compare(const AllocatedRange& v0, const AllocatedRange& v1) {
// LiveInterval::Range includes 'from' but excludes 'to'.
if (v0.range->to <= v1.range->from)
return -1;
if (v0.range->from >= v1.range->to)
return 1;
return 0;
}
};
typedef SplayTree<AllocatedRange, AllocatedRange> AllocatedRangeSet;
typedef SplayTree<LiveRange*, LiveRange> LiveRangeSet;
// Each physical register is associated with the set of ranges over which
// that register is currently allocated.
struct PhysicalRegister {
bool allocatable;
AnyRegister reg;
AllocatedRangeSet allocations;
LiveRangeSet allocations;
PhysicalRegister() : allocatable(false) {}
};
@@ -177,16 +589,12 @@ class BacktrackingAllocator
// Ranges of code which are considered to be hot, for which good allocation
// should be prioritized.
AllocatedRangeSet hotcode;
// During register allocation, virtual stack slots are used for spills.
// These are converted to actual spill locations
size_t numVirtualStackSlots;
LiveRangeSet hotcode;
// Information about an allocated stack slot.
struct SpillSlot : public TempObject, public InlineForwardListNode<SpillSlot> {
LStackSlot alloc;
AllocatedRangeSet allocated;
LiveRangeSet allocated;
SpillSlot(uint32_t slot, LifoAlloc* alloc)
: alloc(slot), allocated(alloc)
@@ -199,93 +607,130 @@ class BacktrackingAllocator
public:
BacktrackingAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
: LiveRangeAllocator<BacktrackingVirtualRegister>(mir, lir, graph),
numVirtualStackSlots(0)
: RegisterAllocator(mir, lir, graph),
liveIn(nullptr),
callRanges(nullptr)
{ }
bool go();
private:
typedef Vector<LiveInterval*, 4, SystemAllocPolicy> LiveIntervalVector;
typedef Vector<LiveRange*, 4, SystemAllocPolicy> LiveRangeVector;
typedef Vector<LiveBundle*, 4, SystemAllocPolicy> LiveBundleVector;
// Liveness methods.
bool init();
bool canAddToGroup(VirtualRegisterGroup* group, BacktrackingVirtualRegister* reg);
bool tryGroupRegisters(uint32_t vreg0, uint32_t vreg1);
bool tryGroupReusedRegister(uint32_t def, uint32_t use);
bool groupAndQueueRegisters();
bool tryAllocateFixed(LiveInterval* interval, bool* success, bool* pfixed,
LiveIntervalVector& conflicting);
bool tryAllocateNonFixed(LiveInterval* interval, bool* success, bool* pfixed,
LiveIntervalVector& conflicting);
bool processInterval(LiveInterval* interval);
bool processGroup(VirtualRegisterGroup* group);
bool setIntervalRequirement(LiveInterval* interval);
bool tryAllocateRegister(PhysicalRegister& r, LiveInterval* interval,
bool* success, bool* pfixed, LiveIntervalVector& conflicting);
bool tryAllocateGroupRegister(PhysicalRegister& r, VirtualRegisterGroup* group,
bool* psuccess, bool* pfixed, LiveInterval** pconflicting);
bool evictInterval(LiveInterval* interval);
void distributeUses(LiveInterval* interval, const LiveIntervalVector& newIntervals);
bool split(LiveInterval* interval, const LiveIntervalVector& newIntervals);
bool requeueIntervals(const LiveIntervalVector& newIntervals);
void spill(LiveInterval* interval);
bool buildLivenessInfo();
bool addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to);
VirtualRegister& vreg(const LDefinition* def) {
return vregs[def->virtualRegister()];
}
VirtualRegister& vreg(const LAllocation* alloc) {
MOZ_ASSERT(alloc->isUse());
return vregs[alloc->toUse()->virtualRegister()];
}
// Allocation methods.
bool tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1);
bool tryMergeReusedRegister(VirtualRegister& def, VirtualRegister& input);
bool mergeAndQueueRegisters();
bool tryAllocateFixed(LiveBundle* bundle, Requirement requirement,
bool* success, bool* pfixed, LiveBundleVector& conflicting);
bool tryAllocateNonFixed(LiveBundle* bundle, Requirement requirement, Requirement hint,
bool* success, bool* pfixed, LiveBundleVector& conflicting);
bool processBundle(LiveBundle* bundle);
bool computeRequirement(LiveBundle* bundle, Requirement *prequirement, Requirement *phint);
bool tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle,
bool* success, bool* pfixed, LiveBundleVector& conflicting);
bool evictBundle(LiveBundle* bundle);
bool splitAndRequeueBundles(LiveBundle* bundle, const LiveBundleVector& newBundles);
bool spill(LiveBundle* bundle);
bool isReusedInput(LUse* use, LNode* ins, bool considerCopy);
bool isRegisterUse(LUse* use, LNode* ins, bool considerCopy = false);
bool isRegisterDefinition(LiveInterval* interval);
bool addLiveInterval(LiveIntervalVector& intervals, uint32_t vreg,
LiveInterval* spillInterval,
CodePosition from, CodePosition to);
bool pickStackSlot(LiveInterval* interval);
bool reuseOrAllocateStackSlot(const LiveIntervalVector& intervals, LDefinition::Type type,
LAllocation* palloc);
bool insertAllRanges(AllocatedRangeSet& set, const LiveIntervalVector& intervals);
bool isRegisterDefinition(LiveRange* range);
bool pickStackSlot(SpillSet* spill);
bool insertAllRanges(LiveRangeSet& set, LiveBundle* bundle);
// Reification methods.
bool pickStackSlots();
bool resolveControlFlow();
bool reifyAllocations();
bool populateSafepoints();
bool annotateMoveGroups();
size_t findFirstNonCallSafepoint(CodePosition from);
size_t findFirstSafepoint(CodePosition pos, size_t startFrom);
void addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range);
void dumpRegisterGroups();
bool addMove(LMoveGroup* moves, LiveRange* from, LiveRange* to, LDefinition::Type type) {
LAllocation fromAlloc = from->bundle()->allocation();
LAllocation toAlloc = to->bundle()->allocation();
MOZ_ASSERT(fromAlloc != toAlloc);
return moves->add(fromAlloc, toAlloc, type);
}
bool moveInput(LInstruction* ins, LiveRange* from, LiveRange* to, LDefinition::Type type) {
if (from->bundle()->allocation() == to->bundle()->allocation())
return true;
LMoveGroup* moves = getInputMoveGroup(ins);
return addMove(moves, from, to, type);
}
bool moveAfter(LInstruction* ins, LiveRange* from, LiveRange* to, LDefinition::Type type) {
if (from->bundle()->allocation() == to->bundle()->allocation())
return true;
LMoveGroup* moves = getMoveGroupAfter(ins);
return addMove(moves, from, to, type);
}
bool moveAtExit(LBlock* block, LiveRange* from, LiveRange* to, LDefinition::Type type) {
if (from->bundle()->allocation() == to->bundle()->allocation())
return true;
LMoveGroup* moves = block->getExitMoveGroup(alloc());
return addMove(moves, from, to, type);
}
bool moveAtEntry(LBlock* block, LiveRange* from, LiveRange* to, LDefinition::Type type) {
if (from->bundle()->allocation() == to->bundle()->allocation())
return true;
LMoveGroup* moves = block->getEntryMoveGroup(alloc());
return addMove(moves, from, to, type);
}
// Debugging methods.
void dumpFixedRanges();
void dumpAllocations();
struct PrintLiveIntervalRange;
struct PrintLiveRange;
bool minimalDef(const LiveInterval* interval, LNode* ins);
bool minimalUse(const LiveInterval* interval, LNode* ins);
bool minimalInterval(const LiveInterval* interval, bool* pfixed = nullptr);
bool minimalDef(LiveRange* range, LNode* ins);
bool minimalUse(LiveRange* range, LNode* ins);
bool minimalBundle(LiveBundle* bundle, bool* pfixed = nullptr);
// Heuristic methods.
size_t computePriority(const LiveInterval* interval);
size_t computeSpillWeight(const LiveInterval* interval);
size_t computePriority(LiveBundle* bundle);
size_t computeSpillWeight(LiveBundle* bundle);
size_t computePriority(const VirtualRegisterGroup* group);
size_t computeSpillWeight(const VirtualRegisterGroup* group);
size_t maximumSpillWeight(const LiveBundleVector& bundles);
size_t maximumSpillWeight(const LiveIntervalVector& intervals);
bool chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict);
bool chooseIntervalSplit(LiveInterval* interval, bool fixed, LiveInterval* conflict);
bool splitAt(LiveInterval* interval,
bool splitAt(LiveBundle* bundle,
const SplitPositionVector& splitPositions);
bool trySplitAcrossHotcode(LiveInterval* interval, bool* success);
bool trySplitAfterLastRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success);
bool trySplitBeforeFirstRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success);
bool splitAtAllRegisterUses(LiveInterval* interval);
bool splitAcrossCalls(LiveInterval* interval);
bool trySplitAcrossHotcode(LiveBundle* bundle, bool* success);
bool trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success);
bool trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success);
bool splitAcrossCalls(LiveBundle* bundle);
bool compilingAsmJS() {
return mir->info().compilingAsmJS();
}
bool isVirtualStackSlot(LAllocation alloc) {
return alloc.isStackSlot() &&
LAllocation::DATA_MASK - alloc.toStackSlot()->slot() < numVirtualStackSlots;
}
void dumpVregs();
};
} // namespace jit


@@ -63,19 +63,18 @@ C1Spewer::spewPass(const char* pass)
}
void
C1Spewer::spewIntervals(const char* pass, BacktrackingAllocator* regalloc)
C1Spewer::spewRanges(const char* pass, BacktrackingAllocator* regalloc)
{
if (!spewout_)
return;
fprintf(spewout_, "begin_intervals\n");
fprintf(spewout_, "begin_ranges\n");
fprintf(spewout_, " name \"%s\"\n", pass);
size_t nextId = 0x4000;
for (MBasicBlockIterator block(graph->begin()); block != graph->end(); block++)
spewIntervals(spewout_, *block, regalloc, nextId);
spewRanges(spewout_, *block, regalloc);
fprintf(spewout_, "end_intervals\n");
fprintf(spewout_, "end_ranges\n");
fflush(spewout_);
}
@@ -112,42 +111,37 @@ DumpLIR(FILE* fp, LNode* ins)
}
void
C1Spewer::spewIntervals(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins, size_t& nextId)
C1Spewer::spewRanges(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins)
{
for (size_t k = 0; k < ins->numDefs(); k++) {
uint32_t id = ins->getDef(k)->virtualRegister();
VirtualRegister* vreg = &regalloc->vregs[id];
for (size_t i = 0; i < vreg->numIntervals(); i++) {
LiveInterval* live = vreg->getInterval(i);
if (live->numRanges()) {
fprintf(fp, "%d object \"", (i == 0) ? id : int32_t(nextId++));
fprintf(fp, "%s", live->getAllocation()->toString());
fprintf(fp, "\" %d -1", id);
for (size_t j = 0; j < live->numRanges(); j++) {
fprintf(fp, " [%u, %u[", live->getRange(j)->from.bits(),
live->getRange(j)->to.bits());
}
for (UsePositionIterator usePos(live->usesBegin()); usePos != live->usesEnd(); usePos++)
fprintf(fp, " %u M", usePos->pos.bits());
fprintf(fp, " \"\"\n");
}
for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) {
LiveRange* range = LiveRange::get(*iter);
fprintf(fp, "%d object \"", id);
fprintf(fp, "%s", range->bundle()->allocation().toString());
fprintf(fp, "\" %d -1", id);
fprintf(fp, " [%u, %u[", range->from().bits(), range->to().bits());
for (UsePositionIterator usePos(range->usesBegin()); usePos; usePos++)
fprintf(fp, " %u M", usePos->pos.bits());
fprintf(fp, " \"\"\n");
}
}
}
void
C1Spewer::spewIntervals(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc, size_t& nextId)
C1Spewer::spewRanges(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc)
{
LBlock* lir = block->lir();
if (!lir)
return;
for (size_t i = 0; i < lir->numPhis(); i++)
spewIntervals(fp, regalloc, lir->getPhi(i), nextId);
spewRanges(fp, regalloc, lir->getPhi(i));
for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++)
spewIntervals(fp, regalloc, *ins, nextId);
spewRanges(fp, regalloc, *ins);
}
void


@@ -34,14 +34,14 @@ class C1Spewer
bool init(const char* path);
void beginFunction(MIRGraph* graph, HandleScript script);
void spewPass(const char* pass);
void spewIntervals(const char* pass, BacktrackingAllocator* regalloc);
void spewRanges(const char* pass, BacktrackingAllocator* regalloc);
void endFunction();
void finish();
private:
void spewPass(FILE* fp, MBasicBlock* block);
void spewIntervals(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins, size_t& nextId);
void spewIntervals(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc, size_t& nextId);
void spewRanges(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins);
void spewRanges(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc);
};
} // namespace jit


@@ -2120,13 +2120,13 @@ CodeGenerator::visitMoveGroup(LMoveGroup* group)
for (size_t i = 0; i < group->numMoves(); i++) {
const LMove& move = group->getMove(i);
const LAllocation* from = move.from();
const LAllocation* to = move.to();
LAllocation from = move.from();
LAllocation to = move.to();
LDefinition::Type type = move.type();
// No bogus moves.
MOZ_ASSERT(*from != *to);
MOZ_ASSERT(!from->isConstant());
MOZ_ASSERT(from != to);
MOZ_ASSERT(!from.isConstant());
MoveOp::Type moveType;
switch (type) {
case LDefinition::OBJECT:


@@ -68,11 +68,6 @@ class InlineForwardList : protected InlineForwardListNode<T>
return iterator(nullptr);
}
void removeAt(iterator where) {
iterator iter(where);
iter++;
#ifdef DEBUG
iter.modifyCount_++;
#endif
removeAfter(where.prev, where.iter);
}
void pushFront(Node* t) {
@@ -92,7 +87,7 @@ class InlineForwardList : protected InlineForwardListNode<T>
removeAfter(this, result);
return result;
}
T* back() {
T* back() const {
MOZ_ASSERT(!empty());
return static_cast<T*>(tail_);
}
@@ -116,6 +111,18 @@ class InlineForwardList : protected InlineForwardListNode<T>
at->next = item->next;
item->next = nullptr;
}
void removeAndIncrement(iterator &where) {
// Do not change modifyCount_ here. The iterator can still be used
// after calling this method, unlike the other methods that modify
// the list.
Node* item = where.iter;
where.iter = item->next;
if (item == tail_)
tail_ = where.prev;
MOZ_ASSERT(where.prev->next == item);
where.prev->next = where.iter;
item->next = nullptr;
}
void splitAfter(Node* at, InlineForwardList<T>* to) {
MOZ_ASSERT(to->empty());
if (!at)
@@ -185,6 +192,9 @@ public:
bool operator ==(const InlineForwardListIterator<T>& where) const {
return iter == where.iter;
}
explicit operator bool() const {
return iter != nullptr;
}
private:
Node* prev;


@@ -396,12 +396,12 @@ JSONSpewer::spewLIR(MIRGraph* mir)
}
void
JSONSpewer::spewIntervals(BacktrackingAllocator* regalloc)
JSONSpewer::spewRanges(BacktrackingAllocator* regalloc)
{
if (!fp_)
return;
beginObjectProperty("intervals");
beginObjectProperty("ranges");
beginListProperty("blocks");
for (size_t bno = 0; bno < regalloc->graph.numBlocks(); bno++) {
@@ -417,27 +417,17 @@ JSONSpewer::spewIntervals(BacktrackingAllocator* regalloc)
beginObject();
integerProperty("vreg", id);
beginListProperty("intervals");
beginListProperty("ranges");
for (size_t i = 0; i < vreg->numIntervals(); i++) {
LiveInterval* live = vreg->getInterval(i);
for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) {
LiveRange* range = LiveRange::get(*iter);
if (live->numRanges()) {
beginObject();
property("allocation");
fprintf(fp_, "\"%s\"", live->getAllocation()->toString());
beginListProperty("ranges");
for (size_t j = 0; j < live->numRanges(); j++) {
beginObject();
integerProperty("start", live->getRange(j)->from.bits());
integerProperty("end", live->getRange(j)->to.bits());
endObject();
}
endList();
endObject();
}
beginObject();
property("allocation");
fprintf(fp_, "\"%s\"", range->bundle()->allocation().toString());
integerProperty("start", range->from().bits());
integerProperty("end", range->to().bits());
endObject();
}
endList();


@@ -61,7 +61,7 @@ class JSONSpewer
void spewMIR(MIRGraph* mir);
void spewLIns(LNode* ins);
void spewLIR(MIRGraph* mir);
void spewIntervals(BacktrackingAllocator* regalloc);
void spewRanges(BacktrackingAllocator* regalloc);
void endPass();
void endFunction();
void finish();


@@ -177,11 +177,11 @@ IonSpewer::spewPass(const char* pass, BacktrackingAllocator* ra)
return;
c1Spewer.spewPass(pass);
c1Spewer.spewIntervals(pass, ra);
c1Spewer.spewRanges(pass, ra);
jsonSpewer.beginPass(pass);
jsonSpewer.spewMIR(graph);
jsonSpewer.spewLIR(graph);
jsonSpewer.spewIntervals(ra);
jsonSpewer.spewRanges(ra);
jsonSpewer.endPass();
}


@@ -58,27 +58,21 @@ class LOsiPoint : public LInstructionHelper<0, 0, 0>
class LMove
{
LAllocation* from_;
LAllocation* to_;
LAllocation from_;
LAllocation to_;
LDefinition::Type type_;
public:
LMove(LAllocation* from, LAllocation* to, LDefinition::Type type)
LMove(LAllocation from, LAllocation to, LDefinition::Type type)
: from_(from),
to_(to),
type_(type)
{ }
LAllocation* from() {
LAllocation from() const {
return from_;
}
const LAllocation* from() const {
return from_;
}
LAllocation* to() {
return to_;
}
const LAllocation* to() const {
LAllocation to() const {
return to_;
}
LDefinition::Type type() const {
@@ -109,10 +103,10 @@ class LMoveGroup : public LInstructionHelper<0, 0, 0>
void printOperands(FILE* fp);
// Add a move which takes place simultaneously with all others in the group.
bool add(LAllocation* from, LAllocation* to, LDefinition::Type type);
bool add(LAllocation from, LAllocation to, LDefinition::Type type);
// Add a move which takes place after existing moves in the group.
bool addAfter(LAllocation* from, LAllocation* to, LDefinition::Type type);
bool addAfter(LAllocation from, LAllocation to, LDefinition::Type type);
size_t numMoves() const {
return moves_.length();
@@ -137,7 +131,7 @@ class LMoveGroup : public LInstructionHelper<0, 0, 0>
bool uses(Register reg) {
for (size_t i = 0; i < numMoves(); i++) {
LMove move = getMove(i);
if (*move.from() == LGeneralReg(reg) || *move.to() == LGeneralReg(reg))
if (move.from() == LGeneralReg(reg) || move.to() == LGeneralReg(reg))
return true;
}
return false;


@@ -535,28 +535,28 @@ LInstruction::initSafepoint(TempAllocator& alloc)
}
bool
LMoveGroup::add(LAllocation* from, LAllocation* to, LDefinition::Type type)
LMoveGroup::add(LAllocation from, LAllocation to, LDefinition::Type type)
{
#ifdef DEBUG
MOZ_ASSERT(*from != *to);
MOZ_ASSERT(from != to);
for (size_t i = 0; i < moves_.length(); i++)
MOZ_ASSERT(*to != *moves_[i].to());
MOZ_ASSERT(to != moves_[i].to());
// Check that SIMD moves are aligned according to ABI requirements.
if (LDefinition(type).isSimdType()) {
MOZ_ASSERT(from->isMemory() || from->isFloatReg());
if (from->isMemory()) {
if (from->isArgument())
MOZ_ASSERT(from->toArgument()->index() % SimdMemoryAlignment == 0);
MOZ_ASSERT(from.isMemory() || from.isFloatReg());
if (from.isMemory()) {
if (from.isArgument())
MOZ_ASSERT(from.toArgument()->index() % SimdMemoryAlignment == 0);
else
MOZ_ASSERT(from->toStackSlot()->slot() % SimdMemoryAlignment == 0);
MOZ_ASSERT(from.toStackSlot()->slot() % SimdMemoryAlignment == 0);
}
MOZ_ASSERT(to->isMemory() || to->isFloatReg());
if (to->isMemory()) {
if (to->isArgument())
MOZ_ASSERT(to->toArgument()->index() % SimdMemoryAlignment == 0);
MOZ_ASSERT(to.isMemory() || to.isFloatReg());
if (to.isMemory()) {
if (to.isArgument())
MOZ_ASSERT(to.toArgument()->index() % SimdMemoryAlignment == 0);
else
MOZ_ASSERT(to->toStackSlot()->slot() % SimdMemoryAlignment == 0);
MOZ_ASSERT(to.toStackSlot()->slot() % SimdMemoryAlignment == 0);
}
}
#endif
@@ -564,24 +564,24 @@ LMoveGroup::add(LAllocation* from, LAllocation* to, LDefinition::Type type)
}
bool
LMoveGroup::addAfter(LAllocation* from, LAllocation* to, LDefinition::Type type)
LMoveGroup::addAfter(LAllocation from, LAllocation to, LDefinition::Type type)
{
// Transform the operands of this move so that performing the result
// simultaneously with the existing moves in the group has the same effect as
// performing the original move after them. For example, if the group already
// contains A -> B, then adding B -> C here is recorded as A -> C.
for (size_t i = 0; i < moves_.length(); i++) {
if (*moves_[i].to() == *from) {
if (moves_[i].to() == from) {
from = moves_[i].from();
break;
}
}
if (*from == *to)
if (from == to)
return true;
for (size_t i = 0; i < moves_.length(); i++) {
if (*to == *moves_[i].to()) {
if (to == moves_[i].to()) {
moves_[i] = LMove(from, to, type);
return true;
}
@@ -596,8 +596,8 @@ LMoveGroup::printOperands(FILE* fp)
for (size_t i = 0; i < numMoves(); i++) {
const LMove& move = getMove(i);
// Use two printfs, as LAllocation::toString is not reentrant.
fprintf(fp, " [%s", move.from()->toString());
fprintf(fp, " -> %s", move.to()->toString());
fprintf(fp, " [%s", move.from().toString());
fprintf(fp, " -> %s", move.to().toString());
#ifdef DEBUG
fprintf(fp, ", %s", TypeChars[move.type()]);
#endif


@@ -1,996 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/LiveRangeAllocator.h"
#include "mozilla/DebugOnly.h"
#include "jsprf.h"
#include "jit/BacktrackingAllocator.h"
#include "jit/BitSet.h"
using namespace js;
using namespace js::jit;
using mozilla::DebugOnly;
int
Requirement::priority() const
{
switch (kind_) {
case Requirement::FIXED:
return 0;
case Requirement::REGISTER:
return 1;
case Requirement::NONE:
return 2;
default:
MOZ_CRASH("Unknown requirement kind.");
}
}
const char*
Requirement::toString() const
{
#ifdef DEBUG
// Not reentrant!
static char buf[1000];
char* cursor = buf;
char* end = cursor + sizeof(buf);
int n = -1; // initialize to silence GCC warning
switch (kind()) {
case NONE:
return "none";
case REGISTER:
n = JS_snprintf(cursor, end - cursor, "r");
break;
case FIXED:
n = JS_snprintf(cursor, end - cursor, "%s", allocation().toString());
break;
case MUST_REUSE_INPUT:
n = JS_snprintf(cursor, end - cursor, "v%u", virtualRegister());
break;
}
if (n < 0)
return "???";
cursor += n;
if (pos() != CodePosition::MIN) {
n = JS_snprintf(cursor, end - cursor, "@%u", pos().bits());
if (n < 0)
return "???";
cursor += n;
}
return buf;
#else
return " ???";
#endif
}
void
Requirement::dump() const
{
fprintf(stderr, "%s\n", toString());
}
bool
LiveInterval::Range::contains(const Range* other) const
{
return from <= other->from && to >= other->to;
}
void
LiveInterval::Range::intersect(const Range* other, Range* pre, Range* inside, Range* post) const
{
MOZ_ASSERT(pre->empty() && inside->empty() && post->empty());
CodePosition innerFrom = from;
if (from < other->from) {
if (to < other->from) {
*pre = Range(from, to);
return;
}
*pre = Range(from, other->from);
innerFrom = other->from;
}
CodePosition innerTo = to;
if (to > other->to) {
if (from >= other->to) {
*post = Range(from, to);
return;
}
*post = Range(other->to, to);
innerTo = other->to;
}
if (innerFrom != innerTo)
*inside = Range(innerFrom, innerTo);
}
const char*
LiveInterval::Range::toString() const
{
#ifdef DEBUG
// Not reentrant!
static char buf[1000];
char* cursor = buf;
char* end = cursor + sizeof(buf);
int n = JS_snprintf(cursor, end - cursor, "[%u,%u)", from.bits(), to.bits());
if (n < 0)
return " ???";
cursor += n;
return buf;
#else
return " ???";
#endif
}
void
LiveInterval::Range::dump() const
{
fprintf(stderr, "%s\n", toString());
}
bool
LiveInterval::addRangeAtHead(CodePosition from, CodePosition to)
{
MOZ_ASSERT(from < to);
MOZ_ASSERT(ranges_.empty() || from <= ranges_.back().from);
Range newRange(from, to);
if (ranges_.empty())
return ranges_.append(newRange);
Range& first = ranges_.back();
if (to < first.from)
return ranges_.append(newRange);
if (to == first.from) {
first.from = from;
return true;
}
MOZ_ASSERT(from < first.to);
MOZ_ASSERT(to > first.from);
if (from < first.from)
first.from = from;
if (to > first.to)
first.to = to;
return true;
}
bool
LiveInterval::addRange(CodePosition from, CodePosition to)
{
MOZ_ASSERT(from < to);
Range newRange(from, to);
Range* i;
// Find the location to insert the new range
for (i = ranges_.end(); i > ranges_.begin(); i--) {
if (newRange.from <= i[-1].to) {
if (i[-1].from < newRange.from)
newRange.from = i[-1].from;
break;
}
}
// Perform coalescing on overlapping ranges
Range* coalesceEnd = i;
for (; i > ranges_.begin(); i--) {
if (newRange.to < i[-1].from)
break;
if (newRange.to < i[-1].to)
newRange.to = i[-1].to;
}
if (i == coalesceEnd)
return ranges_.insert(i, newRange);
i[0] = newRange;
ranges_.erase(i + 1, coalesceEnd);
return true;
}
void
LiveInterval::setFrom(CodePosition from)
{
while (!ranges_.empty()) {
if (ranges_.back().to < from) {
ranges_.popBack();
} else {
if (from == ranges_.back().to)
ranges_.popBack();
else
ranges_.back().from = from;
break;
}
}
}
bool
LiveInterval::covers(CodePosition pos)
{
if (pos < start() || pos >= end())
return false;
// Loop over the ranges in ascending order.
size_t i = lastProcessedRangeIfValid(pos);
for (; i < ranges_.length(); i--) {
if (pos < ranges_[i].from)
return false;
setLastProcessedRange(i, pos);
if (pos < ranges_[i].to)
return true;
}
return false;
}
CodePosition
LiveInterval::intersect(LiveInterval* other)
{
if (start() > other->start())
return other->intersect(this);
// Loop over the ranges in ascending order. As an optimization,
// try to start at the last processed range.
size_t i = lastProcessedRangeIfValid(other->start());
size_t j = other->ranges_.length() - 1;
if (i >= ranges_.length() || j >= other->ranges_.length())
return CodePosition::MIN;
while (true) {
const Range& r1 = ranges_[i];
const Range& r2 = other->ranges_[j];
if (r1.from <= r2.from) {
if (r1.from <= other->start())
setLastProcessedRange(i, other->start());
if (r2.from < r1.to)
return r2.from;
if (i == 0 || ranges_[i-1].from > other->end())
break;
i--;
} else {
if (r1.from < r2.to)
return r1.from;
if (j == 0 || other->ranges_[j-1].from > end())
break;
j--;
}
}
return CodePosition::MIN;
}
/*
* This function takes the callee interval and moves all ranges following or
* including the provided position to the target interval. Additionally, if a
* range in the callee interval spans the given position, it is split and the
* latter half is placed in the target interval.
*
* This function should only be called if it is known that the interval should
* actually be split (and, presumably, a move inserted). As such, it is an
* error for the caller to request a split that moves all intervals into the
* target. Doing so will trip the assertion at the bottom of the function.
*/
bool
LiveInterval::splitFrom(CodePosition pos, LiveInterval* after)
{
MOZ_ASSERT(pos >= start() && pos < end());
MOZ_ASSERT(after->ranges_.empty());
// Move all intervals over to the target
size_t bufferLength = ranges_.length();
Range* buffer = ranges_.extractRawBuffer();
if (!buffer)
return false;
after->ranges_.replaceRawBuffer(buffer, bufferLength);
// Move intervals back as required
for (Range* i = &after->ranges_.back(); i >= after->ranges_.begin(); i--) {
if (pos >= i->to)
continue;
if (pos > i->from) {
// Split the range
Range split(i->from, pos);
i->from = pos;
if (!ranges_.append(split))
return false;
}
if (!ranges_.append(i + 1, after->ranges_.end()))
return false;
after->ranges_.shrinkBy(after->ranges_.end() - i - 1);
break;
}
// Split the linked list of use positions
UsePosition* prev = nullptr;
for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
if (usePos->pos > pos)
break;
prev = *usePos;
}
uses_.splitAfter(prev, &after->uses_);
return true;
}
void
LiveInterval::addUse(UsePosition* use)
{
// Insert use positions in ascending order. Note that instructions
// are visited in reverse order, so in most cases the loop terminates
// at the first iteration and the use position will be added to the
// front of the list.
UsePosition* prev = nullptr;
for (UsePositionIterator current(usesBegin()); current != usesEnd(); current++) {
if (current->pos >= use->pos)
break;
prev = *current;
}
if (prev)
uses_.insertAfter(prev, use);
else
uses_.pushFront(use);
}
void
LiveInterval::addUseAtEnd(UsePosition* use)
{
MOZ_ASSERT(uses_.empty() || use->pos >= uses_.back()->pos);
uses_.pushBack(use);
}
UsePosition*
LiveInterval::nextUseAfter(CodePosition after)
{
for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
if (usePos->pos >= after) {
LUse::Policy policy = usePos->use->policy();
MOZ_ASSERT(policy != LUse::RECOVERED_INPUT);
if (policy != LUse::KEEPALIVE)
return *usePos;
}
}
return nullptr;
}
UsePosition*
LiveInterval::popUse()
{
return uses_.popFront();
}
/*
* This function locates the first "real" use of this interval that follows
* the given code position. Non-"real" uses are currently just snapshots,
* which keep virtual registers alive but do not result in the
* generation of code that uses them.
*/
CodePosition
LiveInterval::nextUsePosAfter(CodePosition after)
{
UsePosition* min = nextUseAfter(after);
return min ? min->pos : CodePosition::MAX;
}
LiveInterval*
VirtualRegister::intervalFor(CodePosition pos)
{
// Intervals are sorted in ascending order by their start position.
for (LiveInterval** i = intervals_.begin(); i != intervals_.end(); i++) {
if ((*i)->covers(pos))
return *i;
if (pos < (*i)->start())
break;
}
return nullptr;
}
LiveInterval*
VirtualRegister::getFirstInterval()
{
MOZ_ASSERT(!intervals_.empty());
return intervals_[0];
}
// Instantiate LiveRangeAllocator for each template instance.
template bool LiveRangeAllocator<BacktrackingVirtualRegister>::buildLivenessInfo();
template void LiveRangeAllocator<BacktrackingVirtualRegister>::dumpVregs();
#ifdef DEBUG
// Returns true iff ins has a def/temp reusing the input allocation.
static bool
IsInputReused(LInstruction* ins, LUse* use)
{
for (size_t i = 0; i < ins->numDefs(); i++) {
if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use)
{
return true;
}
}
for (size_t i = 0; i < ins->numTemps(); i++) {
if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use)
{
return true;
}
}
return false;
}
#endif
/*
* This function pre-allocates and initializes as much global state as possible
* to avoid littering the algorithms with memory management cruft.
*/
template <typename VREG>
bool
LiveRangeAllocator<VREG>::init()
{
if (!RegisterAllocator::init())
return false;
liveIn = mir->allocate<BitSet>(graph.numBlockIds());
if (!liveIn)
return false;
// Initialize fixed intervals.
for (size_t i = 0; i < AnyRegister::Total; i++) {
AnyRegister reg = AnyRegister::FromCode(i);
LiveInterval* interval = LiveInterval::New(alloc(), 0);
interval->setAllocation(LAllocation(reg));
fixedIntervals[i] = interval;
}
fixedIntervalsUnion = LiveInterval::New(alloc(), 0);
if (!vregs.init(mir, graph.numVirtualRegisters()))
return false;
// Build virtual register objects
for (size_t i = 0; i < graph.numBlocks(); i++) {
if (mir->shouldCancel("Create data structures (main loop)"))
return false;
LBlock* block = graph.getBlock(i);
for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
for (size_t j = 0; j < ins->numDefs(); j++) {
LDefinition* def = ins->getDef(j);
if (def->isBogusTemp())
continue;
if (!vregs[def].init(alloc(), *ins, def, /* isTemp */ false))
return false;
}
for (size_t j = 0; j < ins->numTemps(); j++) {
LDefinition* def = ins->getTemp(j);
if (def->isBogusTemp())
continue;
if (!vregs[def].init(alloc(), *ins, def, /* isTemp */ true))
return false;
}
}
for (size_t j = 0; j < block->numPhis(); j++) {
LPhi* phi = block->getPhi(j);
LDefinition* def = phi->getDef(0);
if (!vregs[def].init(alloc(), phi, def, /* isTemp */ false))
return false;
}
}
return true;
}
/*
* This function builds up liveness intervals for all virtual registers
* defined in the function. Additionally, it populates the liveIn array with
* information about which registers are live at the beginning of a block, to
* aid resolution and reification in a later phase.
*
* The algorithm is based on the one published in:
*
* Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
* SSA Form." Proceedings of the International Symposium on Code Generation
* and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
*
* The algorithm operates on blocks ordered such that dominators of a block
* are before the block itself, and such that all blocks of a loop are
* contiguous. It proceeds backwards over the instructions in this order,
* marking registers live at their uses, ending their live intervals at
* definitions, and recording which registers are live at the top of every
* block. To deal with loop backedges, variables live at the beginning of
* a loop gain an interval covering the entire loop.
*/
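As a greatly simplified illustration of the backward scan the comment above describes (straight-line code only, no CFG, loops, or CodePositions; Ins and the range map are hypothetical stand-ins, not the real data structures):

#include <cstdio>
#include <map>
#include <set>
#include <utility>
#include <vector>

// Illustrative sketch, not part of this patch.
struct Ins { std::vector<int> uses; int def; };  // def == -1 means no definition

int main() {
    // v1 = ...; v2 = v1 + v1; return v2
    std::vector<Ins> block = { { {}, 1 }, { { 1, 1 }, 2 }, { { 2 }, -1 } };
    std::set<int> live;                        // registers live after the current instruction
    std::map<int, std::pair<int, int>> range;  // vreg -> [start, end)

    for (int i = int(block.size()) - 1; i >= 0; i--) {
        const Ins& ins = block[i];
        if (ins.def != -1) {
            range[ins.def].first = i;          // the definition shortens the range
            live.erase(ins.def);
        }
        for (int use : ins.uses) {
            if (!live.count(use)) {
                live.insert(use);
                range[use] = { 0, i + 1 };     // provisionally live from block entry to this use
            }
        }
    }
    for (const auto& r : range)
        printf("v%d: [%d, %d)\n", r.first, r.second.first, r.second.second);
    return 0;
}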
template <typename VREG>
bool
LiveRangeAllocator<VREG>::buildLivenessInfo()
{
JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");
if (!init())
return false;
Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
BitSet loopDone(graph.numBlockIds());
if (!loopDone.init(alloc()))
return false;
for (size_t i = graph.numBlocks(); i > 0; i--) {
if (mir->shouldCancel("Build Liveness Info (main loop)"))
return false;
LBlock* block = graph.getBlock(i - 1);
MBasicBlock* mblock = block->mir();
BitSet& live = liveIn[mblock->id()];
new (&live) BitSet(graph.numVirtualRegisters());
if (!live.init(alloc()))
return false;
// Propagate liveIn from our successors to us
for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
// Skip backedges, as we fix them up at the loop header.
if (mblock->id() < successor->id())
live.insertAll(liveIn[successor->id()]);
}
// Add successor phis
if (mblock->successorWithPhis()) {
LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
LPhi* phi = phiSuccessor->getPhi(j);
LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
uint32_t reg = use->toUse()->virtualRegister();
live.insert(reg);
}
}
// Variables are assumed alive for the entire block; a define shortens
// the interval to the point of definition.
for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(entryOf(block),
exitOf(block).next()))
{
return false;
}
}
// Shorten the front end of live intervals for live variables to their
// point of definition, if found.
for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
// Calls may clobber registers, so force a spill and reload around the callsite.
if (ins->isCall()) {
for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more(); iter++) {
bool found = false;
for (size_t i = 0; i < ins->numDefs(); i++) {
if (ins->getDef(i)->isFixed() &&
ins->getDef(i)->output()->aliases(LAllocation(*iter))) {
found = true;
break;
}
}
if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
return false;
}
}
DebugOnly<bool> hasDoubleDef = false;
DebugOnly<bool> hasFloat32Def = false;
for (size_t i = 0; i < ins->numDefs(); i++) {
LDefinition* def = ins->getDef(i);
if (def->isBogusTemp())
continue;
#ifdef DEBUG
if (def->type() == LDefinition::DOUBLE)
hasDoubleDef = true;
if (def->type() == LDefinition::FLOAT32)
hasFloat32Def = true;
#endif
CodePosition from = outputOf(*ins);
if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
// MUST_REUSE_INPUT is implemented by allocating an output
// register and moving the input to it. Register hints are
// used to avoid unnecessary moves. We give the input an
// LUse::ANY policy to avoid allocating a register for the
// input.
LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
MOZ_ASSERT(inputUse->usedAtStart());
*inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
}
LiveInterval* interval = vregs[def].getInterval(0);
interval->setFrom(from);
// Ensure that if there aren't any uses, there's at least
// some interval for the output to go into.
if (interval->numRanges() == 0) {
if (!interval->addRangeAtHead(from, from.next()))
return false;
}
live.remove(def->virtualRegister());
}
for (size_t i = 0; i < ins->numTemps(); i++) {
LDefinition* temp = ins->getTemp(i);
if (temp->isBogusTemp())
continue;
// Normally temps are considered to cover both the input
// and output of the associated instruction. In some cases
// though we want to use a fixed register as both an input
// and clobbered register in the instruction, so watch for
// this and shorten the temp to cover only the output.
CodePosition from = inputOf(*ins);
if (temp->policy() == LDefinition::FIXED) {
AnyRegister reg = temp->output()->toRegister();
for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
if (alloc->isUse()) {
LUse* use = alloc->toUse();
if (use->isFixedRegister()) {
if (GetFixedRegister(vregs[use].def(), use) == reg)
from = outputOf(*ins);
}
}
}
}
CodePosition to =
ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to))
return false;
}
DebugOnly<bool> hasUseRegister = false;
DebugOnly<bool> hasUseRegisterAtStart = false;
for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
if (inputAlloc->isUse()) {
LUse* use = inputAlloc->toUse();
// Call uses should always be at-start or fixed, since the fixed intervals
// use all registers.
MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
use->isFixedRegister() || use->usedAtStart());
#ifdef DEBUG
// Don't allow at-start call uses if there are temps of the same kind,
// so that we don't assign the same register.
if (ins->isCall() && use->usedAtStart()) {
for (size_t i = 0; i < ins->numTemps(); i++)
MOZ_ASSERT(vregs[ins->getTemp(i)].isFloatReg() != vregs[use].isFloatReg());
}
// If there are both useRegisterAtStart(x) and useRegister(y)
// uses, we may assign the same register to both operands due to
// interval splitting (bug 772830). Don't allow this for now.
if (use->policy() == LUse::REGISTER) {
if (use->usedAtStart()) {
if (!IsInputReused(*ins, use))
hasUseRegisterAtStart = true;
} else {
hasUseRegister = true;
}
}
MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif
// Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
if (use->policy() == LUse::RECOVERED_INPUT)
continue;
// Fixed uses on calls are specially overridden to happen
// at the input position.
CodePosition to =
(use->usedAtStart() || (ins->isCall() && use->isFixedRegister()))
? inputOf(*ins)
: outputOf(*ins);
if (use->isFixedRegister()) {
LAllocation reg(AnyRegister::FromCode(use->registerCode()));
for (size_t i = 0; i < ins->numDefs(); i++) {
LDefinition* def = ins->getDef(i);
if (def->policy() == LDefinition::FIXED && *def->output() == reg)
to = inputOf(*ins);
}
}
LiveInterval* interval = vregs[use].getInterval(0);
if (!interval->addRangeAtHead(entryOf(block), to.next()))
return false;
interval->addUse(new(alloc()) UsePosition(use, to));
live.insert(use->virtualRegister());
}
}
}
// Phis have simultaneous assignment semantics at block begin, so at
// the beginning of the block we can be sure that liveIn does not
// contain any phi outputs.
for (unsigned int i = 0; i < block->numPhis(); i++) {
LDefinition* def = block->getPhi(i)->getDef(0);
if (live.contains(def->virtualRegister())) {
live.remove(def->virtualRegister());
} else {
// This is a dead phi, so add a dummy range over all phis. This
// can go away if we have an earlier dead code elimination pass.
CodePosition entryPos = entryOf(block);
if (!vregs[def].getInterval(0)->addRangeAtHead(entryPos, entryPos.next()))
return false;
}
}
if (mblock->isLoopHeader()) {
// A divergence from the published algorithm is required here, as
// our block order does not guarantee that blocks of a loop are
// contiguous. As a result, a single live interval spanning the
// loop is not possible. Additionally, we require liveIn in a later
// pass for resolution, so that must also be fixed up here.
MBasicBlock* loopBlock = mblock->backedge();
while (true) {
// Blocks must already have been visited to have a liveIn set.
MOZ_ASSERT(loopBlock->id() >= mblock->id());
// Add a range covering this entire loop block.
CodePosition from = entryOf(loopBlock->lir());
CodePosition to = exitOf(loopBlock->lir()).next();
for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
return false;
}
// Fix up the liveIn set to account for the new interval
liveIn[loopBlock->id()].insertAll(live);
// Make sure we don't visit this node again
loopDone.insert(loopBlock->id());
// The loop header's predecessors are either the backedge or
// blocks outside the loop, so only enqueue predecessors of
// non-header blocks.
if (loopBlock != mblock) {
for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
MBasicBlock* pred = loopBlock->getPredecessor(i);
if (loopDone.contains(pred->id()))
continue;
if (!loopWorkList.append(pred))
return false;
}
}
// Terminate loop if out of work.
if (loopWorkList.empty())
break;
// Grab the next block off the work list, skipping any OSR block.
MBasicBlock* osrBlock = graph.mir().osrBlock();
while (!loopWorkList.empty()) {
loopBlock = loopWorkList.popCopy();
if (loopBlock != osrBlock)
break;
}
// If no non-OSR block was found, the work list is exhausted.
if (loopBlock == osrBlock) {
MOZ_ASSERT(loopWorkList.empty());
break;
}
}
// Clear the done set for other loops
loopDone.clear();
}
MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
}
validateVirtualRegisters();
// If the script has an infinite loop, there may be no MReturn and therefore
// no fixed intervals. Add a small range to fixedIntervalsUnion so that the
// rest of the allocator can assume it has at least one range.
if (fixedIntervalsUnion->numRanges() == 0) {
if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
CodePosition(0, CodePosition::OUTPUT)))
{
return false;
}
}
JitSpew(JitSpew_RegAlloc, "Liveness analysis complete");
if (JitSpewEnabled(JitSpew_RegAlloc)) {
dumpInstructions();
fprintf(stderr, "Live ranges by virtual register:\n");
dumpVregs();
}
return true;
}
template <typename VREG>
void
LiveRangeAllocator<VREG>::dumpVregs()
{
#ifdef DEBUG
// Virtual register number 0 is unused.
MOZ_ASSERT(vregs[0u].numIntervals() == 0);
for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
fprintf(stderr, " ");
VirtualRegister& vreg = vregs[i];
for (size_t j = 0; j < vreg.numIntervals(); j++) {
if (j)
fprintf(stderr, " / ");
fprintf(stderr, "%s", vreg.getInterval(j)->toString());
}
fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
#endif
}
#ifdef DEBUG
void
LiveInterval::validateRanges()
{
Range* prev = nullptr;
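// ranges_ keeps the latest range first, so walk from the back (the earliest
// range) toward the front; the unsigned index wraps past zero, ending the loop.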
for (size_t i = ranges_.length() - 1; i < ranges_.length(); i--) {
Range* range = &ranges_[i];
MOZ_ASSERT(range->from < range->to);
MOZ_ASSERT_IF(prev, prev->to <= range->from);
prev = range;
}
}
#endif // DEBUG
const char*
LiveInterval::rangesToString() const
{
#ifdef DEBUG
// Not reentrant!
static char buf[2000];
char* cursor = buf;
char* end = cursor + sizeof(buf);
int n;
for (size_t i = ranges_.length() - 1; i < ranges_.length(); i--) {
const LiveInterval::Range* range = getRange(i);
n = JS_snprintf(cursor, end - cursor, " %s", range->toString());
if (n < 0)
return " ???";
cursor += n;
}
return buf;
#else
return " ???";
#endif
}
#ifdef DEBUG
static bool
IsHintInteresting(const Requirement& requirement, const Requirement& hint)
{
if (hint.kind() == Requirement::NONE)
return false;
if (hint.kind() != Requirement::FIXED && hint.kind() != Requirement::REGISTER)
return true;
Requirement merge = requirement;
if (!merge.mergeRequirement(hint))
return true;
return merge.kind() != requirement.kind();
}
#endif
const char*
LiveInterval::toString() const
{
#ifdef DEBUG
// Not reentrant!
static char buf[2000];
char* cursor = buf;
char* end = cursor + sizeof(buf);
int n;
if (hasVreg()) {
n = JS_snprintf(cursor, end - cursor, "v%u", vreg());
if (n < 0) return "???";
cursor += n;
}
n = JS_snprintf(cursor, end - cursor, "[%u]", index());
if (n < 0) return "???";
cursor += n;
if (requirement_.kind() != Requirement::NONE || hint_.kind() != Requirement::NONE) {
n = JS_snprintf(cursor, end - cursor, " req(");
if (n < 0) return "???";
cursor += n;
bool printHint = IsHintInteresting(requirement_, hint_);
if (requirement_.kind() != Requirement::NONE) {
n = JS_snprintf(cursor, end - cursor, "%s%s",
requirement_.toString(),
printHint ? "," : "");
if (n < 0) return "???";
cursor += n;
}
if (printHint) {
n = JS_snprintf(cursor, end - cursor, "%s?", hint_.toString());
if (n < 0) return "???";
cursor += n;
}
n = JS_snprintf(cursor, end - cursor, ")");
if (n < 0) return "???";
cursor += n;
}
if (!alloc_.isBogus()) {
n = JS_snprintf(cursor, end - cursor, " has(%s)", alloc_.toString());
if (n < 0) return "???";
cursor += n;
}
n = JS_snprintf(cursor, end - cursor, "%s", rangesToString());
if (n < 0) return "???";
cursor += n;
for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
n = JS_snprintf(cursor, end - cursor, " %s@%u",
usePos->use->toString(), usePos->pos.bits());
if (n < 0) return "???";
cursor += n;
}
return buf;
#else
return "???";
#endif
}
void
LiveInterval::dump() const
{
fprintf(stderr, "%s\n", toString());
}

View File

@ -1,758 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_LiveRangeAllocator_h
#define jit_LiveRangeAllocator_h
#include "mozilla/Array.h"
#include "mozilla/DebugOnly.h"
#include "jit/RegisterAllocator.h"
#include "jit/StackSlotAllocator.h"
// Common structures and functions used by register allocators that operate on
// virtual register live ranges.
namespace js {
namespace jit {
class Requirement
{
public:
enum Kind {
NONE,
REGISTER,
FIXED,
MUST_REUSE_INPUT
};
Requirement()
: kind_(NONE)
{ }
explicit Requirement(Kind kind)
: kind_(kind)
{
// These have dedicated constructors.
MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
}
Requirement(Kind kind, CodePosition at)
: kind_(kind),
position_(at)
{
// These have dedicated constructors.
MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
}
explicit Requirement(LAllocation fixed)
: kind_(FIXED),
allocation_(fixed)
{
MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
}
// Only useful as a hint, encodes where the fixed requirement is used to
// avoid allocating a fixed register too early.
Requirement(LAllocation fixed, CodePosition at)
: kind_(FIXED),
allocation_(fixed),
position_(at)
{
MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
}
Requirement(uint32_t vreg, CodePosition at)
: kind_(MUST_REUSE_INPUT),
allocation_(LUse(vreg, LUse::ANY)),
position_(at)
{ }
Kind kind() const {
return kind_;
}
LAllocation allocation() const {
MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
return allocation_;
}
uint32_t virtualRegister() const {
MOZ_ASSERT(allocation_.isUse());
MOZ_ASSERT(kind() == MUST_REUSE_INPUT);
return allocation_.toUse()->virtualRegister();
}
CodePosition pos() const {
return position_;
}
int priority() const;
bool mergeRequirement(const Requirement& newRequirement) {
// Merge newRequirement with any existing requirement, returning false
// if the new and old requirements conflict.
MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT);
if (newRequirement.kind() == Requirement::FIXED) {
if (kind() == Requirement::FIXED)
return newRequirement.allocation() == allocation();
*this = newRequirement;
return true;
}
MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
if (kind() == Requirement::FIXED)
return allocation().isRegister();
*this = newRequirement;
return true;
}
// Return a string describing this requirement. This is not re-entrant!
const char* toString() const;
void dump() const;
private:
Kind kind_;
LAllocation allocation_;
CodePosition position_;
};
struct UsePosition : public TempObject,
public InlineForwardListNode<UsePosition>
{
LUse* use;
CodePosition pos;
UsePosition(LUse* use, CodePosition pos) :
use(use),
pos(pos)
{
// Verify that the usedAtStart() flag is consistent with the
// subposition. For now ignore fixed registers, because they
// are handled specially around calls.
MOZ_ASSERT_IF(!use->isFixedRegister(),
pos.subpos() == (use->usedAtStart()
? CodePosition::INPUT
: CodePosition::OUTPUT));
}
};
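// Illustrative sketch: the subposition of a use's CodePosition is decided by
// the liveness pass (see buildLivenessInfo earlier in this commit), roughly
//
//   CodePosition to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
//   interval->addUse(new(alloc()) UsePosition(use, to));
//
// so at-start uses land on the INPUT subposition (they may share a register
// with the instruction's outputs) and other uses on OUTPUT. Fixed uses around
// calls are the exception noted in the constructor above.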
typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
static inline bool
UseCompatibleWith(const LUse* use, LAllocation alloc)
{
switch (use->policy()) {
case LUse::ANY:
case LUse::KEEPALIVE:
return alloc.isRegister() || alloc.isMemory();
case LUse::REGISTER:
return alloc.isRegister();
case LUse::FIXED:
// Fixed uses are handled using fixed intervals. The
// UsePosition is only used as a hint.
return alloc.isRegister();
default:
MOZ_CRASH("Unknown use policy");
}
}
#ifdef DEBUG
static inline bool
DefinitionCompatibleWith(LNode* ins, const LDefinition* def, LAllocation alloc)
{
if (ins->isPhi()) {
if (def->isFloatReg())
return alloc.isFloatReg() || alloc.isStackSlot();
return alloc.isGeneralReg() || alloc.isStackSlot();
}
switch (def->policy()) {
case LDefinition::REGISTER:
if (!alloc.isRegister())
return false;
return alloc.isFloatReg() == def->isFloatReg();
case LDefinition::FIXED:
return alloc == *def->output();
case LDefinition::MUST_REUSE_INPUT:
if (!alloc.isRegister() || !ins->numOperands())
return false;
return alloc == *ins->getOperand(def->getReusedInput());
default:
MOZ_CRASH("Unknown definition policy");
}
}
#endif // DEBUG
static inline LDefinition*
FindReusingDefinition(LNode* ins, LAllocation* alloc)
{
for (size_t i = 0; i < ins->numDefs(); i++) {
LDefinition* def = ins->getDef(i);
if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(def->getReusedInput()) == alloc)
return def;
}
for (size_t i = 0; i < ins->numTemps(); i++) {
LDefinition* def = ins->getTemp(i);
if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(def->getReusedInput()) == alloc)
return def;
}
return nullptr;
}
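// Hypothetical usage sketch (names illustrative): callers pass an operand's
// allocation to find a def or temp that is required to reuse it.
//
//   LInstruction* ins = /* ... */;
//   LAllocation* operand = ins->getOperand(0);
//   if (LDefinition* reusing = FindReusingDefinition(ins, operand)) {
//       // The register chosen for |operand| must also satisfy |reusing|.
//   }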
/*
* A live interval is a set of disjoint ranges of code positions where a
* virtual register is live. Register allocation operates on these intervals,
* splitting them as necessary and assigning allocations to them as it runs.
*/
class LiveInterval
: public TempObject
{
public:
/*
* A range is a contiguous sequence of CodePositions where the virtual
* register associated with this interval is live.
*/
struct Range {
Range()
: from(),
to()
{ }
Range(CodePosition f, CodePosition t)
: from(f),
to(t)
{
MOZ_ASSERT(from < to);
}
// The beginning of this range, inclusive.
CodePosition from;
// The end of this range, exclusive.
CodePosition to;
bool empty() const {
return from >= to;
}
// Whether this range wholly contains other.
bool contains(const Range* other) const;
// Intersect this range with other, returning the subranges of this
// that are before, inside, or after other.
void intersect(const Range* other, Range* pre, Range* inside, Range* post) const;
// Return a string describing this range. This is not re-entrant!
const char* toString() const;
void dump() const;
};
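// Sketch of the intersect() contract (the definition lives in the .cpp file;
// shown here only as an illustration): given |other|, the pieces of |this|
// before, inside, and after it are, conceptually,
//
//   pre    = [from, Min(to, other->from))                    if from < other->from
//   inside = [Max(from, other->from), Min(to, other->to))    if the ranges overlap
//   post   = [Max(from, other->to), to)                      if to > other->to
//
// with each output range left empty when its condition does not hold.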
private:
Vector<Range, 1, JitAllocPolicy> ranges_;
LAllocation alloc_;
LiveInterval* spillInterval_;
uint32_t vreg_;
uint32_t index_;
Requirement requirement_;
Requirement hint_;
InlineForwardList<UsePosition> uses_;
size_t lastProcessedRange_;
LiveInterval(TempAllocator& alloc, uint32_t vreg, uint32_t index)
: ranges_(alloc),
spillInterval_(nullptr),
vreg_(vreg),
index_(index),
lastProcessedRange_(size_t(-1))
{ }
LiveInterval(TempAllocator& alloc, uint32_t index)
: ranges_(alloc),
spillInterval_(nullptr),
vreg_(UINT32_MAX),
index_(index),
lastProcessedRange_(size_t(-1))
{ }
public:
static LiveInterval* New(TempAllocator& alloc, uint32_t vreg, uint32_t index) {
return new(alloc) LiveInterval(alloc, vreg, index);
}
static LiveInterval* New(TempAllocator& alloc, uint32_t index) {
return new(alloc) LiveInterval(alloc, index);
}
bool addRange(CodePosition from, CodePosition to);
bool addRangeAtHead(CodePosition from, CodePosition to);
void setFrom(CodePosition from);
CodePosition intersect(LiveInterval* other);
bool covers(CodePosition pos);
CodePosition start() const {
MOZ_ASSERT(!ranges_.empty());
return ranges_.back().from;
}
CodePosition end() const {
MOZ_ASSERT(!ranges_.empty());
return ranges_.begin()->to;
}
size_t numRanges() const {
return ranges_.length();
}
const Range* getRange(size_t i) const {
return &ranges_[i];
}
void setLastProcessedRange(size_t range, mozilla::DebugOnly<CodePosition> pos) {
// If the range starts after pos, we may not be able to use
// it in the next lastProcessedRangeIfValid call.
MOZ_ASSERT(ranges_[range].from <= pos);
lastProcessedRange_ = range;
}
size_t lastProcessedRangeIfValid(CodePosition pos) const {
if (lastProcessedRange_ < ranges_.length() && ranges_[lastProcessedRange_].from <= pos)
return lastProcessedRange_;
return ranges_.length() - 1;
}
LAllocation* getAllocation() {
return &alloc_;
}
void setAllocation(LAllocation alloc) {
alloc_ = alloc;
}
void setSpillInterval(LiveInterval* spill) {
spillInterval_ = spill;
}
LiveInterval* spillInterval() {
return spillInterval_;
}
bool hasVreg() const {
return vreg_ != UINT32_MAX;
}
uint32_t vreg() const {
MOZ_ASSERT(hasVreg());
return vreg_;
}
uint32_t index() const {
return index_;
}
void setIndex(uint32_t index) {
index_ = index;
}
const Requirement* requirement() const {
return &requirement_;
}
void setRequirement(const Requirement& requirement) {
// A MUST_REUSE_INPUT requirement complicates regalloc too much; it
// should only be used as a hint.
MOZ_ASSERT(requirement.kind() != Requirement::MUST_REUSE_INPUT);
requirement_ = requirement;
}
bool addRequirement(const Requirement& newRequirement) {
return requirement_.mergeRequirement(newRequirement);
}
void addHint(const Requirement& newHint) {
// Unlike addRequirement, here in addHint we ignore merge failures,
// because these are just hints.
hint_.mergeRequirement(newHint);
}
const Requirement* hint() const {
return &hint_;
}
void setHint(const Requirement& hint) {
hint_ = hint;
}
bool isSpill() const {
return alloc_.isStackSlot();
}
bool splitFrom(CodePosition pos, LiveInterval* after);
void addUse(UsePosition* use);
void addUseAtEnd(UsePosition* use);
UsePosition* popUse();
UsePosition* nextUseAfter(CodePosition pos);
CodePosition nextUsePosAfter(CodePosition pos);
UsePositionIterator usesBegin() const {
return uses_.begin();
}
UsePositionIterator usesEnd() const {
return uses_.end();
}
bool usesEmpty() const {
return uses_.empty();
}
UsePosition* usesBack() {
return uses_.back();
}
#ifdef DEBUG
void validateRanges();
#endif
// Return a string describing the ranges in this LiveInterval. This is
// not re-entrant!
const char* rangesToString() const;
// Return a string describing this LiveInterval. This is not re-entrant!
const char* toString() const;
void dump() const;
};
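// Usage sketch (illustrative values; assumes a TempAllocator& alloc in scope):
// ranges are kept with the latest range first, and the liveness pass, which
// walks blocks backwards, extends an interval at its head:
//
//   LiveInterval* interval = LiveInterval::New(alloc, /* vreg = */ 5, /* index = */ 0);
//   if (!interval->addRangeAtHead(CodePosition(20, CodePosition::INPUT),
//                                 CodePosition(30, CodePosition::OUTPUT)))
//       return false;
//   if (!interval->addRangeAtHead(CodePosition(4, CodePosition::INPUT),
//                                 CodePosition(10, CodePosition::OUTPUT)))
//       return false;
//   // start() is now the INPUT position of instruction 4 (ranges_.back().from)
//   // and end() the OUTPUT position of instruction 30 (ranges_.begin()->to).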
/*
* Represents all of the register allocation state associated with a virtual
* register, including all associated intervals and pointers to relevant LIR
* structures.
*/
class VirtualRegister
{
LNode* ins_;
LDefinition* def_;
Vector<LiveInterval*, 1, JitAllocPolicy> intervals_;
// Whether def_ is a temp or an output.
bool isTemp_ : 1;
void operator=(const VirtualRegister&) = delete;
VirtualRegister(const VirtualRegister&) = delete;
protected:
explicit VirtualRegister(TempAllocator& alloc)
: intervals_(alloc)
{}
public:
bool init(TempAllocator& alloc, LNode* ins, LDefinition* def,
bool isTemp)
{
MOZ_ASSERT(ins && !ins_);
ins_ = ins;
def_ = def;
isTemp_ = isTemp;
LiveInterval* initial = LiveInterval::New(alloc, def->virtualRegister(), 0);
if (!initial)
return false;
return intervals_.append(initial);
}
LBlock* block() {
return ins_->block();
}
LNode* ins() {
return ins_;
}
LDefinition* def() const {
return def_;
}
LDefinition::Type type() const {
return def()->type();
}
bool isTemp() const {
return isTemp_;
}
size_t numIntervals() const {
return intervals_.length();
}
LiveInterval* getInterval(size_t i) const {
return intervals_[i];
}
LiveInterval* lastInterval() const {
MOZ_ASSERT(numIntervals() > 0);
return getInterval(numIntervals() - 1);
}
void replaceInterval(LiveInterval* old, LiveInterval* interval) {
MOZ_ASSERT(intervals_[old->index()] == old);
interval->setIndex(old->index());
intervals_[old->index()] = interval;
}
bool addInterval(LiveInterval* interval) {
MOZ_ASSERT(interval->numRanges());
MOZ_ASSERT(interval->vreg() != 0);
// Preserve ascending order for faster lookups.
LiveInterval** found = nullptr;
LiveInterval** i;
for (i = intervals_.begin(); i != intervals_.end(); i++) {
if (!found && interval->start() < (*i)->start())
found = i;
if (found)
(*i)->setIndex((*i)->index() + 1);
}
if (!found)
found = intervals_.end();
interval->setIndex(found - intervals_.begin());
return intervals_.insert(found, interval);
}
void removeInterval(LiveInterval* interval) {
intervals_.erase(intervals_.begin() + interval->index());
for (size_t i = interval->index(), e = intervals_.length(); i < e; ++i)
intervals_[i]->setIndex(i);
interval->setIndex(-1);
}
bool isFloatReg() const {
return def_->isFloatReg();
}
bool isCompatibleReg(const AnyRegister& r) const {
return def_->isCompatibleReg(r);
}
bool isCompatibleVReg(const VirtualRegister& vr) const {
return def_->isCompatibleDef(*vr.def_);
}
LiveInterval* intervalFor(CodePosition pos);
LiveInterval* getFirstInterval();
};
// Index of the virtual registers in a graph. VREG is a subclass of
// VirtualRegister extended with any allocator specific state for the vreg.
template <typename VREG>
class VirtualRegisterMap
{
private:
FixedList<VREG> vregs_;
void operator=(const VirtualRegisterMap&) = delete;
VirtualRegisterMap(const VirtualRegisterMap&) = delete;
public:
VirtualRegisterMap()
: vregs_()
{ }
bool init(MIRGenerator* gen, uint32_t numVregs) {
if (!vregs_.init(gen->alloc(), numVregs))
return false;
memset(&vregs_[0], 0, sizeof(VREG) * numVregs);
TempAllocator& alloc = gen->alloc();
for (uint32_t i = 0; i < numVregs; i++)
new(&vregs_[i]) VREG(alloc);
return true;
}
VREG& operator[](unsigned int index) {
return vregs_[index];
}
VREG& operator[](const LAllocation* alloc) {
MOZ_ASSERT(alloc->isUse());
return vregs_[alloc->toUse()->virtualRegister()];
}
VREG& operator[](const LDefinition* def) {
return vregs_[def->virtualRegister()];
}
uint32_t numVirtualRegisters() const {
return vregs_.length();
}
};
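// Indexing sketch: given the allocator's vregs map (as in the liveness code
// above), the overloads let callers index by raw vreg number, by an LUse-backed
// allocation, or by a definition:
//
//   VirtualRegister& byIndex = vregs[use->virtualRegister()];   // by vreg number
//   VirtualRegister& byUse   = vregs[use];                      // LUse* converts to LAllocation*
//   VirtualRegister& byDef   = vregs[ins->getDef(0)];           // by LDefinition*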
static inline bool
IsNunbox(VirtualRegister* vreg)
{
#ifdef JS_NUNBOX32
return vreg->type() == LDefinition::TYPE ||
vreg->type() == LDefinition::PAYLOAD;
#else
return false;
#endif
}
static inline bool
IsSlotsOrElements(VirtualRegister* vreg)
{
return vreg->type() == LDefinition::SLOTS;
}
static inline bool
IsTraceable(VirtualRegister* reg)
{
if (reg->type() == LDefinition::OBJECT)
return true;
#ifdef JS_PUNBOX64
if (reg->type() == LDefinition::BOX)
return true;
#endif
return false;
}
template <typename VREG>
class LiveRangeAllocator : protected RegisterAllocator
{
protected:
// Computed information
BitSet* liveIn;
VirtualRegisterMap<VREG> vregs;
mozilla::Array<LiveInterval*, AnyRegister::Total> fixedIntervals;
// Union of all ranges in fixedIntervals, used to quickly determine
// whether an interval intersects with a fixed register.
LiveInterval* fixedIntervalsUnion;
// Allocation state
StackSlotAllocator stackSlotAllocator;
LiveRangeAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
: RegisterAllocator(mir, lir, graph),
liveIn(nullptr),
fixedIntervalsUnion(nullptr)
{
}
bool buildLivenessInfo();
bool init();
bool addFixedRangeAtHead(AnyRegister reg, CodePosition from, CodePosition to) {
if (!fixedIntervals[reg.code()]->addRangeAtHead(from, to))
return false;
return fixedIntervalsUnion->addRangeAtHead(from, to);
}
void validateVirtualRegisters()
{
#ifdef DEBUG
if (!js_JitOptions.checkGraphConsistency)
return;
for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
VirtualRegister* reg = &vregs[i];
LiveInterval* prev = nullptr;
for (size_t j = 0; j < reg->numIntervals(); j++) {
LiveInterval* interval = reg->getInterval(j);
MOZ_ASSERT(interval->vreg() == i);
MOZ_ASSERT(interval->index() == j);
if (interval->numRanges() == 0)
continue;
MOZ_ASSERT_IF(prev, prev->end() <= interval->start());
interval->validateRanges();
prev = interval;
}
}
#endif
}
bool addMove(LMoveGroup* moves, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
MOZ_ASSERT(*from->getAllocation() != *to->getAllocation());
return moves->add(from->getAllocation(), to->getAllocation(), type);
}
bool moveInput(LInstruction* ins, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
if (*from->getAllocation() == *to->getAllocation())
return true;
LMoveGroup* moves = getInputMoveGroup(ins);
return addMove(moves, from, to, type);
}
bool moveAfter(LInstruction* ins, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
if (*from->getAllocation() == *to->getAllocation())
return true;
LMoveGroup* moves = getMoveGroupAfter(ins);
return addMove(moves, from, to, type);
}
bool moveAtExit(LBlock* block, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
if (*from->getAllocation() == *to->getAllocation())
return true;
LMoveGroup* moves = block->getExitMoveGroup(alloc());
return addMove(moves, from, to, type);
}
bool moveAtEntry(LBlock* block, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
if (*from->getAllocation() == *to->getAllocation())
return true;
LMoveGroup* moves = block->getEntryMoveGroup(alloc());
return addMove(moves, from, to, type);
}
size_t findFirstNonCallSafepoint(CodePosition from) const
{
size_t i = 0;
for (; i < graph.numNonCallSafepoints(); i++) {
const LInstruction* ins = graph.getNonCallSafepoint(i);
if (from <= inputOf(ins))
break;
}
return i;
}
void addLiveRegistersForInterval(VirtualRegister* reg, LiveInterval* interval)
{
// Fill in the live register sets for all non-call safepoints.
LAllocation* a = interval->getAllocation();
if (!a->isRegister())
return;
// Don't add output registers to the safepoint.
CodePosition start = interval->start();
if (interval->index() == 0 && !reg->isTemp()) {
#ifdef CHECK_OSIPOINT_REGISTERS
// We don't add the output register to the safepoint,
// but it still might get added as one of the inputs.
// So eagerly add this reg to the safepoint clobbered registers.
if (reg->ins()->isInstruction()) {
if (LSafepoint* safepoint = reg->ins()->toInstruction()->safepoint())
safepoint->addClobberedRegister(a->toRegister());
}
#endif
start = start.next();
}
size_t i = findFirstNonCallSafepoint(start);
for (; i < graph.numNonCallSafepoints(); i++) {
LInstruction* ins = graph.getNonCallSafepoint(i);
CodePosition pos = inputOf(ins);
// Safepoints are sorted, so we can shortcut out of this loop
// if we go out of range.
if (interval->end() <= pos)
break;
if (!interval->covers(pos))
continue;
LSafepoint* safepoint = ins->safepoint();
safepoint->addLiveRegister(a->toRegister());
#ifdef CHECK_OSIPOINT_REGISTERS
if (reg->isTemp())
safepoint->addClobberedRegister(a->toRegister());
#endif
}
}
// Finds the first safepoint that is within range of an interval.
size_t findFirstSafepoint(const LiveInterval* interval, size_t startFrom) const
{
size_t i = startFrom;
for (; i < graph.numSafepoints(); i++) {
LInstruction* ins = graph.getSafepoint(i);
if (interval->start() <= inputOf(ins))
break;
}
return i;
}
void dumpVregs();
};
} // namespace jit
} // namespace js
#endif /* jit_LiveRangeAllocator_h */

View File

@ -191,8 +191,8 @@ AllocationIntegrityState::checkIntegrity(LBlock* block, LInstruction* ins,
if (ins->isMoveGroup()) {
LMoveGroup* group = ins->toMoveGroup();
for (int i = group->numMoves() - 1; i >= 0; i--) {
if (*group->getMove(i).to() == alloc) {
alloc = *group->getMove(i).from();
if (group->getMove(i).to() == alloc) {
alloc = group->getMove(i).from();
break;
}
}
@ -412,8 +412,8 @@ AllocationIntegrityState::dump()
LMoveGroup* group = ins->toMoveGroup();
for (int i = group->numMoves() - 1; i >= 0; i--) {
// Use two printfs, as LAllocation::toString is not reentrant.
fprintf(stderr, " [%s", group->getMove(i).from()->toString());
fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
fprintf(stderr, " [%s", group->getMove(i).from().toString());
fprintf(stderr, " -> %s]", group->getMove(i).to().toString());
}
fprintf(stderr, "\n");
continue;
@ -552,8 +552,8 @@ RegisterAllocator::dumpInstructions()
LMoveGroup* group = ins->toMoveGroup();
for (int i = group->numMoves() - 1; i >= 0; i--) {
// Use two printfs, as LAllocation::toString is not reentrant.
fprintf(stderr, " [%s", group->getMove(i).from()->toString());
fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
fprintf(stderr, " [%s", group->getMove(i).from().toString());
fprintf(stderr, " -> %s]", group->getMove(i).to().toString());
}
fprintf(stderr, "\n");
continue;

View File

@ -186,11 +186,11 @@ StupidAllocator::syncRegister(LInstruction* ins, RegisterIndex index)
{
if (registers[index].dirty) {
LMoveGroup* input = getInputMoveGroup(ins);
LAllocation* source = new(alloc()) LAllocation(registers[index].reg);
LAllocation source(registers[index].reg);
uint32_t existing = registers[index].vreg;
LAllocation* dest = stackLocation(existing);
input->addAfter(source, dest, registers[index].type);
input->addAfter(source, *dest, registers[index].type);
registers[index].dirty = false;
}
@ -219,8 +219,8 @@ StupidAllocator::loadRegister(LInstruction* ins, uint32_t vreg, RegisterIndex in
// Load a vreg from its stack location to a register.
LMoveGroup* input = getInputMoveGroup(ins);
LAllocation* source = stackLocation(vreg);
LAllocation* dest = new(alloc()) LAllocation(registers[index].reg);
input->addAfter(source, dest, type);
LAllocation dest(registers[index].reg);
input->addAfter(*source, dest, type);
registers[index].set(vreg, ins);
registers[index].type = type;
}
@ -321,7 +321,7 @@ StupidAllocator::syncForBlockEnd(LBlock* block, LInstruction* ins)
}
}
group->add(source, dest, phi->getDef(0)->type());
group->add(*source, *dest, phi->getDef(0)->type());
}
}
}

View File

@ -1009,11 +1009,11 @@ CodeGeneratorARM::visitPowHalfD(LPowHalfD* ins)
}
MoveOperand
CodeGeneratorARM::toMoveOperand(const LAllocation* a) const
CodeGeneratorARM::toMoveOperand(LAllocation a) const
{
if (a->isGeneralReg())
if (a.isGeneralReg())
return MoveOperand(ToRegister(a));
if (a->isFloatReg())
if (a.isFloatReg())
return MoveOperand(ToFloatRegister(a));
int32_t offset = ToStackOffset(a);
MOZ_ASSERT((offset & 3) == 0);

View File

@ -43,7 +43,7 @@ class CodeGeneratorARM : public CodeGeneratorShared
return ToOperand(def->output());
}
MoveOperand toMoveOperand(const LAllocation* a) const;
MoveOperand toMoveOperand(LAllocation a) const;
void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot);
void bailoutFrom(Label* label, LSnapshot* snapshot);

View File

@ -948,11 +948,11 @@ CodeGeneratorMIPS::visitPowHalfD(LPowHalfD* ins)
}
MoveOperand
CodeGeneratorMIPS::toMoveOperand(const LAllocation* a) const
CodeGeneratorMIPS::toMoveOperand(LAllocation a) const
{
if (a->isGeneralReg())
if (a.isGeneralReg())
return MoveOperand(ToRegister(a));
if (a->isFloatReg()) {
if (a.isFloatReg()) {
return MoveOperand(ToFloatRegister(a));
}
int32_t offset = ToStackOffset(a);

View File

@ -58,7 +58,7 @@ class CodeGeneratorMIPS : public CodeGeneratorShared
return ToOperand(def->output());
}
MoveOperand toMoveOperand(const LAllocation* a) const;
MoveOperand toMoveOperand(LAllocation a) const;
template <typename T1, typename T2>
void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {

View File

@ -235,10 +235,13 @@ class CodeGeneratorShared : public LElementVisitor
return offset;
}
inline int32_t ToStackOffset(LAllocation a) const {
if (a.isArgument())
return ArgToStackOffset(a.toArgument()->index());
return SlotToStackOffset(a.toStackSlot()->slot());
}
inline int32_t ToStackOffset(const LAllocation* a) const {
if (a->isArgument())
return ArgToStackOffset(a->toArgument()->index());
return SlotToStackOffset(a->toStackSlot()->slot());
return ToStackOffset(*a);
}
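// Illustrative usage: both spellings resolve to the same computation.
//   int32_t byValue   = ToStackOffset(someAllocation);    // by-value overload
//   int32_t byPointer = ToStackOffset(&someAllocation);   // pointer overload forwards to it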
uint32_t frameSize() const {

View File

@ -1528,11 +1528,11 @@ CodeGeneratorX86Shared::visitUrshD(LUrshD* ins)
}
MoveOperand
CodeGeneratorX86Shared::toMoveOperand(const LAllocation* a) const
CodeGeneratorX86Shared::toMoveOperand(LAllocation a) const
{
if (a->isGeneralReg())
if (a.isGeneralReg())
return MoveOperand(ToRegister(a));
if (a->isFloatReg())
if (a.isFloatReg())
return MoveOperand(ToFloatRegister(a));
return MoveOperand(StackPointer, ToStackOffset(a));
}

View File

@ -115,7 +115,7 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
return ToOperand(def->output());
}
MoveOperand toMoveOperand(const LAllocation* a) const;
MoveOperand toMoveOperand(LAllocation a) const;
void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot);
void bailoutIf(Assembler::DoubleCondition condition, LSnapshot* snapshot);

View File

@ -12,6 +12,7 @@
#define jsexn_h
#include "jsapi.h"
#include "jscntxt.h"
#include "NamespaceImports.h"
namespace js {

View File

@ -213,7 +213,6 @@ UNIFIED_SOURCES += [
'jit/JSONSpewer.cpp',
'jit/LICM.cpp',
'jit/LIR.cpp',
'jit/LiveRangeAllocator.cpp',
'jit/LoopUnroller.cpp',
'jit/Lowering.cpp',
'jit/MacroAssembler.cpp',

View File

@ -3387,9 +3387,9 @@ XPCJSRuntime::XPCJSRuntime(nsXPConnect* aXPConnect)
// were not taken at the time of this writing, so we hazard a guess that
// ASAN builds have roughly thrice the stack overhead as normal builds.
// On normal builds, the largest stack frame size we might encounter is
// 9.0k (see above), so let's use a buffer of 9.0 * 3 * 10 = 270k.
// 9.0k (see above), so let's use a buffer of 9.0 * 5 * 10 = 450k.
const size_t kStackQuota = 2 * kDefaultStackQuota;
const size_t kTrustedScriptBuffer = 270 * 1024;
const size_t kTrustedScriptBuffer = 450 * 1024;
#elif defined(XP_WIN)
// 1MB is the default stack size on Windows, so use 900k.
// Windows PGO stack frames have unfortunately gotten pretty large lately. :-(