From 1b93ef6944486ae252757e57b8e159a2eb22125c Mon Sep 17 00:00:00 2001 From: Wes Kocher Date: Fri, 8 May 2015 19:44:11 -0700 Subject: [PATCH] Backed out changeset 4963ecd92915 (bug 1067610) for m-oth permafail CLOSED TREE --- .../saved-stacks/async-max-frame-count.js | 9 +- js/src/jit/BacktrackingAllocator.cpp | 2643 ++++++----------- js/src/jit/BacktrackingAllocator.h | 661 +---- js/src/jit/C1Spewer.cpp | 40 +- js/src/jit/C1Spewer.h | 6 +- js/src/jit/CodeGenerator.cpp | 8 +- js/src/jit/InlineList.h | 20 +- js/src/jit/JSONSpewer.cpp | 32 +- js/src/jit/JSONSpewer.h | 2 +- js/src/jit/JitSpewer.cpp | 4 +- js/src/jit/LIR-Common.h | 22 +- js/src/jit/LIR.cpp | 38 +- js/src/jit/LiveRangeAllocator.cpp | 996 +++++++ js/src/jit/LiveRangeAllocator.h | 758 +++++ js/src/jit/RegisterAllocator.cpp | 12 +- js/src/jit/StupidAllocator.cpp | 10 +- js/src/jit/arm/CodeGenerator-arm.cpp | 6 +- js/src/jit/arm/CodeGenerator-arm.h | 2 +- js/src/jit/mips/CodeGenerator-mips.cpp | 6 +- js/src/jit/mips/CodeGenerator-mips.h | 2 +- js/src/jit/shared/CodeGenerator-shared.h | 9 +- .../x86-shared/CodeGenerator-x86-shared.cpp | 6 +- .../jit/x86-shared/CodeGenerator-x86-shared.h | 2 +- js/src/jsexn.h | 1 - js/src/moz.build | 1 + 25 files changed, 2845 insertions(+), 2451 deletions(-) create mode 100644 js/src/jit/LiveRangeAllocator.cpp create mode 100644 js/src/jit/LiveRangeAllocator.h diff --git a/js/src/jit-test/tests/saved-stacks/async-max-frame-count.js b/js/src/jit-test/tests/saved-stacks/async-max-frame-count.js index 4717897ac3e..3a9ceb9ea6d 100644 --- a/js/src/jit-test/tests/saved-stacks/async-max-frame-count.js +++ b/js/src/jit-test/tests/saved-stacks/async-max-frame-count.js @@ -13,14 +13,7 @@ function recur(n, limit) { function checkRecursion(n, limit) { print("checkRecursion(" + uneval(n) + ", " + uneval(limit) + ")"); - try { - var stack = recur(n, limit); - } catch (e) { - // Some platforms, like ASAN builds, can end up overrecursing. Tolerate - // these failures. - assertEq(/too much recursion/.test("" + e), true); - return; - } + var stack = recur(n, limit); // Async stacks are limited even if we didn't ask for a limit. There is a // default limit on frames attached on top of any synchronous frames. 
In this diff --git a/js/src/jit/BacktrackingAllocator.cpp b/js/src/jit/BacktrackingAllocator.cpp index b7f48fd0994..e44fde5bae6 100644 --- a/js/src/jit/BacktrackingAllocator.cpp +++ b/js/src/jit/BacktrackingAllocator.cpp @@ -12,349 +12,9 @@ using namespace js::jit; using mozilla::DebugOnly; -///////////////////////////////////////////////////////////////////// -// Utility -///////////////////////////////////////////////////////////////////// - -static inline bool -SortBefore(UsePosition* a, UsePosition* b) -{ - return a->pos <= b->pos; -} - -static inline bool -SortBefore(LiveRange::BundleLink* a, LiveRange::BundleLink* b) -{ - return LiveRange::get(a)->from() <= LiveRange::get(b)->from(); -} - -static inline bool -SortBefore(LiveRange::RegisterLink* a, LiveRange::RegisterLink* b) -{ - return LiveRange::get(a)->from() <= LiveRange::get(b)->from(); -} - -template -static inline void -InsertSortedList(InlineForwardList &list, T* value) -{ - if (list.empty()) { - list.pushFront(value); - return; - } - - if (SortBefore(list.back(), value)) { - list.pushBack(value); - return; - } - - T* prev = nullptr; - for (InlineForwardListIterator iter = list.begin(); iter; iter++) { - if (SortBefore(value, *iter)) - break; - prev = *iter; - } - - if (prev) - list.insertAfter(prev, value); - else - list.pushFront(value); -} - -///////////////////////////////////////////////////////////////////// -// LiveRange -///////////////////////////////////////////////////////////////////// - -void -LiveRange::addUse(UsePosition* use) -{ - MOZ_ASSERT(covers(use->pos)); - InsertSortedList(uses_, use); -} - -void -LiveRange::distributeUses(LiveRange* other) -{ - MOZ_ASSERT(other->vreg() == vreg()); - MOZ_ASSERT(this != other); - - // Move over all uses which fit in |other|'s boundaries. - for (UsePositionIterator iter = usesBegin(); iter; ) { - UsePosition* use = *iter; - if (other->covers(use->pos)) { - uses_.removeAndIncrement(iter); - other->addUse(use); - } else { - iter++; - } - } - - // Distribute the definition to |other| as well, if possible. 
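The InsertSortedList helper removed above keeps use and range lists ordered by position on a singly linked forward list: append when the new element sorts at or after the tail, otherwise walk to the first element the new value must precede. A minimal standalone sketch of that pattern, using a hypothetical Node type instead of the allocator's InlineForwardList and UsePosition/LiveRange links:

    #include <cstdio>

    // Hypothetical singly linked node keyed by a position; stands in for the
    // use/range links kept sorted by the removed InsertSortedList helper.
    struct Node {
        unsigned pos;
        Node* next = nullptr;
    };

    struct ForwardList {
        Node* head = nullptr;
        Node* tail = nullptr;

        // Insert keeping the list sorted by pos. The append fast path uses <=,
        // mirroring SortBefore, so equal keys at the back keep insertion order.
        void insertSorted(Node* value) {
            if (!head) {                        // empty list
                head = tail = value;
                return;
            }
            if (tail->pos <= value->pos) {      // common case: append at the back
                tail->next = value;
                tail = value;
                return;
            }
            Node* prev = nullptr;
            for (Node* iter = head; iter; iter = iter->next) {
                if (value->pos <= iter->pos)
                    break;
                prev = iter;
            }
            if (prev) {
                value->next = prev->next;
                prev->next = value;
            } else {
                value->next = head;
                head = value;
            }
        }
    };

    int main() {
        Node a{5}, b{2}, c{7}, d{5};
        ForwardList list;
        Node* order[] = {&a, &b, &c, &d};
        for (Node* n : order)
            list.insertSorted(n);
        for (Node* n = list.head; n; n = n->next)
            printf("%u ", n->pos);              // prints: 2 5 5 7
        printf("\n");
    }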
- if (hasDefinition() && from() == other->from()) - other->setHasDefinition(); -} - -bool -LiveRange::contains(LiveRange* other) const -{ - return from() <= other->from() && to() >= other->to(); -} - -void -LiveRange::intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const -{ - MOZ_ASSERT(pre->empty() && inside->empty() && post->empty()); - - CodePosition innerFrom = from(); - if (from() < other->from()) { - if (to() < other->from()) { - *pre = range_; - return; - } - *pre = Range(from(), other->from()); - innerFrom = other->from(); - } - - CodePosition innerTo = to(); - if (to() > other->to()) { - if (from() >= other->to()) { - *post = range_; - return; - } - *post = Range(other->to(), to()); - innerTo = other->to(); - } - - if (innerFrom != innerTo) - *inside = Range(innerFrom, innerTo); -} - -///////////////////////////////////////////////////////////////////// -// LiveBundle -///////////////////////////////////////////////////////////////////// - -#ifdef DEBUG -size_t -LiveBundle::numRanges() const -{ - size_t count = 0; - for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) - count++; - return count; -} -#endif // DEBUG - -LiveRange* -LiveBundle::rangeFor(CodePosition pos) const -{ - for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - if (range->covers(pos)) - return range; - } - return nullptr; -} - -void -LiveBundle::addRange(LiveRange* range) -{ - MOZ_ASSERT(!range->bundle()); - range->setBundle(this); - InsertSortedList(ranges_, &range->bundleLink); -} - -bool -LiveBundle::addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to) -{ - LiveRange* range = LiveRange::New(alloc, vreg, from, to); - if (!range) - return false; - addRange(range); - return true; -} - -bool -LiveBundle::addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange, - CodePosition from, CodePosition to) -{ - LiveRange* range = LiveRange::New(alloc, oldRange->vreg(), from, to); - if (!range) - return false; - addRange(range); - oldRange->distributeUses(range); - return true; -} - -///////////////////////////////////////////////////////////////////// -// VirtualRegister -///////////////////////////////////////////////////////////////////// - -bool -VirtualRegister::addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to) -{ - MOZ_ASSERT(from < to); - - // Mark [from,to) as a live range for this register during the initial - // liveness analysis, coalescing with any existing overlapping ranges. - - LiveRange* prev = nullptr; - LiveRange* merged = nullptr; - for (LiveRange::RegisterLinkIterator iter(rangesBegin()); iter; ) { - LiveRange* existing = LiveRange::get(*iter); - - if (from > existing->to()) { - // The new range should go after this one. - prev = existing; - iter++; - continue; - } - - if (to.next() < existing->from()) { - // The new range should go before this one. - break; - } - - if (!merged) { - // This is the first old range we've found that overlaps the new - // range. Extend this one to cover its union with the new range. - merged = existing; - - if (from < existing->from()) - existing->setFrom(from); - if (to > existing->to()) - existing->setTo(to); - - // Continue searching to see if any other old ranges can be - // coalesced with the new merged range. - iter++; - continue; - } - - // Coalesce this range into the previous range we merged into. 
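The removed LiveRange::intersect above carves one range into the pieces before, inside, and after another range; it is the primitive that later splitting decisions are built on. A minimal sketch of the same computation over half-open [from, to) spans, using plain structs rather than the allocator's CodePosition/Range types (names here are illustrative only):

    #include <cassert>

    // Hypothetical half-open range [from, to).
    struct Range {
        unsigned from = 0, to = 0;
        bool empty() const { return from == to; }
    };

    // Split |a| against |b| into the part strictly before b, the overlap, and
    // the part strictly after b, mirroring the structure of intersect().
    void intersect(const Range& a, const Range& b,
                   Range* pre, Range* inside, Range* post) {
        unsigned innerFrom = a.from;
        if (a.from < b.from) {
            if (a.to < b.from) {        // disjoint, entirely before b
                *pre = a;
                return;
            }
            *pre = {a.from, b.from};
            innerFrom = b.from;
        }

        unsigned innerTo = a.to;
        if (a.to > b.to) {
            if (a.from >= b.to) {       // disjoint, entirely after b
                *post = a;
                return;
            }
            *post = {b.to, a.to};
            innerTo = b.to;
        }

        if (innerFrom != innerTo)
            *inside = {innerFrom, innerTo};
    }

    int main() {
        Range pre, inside, post;
        intersect({2, 10}, {5, 8}, &pre, &inside, &post);
        assert(pre.from == 2 && pre.to == 5);
        assert(inside.from == 5 && inside.to == 8);
        assert(post.from == 8 && post.to == 10);
    }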
- MOZ_ASSERT(existing->from() >= merged->from()); - if (existing->to() > merged->to()) - merged->setTo(existing->to()); - - MOZ_ASSERT(!existing->hasDefinition()); - existing->distributeUses(merged); - MOZ_ASSERT(!existing->hasUses()); - - ranges_.removeAndIncrement(iter); - } - - if (!merged) { - // The new range does not overlap any existing range for the vreg. - LiveRange* range = LiveRange::New(alloc, vreg(), from, to); - if (!range) - return false; - - if (prev) - ranges_.insertAfter(&prev->registerLink, &range->registerLink); - else - ranges_.pushFront(&range->registerLink); - } - - return true; -} - -void -VirtualRegister::addInitialUse(UsePosition* use) -{ - LiveRange::get(*rangesBegin())->addUse(use); -} - -void -VirtualRegister::setInitialDefinition(CodePosition from) -{ - LiveRange* first = LiveRange::get(*rangesBegin()); - MOZ_ASSERT(from >= first->from()); - first->setFrom(from); - first->setHasDefinition(); -} - -LiveRange* -VirtualRegister::rangeFor(CodePosition pos) const -{ - for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - if (range->covers(pos)) - return range; - } - return nullptr; -} - -void -VirtualRegister::addRange(LiveRange* range) -{ - InsertSortedList(ranges_, &range->registerLink); -} - -void -VirtualRegister::removeRange(LiveRange* range) -{ - for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) { - LiveRange* existing = LiveRange::get(*iter); - if (existing == range) { - ranges_.removeAt(iter); - return; - } - } - MOZ_CRASH(); -} - -///////////////////////////////////////////////////////////////////// -// BacktrackingAllocator -///////////////////////////////////////////////////////////////////// - -// This function pre-allocates and initializes as much global state as possible -// to avoid littering the algorithms with memory management cruft. bool BacktrackingAllocator::init() { - if (!RegisterAllocator::init()) - return false; - - liveIn = mir->allocate(graph.numBlockIds()); - if (!liveIn) - return false; - - callRanges = LiveBundle::New(alloc()); - - size_t numVregs = graph.numVirtualRegisters(); - if (!vregs.init(mir->alloc(), numVregs)) - return false; - memset(&vregs[0], 0, sizeof(VirtualRegister) * numVregs); - for (uint32_t i = 0; i < numVregs; i++) - new(&vregs[i]) VirtualRegister(); - - // Build virtual register objects. 
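The removed addInitialRange above merges a new [from, to) span into a register's sorted range list, coalescing with everything it overlaps; note the to.next() < existing->from() test, which also merges ranges that merely abut. A rough standalone equivalent over a sorted std::vector, showing only the interval arithmetic (the real code also moves uses and the definition flag into the merged range and works on an intrusive list):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Range { unsigned from, to; };   // half-open, like the allocator's ranges

    // Insert [from, to) into |ranges| (sorted, non-overlapping), merging any
    // existing ranges that overlap or abut the new span.
    void addInitialRange(std::vector<Range>& ranges, unsigned from, unsigned to) {
        std::vector<Range> out;
        Range merged{from, to};
        for (const Range& r : ranges) {
            if (r.to < merged.from || merged.to < r.from) {
                out.push_back(r);                           // disjoint: keep as-is
            } else {
                merged.from = std::min(merged.from, r.from); // coalesce
                merged.to = std::max(merged.to, r.to);
            }
        }
        out.push_back(merged);
        std::sort(out.begin(), out.end(),
                  [](const Range& a, const Range& b) { return a.from < b.from; });
        ranges = out;
    }

    int main() {
        std::vector<Range> ranges = {{1, 3}, {6, 8}, {12, 14}};
        addInitialRange(ranges, 3, 7);          // bridges the first two ranges
        for (const Range& r : ranges)
            printf("[%u,%u) ", r.from, r.to);   // prints: [1,8) [12,14)
        printf("\n");
    }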
- for (size_t i = 0; i < graph.numBlocks(); i++) { - if (mir->shouldCancel("Create data structures (main loop)")) - return false; - - LBlock* block = graph.getBlock(i); - for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) { - for (size_t j = 0; j < ins->numDefs(); j++) { - LDefinition* def = ins->getDef(j); - if (def->isBogusTemp()) - continue; - vreg(def).init(*ins, def, /* isTemp = */ false); - } - - for (size_t j = 0; j < ins->numTemps(); j++) { - LDefinition* def = ins->getTemp(j); - if (def->isBogusTemp()) - continue; - vreg(def).init(*ins, def, /* isTemp = */ true); - } - } - for (size_t j = 0; j < block->numPhis(); j++) { - LPhi* phi = block->getPhi(j); - LDefinition* def = phi->getDef(0); - vreg(def).init(phi, def, /* isTemp = */ false); - } - } - LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet()); while (!remainingRegisters.emptyGeneral()) { AnyRegister reg = AnyRegister(remainingRegisters.takeAnyGeneral()); @@ -369,6 +29,13 @@ BacktrackingAllocator::init() for (size_t i = 0; i < AnyRegister::Total; i++) { registers[i].reg = AnyRegister::FromCode(i); registers[i].allocations.setAllocator(lifoAlloc); + + LiveInterval* fixed = fixedIntervals[i]; + for (size_t j = 0; j < fixed->numRanges(); j++) { + AllocatedRange range(fixed, fixed->getRange(j)); + if (!registers[i].allocations.insert(range)) + return false; + } } hotcode.setAllocator(lifoAlloc); @@ -378,6 +45,8 @@ BacktrackingAllocator::init() // crapshoot, so just mark the bodies of inner loops as hot and everything // else as cold. + LiveInterval* hotcodeInterval = LiveInterval::New(alloc(), 0); + LBlock* backedge = nullptr; for (size_t i = 0; i < graph.numBlocks(); i++) { LBlock* block = graph.getBlock(i); @@ -390,353 +59,17 @@ BacktrackingAllocator::init() if (block == backedge) { LBlock* header = block->mir()->loopHeaderOfBackedge()->lir(); - LiveRange* range = LiveRange::New(alloc(), 0, entryOf(header), exitOf(block).next()); - if (!range || !hotcode.insert(range)) + CodePosition from = entryOf(header); + CodePosition to = exitOf(block).next(); + if (!hotcodeInterval->addRange(from, to)) return false; } } - return true; -} - -bool -BacktrackingAllocator::addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to) -{ - LiveRange* range = LiveRange::New(alloc(), 0, from, to); - return range && registers[reg.code()].allocations.insert(range); -} - -#ifdef DEBUG -// Returns true iff ins has a def/temp reusing the input allocation. -static bool -IsInputReused(LInstruction* ins, LUse* use) -{ - for (size_t i = 0; i < ins->numDefs(); i++) { - if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT && - ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use) - { - return true; - } - } - - for (size_t i = 0; i < ins->numTemps(); i++) { - if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT && - ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use) - { - return true; - } - } - - return false; -} -#endif - -/* - * This function builds up liveness ranges for all virtual registers - * defined in the function. Additionally, it populates the liveIn array with - * information about which registers are live at the beginning of a block, to - * aid resolution and reification in a later phase. - * - * The algorithm is based on the one published in: - * - * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on - * SSA Form." Proceedings of the International Symposium on Code Generation - * and Optimization. Toronto, Ontario, Canada, ACM. 
2010. 170-79. PDF. - * - * The algorithm operates on blocks ordered such that dominators of a block - * are before the block itself, and such that all blocks of a loop are - * contiguous. It proceeds backwards over the instructions in this order, - * marking registers live at their uses, ending their live ranges at - * definitions, and recording which registers are live at the top of every - * block. To deal with loop backedges, registers live at the beginning of - * a loop gain a range covering the entire loop. - */ -bool -BacktrackingAllocator::buildLivenessInfo() -{ - JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis"); - - Vector loopWorkList; - BitSet loopDone(graph.numBlockIds()); - if (!loopDone.init(alloc())) - return false; - - for (size_t i = graph.numBlocks(); i > 0; i--) { - if (mir->shouldCancel("Build Liveness Info (main loop)")) + for (size_t i = 0; i < hotcodeInterval->numRanges(); i++) { + AllocatedRange range(hotcodeInterval, hotcodeInterval->getRange(i)); + if (!hotcode.insert(range)) return false; - - LBlock* block = graph.getBlock(i - 1); - MBasicBlock* mblock = block->mir(); - - BitSet& live = liveIn[mblock->id()]; - new (&live) BitSet(graph.numVirtualRegisters()); - if (!live.init(alloc())) - return false; - - // Propagate liveIn from our successors to us. - for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) { - MBasicBlock* successor = mblock->lastIns()->getSuccessor(i); - // Skip backedges, as we fix them up at the loop header. - if (mblock->id() < successor->id()) - live.insertAll(liveIn[successor->id()]); - } - - // Add successor phis. - if (mblock->successorWithPhis()) { - LBlock* phiSuccessor = mblock->successorWithPhis()->lir(); - for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) { - LPhi* phi = phiSuccessor->getPhi(j); - LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor()); - uint32_t reg = use->toUse()->virtualRegister(); - live.insert(reg); - } - } - - // Registers are assumed alive for the entire block, a define shortens - // the range to the point of definition. - for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) { - if (!vregs[*liveRegId].addInitialRange(alloc(), entryOf(block), exitOf(block).next())) - return false; - } - - // Shorten the front end of ranges for live variables to their point of - // definition, if found. - for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) { - // Calls may clobber registers, so force a spill and reload around the callsite. 
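The buildLivenessInfo comments above describe the Wimmer & Franz construction: walk blocks in reverse order, seed each block's live set from its successors' liveIn, then walk the instructions backwards, ending ranges at definitions and extending them at uses. A toy block-level sketch of that dataflow, with hypothetical Block/Ins types; it omits phis, loop backedge fix-ups, and the per-position precision of the real pass:

    #include <cstdio>
    #include <set>
    #include <vector>

    // Hypothetical instruction: one optional def and a list of used vreg ids.
    struct Ins { int def = -1; std::vector<int> uses; };

    // Hypothetical block: instructions plus indices of successor blocks.
    struct Block { std::vector<Ins> code; std::vector<size_t> succs; };

    // Backward liveness over blocks already ordered so dominators come first.
    // liveIn[b] ends up holding the vregs live on entry to block b.
    std::vector<std::set<int>> buildLiveIn(const std::vector<Block>& blocks) {
        std::vector<std::set<int>> liveIn(blocks.size());
        for (size_t i = blocks.size(); i > 0; i--) {
            const Block& block = blocks[i - 1];
            std::set<int> live;
            for (size_t s : block.succs)                // union of successors' liveIn
                live.insert(liveIn[s].begin(), liveIn[s].end());
            for (auto ins = block.code.rbegin(); ins != block.code.rend(); ++ins) {
                if (ins->def >= 0)
                    live.erase(ins->def);               // definition ends the range
                for (int use : ins->uses)
                    live.insert(use);                   // use makes the vreg live
            }
            liveIn[i - 1] = live;
        }
        return liveIn;
    }

    int main() {
        // Block 0 defines v1 and v2; block 1 uses v1 to define v3.
        std::vector<Block> blocks(2);
        blocks[0].code = {{1, {}}, {2, {}}};
        blocks[0].succs = {1};
        blocks[1].code = {{3, {1}}};
        auto liveIn = buildLiveIn(blocks);
        printf("liveIn(1) contains v1: %d\n", (int)liveIn[1].count(1));  // 1
        printf("liveIn(0) is empty: %d\n", (int)liveIn[0].empty());      // 1
    }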
- if (ins->isCall()) { - for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more(); iter++) { - bool found = false; - for (size_t i = 0; i < ins->numDefs(); i++) { - if (ins->getDef(i)->isFixed() && - ins->getDef(i)->output()->aliases(LAllocation(*iter))) { - found = true; - break; - } - } - if (!found) { - if (!addInitialFixedRange(*iter, outputOf(*ins), outputOf(*ins).next())) - return false; - } - } - if (!callRanges->addRange(alloc(), 0, outputOf(*ins), outputOf(*ins).next())) - return false; - } - DebugOnly hasDoubleDef = false; - DebugOnly hasFloat32Def = false; - for (size_t i = 0; i < ins->numDefs(); i++) { - LDefinition* def = ins->getDef(i); - if (def->isBogusTemp()) - continue; -#ifdef DEBUG - if (def->type() == LDefinition::DOUBLE) - hasDoubleDef = true; - if (def->type() == LDefinition::FLOAT32) - hasFloat32Def = true; -#endif - CodePosition from = outputOf(*ins); - - if (def->policy() == LDefinition::MUST_REUSE_INPUT) { - // MUST_REUSE_INPUT is implemented by allocating an output - // register and moving the input to it. Register hints are - // used to avoid unnecessary moves. We give the input an - // LUse::ANY policy to avoid allocating a register for the - // input. - LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse(); - MOZ_ASSERT(inputUse->policy() == LUse::REGISTER); - MOZ_ASSERT(inputUse->usedAtStart()); - *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true); - } - - if (!vreg(def).addInitialRange(alloc(), from, from.next())) - return false; - vreg(def).setInitialDefinition(from); - live.remove(def->virtualRegister()); - } - - for (size_t i = 0; i < ins->numTemps(); i++) { - LDefinition* temp = ins->getTemp(i); - if (temp->isBogusTemp()) - continue; - - // Normally temps are considered to cover both the input - // and output of the associated instruction. In some cases - // though we want to use a fixed register as both an input - // and clobbered register in the instruction, so watch for - // this and shorten the temp to cover only the output. - CodePosition from = inputOf(*ins); - if (temp->policy() == LDefinition::FIXED) { - AnyRegister reg = temp->output()->toRegister(); - for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) { - if (alloc->isUse()) { - LUse* use = alloc->toUse(); - if (use->isFixedRegister()) { - if (GetFixedRegister(vreg(use).def(), use) == reg) - from = outputOf(*ins); - } - } - } - } - - CodePosition to = - ins->isCall() ? outputOf(*ins) : outputOf(*ins).next(); - - if (!vreg(temp).addInitialRange(alloc(), from, to)) - return false; - vreg(temp).setInitialDefinition(from); - } - - DebugOnly hasUseRegister = false; - DebugOnly hasUseRegisterAtStart = false; - - for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) { - if (inputAlloc->isUse()) { - LUse* use = inputAlloc->toUse(); - - // Call uses should always be at-start or fixed, since - // calls use all registers. - MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(), - use->isFixedRegister() || use->usedAtStart()); - -#ifdef DEBUG - // Don't allow at-start call uses if there are temps of the same kind, - // so that we don't assign the same register. - if (ins->isCall() && use->usedAtStart()) { - for (size_t i = 0; i < ins->numTemps(); i++) - MOZ_ASSERT(vreg(ins->getTemp(i)).type() != vreg(use).type()); - } - - // If there are both useRegisterAtStart(x) and useRegister(y) - // uses, we may assign the same register to both operands - // (bug 772830). 
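In the removed code above, calls force everything live across them out of registers: every allocatable physical register that is not a fixed output of the call gets a one-position fixed range at the call's output position, so the allocator can never keep a value in a register over the callsite. A sketch of that idea with hypothetical types; the real code also records the call in callRanges (removed) or fixed intervals (restored) for later splitting heuristics:

    #include <cstdio>
    #include <vector>

    struct Range { unsigned from, to; };        // half-open [from, to)

    // Hypothetical per-physical-register state: fixed ranges no vreg may overlap.
    struct PhysicalRegister { std::vector<Range> fixedRanges; };

    // Block every register at the call's output position, except registers the
    // call writes its results into (their fixed defs already pin them).
    void blockRegistersAtCall(std::vector<PhysicalRegister>& registers,
                              const std::vector<size_t>& fixedCallDefs,
                              unsigned callOutputPos) {
        for (size_t i = 0; i < registers.size(); i++) {
            bool isCallDef = false;
            for (size_t def : fixedCallDefs) {
                if (def == i) { isCallDef = true; break; }
            }
            if (!isCallDef)
                registers[i].fixedRanges.push_back({callOutputPos, callOutputPos + 1});
        }
    }

    int main() {
        std::vector<PhysicalRegister> registers(4);
        blockRegistersAtCall(registers, {0}, 20);   // call at position 20, result in reg 0
        for (size_t i = 0; i < registers.size(); i++)
            printf("reg %zu blocked ranges: %zu\n", i, registers[i].fixedRanges.size());
        // reg 0 has none; regs 1-3 each have the one-position blocking range
    }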
Don't allow this for now. - if (use->policy() == LUse::REGISTER) { - if (use->usedAtStart()) { - if (!IsInputReused(*ins, use)) - hasUseRegisterAtStart = true; - } else { - hasUseRegister = true; - } - } - MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart)); -#endif - - // Don't treat RECOVERED_INPUT uses as keeping the vreg alive. - if (use->policy() == LUse::RECOVERED_INPUT) - continue; - - // Fixed uses on calls are specially overridden to happen - // at the input position. - CodePosition to = - (use->usedAtStart() || (ins->isCall() && use->isFixedRegister())) - ? inputOf(*ins) - : outputOf(*ins); - if (use->isFixedRegister()) { - LAllocation reg(AnyRegister::FromCode(use->registerCode())); - for (size_t i = 0; i < ins->numDefs(); i++) { - LDefinition* def = ins->getDef(i); - if (def->policy() == LDefinition::FIXED && *def->output() == reg) - to = inputOf(*ins); - } - } - - if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next())) - return false; - UsePosition* usePosition = new(alloc()) UsePosition(use, to); - if (!usePosition) - return false; - vreg(use).addInitialUse(usePosition); - live.insert(use->virtualRegister()); - } - } - } - - // Phis have simultaneous assignment semantics at block begin, so at - // the beginning of the block we can be sure that liveIn does not - // contain any phi outputs. - for (unsigned int i = 0; i < block->numPhis(); i++) { - LDefinition* def = block->getPhi(i)->getDef(0); - if (live.contains(def->virtualRegister())) { - live.remove(def->virtualRegister()); - } else { - // This is a dead phi, so add a dummy range over all phis. This - // can go away if we have an earlier dead code elimination pass. - CodePosition entryPos = entryOf(block); - if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next())) - return false; - } - } - - if (mblock->isLoopHeader()) { - // A divergence from the published algorithm is required here, as - // our block order does not guarantee that blocks of a loop are - // contiguous. As a result, a single live range spanning the - // loop is not possible. Additionally, we require liveIn in a later - // pass for resolution, so that must also be fixed up here. - MBasicBlock* loopBlock = mblock->backedge(); - while (true) { - // Blocks must already have been visited to have a liveIn set. - MOZ_ASSERT(loopBlock->id() >= mblock->id()); - - // Add a range for this entire loop block - CodePosition from = entryOf(loopBlock->lir()); - CodePosition to = exitOf(loopBlock->lir()).next(); - - for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) { - if (!vregs[*liveRegId].addInitialRange(alloc(), from, to)) - return false; - } - - // Fix up the liveIn set. - liveIn[loopBlock->id()].insertAll(live); - - // Make sure we don't visit this node again - loopDone.insert(loopBlock->id()); - - // If this is the loop header, any predecessors are either the - // backedge or out of the loop, so skip any predecessors of - // this block - if (loopBlock != mblock) { - for (size_t i = 0; i < loopBlock->numPredecessors(); i++) { - MBasicBlock* pred = loopBlock->getPredecessor(i); - if (loopDone.contains(pred->id())) - continue; - if (!loopWorkList.append(pred)) - return false; - } - } - - // Terminate loop if out of work. - if (loopWorkList.empty()) - break; - - // Grab the next block off the work list, skipping any OSR block. 
- MBasicBlock* osrBlock = graph.mir().osrBlock(); - while (!loopWorkList.empty()) { - loopBlock = loopWorkList.popCopy(); - if (loopBlock != osrBlock) - break; - } - - // If end is reached without finding a non-OSR block, then no more work items were found. - if (loopBlock == osrBlock) { - MOZ_ASSERT(loopWorkList.empty()); - break; - } - } - - // Clear the done set for other loops - loopDone.clear(); - } - - MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty()); - } - - JitSpew(JitSpew_RegAlloc, "Liveness analysis complete"); - - if (JitSpewEnabled(JitSpew_RegAlloc)) { - dumpInstructions(); - - fprintf(stderr, "Live ranges by virtual register:\n"); - dumpVregs(); } return true; @@ -747,10 +80,10 @@ BacktrackingAllocator::go() { JitSpew(JitSpew_RegAlloc, "Beginning register allocation"); - if (!init()) + if (!buildLivenessInfo()) return false; - if (!buildLivenessInfo()) + if (!init()) return false; if (JitSpewEnabled(JitSpew_RegAlloc)) @@ -769,13 +102,13 @@ BacktrackingAllocator::go() JitSpew(JitSpew_RegAlloc, "Beginning main allocation loop"); - // Allocate, spill and split bundles until finished. + // Allocate, spill and split register intervals until finished. while (!allocationQueue.empty()) { if (mir->shouldCancel("Backtracking Allocation")) return false; QueueItem item = allocationQueue.removeHighest(); - if (item.bundle ? !processBundle(item.bundle) : !processGroup(item.group)) + if (item.interval ? !processInterval(item.interval) : !processGroup(item.group)) return false; } JitSpew(JitSpew_RegAlloc, "Main allocation loop complete"); @@ -802,27 +135,26 @@ BacktrackingAllocator::go() } static bool -LifetimesOverlapIgnoreGroupExclude(VirtualRegister* reg0, VirtualRegister* reg1) +LifetimesOverlap(BacktrackingVirtualRegister* reg0, BacktrackingVirtualRegister* reg1) { - LiveRange::RegisterLinkIterator iter0 = reg0->rangesBegin(), iter1 = reg1->rangesBegin(); - while (iter0 && iter1) { - LiveRange* range0 = LiveRange::get(*iter0); - LiveRange* range1 = LiveRange::get(*iter1); + // Registers may have been eagerly split in two, see tryGroupReusedRegister. + // In such cases, only consider the first interval. + MOZ_ASSERT(reg0->numIntervals() <= 2 && reg1->numIntervals() <= 2); - // Registers may have had portions split off into their groupExclude bundle, - // see tryGroupReusedRegister. Ignore any overlap in these ranges. - if (range0 == reg0->groupExclude()) { - iter0++; - continue; - } else if (range1 == reg1->groupExclude()) { - iter1++; - continue; - } + LiveInterval* interval0 = reg0->getInterval(0); + LiveInterval* interval1 = reg1->getInterval(0); - if (range0->from() >= range1->to()) - iter1++; - else if (range1->from() >= range0->to()) - iter0++; + // Interval ranges are sorted in reverse order. The lifetimes overlap if + // any of their ranges overlap. 
+ size_t index0 = 0, index1 = 0; + while (index0 < interval0->numRanges() && index1 < interval1->numRanges()) { + const LiveInterval::Range + *range0 = interval0->getRange(index0), + *range1 = interval1->getRange(index1); + if (range0->from >= range1->to) + index0++; + else if (range1->from >= range0->to) + index1++; else return true; } @@ -831,10 +163,10 @@ LifetimesOverlapIgnoreGroupExclude(VirtualRegister* reg0, VirtualRegister* reg1) } bool -BacktrackingAllocator::canAddToGroup(VirtualRegisterGroup* group, VirtualRegister* reg) +BacktrackingAllocator::canAddToGroup(VirtualRegisterGroup* group, BacktrackingVirtualRegister* reg) { for (size_t i = 0; i < group->registers.length(); i++) { - if (LifetimesOverlapIgnoreGroupExclude(reg, &vregs[group->registers[i]])) + if (LifetimesOverlap(reg, &vregs[group->registers[i]])) return false; } return true; @@ -850,7 +182,7 @@ static bool IsThisSlotDefinition(LDefinition* def) { return IsArgumentSlotDefinition(def) && - def->output()->toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value); + def->output()->toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value); } bool @@ -859,10 +191,10 @@ BacktrackingAllocator::tryGroupRegisters(uint32_t vreg0, uint32_t vreg1) // See if reg0 and reg1 can be placed in the same group, following the // restrictions imposed by VirtualRegisterGroup and any other registers // already grouped with reg0 or reg1. - VirtualRegister* reg0 = &vregs[vreg0]; - VirtualRegister* reg1 = &vregs[vreg1]; + BacktrackingVirtualRegister* reg0 = &vregs[vreg0]; + BacktrackingVirtualRegister* reg1 = &vregs[vreg1]; - if (!reg0->isCompatible(*reg1)) + if (!reg0->isCompatibleVReg(*reg1)) return true; // Registers which might spill to the frame's |this| slot can only be @@ -918,7 +250,7 @@ BacktrackingAllocator::tryGroupRegisters(uint32_t vreg0, uint32_t vreg1) return true; } - if (LifetimesOverlapIgnoreGroupExclude(reg0, reg1)) + if (LifetimesOverlap(reg0, reg1)) return true; VirtualRegisterGroup* group = new(alloc()) VirtualRegisterGroup(alloc()); @@ -930,29 +262,11 @@ BacktrackingAllocator::tryGroupRegisters(uint32_t vreg0, uint32_t vreg1) return true; } -static inline LDefinition* -FindReusingDefinition(LNode* ins, LAllocation* alloc) -{ - for (size_t i = 0; i < ins->numDefs(); i++) { - LDefinition* def = ins->getDef(i); - if (def->policy() == LDefinition::MUST_REUSE_INPUT && - ins->getOperand(def->getReusedInput()) == alloc) - return def; - } - for (size_t i = 0; i < ins->numTemps(); i++) { - LDefinition* def = ins->getTemp(i); - if (def->policy() == LDefinition::MUST_REUSE_INPUT && - ins->getOperand(def->getReusedInput()) == alloc) - return def; - } - return nullptr; -} - bool BacktrackingAllocator::tryGroupReusedRegister(uint32_t def, uint32_t use) { - VirtualRegister& reg = vregs[def]; - VirtualRegister& usedReg = vregs[use]; + BacktrackingVirtualRegister& reg = vregs[def]; + BacktrackingVirtualRegister& usedReg = vregs[use]; // reg is a vreg which reuses its input usedReg for its output physical // register. Try to group reg with usedReg if at all possible, as avoiding @@ -960,13 +274,13 @@ BacktrackingAllocator::tryGroupReusedRegister(uint32_t def, uint32_t use) // generated code (MUST_REUSE_INPUT is used by all arithmetic instructions // on x86/x64). 
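LifetimesOverlap (and the bundle-based LifetimesOverlapIgnoreGroupExclude it replaces here) is a two-pointer sweep over two sorted, non-overlapping range lists: advance whichever list's current range lies entirely before the other's, and report an overlap as soon as neither does. A standalone sketch with ranges sorted ascending by start position (the restored interval code keeps its ranges in reverse order, so its comparisons are mirrored, but the idea is the same):

    #include <cstdio>
    #include <vector>

    struct Range { unsigned from, to; };   // half-open, sorted ascending by |from|

    // Returns true iff some range in |a| overlaps some range in |b|.
    bool lifetimesOverlap(const std::vector<Range>& a, const std::vector<Range>& b) {
        size_t i = 0, j = 0;
        while (i < a.size() && j < b.size()) {
            if (a[i].to <= b[j].from)
                i++;                       // a[i] ends before b[j] starts
            else if (b[j].to <= a[i].from)
                j++;                       // b[j] ends before a[i] starts
            else
                return true;               // neither is fully before the other
        }
        return false;
    }

    int main() {
        std::vector<Range> a = {{0, 4}, {10, 12}};
        std::vector<Range> b = {{4, 6}, {11, 13}};
        printf("%d\n", lifetimesOverlap(a, b));   // 1: [10,12) overlaps [11,13)
    }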
- if (reg.rangeFor(inputOf(reg.ins()))) { + if (reg.intervalFor(inputOf(reg.ins()))) { MOZ_ASSERT(reg.isTemp()); reg.setMustCopyInput(); return true; } - if (!usedReg.rangeFor(outputOf(reg.ins()))) { + if (!usedReg.intervalFor(outputOf(reg.ins()))) { // The input is not live after the instruction, either in a safepoint // for the instruction or in subsequent code. The input and output // can thus be in the same group. @@ -977,29 +291,27 @@ BacktrackingAllocator::tryGroupReusedRegister(uint32_t def, uint32_t use) // safepoint for the reusing instruction. This is impossible to satisfy // without copying the input. // - // It may or may not be better to split at the point of the definition, - // which may permit grouping. One case where it is definitely better to - // split is if the input never has any register uses after the instruction. - // Handle this splitting eagerly. + // It may or may not be better to split the interval at the point of the + // definition, which may permit grouping. One case where it is definitely + // better to split is if the input never has any register uses after the + // instruction. Handle this splitting eagerly. - if (usedReg.groupExclude() || + if (usedReg.numIntervals() != 1 || (usedReg.def()->isFixed() && !usedReg.def()->output()->isRegister())) { reg.setMustCopyInput(); return true; } + LiveInterval* interval = usedReg.getInterval(0); LBlock* block = reg.ins()->block(); // The input's lifetime must end within the same block as the definition, // otherwise it could live on in phis elsewhere. - LiveRange* lastUsedRange = nullptr; - for (LiveRange::RegisterLinkIterator iter = usedReg.rangesBegin(); iter; iter++) - lastUsedRange = LiveRange::get(*iter); - if (lastUsedRange->to() > exitOf(block)) { + if (interval->end() > exitOf(block)) { reg.setMustCopyInput(); return true; } - for (UsePositionIterator iter = lastUsedRange->usesBegin(); iter; iter++) { + for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) { if (iter->pos <= inputOf(reg.ins())) continue; @@ -1014,29 +326,38 @@ BacktrackingAllocator::tryGroupReusedRegister(uint32_t def, uint32_t use) } } - LiveRange* preRange = LiveRange::New(alloc(), use, lastUsedRange->from(), outputOf(reg.ins())); - if (!preRange) - return false; + LiveInterval* preInterval = LiveInterval::New(alloc(), interval->vreg(), 0); + for (size_t i = 0; i < interval->numRanges(); i++) { + const LiveInterval::Range* range = interval->getRange(i); + MOZ_ASSERT(range->from <= inputOf(reg.ins())); - // The new range starts at reg's input position, which means it overlaps - // with the old range at one position. This is what we want, because we + CodePosition to = Min(range->to, outputOf(reg.ins())); + if (!preInterval->addRange(range->from, to)) + return false; + } + + // The new interval starts at reg's input position, which means it overlaps + // with the old interval at one position. This is what we want, because we // need to copy the input before the instruction. 
- LiveRange* postRange = LiveRange::New(alloc(), use, inputOf(reg.ins()), lastUsedRange->to()); - if (!postRange) + LiveInterval* postInterval = LiveInterval::New(alloc(), interval->vreg(), 0); + if (!postInterval->addRange(inputOf(reg.ins()), interval->end())) return false; - lastUsedRange->distributeUses(preRange); - lastUsedRange->distributeUses(postRange); - MOZ_ASSERT(!lastUsedRange->hasUses()); + LiveIntervalVector newIntervals; + if (!newIntervals.append(preInterval) || !newIntervals.append(postInterval)) + return false; + + distributeUses(interval, newIntervals); JitSpew(JitSpew_RegAlloc, " splitting reused input at %u to try to help grouping", inputOf(reg.ins())); - usedReg.removeRange(lastUsedRange); - usedReg.addRange(preRange); - usedReg.addRange(postRange); + if (!split(interval, newIntervals)) + return false; - usedReg.setGroupExclude(postRange); + MOZ_ASSERT(usedReg.numIntervals() == 2); + + usedReg.setCanonicalSpillExclude(inputOf(reg.ins())); return tryGroupRegisters(use, def); } @@ -1070,10 +391,11 @@ BacktrackingAllocator::groupAndQueueRegisters() } // Try to group registers with their reused inputs. - MOZ_ASSERT(!vregs[0u].hasRanges()); + // Virtual register number 0 is unused. + MOZ_ASSERT(vregs[0u].numIntervals() == 0); for (size_t i = 1; i < graph.numVirtualRegisters(); i++) { - VirtualRegister& reg = vregs[i]; - if (!reg.hasRanges()) + BacktrackingVirtualRegister& reg = vregs[i]; + if (!reg.numIntervals()) continue; if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) { @@ -1097,15 +419,17 @@ BacktrackingAllocator::groupAndQueueRegisters() } } - MOZ_ASSERT(!vregs[0u].hasRanges()); + // Virtual register number 0 is unused. + MOZ_ASSERT(vregs[0u].numIntervals() == 0); for (size_t i = 1; i < graph.numVirtualRegisters(); i++) { if (mir->shouldCancel("Backtracking Enqueue Registers")) return false; - VirtualRegister& reg = vregs[i]; + BacktrackingVirtualRegister& reg = vregs[i]; + MOZ_ASSERT(reg.numIntervals() <= 2); MOZ_ASSERT(!reg.canonicalSpill()); - if (!reg.hasRanges()) + if (!reg.numIntervals()) continue; // Eagerly set the canonical spill slot for registers which are fixed @@ -1118,45 +442,29 @@ BacktrackingAllocator::groupAndQueueRegisters() reg.group()->spill = *def->output(); } - // If the register has a range that needs to be excluded from its - // group, queue a bundle containing that range separately. - if (reg.groupExclude()) { - LiveBundle* bundle = LiveBundle::New(alloc()); - if (!bundle) - return false; - bundle->addRange(reg.groupExclude()); - size_t priority = computePriority(bundle); - if (!allocationQueue.insert(QueueItem(bundle, priority))) - return false; - } - - // Create a bundle containing all other ranges for the register. - LiveBundle* bundle = LiveBundle::New(alloc()); - if (!bundle) - return false; - for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - if (range != reg.groupExclude()) - bundle->addRange(range); - } - - // Place the new bundle on the allocation queue. During initial - // queueing use single queue items for groups of registers, so that - // they will be allocated together and reduce the risk of unnecessary - // conflicts. This is in keeping with the idea that register groups are - // effectively single registers whose value changes during execution. - // If any ranges in the group are evicted later then they will be - // reallocated individually. + // Place all intervals for this register on the allocation queue. 
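tryGroupReusedRegister, in both the removed and restored versions above, eagerly splits the reused input's lifetime at the reusing instruction: a "pre" part clamped to end no later than the instruction's output, and a "post" part starting at the instruction's input, overlapping the pre part by one position because the input must be copied just before the instruction. A rough sketch of that split over plain range lists (hypothetical types; use redistribution is left to a distributeUses-style pass):

    #include <cstdio>
    #include <vector>

    struct Range { unsigned from, to; };   // half-open, ascending

    // Split |ranges| around a reusing instruction whose input position is
    // |input| and output position is |output| (input < output).
    void splitForReuse(const std::vector<Range>& ranges, unsigned input, unsigned output,
                       std::vector<Range>* pre, Range* post) {
        for (const Range& r : ranges) {
            if (r.from >= output)
                break;                                      // entirely after the split
            pre->push_back({r.from, r.to < output ? r.to : output});
        }
        *post = {input, ranges.back().to};                  // one range to the old end
    }

    int main() {
        std::vector<Range> ranges = {{0, 5}, {8, 20}};
        std::vector<Range> pre;
        Range post;
        splitForReuse(ranges, 9, 10, &pre, &post);
        for (const Range& r : pre)
            printf("pre [%u,%u) ", r.from, r.to);           // pre [0,5) pre [8,10)
        printf("\npost [%u,%u)\n", post.from, post.to);     // post [9,20)
    }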
+ // During initial queueing use single queue items for groups of + // registers, so that they will be allocated together and reduce the + // risk of unnecessary conflicts. This is in keeping with the idea that + // register groups are effectively single registers whose value changes + // during execution. If any intervals in the group are evicted later + // then they will be reallocated individually. + size_t start = 0; if (VirtualRegisterGroup* group = reg.group()) { if (i == group->canonicalReg()) { size_t priority = computePriority(group); if (!allocationQueue.insert(QueueItem(group, priority))) return false; } - } else { - size_t priority = computePriority(bundle); - if (!allocationQueue.insert(QueueItem(bundle, priority))) - return false; + start++; + } + for (; start < reg.numIntervals(); start++) { + LiveInterval* interval = reg.getInterval(start); + if (interval->numRanges() > 0) { + size_t priority = computePriority(interval); + if (!allocationQueue.insert(QueueItem(interval, priority))) + return false; + } } } @@ -1166,119 +474,117 @@ BacktrackingAllocator::groupAndQueueRegisters() static const size_t MAX_ATTEMPTS = 2; bool -BacktrackingAllocator::tryAllocateFixed(LiveBundle* bundle, Requirement requirement, - bool* success, bool* pfixed, - LiveBundleVector& conflicting) +BacktrackingAllocator::tryAllocateFixed(LiveInterval* interval, bool* success, + bool* pfixed, LiveIntervalVector& conflicting) { - // Spill bundles which are required to be in a certain stack slot. - if (!requirement.allocation().isRegister()) { + // Spill intervals which are required to be in a certain stack slot. + if (!interval->requirement()->allocation().isRegister()) { JitSpew(JitSpew_RegAlloc, " stack allocation requirement"); - bundle->setAllocation(requirement.allocation()); + interval->setAllocation(interval->requirement()->allocation()); *success = true; return true; } - AnyRegister reg = requirement.allocation().toRegister(); - return tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting); + AnyRegister reg = interval->requirement()->allocation().toRegister(); + return tryAllocateRegister(registers[reg.code()], interval, success, pfixed, conflicting); } bool -BacktrackingAllocator::tryAllocateNonFixed(LiveBundle* bundle, - Requirement requirement, Requirement hint, - bool* success, bool* pfixed, - LiveBundleVector& conflicting) +BacktrackingAllocator::tryAllocateNonFixed(LiveInterval* interval, bool* success, + bool* pfixed, LiveIntervalVector& conflicting) { - // If we want, but do not require a bundle to be in a specific register, - // only look at that register for allocating and evict or spill if it is - // not available. Picking a separate register may be even worse than - // spilling, as it will still necessitate moves and will tie up more - // registers than if we spilled. - if (hint.kind() == Requirement::FIXED) { - AnyRegister reg = hint.allocation().toRegister(); - if (!tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting)) + // If we want, but do not require an interval to be in a specific + // register, only look at that register for allocating and evict + // or spill if it is not available. Picking a separate register may + // be even worse than spilling, as it will still necessitate moves + // and will tie up more registers than if we spilled. 
+ if (interval->hint()->kind() == Requirement::FIXED) { + AnyRegister reg = interval->hint()->allocation().toRegister(); + if (!tryAllocateRegister(registers[reg.code()], interval, success, pfixed, conflicting)) return false; if (*success) return true; } - // Spill bundles which have no hint or register requirement. - if (requirement.kind() == Requirement::NONE && hint.kind() != Requirement::REGISTER) { - spill(bundle); + // Spill intervals which have no hint or register requirement. + if (interval->requirement()->kind() == Requirement::NONE && + interval->hint()->kind() != Requirement::REGISTER) + { + spill(interval); *success = true; return true; } - if (conflicting.empty() || minimalBundle(bundle)) { - // Search for any available register which the bundle can be + if (conflicting.empty() || minimalInterval(interval)) { + // Search for any available register which the interval can be // allocated to. for (size_t i = 0; i < AnyRegister::Total; i++) { - if (!tryAllocateRegister(registers[i], bundle, success, pfixed, conflicting)) + if (!tryAllocateRegister(registers[i], interval, success, pfixed, conflicting)) return false; if (*success) return true; } } - // Spill bundles which have no register requirement if they didn't get + // Spill intervals which have no register requirement if they didn't get // allocated. - if (requirement.kind() == Requirement::NONE) { - spill(bundle); + if (interval->requirement()->kind() == Requirement::NONE) { + spill(interval); *success = true; return true; } - // We failed to allocate this bundle. + // We failed to allocate this interval. MOZ_ASSERT(!*success); return true; } bool -BacktrackingAllocator::processBundle(LiveBundle* bundle) +BacktrackingAllocator::processInterval(LiveInterval* interval) { if (JitSpewEnabled(JitSpew_RegAlloc)) { JitSpew(JitSpew_RegAlloc, "Allocating %s [priority %lu] [weight %lu]", - bundle->toString(), computePriority(bundle), computeSpillWeight(bundle)); + interval->toString(), computePriority(interval), computeSpillWeight(interval)); } - // A bundle can be processed by doing any of the following: + // An interval can be processed by doing any of the following: // - // - Assigning the bundle a register. The bundle cannot overlap any other - // bundle allocated for that physical register. + // - Assigning the interval a register. The interval cannot overlap any + // other interval allocated for that physical register. // - // - Spilling the bundle, provided it has no register uses. + // - Spilling the interval, provided it has no register uses. // - // - Splitting the bundle into two or more bundles which cover the original - // one. The new bundles are placed back onto the priority queue for later - // processing. + // - Splitting the interval into two or more intervals which cover the + // original one. The new intervals are placed back onto the priority + // queue for later processing. // - // - Evicting one or more existing allocated bundles, and then doing one - // of the above operations. Evicted bundles are placed back on the - // priority queue. Any evicted bundles must have a lower spill weight - // than the bundle being processed. + // - Evicting one or more existing allocated intervals, and then doing one + // of the above operations. Evicted intervals are placed back on the + // priority queue. Any evicted intervals must have a lower spill weight + // than the interval being processed. // // As long as this structure is followed, termination is guaranteed. 
- // In general, we want to minimize the amount of bundle splitting (which - // generally necessitates spills), so allocate longer lived, lower weight - // bundles first and evict and split them later if they prevent allocation - // for higher weight bundles. + // In general, we want to minimize the amount of interval splitting + // (which generally necessitates spills), so allocate longer lived, lower + // weight intervals first and evict and split them later if they prevent + // allocation for higher weight intervals. - Requirement requirement, hint; - bool canAllocate = computeRequirement(bundle, &requirement, &hint); + bool canAllocate = setIntervalRequirement(interval); bool fixed; - LiveBundleVector conflicting; + LiveIntervalVector conflicting; for (size_t attempt = 0;; attempt++) { if (canAllocate) { bool success = false; fixed = false; conflicting.clear(); - // Ok, let's try allocating for this bundle. - if (requirement.kind() == Requirement::FIXED) { - if (!tryAllocateFixed(bundle, requirement, &success, &fixed, conflicting)) + // Ok, let's try allocating for this interval. + if (interval->requirement()->kind() == Requirement::FIXED) { + if (!tryAllocateFixed(interval, &success, &fixed, conflicting)) return false; } else { - if (!tryAllocateNonFixed(bundle, requirement, hint, &success, &fixed, conflicting)) + if (!tryAllocateNonFixed(interval, &success, &fixed, conflicting)) return false; } @@ -1286,29 +592,29 @@ BacktrackingAllocator::processBundle(LiveBundle* bundle) if (success) return true; - // If that didn't work, but we have one or more non-fixed bundles + // If that didn't work, but we have one or more non-fixed intervals // known to be conflicting, maybe we can evict them and try again. if (attempt < MAX_ATTEMPTS && !fixed && !conflicting.empty() && - maximumSpillWeight(conflicting) < computeSpillWeight(bundle)) - { - for (size_t i = 0; i < conflicting.length(); i++) { - if (!evictBundle(conflicting[i])) - return false; - } - continue; + maximumSpillWeight(conflicting) < computeSpillWeight(interval)) + { + for (size_t i = 0; i < conflicting.length(); i++) { + if (!evictInterval(conflicting[i])) + return false; } + continue; + } } - // A minimal bundle cannot be split any further. If we try to split it - // it at this point we will just end up with the same bundle and will - // enter an infinite loop. Weights and the initial live ranges must - // be constructed so that any minimal bundle is allocatable. - MOZ_ASSERT(!minimalBundle(bundle)); + // A minimal interval cannot be split any further. If we try to split + // it at this point we will just end up with the same interval and will + // enter an infinite loop. Weights and the initial live intervals must + // be constructed so that any minimal interval is allocatable. + MOZ_ASSERT(!minimalInterval(interval)); - LiveBundle* conflict = conflicting.empty() ? nullptr : conflicting[0]; - return chooseBundleSplit(bundle, canAllocate && fixed, conflict); + LiveInterval* conflict = conflicting.empty() ? nullptr : conflicting[0]; + return chooseIntervalSplit(interval, canAllocate && fixed, conflict); } } @@ -1320,13 +626,15 @@ BacktrackingAllocator::processGroup(VirtualRegisterGroup* group) group->registers[0], computePriority(group), computeSpillWeight(group)); } - LiveBundle* conflict; + bool fixed; + LiveInterval* conflict; for (size_t attempt = 0;; attempt++) { // Search for any available register which the group can be allocated to. 
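The comment block above is the heart of the backtracking policy: try to allocate; if the only conflicts are non-fixed and strictly lighter, evict them (for a bounded number of attempts) and retry; otherwise split or spill the current item. A toy single-register sketch of just that decision structure, with a weight-tagged Item standing in for an interval or bundle and no priority queue or splitting logic:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Toy item: one [from, to) lifetime with a spill weight; |allocated| says
    // whether it currently holds the single register in this model.
    struct Item { unsigned from, to, weight; bool allocated = false; };

    bool overlaps(const Item& a, const Item& b) {
        return a.from < b.to && b.from < a.to;
    }

    // One round of the backtracking policy for |cur| against already-allocated
    // items: allocate if free, evict strictly lighter conflicts and retry,
    // otherwise give up (the real allocator would split or spill here).
    const char* process(Item& cur, std::vector<Item>& others) {
        const int MAX_ATTEMPTS = 2;
        for (int attempt = 0; ; attempt++) {
            std::vector<Item*> conflicts;
            for (Item& other : others) {
                if (other.allocated && overlaps(cur, other))
                    conflicts.push_back(&other);
            }
            if (conflicts.empty()) {
                cur.allocated = true;
                return "allocated";
            }
            unsigned maxWeight = 0;
            for (Item* c : conflicts)
                maxWeight = std::max(maxWeight, c->weight);
            if (attempt < MAX_ATTEMPTS && maxWeight < cur.weight) {
                for (Item* c : conflicts)
                    c->allocated = false;   // evicted items would be re-queued
                continue;
            }
            return "split or spill";        // can't win by evicting; backtrack
        }
    }

    int main() {
        std::vector<Item> items = {{0, 10, 1}, {5, 8, 4}, {6, 7, 1}};
        items[0].allocated = true;                         // allocated earlier
        printf("heavy: %s\n", process(items[1], items));   // allocated (evicts the light item)
        printf("light: %s\n", process(items[2], items));   // split or spill (loses to the heavy one)
    }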
+ fixed = false; conflict = nullptr; for (size_t i = 0; i < AnyRegister::Total; i++) { bool success; - if (!tryAllocateGroupRegister(registers[i], group, &success, &conflict)) + if (!tryAllocateGroupRegister(registers[i], group, &success, &fixed, &conflict)) return false; if (success) { conflict = nullptr; @@ -1335,19 +643,20 @@ BacktrackingAllocator::processGroup(VirtualRegisterGroup* group) } if (attempt < MAX_ATTEMPTS && + !fixed && conflict && + conflict->hasVreg() && computeSpillWeight(conflict) < computeSpillWeight(group)) { - if (!evictBundle(conflict)) + if (!evictInterval(conflict)) return false; continue; } for (size_t i = 0; i < group->registers.length(); i++) { VirtualRegister& reg = vregs[group->registers[i]]; - LiveRange* firstRange = LiveRange::get(*reg.rangesBegin()); - MOZ_ASSERT(firstRange != reg.groupExclude()); - if (!processBundle(firstRange->bundle())) + MOZ_ASSERT(reg.numIntervals() <= 2); + if (!processInterval(reg.getInterval(0))) return false; } @@ -1356,66 +665,73 @@ BacktrackingAllocator::processGroup(VirtualRegisterGroup* group) } bool -BacktrackingAllocator::computeRequirement(LiveBundle* bundle, - Requirement *requirement, Requirement *hint) +BacktrackingAllocator::setIntervalRequirement(LiveInterval* interval) { - // Set any requirement or hint on bundle according to its definition and + // Set any requirement or hint on interval according to its definition and // uses. Return false if there are conflicting requirements which will - // require the bundle to be split. + // require the interval to be split. + interval->setHint(Requirement()); + interval->setRequirement(Requirement()); - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - VirtualRegister ® = vregs[range->vreg()]; + BacktrackingVirtualRegister* reg = &vregs[interval->vreg()]; - // Set a hint if another range in the same group is in a register. - if (VirtualRegisterGroup* group = reg.group()) { - if (group->allocation.isRegister()) { + // Set a hint if another interval in the same group is in a register. + if (VirtualRegisterGroup* group = reg->group()) { + if (group->allocation.isRegister()) { + if (JitSpewEnabled(JitSpew_RegAlloc)) { JitSpew(JitSpew_RegAlloc, " Hint %s, used by group allocation", group->allocation.toString()); - hint->merge(Requirement(group->allocation)); } + interval->setHint(Requirement(group->allocation)); } + } - if (range->hasDefinition()) { - // Deal with any definition constraints/hints. - LDefinition::Policy policy = reg.def()->policy(); - if (policy == LDefinition::FIXED) { - // Fixed policies get a FIXED requirement. + if (interval->index() == 0) { + // The first interval is the definition, so deal with any definition + // constraints/hints. + + LDefinition::Policy policy = reg->def()->policy(); + if (policy == LDefinition::FIXED) { + // Fixed policies get a FIXED requirement. + if (JitSpewEnabled(JitSpew_RegAlloc)) { JitSpew(JitSpew_RegAlloc, " Requirement %s, fixed by definition", - reg.def()->output()->toString()); - if (!requirement->merge(Requirement(*reg.def()->output()))) - return false; - } else if (reg.ins()->isPhi()) { - // Phis don't have any requirements, but they should prefer their - // input allocations. This is captured by the group hints above. - } else { - // Non-phis get a REGISTER requirement. 
- if (!requirement->merge(Requirement(Requirement::REGISTER))) - return false; + reg->def()->output()->toString()); } + interval->setRequirement(Requirement(*reg->def()->output())); + } else if (reg->ins()->isPhi()) { + // Phis don't have any requirements, but they should prefer their + // input allocations. This is captured by the group hints above. + } else { + // Non-phis get a REGISTER requirement. + interval->setRequirement(Requirement(Requirement::REGISTER)); } + } - // Search uses for requirements. - for (UsePositionIterator iter = range->usesBegin(); iter; iter++) { - LUse::Policy policy = iter->use->policy(); - if (policy == LUse::FIXED) { - AnyRegister required = GetFixedRegister(reg.def(), iter->use); + // Search uses for requirements. + for (UsePositionIterator iter = interval->usesBegin(); + iter != interval->usesEnd(); + iter++) + { + LUse::Policy policy = iter->use->policy(); + if (policy == LUse::FIXED) { + AnyRegister required = GetFixedRegister(reg->def(), iter->use); + if (JitSpewEnabled(JitSpew_RegAlloc)) { JitSpew(JitSpew_RegAlloc, " Requirement %s, due to use at %u", required.name(), iter->pos.bits()); - - // If there are multiple fixed registers which the bundle is - // required to use, fail. The bundle will need to be split before - // it can be allocated. - if (!requirement->merge(Requirement(LAllocation(required)))) - return false; - } else if (policy == LUse::REGISTER) { - if (!requirement->merge(Requirement(Requirement::REGISTER))) - return false; - } else if (policy == LUse::ANY) { - // ANY differs from KEEPALIVE by actively preferring a register. - hint->merge(Requirement(Requirement::REGISTER)); } + + // If there are multiple fixed registers which the interval is + // required to use, fail. The interval will need to be split before + // it can be allocated. + if (!interval->addRequirement(Requirement(LAllocation(required)))) + return false; + } else if (policy == LUse::REGISTER) { + if (!interval->addRequirement(Requirement(Requirement::REGISTER))) + return false; + } else if (policy == LUse::ANY) { + // ANY differs from KEEPALIVE by actively preferring a register. + interval->addHint(Requirement(Requirement::REGISTER)); } } @@ -1424,36 +740,32 @@ BacktrackingAllocator::computeRequirement(LiveBundle* bundle, bool BacktrackingAllocator::tryAllocateGroupRegister(PhysicalRegister& r, VirtualRegisterGroup* group, - bool* psuccess, LiveBundle** pconflicting) + bool* psuccess, bool* pfixed, LiveInterval** pconflicting) { *psuccess = false; if (!r.allocatable) return true; - if (!vregs[group->registers[0]].isCompatible(r.reg)) + if (!vregs[group->registers[0]].isCompatibleReg(r.reg)) return true; bool allocatable = true; - LiveBundle* conflicting = nullptr; + LiveInterval* conflicting = nullptr; for (size_t i = 0; i < group->registers.length(); i++) { VirtualRegister& reg = vregs[group->registers[i]]; - for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - if (range == reg.groupExclude()) - continue; - LiveRange* existing; + MOZ_ASSERT(reg.numIntervals() <= 2); + LiveInterval* interval = reg.getInterval(0); + + for (size_t j = 0; j < interval->numRanges(); j++) { + AllocatedRange range(interval, interval->getRange(j)), existing; if (r.allocations.contains(range, &existing)) { - if (!existing->bundle()) { - // The group's range spans a call. 
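Both versions of the requirement computation above fold the definition policy and every use policy into a single requirement, and fail (forcing a split) when two different fixed registers are demanded. A toy sketch of that merge lattice, roughly mirroring the Requirement kinds used here (NONE, REGISTER, FIXED); the real class also carries spill positions and hints, which are omitted:

    #include <cstdio>

    // Toy requirement lattice: NONE < REGISTER < FIXED(reg). Merging two
    // incompatible FIXED requirements fails, signalling that the interval or
    // bundle must be split before it can be allocated.
    struct Requirement {
        enum Kind { NONE, REGISTER, FIXED } kind = NONE;
        int reg = -1;                     // meaningful only for FIXED

        bool merge(const Requirement& other) {
            if (other.kind == NONE)
                return true;
            if (kind == NONE) {
                *this = other;
                return true;
            }
            if (other.kind == REGISTER)
                return true;              // already at least as strong
            if (kind == REGISTER) {
                *this = other;            // upgrade to the fixed register
                return true;
            }
            return reg == other.reg;      // two FIXEDs must agree
        }
    };

    int main() {
        Requirement req;
        req.merge({Requirement::REGISTER});              // non-phi def: REGISTER
        bool ok1 = req.merge({Requirement::FIXED, 3});   // use fixed to r3: fine
        bool ok2 = req.merge({Requirement::FIXED, 5});   // second fixed register: conflict
        printf("after r3: kind=%d ok=%d\n", req.kind, ok1);        // kind=2 ok=1
        printf("merge r5 ok=%d (needs a split first)\n", ok2);     // 0
    }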
- return true; - } if (conflicting) { - if (conflicting != existing->bundle()) + if (conflicting != existing.interval) return true; } else { - conflicting = existing->bundle(); + conflicting = existing.interval; } allocatable = false; } @@ -1464,6 +776,8 @@ BacktrackingAllocator::tryAllocateGroupRegister(PhysicalRegister& r, VirtualRegi MOZ_ASSERT(conflicting); if (!*pconflicting || computeSpillWeight(conflicting) < computeSpillWeight(*pconflicting)) *pconflicting = conflicting; + if (!conflicting->hasVreg()) + *pfixed = true; return true; } @@ -1474,42 +788,45 @@ BacktrackingAllocator::tryAllocateGroupRegister(PhysicalRegister& r, VirtualRegi } bool -BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle, - bool* success, bool* pfixed, LiveBundleVector& conflicting) +BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveInterval* interval, + bool* success, bool* pfixed, LiveIntervalVector& conflicting) { *success = false; if (!r.allocatable) return true; - LiveBundleVector aliasedConflicting; + BacktrackingVirtualRegister* reg = &vregs[interval->vreg()]; + if (!reg->isCompatibleReg(r.reg)) + return true; - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - VirtualRegister ® = vregs[range->vreg()]; + MOZ_ASSERT_IF(interval->requirement()->kind() == Requirement::FIXED, + interval->requirement()->allocation() == LAllocation(r.reg)); - if (!reg.isCompatible(r.reg)) - return true; + LiveIntervalVector aliasedConflicting; + for (size_t i = 0; i < interval->numRanges(); i++) { + AllocatedRange range(interval, interval->getRange(i)), existing; for (size_t a = 0; a < r.reg.numAliased(); a++) { PhysicalRegister& rAlias = registers[r.reg.aliased(a).code()]; - LiveRange* existing; if (!rAlias.allocations.contains(range, &existing)) continue; - if (existing->hasVreg()) { - MOZ_ASSERT(existing->bundle()->allocation().toRegister() == rAlias.reg); + if (existing.interval->hasVreg()) { + MOZ_ASSERT(existing.interval->getAllocation()->toRegister() == rAlias.reg); bool duplicate = false; for (size_t i = 0; i < aliasedConflicting.length(); i++) { - if (aliasedConflicting[i] == existing->bundle()) { + if (aliasedConflicting[i] == existing.interval) { duplicate = true; break; } } - if (!duplicate && !aliasedConflicting.append(existing->bundle())) + if (!duplicate && !aliasedConflicting.append(existing.interval)) return false; } else { - JitSpew(JitSpew_RegAlloc, " %s collides with fixed use %s", - rAlias.reg.name(), existing->toString()); + if (JitSpewEnabled(JitSpew_RegAlloc)) { + JitSpew(JitSpew_RegAlloc, " %s collides with fixed use %s", + rAlias.reg.name(), existing.range->toString()); + } *pfixed = true; return true; } @@ -1517,22 +834,24 @@ BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveBundle* bund } if (!aliasedConflicting.empty()) { - // One or more aliased registers is allocated to another bundle + // One or more aliased registers is allocated to another live interval // overlapping this one. Keep track of the conflicting set, and in the // case of multiple conflicting sets keep track of the set with the // lowest maximum spill weight. 
if (JitSpewEnabled(JitSpew_RegAlloc)) { if (aliasedConflicting.length() == 1) { - LiveBundle* existing = aliasedConflicting[0]; - JitSpew(JitSpew_RegAlloc, " %s collides with %s [weight %lu]", - r.reg.name(), existing->toString(), computeSpillWeight(existing)); + LiveInterval* existing = aliasedConflicting[0]; + JitSpew(JitSpew_RegAlloc, " %s collides with v%u[%u] %s [weight %lu]", + r.reg.name(), existing->vreg(), existing->index(), + existing->rangesToString(), computeSpillWeight(existing)); } else { JitSpew(JitSpew_RegAlloc, " %s collides with the following", r.reg.name()); for (size_t i = 0; i < aliasedConflicting.length(); i++) { - LiveBundle* existing = aliasedConflicting[i]; - JitSpew(JitSpew_RegAlloc, " %s [weight %lu]", - existing->toString(), computeSpillWeight(existing)); + LiveInterval* existing = aliasedConflicting[i]; + JitSpew(JitSpew_RegAlloc, " v%u[%u] %s [weight %lu]", + existing->vreg(), existing->index(), + existing->rangesToString(), computeSpillWeight(existing)); } } } @@ -1552,150 +871,174 @@ BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveBundle* bund JitSpew(JitSpew_RegAlloc, " allocated to %s", r.reg.name()); - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - - // Set any register hint for allocating other bundles in the same group. - if (VirtualRegisterGroup* group = vregs[range->vreg()].group()) { - if (!group->allocation.isRegister()) - group->allocation = LAllocation(r.reg); - } - + for (size_t i = 0; i < interval->numRanges(); i++) { + AllocatedRange range(interval, interval->getRange(i)); if (!r.allocations.insert(range)) return false; } - bundle->setAllocation(LAllocation(r.reg)); + // Set any register hint for allocating other intervals in the same group. + if (VirtualRegisterGroup* group = reg->group()) { + if (!group->allocation.isRegister()) + group->allocation = LAllocation(r.reg); + } + + interval->setAllocation(LAllocation(r.reg)); *success = true; return true; } bool -BacktrackingAllocator::evictBundle(LiveBundle* bundle) +BacktrackingAllocator::evictInterval(LiveInterval* interval) { if (JitSpewEnabled(JitSpew_RegAlloc)) { JitSpew(JitSpew_RegAlloc, " Evicting %s [priority %lu] [weight %lu]", - bundle->toString(), computePriority(bundle), computeSpillWeight(bundle)); + interval->toString(), computePriority(interval), computeSpillWeight(interval)); } - AnyRegister reg(bundle->allocation().toRegister()); + MOZ_ASSERT(interval->getAllocation()->isRegister()); + + AnyRegister reg(interval->getAllocation()->toRegister()); PhysicalRegister& physical = registers[reg.code()]; MOZ_ASSERT(physical.reg == reg && physical.allocatable); - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); + for (size_t i = 0; i < interval->numRanges(); i++) { + AllocatedRange range(interval, interval->getRange(i)); physical.allocations.remove(range); } - bundle->setAllocation(LAllocation()); + interval->setAllocation(LAllocation()); - size_t priority = computePriority(bundle); - return allocationQueue.insert(QueueItem(bundle, priority)); + size_t priority = computePriority(interval); + return allocationQueue.insert(QueueItem(interval, priority)); +} + +void +BacktrackingAllocator::distributeUses(LiveInterval* interval, + const LiveIntervalVector& newIntervals) +{ + MOZ_ASSERT(newIntervals.length() >= 2); + + // Simple redistribution of uses from an old interval to a set of new + // intervals. 
Intervals are permitted to overlap, in which case this will + // assign uses in the overlapping section to the interval with the latest + // start position. + for (UsePositionIterator iter(interval->usesBegin()); + iter != interval->usesEnd(); + iter++) + { + CodePosition pos = iter->pos; + LiveInterval* addInterval = nullptr; + for (size_t i = 0; i < newIntervals.length(); i++) { + LiveInterval* newInterval = newIntervals[i]; + if (newInterval->covers(pos)) { + if (!addInterval || newInterval->start() < addInterval->start()) + addInterval = newInterval; + } + } + addInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos)); + } } bool -BacktrackingAllocator::splitAndRequeueBundles(LiveBundle* bundle, - const LiveBundleVector& newBundles) +BacktrackingAllocator::split(LiveInterval* interval, + const LiveIntervalVector& newIntervals) { if (JitSpewEnabled(JitSpew_RegAlloc)) { - JitSpew(JitSpew_RegAlloc, " splitting bundle %s into:", bundle->toString()); - for (size_t i = 0; i < newBundles.length(); i++) - JitSpew(JitSpew_RegAlloc, " %s", newBundles[i]->toString()); - } - - // Remove all ranges in the old bundle from their register's list. - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - MOZ_ASSERT(range != vregs[range->vreg()].groupExclude()); - vregs[range->vreg()].removeRange(range); - } - - // Add all ranges in the new bundles to their register's list. - MOZ_ASSERT(newBundles.length() >= 2); - for (size_t i = 0; i < newBundles.length(); i++) { - LiveBundle* newBundle = newBundles[i]; - for (LiveRange::BundleLinkIterator iter = newBundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - vregs[range->vreg()].addRange(range); + JitSpew(JitSpew_RegAlloc, " splitting interval %s into:", interval->toString()); + for (size_t i = 0; i < newIntervals.length(); i++) { + JitSpew(JitSpew_RegAlloc, " %s", newIntervals[i]->toString()); + MOZ_ASSERT(newIntervals[i]->start() >= interval->start()); + MOZ_ASSERT(newIntervals[i]->end() <= interval->end()); } } - // Queue the new bundles for register assignment. - for (size_t i = 0; i < newBundles.length(); i++) { - LiveBundle* newBundle = newBundles[i]; - size_t priority = computePriority(newBundle); - if (!allocationQueue.insert(QueueItem(newBundle, priority))) + MOZ_ASSERT(newIntervals.length() >= 2); + + // Find the earliest interval in the new list. + LiveInterval* first = newIntervals[0]; + for (size_t i = 1; i < newIntervals.length(); i++) { + if (newIntervals[i]->start() < first->start()) + first = newIntervals[i]; + } + + // Replace the old interval in the virtual register's state with the new intervals. + VirtualRegister* reg = &vregs[interval->vreg()]; + reg->replaceInterval(interval, first); + for (size_t i = 0; i < newIntervals.length(); i++) { + if (newIntervals[i] != first && !reg->addInterval(newIntervals[i])) return false; } return true; } -void -BacktrackingAllocator::spill(LiveBundle* bundle) +bool BacktrackingAllocator::requeueIntervals(const LiveIntervalVector& newIntervals) { - JitSpew(JitSpew_RegAlloc, " Spilling bundle"); + // Queue the new intervals for register assignment. 
+ for (size_t i = 0; i < newIntervals.length(); i++) { + LiveInterval* newInterval = newIntervals[i]; + size_t priority = computePriority(newInterval); + if (!allocationQueue.insert(QueueItem(newInterval, priority))) + return false; + } + return true; +} - if (LiveBundle* spillParent = bundle->spillParent()) { - JitSpew(JitSpew_RegAlloc, " Using existing spill bundle"); - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - LiveRange* parentRange = spillParent->rangeFor(range->from()); - MOZ_ASSERT(parentRange->from() <= range->from()); - MOZ_ASSERT(parentRange->to() >= range->to()); - MOZ_ASSERT(range->vreg() == parentRange->vreg()); - range->distributeUses(parentRange); - MOZ_ASSERT(!range->hasUses()); - vregs[range->vreg()].removeRange(range); - } +void +BacktrackingAllocator::spill(LiveInterval* interval) +{ + JitSpew(JitSpew_RegAlloc, " Spilling interval"); + + MOZ_ASSERT(interval->requirement()->kind() == Requirement::NONE); + MOZ_ASSERT(!interval->getAllocation()->isStackSlot()); + + // We can't spill bogus intervals. + MOZ_ASSERT(interval->hasVreg()); + + BacktrackingVirtualRegister* reg = &vregs[interval->vreg()]; + + if (LiveInterval* spillInterval = interval->spillInterval()) { + JitSpew(JitSpew_RegAlloc, " Spilling to existing spill interval"); + while (!interval->usesEmpty()) + spillInterval->addUse(interval->popUse()); + reg->removeInterval(interval); return; } - // For now, we require that all ranges in the bundle have the same vreg. See bug 1067610. - LiveRange* firstRange = LiveRange::get(*bundle->rangesBegin()); - VirtualRegister ® = vregs[firstRange->vreg()]; - -#ifdef DEBUG - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - MOZ_ASSERT(range->vreg() == reg.vreg()); - MOZ_ASSERT_IF(range == reg.groupExclude(), bundle->numRanges() == 1); - } -#endif - - // Canonical spill slots cannot be used for the groupExclude range, which - // might overlap other ranges in the register's group. - bool useCanonical = firstRange != reg.groupExclude(); + bool useCanonical = !reg->hasCanonicalSpillExclude() + || interval->start() < reg->canonicalSpillExclude(); if (useCanonical) { - if (reg.canonicalSpill()) { + if (reg->canonicalSpill()) { JitSpew(JitSpew_RegAlloc, " Picked canonical spill location %s", - reg.canonicalSpill()->toString()); - bundle->setAllocation(*reg.canonicalSpill()); + reg->canonicalSpill()->toString()); + interval->setAllocation(*reg->canonicalSpill()); return; } - if (reg.group() && !reg.group()->spill.isUse()) { + if (reg->group() && !reg->group()->spill.isUse()) { JitSpew(JitSpew_RegAlloc, " Reusing group spill location %s", - reg.group()->spill.toString()); - bundle->setAllocation(reg.group()->spill); - reg.setCanonicalSpill(reg.group()->spill); + reg->group()->spill.toString()); + interval->setAllocation(reg->group()->spill); + reg->setCanonicalSpill(reg->group()->spill); return; } } uint32_t virtualSlot = numVirtualStackSlots++; - // Count virtual stack slots down from the maximum representable value. + // Count virtual stack slots down from the maximum representable value, so + // that virtual slots are more obviously distinguished from real slots. 
LStackSlot alloc(LAllocation::DATA_MASK - virtualSlot); - bundle->setAllocation(alloc); + interval->setAllocation(alloc); JitSpew(JitSpew_RegAlloc, " Allocating spill location %s", alloc.toString()); if (useCanonical) { - reg.setCanonicalSpill(alloc); - if (reg.group()) - reg.group()->spill = alloc; + reg->setCanonicalSpill(alloc); + if (reg->group()) + reg->group()->spill = alloc; } } @@ -1703,14 +1046,14 @@ bool BacktrackingAllocator::pickStackSlots() { for (size_t i = 1; i < graph.numVirtualRegisters(); i++) { - VirtualRegister& reg = vregs[i]; + BacktrackingVirtualRegister* reg = &vregs[i]; if (mir->shouldCancel("Backtracking Pick Stack Slots")) return false; - for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - if (!pickStackSlot(range->bundle())) + for (size_t j = 0; j < reg->numIntervals(); j++) { + LiveInterval* interval = reg->getInterval(j); + if (!pickStackSlot(interval)) return false; } } @@ -1719,90 +1062,68 @@ BacktrackingAllocator::pickStackSlots() } bool -BacktrackingAllocator::addBundlesUsingAllocation(VirtualRegister ®, LAllocation alloc, - LiveBundleVector &bundles) +BacktrackingAllocator::pickStackSlot(LiveInterval* interval) { - for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - LiveBundle* bundle = range->bundle(); - if (bundle->allocation() == alloc) { - bool duplicate = false; - for (size_t i = 0; i < bundles.length(); i++) { - if (bundles[i] == bundle) { - duplicate = true; - break; - } - } - if (!duplicate && !bundles.append(bundle)) - return false; - } - } - return true; -} - -bool -BacktrackingAllocator::pickStackSlot(LiveBundle* bundle) -{ - LAllocation alloc = bundle->allocation(); + LAllocation alloc = *interval->getAllocation(); MOZ_ASSERT(!alloc.isUse()); if (!isVirtualStackSlot(alloc)) return true; - // For now, we require that all ranges in the bundle have the same vreg. See bug 1067610. - LiveRange* firstRange = LiveRange::get(*bundle->rangesBegin()); - VirtualRegister ® = vregs[firstRange->vreg()]; + BacktrackingVirtualRegister& reg = vregs[interval->vreg()]; -#ifdef DEBUG - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - MOZ_ASSERT(range->vreg() == reg.vreg()); - MOZ_ASSERT_IF(range == reg.groupExclude(), bundle->numRanges() == 1); - } -#endif + // Get a list of all the intervals which will share this stack slot. + LiveIntervalVector commonIntervals; - // Get a list of all the bundles which will share this stack slot. - LiveBundleVector commonBundles; - - if (!commonBundles.append(bundle)) + if (!commonIntervals.append(interval)) return false; if (reg.canonicalSpill() && alloc == *reg.canonicalSpill()) { - // Look for other bundles with ranges in the vreg using this spill slot. - if (!addBundlesUsingAllocation(reg, alloc, commonBundles)) - return false; + // Look for other intervals in the vreg using this spill slot. + for (size_t i = 0; i < reg.numIntervals(); i++) { + LiveInterval* ninterval = reg.getInterval(i); + if (ninterval != interval && *ninterval->getAllocation() == alloc) { + if (!commonIntervals.append(ninterval)) + return false; + } + } - // Look for bundles in other registers with the same group using this + // Look for intervals in other registers with the same group using this // spill slot. 
if (reg.group() && alloc == reg.group()->spill) { for (size_t i = 0; i < reg.group()->registers.length(); i++) { uint32_t nvreg = reg.group()->registers[i]; - if (nvreg == reg.vreg()) + if (nvreg == interval->vreg()) continue; - VirtualRegister& nreg = vregs[nvreg]; - if (!addBundlesUsingAllocation(nreg, alloc, commonBundles)) - return false; + BacktrackingVirtualRegister& nreg = vregs[nvreg]; + for (size_t j = 0; j < nreg.numIntervals(); j++) { + LiveInterval* ninterval = nreg.getInterval(j); + if (*ninterval->getAllocation() == alloc) { + if (!commonIntervals.append(ninterval)) + return false; + } + } } } } else { MOZ_ASSERT_IF(reg.group(), alloc != reg.group()->spill); } - if (!reuseOrAllocateStackSlot(commonBundles, reg.type(), &alloc)) + if (!reuseOrAllocateStackSlot(commonIntervals, reg.type(), &alloc)) return false; MOZ_ASSERT(!isVirtualStackSlot(alloc)); - // Set the physical stack slot for each of the bundles found earlier. - for (size_t i = 0; i < commonBundles.length(); i++) - commonBundles[i]->setAllocation(alloc); + // Set the physical stack slot for each of the intervals found earlier. + for (size_t i = 0; i < commonIntervals.length(); i++) + commonIntervals[i]->setAllocation(alloc); return true; } bool -BacktrackingAllocator::reuseOrAllocateStackSlot(const LiveBundleVector& bundles, - LDefinition::Type type, LAllocation* palloc) +BacktrackingAllocator::reuseOrAllocateStackSlot(const LiveIntervalVector& intervals, LDefinition::Type type, + LAllocation* palloc) { SpillSlotList* slotList; switch (StackSlotAllocator::width(type)) { @@ -1830,11 +1151,10 @@ BacktrackingAllocator::reuseOrAllocateStackSlot(const LiveBundleVector& bundles, } bool success = true; - for (size_t i = 0; i < bundles.length() && success; i++) { - LiveBundle* bundle = bundles[i]; - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - LiveRange* existing; + for (size_t i = 0; i < intervals.length() && success; i++) { + LiveInterval* interval = intervals[i]; + for (size_t j = 0; j < interval->numRanges(); j++) { + AllocatedRange range(interval, interval->getRange(j)), existing; if (spill->allocated.contains(range, &existing)) { success = false; break; @@ -1842,9 +1162,9 @@ BacktrackingAllocator::reuseOrAllocateStackSlot(const LiveBundleVector& bundles, } } if (success) { - // We can reuse this physical stack slot for the new bundles. + // We can reuse this physical stack slot for the new intervals. // Update the allocated ranges for the slot. 
- if (!insertAllRanges(spill->allocated, bundles)) + if (!insertAllRanges(spill->allocated, intervals)) return false; *palloc = spill->alloc; return true; @@ -1872,7 +1192,7 @@ BacktrackingAllocator::reuseOrAllocateStackSlot(const LiveBundleVector& bundles, if (!spill) return false; - if (!insertAllRanges(spill->allocated, bundles)) + if (!insertAllRanges(spill->allocated, intervals)) return false; *palloc = spill->alloc; @@ -1882,12 +1202,12 @@ BacktrackingAllocator::reuseOrAllocateStackSlot(const LiveBundleVector& bundles, } bool -BacktrackingAllocator::insertAllRanges(LiveRangeSet& set, const LiveBundleVector& bundles) +BacktrackingAllocator::insertAllRanges(AllocatedRangeSet& set, const LiveIntervalVector& intervals) { - for (size_t i = 0; i < bundles.length(); i++) { - LiveBundle* bundle = bundles[i]; - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); + for (size_t i = 0; i < intervals.length(); i++) { + LiveInterval* interval = intervals[i]; + for (size_t j = 0; j < interval->numRanges(); j++) { + AllocatedRange range(interval, interval->getRange(j)); if (!set.insert(range)) return false; } @@ -1895,48 +1215,31 @@ BacktrackingAllocator::insertAllRanges(LiveRangeSet& set, const LiveBundleVector return true; } +// Add moves to resolve conflicting assignments between a block and its +// predecessors. bool BacktrackingAllocator::resolveControlFlow() { - // Add moves to handle changing assignments for vregs over their lifetime. JitSpew(JitSpew_RegAlloc, "Resolving control flow (vreg loop)"); - // Look for places where a register's assignment changes in the middle of a - // basic block. - MOZ_ASSERT(!vregs[0u].hasRanges()); + // Virtual register number 0 is unused. + MOZ_ASSERT(vregs[0u].numIntervals() == 0); for (size_t i = 1; i < graph.numVirtualRegisters(); i++) { - VirtualRegister& reg = vregs[i]; + BacktrackingVirtualRegister* reg = &vregs[i]; if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg loop)")) return false; - for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); + for (size_t j = 1; j < reg->numIntervals(); j++) { + LiveInterval* interval = reg->getInterval(j); + MOZ_ASSERT(interval->index() == j); - // The range which defines the register does not have a predecessor - // to add moves from. - if (range->hasDefinition()) - continue; - - // Ignore ranges that start at block boundaries. We will handle - // these in the next phase. - CodePosition start = range->from(); - LNode* ins = insData[start]; - if (start == entryOf(ins->block())) - continue; - - // If we already saw a range which covers the start of this range - // and has the same allocation, we don't need an explicit move at - // the start of this range. 
bool skip = false; - for (LiveRange::RegisterLinkIterator prevIter = reg.rangesBegin(); - prevIter != iter; - prevIter++) - { - LiveRange* prevRange = LiveRange::get(*prevIter); - if (prevRange->covers(start) && - prevRange->bundle()->allocation() == range->bundle()->allocation()) - { + for (int k = j - 1; k >= 0; k--) { + LiveInterval* prevInterval = reg->getInterval(k); + if (prevInterval->start() != interval->start()) + break; + if (*prevInterval->getAllocation() == *interval->getAllocation()) { skip = true; break; } @@ -1944,13 +1247,19 @@ BacktrackingAllocator::resolveControlFlow() if (skip) continue; - LiveRange* predecessorRange = reg.rangeFor(start.previous()); - if (start.subpos() == CodePosition::INPUT) { - if (!moveInput(ins->toInstruction(), predecessorRange, range, reg.type())) - return false; - } else { - if (!moveAfter(ins->toInstruction(), predecessorRange, range, reg.type())) - return false; + CodePosition start = interval->start(); + LNode* ins = insData[start]; + if (start > entryOf(ins->block())) { + MOZ_ASSERT(start == inputOf(ins) || start == outputOf(ins)); + + LiveInterval* prevInterval = reg->intervalFor(start.previous()); + if (start.subpos() == CodePosition::INPUT) { + if (!moveInput(ins->toInstruction(), prevInterval, interval, reg->type())) + return false; + } else { + if (!moveAfter(ins->toInstruction(), prevInterval, interval, reg->type())) + return false; + } } } } @@ -1966,13 +1275,13 @@ BacktrackingAllocator::resolveControlFlow() if (mSuccessor->numPredecessors() < 1) continue; - // Resolve phis to moves. + // Resolve phis to moves for (size_t j = 0; j < successor->numPhis(); j++) { LPhi* phi = successor->getPhi(j); MOZ_ASSERT(phi->numDefs() == 1); LDefinition* def = phi->getDef(0); - VirtualRegister& reg = vreg(def); - LiveRange* to = reg.rangeFor(entryOf(successor)); + VirtualRegister* vreg = &vregs[def]; + LiveInterval* to = vreg->intervalFor(entryOf(successor)); MOZ_ASSERT(to); for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) { @@ -1980,7 +1289,7 @@ BacktrackingAllocator::resolveControlFlow() MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1); LAllocation* input = phi->getOperand(k); - LiveRange* from = vreg(input).rangeFor(exitOf(predecessor)); + LiveInterval* from = vregs[input].intervalFor(exitOf(predecessor)); MOZ_ASSERT(from); if (!moveAtExit(predecessor, from, to, def->type())) @@ -1988,8 +1297,7 @@ BacktrackingAllocator::resolveControlFlow() } } - // Add moves to resolve graph edges with different allocations at their - // source and target. 
+ // Resolve split intervals with moves BitSet& live = liveIn[mSuccessor->id()]; for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) { @@ -1998,14 +1306,14 @@ BacktrackingAllocator::resolveControlFlow() for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) { LBlock* predecessor = mSuccessor->getPredecessor(j)->lir(); - for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { - LiveRange* to = LiveRange::get(*iter); + for (size_t k = 0; k < reg.numIntervals(); k++) { + LiveInterval* to = reg.getInterval(k); if (!to->covers(entryOf(successor))) continue; if (to->covers(exitOf(predecessor))) continue; - LiveRange* from = reg.rangeFor(exitOf(predecessor)); + LiveInterval* from = reg.intervalFor(exitOf(predecessor)); if (mSuccessor->numPredecessors() > 1) { MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1); @@ -2048,12 +1356,12 @@ BacktrackingAllocator::isRegisterUse(LUse* use, LNode* ins, bool considerCopy) } bool -BacktrackingAllocator::isRegisterDefinition(LiveRange* range) +BacktrackingAllocator::isRegisterDefinition(LiveInterval* interval) { - if (!range->hasDefinition()) + if (interval->index() != 0) return false; - VirtualRegister& reg = vregs[range->vreg()]; + VirtualRegister& reg = vregs[interval->vreg()]; if (reg.ins()->isPhi()) return false; @@ -2068,50 +1376,56 @@ BacktrackingAllocator::reifyAllocations() { JitSpew(JitSpew_RegAlloc, "Reifying Allocations"); - MOZ_ASSERT(!vregs[0u].hasRanges()); + // Virtual register number 0 is unused. + MOZ_ASSERT(vregs[0u].numIntervals() == 0); for (size_t i = 1; i < graph.numVirtualRegisters(); i++) { - VirtualRegister& reg = vregs[i]; + VirtualRegister* reg = &vregs[i]; if (mir->shouldCancel("Backtracking Reify Allocations (main loop)")) return false; - for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); + for (size_t j = 0; j < reg->numIntervals(); j++) { + LiveInterval* interval = reg->getInterval(j); + MOZ_ASSERT(interval->index() == j); - if (range->hasDefinition()) { - reg.def()->setOutput(range->bundle()->allocation()); - if (reg.ins()->recoversInput()) { - LSnapshot* snapshot = reg.ins()->toInstruction()->snapshot(); + if (interval->index() == 0) { + reg->def()->setOutput(*interval->getAllocation()); + if (reg->ins()->recoversInput()) { + LSnapshot* snapshot = reg->ins()->toInstruction()->snapshot(); for (size_t i = 0; i < snapshot->numEntries(); i++) { LAllocation* entry = snapshot->getEntry(i); if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT) - *entry = *reg.def()->output(); + *entry = *reg->def()->output(); } } } - for (UsePositionIterator iter(range->usesBegin()); iter; iter++) { + for (UsePositionIterator iter(interval->usesBegin()); + iter != interval->usesEnd(); + iter++) + { LAllocation* alloc = iter->use; - *alloc = range->bundle()->allocation(); + *alloc = *interval->getAllocation(); // For any uses which feed into MUST_REUSE_INPUT definitions, // add copies if the use and def have different allocations. 
LNode* ins = insData[iter->pos]; if (LDefinition* def = FindReusingDefinition(ins, alloc)) { - LiveRange* outputRange = vreg(def).rangeFor(outputOf(ins)); - LAllocation res = outputRange->bundle()->allocation(); - LAllocation sourceAlloc = range->bundle()->allocation(); + LiveInterval* outputInterval = + vregs[def->virtualRegister()].intervalFor(outputOf(ins)); + LAllocation* res = outputInterval->getAllocation(); + LAllocation* sourceAlloc = interval->getAllocation(); - if (res != *alloc) { + if (*res != *alloc) { LMoveGroup* group = getInputMoveGroup(ins->toInstruction()); - if (!group->addAfter(sourceAlloc, res, reg.type())) + if (!group->addAfter(sourceAlloc, res, reg->type())) return false; - *alloc = res; + *alloc = *res; } } } - addLiveRegistersForRange(reg, range); + addLiveRegistersForInterval(reg, interval); } } @@ -2119,104 +1433,6 @@ BacktrackingAllocator::reifyAllocations() return true; } -size_t -BacktrackingAllocator::findFirstNonCallSafepoint(CodePosition from) -{ - size_t i = 0; - for (; i < graph.numNonCallSafepoints(); i++) { - const LInstruction* ins = graph.getNonCallSafepoint(i); - if (from <= inputOf(ins)) - break; - } - return i; -} - -void -BacktrackingAllocator::addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range) -{ - // Fill in the live register sets for all non-call safepoints. - LAllocation a = range->bundle()->allocation(); - if (!a.isRegister()) - return; - - // Don't add output registers to the safepoint. - CodePosition start = range->from(); - if (range->hasDefinition() && !reg.isTemp()) { -#ifdef CHECK_OSIPOINT_REGISTERS - // We don't add the output register to the safepoint, - // but it still might get added as one of the inputs. - // So eagerly add this reg to the safepoint clobbered registers. - if (reg.ins()->isInstruction()) { - if (LSafepoint* safepoint = reg.ins()->toInstruction()->safepoint()) - safepoint->addClobberedRegister(a.toRegister()); - } -#endif - start = start.next(); - } - - size_t i = findFirstNonCallSafepoint(start); - for (; i < graph.numNonCallSafepoints(); i++) { - LInstruction* ins = graph.getNonCallSafepoint(i); - CodePosition pos = inputOf(ins); - - // Safepoints are sorted, so we can shortcut out of this loop - // if we go out of range. - if (range->to() <= pos) - break; - - MOZ_ASSERT(range->covers(pos)); - - LSafepoint* safepoint = ins->safepoint(); - safepoint->addLiveRegister(a.toRegister()); - -#ifdef CHECK_OSIPOINT_REGISTERS - if (reg.isTemp()) - safepoint->addClobberedRegister(a.toRegister()); -#endif - } -} - -static inline bool -IsNunbox(VirtualRegister& reg) -{ -#ifdef JS_NUNBOX32 - return reg.type() == LDefinition::TYPE || - reg.type() == LDefinition::PAYLOAD; -#else - return false; -#endif -} - -static inline bool -IsSlotsOrElements(VirtualRegister& reg) -{ - return reg.type() == LDefinition::SLOTS; -} - -static inline bool -IsTraceable(VirtualRegister& reg) -{ - if (reg.type() == LDefinition::OBJECT) - return true; -#ifdef JS_PUNBOX64 - if (reg.type() == LDefinition::BOX) - return true; -#endif - return false; -} - -size_t -BacktrackingAllocator::findFirstSafepoint(CodePosition pos, size_t startFrom) -{ - size_t i = startFrom; - for (; i < graph.numSafepoints(); i++) { - LInstruction* ins = graph.getSafepoint(i); - if (pos <= inputOf(ins)) - break; - } - return i; -} - bool BacktrackingAllocator::populateSafepoints() { @@ -2224,65 +1440,73 @@ BacktrackingAllocator::populateSafepoints() size_t firstSafepoint = 0; + // Virtual register number 0 is unused. 
MOZ_ASSERT(!vregs[0u].def()); - for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) { - VirtualRegister& reg = vregs[i]; + for (uint32_t i = 1; i < vregs.numVirtualRegisters(); i++) { + BacktrackingVirtualRegister* reg = &vregs[i]; - if (!reg.def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg))) + if (!reg->def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg))) continue; - firstSafepoint = findFirstSafepoint(inputOf(reg.ins()), firstSafepoint); + firstSafepoint = findFirstSafepoint(reg->getInterval(0), firstSafepoint); if (firstSafepoint >= graph.numSafepoints()) break; - for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); + // Find the furthest endpoint. Intervals are sorted, but by start + // position, and we want the greatest end position. + CodePosition end = reg->getInterval(0)->end(); + for (size_t j = 1; j < reg->numIntervals(); j++) + end = Max(end, reg->getInterval(j)->end()); - for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) { - LInstruction* ins = graph.getSafepoint(j); + for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) { + LInstruction* ins = graph.getSafepoint(j); - if (!range->covers(inputOf(ins))) { - if (inputOf(ins) >= range->to()) - break; - continue; - } + // Stop processing safepoints if we know we're out of this virtual + // register's range. + if (end < outputOf(ins)) + break; - // Include temps but not instruction outputs. Also make sure - // MUST_REUSE_INPUT is not used with gcthings or nunboxes, or - // we would have to add the input reg to this safepoint. - if (ins == reg.ins() && !reg.isTemp()) { - DebugOnly def = reg.def(); - MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT, - def->type() == LDefinition::GENERAL || - def->type() == LDefinition::INT32 || - def->type() == LDefinition::FLOAT32 || - def->type() == LDefinition::DOUBLE); - continue; - } + // Include temps but not instruction outputs. Also make sure MUST_REUSE_INPUT + // is not used with gcthings or nunboxes, or we would have to add the input reg + // to this safepoint. 
+ if (ins == reg->ins() && !reg->isTemp()) { + DebugOnly def = reg->def(); + MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT, + def->type() == LDefinition::GENERAL || + def->type() == LDefinition::INT32 || + def->type() == LDefinition::FLOAT32 || + def->type() == LDefinition::DOUBLE); + continue; + } - LSafepoint* safepoint = ins->safepoint(); + LSafepoint* safepoint = ins->safepoint(); - LAllocation a = range->bundle()->allocation(); - if (a.isGeneralReg() && ins->isCall()) + for (size_t k = 0; k < reg->numIntervals(); k++) { + LiveInterval* interval = reg->getInterval(k); + if (!interval->covers(inputOf(ins))) continue; - switch (reg.type()) { + LAllocation* a = interval->getAllocation(); + if (a->isGeneralReg() && ins->isCall()) + continue; + + switch (reg->type()) { case LDefinition::OBJECT: - safepoint->addGcPointer(a); + safepoint->addGcPointer(*a); break; case LDefinition::SLOTS: - safepoint->addSlotsOrElementsPointer(a); + safepoint->addSlotsOrElementsPointer(*a); break; #ifdef JS_NUNBOX32 case LDefinition::TYPE: - safepoint->addNunboxType(i, a); + safepoint->addNunboxType(i, *a); break; case LDefinition::PAYLOAD: - safepoint->addNunboxPayload(i, a); + safepoint->addNunboxPayload(i, *a); break; #else case LDefinition::BOX: - safepoint->addBoxedValue(a); + safepoint->addBoxedValue(*a); break; #endif default: @@ -2303,10 +1527,6 @@ BacktrackingAllocator::annotateMoveGroups() // only required for x86, as other platforms always have scratch registers // available for use. #ifdef JS_CODEGEN_X86 - LiveRange* range = LiveRange::New(alloc(), 0, CodePosition(), CodePosition().next()); - if (!range) - return false; - for (size_t i = 0; i < graph.numBlocks(); i++) { if (mir->shouldCancel("Backtracking Annotate Move Groups")) return false; @@ -2316,8 +1536,8 @@ BacktrackingAllocator::annotateMoveGroups() for (LInstructionIterator iter = block->begin(); iter != block->end(); ++iter) { if (iter->isMoveGroup()) { CodePosition from = last ? outputOf(last) : entryOf(block); - range->setFrom(from); - range->setTo(from.next()); + LiveInterval::Range range(from, from.next()); + AllocatedRange search(nullptr, &range), existing; for (size_t i = 0; i < AnyRegister::Total; i++) { PhysicalRegister& reg = registers[i]; @@ -2325,9 +1545,9 @@ BacktrackingAllocator::annotateMoveGroups() continue; // This register is unavailable for use if (a) it is in use - // by some live range immediately before the move group, + // by some live interval immediately before the move group, // or (b) it is an operand in one of the group's moves. The - // latter case handles live ranges which end immediately + // latter case handles live intervals which end immediately // before the move group or start immediately after. // For (b) we need to consider move groups immediately // preceding or following this one. 
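(Illustrative aside, not part of the patch: the scratch-register test described in the comment above, with its two conditions (a) and (b), can be sketched on its own. Move, the register codes, and the containers below are simplified hypothetical stand-ins for the allocator's structures.)

// Standalone sketch: a register may serve as a move-group scratch only if
// (a) no live interval occupies it immediately before the group, and
// (b) it is not an operand of any move in the group.
#include <cstdint>
#include <cstdio>
#include <set>
#include <vector>

struct Move { uint32_t from; uint32_t to; };   // physical register codes

bool availableAsScratch(uint32_t reg,
                        const std::set<uint32_t>& liveBefore,   // condition (a)
                        const std::vector<Move>& moves)         // condition (b)
{
    if (liveBefore.count(reg))
        return false;
    for (const Move& m : moves) {
        if (m.from == reg || m.to == reg)
            return false;
    }
    return true;
}

int main() {
    std::set<uint32_t> liveBefore = {0};          // r0 is held across the group
    std::vector<Move> moves = {{1, 2}, {2, 3}};   // r1->r2, r2->r3
    for (uint32_t r = 0; r < 5; r++) {
        std::printf("r%u %s\n", r,
                    availableAsScratch(r, liveBefore, moves) ? "usable" : "busy");
    }
    return 0;   // only r4 is reported usable in this example
}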
@@ -2361,8 +1581,7 @@ BacktrackingAllocator::annotateMoveGroups() } while (riter != block->begin()); } - LiveRange* existing; - if (found || reg.allocations.contains(range, &existing)) + if (found || reg.allocations.contains(search, &existing)) continue; iter->toMoveGroup()->setScratchRegister(reg.reg.gpr()); @@ -2378,101 +1597,6 @@ BacktrackingAllocator::annotateMoveGroups() return true; } -bool -BacktrackingAllocator::addLiveBundle(LiveBundleVector& bundles, uint32_t vreg, - LiveBundle* spillParent, - CodePosition from, CodePosition to) -{ - LiveBundle* bundle = LiveBundle::New(alloc()); - if (!bundle || !bundles.append(bundle)) - return false; - bundle->setSpillParent(spillParent); - return bundle->addRange(alloc(), vreg, from, to); -} - -///////////////////////////////////////////////////////////////////// -// Debugging methods -///////////////////////////////////////////////////////////////////// - -#ifdef DEBUG - -const char* -LiveRange::toString() const -{ - // Not reentrant! - static char buf[2000]; - - char* cursor = buf; - char* end = cursor + sizeof(buf); - - int n = JS_snprintf(cursor, end - cursor, "v%u [%u,%u)", - hasVreg() ? vreg() : 0, from().bits(), to().bits()); - if (n < 0) MOZ_CRASH(); - cursor += n; - - if (bundle() && !bundle()->allocation().isBogus()) { - n = JS_snprintf(cursor, end - cursor, " %s", bundle()->allocation().toString()); - if (n < 0) MOZ_CRASH(); - cursor += n; - } - - if (hasDefinition()) { - n = JS_snprintf(cursor, end - cursor, " (def)"); - if (n < 0) MOZ_CRASH(); - cursor += n; - } - - for (UsePositionIterator iter = usesBegin(); iter; iter++) { - n = JS_snprintf(cursor, end - cursor, " %s@%u", iter->use->toString(), iter->pos.bits()); - if (n < 0) MOZ_CRASH(); - cursor += n; - } - - return buf; -} - -const char* -LiveBundle::toString() const -{ - // Not reentrant! - static char buf[2000]; - - char* cursor = buf; - char* end = cursor + sizeof(buf); - - for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) { - int n = JS_snprintf(cursor, end - cursor, "%s %s", - (iter == rangesBegin()) ? 
"" : " ##", - LiveRange::get(*iter)->toString()); - if (n < 0) MOZ_CRASH(); - cursor += n; - } - - return buf; -} - -#endif // DEBUG - -void -BacktrackingAllocator::dumpVregs() -{ -#ifdef DEBUG - MOZ_ASSERT(!vregs[0u].hasRanges()); - for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) { - fprintf(stderr, " "); - VirtualRegister& reg = vregs[i]; - for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { - if (iter != reg.rangesBegin()) - fprintf(stderr, " / "); - fprintf(stderr, "%s", LiveRange::get(*iter)->toString()); - } - fprintf(stderr, "\n"); - } - - fprintf(stderr, "\n"); -#endif -} - void BacktrackingAllocator::dumpRegisterGroups() { @@ -2503,24 +1627,41 @@ void BacktrackingAllocator::dumpFixedRanges() { #ifdef DEBUG - fprintf(stderr, "Live ranges by physical register: %s\n", callRanges->toString()); + bool any = false; + + for (size_t i = 0; i < AnyRegister::Total; i++) { + if (registers[i].allocatable && fixedIntervals[i]->numRanges() != 0) { + if (!any) { + fprintf(stderr, "Live ranges by physical register:\n"); + any = true; + } + fprintf(stderr, " %s: %s\n", AnyRegister::FromCode(i).name(), fixedIntervals[i]->toString()); + } + } + + if (any) + fprintf(stderr, "\n"); #endif // DEBUG } #ifdef DEBUG -struct BacktrackingAllocator::PrintLiveRange +struct BacktrackingAllocator::PrintLiveIntervalRange { bool& first_; - explicit PrintLiveRange(bool& first) : first_(first) {} + explicit PrintLiveIntervalRange(bool& first) : first_(first) {} - void operator()(const LiveRange* range) + void operator()(const AllocatedRange& item) { - if (first_) - first_ = false; - else - fprintf(stderr, " /"); - fprintf(stderr, " %s", range->toString()); + if (item.range == item.interval->getRange(0)) { + if (first_) + first_ = false; + else + fprintf(stderr, " /"); + if (item.interval->hasVreg()) + fprintf(stderr, " v%u[%u]", item.interval->vreg(), item.interval->index()); + fprintf(stderr, "%s", item.interval->rangesToString()); + } } }; #endif @@ -2539,7 +1680,7 @@ BacktrackingAllocator::dumpAllocations() if (registers[i].allocatable && !registers[i].allocations.empty()) { fprintf(stderr, " %s:", AnyRegister::FromCode(i).name()); bool first = true; - registers[i].allocations.forEach(PrintLiveRange(first)); + registers[i].allocations.forEach(PrintLiveIntervalRange(first)); fprintf(stderr, "\n"); } } @@ -2548,21 +1689,31 @@ BacktrackingAllocator::dumpAllocations() #endif // DEBUG } +bool +BacktrackingAllocator::addLiveInterval(LiveIntervalVector& intervals, uint32_t vreg, + LiveInterval* spillInterval, + CodePosition from, CodePosition to) +{ + LiveInterval* interval = LiveInterval::New(alloc(), vreg, 0); + interval->setSpillInterval(spillInterval); + return interval->addRange(from, to) && intervals.append(interval); +} + /////////////////////////////////////////////////////////////////////////////// // Heuristic Methods /////////////////////////////////////////////////////////////////////////////// size_t -BacktrackingAllocator::computePriority(LiveBundle* bundle) +BacktrackingAllocator::computePriority(const LiveInterval* interval) { - // The priority of a bundle is its total length, so that longer lived - // bundles will be processed before shorter ones (even if the longer ones - // have a low spill weight). See processBundle(). + // The priority of an interval is its total length, so that longer lived + // intervals will be processed before shorter ones (even if the longer ones + // have a low spill weight). See processInterval(). 
size_t lifetimeTotal = 0; - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - lifetimeTotal += range->to() - range->from(); + for (size_t i = 0; i < interval->numRanges(); i++) { + const LiveInterval::Range* range = interval->getRange(i); + lifetimeTotal += range->to - range->from; } return lifetimeTotal; @@ -2574,54 +1725,46 @@ BacktrackingAllocator::computePriority(const VirtualRegisterGroup* group) size_t priority = 0; for (size_t j = 0; j < group->registers.length(); j++) { uint32_t vreg = group->registers[j]; - priority += computePriority(LiveRange::get(*vregs[vreg].rangesBegin())->bundle()); + priority += computePriority(vregs[vreg].getInterval(0)); } return priority; } bool -BacktrackingAllocator::minimalDef(LiveRange* range, LNode* ins) +BacktrackingAllocator::minimalDef(const LiveInterval* interval, LNode* ins) { - // Whether this is a minimal range capturing a definition at ins. - return (range->to() <= minimalDefEnd(ins).next()) && - ((!ins->isPhi() && range->from() == inputOf(ins)) || range->from() == outputOf(ins)); + // Whether interval is a minimal interval capturing a definition at ins. + return (interval->end() <= minimalDefEnd(ins).next()) && + ((!ins->isPhi() && interval->start() == inputOf(ins)) || interval->start() == outputOf(ins)); } bool -BacktrackingAllocator::minimalUse(LiveRange* range, LNode* ins) +BacktrackingAllocator::minimalUse(const LiveInterval* interval, LNode* ins) { - // Whether this is a minimal range capturing a use at ins. - return (range->from() == inputOf(ins)) && - (range->to() == outputOf(ins) || range->to() == outputOf(ins).next()); + // Whether interval is a minimal interval capturing a use at ins. + return (interval->start() == inputOf(ins)) && + (interval->end() == outputOf(ins) || interval->end() == outputOf(ins).next()); } bool -BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed) +BacktrackingAllocator::minimalInterval(const LiveInterval* interval, bool* pfixed) { - LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); - LiveRange* range = LiveRange::get(*iter); - - if (!range->hasVreg()) { + if (!interval->hasVreg()) { *pfixed = true; return true; } - // If a bundle contains multiple ranges, splitAtAllRegisterUses will split - // each range into a separate bundle. 
- if (++iter) - return false; - - if (range->hasDefinition()) { - VirtualRegister& reg = vregs[range->vreg()]; + if (interval->index() == 0) { + VirtualRegister& reg = vregs[interval->vreg()]; if (pfixed) *pfixed = reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister(); - return minimalDef(range, reg.ins()); + return minimalDef(interval, reg.ins()); } bool fixed = false, minimal = false, multiple = false; - for (UsePositionIterator iter = range->usesBegin(); iter; iter++) { - if (iter != range->usesBegin()) + for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) { + if (iter != interval->usesBegin()) multiple = true; LUse* use = iter->use; @@ -2630,12 +1773,12 @@ BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed) if (fixed) return false; fixed = true; - if (minimalUse(range, insData[iter->pos])) + if (minimalUse(interval, insData[iter->pos])) minimal = true; break; case LUse::REGISTER: - if (minimalUse(range, insData[iter->pos])) + if (minimalUse(interval, insData[iter->pos])) minimal = true; break; @@ -2644,8 +1787,8 @@ BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed) } } - // If a range contains a fixed use and at least one other use, - // splitAtAllRegisterUses will split each use into a different bundle. + // If an interval contains a fixed use and at least one other use, + // splitAtAllRegisterUses will split each use into a different interval. if (multiple && fixed) minimal = false; @@ -2655,57 +1798,53 @@ BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed) } size_t -BacktrackingAllocator::computeSpillWeight(LiveBundle* bundle) +BacktrackingAllocator::computeSpillWeight(const LiveInterval* interval) { - // Minimal bundles have an extremely high spill weight, to ensure they - // can evict any other bundles and be allocated to a register. + // Minimal intervals have an extremely high spill weight, to ensure they + // can evict any other intervals and be allocated to a register. bool fixed; - if (minimalBundle(bundle, &fixed)) + if (minimalInterval(interval, &fixed)) return fixed ? 2000000 : 1000000; size_t usesTotal = 0; - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - - if (range->hasDefinition()) { - VirtualRegister& reg = vregs[range->vreg()]; - if (reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister()) - usesTotal += 2000; - else if (!reg.ins()->isPhi()) - usesTotal += 2000; - } - - for (UsePositionIterator iter = range->usesBegin(); iter; iter++) { - LUse* use = iter->use; - - switch (use->policy()) { - case LUse::ANY: - usesTotal += 1000; - break; - - case LUse::REGISTER: - case LUse::FIXED: - usesTotal += 2000; - break; - - case LUse::KEEPALIVE: - break; - - default: - // Note: RECOVERED_INPUT will not appear in UsePositionIterator. - MOZ_CRASH("Bad use"); - } - } - - // Ranges for registers in groups get higher weights. 
- if (vregs[range->vreg()].group()) + if (interval->index() == 0) { + VirtualRegister* reg = &vregs[interval->vreg()]; + if (reg->def()->policy() == LDefinition::FIXED && reg->def()->output()->isRegister()) + usesTotal += 2000; + else if (!reg->ins()->isPhi()) usesTotal += 2000; } + for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) { + LUse* use = iter->use; + + switch (use->policy()) { + case LUse::ANY: + usesTotal += 1000; + break; + + case LUse::REGISTER: + case LUse::FIXED: + usesTotal += 2000; + break; + + case LUse::KEEPALIVE: + break; + + default: + // Note: RECOVERED_INPUT will not appear in UsePositionIterator. + MOZ_CRASH("Bad use"); + } + } + + // Intervals for registers in groups get higher weights. + if (interval->hint()->kind() != Requirement::NONE) + usesTotal += 2000; + // Compute spill weight as a use density, lowering the weight for long - // lived bundles with relatively few uses. - size_t lifetimeTotal = computePriority(bundle); + // lived intervals with relatively few uses. + size_t lifetimeTotal = computePriority(interval); return lifetimeTotal ? usesTotal / lifetimeTotal : 0; } @@ -2715,51 +1854,52 @@ BacktrackingAllocator::computeSpillWeight(const VirtualRegisterGroup* group) size_t maxWeight = 0; for (size_t j = 0; j < group->registers.length(); j++) { uint32_t vreg = group->registers[j]; - maxWeight = Max(maxWeight, computeSpillWeight(LiveRange::get(*vregs[vreg].rangesBegin())->bundle())); + maxWeight = Max(maxWeight, computeSpillWeight(vregs[vreg].getInterval(0))); } return maxWeight; } size_t -BacktrackingAllocator::maximumSpillWeight(const LiveBundleVector& bundles) +BacktrackingAllocator::maximumSpillWeight(const LiveIntervalVector& intervals) { size_t maxWeight = 0; - for (size_t i = 0; i < bundles.length(); i++) - maxWeight = Max(maxWeight, computeSpillWeight(bundles[i])); + for (size_t i = 0; i < intervals.length(); i++) + maxWeight = Max(maxWeight, computeSpillWeight(intervals[i])); return maxWeight; } bool -BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle, bool* success) +BacktrackingAllocator::trySplitAcrossHotcode(LiveInterval* interval, bool* success) { - // If this bundle has portions that are hot and portions that are cold, + // If this interval has portions that are hot and portions that are cold, // split it at the boundaries between hot and cold code. - LiveRange* hotRange = nullptr; + const LiveInterval::Range* hotRange = nullptr; - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - if (hotcode.contains(range, &hotRange)) + for (size_t i = 0; i < interval->numRanges(); i++) { + AllocatedRange range(interval, interval->getRange(i)), existing; + if (hotcode.contains(range, &existing)) { + hotRange = existing.range; break; + } } - // Don't split if there is no hot code in the bundle. + // Don't split if there is no hot code in the interval. if (!hotRange) { - JitSpew(JitSpew_RegAlloc, " bundle does not contain hot code"); + JitSpew(JitSpew_RegAlloc, " interval does not contain hot code"); return true; } - // Don't split if there is no cold code in the bundle. + // Don't split if there is no cold code in the interval. 
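(Illustrative aside, not part of the patch: the use-density spill weight restored above, roughly usesTotal divided by lifetimeTotal with minimal intervals pinned to a very large constant, can be sketched in isolation. The constants follow the scheme in the comments; the types and sample numbers are hypothetical stand-ins.)

// Standalone sketch of spill weight as use density.
#include <cstdio>
#include <vector>

enum class UseKind { Any, Register, Fixed, KeepAlive };

struct IntervalSketch {
    size_t lifetime;                // total length of all ranges, in code positions
    bool minimal;                   // captures only a single def or use
    bool minimalIsFixed;
    std::vector<UseKind> uses;
};

size_t spillWeight(const IntervalSketch& i) {
    // Minimal intervals get extreme weights so they can evict anything.
    if (i.minimal)
        return i.minimalIsFixed ? 2000000 : 1000000;

    size_t usesTotal = 0;
    for (UseKind u : i.uses) {
        switch (u) {
          case UseKind::Any:       usesTotal += 1000; break;
          case UseKind::Register:
          case UseKind::Fixed:     usesTotal += 2000; break;
          case UseKind::KeepAlive: break;              // costs nothing to spill
        }
    }
    // Long-lived intervals with few uses get low weights: cheap to spill.
    return i.lifetime ? usesTotal / i.lifetime : 0;
}

int main() {
    IntervalSketch shortHot{8,   false, false, {UseKind::Register, UseKind::Register}};
    IntervalSketch longCold{400, false, false, {UseKind::Any}};
    std::printf("short/hot weight: %zu\n", spillWeight(shortHot));  // 500
    std::printf("long/cold weight: %zu\n", spillWeight(longCold)); // 2
    return 0;
}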
bool coldCode = false; - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - if (!hotRange->contains(range)) { + for (size_t i = 0; i < interval->numRanges(); i++) { + if (!hotRange->contains(interval->getRange(i))) { coldCode = true; break; } } if (!coldCode) { - JitSpew(JitSpew_RegAlloc, " bundle does not contain cold code"); + JitSpew(JitSpew_RegAlloc, " interval does not contain cold code"); return true; } @@ -2771,111 +1911,104 @@ BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle, bool* success) // soon and this special case removed. See bug 948838. if (compilingAsmJS()) { SplitPositionVector splitPositions; - if (!splitPositions.append(hotRange->from()) || !splitPositions.append(hotRange->to())) + if (!splitPositions.append(hotRange->from) || !splitPositions.append(hotRange->to)) return false; *success = true; - return splitAt(bundle, splitPositions); + return splitAt(interval, splitPositions); } - LiveBundle* hotBundle = LiveBundle::New(alloc()); - if (!hotBundle) - return false; - LiveBundle* preBundle = nullptr; - LiveBundle* postBundle = nullptr; + LiveInterval* hotInterval = LiveInterval::New(alloc(), interval->vreg(), 0); + LiveInterval* preInterval = nullptr; + LiveInterval* postInterval = nullptr; - // Accumulate the ranges of hot and cold code in the bundle. Note that + // Accumulate the ranges of hot and cold code in the interval. Note that // we are only comparing with the single hot range found, so the cold code // may contain separate hot ranges. - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - LiveRange::Range hot, coldPre, coldPost; - range->intersect(hotRange, &coldPre, &hot, &coldPost); + Vector hotList, coldList; + for (size_t i = 0; i < interval->numRanges(); i++) { + LiveInterval::Range hot, coldPre, coldPost; + interval->getRange(i)->intersect(hotRange, &coldPre, &hot, &coldPost); - if (!hot.empty()) { - if (!hotBundle->addRangeAndDistributeUses(alloc(), range, hot.from, hot.to)) - return false; - } + if (!hot.empty() && !hotInterval->addRange(hot.from, hot.to)) + return false; if (!coldPre.empty()) { - if (!preBundle) { - preBundle = LiveBundle::New(alloc()); - if (!preBundle) - return false; - } - if (!preBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to)) + if (!preInterval) + preInterval = LiveInterval::New(alloc(), interval->vreg(), 0); + if (!preInterval->addRange(coldPre.from, coldPre.to)) return false; } if (!coldPost.empty()) { - if (!postBundle) - postBundle = LiveBundle::New(alloc()); - if (!postBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to)) + if (!postInterval) + postInterval = LiveInterval::New(alloc(), interval->vreg(), 0); + if (!postInterval->addRange(coldPost.from, coldPost.to)) return false; } } - MOZ_ASSERT(preBundle || postBundle); - MOZ_ASSERT(hotBundle->numRanges() != 0); + MOZ_ASSERT(preInterval || postInterval); + MOZ_ASSERT(hotInterval->numRanges()); - LiveBundleVector newBundles; - if (!newBundles.append(hotBundle)) + LiveIntervalVector newIntervals; + if (!newIntervals.append(hotInterval)) return false; - if (preBundle && !newBundles.append(preBundle)) + if (preInterval && !newIntervals.append(preInterval)) return false; - if (postBundle && !newBundles.append(postBundle)) + if (postInterval && !newIntervals.append(postInterval)) return false; + distributeUses(interval, newIntervals); + *success = true; - return 
splitAndRequeueBundles(bundle, newBundles); + return split(interval, newIntervals) && requeueIntervals(newIntervals); } bool -BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict, - bool* success) +BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success) { - // If this bundle's later uses do not require it to be in a register, + // If this interval's later uses do not require it to be in a register, // split it after the last use which does require a register. If conflict // is specified, only consider register uses before the conflict starts. CodePosition lastRegisterFrom, lastRegisterTo, lastUse; - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - - // If the range defines a register, consider that a register use for - // our purposes here. - if (isRegisterDefinition(range)) { - CodePosition spillStart = minimalDefEnd(insData[range->from()]).next(); - if (!conflict || spillStart < conflict->firstRange()->from()) { - lastUse = lastRegisterFrom = range->from(); - lastRegisterTo = spillStart; - } + // If the definition of the interval is in a register, consider that a + // register use too for our purposes here. + if (isRegisterDefinition(interval)) { + CodePosition spillStart = minimalDefEnd(insData[interval->start()]).next(); + if (!conflict || spillStart < conflict->start()) { + lastUse = lastRegisterFrom = interval->start(); + lastRegisterTo = spillStart; } + } - for (UsePositionIterator iter(range->usesBegin()); iter; iter++) { - LUse* use = iter->use; - LNode* ins = insData[iter->pos]; + for (UsePositionIterator iter(interval->usesBegin()); + iter != interval->usesEnd(); + iter++) + { + LUse* use = iter->use; + LNode* ins = insData[iter->pos]; - // Uses in the bundle should be sorted. - MOZ_ASSERT(iter->pos >= lastUse); - lastUse = inputOf(ins); + // Uses in the interval should be sorted. + MOZ_ASSERT(iter->pos >= lastUse); + lastUse = inputOf(ins); - if (!conflict || outputOf(ins) < conflict->firstRange()->from()) { - if (isRegisterUse(use, ins, /* considerCopy = */ true)) { - lastRegisterFrom = inputOf(ins); - lastRegisterTo = iter->pos.next(); - } + if (!conflict || outputOf(ins) < conflict->start()) { + if (isRegisterUse(use, ins, /* considerCopy = */ true)) { + lastRegisterFrom = inputOf(ins); + lastRegisterTo = iter->pos.next(); } } } // Can't trim non-register uses off the end by splitting. 
if (!lastRegisterFrom.bits()) { - JitSpew(JitSpew_RegAlloc, " bundle has no register uses"); + JitSpew(JitSpew_RegAlloc, " interval has no register uses"); return true; } if (lastRegisterFrom == lastUse) { - JitSpew(JitSpew_RegAlloc, " bundle's last use is a register use"); + JitSpew(JitSpew_RegAlloc, " interval's last use is a register use"); return true; } @@ -2886,55 +2019,45 @@ BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBund if (!splitPositions.append(lastRegisterTo)) return false; *success = true; - return splitAt(bundle, splitPositions); + return splitAt(interval, splitPositions); } bool -BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success) +BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success) { - // If this bundle's earlier uses do not require it to be in a register, + // If this interval's earlier uses do not require it to be in a register, // split it before the first use which does require a register. If conflict // is specified, only consider register uses after the conflict ends. - if (isRegisterDefinition(bundle->firstRange())) { - JitSpew(JitSpew_RegAlloc, " bundle is defined by a register"); + if (isRegisterDefinition(interval)) { + JitSpew(JitSpew_RegAlloc, " interval is defined by a register"); return true; } - if (!bundle->firstRange()->hasDefinition()) { - JitSpew(JitSpew_RegAlloc, " bundle does not have definition"); + if (interval->index() != 0) { + JitSpew(JitSpew_RegAlloc, " interval is not defined in memory"); return true; } CodePosition firstRegisterFrom; - CodePosition conflictEnd; - if (conflict) { - for (LiveRange::BundleLinkIterator iter = conflict->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - if (range->to() > conflictEnd) - conflictEnd = range->to(); - } - } + for (UsePositionIterator iter(interval->usesBegin()); + iter != interval->usesEnd(); + iter++) + { + LUse* use = iter->use; + LNode* ins = insData[iter->pos]; - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - - for (UsePositionIterator iter(range->usesBegin()); iter; iter++) { - LUse* use = iter->use; - LNode* ins = insData[iter->pos]; - - if (!conflict || outputOf(ins) >= conflictEnd) { - if (isRegisterUse(use, ins, /* considerCopy = */ true)) { - firstRegisterFrom = inputOf(ins); - break; - } + if (!conflict || outputOf(ins) >= conflict->end()) { + if (isRegisterUse(use, ins, /* considerCopy = */ true)) { + firstRegisterFrom = inputOf(ins); + break; } } } if (!firstRegisterFrom.bits()) { // Can't trim non-register uses off the beginning by splitting. - JitSpew(JitSpew_RegAlloc, " bundle has no register uses"); + JitSpew(JitSpew_RegAlloc, " interval has no register uses"); return true; } @@ -2945,93 +2068,86 @@ BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBu if (!splitPositions.append(firstRegisterFrom)) return false; *success = true; - return splitAt(bundle, splitPositions); + return splitAt(interval, splitPositions); } bool -BacktrackingAllocator::splitAtAllRegisterUses(LiveBundle* bundle) +BacktrackingAllocator::splitAtAllRegisterUses(LiveInterval* interval) { - // Split this bundle so that all its register uses are placed in minimal - // bundles and allow the original bundle to be spilled throughout its range. 
+ // Split this interval so that all its register uses become minimal + // intervals and allow the vreg to be spilled throughout its range. - LiveBundleVector newBundles; + LiveIntervalVector newIntervals; + uint32_t vreg = interval->vreg(); JitSpew(JitSpew_RegAlloc, " split at all register uses"); - // If this bundle is the result of an earlier split which created a spill - // parent, that spill parent covers the whole range, so we don't need to - // create a new one. - bool spillBundleIsNew = false; - LiveBundle* spillBundle = bundle->spillParent(); - if (!spillBundle) { - spillBundle = LiveBundle::New(alloc()); - spillBundleIsNew = true; + // If this LiveInterval is the result of an earlier split which created a + // spill interval, that spill interval covers the whole range, so we don't + // need to create a new one. + bool spillIntervalIsNew = false; + LiveInterval* spillInterval = interval->spillInterval(); + if (!spillInterval) { + spillInterval = LiveInterval::New(alloc(), vreg, 0); + spillIntervalIsNew = true; } - CodePosition spillStart = bundle->firstRange()->from(); - if (isRegisterDefinition(bundle->firstRange())) { - // Treat the definition of the first range as a register use so that it + CodePosition spillStart = interval->start(); + if (isRegisterDefinition(interval)) { + // Treat the definition of the interval as a register use so that it // can be split and spilled ASAP. - CodePosition from = spillStart; + CodePosition from = interval->start(); CodePosition to = minimalDefEnd(insData[from]).next(); - if (!addLiveBundle(newBundles, bundle->firstRange()->vreg(), spillBundle, from, to)) + if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to)) return false; - bundle->firstRange()->distributeUses(newBundles.back()->firstRange()); spillStart = to; } - if (spillBundleIsNew) { - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - CodePosition from = Max(range->from(), spillStart); - if (!spillBundle->addRange(alloc(), range->vreg(), from, range->to())) + if (spillIntervalIsNew) { + for (size_t i = 0; i < interval->numRanges(); i++) { + const LiveInterval::Range* range = interval->getRange(i); + CodePosition from = Max(range->from, spillStart); + if (!spillInterval->addRange(from, range->to)) return false; } } - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); + for (UsePositionIterator iter(interval->usesBegin()); + iter != interval->usesEnd(); + iter++) + { + LNode* ins = insData[iter->pos]; + if (iter->pos < spillStart) { + newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos)); + } else if (isRegisterUse(iter->use, ins)) { + // For register uses which are not useRegisterAtStart, pick an + // interval that covers both the instruction's input and output, so + // that the register is not reused for an output. + CodePosition from = inputOf(ins); + CodePosition to = iter->use->usedAtStart() ? outputOf(ins) : iter->pos.next(); - LiveRange* lastNewRange = nullptr; - while (range->hasUses()) { - UsePosition* use = range->popUse(); - LNode* ins = insData[use->pos]; - - // The earlier call to distributeUses should have taken care of - // uses before the spill bundle started. 
- MOZ_ASSERT(use->pos >= spillStart); - - if (isRegisterUse(use->use, ins)) { - // For register uses which are not useRegisterAtStart, pick a - // bundle that covers both the instruction's input and output, so - // that the register is not reused for an output. - CodePosition from = inputOf(ins); - CodePosition to = use->use->usedAtStart() ? outputOf(ins) : use->pos.next(); - - // Use the same bundle for duplicate use positions, except when - // the uses are fixed (they may require incompatible registers). - if (!lastNewRange || - lastNewRange->to() != to || - lastNewRange->usesBegin()->use->policy() == LUse::FIXED || - use->use->policy() == LUse::FIXED) - { - if (!addLiveBundle(newBundles, range->vreg(), spillBundle, from, to)) - return false; - lastNewRange = newBundles.back()->firstRange(); - } - - lastNewRange->addUse(use); - } else { - MOZ_ASSERT(spillBundleIsNew); - spillBundle->rangeFor(use->pos)->addUse(use); + // Use the same interval for duplicate use positions, except when + // the uses are fixed (they may require incompatible registers). + if (newIntervals.empty() || + newIntervals.back()->end() != to || + newIntervals.back()->usesBegin()->use->policy() == LUse::FIXED || + iter->use->policy() == LUse::FIXED) + { + if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to)) + return false; } + + newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos)); + } else { + MOZ_ASSERT(spillIntervalIsNew); + spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos)); } } - if (spillBundleIsNew && !newBundles.append(spillBundle)) + if (spillIntervalIsNew && !newIntervals.append(spillInterval)) return false; - return splitAndRequeueBundles(bundle, newBundles); + return split(interval, newIntervals) && requeueIntervals(newIntervals); } // Find the next split position after the current position. @@ -3057,139 +2173,131 @@ static bool SplitHere(size_t activeSplitPosition, } bool -BacktrackingAllocator::splitAt(LiveBundle* bundle, const SplitPositionVector& splitPositions) +BacktrackingAllocator::splitAt(LiveInterval* interval, + const SplitPositionVector& splitPositions) { - // Split the bundle at the given split points. Unlike splitAtAllRegisterUses, + // Split the interval at the given split points. Unlike splitAtAllRegisterUses, // consolidate any register uses which have no intervening split points into the - // same resulting bundle. + // same resulting interval. // splitPositions should be non-empty and sorted. MOZ_ASSERT(!splitPositions.empty()); for (size_t i = 1; i < splitPositions.length(); ++i) MOZ_ASSERT(splitPositions[i-1] < splitPositions[i]); - // Don't spill the bundle until after the end of its definition. - CodePosition start = bundle->firstRange()->from(); - CodePosition spillStart = start; - if (isRegisterDefinition(bundle->firstRange())) - spillStart = minimalDefEnd(insData[spillStart]).next(); + // Don't spill the interval until after the end of its definition. + CodePosition spillStart = interval->start(); + if (isRegisterDefinition(interval)) + spillStart = minimalDefEnd(insData[interval->start()]).next(); - // As in splitAtAllRegisterUses, we don't need to create a new spill bundle - // if there already is one. 
- bool spillBundleIsNew = false; - LiveBundle* spillBundle = bundle->spillParent(); - if (!spillBundle) { - spillBundle = LiveBundle::New(alloc()); - if (!spillBundle) - return false; - spillBundleIsNew = true; + uint32_t vreg = interval->vreg(); - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - CodePosition from = Max(range->from(), spillStart); - if (!spillBundle->addRange(alloc(), range->vreg(), from, range->to())) + // If this LiveInterval is the result of an earlier split which created a + // spill interval, that spill interval covers the whole range, so we don't + // need to create a new one. + bool spillIntervalIsNew = false; + LiveInterval* spillInterval = interval->spillInterval(); + if (!spillInterval) { + spillInterval = LiveInterval::New(alloc(), vreg, 0); + spillIntervalIsNew = true; + + for (size_t i = 0; i < interval->numRanges(); i++) { + const LiveInterval::Range* range = interval->getRange(i); + CodePosition from = Max(range->from, spillStart); + if (!spillInterval->addRange(from, range->to)) return false; } - - if (bundle->firstRange()->hasDefinition() && !isRegisterDefinition(bundle->firstRange())) { - MOZ_ASSERT(spillStart == start); - spillBundle->firstRange()->setHasDefinition(); - } } - LiveBundleVector newBundles; + LiveIntervalVector newIntervals; - // The last point where a register use was seen. CodePosition lastRegisterUse; - - // The new range which the last register use was added to. - LiveRange* activeRange = nullptr; - - if (spillStart != start) { - if (!addLiveBundle(newBundles, bundle->firstRange()->vreg(), spillBundle, start, spillStart)) + if (spillStart != interval->start()) { + LiveInterval* newInterval = LiveInterval::New(alloc(), vreg, 0); + newInterval->setSpillInterval(spillInterval); + if (!newIntervals.append(newInterval)) return false; - bundle->firstRange()->distributeUses(newBundles.back()->firstRange()); - activeRange = newBundles.back()->firstRange(); - lastRegisterUse = start.previous(); + lastRegisterUse = interval->start(); } - size_t activeSplitPosition = NextSplitPosition(0, splitPositions, start); - - // All ranges in the bundle which we have finished processing since the - // last register use. - LiveRangeVector originalRanges; - - for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - - while (range->hasUses()) { - UsePosition* use = range->popUse(); - LNode* ins = insData[use->pos]; - - // The earlier call to distributeUses should have taken care of - // uses before the spill bundle started. - MOZ_ASSERT(use->pos >= spillStart); - - if (isRegisterUse(use->use, ins)) { - // Place this register use into a different bundle from the + size_t activeSplitPosition = NextSplitPosition(0, splitPositions, interval->start()); + for (UsePositionIterator iter(interval->usesBegin()); iter != interval->usesEnd(); iter++) { + LNode* ins = insData[iter->pos]; + if (iter->pos < spillStart) { + newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos)); + activeSplitPosition = NextSplitPosition(activeSplitPosition, splitPositions, iter->pos); + } else if (isRegisterUse(iter->use, ins)) { + if (lastRegisterUse.bits() == 0 || + SplitHere(activeSplitPosition, splitPositions, iter->pos)) + { + // Place this register use into a different interval from the // last one if there are any split points between the two uses. 
- if (lastRegisterUse.bits() == 0 || - SplitHere(activeSplitPosition, splitPositions, use->pos)) - { - if (!addLiveBundle(newBundles, range->vreg(), spillBundle, inputOf(ins), use->pos.next())) - return false; - activeSplitPosition = NextSplitPosition(activeSplitPosition, - splitPositions, - use->pos); - activeRange = newBundles.back()->firstRange(); - } else { - if (!originalRanges.empty()) { - activeRange->setTo(originalRanges[0]->to()); - for (size_t i = 1; i < originalRanges.length(); i++) { - LiveRange* orange = originalRanges[i]; - if (!newBundles.back()->addRange(alloc(), orange->vreg(), orange->from(), orange->to())) - return false; - } - activeRange = LiveRange::New(alloc(), range->vreg(), range->from(), use->pos.next()); - if (!activeRange) - return false; - newBundles.back()->addRange(activeRange); - } else { - activeRange->setTo(use->pos.next()); - } - MOZ_ASSERT(range->vreg() == activeRange->vreg()); - } - activeRange->addUse(use); - originalRanges.clear(); - lastRegisterUse = use->pos; - } else { - MOZ_ASSERT(spillBundleIsNew); - spillBundle->rangeFor(use->pos)->addUse(use); + LiveInterval* newInterval = LiveInterval::New(alloc(), vreg, 0); + newInterval->setSpillInterval(spillInterval); + if (!newIntervals.append(newInterval)) + return false; + activeSplitPosition = NextSplitPosition(activeSplitPosition, + splitPositions, + iter->pos); } + newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos)); + lastRegisterUse = iter->pos; + } else { + MOZ_ASSERT(spillIntervalIsNew); + spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos)); } - - if (!originalRanges.append(range)) - return false; } - if (spillBundleIsNew && !newBundles.append(spillBundle)) + // Compute ranges for each new interval that cover all its uses. + size_t activeRange = interval->numRanges(); + for (size_t i = 0; i < newIntervals.length(); i++) { + LiveInterval* newInterval = newIntervals[i]; + CodePosition start, end; + if (i == 0 && spillStart != interval->start()) { + start = interval->start(); + if (newInterval->usesEmpty()) + end = spillStart; + else + end = newInterval->usesBack()->pos.next(); + } else { + start = inputOf(insData[newInterval->usesBegin()->pos]); + end = newInterval->usesBack()->pos.next(); + } + for (; activeRange > 0; --activeRange) { + const LiveInterval::Range* range = interval->getRange(activeRange - 1); + if (range->to <= start) + continue; + if (range->from >= end) + break; + if (!newInterval->addRange(Max(range->from, start), + Min(range->to, end))) + return false; + if (range->to >= end) + break; + } + } + + if (spillIntervalIsNew && !newIntervals.append(spillInterval)) return false; - return splitAndRequeueBundles(bundle, newBundles); + return split(interval, newIntervals) && requeueIntervals(newIntervals); } bool -BacktrackingAllocator::splitAcrossCalls(LiveBundle* bundle) +BacktrackingAllocator::splitAcrossCalls(LiveInterval* interval) { - // Split the bundle to separate register uses and non-register uses and + // Split the interval to separate register uses and non-register uses and // allow the vreg to be spilled across its range. - // Find the locations of all calls in the bundle's range. + // Find the locations of all calls in the interval's range. Fixed intervals + // are introduced by buildLivenessInfo only for calls when allocating for + // the backtracking allocator. fixedIntervalsUnion is sorted backwards, so + // iterate through it backwards. 
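
Collecting the call positions themselves is a small scan, sketched here with simplified types (integer positions and a caller-supplied coverage predicate) rather than the fixedIntervalsUnion/LiveInterval machinery:

    #include <cstdint>
    #include <functional>
    #include <utility>
    #include <vector>

    using Position = uint32_t;

    // fixedRanges is sorted by 'from' in descending order, so walking it
    // backwards produces ascending call positions, which is the order the
    // split-position vector must be in. A position counts only if the
    // interval is live both at the call and immediately before it.
    std::vector<Position>
    CollectCallPositions(const std::vector<std::pair<Position, Position>>& fixedRanges,
                         const std::function<bool(Position)>& covers)
    {
        std::vector<Position> callPositions;
        for (size_t i = fixedRanges.size(); i > 0; i--) {
            Position from = fixedRanges[i - 1].first;
            if (from > 0 && covers(from) && covers(from - 1))
                callPositions.push_back(from);
        }
        return callPositions;
    }
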
SplitPositionVector callPositions; - for (LiveRange::BundleLinkIterator iter = callRanges->rangesBegin(); iter; iter++) { - LiveRange* callRange = LiveRange::get(*iter); - if (bundle->rangeFor(callRange->from()) && bundle->rangeFor(callRange->from().previous())) { - if (!callPositions.append(callRange->from())) + for (size_t i = fixedIntervalsUnion->numRanges(); i > 0; i--) { + const LiveInterval::Range* range = fixedIntervalsUnion->getRange(i - 1); + if (interval->covers(range->from) && interval->covers(range->from.previous())) { + if (!callPositions.append(range->from)) return false; } } @@ -3197,36 +2305,37 @@ BacktrackingAllocator::splitAcrossCalls(LiveBundle* bundle) #ifdef DEBUG JitSpewStart(JitSpew_RegAlloc, " split across calls at "); - for (size_t i = 0; i < callPositions.length(); ++i) + for (size_t i = 0; i < callPositions.length(); ++i) { JitSpewCont(JitSpew_RegAlloc, "%s%u", i != 0 ? ", " : "", callPositions[i].bits()); + } JitSpewFin(JitSpew_RegAlloc); #endif - return splitAt(bundle, callPositions); + return splitAt(interval, callPositions); } bool -BacktrackingAllocator::chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict) +BacktrackingAllocator::chooseIntervalSplit(LiveInterval* interval, bool fixed, LiveInterval* conflict) { bool success = false; - if (!trySplitAcrossHotcode(bundle, &success)) + if (!trySplitAcrossHotcode(interval, &success)) return false; if (success) return true; if (fixed) - return splitAcrossCalls(bundle); + return splitAcrossCalls(interval); - if (!trySplitBeforeFirstRegisterUse(bundle, conflict, &success)) + if (!trySplitBeforeFirstRegisterUse(interval, conflict, &success)) return false; if (success) return true; - if (!trySplitAfterLastRegisterUse(bundle, conflict, &success)) + if (!trySplitAfterLastRegisterUse(interval, conflict, &success)) return false; if (success) return true; - return splitAtAllRegisterUses(bundle); + return splitAtAllRegisterUses(interval); } diff --git a/js/src/jit/BacktrackingAllocator.h b/js/src/jit/BacktrackingAllocator.h index 73e4f8d1574..5b4948c38ea 100644 --- a/js/src/jit/BacktrackingAllocator.h +++ b/js/src/jit/BacktrackingAllocator.h @@ -11,8 +11,7 @@ #include "ds/PriorityQueue.h" #include "ds/SplayTree.h" -#include "jit/RegisterAllocator.h" -#include "jit/StackSlotAllocator.h" +#include "jit/LiveRangeAllocator.h" // Backtracking priority queue based register allocator based on that described // in the following blog post: @@ -22,374 +21,6 @@ namespace js { namespace jit { -class Requirement -{ - public: - enum Kind { - NONE, - REGISTER, - FIXED, - MUST_REUSE_INPUT - }; - - Requirement() - : kind_(NONE) - { } - - explicit Requirement(Kind kind) - : kind_(kind) - { - // These have dedicated constructors. - MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT); - } - - Requirement(Kind kind, CodePosition at) - : kind_(kind), - position_(at) - { - // These have dedicated constructors. - MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT); - } - - explicit Requirement(LAllocation fixed) - : kind_(FIXED), - allocation_(fixed) - { - MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse()); - } - - // Only useful as a hint, encodes where the fixed requirement is used to - // avoid allocating a fixed register too early. 
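
The Requirement kinds above combine in a small lattice, which is what merge() further down in the class implements: FIXED must agree with any other FIXED requirement, REGISTER is absorbed by a fixed register, and NONE is absorbed by anything. A simplified sketch, with an enum and a plain register number standing in for LAllocation (in the real code a fixed non-register allocation would make the REGISTER case conflict):

    #include <cstdint>

    enum class Kind { None, Register, Fixed };

    struct Req {
        Kind kind = Kind::None;
        uint32_t fixedReg = 0; // meaningful only when kind == Kind::Fixed
    };

    // Fold 'incoming' into 'existing'; returns false when the two conflict
    // (two different fixed registers).
    bool Merge(Req& existing, const Req& incoming)
    {
        if (incoming.kind == Kind::Fixed) {
            if (existing.kind == Kind::Fixed)
                return existing.fixedReg == incoming.fixedReg;
            existing = incoming;
            return true;
        }
        if (incoming.kind == Kind::Register) {
            // A fixed register requirement already satisfies "any register".
            if (existing.kind == Kind::Fixed)
                return true;
            existing = incoming;
            return true;
        }
        return true; // incoming NONE adds nothing
    }
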
- Requirement(LAllocation fixed, CodePosition at) - : kind_(FIXED), - allocation_(fixed), - position_(at) - { - MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse()); - } - - Requirement(uint32_t vreg, CodePosition at) - : kind_(MUST_REUSE_INPUT), - allocation_(LUse(vreg, LUse::ANY)), - position_(at) - { } - - Kind kind() const { - return kind_; - } - - LAllocation allocation() const { - MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse()); - return allocation_; - } - - uint32_t virtualRegister() const { - MOZ_ASSERT(allocation_.isUse()); - MOZ_ASSERT(kind() == MUST_REUSE_INPUT); - return allocation_.toUse()->virtualRegister(); - } - - CodePosition pos() const { - return position_; - } - - int priority() const; - - bool merge(const Requirement& newRequirement) { - // Merge newRequirement with any existing requirement, returning false - // if the new and old requirements conflict. - MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT); - - if (newRequirement.kind() == Requirement::FIXED) { - if (kind() == Requirement::FIXED) - return newRequirement.allocation() == allocation(); - *this = newRequirement; - return true; - } - - MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER); - if (kind() == Requirement::FIXED) - return allocation().isRegister(); - - *this = newRequirement; - return true; - } - - void dump() const; - - private: - Kind kind_; - LAllocation allocation_; - CodePosition position_; -}; - -struct UsePosition : public TempObject, - public InlineForwardListNode -{ - LUse* use; - CodePosition pos; - - UsePosition(LUse* use, CodePosition pos) : - use(use), - pos(pos) - { - // Verify that the usedAtStart() flag is consistent with the - // subposition. For now ignore fixed registers, because they - // are handled specially around calls. - MOZ_ASSERT_IF(!use->isFixedRegister(), - pos.subpos() == (use->usedAtStart() - ? CodePosition::INPUT - : CodePosition::OUTPUT)); - } -}; - -typedef InlineForwardListIterator UsePositionIterator; - -// Backtracking allocator data structures overview. -// -// LiveRange: A continuous range of positions where a virtual register is live. -// LiveBundle: A set of LiveRanges which do not overlap. -// VirtualRegister: A set of all LiveRanges used for some LDefinition. -// -// The allocator first performs a liveness ananlysis on the LIR graph which -// constructs LiveRanges for each VirtualRegister, determining where the -// registers are live. -// -// The ranges are then bundled together according to heuristics, and placed on -// the allocation queue. -// -// As bundles are removed from the allocation queue, we attempt to find a -// physical register or stack slot allocation for all ranges in the removed -// bundle, possibly evicting already-allocated bundles. See processBundle() -// for details. -// -// If we are not able to allocate a bundle, it is split according to heuristics -// into two or more smaller bundles which cover all the ranges of the original. -// These smaller bundles are then allocated independently. - -class LiveBundle; - -class LiveRange : public TempObject -{ - public: - // Linked lists are used to keep track of the ranges in each LiveBundle and - // VirtualRegister. Since a LiveRange may be in two lists simultaneously, use - // these auxiliary classes to keep things straight. 
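
The recovery from a link pointer back to its enclosing range relies on embedding one link struct per list and subtracting the member offset. A self-contained sketch of that pattern with a dummy Node type (the real links derive from InlineForwardListNode and sit on the bundle and register lists):

    #include <cstddef>
    #include <cstdio>

    struct Node {
        // One embedded link per list this node can be a member of.
        struct BundleLink   { BundleLink* next = nullptr; } bundleLink;
        struct RegisterLink { RegisterLink* next = nullptr; } registerLink;
        int payload = 0;

        // Recover the enclosing Node from a pointer to either link.
        static Node* fromBundleLink(BundleLink* link) {
            return reinterpret_cast<Node*>(
                reinterpret_cast<unsigned char*>(link) - offsetof(Node, bundleLink));
        }
        static Node* fromRegisterLink(RegisterLink* link) {
            return reinterpret_cast<Node*>(
                reinterpret_cast<unsigned char*>(link) - offsetof(Node, registerLink));
        }
    };

    int main() {
        Node n;
        n.payload = 42;
        // Both links recover the same enclosing object.
        std::printf("%d %d\n",
                    Node::fromBundleLink(&n.bundleLink)->payload,
                    Node::fromRegisterLink(&n.registerLink)->payload);
        return 0;
    }
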
- class BundleLink : public InlineForwardListNode {}; - class RegisterLink : public InlineForwardListNode {}; - - typedef InlineForwardListIterator BundleLinkIterator; - typedef InlineForwardListIterator RegisterLinkIterator; - - // Links in the lists in LiveBundle and VirtualRegister. - BundleLink bundleLink; - RegisterLink registerLink; - - static LiveRange* get(BundleLink* link) { - return reinterpret_cast(reinterpret_cast(link) - - offsetof(LiveRange, bundleLink)); - } - static LiveRange* get(RegisterLink* link) { - return reinterpret_cast(reinterpret_cast(link) - - offsetof(LiveRange, registerLink)); - } - - struct Range - { - // The beginning of this range, inclusive. - CodePosition from; - - // The end of this range, exclusive. - CodePosition to; - - Range() {} - - Range(CodePosition from, CodePosition to) - : from(from), to(to) - { - MOZ_ASSERT(from < to); - } - - bool empty() { - return from == to; - } - }; - - private: - // The virtual register this range is for, or zero if this does not have a - // virtual register (for example, it is in the callRanges bundle). - uint32_t vreg_; - - // The bundle containing this range, null if liveness information is being - // constructed and we haven't started allocating bundles yet. - LiveBundle* bundle_; - - // The code positions in this range. - Range range_; - - // All uses of the virtual register in this range, ordered by location. - InlineForwardList uses_; - - // Whether this range contains the virtual register's definition. - bool hasDefinition_; - - LiveRange(uint32_t vreg, Range range) - : vreg_(vreg), bundle_(nullptr), range_(range), hasDefinition_(false) - { - MOZ_ASSERT(!range.empty()); - } - - public: - static LiveRange* New(TempAllocator& alloc, uint32_t vreg, - CodePosition from, CodePosition to) { - return new(alloc) LiveRange(vreg, Range(from, to)); - } - - uint32_t vreg() const { - MOZ_ASSERT(hasVreg()); - return vreg_; - } - bool hasVreg() const { - return vreg_ != 0; - } - - LiveBundle* bundle() const { - return bundle_; - } - - CodePosition from() const { - return range_.from; - } - CodePosition to() const { - return range_.to; - } - bool covers(CodePosition pos) const { - return pos >= from() && pos < to(); - } - - // Whether this range wholly contains other. - bool contains(LiveRange* other) const; - - // Intersect this range with other, returning the subranges of this - // that are before, inside, or after other. - void intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const; - - UsePositionIterator usesBegin() const { - return uses_.begin(); - } - bool hasUses() const { - return !!usesBegin(); - } - UsePosition* popUse() { - return uses_.popFront(); - } - - bool hasDefinition() const { - return hasDefinition_; - } - - void setFrom(CodePosition from) { - range_.from = from; - } - void setTo(CodePosition to) { - range_.to = to; - } - - void setBundle(LiveBundle* bundle) { - bundle_ = bundle; - } - - void addUse(UsePosition* use); - void distributeUses(LiveRange* other); - - void setHasDefinition() { - MOZ_ASSERT(!hasDefinition_); - hasDefinition_ = true; - } - - // Return a string describing this range. This is not re-entrant! -#ifdef DEBUG - const char* toString() const; -#else - const char* toString() const { return "???"; } -#endif - - // Comparator for use in range splay trees. - static int compare(LiveRange* v0, LiveRange* v1) { - // LiveRange includes 'from' but excludes 'to'. 
- if (v0->to() <= v1->from()) - return -1; - if (v0->from() >= v1->to()) - return 1; - return 0; - } -}; - -// A set of live ranges which are all pairwise disjoint. The register allocator -// attempts to find allocations for an entire bundle, and if it fails the -// bundle will be broken into smaller ones which are allocated independently. -class LiveBundle : public TempObject -{ - // All the ranges in this set, ordered by location. - InlineForwardList ranges_; - - // Allocation to use for ranges in this set, bogus if not allocated. - LAllocation alloc_; - - // Optional live range set which is a superset of this one, and which has - // been spilled. - LiveBundle* spillParent_; - - LiveBundle() - : spillParent_(nullptr) - { } - - public: - static LiveBundle* New(TempAllocator& alloc) { - return new(alloc) LiveBundle(); - } - - LiveRange::BundleLinkIterator rangesBegin() const { - return ranges_.begin(); - } - LiveRange* firstRange() const { - return LiveRange::get(*rangesBegin()); - } - LiveRange* rangeFor(CodePosition pos) const; - void addRange(LiveRange* range); - bool addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to); - bool addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange, - CodePosition from, CodePosition to); -#ifdef DEBUG - size_t numRanges() const; -#endif - - LAllocation allocation() const { - return alloc_; - } - void setAllocation(LAllocation alloc) { - alloc_ = alloc; - } - - LiveBundle* spillParent() { - return spillParent_; - } - void setSpillParent(LiveBundle* spill) { - spillParent_ = spill; - } - - bool isSpill() const { - return alloc_.isStackSlot(); - } - - // Return a string describing this bundle. This is not re-entrant! -#ifdef DEBUG - const char* toString() const; -#else - const char* toString() const { return "???"; } -#endif -}; - // Information about a group of registers. Registers may be grouped together // when (a) all of their lifetimes are disjoint, (b) they are of the same type // (double / non-double) and (c) it is desirable that they have the same @@ -417,77 +48,28 @@ struct VirtualRegisterGroup : public TempObject } }; -// Information about the allocation for a virtual register. -class VirtualRegister +class BacktrackingVirtualRegister : public VirtualRegister { - // Instruction which defines this register. - LNode* ins_; - - // Definition in the instruction for this register. - LDefinition* def_; - - // All live ranges for this register. These may overlap each other, and are - // ordered by their start position. - InlineForwardList ranges_; - - // Whether def_ is a temp or an output. - bool isTemp_; - // If this register's definition is MUST_REUSE_INPUT, whether a copy must // be introduced before the definition that relaxes the policy. bool mustCopyInput_; + // Spill location to use for this register. + LAllocation canonicalSpill_; + + // Code position above which the canonical spill cannot be used; such + // intervals may overlap other registers in the same group. + CodePosition canonicalSpillExclude_; + // If this register is associated with a group of other registers, // information about the group. This structure is shared between all // registers in the group. VirtualRegisterGroup* group_; - // Range which might overlap other registers in the group, and should be - // allocated separately. If it exists, this range is always spillable, and - // cannot be split. - LiveRange* groupExclude_; - - // Spill location to use for ranges this register, except groupExclude. 
- LAllocation canonicalSpill_; - - void operator=(const VirtualRegister&) = delete; - VirtualRegister(const VirtualRegister&) = delete; - public: - explicit VirtualRegister() - { - // Note: This class is zeroed before it is constructed. - } - - void init(LNode* ins, LDefinition* def, bool isTemp) { - MOZ_ASSERT(!ins_); - ins_ = ins; - def_ = def; - isTemp_ = isTemp; - } - - LNode* ins() const { - return ins_; - } - LDefinition* def() const { - return def_; - } - LDefinition::Type type() const { - return def()->type(); - } - uint32_t vreg() const { - return def()->virtualRegister(); - } - bool isCompatible(const AnyRegister& r) const { - return def_->isCompatibleReg(r); - } - bool isCompatible(const VirtualRegister& vr) const { - return def_->isCompatibleDef(*vr.def_); - } - bool isTemp() const { - return isTemp_; - } - + explicit BacktrackingVirtualRegister(TempAllocator& alloc) + : VirtualRegister(alloc) + {} void setMustCopyInput() { mustCopyInput_ = true; } @@ -503,66 +85,48 @@ class VirtualRegister return canonicalSpill_.isBogus() ? nullptr : &canonicalSpill_; } + void setCanonicalSpillExclude(CodePosition pos) { + canonicalSpillExclude_ = pos; + } + bool hasCanonicalSpillExclude() const { + return canonicalSpillExclude_.bits() != 0; + } + CodePosition canonicalSpillExclude() const { + MOZ_ASSERT(hasCanonicalSpillExclude()); + return canonicalSpillExclude_; + } + void setGroup(VirtualRegisterGroup* group) { group_ = group; } VirtualRegisterGroup* group() { return group_; } - - void setGroupExclude(LiveRange* range) { - groupExclude_ = range; - } - LiveRange* groupExclude() { - return groupExclude_; - } - - LiveRange::RegisterLinkIterator rangesBegin() const { - return ranges_.begin(); - } - bool hasRanges() const { - return !!rangesBegin(); - } - LiveRange* rangeFor(CodePosition pos) const; - void removeRange(LiveRange* range); - void addRange(LiveRange* range); - - bool addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to); - void addInitialUse(UsePosition* use); - void setInitialDefinition(CodePosition from); }; // A sequence of code positions, for tellings BacktrackingAllocator::splitAt // where to split. typedef js::Vector SplitPositionVector; -class BacktrackingAllocator : protected RegisterAllocator +class BacktrackingAllocator + : private LiveRangeAllocator { friend class C1Spewer; friend class JSONSpewer; - BitSet* liveIn; - FixedList vregs; - - // Ranges where all registers must be spilled due to call instructions. - LiveBundle* callRanges; - - // Allocation state. - StackSlotAllocator stackSlotAllocator; - - // Priority queue element: either a bundle or a group of registers and the + // Priority queue element: either an interval or group of intervals and the // associated priority. struct QueueItem { - LiveBundle* bundle; + LiveInterval* interval; VirtualRegisterGroup* group; - QueueItem(LiveBundle* bundle, size_t priority) - : bundle(bundle), group(nullptr), priority_(priority) + QueueItem(LiveInterval* interval, size_t priority) + : interval(interval), group(nullptr), priority_(priority) {} QueueItem(VirtualRegisterGroup* group, size_t priority) - : bundle(nullptr), group(group), priority_(priority) + : interval(nullptr), group(group), priority_(priority) {} static size_t priority(const QueueItem& v) { @@ -575,14 +139,37 @@ class BacktrackingAllocator : protected RegisterAllocator PriorityQueue allocationQueue; - typedef SplayTree LiveRangeSet; + // A subrange over which a physical register is allocated. 
+ struct AllocatedRange { + LiveInterval* interval; + const LiveInterval::Range* range; + + AllocatedRange() + : interval(nullptr), range(nullptr) + {} + + AllocatedRange(LiveInterval* interval, const LiveInterval::Range* range) + : interval(interval), range(range) + {} + + static int compare(const AllocatedRange& v0, const AllocatedRange& v1) { + // LiveInterval::Range includes 'from' but excludes 'to'. + if (v0.range->to <= v1.range->from) + return -1; + if (v0.range->from >= v1.range->to) + return 1; + return 0; + } + }; + + typedef SplayTree AllocatedRangeSet; // Each physical register is associated with the set of ranges over which // that register is currently allocated. struct PhysicalRegister { bool allocatable; AnyRegister reg; - LiveRangeSet allocations; + AllocatedRangeSet allocations; PhysicalRegister() : allocatable(false) {} }; @@ -590,7 +177,7 @@ class BacktrackingAllocator : protected RegisterAllocator // Ranges of code which are considered to be hot, for which good allocation // should be prioritized. - LiveRangeSet hotcode; + AllocatedRangeSet hotcode; // During register allocation, virtual stack slots are used for spills. // These are converted to actual spill locations @@ -599,7 +186,7 @@ class BacktrackingAllocator : protected RegisterAllocator // Information about an allocated stack slot. struct SpillSlot : public TempObject, public InlineForwardListNode { LStackSlot alloc; - LiveRangeSet allocated; + AllocatedRangeSet allocated; SpillSlot(uint32_t slot, LifoAlloc* alloc) : alloc(slot), allocated(alloc) @@ -612,9 +199,7 @@ class BacktrackingAllocator : protected RegisterAllocator public: BacktrackingAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph) - : RegisterAllocator(mir, lir, graph), - liveIn(nullptr), - callRanges(nullptr), + : LiveRangeAllocator(mir, lir, graph), numVirtualStackSlots(0) { } @@ -622,130 +207,76 @@ class BacktrackingAllocator : protected RegisterAllocator private: - typedef Vector LiveRangeVector; - typedef Vector LiveBundleVector; + typedef Vector LiveIntervalVector; - // Liveness methods. bool init(); - bool buildLivenessInfo(); - - bool addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to); - - VirtualRegister& vreg(const LDefinition* def) { - return vregs[def->virtualRegister()]; - } - VirtualRegister& vreg(const LAllocation* alloc) { - MOZ_ASSERT(alloc->isUse()); - return vregs[alloc->toUse()->virtualRegister()]; - } - - // Allocation methods. 
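
The compare function in AllocatedRange above, like LiveRange::compare in the removed code, orders half-open ranges so that two ranges compare equal exactly when they overlap; with pairwise-disjoint ranges stored in the tree, a lookup answers "is anything allocated at these positions?". A sketch of the same idea with std::set standing in for the in-tree SplayTree:

    #include <cstdint>
    #include <iostream>
    #include <set>

    struct Range {
        uint32_t from; // inclusive
        uint32_t to;   // exclusive
    };

    struct RangeLess {
        // "Strictly before"; overlapping ranges are equivalent. Stored ranges
        // must be pairwise disjoint for this to be a valid ordering.
        bool operator()(const Range& a, const Range& b) const {
            return a.to <= b.from;
        }
    };

    int main() {
        std::set<Range, RangeLess> allocated;
        allocated.insert({10, 20});
        allocated.insert({30, 40});

        // [15, 16) overlaps [10, 20).
        std::cout << (allocated.count({15, 16}) ? "conflict\n" : "free\n");
        // [20, 25) does not: position 20 is excluded from [10, 20).
        std::cout << (allocated.count({20, 25}) ? "conflict\n" : "free\n");
        return 0;
    }
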
- bool canAddToGroup(VirtualRegisterGroup* group, VirtualRegister* reg); + bool canAddToGroup(VirtualRegisterGroup* group, BacktrackingVirtualRegister* reg); bool tryGroupRegisters(uint32_t vreg0, uint32_t vreg1); bool tryGroupReusedRegister(uint32_t def, uint32_t use); bool groupAndQueueRegisters(); - bool tryAllocateFixed(LiveBundle* bundle, Requirement requirement, - bool* success, bool* pfixed, LiveBundleVector& conflicting); - bool tryAllocateNonFixed(LiveBundle* bundle, Requirement requirement, Requirement hint, - bool* success, bool* pfixed, LiveBundleVector& conflicting); - bool processBundle(LiveBundle* bundle); + bool tryAllocateFixed(LiveInterval* interval, bool* success, bool* pfixed, + LiveIntervalVector& conflicting); + bool tryAllocateNonFixed(LiveInterval* interval, bool* success, bool* pfixed, + LiveIntervalVector& conflicting); + bool processInterval(LiveInterval* interval); bool processGroup(VirtualRegisterGroup* group); - bool computeRequirement(LiveBundle* bundle, Requirement *prequirement, Requirement *phint); - bool tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle, - bool* success, bool* pfixed, LiveBundleVector& conflicting); + bool setIntervalRequirement(LiveInterval* interval); + bool tryAllocateRegister(PhysicalRegister& r, LiveInterval* interval, + bool* success, bool* pfixed, LiveIntervalVector& conflicting); bool tryAllocateGroupRegister(PhysicalRegister& r, VirtualRegisterGroup* group, - bool* psuccess, LiveBundle** pconflicting); - bool evictBundle(LiveBundle* bundle); - bool splitAndRequeueBundles(LiveBundle* bundle, const LiveBundleVector& newBundles); - void spill(LiveBundle* bundle); + bool* psuccess, bool* pfixed, LiveInterval** pconflicting); + bool evictInterval(LiveInterval* interval); + void distributeUses(LiveInterval* interval, const LiveIntervalVector& newIntervals); + bool split(LiveInterval* interval, const LiveIntervalVector& newIntervals); + bool requeueIntervals(const LiveIntervalVector& newIntervals); + void spill(LiveInterval* interval); bool isReusedInput(LUse* use, LNode* ins, bool considerCopy); bool isRegisterUse(LUse* use, LNode* ins, bool considerCopy = false); - bool isRegisterDefinition(LiveRange* range); - bool addLiveBundle(LiveBundleVector& bundles, uint32_t vreg, LiveBundle* spillParent, - CodePosition from, CodePosition to); - bool pickStackSlot(LiveBundle* bundle); - bool addBundlesUsingAllocation(VirtualRegister ®, LAllocation alloc, - LiveBundleVector &bundles); - bool reuseOrAllocateStackSlot(const LiveBundleVector& bundles, LDefinition::Type type, + bool isRegisterDefinition(LiveInterval* interval); + bool addLiveInterval(LiveIntervalVector& intervals, uint32_t vreg, + LiveInterval* spillInterval, + CodePosition from, CodePosition to); + bool pickStackSlot(LiveInterval* interval); + bool reuseOrAllocateStackSlot(const LiveIntervalVector& intervals, LDefinition::Type type, LAllocation* palloc); - bool insertAllRanges(LiveRangeSet& set, const LiveBundleVector& bundles); + bool insertAllRanges(AllocatedRangeSet& set, const LiveIntervalVector& intervals); - // Reification methods. 
bool pickStackSlots(); bool resolveControlFlow(); bool reifyAllocations(); bool populateSafepoints(); bool annotateMoveGroups(); - size_t findFirstNonCallSafepoint(CodePosition from); - size_t findFirstSafepoint(CodePosition pos, size_t startFrom); - void addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range); - bool addMove(LMoveGroup* moves, LiveRange* from, LiveRange* to, LDefinition::Type type) { - LAllocation fromAlloc = from->bundle()->allocation(); - LAllocation toAlloc = to->bundle()->allocation(); - MOZ_ASSERT(fromAlloc != toAlloc); - return moves->add(fromAlloc, toAlloc, type); - } - - bool moveInput(LInstruction* ins, LiveRange* from, LiveRange* to, LDefinition::Type type) { - if (from->bundle()->allocation() == to->bundle()->allocation()) - return true; - LMoveGroup* moves = getInputMoveGroup(ins); - return addMove(moves, from, to, type); - } - - bool moveAfter(LInstruction* ins, LiveRange* from, LiveRange* to, LDefinition::Type type) { - if (from->bundle()->allocation() == to->bundle()->allocation()) - return true; - LMoveGroup* moves = getMoveGroupAfter(ins); - return addMove(moves, from, to, type); - } - - bool moveAtExit(LBlock* block, LiveRange* from, LiveRange* to, LDefinition::Type type) { - if (from->bundle()->allocation() == to->bundle()->allocation()) - return true; - LMoveGroup* moves = block->getExitMoveGroup(alloc()); - return addMove(moves, from, to, type); - } - - bool moveAtEntry(LBlock* block, LiveRange* from, LiveRange* to, LDefinition::Type type) { - if (from->bundle()->allocation() == to->bundle()->allocation()) - return true; - LMoveGroup* moves = block->getEntryMoveGroup(alloc()); - return addMove(moves, from, to, type); - } - - // Debugging methods. void dumpRegisterGroups(); void dumpFixedRanges(); void dumpAllocations(); - struct PrintLiveRange; + struct PrintLiveIntervalRange; - bool minimalDef(LiveRange* range, LNode* ins); - bool minimalUse(LiveRange* range, LNode* ins); - bool minimalBundle(LiveBundle* bundle, bool* pfixed = nullptr); + bool minimalDef(const LiveInterval* interval, LNode* ins); + bool minimalUse(const LiveInterval* interval, LNode* ins); + bool minimalInterval(const LiveInterval* interval, bool* pfixed = nullptr); // Heuristic methods. 
- size_t computePriority(LiveBundle* bundle); - size_t computeSpillWeight(LiveBundle* bundle); + size_t computePriority(const LiveInterval* interval); + size_t computeSpillWeight(const LiveInterval* interval); size_t computePriority(const VirtualRegisterGroup* group); size_t computeSpillWeight(const VirtualRegisterGroup* group); - size_t maximumSpillWeight(const LiveBundleVector& bundles); + size_t maximumSpillWeight(const LiveIntervalVector& intervals); - bool chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict); + bool chooseIntervalSplit(LiveInterval* interval, bool fixed, LiveInterval* conflict); - bool splitAt(LiveBundle* bundle, + bool splitAt(LiveInterval* interval, const SplitPositionVector& splitPositions); - bool trySplitAcrossHotcode(LiveBundle* bundle, bool* success); - bool trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success); - bool trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success); - bool splitAtAllRegisterUses(LiveBundle* bundle); - bool splitAcrossCalls(LiveBundle* bundle); + bool trySplitAcrossHotcode(LiveInterval* interval, bool* success); + bool trySplitAfterLastRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success); + bool trySplitBeforeFirstRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success); + bool splitAtAllRegisterUses(LiveInterval* interval); + bool splitAcrossCalls(LiveInterval* interval); bool compilingAsmJS() { return mir->info().compilingAsmJS(); @@ -755,8 +286,6 @@ class BacktrackingAllocator : protected RegisterAllocator return alloc.isStackSlot() && LAllocation::DATA_MASK - alloc.toStackSlot()->slot() < numVirtualStackSlots; } - - void dumpVregs(); }; } // namespace jit diff --git a/js/src/jit/C1Spewer.cpp b/js/src/jit/C1Spewer.cpp index 942f7747dd8..8a6d9807c6e 100644 --- a/js/src/jit/C1Spewer.cpp +++ b/js/src/jit/C1Spewer.cpp @@ -63,18 +63,19 @@ C1Spewer::spewPass(const char* pass) } void -C1Spewer::spewRanges(const char* pass, BacktrackingAllocator* regalloc) +C1Spewer::spewIntervals(const char* pass, BacktrackingAllocator* regalloc) { if (!spewout_) return; - fprintf(spewout_, "begin_ranges\n"); + fprintf(spewout_, "begin_intervals\n"); fprintf(spewout_, " name \"%s\"\n", pass); + size_t nextId = 0x4000; for (MBasicBlockIterator block(graph->begin()); block != graph->end(); block++) - spewRanges(spewout_, *block, regalloc); + spewIntervals(spewout_, *block, regalloc, nextId); - fprintf(spewout_, "end_ranges\n"); + fprintf(spewout_, "end_intervals\n"); fflush(spewout_); } @@ -111,37 +112,42 @@ DumpLIR(FILE* fp, LNode* ins) } void -C1Spewer::spewRanges(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins) +C1Spewer::spewIntervals(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins, size_t& nextId) { for (size_t k = 0; k < ins->numDefs(); k++) { uint32_t id = ins->getDef(k)->virtualRegister(); VirtualRegister* vreg = ®alloc->vregs[id]; - for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); - fprintf(fp, "%d object \"", id); - fprintf(fp, "%s", range->bundle()->allocation().toString()); - fprintf(fp, "\" %d -1", id); - fprintf(fp, " [%u, %u[", range->from().bits(), range->to().bits()); - for (UsePositionIterator usePos(range->usesBegin()); usePos; usePos++) - fprintf(fp, " %u M", usePos->pos.bits()); - fprintf(fp, " \"\"\n"); + for (size_t i = 0; i < vreg->numIntervals(); i++) { + LiveInterval* live = vreg->getInterval(i); + if (live->numRanges()) { + 
fprintf(fp, "%d object \"", (i == 0) ? id : int32_t(nextId++)); + fprintf(fp, "%s", live->getAllocation()->toString()); + fprintf(fp, "\" %d -1", id); + for (size_t j = 0; j < live->numRanges(); j++) { + fprintf(fp, " [%u, %u[", live->getRange(j)->from.bits(), + live->getRange(j)->to.bits()); + } + for (UsePositionIterator usePos(live->usesBegin()); usePos != live->usesEnd(); usePos++) + fprintf(fp, " %u M", usePos->pos.bits()); + fprintf(fp, " \"\"\n"); + } } } } void -C1Spewer::spewRanges(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc) +C1Spewer::spewIntervals(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc, size_t& nextId) { LBlock* lir = block->lir(); if (!lir) return; for (size_t i = 0; i < lir->numPhis(); i++) - spewRanges(fp, regalloc, lir->getPhi(i)); + spewIntervals(fp, regalloc, lir->getPhi(i), nextId); for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++) - spewRanges(fp, regalloc, *ins); + spewIntervals(fp, regalloc, *ins, nextId); } void diff --git a/js/src/jit/C1Spewer.h b/js/src/jit/C1Spewer.h index 5becbc8c431..9caa6f11b62 100644 --- a/js/src/jit/C1Spewer.h +++ b/js/src/jit/C1Spewer.h @@ -34,14 +34,14 @@ class C1Spewer bool init(const char* path); void beginFunction(MIRGraph* graph, HandleScript script); void spewPass(const char* pass); - void spewRanges(const char* pass, BacktrackingAllocator* regalloc); + void spewIntervals(const char* pass, BacktrackingAllocator* regalloc); void endFunction(); void finish(); private: void spewPass(FILE* fp, MBasicBlock* block); - void spewRanges(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins); - void spewRanges(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc); + void spewIntervals(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins, size_t& nextId); + void spewIntervals(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc, size_t& nextId); }; } // namespace jit diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp index 8553431d430..e9f34ae30a6 100644 --- a/js/src/jit/CodeGenerator.cpp +++ b/js/src/jit/CodeGenerator.cpp @@ -2120,13 +2120,13 @@ CodeGenerator::visitMoveGroup(LMoveGroup* group) for (size_t i = 0; i < group->numMoves(); i++) { const LMove& move = group->getMove(i); - LAllocation from = move.from(); - LAllocation to = move.to(); + const LAllocation* from = move.from(); + const LAllocation* to = move.to(); LDefinition::Type type = move.type(); // No bogus moves. - MOZ_ASSERT(from != to); - MOZ_ASSERT(!from.isConstant()); + MOZ_ASSERT(*from != *to); + MOZ_ASSERT(!from->isConstant()); MoveOp::Type moveType; switch (type) { case LDefinition::OBJECT: diff --git a/js/src/jit/InlineList.h b/js/src/jit/InlineList.h index 1876fb2437d..33b5d171573 100644 --- a/js/src/jit/InlineList.h +++ b/js/src/jit/InlineList.h @@ -68,6 +68,11 @@ class InlineForwardList : protected InlineForwardListNode return iterator(nullptr); } void removeAt(iterator where) { + iterator iter(where); + iter++; +#ifdef DEBUG + iter.modifyCount_++; +#endif removeAfter(where.prev, where.iter); } void pushFront(Node* t) { @@ -111,18 +116,6 @@ class InlineForwardList : protected InlineForwardListNode at->next = item->next; item->next = nullptr; } - void removeAndIncrement(iterator &where) { - // Do not change modifyCount_ here. The iterator can still be used - // after calling this method, unlike the other methods that modify - // the list. 
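
The removed removeAndIncrement() helper exists so a caller can delete the current element and keep walking the same iterator. On a singly linked list that amounts to tracking the previous node; a sketch with std::forward_list in place of InlineForwardList:

    #include <forward_list>
    #include <iostream>

    int main() {
        std::forward_list<int> uses = {1, 4, 9, 16, 25};

        // Remove every even element while continuing the traversal.
        auto prev = uses.before_begin();
        for (auto it = uses.begin(); it != uses.end(); ) {
            if (*it % 2 == 0)
                it = uses.erase_after(prev); // unlink *it, land on its successor
            else
                prev = it++;
        }

        for (int v : uses)
            std::cout << v << ' ';           // prints: 1 9 25
        std::cout << '\n';
        return 0;
    }
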
- Node* item = where.iter; - where.iter = item->next; - if (item == tail_) - tail_ = where.prev; - MOZ_ASSERT(where.prev->next == item); - where.prev->next = where.iter; - item->next = nullptr; - } void splitAfter(Node* at, InlineForwardList* to) { MOZ_ASSERT(to->empty()); if (!at) @@ -192,9 +185,6 @@ public: bool operator ==(const InlineForwardListIterator& where) const { return iter == where.iter; } - explicit operator bool() const { - return iter != nullptr; - } private: Node* prev; diff --git a/js/src/jit/JSONSpewer.cpp b/js/src/jit/JSONSpewer.cpp index b40a0dd2c02..91d462f03b9 100644 --- a/js/src/jit/JSONSpewer.cpp +++ b/js/src/jit/JSONSpewer.cpp @@ -396,12 +396,12 @@ JSONSpewer::spewLIR(MIRGraph* mir) } void -JSONSpewer::spewRanges(BacktrackingAllocator* regalloc) +JSONSpewer::spewIntervals(BacktrackingAllocator* regalloc) { if (!fp_) return; - beginObjectProperty("ranges"); + beginObjectProperty("intervals"); beginListProperty("blocks"); for (size_t bno = 0; bno < regalloc->graph.numBlocks(); bno++) { @@ -417,17 +417,27 @@ JSONSpewer::spewRanges(BacktrackingAllocator* regalloc) beginObject(); integerProperty("vreg", id); - beginListProperty("ranges"); + beginListProperty("intervals"); - for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) { - LiveRange* range = LiveRange::get(*iter); + for (size_t i = 0; i < vreg->numIntervals(); i++) { + LiveInterval* live = vreg->getInterval(i); - beginObject(); - property("allocation"); - fprintf(fp_, "\"%s\"", range->bundle()->allocation().toString()); - integerProperty("start", range->from().bits()); - integerProperty("end", range->to().bits()); - endObject(); + if (live->numRanges()) { + beginObject(); + property("allocation"); + fprintf(fp_, "\"%s\"", live->getAllocation()->toString()); + beginListProperty("ranges"); + + for (size_t j = 0; j < live->numRanges(); j++) { + beginObject(); + integerProperty("start", live->getRange(j)->from.bits()); + integerProperty("end", live->getRange(j)->to.bits()); + endObject(); + } + + endList(); + endObject(); + } } endList(); diff --git a/js/src/jit/JSONSpewer.h b/js/src/jit/JSONSpewer.h index 41faab5bcd4..db121a8dfab 100644 --- a/js/src/jit/JSONSpewer.h +++ b/js/src/jit/JSONSpewer.h @@ -61,7 +61,7 @@ class JSONSpewer void spewMIR(MIRGraph* mir); void spewLIns(LNode* ins); void spewLIR(MIRGraph* mir); - void spewRanges(BacktrackingAllocator* regalloc); + void spewIntervals(BacktrackingAllocator* regalloc); void endPass(); void endFunction(); void finish(); diff --git a/js/src/jit/JitSpewer.cpp b/js/src/jit/JitSpewer.cpp index 21870fe3765..d00c995dc4a 100644 --- a/js/src/jit/JitSpewer.cpp +++ b/js/src/jit/JitSpewer.cpp @@ -177,11 +177,11 @@ IonSpewer::spewPass(const char* pass, BacktrackingAllocator* ra) return; c1Spewer.spewPass(pass); - c1Spewer.spewRanges(pass, ra); + c1Spewer.spewIntervals(pass, ra); jsonSpewer.beginPass(pass); jsonSpewer.spewMIR(graph); jsonSpewer.spewLIR(graph); - jsonSpewer.spewRanges(ra); + jsonSpewer.spewIntervals(ra); jsonSpewer.endPass(); } diff --git a/js/src/jit/LIR-Common.h b/js/src/jit/LIR-Common.h index 2cb2bc4b5c5..34227c2db63 100644 --- a/js/src/jit/LIR-Common.h +++ b/js/src/jit/LIR-Common.h @@ -58,21 +58,27 @@ class LOsiPoint : public LInstructionHelper<0, 0, 0> class LMove { - LAllocation from_; - LAllocation to_; + LAllocation* from_; + LAllocation* to_; LDefinition::Type type_; public: - LMove(LAllocation from, LAllocation to, LDefinition::Type type) + LMove(LAllocation* from, LAllocation* to, LDefinition::Type type) : from_(from), to_(to), 
type_(type) { } - LAllocation from() const { + LAllocation* from() { return from_; } - LAllocation to() const { + const LAllocation* from() const { + return from_; + } + LAllocation* to() { + return to_; + } + const LAllocation* to() const { return to_; } LDefinition::Type type() const { @@ -103,10 +109,10 @@ class LMoveGroup : public LInstructionHelper<0, 0, 0> void printOperands(FILE* fp); // Add a move which takes place simultaneously with all others in the group. - bool add(LAllocation from, LAllocation to, LDefinition::Type type); + bool add(LAllocation* from, LAllocation* to, LDefinition::Type type); // Add a move which takes place after existing moves in the group. - bool addAfter(LAllocation from, LAllocation to, LDefinition::Type type); + bool addAfter(LAllocation* from, LAllocation* to, LDefinition::Type type); size_t numMoves() const { return moves_.length(); @@ -131,7 +137,7 @@ class LMoveGroup : public LInstructionHelper<0, 0, 0> bool uses(Register reg) { for (size_t i = 0; i < numMoves(); i++) { LMove move = getMove(i); - if (move.from() == LGeneralReg(reg) || move.to() == LGeneralReg(reg)) + if (*move.from() == LGeneralReg(reg) || *move.to() == LGeneralReg(reg)) return true; } return false; diff --git a/js/src/jit/LIR.cpp b/js/src/jit/LIR.cpp index 7dc77134584..61b0441512e 100644 --- a/js/src/jit/LIR.cpp +++ b/js/src/jit/LIR.cpp @@ -535,28 +535,28 @@ LInstruction::initSafepoint(TempAllocator& alloc) } bool -LMoveGroup::add(LAllocation from, LAllocation to, LDefinition::Type type) +LMoveGroup::add(LAllocation* from, LAllocation* to, LDefinition::Type type) { #ifdef DEBUG - MOZ_ASSERT(from != to); + MOZ_ASSERT(*from != *to); for (size_t i = 0; i < moves_.length(); i++) - MOZ_ASSERT(to != moves_[i].to()); + MOZ_ASSERT(*to != *moves_[i].to()); // Check that SIMD moves are aligned according to ABI requirements. if (LDefinition(type).isSimdType()) { - MOZ_ASSERT(from.isMemory() || from.isFloatReg()); - if (from.isMemory()) { - if (from.isArgument()) - MOZ_ASSERT(from.toArgument()->index() % SimdMemoryAlignment == 0); + MOZ_ASSERT(from->isMemory() || from->isFloatReg()); + if (from->isMemory()) { + if (from->isArgument()) + MOZ_ASSERT(from->toArgument()->index() % SimdMemoryAlignment == 0); else - MOZ_ASSERT(from.toStackSlot()->slot() % SimdMemoryAlignment == 0); + MOZ_ASSERT(from->toStackSlot()->slot() % SimdMemoryAlignment == 0); } - MOZ_ASSERT(to.isMemory() || to.isFloatReg()); - if (to.isMemory()) { - if (to.isArgument()) - MOZ_ASSERT(to.toArgument()->index() % SimdMemoryAlignment == 0); + MOZ_ASSERT(to->isMemory() || to->isFloatReg()); + if (to->isMemory()) { + if (to->isArgument()) + MOZ_ASSERT(to->toArgument()->index() % SimdMemoryAlignment == 0); else - MOZ_ASSERT(to.toStackSlot()->slot() % SimdMemoryAlignment == 0); + MOZ_ASSERT(to->toStackSlot()->slot() % SimdMemoryAlignment == 0); } } #endif @@ -564,24 +564,24 @@ LMoveGroup::add(LAllocation from, LAllocation to, LDefinition::Type type) } bool -LMoveGroup::addAfter(LAllocation from, LAllocation to, LDefinition::Type type) +LMoveGroup::addAfter(LAllocation* from, LAllocation* to, LDefinition::Type type) { // Transform the operands to this move so that performing the result // simultaneously with existing moves in the group will have the same // effect as if the original move took place after the existing moves. 
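
Because every move in an LMoveGroup is performed simultaneously, "adding a move after the others" means rewriting its operands, exactly as the comment above describes: read from the source of any earlier move that writes this move's input, drop it if it becomes a self-move, and supersede any earlier move to the same destination. A standalone sketch with integer locations instead of LAllocation:

    #include <vector>

    struct Move { int from; int to; };

    void AddAfter(std::vector<Move>& group, Move m)
    {
        // If an existing move writes m.from, read its source instead.
        for (const Move& existing : group) {
            if (existing.to == m.from) {
                m.from = existing.from;
                break;
            }
        }
        if (m.from == m.to)
            return; // became a no-op

        // If an existing move already writes m.to, this move supersedes it.
        for (Move& existing : group) {
            if (existing.to == m.to) {
                existing = m;
                return;
            }
        }
        group.push_back(m);
    }

For example, appending {a -> b} after a group already containing {b -> c} leaves the group as {b -> c, a -> b}; appending {b -> d} after {a -> b} becomes {a -> d}, since in the simultaneous group b is only written after d has been read.
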
for (size_t i = 0; i < moves_.length(); i++) { - if (moves_[i].to() == from) { + if (*moves_[i].to() == *from) { from = moves_[i].from(); break; } } - if (from == to) + if (*from == *to) return true; for (size_t i = 0; i < moves_.length(); i++) { - if (to == moves_[i].to()) { + if (*to == *moves_[i].to()) { moves_[i] = LMove(from, to, type); return true; } @@ -596,8 +596,8 @@ LMoveGroup::printOperands(FILE* fp) for (size_t i = 0; i < numMoves(); i++) { const LMove& move = getMove(i); // Use two printfs, as LAllocation::toString is not reentrant. - fprintf(fp, " [%s", move.from().toString()); - fprintf(fp, " -> %s", move.to().toString()); + fprintf(fp, " [%s", move.from()->toString()); + fprintf(fp, " -> %s", move.to()->toString()); #ifdef DEBUG fprintf(fp, ", %s", TypeChars[move.type()]); #endif diff --git a/js/src/jit/LiveRangeAllocator.cpp b/js/src/jit/LiveRangeAllocator.cpp new file mode 100644 index 00000000000..ba94a9c35d1 --- /dev/null +++ b/js/src/jit/LiveRangeAllocator.cpp @@ -0,0 +1,996 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/LiveRangeAllocator.h" + +#include "mozilla/DebugOnly.h" + +#include "jsprf.h" + +#include "jit/BacktrackingAllocator.h" +#include "jit/BitSet.h" + +using namespace js; +using namespace js::jit; + +using mozilla::DebugOnly; + +int +Requirement::priority() const +{ + switch (kind_) { + case Requirement::FIXED: + return 0; + + case Requirement::REGISTER: + return 1; + + case Requirement::NONE: + return 2; + + default: + MOZ_CRASH("Unknown requirement kind."); + } +} + +const char* +Requirement::toString() const +{ +#ifdef DEBUG + // Not reentrant! + static char buf[1000]; + + char* cursor = buf; + char* end = cursor + sizeof(buf); + + int n = -1; // initialize to silence GCC warning + switch (kind()) { + case NONE: + return "none"; + case REGISTER: + n = JS_snprintf(cursor, end - cursor, "r"); + break; + case FIXED: + n = JS_snprintf(cursor, end - cursor, "%s", allocation().toString()); + break; + case MUST_REUSE_INPUT: + n = JS_snprintf(cursor, end - cursor, "v%u", virtualRegister()); + break; + } + if (n < 0) + return "???"; + cursor += n; + + if (pos() != CodePosition::MIN) { + n = JS_snprintf(cursor, end - cursor, "@%u", pos().bits()); + if (n < 0) + return "???"; + cursor += n; + } + + return buf; +#else + return " ???"; +#endif +} + +void +Requirement::dump() const +{ + fprintf(stderr, "%s\n", toString()); +} + +bool +LiveInterval::Range::contains(const Range* other) const +{ + return from <= other->from && to >= other->to; +} + +void +LiveInterval::Range::intersect(const Range* other, Range* pre, Range* inside, Range* post) const +{ + MOZ_ASSERT(pre->empty() && inside->empty() && post->empty()); + + CodePosition innerFrom = from; + if (from < other->from) { + if (to < other->from) { + *pre = Range(from, to); + return; + } + *pre = Range(from, other->from); + innerFrom = other->from; + } + + CodePosition innerTo = to; + if (to > other->to) { + if (from >= other->to) { + *post = Range(from, to); + return; + } + *post = Range(other->to, to); + innerTo = other->to; + } + + if (innerFrom != innerTo) + *inside = Range(innerFrom, innerTo); +} + +const char* +LiveInterval::Range::toString() const +{ +#ifdef DEBUG + // Not reentrant! 
+ static char buf[1000]; + + char* cursor = buf; + char* end = cursor + sizeof(buf); + + int n = JS_snprintf(cursor, end - cursor, "[%u,%u)", from.bits(), to.bits()); + if (n < 0) + return " ???"; + cursor += n; + + return buf; +#else + return " ???"; +#endif +} + +void +LiveInterval::Range::dump() const +{ + fprintf(stderr, "%s\n", toString()); +} + +bool +LiveInterval::addRangeAtHead(CodePosition from, CodePosition to) +{ + MOZ_ASSERT(from < to); + MOZ_ASSERT(ranges_.empty() || from <= ranges_.back().from); + + Range newRange(from, to); + + if (ranges_.empty()) + return ranges_.append(newRange); + + Range& first = ranges_.back(); + if (to < first.from) + return ranges_.append(newRange); + + if (to == first.from) { + first.from = from; + return true; + } + + MOZ_ASSERT(from < first.to); + MOZ_ASSERT(to > first.from); + if (from < first.from) + first.from = from; + if (to > first.to) + first.to = to; + + return true; +} + +bool +LiveInterval::addRange(CodePosition from, CodePosition to) +{ + MOZ_ASSERT(from < to); + + Range newRange(from, to); + + Range* i; + // Find the location to insert the new range + for (i = ranges_.end(); i > ranges_.begin(); i--) { + if (newRange.from <= i[-1].to) { + if (i[-1].from < newRange.from) + newRange.from = i[-1].from; + break; + } + } + // Perform coalescing on overlapping ranges + Range* coalesceEnd = i; + for (; i > ranges_.begin(); i--) { + if (newRange.to < i[-1].from) + break; + if (newRange.to < i[-1].to) + newRange.to = i[-1].to; + } + + if (i == coalesceEnd) + return ranges_.insert(i, newRange); + + i[0] = newRange; + ranges_.erase(i + 1, coalesceEnd); + return true; +} + +void +LiveInterval::setFrom(CodePosition from) +{ + while (!ranges_.empty()) { + if (ranges_.back().to < from) { + ranges_.popBack(); + } else { + if (from == ranges_.back().to) + ranges_.popBack(); + else + ranges_.back().from = from; + break; + } + } +} + +bool +LiveInterval::covers(CodePosition pos) +{ + if (pos < start() || pos >= end()) + return false; + + // Loop over the ranges in ascending order. + size_t i = lastProcessedRangeIfValid(pos); + for (; i < ranges_.length(); i--) { + if (pos < ranges_[i].from) + return false; + setLastProcessedRange(i, pos); + if (pos < ranges_[i].to) + return true; + } + return false; +} + +CodePosition +LiveInterval::intersect(LiveInterval* other) +{ + if (start() > other->start()) + return other->intersect(this); + + // Loop over the ranges in ascending order. As an optimization, + // try to start at the last processed range. + size_t i = lastProcessedRangeIfValid(other->start()); + size_t j = other->ranges_.length() - 1; + if (i >= ranges_.length() || j >= other->ranges_.length()) + return CodePosition::MIN; + + while (true) { + const Range& r1 = ranges_[i]; + const Range& r2 = other->ranges_[j]; + + if (r1.from <= r2.from) { + if (r1.from <= other->start()) + setLastProcessedRange(i, other->start()); + if (r2.from < r1.to) + return r2.from; + if (i == 0 || ranges_[i-1].from > other->end()) + break; + i--; + } else { + if (r1.from < r2.to) + return r1.from; + if (j == 0 || other->ranges_[j-1].from > end()) + break; + j--; + } + } + + return CodePosition::MIN; +} + +/* + * This function takes the callee interval and moves all ranges following or + * including provided position to the target interval. Additionally, if a + * range in the callee interval spans the given position, it is split and the + * latter half is placed in the target interval. 
+ * + * This function should only be called if it is known that the interval should + * actually be split (and, presumably, a move inserted). As such, it is an + * error for the caller to request a split that moves all intervals into the + * target. Doing so will trip the assertion at the bottom of the function. + */ +bool +LiveInterval::splitFrom(CodePosition pos, LiveInterval* after) +{ + MOZ_ASSERT(pos >= start() && pos < end()); + MOZ_ASSERT(after->ranges_.empty()); + + // Move all intervals over to the target + size_t bufferLength = ranges_.length(); + Range* buffer = ranges_.extractRawBuffer(); + if (!buffer) + return false; + after->ranges_.replaceRawBuffer(buffer, bufferLength); + + // Move intervals back as required + for (Range* i = &after->ranges_.back(); i >= after->ranges_.begin(); i--) { + if (pos >= i->to) + continue; + + if (pos > i->from) { + // Split the range + Range split(i->from, pos); + i->from = pos; + if (!ranges_.append(split)) + return false; + } + if (!ranges_.append(i + 1, after->ranges_.end())) + return false; + after->ranges_.shrinkBy(after->ranges_.end() - i - 1); + break; + } + + // Split the linked list of use positions + UsePosition* prev = nullptr; + for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) { + if (usePos->pos > pos) + break; + prev = *usePos; + } + + uses_.splitAfter(prev, &after->uses_); + return true; +} + +void +LiveInterval::addUse(UsePosition* use) +{ + // Insert use positions in ascending order. Note that instructions + // are visited in reverse order, so in most cases the loop terminates + // at the first iteration and the use position will be added to the + // front of the list. + UsePosition* prev = nullptr; + for (UsePositionIterator current(usesBegin()); current != usesEnd(); current++) { + if (current->pos >= use->pos) + break; + prev = *current; + } + + if (prev) + uses_.insertAfter(prev, use); + else + uses_.pushFront(use); +} + +void +LiveInterval::addUseAtEnd(UsePosition* use) +{ + MOZ_ASSERT(uses_.empty() || use->pos >= uses_.back()->pos); + uses_.pushBack(use); +} + +UsePosition* +LiveInterval::nextUseAfter(CodePosition after) +{ + for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) { + if (usePos->pos >= after) { + LUse::Policy policy = usePos->use->policy(); + MOZ_ASSERT(policy != LUse::RECOVERED_INPUT); + if (policy != LUse::KEEPALIVE) + return *usePos; + } + } + return nullptr; +} + +UsePosition* +LiveInterval::popUse() +{ + return uses_.popFront(); +} + +/* + * This function locates the first "real" use of this interval that follows + * the given code position. Non-"real" uses are currently just snapshots, + * which keep virtual registers alive but do not result in the + * generation of code that use them. + */ +CodePosition +LiveInterval::nextUsePosAfter(CodePosition after) +{ + UsePosition* min = nextUseAfter(after); + return min ? min->pos : CodePosition::MAX; +} + +LiveInterval* +VirtualRegister::intervalFor(CodePosition pos) +{ + // Intervals are sorted in ascending order by their start position. + for (LiveInterval** i = intervals_.begin(); i != intervals_.end(); i++) { + if ((*i)->covers(pos)) + return *i; + if (pos < (*i)->start()) + break; + } + return nullptr; +} + +LiveInterval* +VirtualRegister::getFirstInterval() +{ + MOZ_ASSERT(!intervals_.empty()); + return intervals_[0]; +} + +// Instantiate LiveRangeAllocator for each template instance. 
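
Those instantiation lines use the standard explicit-instantiation pattern: the template member definitions stay in the .cpp file and are emitted only for the template arguments that are actually needed. A generic sketch with illustrative names (LivenessBuilder and BacktrackingVreg are not the in-tree classes):

    // In the header: declaration only.
    template <typename VregT>
    class LivenessBuilder {
      public:
        bool build(); // defined out of line, in one .cpp file
    };

    // In the .cpp file: the definition...
    template <typename VregT>
    bool LivenessBuilder<VregT>::build()
    {
        // ... expensive analysis ...
        return true;
    }

    // ...plus an explicit instantiation, which emits
    // LivenessBuilder<BacktrackingVreg>::build() into this object file so
    // other translation units can link against it without seeing the body.
    struct BacktrackingVreg {};
    template bool LivenessBuilder<BacktrackingVreg>::build();
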
+template bool LiveRangeAllocator::buildLivenessInfo(); +template void LiveRangeAllocator::dumpVregs(); + +#ifdef DEBUG +// Returns true iff ins has a def/temp reusing the input allocation. +static bool +IsInputReused(LInstruction* ins, LUse* use) +{ + for (size_t i = 0; i < ins->numDefs(); i++) { + if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT && + ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use) + { + return true; + } + } + + for (size_t i = 0; i < ins->numTemps(); i++) { + if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT && + ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use) + { + return true; + } + } + + return false; +} +#endif + +/* + * This function pre-allocates and initializes as much global state as possible + * to avoid littering the algorithms with memory management cruft. + */ +template +bool +LiveRangeAllocator::init() +{ + if (!RegisterAllocator::init()) + return false; + + liveIn = mir->allocate(graph.numBlockIds()); + if (!liveIn) + return false; + + // Initialize fixed intervals. + for (size_t i = 0; i < AnyRegister::Total; i++) { + AnyRegister reg = AnyRegister::FromCode(i); + LiveInterval* interval = LiveInterval::New(alloc(), 0); + interval->setAllocation(LAllocation(reg)); + fixedIntervals[i] = interval; + } + + fixedIntervalsUnion = LiveInterval::New(alloc(), 0); + + if (!vregs.init(mir, graph.numVirtualRegisters())) + return false; + + // Build virtual register objects + for (size_t i = 0; i < graph.numBlocks(); i++) { + if (mir->shouldCancel("Create data structures (main loop)")) + return false; + + LBlock* block = graph.getBlock(i); + for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) { + for (size_t j = 0; j < ins->numDefs(); j++) { + LDefinition* def = ins->getDef(j); + if (def->isBogusTemp()) + continue; + if (!vregs[def].init(alloc(), *ins, def, /* isTemp */ false)) + return false; + } + + for (size_t j = 0; j < ins->numTemps(); j++) { + LDefinition* def = ins->getTemp(j); + if (def->isBogusTemp()) + continue; + if (!vregs[def].init(alloc(), *ins, def, /* isTemp */ true)) + return false; + } + } + for (size_t j = 0; j < block->numPhis(); j++) { + LPhi* phi = block->getPhi(j); + LDefinition* def = phi->getDef(0); + if (!vregs[def].init(alloc(), phi, def, /* isTemp */ false)) + return false; + } + } + + return true; +} + +/* + * This function builds up liveness intervals for all virtual registers + * defined in the function. Additionally, it populates the liveIn array with + * information about which registers are live at the beginning of a block, to + * aid resolution and reification in a later phase. + * + * The algorithm is based on the one published in: + * + * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on + * SSA Form." Proceedings of the International Symposium on Code Generation + * and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF. + * + * The algorithm operates on blocks ordered such that dominators of a block + * are before the block itself, and such that all blocks of a loop are + * contiguous. It proceeds backwards over the instructions in this order, + * marking registers live at their uses, ending their live intervals at + * definitions, and recording which registers are live at the top of every + * block. To deal with loop backedges, variables live at the beginning of + * a loop gain an interval covering the entire loop. 
+ */ +template +bool +LiveRangeAllocator::buildLivenessInfo() +{ + JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis"); + + if (!init()) + return false; + + Vector loopWorkList; + BitSet loopDone(graph.numBlockIds()); + if (!loopDone.init(alloc())) + return false; + + for (size_t i = graph.numBlocks(); i > 0; i--) { + if (mir->shouldCancel("Build Liveness Info (main loop)")) + return false; + + LBlock* block = graph.getBlock(i - 1); + MBasicBlock* mblock = block->mir(); + + BitSet& live = liveIn[mblock->id()]; + new (&live) BitSet(graph.numVirtualRegisters()); + if (!live.init(alloc())) + return false; + + // Propagate liveIn from our successors to us + for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) { + MBasicBlock* successor = mblock->lastIns()->getSuccessor(i); + // Skip backedges, as we fix them up at the loop header. + if (mblock->id() < successor->id()) + live.insertAll(liveIn[successor->id()]); + } + + // Add successor phis + if (mblock->successorWithPhis()) { + LBlock* phiSuccessor = mblock->successorWithPhis()->lir(); + for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) { + LPhi* phi = phiSuccessor->getPhi(j); + LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor()); + uint32_t reg = use->toUse()->virtualRegister(); + live.insert(reg); + } + } + + // Variables are assumed alive for the entire block, a define shortens + // the interval to the point of definition. + for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) { + if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(entryOf(block), + exitOf(block).next())) + { + return false; + } + } + + // Shorten the front end of live intervals for live variables to their + // point of definition, if found. + for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) { + // Calls may clobber registers, so force a spill and reload around the callsite. + if (ins->isCall()) { + for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more(); iter++) { + bool found = false; + + for (size_t i = 0; i < ins->numDefs(); i++) { + if (ins->getDef(i)->isFixed() && + ins->getDef(i)->output()->aliases(LAllocation(*iter))) { + found = true; + break; + } + } + if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next())) + return false; + } + } + DebugOnly hasDoubleDef = false; + DebugOnly hasFloat32Def = false; + for (size_t i = 0; i < ins->numDefs(); i++) { + LDefinition* def = ins->getDef(i); + if (def->isBogusTemp()) + continue; +#ifdef DEBUG + if (def->type() == LDefinition::DOUBLE) + hasDoubleDef = true; + if (def->type() == LDefinition::FLOAT32) + hasFloat32Def = true; +#endif + CodePosition from = outputOf(*ins); + + if (def->policy() == LDefinition::MUST_REUSE_INPUT) { + // MUST_REUSE_INPUT is implemented by allocating an output + // register and moving the input to it. Register hints are + // used to avoid unnecessary moves. We give the input an + // LUse::ANY policy to avoid allocating a register for the + // input. + LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse(); + MOZ_ASSERT(inputUse->policy() == LUse::REGISTER); + MOZ_ASSERT(inputUse->usedAtStart()); + *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true); + } + + LiveInterval* interval = vregs[def].getInterval(0); + interval->setFrom(from); + + // Ensure that if there aren't any uses, there's at least + // some interval for the output to go into. 
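/* Editor's illustrative sketch, not part of this changeset: the core of the
 * backward scan that buildLivenessInfo() performs, restated on a toy
 * instruction form. Walking a block in reverse, a definition ends liveness
 * for its vreg and a use (re)starts it; whatever is still live at the top of
 * the block is that block's liveIn set, seeded from the union of its
 * successors' liveIn sets. ToyIns and the plain std::set are hypothetical
 * stand-ins for LInstruction and BitSet. */
#include <cstdint>
#include <set>
#include <vector>

struct ToyIns {
    std::vector<uint32_t> defs;   // vregs defined by this instruction
    std::vector<uint32_t> uses;   // vregs read by this instruction
};

// |live| starts as the union of the successors' liveIn sets.
std::set<uint32_t>
BackwardLiveness(const std::vector<ToyIns>& block, std::set<uint32_t> live)
{
    for (auto ins = block.rbegin(); ins != block.rend(); ++ins) {
        for (uint32_t def : ins->defs)
            live.erase(def);       // a definition shortens the interval to this point
        for (uint32_t use : ins->uses)
            live.insert(use);      // a use keeps the vreg live above this point
    }
    return live;                   // the block's liveIn set
}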
+ if (interval->numRanges() == 0) { + if (!interval->addRangeAtHead(from, from.next())) + return false; + } + live.remove(def->virtualRegister()); + } + + for (size_t i = 0; i < ins->numTemps(); i++) { + LDefinition* temp = ins->getTemp(i); + if (temp->isBogusTemp()) + continue; + + // Normally temps are considered to cover both the input + // and output of the associated instruction. In some cases + // though we want to use a fixed register as both an input + // and clobbered register in the instruction, so watch for + // this and shorten the temp to cover only the output. + CodePosition from = inputOf(*ins); + if (temp->policy() == LDefinition::FIXED) { + AnyRegister reg = temp->output()->toRegister(); + for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) { + if (alloc->isUse()) { + LUse* use = alloc->toUse(); + if (use->isFixedRegister()) { + if (GetFixedRegister(vregs[use].def(), use) == reg) + from = outputOf(*ins); + } + } + } + } + + CodePosition to = + ins->isCall() ? outputOf(*ins) : outputOf(*ins).next(); + if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to)) + return false; + } + + DebugOnly hasUseRegister = false; + DebugOnly hasUseRegisterAtStart = false; + + for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) { + if (inputAlloc->isUse()) { + LUse* use = inputAlloc->toUse(); + + // Call uses should always be at-start or fixed, since the fixed intervals + // use all registers. + MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(), + use->isFixedRegister() || use->usedAtStart()); + +#ifdef DEBUG + // Don't allow at-start call uses if there are temps of the same kind, + // so that we don't assign the same register. + if (ins->isCall() && use->usedAtStart()) { + for (size_t i = 0; i < ins->numTemps(); i++) + MOZ_ASSERT(vregs[ins->getTemp(i)].isFloatReg() != vregs[use].isFloatReg()); + } + + // If there are both useRegisterAtStart(x) and useRegister(y) + // uses, we may assign the same register to both operands due to + // interval splitting (bug 772830). Don't allow this for now. + if (use->policy() == LUse::REGISTER) { + if (use->usedAtStart()) { + if (!IsInputReused(*ins, use)) + hasUseRegisterAtStart = true; + } else { + hasUseRegister = true; + } + } + MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart)); +#endif + + // Don't treat RECOVERED_INPUT uses as keeping the vreg alive. + if (use->policy() == LUse::RECOVERED_INPUT) + continue; + + // Fixed uses on calls are specially overridden to happen + // at the input position. + CodePosition to = + (use->usedAtStart() || (ins->isCall() && use->isFixedRegister())) + ? inputOf(*ins) + : outputOf(*ins); + if (use->isFixedRegister()) { + LAllocation reg(AnyRegister::FromCode(use->registerCode())); + for (size_t i = 0; i < ins->numDefs(); i++) { + LDefinition* def = ins->getDef(i); + if (def->policy() == LDefinition::FIXED && *def->output() == reg) + to = inputOf(*ins); + } + } + + LiveInterval* interval = vregs[use].getInterval(0); + if (!interval->addRangeAtHead(entryOf(block), to.next())) + return false; + interval->addUse(new(alloc()) UsePosition(use, to)); + + live.insert(use->virtualRegister()); + } + } + } + + // Phis have simultaneous assignment semantics at block begin, so at + // the beginning of the block we can be sure that liveIn does not + // contain any phi outputs. 
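/* Editor's illustrative sketch, not part of this changeset: a toy version of
 * the position arithmetic used above (inputOf, outputOf, next). Every
 * instruction contributes two consecutive positions, an INPUT subposition for
 * its operands and an OUTPUT subposition for its results, so a range such as
 * [inputOf(ins), outputOf(ins).next()) spans the whole instruction. The real
 * CodePosition encoding is not shown in this patch; the bit layout below is
 * only an assumption made for illustration. */
#include <cstdint>

struct ToyCodePosition {
    enum SubPosition { INPUT = 0, OUTPUT = 1 };
    uint32_t bits;

    uint32_t ins() const { return bits >> 1; }
    SubPosition subpos() const { return static_cast<SubPosition>(bits & 1); }

    ToyCodePosition next() const { ToyCodePosition p; p.bits = bits + 1; return p; }
    bool operator<(ToyCodePosition other) const { return bits < other.bits; }
};

ToyCodePosition ToyInputOf(uint32_t insId)  { ToyCodePosition p; p.bits = (insId << 1) | ToyCodePosition::INPUT;  return p; }
ToyCodePosition ToyOutputOf(uint32_t insId) { ToyCodePosition p; p.bits = (insId << 1) | ToyCodePosition::OUTPUT; return p; }

// For example, a temp that must survive instruction |i| would get a range
// [ToyInputOf(i), ToyOutputOf(i).next()), covering both of i's positions.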
+ for (unsigned int i = 0; i < block->numPhis(); i++) { + LDefinition* def = block->getPhi(i)->getDef(0); + if (live.contains(def->virtualRegister())) { + live.remove(def->virtualRegister()); + } else { + // This is a dead phi, so add a dummy range over all phis. This + // can go away if we have an earlier dead code elimination pass. + CodePosition entryPos = entryOf(block); + if (!vregs[def].getInterval(0)->addRangeAtHead(entryPos, entryPos.next())) + return false; + } + } + + if (mblock->isLoopHeader()) { + // A divergence from the published algorithm is required here, as + // our block order does not guarantee that blocks of a loop are + // contiguous. As a result, a single live interval spanning the + // loop is not possible. Additionally, we require liveIn in a later + // pass for resolution, so that must also be fixed up here. + MBasicBlock* loopBlock = mblock->backedge(); + while (true) { + // Blocks must already have been visited to have a liveIn set. + MOZ_ASSERT(loopBlock->id() >= mblock->id()); + + // Add an interval for this entire loop block + CodePosition from = entryOf(loopBlock->lir()); + CodePosition to = exitOf(loopBlock->lir()).next(); + + for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) { + if (!vregs[*liveRegId].getInterval(0)->addRange(from, to)) + return false; + } + + // Fix up the liveIn set to account for the new interval + liveIn[loopBlock->id()].insertAll(live); + + // Make sure we don't visit this node again + loopDone.insert(loopBlock->id()); + + // If this is the loop header, any predecessors are either the + // backedge or out of the loop, so skip any predecessors of + // this block + if (loopBlock != mblock) { + for (size_t i = 0; i < loopBlock->numPredecessors(); i++) { + MBasicBlock* pred = loopBlock->getPredecessor(i); + if (loopDone.contains(pred->id())) + continue; + if (!loopWorkList.append(pred)) + return false; + } + } + + // Terminate loop if out of work. + if (loopWorkList.empty()) + break; + + // Grab the next block off the work list, skipping any OSR block. + MBasicBlock* osrBlock = graph.mir().osrBlock(); + while (!loopWorkList.empty()) { + loopBlock = loopWorkList.popCopy(); + if (loopBlock != osrBlock) + break; + } + + // If end is reached without finding a non-OSR block, then no more work items were found. + if (loopBlock == osrBlock) { + MOZ_ASSERT(loopWorkList.empty()); + break; + } + } + + // Clear the done set for other loops + loopDone.clear(); + } + + MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty()); + } + + validateVirtualRegisters(); + + // If the script has an infinite loop, there may be no MReturn and therefore + // no fixed intervals. Add a small range to fixedIntervalsUnion so that the + // rest of the allocator can assume it has at least one range. + if (fixedIntervalsUnion->numRanges() == 0) { + if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT), + CodePosition(0, CodePosition::OUTPUT))) + { + return false; + } + } + + JitSpew(JitSpew_RegAlloc, "Liveness analysis complete"); + + if (JitSpewEnabled(JitSpew_RegAlloc)) { + dumpInstructions(); + + fprintf(stderr, "Live ranges by virtual register:\n"); + dumpVregs(); + } + + return true; +} + +template +void +LiveRangeAllocator::dumpVregs() +{ +#ifdef DEBUG + // Virtual register number 0 is unused. 
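/* Editor's illustrative sketch, not part of this changeset: the loop fix-up
 * above in miniature. Because the block order does not keep a loop's blocks
 * contiguous, every vreg live at the loop header is given a range covering
 * each block of the loop, found by walking predecessors backwards from the
 * backedge until the header is reached. Block ids are assumed to index the
 * |blocks| vector and vregs to index |vregRanges|; all of it is a simplified
 * stand-in for the MBasicBlock/LiveInterval machinery. */
#include <cstdint>
#include <set>
#include <utility>
#include <vector>

struct ToyBlock {
    std::vector<uint32_t> predecessors;     // predecessor block ids
    std::pair<uint32_t, uint32_t> range;    // [entry, exit) code positions
};

void
CoverLoopBody(const std::vector<ToyBlock>& blocks, uint32_t headerId, uint32_t backedgeId,
              const std::set<uint32_t>& liveAtHeader,
              std::vector<std::vector<std::pair<uint32_t, uint32_t>>>& vregRanges)
{
    std::set<uint32_t> done;
    std::vector<uint32_t> work{backedgeId};
    while (!work.empty()) {
        uint32_t id = work.back();
        work.pop_back();
        if (!done.insert(id).second)
            continue;                                     // already covered
        for (uint32_t vreg : liveAtHeader)
            vregRanges[vreg].push_back(blocks[id].range); // live across the whole block
        if (id != headerId) {                             // the header's predecessors are outside the loop
            for (uint32_t pred : blocks[id].predecessors)
                work.push_back(pred);
        }
    }
}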
+ MOZ_ASSERT(vregs[0u].numIntervals() == 0); + for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) { + fprintf(stderr, " "); + VirtualRegister& vreg = vregs[i]; + for (size_t j = 0; j < vreg.numIntervals(); j++) { + if (j) + fprintf(stderr, " / "); + fprintf(stderr, "%s", vreg.getInterval(j)->toString()); + } + fprintf(stderr, "\n"); + } + + fprintf(stderr, "\n"); +#endif +} + +#ifdef DEBUG + +void +LiveInterval::validateRanges() +{ + Range* prev = nullptr; + + for (size_t i = ranges_.length() - 1; i < ranges_.length(); i--) { + Range* range = &ranges_[i]; + + MOZ_ASSERT(range->from < range->to); + MOZ_ASSERT_IF(prev, prev->to <= range->from); + prev = range; + } +} + +#endif // DEBUG + +const char* +LiveInterval::rangesToString() const +{ +#ifdef DEBUG + // Not reentrant! + static char buf[2000]; + + char* cursor = buf; + char* end = cursor + sizeof(buf); + + int n; + + for (size_t i = ranges_.length() - 1; i < ranges_.length(); i--) { + const LiveInterval::Range* range = getRange(i); + n = JS_snprintf(cursor, end - cursor, " %s", range->toString()); + if (n < 0) + return " ???"; + cursor += n; + } + + return buf; +#else + return " ???"; +#endif +} + +#ifdef DEBUG +static bool +IsHintInteresting(const Requirement& requirement, const Requirement& hint) +{ + if (hint.kind() == Requirement::NONE) + return false; + + if (hint.kind() != Requirement::FIXED && hint.kind() != Requirement::REGISTER) + return true; + + Requirement merge = requirement; + if (!merge.mergeRequirement(hint)) + return true; + + return merge.kind() != requirement.kind(); +} +#endif + +const char* +LiveInterval::toString() const +{ +#ifdef DEBUG + // Not reentrant! + static char buf[2000]; + + char* cursor = buf; + char* end = cursor + sizeof(buf); + + int n; + + if (hasVreg()) { + n = JS_snprintf(cursor, end - cursor, "v%u", vreg()); + if (n < 0) return "???"; + cursor += n; + } + + n = JS_snprintf(cursor, end - cursor, "[%u]", index()); + if (n < 0) return "???"; + cursor += n; + + if (requirement_.kind() != Requirement::NONE || hint_.kind() != Requirement::NONE) { + n = JS_snprintf(cursor, end - cursor, " req("); + if (n < 0) return "???"; + cursor += n; + + bool printHint = IsHintInteresting(requirement_, hint_); + + if (requirement_.kind() != Requirement::NONE) { + n = JS_snprintf(cursor, end - cursor, "%s%s", + requirement_.toString(), + printHint ? 
"," : ""); + if (n < 0) return "???"; + cursor += n; + } + if (printHint) { + n = JS_snprintf(cursor, end - cursor, "%s?", hint_.toString()); + if (n < 0) return "???"; + cursor += n; + } + + n = JS_snprintf(cursor, end - cursor, ")"); + if (n < 0) return "???"; + cursor += n; + } + + if (!alloc_.isBogus()) { + n = JS_snprintf(cursor, end - cursor, " has(%s)", alloc_.toString()); + if (n < 0) return "???"; + cursor += n; + } + + n = JS_snprintf(cursor, end - cursor, "%s", rangesToString()); + if (n < 0) return "???"; + cursor += n; + + for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) { + n = JS_snprintf(cursor, end - cursor, " %s@%u", + usePos->use->toString(), usePos->pos.bits()); + if (n < 0) return "???"; + cursor += n; + } + + return buf; +#else + return "???"; +#endif +} + +void +LiveInterval::dump() const +{ + fprintf(stderr, "%s\n", toString()); +} diff --git a/js/src/jit/LiveRangeAllocator.h b/js/src/jit/LiveRangeAllocator.h new file mode 100644 index 00000000000..23d3449ea51 --- /dev/null +++ b/js/src/jit/LiveRangeAllocator.h @@ -0,0 +1,758 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_LiveRangeAllocator_h +#define jit_LiveRangeAllocator_h + +#include "mozilla/Array.h" +#include "mozilla/DebugOnly.h" + +#include "jit/RegisterAllocator.h" +#include "jit/StackSlotAllocator.h" + +// Common structures and functions used by register allocators that operate on +// virtual register live ranges. + +namespace js { +namespace jit { + +class Requirement +{ + public: + enum Kind { + NONE, + REGISTER, + FIXED, + MUST_REUSE_INPUT + }; + + Requirement() + : kind_(NONE) + { } + + explicit Requirement(Kind kind) + : kind_(kind) + { + // These have dedicated constructors. + MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT); + } + + Requirement(Kind kind, CodePosition at) + : kind_(kind), + position_(at) + { + // These have dedicated constructors. + MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT); + } + + explicit Requirement(LAllocation fixed) + : kind_(FIXED), + allocation_(fixed) + { + MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse()); + } + + // Only useful as a hint, encodes where the fixed requirement is used to + // avoid allocating a fixed register too early. + Requirement(LAllocation fixed, CodePosition at) + : kind_(FIXED), + allocation_(fixed), + position_(at) + { + MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse()); + } + + Requirement(uint32_t vreg, CodePosition at) + : kind_(MUST_REUSE_INPUT), + allocation_(LUse(vreg, LUse::ANY)), + position_(at) + { } + + Kind kind() const { + return kind_; + } + + LAllocation allocation() const { + MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse()); + return allocation_; + } + + uint32_t virtualRegister() const { + MOZ_ASSERT(allocation_.isUse()); + MOZ_ASSERT(kind() == MUST_REUSE_INPUT); + return allocation_.toUse()->virtualRegister(); + } + + CodePosition pos() const { + return position_; + } + + int priority() const; + + bool mergeRequirement(const Requirement& newRequirement) { + // Merge newRequirement with any existing requirement, returning false + // if the new and old requirements conflict. 
+ MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT); + + if (newRequirement.kind() == Requirement::FIXED) { + if (kind() == Requirement::FIXED) + return newRequirement.allocation() == allocation(); + *this = newRequirement; + return true; + } + + MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER); + if (kind() == Requirement::FIXED) + return allocation().isRegister(); + + *this = newRequirement; + return true; + } + + // Return a string describing this requirement. This is not re-entrant! + const char* toString() const; + + void dump() const; + + private: + Kind kind_; + LAllocation allocation_; + CodePosition position_; +}; + +struct UsePosition : public TempObject, + public InlineForwardListNode +{ + LUse* use; + CodePosition pos; + + UsePosition(LUse* use, CodePosition pos) : + use(use), + pos(pos) + { + // Verify that the usedAtStart() flag is consistent with the + // subposition. For now ignore fixed registers, because they + // are handled specially around calls. + MOZ_ASSERT_IF(!use->isFixedRegister(), + pos.subpos() == (use->usedAtStart() + ? CodePosition::INPUT + : CodePosition::OUTPUT)); + } +}; + +typedef InlineForwardListIterator UsePositionIterator; + +static inline bool +UseCompatibleWith(const LUse* use, LAllocation alloc) +{ + switch (use->policy()) { + case LUse::ANY: + case LUse::KEEPALIVE: + return alloc.isRegister() || alloc.isMemory(); + case LUse::REGISTER: + return alloc.isRegister(); + case LUse::FIXED: + // Fixed uses are handled using fixed intervals. The + // UsePosition is only used as hint. + return alloc.isRegister(); + default: + MOZ_CRASH("Unknown use policy"); + } +} + +#ifdef DEBUG + +static inline bool +DefinitionCompatibleWith(LNode* ins, const LDefinition* def, LAllocation alloc) +{ + if (ins->isPhi()) { + if (def->isFloatReg()) + return alloc.isFloatReg() || alloc.isStackSlot(); + return alloc.isGeneralReg() || alloc.isStackSlot(); + } + + switch (def->policy()) { + case LDefinition::REGISTER: + if (!alloc.isRegister()) + return false; + return alloc.isFloatReg() == def->isFloatReg(); + case LDefinition::FIXED: + return alloc == *def->output(); + case LDefinition::MUST_REUSE_INPUT: + if (!alloc.isRegister() || !ins->numOperands()) + return false; + return alloc == *ins->getOperand(def->getReusedInput()); + default: + MOZ_CRASH("Unknown definition policy"); + } +} + +#endif // DEBUG + +static inline LDefinition* +FindReusingDefinition(LNode* ins, LAllocation* alloc) +{ + for (size_t i = 0; i < ins->numDefs(); i++) { + LDefinition* def = ins->getDef(i); + if (def->policy() == LDefinition::MUST_REUSE_INPUT && + ins->getOperand(def->getReusedInput()) == alloc) + return def; + } + for (size_t i = 0; i < ins->numTemps(); i++) { + LDefinition* def = ins->getTemp(i); + if (def->policy() == LDefinition::MUST_REUSE_INPUT && + ins->getOperand(def->getReusedInput()) == alloc) + return def; + } + return nullptr; +} + +/* + * A live interval is a set of disjoint ranges of code positions where a + * virtual register is live. Register allocation operates on these intervals, + * splitting them as necessary and assigning allocations to them as it runs. + */ +class LiveInterval + : public TempObject +{ + public: + /* + * A range is a contiguous sequence of CodePositions where the virtual + * register associated with this interval is live. + */ + struct Range { + Range() + : from(), + to() + { } + Range(CodePosition f, CodePosition t) + : from(f), + to(t) + { + MOZ_ASSERT(from < to); + } + + // The beginning of this range, inclusive. 
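/* Editor's illustrative sketch, not part of this changeset: the merge rule
 * implemented by Requirement::mergeRequirement() above, restated on a toy
 * type. Merging fails when two different fixed allocations are demanded, or
 * when a REGISTER requirement meets a fixed non-register allocation; as in
 * the real code, merging a NONE requirement is not expected here. The names
 * below are simplified stand-ins for Requirement/LAllocation. */
#include <cassert>

enum class ToyReqKind { None, Register, Fixed };

struct ToyRequirement {
    ToyReqKind kind = ToyReqKind::None;
    int fixedReg = -1;      // when Fixed: >= 0 is a register code, -1 a stack slot

    bool merge(const ToyRequirement& other) {
        if (other.kind == ToyReqKind::Fixed) {
            if (kind == ToyReqKind::Fixed)
                return fixedReg == other.fixedReg;   // two fixed constraints must agree
            *this = other;
            return true;
        }
        assert(other.kind == ToyReqKind::Register);
        if (kind == ToyReqKind::Fixed)
            return fixedReg >= 0;                    // register required: a fixed stack slot conflicts
        *this = other;
        return true;
    }
};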
+ CodePosition from; + + // The end of this range, exclusive. + CodePosition to; + + bool empty() const { + return from >= to; + } + + // Whether this range wholly contains other. + bool contains(const Range* other) const; + + // Intersect this range with other, returning the subranges of this + // that are before, inside, or after other. + void intersect(const Range* other, Range* pre, Range* inside, Range* post) const; + + // Return a string describing this range. This is not re-entrant! + const char* toString() const; + + void dump() const; + }; + + private: + Vector ranges_; + LAllocation alloc_; + LiveInterval* spillInterval_; + uint32_t vreg_; + uint32_t index_; + Requirement requirement_; + Requirement hint_; + InlineForwardList uses_; + size_t lastProcessedRange_; + + LiveInterval(TempAllocator& alloc, uint32_t vreg, uint32_t index) + : ranges_(alloc), + spillInterval_(nullptr), + vreg_(vreg), + index_(index), + lastProcessedRange_(size_t(-1)) + { } + + LiveInterval(TempAllocator& alloc, uint32_t index) + : ranges_(alloc), + spillInterval_(nullptr), + vreg_(UINT32_MAX), + index_(index), + lastProcessedRange_(size_t(-1)) + { } + + public: + static LiveInterval* New(TempAllocator& alloc, uint32_t vreg, uint32_t index) { + return new(alloc) LiveInterval(alloc, vreg, index); + } + static LiveInterval* New(TempAllocator& alloc, uint32_t index) { + return new(alloc) LiveInterval(alloc, index); + } + + bool addRange(CodePosition from, CodePosition to); + bool addRangeAtHead(CodePosition from, CodePosition to); + void setFrom(CodePosition from); + CodePosition intersect(LiveInterval* other); + bool covers(CodePosition pos); + + CodePosition start() const { + MOZ_ASSERT(!ranges_.empty()); + return ranges_.back().from; + } + + CodePosition end() const { + MOZ_ASSERT(!ranges_.empty()); + return ranges_.begin()->to; + } + + size_t numRanges() const { + return ranges_.length(); + } + const Range* getRange(size_t i) const { + return &ranges_[i]; + } + void setLastProcessedRange(size_t range, mozilla::DebugOnly pos) { + // If the range starts after pos, we may not be able to use + // it in the next lastProcessedRangeIfValid call. + MOZ_ASSERT(ranges_[range].from <= pos); + lastProcessedRange_ = range; + } + size_t lastProcessedRangeIfValid(CodePosition pos) const { + if (lastProcessedRange_ < ranges_.length() && ranges_[lastProcessedRange_].from <= pos) + return lastProcessedRange_; + return ranges_.length() - 1; + } + + LAllocation* getAllocation() { + return &alloc_; + } + void setAllocation(LAllocation alloc) { + alloc_ = alloc; + } + void setSpillInterval(LiveInterval* spill) { + spillInterval_ = spill; + } + LiveInterval* spillInterval() { + return spillInterval_; + } + bool hasVreg() const { + return vreg_ != UINT32_MAX; + } + uint32_t vreg() const { + MOZ_ASSERT(hasVreg()); + return vreg_; + } + uint32_t index() const { + return index_; + } + void setIndex(uint32_t index) { + index_ = index; + } + const Requirement* requirement() const { + return &requirement_; + } + void setRequirement(const Requirement& requirement) { + // A MUST_REUSE_INPUT requirement complicates regalloc too much; it + // should only be used as hint. + MOZ_ASSERT(requirement.kind() != Requirement::MUST_REUSE_INPUT); + requirement_ = requirement; + } + bool addRequirement(const Requirement& newRequirement) { + return requirement_.mergeRequirement(newRequirement); + } + void addHint(const Requirement& newHint) { + // Unlike addRequirement, here in addHint we ignore merge failures, + // because these are just hints. 
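/* Editor's illustrative sketch, not part of this changeset: the pre/inside/
 * post decomposition that Range::intersect() above is declared to produce.
 * Its implementation is not part of this hunk, so the behaviour below is an
 * assumption: given ranges A and B, A is cut into the part before B, the part
 * overlapping B, and the part after B, with empty pieces ending up with
 * from >= to. ToyRange is a simplified stand-in. */
#include <algorithm>
#include <cstdint>

struct ToyRange {
    uint32_t from;
    uint32_t to;                              // half-open [from, to)
    bool empty() const { return from >= to; }
};

void
IntersectRanges(const ToyRange& a, const ToyRange& b,
                ToyRange* pre, ToyRange* inside, ToyRange* post)
{
    *pre    = { a.from, std::min(a.to, b.from) };
    *inside = { std::max(a.from, b.from), std::min(a.to, b.to) };
    *post   = { std::max(a.from, b.to), a.to };
}

// For a = [0, 10) and b = [4, 6): pre = [0, 4), inside = [4, 6), post = [6, 10).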
+ hint_.mergeRequirement(newHint); + } + const Requirement* hint() const { + return &hint_; + } + void setHint(const Requirement& hint) { + hint_ = hint; + } + bool isSpill() const { + return alloc_.isStackSlot(); + } + bool splitFrom(CodePosition pos, LiveInterval* after); + + void addUse(UsePosition* use); + void addUseAtEnd(UsePosition* use); + UsePosition* popUse(); + UsePosition* nextUseAfter(CodePosition pos); + CodePosition nextUsePosAfter(CodePosition pos); + + UsePositionIterator usesBegin() const { + return uses_.begin(); + } + + UsePositionIterator usesEnd() const { + return uses_.end(); + } + + bool usesEmpty() const { + return uses_.empty(); + } + + UsePosition* usesBack() { + return uses_.back(); + } + +#ifdef DEBUG + void validateRanges(); +#endif + + // Return a string describing the ranges in this LiveInterval. This is + // not re-entrant! + const char* rangesToString() const; + + // Return a string describing this LiveInterval. This is not re-entrant! + const char* toString() const; + + void dump() const; +}; + +/* + * Represents all of the register allocation state associated with a virtual + * register, including all associated intervals and pointers to relevant LIR + * structures. + */ +class VirtualRegister +{ + LNode* ins_; + LDefinition* def_; + Vector intervals_; + + // Whether def_ is a temp or an output. + bool isTemp_ : 1; + + void operator=(const VirtualRegister&) = delete; + VirtualRegister(const VirtualRegister&) = delete; + + protected: + explicit VirtualRegister(TempAllocator& alloc) + : intervals_(alloc) + {} + + public: + bool init(TempAllocator& alloc, LNode* ins, LDefinition* def, + bool isTemp) + { + MOZ_ASSERT(ins && !ins_); + ins_ = ins; + def_ = def; + isTemp_ = isTemp; + LiveInterval* initial = LiveInterval::New(alloc, def->virtualRegister(), 0); + if (!initial) + return false; + return intervals_.append(initial); + } + LBlock* block() { + return ins_->block(); + } + LNode* ins() { + return ins_; + } + LDefinition* def() const { + return def_; + } + LDefinition::Type type() const { + return def()->type(); + } + bool isTemp() const { + return isTemp_; + } + size_t numIntervals() const { + return intervals_.length(); + } + LiveInterval* getInterval(size_t i) const { + return intervals_[i]; + } + LiveInterval* lastInterval() const { + MOZ_ASSERT(numIntervals() > 0); + return getInterval(numIntervals() - 1); + } + void replaceInterval(LiveInterval* old, LiveInterval* interval) { + MOZ_ASSERT(intervals_[old->index()] == old); + interval->setIndex(old->index()); + intervals_[old->index()] = interval; + } + bool addInterval(LiveInterval* interval) { + MOZ_ASSERT(interval->numRanges()); + MOZ_ASSERT(interval->vreg() != 0); + + // Preserve ascending order for faster lookups. 
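/* Editor's illustrative sketch, not part of this changeset: the sorted
 * insertion performed by VirtualRegister::addInterval(), whose body continues
 * just below. Intervals are kept ordered by ascending start position and each
 * interval records its own index, so both the insertion point and the indices
 * of every later interval have to be updated. ToyInterval is a simplified
 * stand-in holding only the fields the insertion needs. */
#include <cstdint>
#include <vector>

struct ToyInterval {
    uint32_t start;
    uint32_t index;
};

void
AddIntervalSorted(std::vector<ToyInterval>& intervals, ToyInterval newInterval)
{
    size_t pos = 0;
    while (pos < intervals.size() && intervals[pos].start <= newInterval.start)
        pos++;                                           // first entry starting after the new one
    newInterval.index = static_cast<uint32_t>(pos);
    intervals.insert(intervals.begin() + pos, newInterval);
    for (size_t i = pos + 1; i < intervals.size(); i++)
        intervals[i].index = static_cast<uint32_t>(i);   // shift later indices up by one
}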
+ LiveInterval** found = nullptr; + LiveInterval** i; + for (i = intervals_.begin(); i != intervals_.end(); i++) { + if (!found && interval->start() < (*i)->start()) + found = i; + if (found) + (*i)->setIndex((*i)->index() + 1); + } + if (!found) + found = intervals_.end(); + interval->setIndex(found - intervals_.begin()); + return intervals_.insert(found, interval); + } + void removeInterval(LiveInterval* interval) { + intervals_.erase(intervals_.begin() + interval->index()); + for (size_t i = interval->index(), e = intervals_.length(); i < e; ++i) + intervals_[i]->setIndex(i); + interval->setIndex(-1); + } + bool isFloatReg() const { + return def_->isFloatReg(); + } + bool isCompatibleReg(const AnyRegister& r) const { + return def_->isCompatibleReg(r); + } + bool isCompatibleVReg(const VirtualRegister& vr) const { + return def_->isCompatibleDef(*vr.def_); + } + + LiveInterval* intervalFor(CodePosition pos); + LiveInterval* getFirstInterval(); +}; + +// Index of the virtual registers in a graph. VREG is a subclass of +// VirtualRegister extended with any allocator specific state for the vreg. +template +class VirtualRegisterMap +{ + private: + FixedList vregs_; + + void operator=(const VirtualRegisterMap&) = delete; + VirtualRegisterMap(const VirtualRegisterMap&) = delete; + + public: + VirtualRegisterMap() + : vregs_() + { } + + bool init(MIRGenerator* gen, uint32_t numVregs) { + if (!vregs_.init(gen->alloc(), numVregs)) + return false; + memset(&vregs_[0], 0, sizeof(VREG) * numVregs); + TempAllocator& alloc = gen->alloc(); + for (uint32_t i = 0; i < numVregs; i++) + new(&vregs_[i]) VREG(alloc); + return true; + } + VREG& operator[](unsigned int index) { + return vregs_[index]; + } + VREG& operator[](const LAllocation* alloc) { + MOZ_ASSERT(alloc->isUse()); + return vregs_[alloc->toUse()->virtualRegister()]; + } + VREG& operator[](const LDefinition* def) { + return vregs_[def->virtualRegister()]; + } + uint32_t numVirtualRegisters() const { + return vregs_.length(); + } +}; + +static inline bool +IsNunbox(VirtualRegister* vreg) +{ +#ifdef JS_NUNBOX32 + return vreg->type() == LDefinition::TYPE || + vreg->type() == LDefinition::PAYLOAD; +#else + return false; +#endif +} + +static inline bool +IsSlotsOrElements(VirtualRegister* vreg) +{ + return vreg->type() == LDefinition::SLOTS; +} + +static inline bool +IsTraceable(VirtualRegister* reg) +{ + if (reg->type() == LDefinition::OBJECT) + return true; +#ifdef JS_PUNBOX64 + if (reg->type() == LDefinition::BOX) + return true; +#endif + return false; +} + +template +class LiveRangeAllocator : protected RegisterAllocator +{ + protected: + // Computed inforamtion + BitSet* liveIn; + VirtualRegisterMap vregs; + mozilla::Array fixedIntervals; + + // Union of all ranges in fixedIntervals, used to quickly determine + // whether an interval intersects with a fixed register. 
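/* Editor's illustrative sketch, not part of this changeset: why the allocator
 * keeps a union of all fixed ranges (fixedIntervalsUnion, declared just
 * below) alongside the per-register fixed intervals. Testing a candidate
 * against the union first gives a cheap rejection before any per-register
 * work. The pairwise scan below only illustrates that idea on ascending,
 * disjoint [from, to) ranges; it is not the allocator's actual query. */
#include <cstdint>
#include <utility>
#include <vector>

using ToyLiveRange = std::pair<uint32_t, uint32_t>;   // half-open [from, to)

// Returns true if two ascending, disjoint range lists share any position.
bool
RangesIntersect(const std::vector<ToyLiveRange>& a, const std::vector<ToyLiveRange>& b)
{
    size_t i = 0, j = 0;
    while (i < a.size() && j < b.size()) {
        if (a[i].second <= b[j].first)
            i++;                      // a[i] ends before b[j] begins
        else if (b[j].second <= a[i].first)
            j++;                      // b[j] ends before a[i] begins
        else
            return true;              // overlap found
    }
    return false;
}

bool
IntersectsAnyFixedRegister(const std::vector<ToyLiveRange>& candidate,
                           const std::vector<ToyLiveRange>& fixedUnion)
{
    // Only if this cheap test succeeds would individual registers need checking.
    return RangesIntersect(candidate, fixedUnion);
}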
+ LiveInterval* fixedIntervalsUnion; + + // Allocation state + StackSlotAllocator stackSlotAllocator; + + LiveRangeAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph) + : RegisterAllocator(mir, lir, graph), + liveIn(nullptr), + fixedIntervalsUnion(nullptr) + { + } + + bool buildLivenessInfo(); + + bool init(); + + bool addFixedRangeAtHead(AnyRegister reg, CodePosition from, CodePosition to) { + if (!fixedIntervals[reg.code()]->addRangeAtHead(from, to)) + return false; + return fixedIntervalsUnion->addRangeAtHead(from, to); + } + + void validateVirtualRegisters() + { +#ifdef DEBUG + if (!js_JitOptions.checkGraphConsistency) + return; + + for (size_t i = 1; i < graph.numVirtualRegisters(); i++) { + VirtualRegister* reg = &vregs[i]; + + LiveInterval* prev = nullptr; + for (size_t j = 0; j < reg->numIntervals(); j++) { + LiveInterval* interval = reg->getInterval(j); + MOZ_ASSERT(interval->vreg() == i); + MOZ_ASSERT(interval->index() == j); + + if (interval->numRanges() == 0) + continue; + + MOZ_ASSERT_IF(prev, prev->end() <= interval->start()); + interval->validateRanges(); + + prev = interval; + } + } +#endif + } + + bool addMove(LMoveGroup* moves, LiveInterval* from, LiveInterval* to, LDefinition::Type type) { + MOZ_ASSERT(*from->getAllocation() != *to->getAllocation()); + return moves->add(from->getAllocation(), to->getAllocation(), type); + } + + bool moveInput(LInstruction* ins, LiveInterval* from, LiveInterval* to, LDefinition::Type type) { + if (*from->getAllocation() == *to->getAllocation()) + return true; + LMoveGroup* moves = getInputMoveGroup(ins); + return addMove(moves, from, to, type); + } + + bool moveAfter(LInstruction* ins, LiveInterval* from, LiveInterval* to, LDefinition::Type type) { + if (*from->getAllocation() == *to->getAllocation()) + return true; + LMoveGroup* moves = getMoveGroupAfter(ins); + return addMove(moves, from, to, type); + } + + bool moveAtExit(LBlock* block, LiveInterval* from, LiveInterval* to, LDefinition::Type type) { + if (*from->getAllocation() == *to->getAllocation()) + return true; + LMoveGroup* moves = block->getExitMoveGroup(alloc()); + return addMove(moves, from, to, type); + } + + bool moveAtEntry(LBlock* block, LiveInterval* from, LiveInterval* to, LDefinition::Type type) { + if (*from->getAllocation() == *to->getAllocation()) + return true; + LMoveGroup* moves = block->getEntryMoveGroup(alloc()); + return addMove(moves, from, to, type); + } + + size_t findFirstNonCallSafepoint(CodePosition from) const + { + size_t i = 0; + for (; i < graph.numNonCallSafepoints(); i++) { + const LInstruction* ins = graph.getNonCallSafepoint(i); + if (from <= inputOf(ins)) + break; + } + return i; + } + + void addLiveRegistersForInterval(VirtualRegister* reg, LiveInterval* interval) + { + // Fill in the live register sets for all non-call safepoints. + LAllocation* a = interval->getAllocation(); + if (!a->isRegister()) + return; + + // Don't add output registers to the safepoint. + CodePosition start = interval->start(); + if (interval->index() == 0 && !reg->isTemp()) { +#ifdef CHECK_OSIPOINT_REGISTERS + // We don't add the output register to the safepoint, + // but it still might get added as one of the inputs. + // So eagerly add this reg to the safepoint clobbered registers. 
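/* Editor's illustrative sketch, not part of this changeset: the search done
 * by findFirstNonCallSafepoint() above, on a plain sorted vector of
 * positions. Because safepoints are sorted by position, the first index at or
 * after |from| is the answer and callers can resume scanning forward from it.
 * The vector of raw positions is a stand-in for graph.getNonCallSafepoint()
 * and inputOf(). */
#include <cstdint>
#include <vector>

size_t
FindFirstSafepointAtOrAfter(const std::vector<uint32_t>& safepointPositions, uint32_t from)
{
    size_t i = 0;
    while (i < safepointPositions.size() && safepointPositions[i] < from)
        i++;
    return i;   // equals size() when no safepoint is at or after |from|
}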
+ if (reg->ins()->isInstruction()) { + if (LSafepoint* safepoint = reg->ins()->toInstruction()->safepoint()) + safepoint->addClobberedRegister(a->toRegister()); + } +#endif + start = start.next(); + } + + size_t i = findFirstNonCallSafepoint(start); + for (; i < graph.numNonCallSafepoints(); i++) { + LInstruction* ins = graph.getNonCallSafepoint(i); + CodePosition pos = inputOf(ins); + + // Safepoints are sorted, so we can shortcut out of this loop + // if we go out of range. + if (interval->end() <= pos) + break; + + if (!interval->covers(pos)) + continue; + + LSafepoint* safepoint = ins->safepoint(); + safepoint->addLiveRegister(a->toRegister()); + +#ifdef CHECK_OSIPOINT_REGISTERS + if (reg->isTemp()) + safepoint->addClobberedRegister(a->toRegister()); +#endif + } + } + + // Finds the first safepoint that is within range of an interval. + size_t findFirstSafepoint(const LiveInterval* interval, size_t startFrom) const + { + size_t i = startFrom; + for (; i < graph.numSafepoints(); i++) { + LInstruction* ins = graph.getSafepoint(i); + if (interval->start() <= inputOf(ins)) + break; + } + return i; + } + + void dumpVregs(); +}; + +} // namespace jit +} // namespace js + +#endif /* jit_LiveRangeAllocator_h */ diff --git a/js/src/jit/RegisterAllocator.cpp b/js/src/jit/RegisterAllocator.cpp index fec89cdc3b8..5873eca0270 100644 --- a/js/src/jit/RegisterAllocator.cpp +++ b/js/src/jit/RegisterAllocator.cpp @@ -191,8 +191,8 @@ AllocationIntegrityState::checkIntegrity(LBlock* block, LInstruction* ins, if (ins->isMoveGroup()) { LMoveGroup* group = ins->toMoveGroup(); for (int i = group->numMoves() - 1; i >= 0; i--) { - if (group->getMove(i).to() == alloc) { - alloc = group->getMove(i).from(); + if (*group->getMove(i).to() == alloc) { + alloc = *group->getMove(i).from(); break; } } @@ -412,8 +412,8 @@ AllocationIntegrityState::dump() LMoveGroup* group = ins->toMoveGroup(); for (int i = group->numMoves() - 1; i >= 0; i--) { // Use two printfs, as LAllocation::toString is not reentrant. - fprintf(stderr, " [%s", group->getMove(i).from().toString()); - fprintf(stderr, " -> %s]", group->getMove(i).to().toString()); + fprintf(stderr, " [%s", group->getMove(i).from()->toString()); + fprintf(stderr, " -> %s]", group->getMove(i).to()->toString()); } fprintf(stderr, "\n"); continue; @@ -552,8 +552,8 @@ RegisterAllocator::dumpInstructions() LMoveGroup* group = ins->toMoveGroup(); for (int i = group->numMoves() - 1; i >= 0; i--) { // Use two printfs, as LAllocation::toString is not reentant. 
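/* Editor's illustrative sketch, not part of this changeset: the move-group
 * backtracking done by AllocationIntegrityState::checkIntegrity in the
 * RegisterAllocator.cpp hunk above. When the allocation being traced
 * backwards was the destination of a move, the value must have come from that
 * move's source, so the traced allocation is rewritten and the scan stops.
 * ToyMove and the string-valued allocations are simplified stand-ins. */
#include <string>
#include <vector>

struct ToyMove {
    std::string from;
    std::string to;
};

void
TraceThroughMoveGroup(const std::vector<ToyMove>& group, std::string& tracedAlloc)
{
    for (size_t i = group.size(); i > 0; i--) {        // walk the moves in reverse
        const ToyMove& move = group[i - 1];
        if (move.to == tracedAlloc) {
            tracedAlloc = move.from;                   // the value lived here before the move
            break;
        }
    }
}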
- fprintf(stderr, " [%s", group->getMove(i).from().toString()); - fprintf(stderr, " -> %s]", group->getMove(i).to().toString()); + fprintf(stderr, " [%s", group->getMove(i).from()->toString()); + fprintf(stderr, " -> %s]", group->getMove(i).to()->toString()); } fprintf(stderr, "\n"); continue; diff --git a/js/src/jit/StupidAllocator.cpp b/js/src/jit/StupidAllocator.cpp index 8e3ea628650..5a8a5d9e95a 100644 --- a/js/src/jit/StupidAllocator.cpp +++ b/js/src/jit/StupidAllocator.cpp @@ -186,11 +186,11 @@ StupidAllocator::syncRegister(LInstruction* ins, RegisterIndex index) { if (registers[index].dirty) { LMoveGroup* input = getInputMoveGroup(ins); - LAllocation source(registers[index].reg); + LAllocation* source = new(alloc()) LAllocation(registers[index].reg); uint32_t existing = registers[index].vreg; LAllocation* dest = stackLocation(existing); - input->addAfter(source, *dest, registers[index].type); + input->addAfter(source, dest, registers[index].type); registers[index].dirty = false; } @@ -219,8 +219,8 @@ StupidAllocator::loadRegister(LInstruction* ins, uint32_t vreg, RegisterIndex in // Load a vreg from its stack location to a register. LMoveGroup* input = getInputMoveGroup(ins); LAllocation* source = stackLocation(vreg); - LAllocation dest(registers[index].reg); - input->addAfter(*source, dest, type); + LAllocation* dest = new(alloc()) LAllocation(registers[index].reg); + input->addAfter(source, dest, type); registers[index].set(vreg, ins); registers[index].type = type; } @@ -321,7 +321,7 @@ StupidAllocator::syncForBlockEnd(LBlock* block, LInstruction* ins) } } - group->add(*source, *dest, phi->getDef(0)->type()); + group->add(source, dest, phi->getDef(0)->type()); } } } diff --git a/js/src/jit/arm/CodeGenerator-arm.cpp b/js/src/jit/arm/CodeGenerator-arm.cpp index b24b399ec93..b15cb76f878 100644 --- a/js/src/jit/arm/CodeGenerator-arm.cpp +++ b/js/src/jit/arm/CodeGenerator-arm.cpp @@ -1009,11 +1009,11 @@ CodeGeneratorARM::visitPowHalfD(LPowHalfD* ins) } MoveOperand -CodeGeneratorARM::toMoveOperand(LAllocation a) const +CodeGeneratorARM::toMoveOperand(const LAllocation* a) const { - if (a.isGeneralReg()) + if (a->isGeneralReg()) return MoveOperand(ToRegister(a)); - if (a.isFloatReg()) + if (a->isFloatReg()) return MoveOperand(ToFloatRegister(a)); int32_t offset = ToStackOffset(a); MOZ_ASSERT((offset & 3) == 0); diff --git a/js/src/jit/arm/CodeGenerator-arm.h b/js/src/jit/arm/CodeGenerator-arm.h index f171c2b1f19..1d6cd42bebc 100644 --- a/js/src/jit/arm/CodeGenerator-arm.h +++ b/js/src/jit/arm/CodeGenerator-arm.h @@ -43,7 +43,7 @@ class CodeGeneratorARM : public CodeGeneratorShared return ToOperand(def->output()); } - MoveOperand toMoveOperand(LAllocation a) const; + MoveOperand toMoveOperand(const LAllocation* a) const; void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot); void bailoutFrom(Label* label, LSnapshot* snapshot); diff --git a/js/src/jit/mips/CodeGenerator-mips.cpp b/js/src/jit/mips/CodeGenerator-mips.cpp index aa169e8a2c5..d722ed528ed 100644 --- a/js/src/jit/mips/CodeGenerator-mips.cpp +++ b/js/src/jit/mips/CodeGenerator-mips.cpp @@ -948,11 +948,11 @@ CodeGeneratorMIPS::visitPowHalfD(LPowHalfD* ins) } MoveOperand -CodeGeneratorMIPS::toMoveOperand(LAllocation a) const +CodeGeneratorMIPS::toMoveOperand(const LAllocation* a) const { - if (a.isGeneralReg()) + if (a->isGeneralReg()) return MoveOperand(ToRegister(a)); - if (a.isFloatReg()) { + if (a->isFloatReg()) { return MoveOperand(ToFloatRegister(a)); } int32_t offset = ToStackOffset(a); diff --git 
a/js/src/jit/mips/CodeGenerator-mips.h b/js/src/jit/mips/CodeGenerator-mips.h index e164fd57bd3..93a1cad97a2 100644 --- a/js/src/jit/mips/CodeGenerator-mips.h +++ b/js/src/jit/mips/CodeGenerator-mips.h @@ -58,7 +58,7 @@ class CodeGeneratorMIPS : public CodeGeneratorShared return ToOperand(def->output()); } - MoveOperand toMoveOperand(LAllocation a) const; + MoveOperand toMoveOperand(const LAllocation* a) const; template void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) { diff --git a/js/src/jit/shared/CodeGenerator-shared.h b/js/src/jit/shared/CodeGenerator-shared.h index d29666c1b6f..2aef80aede5 100644 --- a/js/src/jit/shared/CodeGenerator-shared.h +++ b/js/src/jit/shared/CodeGenerator-shared.h @@ -235,13 +235,10 @@ class CodeGeneratorShared : public LElementVisitor return offset; } - inline int32_t ToStackOffset(LAllocation a) const { - if (a.isArgument()) - return ArgToStackOffset(a.toArgument()->index()); - return SlotToStackOffset(a.toStackSlot()->slot()); - } inline int32_t ToStackOffset(const LAllocation* a) const { - return ToStackOffset(*a); + if (a->isArgument()) + return ArgToStackOffset(a->toArgument()->index()); + return SlotToStackOffset(a->toStackSlot()->slot()); } uint32_t frameSize() const { diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp index d3cb9ef36ac..654b6d4375a 100644 --- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp +++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp @@ -1528,11 +1528,11 @@ CodeGeneratorX86Shared::visitUrshD(LUrshD* ins) } MoveOperand -CodeGeneratorX86Shared::toMoveOperand(LAllocation a) const +CodeGeneratorX86Shared::toMoveOperand(const LAllocation* a) const { - if (a.isGeneralReg()) + if (a->isGeneralReg()) return MoveOperand(ToRegister(a)); - if (a.isFloatReg()) + if (a->isFloatReg()) return MoveOperand(ToFloatRegister(a)); return MoveOperand(StackPointer, ToStackOffset(a)); } diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h index a9a080c0b8e..08fe1a048db 100644 --- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h +++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h @@ -115,7 +115,7 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared return ToOperand(def->output()); } - MoveOperand toMoveOperand(LAllocation a) const; + MoveOperand toMoveOperand(const LAllocation* a) const; void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot); void bailoutIf(Assembler::DoubleCondition condition, LSnapshot* snapshot); diff --git a/js/src/jsexn.h b/js/src/jsexn.h index 842782d6a32..3bcf29abd38 100644 --- a/js/src/jsexn.h +++ b/js/src/jsexn.h @@ -12,7 +12,6 @@ #define jsexn_h #include "jsapi.h" -#include "jscntxt.h" #include "NamespaceImports.h" namespace js { diff --git a/js/src/moz.build b/js/src/moz.build index d9b3a44fefa..adc69372f64 100644 --- a/js/src/moz.build +++ b/js/src/moz.build @@ -213,6 +213,7 @@ UNIFIED_SOURCES += [ 'jit/JSONSpewer.cpp', 'jit/LICM.cpp', 'jit/LIR.cpp', + 'jit/LiveRangeAllocator.cpp', 'jit/LoopUnroller.cpp', 'jit/Lowering.cpp', 'jit/MacroAssembler.cpp',
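/* Editor's illustrative sketch, not part of this changeset: the shape shared
 * by the toMoveOperand() overloads changed in the hunks above, which now take
 * the allocation by pointer. A general register, a float register and a stack
 * slot each map to a different kind of move operand, with stack slots turning
 * into an offset-based memory operand. ToyAllocation and ToyMoveOperand are
 * simplified stand-ins for LAllocation and MoveOperand. */
#include <cstdint>

struct ToyAllocation {
    enum Kind { GeneralReg, FloatReg, StackSlot } kind;
    int32_t code;          // register code, or a stack offset for StackSlot
};

struct ToyMoveOperand {
    enum Kind { Reg, Float, Memory } kind;
    int32_t value;
};

ToyMoveOperand
ToToyMoveOperand(const ToyAllocation* a)
{
    if (a->kind == ToyAllocation::GeneralReg)
        return { ToyMoveOperand::Reg, a->code };
    if (a->kind == ToyAllocation::FloatReg)
        return { ToyMoveOperand::Float, a->code };
    return { ToyMoveOperand::Memory, a->code };   // stack slot: offset from the stack pointer
}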