diff --git a/js/src/Makefile.in b/js/src/Makefile.in index 8c3c2ef4574..1e83d71629f 100644 --- a/js/src/Makefile.in +++ b/js/src/Makefile.in @@ -368,6 +368,7 @@ CPPSRCS += MIR.cpp \ Ion.cpp \ BitSet.cpp \ IonLIR.cpp \ + GreedyAllocator.cpp \ $(NULL) ifeq (86, $(findstring 86,$(TARGET_CPU))) diff --git a/js/src/ion/GreedyAllocator.cpp b/js/src/ion/GreedyAllocator.cpp new file mode 100644 index 00000000000..cda7a1e6dba --- /dev/null +++ b/js/src/ion/GreedyAllocator.cpp @@ -0,0 +1,862 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=4 sw=4 et tw=79: + * + * ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is Mozilla Communicator client code, released + * March 31, 1998. + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998 + * the Initial Developer. All Rights Reserved. + * + * Contributor(s): + * David Anderson + * + * Alternatively, the contents of this file may be used under the terms of + * either of the GNU General Public License Version 2 or later (the "GPL"), + * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. 
If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + +#include "GreedyAllocator.h" + +using namespace js; +using namespace js::ion; + +GreedyAllocator::GreedyAllocator(MIRGenerator *gen, LIRGraph &graph) + : gen(gen), + graph(graph) +{ +} + +void +GreedyAllocator::findDefinitionsInLIR(LInstruction *ins) +{ + for (size_t i = 0; i < ins->numDefs(); i++) { + LDefinition *def = ins->getDef(i); + JS_ASSERT(def->virtualRegister() < graph.numVirtualRegisters()); + + vars[def->virtualRegister()].def = def; +#ifdef DEBUG + vars[def->virtualRegister()].ins = ins; +#endif + } +} + +void +GreedyAllocator::findDefinitionsInBlock(LBlock *block) +{ + for (size_t i = 0; i < block->numPhis(); i++) + findDefinitionsInLIR(block->getPhi(i)); + for (LInstructionIterator i = block->begin(); i != block->end(); i++) + findDefinitionsInLIR(*i); +} + +void +GreedyAllocator::findDefinitions() +{ + for (size_t i = 0; i < graph.numBlocks(); i++) + findDefinitionsInBlock(graph.getBlock(i)); +} + +bool +GreedyAllocator::maybeEvict(AnyRegister reg) +{ + if (!state.free.has(reg)) + return evict(reg); + return true; +} + +static inline AnyRegister +GetFixedRegister(LDefinition *def, LUse *use) +{ + return def->type() == LDefinition::DOUBLE + ? AnyRegister(FloatRegister::FromCode(use->registerCode())) + : AnyRegister(Register::FromCode(use->registerCode())); +} + +static inline AnyRegister +GetAllocatedRegister(const LAllocation *a) +{ + JS_ASSERT(a->isRegister()); + return a->isFloatReg() + ? 
AnyRegister(a->toFloatReg()->reg()) + : AnyRegister(a->toGeneralReg()->reg()); +} + +static inline AnyRegister +GetPresetRegister(const LDefinition *def) +{ + JS_ASSERT(def->policy() == LDefinition::PRESET); + return GetAllocatedRegister(def->output()); +} + +bool +GreedyAllocator::prescanDefinition(LDefinition *def) +{ + VirtualRegister *vr = getVirtualRegister(def); + + // Add its stack slot and register to the free pool. + if (!kill(vr)) + return false; + + // If it has a register, prevent it from being allocated this round. + if (vr->hasRegister()) + disallowed.add(vr->reg()); + + if (def->policy() == LDefinition::PRESET) { + const LAllocation *a = def->output(); + if (a->isRegister()) { + // Evict fixed registers. Use the unchecked version of set-add + // because the register does not reflect any allocation state, so + // it may have already been added. + AnyRegister reg = GetPresetRegister(def); + disallowed.addUnchecked(reg); + if (!maybeEvict(reg)) + return false; + } + } + return true; +} + +bool +GreedyAllocator::prescanDefinitions(LInstruction *ins) +{ + for (size_t i = 0; i < ins->numDefs(); i++) { + if (!prescanDefinition(ins->getDef(i))) + return false; + } + for (size_t i = 0; i < ins->numTemps(); i++) { + if (!prescanDefinition(ins->getTemp(i))) + return false; + } + return true; +} + +bool +GreedyAllocator::prescanUses(LInstruction *ins) +{ + for (size_t i = 0; i < ins->numOperands(); i++) { + LAllocation *a = ins->getOperand(i); + if (!a->isUse()) { + JS_ASSERT(a->isConstant()); + continue; + } + + LUse *use = a->toUse(); + VirtualRegister *vr = getVirtualRegister(use); + + if (use->policy() == LUse::FIXED) + disallowed.add(GetFixedRegister(vr->def, use)); + else if (vr->hasRegister()) + discouraged.add(vr->reg()); + } + return true; +} + +bool +GreedyAllocator::allocateStack(VirtualRegister *vr) +{ + if (vr->hasBackingStack()) + return true; + + uint32 index; + if (vr->isDouble()) { + if (!stackSlots.allocateDoubleSlot(&index)) + return false; + 
} else { + if (!stackSlots.allocateSlot(&index)) + return false; + } + + vr->setStackSlot(index); + return true; +} + +bool +GreedyAllocator::allocate(LDefinition::Type type, Policy policy, AnyRegister *out) +{ + RegisterSet allowed = RegisterSet::Not(disallowed); + RegisterSet free = allocatableRegs(); + RegisterSet tryme = RegisterSet::Intersect(free, RegisterSet::Not(discouraged)); + + if (tryme.empty(type == LDefinition::DOUBLE)) { + if (free.empty(type == LDefinition::DOUBLE)) { + *out = allowed.takeAny(type == LDefinition::DOUBLE); + if (!evict(*out)) + return false; + } else { + *out = free.takeAny(type == LDefinition::DOUBLE); + } + } else { + *out = tryme.takeAny(type == LDefinition::DOUBLE); + } + + if (policy != TEMPORARY) + disallowed.add(*out); + + return true; +} + +void +GreedyAllocator::freeStack(VirtualRegister *vr) +{ + if (vr->isDouble()) + stackSlots.freeDoubleSlot(vr->stackSlot()); + else + stackSlots.freeSlot(vr->stackSlot()); +} + +void +GreedyAllocator::freeReg(AnyRegister reg) +{ + state[reg] = NULL; + state.free.add(reg); +} + +bool +GreedyAllocator::kill(VirtualRegister *vr) +{ + if (vr->hasRegister()) { + AnyRegister reg = vr->reg(); + JS_ASSERT(state[reg] == vr); + + freeReg(reg); + } + if (vr->hasStackSlot()) + freeStack(vr); + return true; +} + +bool +GreedyAllocator::evict(AnyRegister reg) +{ + VirtualRegister *vr = state[reg]; + JS_ASSERT(vr->reg() == reg); + + // If the virtual register does not have a stack slot, allocate one now. + if (!allocateStack(vr)) + return false; + + // We're allocating bottom-up, so eviction *restores* a register, otherwise + // it could not be used downstream. 
+ if (!restore(vr->backingStack(), reg)) + return false; + + freeReg(reg); + vr->unsetRegister(); + return true; +} + +void +GreedyAllocator::assign(VirtualRegister *vr, AnyRegister reg) +{ + JS_ASSERT(!state[reg]); + state[reg] = vr; + vr->setRegister(reg); + state.free.take(reg); +} + +bool +GreedyAllocator::allocateRegisterOperand(LAllocation *a, VirtualRegister *vr) +{ + AnyRegister reg; + + // Note that the disallow policy is required to prevent other allocations + // in later uses clobbering the register. + if (vr->hasRegister()) { + reg = vr->reg(); + disallowed.add(reg); + } else { + // If it does not have a register, allocate one now. + if (!allocate(vr->type(), DISALLOW, ®)) + return false; + assign(vr, reg); + } + + *a = LAllocation(reg); + return true; +} + +bool +GreedyAllocator::allocateAnyOperand(LAllocation *a, VirtualRegister *vr) +{ + if (vr->hasRegister()) { + *a = LAllocation(vr->reg()); + return true; + } + + // Are any registers free? Don't bother if the requestee is a type tag. + if (vr->type() != LDefinition::TYPE && !allocatableRegs().empty(vr->isDouble())) + return allocateRegisterOperand(a, vr); + + // Otherwise, use a memory operand. + if (!allocateStack(vr)) + return false; + *a = vr->backingStack(); + return true; +} + +bool +GreedyAllocator::allocateFixedOperand(LAllocation *a, VirtualRegister *vr) +{ + // Note that this register is already in the disallow set. + AnyRegister needed = GetFixedRegister(vr->def, a->toUse()); + + *a = LAllocation(needed); + + if (!vr->hasRegister()) { + if (!maybeEvict(needed)) + return false; + assign(vr, needed); + return true; + } + + if (vr->reg() == needed) + return true; + + // Otherwise, we need to align the input. 
+ return align(vr->reg(), needed); +} + +bool +GreedyAllocator::allocateSameAsInput(LDefinition *def, LAllocation *a, AnyRegister *out) +{ + LUse *use = a->toUse(); + VirtualRegister *vdef = getVirtualRegister(def); + VirtualRegister *vuse = getVirtualRegister(use); + + JS_ASSERT(vdef->isDouble() == vuse->isDouble()); + + AnyRegister reg; + + // Find a suitable output register. For simplicity, we do not consider the + // current allocation of the input virtual register, which means it could + // be evicted. + if (use->isFixedRegister()) { + reg = GetFixedRegister(def, use); + } else if (vdef->hasRegister()) { + reg = vdef->reg(); + } else { + if (!allocate(vdef->type(), DISALLOW, ®)) + return false; + } + JS_ASSERT(disallowed.has(reg)); + + if (vuse->hasRegister()) { + // Load the actual register into the desired input operand. + LAllocation from; + if (vuse->hasRegister()) + from = LAllocation(vuse->reg()); + else + from = vuse->backingStack(); + if (!align(from, reg)) + return false; + } else { + // If the input has no register, we can just re-use the output register + // directly, because nothing downstream could be clobbered by consuming + // the register. + assign(vuse, reg); + } + + // Overwrite the input allocation now. + *a = LAllocation(reg); + + *out = reg; + return true; +} + +bool +GreedyAllocator::allocateDefinitions(LInstruction *ins) +{ + for (size_t i = 0; i < ins->numDefs(); i++) { + LDefinition *def = ins->getDef(i); + VirtualRegister *vr = getVirtualRegister(def); + + LAllocation output; + switch (def->policy()) { + case LDefinition::DEFAULT: + { + // Either take the register requested, or allocate a new one. + if (vr->hasRegister()) { + output = LAllocation(vr->reg()); + } else { + AnyRegister reg; + if (!allocate(vr->type(), DISALLOW, ®)) + return false; + output = LAllocation(reg); + } + break; + } + + case LDefinition::PRESET: + { + // Eviction and disallowing occurred during the definition + // pre-scan pass. 
+ output = *def->output(); + break; + } + + case LDefinition::MUST_REUSE_INPUT: + { + AnyRegister out_reg; + if (!allocateSameAsInput(def, ins->getOperand(0), &out_reg)) + return false; + output = LAllocation(out_reg); + break; + } + } + + if (output.isRegister()) { + JS_ASSERT_IF(output.isFloatReg(), disallowed.has(output.toFloatReg()->reg())); + JS_ASSERT_IF(output.isGeneralReg(), disallowed.has(output.toGeneralReg()->reg())); + } + + // |output| is now the allocation state leaving the instruction. + // However, this is not necessarily the allocation state expected + // downstream, so emit moves where necessary. + if (output.isRegister()) { + if (vr->hasRegister()) { + // If the returned register is different from the output + // register, a move is required. + AnyRegister out = GetAllocatedRegister(&output); + if (out != vr->reg()) { + if (!spill(output, vr->reg())) + return false; + } + } + + // Spill to the stack if needed. + if (vr->hasStackSlot() && !spill(output, vr->backingStack())) + return false; + } else if (vr->hasRegister()) { + // This definition has a canonical spill location, so make sure to + // load it to the resulting register, if any. + JS_ASSERT(!vr->hasStackSlot()); + JS_ASSERT(vr->hasBackingStack()); + if (!spill(output, vr->reg())) + return false; + } + + // Finally, set the output. + *def = LDefinition(def->type(), output); + } + + return true; +} + +bool +GreedyAllocator::allocateTemporaries(LInstruction *ins) +{ + for (size_t i = 0; i < ins->numTemps(); i++) { + LDefinition *def = ins->getTemp(0); + if (def->policy() == LDefinition::PRESET) + continue; + + JS_ASSERT(def->policy() == LDefinition::DEFAULT); + AnyRegister reg; + if (!allocate(def->type(), DISALLOW, ®)) + return false; + *def = LDefinition(def->type(), LAllocation(reg)); + } + return true; +} + +bool +GreedyAllocator::allocateInputs(LInstruction *ins) +{ + // First deal with fixed-register policies and policies that require + // registers. 
+ for (size_t i = 0; i < ins->numOperands(); i++) { + LAllocation *a = ins->getOperand(i); + if (!a->isUse()) + continue; + LUse *use = a->toUse(); + VirtualRegister *vr = getVirtualRegister(use); + if (use->policy() == LUse::FIXED) { + if (!allocateFixedOperand(a, vr)) + return false; + } else if (use->policy() == LUse::REGISTER) { + if (!allocateRegisterOperand(a, vr)) + return false; + } + } + + // Allocate temporaries before uses that accept memory operands, because + // temporaries require registers. + if (!allocateTemporaries(ins)) + return false; + + // Finally, deal with things that take either registers or memory. + for (size_t i = 0; i < ins->numOperands(); i++) { + LAllocation *a = ins->getOperand(i); + if (!a->isUse()) + continue; + + LUse *use = a->toUse(); + JS_ASSERT(use->policy() == LUse::ANY); + + VirtualRegister *vr = getVirtualRegister(use); + if (!allocateAnyOperand(a, vr)) + return false; + } + + return true; +} + +bool +GreedyAllocator::informSnapshot(LSnapshot *snapshot) +{ + for (size_t i = 0; i < snapshot->numEntries(); i++) { + LAllocation *a = snapshot->getEntry(i); + if (!a->isUse()) + continue; + + LUse *use = a->toUse(); + VirtualRegister *vr = getVirtualRegister(use); + if (vr->hasRegister()) { + *a = LAllocation(vr->reg()); + } else { + if (!allocateStack(vr)) + return false; + *a = vr->backingStack(); + } + } + return true; +} + +bool +GreedyAllocator::allocateRegistersInBlock(LBlock *block) +{ + for (LInstructionReverseIterator ri = block->instructions().rbegin(); + ri != block->instructions().rend(); + ri++) + { + if (!gen->ensureBallast()) + return false; + + LInstruction *ins = *ri; + + // Reset internal state used for evicting. + reset(); + + // Step 1. Find all fixed writable registers, adding them to the + // disallow set. + if (!prescanDefinitions(ins)) + return false; + + // Step 2. For each use, add fixed policies to the disallow set and + // already allocated registers to the discouraged set. 
+ if (!prescanUses(ins)) + return false; + + // Step 3. Allocate registers for each definition. + if (!allocateDefinitions(ins)) + return false; + + // Step 4. Allocate inputs and temporaries. + if (!allocateInputs(ins)) + return false; + + // Step 5. Assign fields of a snapshot. + if (ins->snapshot() && !informSnapshot(ins->snapshot())) + return false; + + // Step 6. Insert move instructions. + if (restores) + block->insertAfter(ins, restores); + if (spills) + block->insertAfter(ins, spills); + if (aligns) { + block->insertBefore(ins, aligns); + ri++; + } + } + return true; +} + +bool +GreedyAllocator::mergeRegisterState(const AnyRegister ®, LBlock *left, LBlock *right) +{ + VirtualRegister *vleft = state[reg]; + VirtualRegister *vright = blockInfo(right)->in[reg]; + + // Make sure virtual registers have sensible register state. + if (vleft) + vleft->setRegister(vleft->reg()); + + // If the input register is unused or occupied by the same vr, we're done. + if (vleft == vright) + return true; + + // If the right-hand side has no allocation, then do nothing because the + // left-hand side has already propagated its value up. + if (!vright) + return true; + + // If the left-hand side has no allocation, merge the right-hand side in. + if (!vleft) { + assign(vright, reg); + return true; + } + + BlockInfo *info = blockInfo(right); + + // Otherwise, the same register is occupied by two different allocations: + // the left side expects R1=A, and the right side expects R1=B. + if (allocatableRegs().empty(vright->isDouble())) { + // There are no free registers, so put a move on the right-hand block + // that loads the correct register out of vright's stack. + if (!allocateStack(vright)) + return false; + if (!info->restores.move(vright->backingStack(), reg)) + return false; + + vright->unsetRegister(); + } else { + // There is a free register, so grab it and assign it, and emit a move + // on the right-hand block. 
+ AnyRegister newreg; + if (!allocate(vright->type(), TEMPORARY, &newreg)) + return false; + if (!info->restores.move(newreg, reg)) + return false; + + assign(vright, newreg); + } + + return true; +} + +bool +GreedyAllocator::mergePhiState(LBlock *block) +{ + MBasicBlock *mblock = block->mir(); + if (!mblock->successorWithPhis()) + return true; + + bool isLoopExit = mblock->successorWithPhis()->isLoopHeader() && + mblock->id() >= mblock->successorWithPhis()->id(); + + BlockInfo *info = blockInfo(block); + + // Reset state so evictions will work. + reset(); + + uint32 pos = mblock->positionInPhiSuccessor(); + LBlock *successor = mblock->successorWithPhis()->lir(); + for (size_t i = 0; i < successor->numPhis(); i++) { + LPhi *phi = successor->getPhi(i); + VirtualRegister *def = getVirtualRegister(phi->getDef(0)); + + if (!def->hasRegister() && !def->hasStackSlot()) { + // The phi has no uses, but this might be because it's a loop exit. + // If it's a loop exit, we haven't seen uses yet and must be + // pessimistic. Note that this generally results in suboptimal + // code, for example, at the loop edge: + // + // move (def -> [stack]) + // move ([stack] -> phi) + // + // It should not be difficult to either boil this away or come up + // with a trick to pin phis to registers. + if (!isLoopExit) + continue; + + if (state.free.empty(def->isDouble())) { + if (!allocateStack(def)) + return false; + } else { + AnyRegister reg; + if (!allocate(def->type(), TEMPORARY, ®)) + return false; + assign(def, reg); + } + } + + LAllocation *a = phi->getOperand(pos); + + // Handle constant inputs. 
+ if (a->isConstant()) { + LAllocation dest; + if (def->hasRegister()) + dest = LAllocation(def->reg()); + else + dest = def->backingStack(); + if (!info->phis.move(*a, dest)) + return false; + continue; + } + + VirtualRegister *use = getVirtualRegister(a->toUse()); + + // The definition contains the storage desired by the successor, and + // the use contains the storage currently allocated in this block. + if (use->hasRegister() && def->hasRegister()) { + // If both are in different registers, perform a parallel move. + if (use->reg() != def->reg()) { + if (!info->phis.move(use->reg(), def->reg())) + return false; + } + } else if (use->hasRegister() && !def->hasRegister()) { + // Emit a store to the stack slot. + if (!info->phis.move(use->reg(), def->backingStack())) + return false; + } else { + if (def->hasRegister()) { + // Is its register free? + if (!state[def->reg()]) { + assign(use, def->reg()); + } else { + // Emit a load from the stack, since eviction is + // inevitable. + if (!allocateStack(use)) + return false; + if (!info->phis.move(use->backingStack(), def->reg())) + return false; + } + } else { + if (!allocateStack(use)) + return false; + + AnyRegister reg; + if (!allocate(use->type(), TEMPORARY, ®)) + return false; + + // Memory to memory moves are not parallel. + LMove *move = new LMove; + if (!move->add(use->backingStack(), LAllocation(reg))) + return false; + if (!move->add(LAllocation(reg), def->backingStack())) + return false; + block->insertBefore(*block->instructions().rbegin(), move); + } + } + } + + // Now insert restores (if any) and phi moves. 
+ JS_ASSERT(!aligns); + JS_ASSERT(!spills); + if (restores) + block->insertBefore(*block->instructions().rbegin(), restores); + if (info->phis.moves) + block->insertBefore(*block->instructions().rbegin(), info->phis.moves); + + return true; +} + +bool +GreedyAllocator::mergeAllocationState(LBlock *block) +{ + MBasicBlock *mblock = block->mir(); + + if (!mblock->numSuccessors()) { + state = AllocationState(); + return true; + } + + // Prefer the successor with phis as the baseline state + LBlock *leftblock = mblock->getSuccessor(0)->lir(); + state = blockInfo(leftblock)->in; + + // Merge state from each additional successor. + for (size_t i = 1; i < mblock->numSuccessors(); i++) { + LBlock *rightblock = mblock->getSuccessor(i)->lir(); + + for (size_t i = 0; i < RegisterCodes::Total; i++) { + AnyRegister reg = AnyRegister(Register::FromCode(i)); + if (!mergeRegisterState(reg, leftblock, rightblock)) + return false; + } + for (size_t i = 0; i < FloatRegisterCodes::Total; i++) { + AnyRegister reg = AnyRegister(FloatRegister::FromCode(i)); + if (!mergeRegisterState(reg, leftblock, rightblock)) + return false; + } + + // If there were parallel moves, append them now. + BlockInfo *info = blockInfo(rightblock); + if (info->restores.moves) + rightblock->insertBefore(*rightblock->begin(), info->restores.moves); + } + + // Insert moves for phis. + if (!mergePhiState(block)) + return false; + + return true; +} + +bool +GreedyAllocator::allocateRegisters() +{ + // Allocate registers bottom-up, such that we see all uses before their + // definitions. + for (size_t i = graph.numBlocks() - 1; i < graph.numBlocks(); i--) { + LBlock *block = graph.getBlock(i); + + // Merge allocation state from our predecessors. + if (!mergeAllocationState(block)) + return false; + + // Allocate registers. + if (!allocateRegistersInBlock(block)) + return false; + + // Kill phis. 
+ for (size_t i = 0; i < block->numPhis(); i++) { + LPhi *phi = block->getPhi(i); + JS_ASSERT(phi->numDefs() == 1); + + VirtualRegister *vr = getVirtualRegister(phi->getDef(0)); + kill(vr); + } + + // At the top of the block, copy our allocation state for our + // predecessors. + blockInfo(block)->in = state; + } + return true; +} + +bool +GreedyAllocator::allocate() +{ + vars = gen->allocate(graph.numVirtualRegisters()); + if (!vars) + return false; + memset(vars, 0, sizeof(VirtualRegister) * graph.numVirtualRegisters()); + + blocks = gen->allocate(graph.numBlocks()); + for (size_t i = 0; i < graph.numBlocks(); i++) + new (&blocks[i]) BlockInfo(); + + findDefinitions(); + if (!allocateRegisters()) + return false; + graph.setStackHeight(stackSlots.stackHeight()); + + return true; +} + diff --git a/js/src/ion/GreedyAllocator.h b/js/src/ion/GreedyAllocator.h new file mode 100644 index 00000000000..232a53da918 --- /dev/null +++ b/js/src/ion/GreedyAllocator.h @@ -0,0 +1,314 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=4 sw=4 et tw=79: + * + * ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is Mozilla Communicator client code, released + * March 31, 1998. + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998 + * the Initial Developer. All Rights Reserved. 
+ * + * Contributor(s): + * David Anderson + * + * Alternatively, the contents of this file may be used under the terms of + * either of the GNU General Public License Version 2 or later (the "GPL"), + * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + +#ifndef jsion_include_greedy_h__ +#define jsion_include_greedy_h__ + +#include "MIR.h" +#include "MIRGraph.h" +#include "IonLIR.h" + +namespace js { +namespace ion { + +class GreedyAllocator +{ + struct Mover { + LMove *moves; + + Mover() : moves(NULL) + { } + + template + bool move(const From &from, const To &to) { + if (!moves) + moves = new LMove; + return moves->add(LAllocation(from), LAllocation(to)); + } + }; + + struct VirtualRegister { + LDefinition *def; + uint32 stackSlot_; + union { + RegisterCodes::Code gprCode; + FloatRegisterCodes::Code fpuCode; + uint32 registerCode; + }; + bool hasRegister_; + bool hasStackSlot_; + +#ifdef DEBUG + LInstruction *ins; +#endif + + LDefinition::Type type() const { + return def->type(); + } + bool isDouble() const { + return type() == LDefinition::DOUBLE; + } + Register gpr() const { + JS_ASSERT(!isDouble()); + JS_ASSERT(hasRegister()); + return Register::FromCode(gprCode); + } + FloatRegister fpu() const { + JS_ASSERT(isDouble()); + JS_ASSERT(hasRegister()); + return FloatRegister::FromCode(fpuCode); + } + 
AnyRegister reg() const { + return isDouble() ? AnyRegister(fpu()) : AnyRegister(gpr()); + } + void setRegister(FloatRegister reg) { + JS_ASSERT(isDouble()); + fpuCode = reg.code(); + hasRegister_ = true; + } + void setRegister(Register reg) { + JS_ASSERT(!isDouble()); + gprCode = reg.code(); + hasRegister_ = true; + } + void setRegister(AnyRegister reg) { + if (reg.isFloat()) + setRegister(reg.fpu()); + else + setRegister(reg.gpr()); + } + uint32 stackSlot() const { + return stackSlot_; + } + bool hasBackingStack() const { + return hasStackSlot() || + (def->isPreset() && def->output()->isMemory()); + } + LAllocation backingStack() const { + if (hasStackSlot()) + return LStackSlot(stackSlot_); + JS_ASSERT(def->policy() == LDefinition::PRESET); + JS_ASSERT(def->output()->isMemory()); + return *def->output(); + } + void setStackSlot(uint32 index) { + JS_ASSERT(!hasStackSlot()); + stackSlot_ = index; + hasStackSlot_ = true; + } + bool hasRegister() const { + return hasRegister_; + } + void unsetRegister() { + hasRegister_ = false; + } + bool hasSameRegister(uint32 code) const { + return hasRegister() && registerCode == code; + } + bool hasStackSlot() const { + return hasStackSlot_; + } + }; + + struct AllocationState { + RegisterSet free; + VirtualRegister *gprs[RegisterCodes::Total]; + VirtualRegister *fpus[FloatRegisterCodes::Total]; + + VirtualRegister *& operator[](const AnyRegister ®) { + if (reg.isFloat()) + return fpus[reg.fpu().code()]; + return gprs[reg.gpr().code()]; + } + + AllocationState() + : free(RegisterSet::All()), + gprs(), + fpus() + { } + }; + + struct BlockInfo { + AllocationState in; + Mover restores; + Mover phis; + }; + + private: + MIRGenerator *gen; + LIRGraph &graph; + VirtualRegister *vars; + RegisterSet disallowed; + RegisterSet discouraged; + AllocationState state; + StackAssignment stackSlots; + BlockInfo *blocks; + + // Aligns: If a register shuffle must occur to align input parameters (for + // example, ecx loading into fixed edx), it 
goes here. + // Spills: A definition may have to spill its result register to the stack, + // if restore code lies downstream. + // Restores: If a register is evicted, an instruction will load it off the + // stack for downstream uses. + // + // Moves happen in this order: + // Aligns + // + // Spills + // Restores + // + LMove *aligns; + LMove *spills; + LMove *restores; + + bool restore(const LAllocation &from, const AnyRegister &to) { + if (!restores) + restores = new LMove; + return restores->add(from, LAllocation(to)); + } + + template + bool spill(const LA &from, const LB &to) { + if (!spills) + spills = new LMove; + return spills->add(LAllocation(from), LAllocation(to)); + } + + template + bool align(const LA &from, const LB &to) { + if (!aligns) + aligns = new LMove; + return aligns->add(LAllocation(from), LAllocation(to)); + } + + void reset() { + aligns = NULL; + spills = NULL; + restores = NULL; + disallowed = RegisterSet(); + discouraged = RegisterSet(); + } + + private: + void findDefinitionsInLIR(LInstruction *ins); + void findDefinitionsInBlock(LBlock *block); + void findDefinitions(); + + // Kills a definition, freeing its stack allocation and register. + bool kill(VirtualRegister *vr); + + // Evicts a register, spilling it to the stack and allowing it to be + // allocated. + bool evict(AnyRegister reg); + bool maybeEvict(AnyRegister reg); + + // Allocates or frees a stack slot. + bool allocateStack(VirtualRegister *vr); + void freeStack(VirtualRegister *vr); + + // Marks a register as being free. + void freeReg(AnyRegister reg); + + // Takes a free register and assigns it to a virtual register. + void assign(VirtualRegister *vr, AnyRegister reg); + + enum Policy { + // A temporary register may be allocated again immediately. It is not + // added to the disallow or used set. + TEMPORARY, + + // A disallowed register can be re-allocated next instruction, but is + // pinned for further allocations during this instruction. 
+ DISALLOW + }; + + // Allocate a free register of a particular type, possibly evicting in the + // process. + bool allocate(LDefinition::Type type, Policy policy, AnyRegister *out); + + // Allocate a physical register for a virtual register, possibly evicting + // in the process. + bool allocateRegisterOperand(LAllocation *a, VirtualRegister *vr); + bool allocateAnyOperand(LAllocation *a, VirtualRegister *vr); + bool allocateFixedOperand(LAllocation *a, VirtualRegister *vr); + + bool prescanDefinition(LDefinition *def); + bool prescanDefinitions(LInstruction *ins); + bool prescanUses(LInstruction *ins); + bool informSnapshot(LSnapshot *snapshot); + bool allocateSameAsInput(LDefinition *def, LAllocation *a, AnyRegister *out); + bool allocateDefinitions(LInstruction *ins); + bool allocateTemporaries(LInstruction *ins); + bool allocateInputs(LInstruction *ins); + + bool allocateRegisters(); + bool allocateRegistersInBlock(LBlock *block); + bool mergePhiState(LBlock *block); + bool mergeAllocationState(LBlock *block); + bool mergeRegisterState(const AnyRegister ®, LBlock *left, LBlock *right); + + VirtualRegister *getVirtualRegister(LDefinition *def) { + JS_ASSERT(def->virtualRegister() < graph.numVirtualRegisters()); + return &vars[def->virtualRegister()]; + } + VirtualRegister *getVirtualRegister(LUse *use) { + JS_ASSERT(use->virtualRegister() < graph.numVirtualRegisters()); + JS_ASSERT(vars[use->virtualRegister()].def); + return &vars[use->virtualRegister()]; + } + RegisterSet allocatableRegs() const { + return RegisterSet::Intersect(state.free, RegisterSet::Not(disallowed)); + } + BlockInfo *blockInfo(LBlock *block) { + return &blocks[block->mir()->id()]; + } + + public: + GreedyAllocator(MIRGenerator *gen, LIRGraph &graph); + + bool allocate(); +}; + +} // namespace ion +} // namespace js + +#endif // jsion_include_greedy_h__ + diff --git a/js/src/ion/InlineList.h b/js/src/ion/InlineList.h index 7738d9287b1..bf438933346 100644 --- a/js/src/ion/InlineList.h +++ 
b/js/src/ion/InlineList.h @@ -107,6 +107,36 @@ class InlineList } }; + class reverse_iterator + { + friend class InlineList; + Node *iter; + public: + reverse_iterator(Node *iter) : iter(iter) { } + + reverse_iterator & operator ++() { + iter = iter->prev; + return *this; + } + reverse_iterator operator ++(int) { + reverse_iterator old(*this); + iter = iter->prev; + return old; + } + T * operator *() { + return static_cast<T *>(iter); + } + T * operator ->() { + return static_cast<T *>(iter); + } + bool operator != (const reverse_iterator &where) const { + return iter != where.iter; + } + bool operator == (const reverse_iterator &where) const { + return iter == where.iter; + } + }; + class const_iterator { friend class InlineList; @@ -144,6 +174,12 @@ class InlineList iterator end() { return iterator(&head); } + reverse_iterator rbegin() { + return reverse_iterator(head.prev); + } + reverse_iterator rend() { + return reverse_iterator(&head); + } const_iterator begin() const { return const_iterator(head.next); } diff --git a/js/src/ion/Ion.cpp b/js/src/ion/Ion.cpp index 6c6eff22d21..36f912fc493 100644 --- a/js/src/ion/Ion.cpp +++ b/js/src/ion/Ion.cpp @@ -44,6 +44,7 @@ #include "IonBuilder.h" #include "IonSpew.h" #include "IonLIR.h" +#include "GreedyAllocator.h" #if defined(JS_CPU_X86) # include "x86/Lowering-x86.h" @@ -144,11 +145,17 @@ ion::Go(JSContext *cx, JSScript *script, StackFrame *fp) return false; spew.spew("Apply types"); - LIRBuilder lirgen(&builder, graph); + LIRGraph lir; + LIRBuilder lirgen(&builder, graph, lir); if (!lirgen.generate()) return false; spew.spew("Generate LIR"); + GreedyAllocator greedy(&builder, lir); + if (!greedy.allocate()) + return false; + spew.spew("Allocate registers"); + return false; } diff --git a/js/src/ion/IonAssembler.h b/js/src/ion/IonAssembler.h index 434e8b40cd2..8e5a80b8ca2 100644 --- a/js/src/ion/IonAssembler.h +++ b/js/src/ion/IonAssembler.h @@ -54,50 +54,233 @@ namespace js { namespace ion { struct Register { - 
RegisterCodes::Code code_; + typedef RegisterCodes Codes; + typedef Codes::Code Code; + + Code code_; static Register FromCode(uint32 i) { JS_ASSERT(i < RegisterCodes::Total); Register r = { (RegisterCodes::Code)i }; return r; } - - RegisterCodes::Code code() const { + Code code() const { + JS_ASSERT(code_ < RegisterCodes::Total); return code_; } - const char *name() const { return RegisterCodes::GetName(code()); } }; struct FloatRegister { - FloatRegisterCodes::Code code_; + typedef FloatRegisterCodes Codes; + typedef Codes::Code Code; + + Code code_; static FloatRegister FromCode(uint32 i) { JS_ASSERT(i < FloatRegisterCodes::Total); FloatRegister r = { (FloatRegisterCodes::Code)i }; return r; } - - FloatRegisterCodes::Code code() const { + Code code() const { + JS_ASSERT(code_ < FloatRegisterCodes::Total); return code_; } - const char *name() const { return FloatRegisterCodes::GetName(code()); } }; +struct AnyRegister { + union { + RegisterCodes::Code gpr_; + FloatRegisterCodes::Code fpu_; + }; + bool isFloat_; + + AnyRegister() + { } + explicit AnyRegister(Register gpr) { + gpr_ = gpr.code(); + isFloat_ = false; + } + explicit AnyRegister(FloatRegister fpu) { + fpu_ = fpu.code(); + isFloat_ = true; + } + bool isFloat() const { + return isFloat_; + } + Register gpr() const { + JS_ASSERT(!isFloat()); + return Register::FromCode(gpr_); + } + FloatRegister fpu() const { + JS_ASSERT(isFloat()); + return FloatRegister::FromCode(fpu_); + } + bool operator ==(const AnyRegister &other) { + return isFloat() + ? (other.isFloat() && fpu_ == other.fpu_) + : (!other.isFloat() && gpr_ == other.gpr_); + } + bool operator !=(const AnyRegister &other) { + return isFloat() + ? 
(!other.isFloat() || fpu_ != other.fpu_) + : (other.isFloat() || gpr_ != other.gpr_); + } +}; + +template +class TypedRegisterSet +{ + uint32 bits_; + + explicit TypedRegisterSet(uint32 bits) + : bits_(bits) + { } + + public: + TypedRegisterSet() : bits_(0) + { } + static inline TypedRegisterSet All() { + return TypedRegisterSet(T::Codes::AllocatableMask); + } + static inline TypedRegisterSet Intersect(const TypedRegisterSet &lhs, + const TypedRegisterSet &rhs) { + return TypedRegisterSet(lhs.bits_ & rhs.bits_); + } + static inline TypedRegisterSet Not(const TypedRegisterSet &in) { + return TypedRegisterSet(~in.bits_ & T::Codes::AllocatableMask); + } + void intersect(TypedRegisterSet other) { + bits_ &= ~other.bits_; + } + bool has(T reg) const { + return !!(bits_ & (1 << reg.code())); + } + void addUnchecked(T reg) { + bits_ |= (1 << reg.code()); + } + void add(T reg) { + JS_ASSERT(!has(reg)); + addUnchecked(reg); + } + bool empty() const { + return !bits_; + } + void take(T reg) { + JS_ASSERT(has(reg)); + bits_ &= ~(1 << reg.code()); + } + T getAny() const { + JS_ASSERT(!empty()); + int ireg; + JS_FLOOR_LOG2(ireg, bits_); + return T::FromCode(ireg); + } + T takeAny() { + JS_ASSERT(!empty()); + T reg = getAny(); + take(reg); + return reg; + } +}; + +typedef TypedRegisterSet GeneralRegisterSet; +typedef TypedRegisterSet FloatRegisterSet; + +class RegisterSet { + GeneralRegisterSet gpr_; + FloatRegisterSet fpu_; + + public: + RegisterSet() + { } + RegisterSet(const GeneralRegisterSet &gpr, const FloatRegisterSet &fpu) + : gpr_(gpr), + fpu_(fpu) + { } + static inline RegisterSet All() { + return RegisterSet(GeneralRegisterSet::All(), FloatRegisterSet::All()); + } + static inline RegisterSet Intersect(const RegisterSet &lhs, const RegisterSet &rhs) { + return RegisterSet(GeneralRegisterSet::Intersect(lhs.gpr_, rhs.gpr_), + FloatRegisterSet::Intersect(lhs.fpu_, rhs.fpu_)); + } + static inline RegisterSet Not(const RegisterSet &in) { + return 
RegisterSet(GeneralRegisterSet::Not(in.gpr_), + FloatRegisterSet::Not(in.fpu_)); + } + bool has(Register reg) const { + return gpr_.has(reg); + } + bool has(FloatRegister reg) const { + return fpu_.has(reg); + } + bool has(AnyRegister reg) const { + return reg.isFloat() ? has(reg.fpu()) : has(reg.gpr()); + } + void add(Register reg) { + gpr_.add(reg); + } + void add(FloatRegister reg) { + fpu_.add(reg); + } + void add(const AnyRegister &any) { + if (any.isFloat()) + add(any.fpu()); + else + add(any.gpr()); + } + void addUnchecked(Register reg) { + gpr_.addUnchecked(reg); + } + void addUnchecked(FloatRegister reg) { + fpu_.addUnchecked(reg); + } + void addUnchecked(const AnyRegister &any) { + if (any.isFloat()) + addUnchecked(any.fpu()); + else + addUnchecked(any.gpr()); + } + bool empty(bool floats) const { + return floats ? fpu_.empty() : gpr_.empty(); + } + FloatRegister takeFloat() { + return fpu_.takeAny(); + } + Register takeGeneral() { + return gpr_.takeAny(); + } + void take(const AnyRegister &reg) { + if (reg.isFloat()) + fpu_.take(reg.fpu()); + else + gpr_.take(reg.gpr()); + } + AnyRegister takeAny(bool isFloat) { + if (isFloat) + return AnyRegister(takeFloat()); + return AnyRegister(takeGeneral()); + } +}; + } // namespace js } // namespace ion #if defined(JS_CPU_X86) # include "x86/Assembler-x86.h" +# include "x86/StackAssignment-x86.h" #elif defined(JS_CPU_X64) # include "x64/Assembler-x64.h" +# include "x64/StackAssignment-x64.h" #elif defined(JS_CPU_ARM) # include "arm/Assembler-ARM.h" +# include "arm/StackAssignment-arm.h" #endif #endif // jsion_cpu_assembler_h__ diff --git a/js/src/ion/IonLIR.cpp b/js/src/ion/IonLIR.cpp index 5b5247f9d5b..d5b59074219 100644 --- a/js/src/ion/IonLIR.cpp +++ b/js/src/ion/IonLIR.cpp @@ -47,6 +47,12 @@ using namespace js; using namespace js::ion; +LIRGraph::LIRGraph() + : numVirtualRegisters_(0), + stackHeight_(0) +{ +} + LSnapshot::LSnapshot(MSnapshot *mir) : numSlots_(mir->numOperands() * BOX_PIECES), slots_(NULL), @@ -134,10 +140,8 @@ 
PrintDefinition(FILE *fp, const LDefinition &def) JS_NOT_REACHED("unexpected preset allocation type"); break; } - } else if (def.policy() == LDefinition::CAN_REUSE_INPUT) { - fprintf(fp, " (r?)"); } else if (def.policy() == LDefinition::MUST_REUSE_INPUT) { - fprintf(fp, " (rr)"); + fprintf(fp, " (!)"); } fprintf(fp, "]"); } @@ -161,39 +165,44 @@ PrintUse(FILE *fp, LUse *use) fprintf(fp, ")"); } +static void +PrintOperand(FILE *fp, LAllocation *a) +{ + switch (a->kind()) { + case LAllocation::CONSTANT_VALUE: + case LAllocation::CONSTANT_INDEX: + fprintf(fp, "(c)"); + break; + case LAllocation::GPR: + fprintf(fp, "(%s)", a->toGeneralReg()->reg().name()); + break; + case LAllocation::FPU: + fprintf(fp, "(%s)", a->toFloatReg()->reg().name()); + break; + case LAllocation::STACK_SLOT: + fprintf(fp, "(stack:i%d)", a->toStackSlot()->slot()); + break; + case LAllocation::DOUBLE_SLOT: + fprintf(fp, "(stack:d%d)", a->toStackSlot()->slot()); + break; + case LAllocation::ARGUMENT: + fprintf(fp, "(arg:%d)", a->toArgument()->index()); + break; + case LAllocation::USE: + PrintUse(fp, a->toUse()); + break; + default: + JS_NOT_REACHED("what?"); + break; + } +} + void LInstruction::printOperands(FILE *fp) { for (size_t i = 0; i < numOperands(); i++) { fprintf(fp, " "); - LAllocation *a = getOperand(i); - switch (a->kind()) { - case LAllocation::CONSTANT_VALUE: - case LAllocation::CONSTANT_INDEX: - fprintf(fp, "(c)"); - break; - case LAllocation::GPR: - fprintf(fp, "(=%s)", a->toGeneralReg()->reg().name()); - break; - case LAllocation::FPU: - fprintf(fp, "(=%s)", a->toFloatReg()->reg().name()); - break; - case LAllocation::STACK_SLOT: - fprintf(fp, "(stack:i%d)", a->toStackSlot()->slot()); - break; - case LAllocation::DOUBLE_SLOT: - fprintf(fp, "(stack:d%d)", a->toStackSlot()->slot()); - break; - case LAllocation::ARGUMENT: - fprintf(fp, "(arg:%d)", a->toArgument()->index()); - break; - case LAllocation::USE: - PrintUse(fp, a->toUse()); - break; - default: - JS_NOT_REACHED("what?"); - 
break; - } + PrintOperand(fp, getOperand(i)); if (i != numOperands() - 1) fprintf(fp, ","); } @@ -213,5 +222,30 @@ LInstruction::print(FILE *fp) fprintf(fp, ")"); printInfo(fp); + + if (numTemps()) { + fprintf(fp, " t=("); + for (size_t i = 0; i < numTemps(); i++) { + PrintDefinition(fp, *getTemp(i)); + if (i != numTemps() - 1) + fprintf(fp, ", "); + } + fprintf(fp, ")"); + } +} + +void +LMove::printOperands(FILE *fp) +{ + for (size_t i = 0; i < numEntries(); i++) { + Entry *e = getEntry(i); + fprintf(fp, "["); + PrintOperand(fp, &e->from); + fprintf(fp, " -> "); + PrintOperand(fp, &e->to); + fprintf(fp, "]"); + if (i != numEntries() - 1) + fprintf(fp, ", "); + } } diff --git a/js/src/ion/IonLIR.h b/js/src/ion/IonLIR.h index 5cd34fc3960..45dd477b858 100644 --- a/js/src/ion/IonLIR.h +++ b/js/src/ion/IonLIR.h @@ -140,6 +140,7 @@ class LAllocation JS_ASSERT(!isTagged()); bits_ |= TAG_MASK; } + inline explicit LAllocation(const AnyRegister ®); Kind kind() const { if (isTagged()) @@ -150,6 +151,9 @@ class LAllocation bool isUse() const { return kind() == USE; } + bool isConstant() const { + return isConstantValue() || isConstantIndex(); + } bool isConstantValue() const { return kind() == CONSTANT_VALUE; } @@ -171,6 +175,12 @@ class LAllocation bool isArgument() const { return kind() == ARGUMENT; } + bool isRegister() const { + return isGeneralReg() || isFloatReg(); + } + bool isMemory() const { + return isStackSlot() || isArgument(); + } inline LUse *toUse(); inline const LUse *toUse() const; inline const LGeneralReg *toGeneralReg() const; @@ -262,6 +272,9 @@ class LUse : public LAllocation JS_ASSERT(policy() == FIXED); return (data() >> REG_SHIFT) & REG_MASK; } + bool isFixedRegister() const { + return policy() == FIXED; + } }; class LGeneralReg : public LAllocation @@ -371,14 +384,12 @@ class LDefinition // definition. The allocation may be: // * A register, which may not appear as any fixed temporary. // * A stack slot or argument. - // * A constant. 
// // Register allocation will not modify a preset allocation. PRESET, - // With these policies, one definition may re-use the first input - // allocation. - CAN_REUSE_INPUT, + // One definition per instruction must re-use the first input + // allocation, which (for now) must be a register. MUST_REUSE_INPUT }; @@ -427,7 +438,9 @@ class LDefinition const LAllocation *output() const { return &output_; } - + bool isPreset() const { + return policy() == PRESET; + } void setVirtualRegister(uint32 index) { JS_ASSERT(index < VREG_MASK); bits_ &= ~(VREG_MASK << VREG_SHIFT); @@ -494,8 +507,7 @@ class LInstruction : public TempObject, virtual LDefinition *getDef(size_t index) = 0; virtual void setDef(size_t index, const LDefinition &def) = 0; - // Returns information about operands. Each unallocated operand is an LUse - // with a non-TEMPORARY policy. + // Returns information about operands. virtual size_t numOperands() const = 0; virtual LAllocation *getOperand(size_t index) = 0; virtual void setOperand(size_t index, const LAllocation &a) = 0; @@ -540,6 +552,7 @@ class LInstruction : public TempObject, }; typedef InlineList::iterator LInstructionIterator; +typedef InlineList::reverse_iterator LInstructionReverseIterator; class LPhi; class LBlock : public TempObject @@ -577,6 +590,15 @@ class LBlock : public TempObject LInstructionIterator end() { return instructions_.end(); } + InlineList &instructions() { + return instructions_; + } + void insertAfter(LInstruction *at, LInstruction *ins) { + instructions_.insertAfter(at, ins); + } + void insertBefore(LInstruction *at, LInstruction *ins) { + instructions_.insertBefore(at, ins); + } }; template @@ -647,6 +669,49 @@ class LSnapshot : public TempObject } }; +static const uint32 VREG_INCREMENT = 1; + +class LIRGraph +{ + Vector blocks_; + uint32 numVirtualRegisters_; + uint32 stackHeight_; + + public: + LIRGraph(); + + size_t numBlocks() const { + return blocks_.length(); + } + LBlock *getBlock(size_t i) const { + return 
blocks_[i]; + } + bool addBlock(LBlock *block) { + return blocks_.append(block); + } + uint32 getVirtualRegister() { + numVirtualRegisters_ += VREG_INCREMENT; + return numVirtualRegisters_; + } + uint32 numVirtualRegisters() const { + // Virtual registers are 1-based, not 0-based, so add one as a + // convenience for 0-based arrays. + return numVirtualRegisters_ + 1; + } + void setStackHeight(uint32 stackHeight) { + // Note that the stack height is counted in slots. + stackHeight_ = stackHeight; + } +}; + +LAllocation::LAllocation(const AnyRegister ®) +{ + if (reg.isFloat()) + *this = LFloatReg(reg.fpu()); + else + *this = LGeneralReg(reg.gpr()); +} + } // namespace ion } // namespace js @@ -666,5 +731,7 @@ class LSnapshot : public TempObject #undef LIR_HEADER +#include "IonLIR-inl.h" + #endif // jsion_lir_h__ diff --git a/js/src/ion/IonLowering-inl.h b/js/src/ion/IonLowering-inl.h index 1e4a1858ecc..1b44ffdec74 100644 --- a/js/src/ion/IonLowering-inl.h +++ b/js/src/ion/IonLowering-inl.h @@ -51,7 +51,7 @@ namespace ion { template bool LIRGenerator::define(LInstructionHelper<1, X, Y> *lir, MInstruction *mir, const LDefinition &def) { - uint32 vreg = nextVirtualRegister(); + uint32 vreg = getVirtualRegister(); if (vreg >= MAX_VIRTUAL_REGISTERS) return false; @@ -80,14 +80,14 @@ template bool LIRGenerator::defineBox(LInstructionHelper *lir, MInstruction *mir, LDefinition::Policy policy) { - uint32 vreg = nextVirtualRegister(); + uint32 vreg = getVirtualRegister(); if (vreg >= MAX_VIRTUAL_REGISTERS) return false; #if defined(JS_NUNBOX32) lir->setDef(0, LDefinition(vreg, LDefinition::TYPE, policy)); lir->setDef(1, LDefinition(vreg + 1, LDefinition::PAYLOAD, policy)); - if (nextVirtualRegister() >= MAX_VIRTUAL_REGISTERS) + if (getVirtualRegister() >= MAX_VIRTUAL_REGISTERS) return false; #elif defined(JS_PUNBOX64) lir->setDef(0, LDefinition(vreg, LDefinition::BOX, policy)); @@ -151,7 +151,7 @@ LIRGenerator::useFixed(MInstruction *mir, FloatRegister reg) LDefinition 
LIRGenerator::temp(LDefinition::Type type) { - uint32 vreg = nextVirtualRegister(); + uint32 vreg = getVirtualRegister(); if (vreg >= MAX_VIRTUAL_REGISTERS) { gen->error("max virtual registers"); return LDefinition(); @@ -159,6 +159,27 @@ LIRGenerator::temp(LDefinition::Type type) return LDefinition(vreg, type); } +template bool +LIRGenerator::annotate(T *ins) +{ + if (ins->numDefs()) { + ins->setId(ins->getDef(0)->virtualRegister()); + } else { + ins->setId(getVirtualRegister()); + if (ins->id() >= MAX_VIRTUAL_REGISTERS) + return false; + } + return true; +} + +template bool +LIRGenerator::add(T *ins) +{ + JS_ASSERT(!ins->isPhi()); + current->add(ins); + return annotate(ins); +} + } // namespace js } // namespace ion diff --git a/js/src/ion/IonLowering.cpp b/js/src/ion/IonLowering.cpp index 2610166e1e3..60e3510974c 100644 --- a/js/src/ion/IonLowering.cpp +++ b/js/src/ion/IonLowering.cpp @@ -361,6 +361,8 @@ LIRGenerator::visitBlock(MBasicBlock *block) if (!visitInstruction(block->lastIns())) return false; + if (!lirGraph_.addBlock(current)) + return false; block->assignLir(current); return true; } diff --git a/js/src/ion/IonLowering.h b/js/src/ion/IonLowering.h index 93bb3d07eea..4351c2eac10 100644 --- a/js/src/ion/IonLowering.h +++ b/js/src/ion/IonLowering.h @@ -57,8 +57,6 @@ class MIRGenerator; class MIRGraph; class MInstruction; -static const uint32 VREG_INCREMENT = 1; - class LIRGenerator : public MInstructionVisitor { protected: @@ -66,23 +64,19 @@ class LIRGenerator : public MInstructionVisitor private: MIRGraph &graph; + LIRGraph &lirGraph_; LBlock *current; - uint32 vregGen_; MSnapshot *last_snapshot_; public: - LIRGenerator(MIRGenerator *gen, MIRGraph &graph) + LIRGenerator(MIRGenerator *gen, MIRGraph &graph, LIRGraph &lirGraph) : gen(gen), graph(graph), - vregGen_(0), + lirGraph_(lirGraph), last_snapshot_(NULL) { } bool generate(); - uint32 nextVirtualRegister() { - vregGen_ += VREG_INCREMENT; - return vregGen_; - } protected: // A backend can decide that 
an instruction should be emitted at its uses, @@ -135,24 +129,12 @@ class LIRGenerator : public MInstructionVisitor typedef LInstructionHelper<1, 2, 0> LMathI; virtual bool lowerForALU(LMathI *ins, MInstruction *mir, MInstruction *lhs, MInstruction *rhs) = 0; - template - bool annotate(T *ins) { - if (ins->numDefs()) { - ins->setId(ins->getDef(0)->virtualRegister()); - } else { - ins->setId(nextVirtualRegister()); - if (ins->id() >= MAX_VIRTUAL_REGISTERS) - return false; - } - return true; + uint32 getVirtualRegister() { + return lirGraph_.getVirtualRegister(); } - template - bool add(T *ins) { - JS_ASSERT(!ins->isPhi()); - current->add(ins); - return annotate(ins); - } + template bool annotate(T *ins); + template bool add(T *ins); bool addPhi(LPhi *phi) { return current->addPhi(phi) && annotate(phi); diff --git a/js/src/ion/LIR-Common.h b/js/src/ion/LIR-Common.h index cf79f89c723..48505561830 100644 --- a/js/src/ion/LIR-Common.h +++ b/js/src/ion/LIR-Common.h @@ -54,6 +54,11 @@ class LMove : public LInstructionHelper<0, 0, 0> struct Entry { LAllocation from; LAllocation to; + + Entry() { } + Entry(const LAllocation &from, const LAllocation &to) + : from(from), to(to) + { } }; private: @@ -62,21 +67,22 @@ class LMove : public LInstructionHelper<0, 0, 0> public: LIR_HEADER(Move); + bool add(const LAllocation &from, const LAllocation &to) { + return entries_.append(Entry(from, to)); + } + bool add(const Entry &ent) { + return entries_.append(ent); + } size_t numEntries() { return entries_.length(); } - - bool addEntry(Entry ent) { - return entries_.append(ent); - } - Entry *getEntry(size_t i) { return &entries_[i]; } - void setEntry(size_t i, Entry ent) { entries_[i] = ent; } + void printOperands(FILE *fp); }; // Constant 32-bit integer. 
@@ -280,7 +286,6 @@ class LPhi : public LInstruction } }; - } // namespace ion } // namespace js diff --git a/js/src/ion/x64/Architecture-x64.h b/js/src/ion/x64/Architecture-x64.h index a84011752eb..39abad21c61 100644 --- a/js/src/ion/x64/Architecture-x64.h +++ b/js/src/ion/x64/Architecture-x64.h @@ -46,6 +46,7 @@ namespace js { namespace ion { static const ptrdiff_t STACK_SLOT_SIZE = 8; +static const uint32 MAX_STACK_SLOTS = 256; class RegisterCodes { public: diff --git a/js/src/ion/x64/Lowering-x64.cpp b/js/src/ion/x64/Lowering-x64.cpp index ebc4c37a5dc..96718948f35 100644 --- a/js/src/ion/x64/Lowering-x64.cpp +++ b/js/src/ion/x64/Lowering-x64.cpp @@ -97,7 +97,7 @@ LIRGeneratorX64::visitUnbox(MUnbox *unbox) } case MIRType_Object: { // Objects don't need a temporary. - LDefinition out(LDefinition::POINTER, LDefinition::CAN_REUSE_INPUT); + LDefinition out(LDefinition::POINTER); LUnboxObject *ins = new LUnboxObject(useRegister(box)); return define(ins, unbox, out) && assignSnapshot(ins); } @@ -127,7 +127,7 @@ LIRGeneratorX64::visitReturn(MReturn *ret) bool LIRGeneratorX64::preparePhi(MPhi *phi) { - uint32 vreg = nextVirtualRegister(); + uint32 vreg = getVirtualRegister(); if (vreg >= MAX_VIRTUAL_REGISTERS) return false; diff --git a/js/src/ion/x64/Lowering-x64.h b/js/src/ion/x64/Lowering-x64.h index 320ab95e67a..7ac89ef93ca 100644 --- a/js/src/ion/x64/Lowering-x64.h +++ b/js/src/ion/x64/Lowering-x64.h @@ -50,8 +50,8 @@ namespace ion { class LIRGeneratorX64 : public LIRGenerator { public: - LIRGeneratorX64(MIRGenerator *gen, MIRGraph &graph) - : LIRGenerator(gen, graph) + LIRGeneratorX64(MIRGenerator *gen, MIRGraph &graph, LIRGraph &lirGraph) + : LIRGenerator(gen, graph, lirGraph) { } protected: diff --git a/js/src/ion/x64/StackAssignment-x64.h b/js/src/ion/x64/StackAssignment-x64.h new file mode 100644 index 00000000000..c8d8ed7a7d3 --- /dev/null +++ b/js/src/ion/x64/StackAssignment-x64.h @@ -0,0 +1,89 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; 
c-basic-offset: 4 -*- + * vim: set ts=4 sw=4 et tw=79: + * + * ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is Mozilla Communicator client code, released + * March 31, 1998. + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998 + * the Initial Developer. All Rights Reserved. + * + * Contributor(s): + * David Anderson + * + * Alternatively, the contents of this file may be used under the terms of + * either of the GNU General Public License Version 2 or later (the "GPL"), + * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. 
+ * + * ***** END LICENSE BLOCK ***** */ + +#ifndef jsion_cpu_x64_stack_assignment_h__ +#define jsion_cpu_x64_stack_assignment_h__ + +namespace js { +namespace ion { + +class StackAssignmentX64 +{ + js::Vector slots; + uint32 height_; + + public: + StackAssignmentX64() : height_(0) + { } + + void freeSlot(uint32 index) { + slots.append(index); + } + + void freeDoubleSlot(uint32 index) { + freeSlot(index); + } + + bool allocateDoubleSlot(uint32 *index) { + return allocateSlot(index); + } + + bool allocateSlot(uint32 *index) { + if (!slots.empty()) { + *index = slots.popCopy(); + return true; + } + *index = height_++; + return height_ < MAX_STACK_SLOTS; + } + + uint32 stackHeight() const { + return height_; + } +}; + +typedef StackAssignmentX64 StackAssignment; + +} // namespace ion +} // namespace js + +#endif // jsion_cpu_x64_stack_assignment_h__ + diff --git a/js/src/ion/x86/Architecture-x86.h b/js/src/ion/x86/Architecture-x86.h index 36bedfcc784..f69c0fa8a33 100644 --- a/js/src/ion/x86/Architecture-x86.h +++ b/js/src/ion/x86/Architecture-x86.h @@ -46,6 +46,8 @@ namespace js { namespace ion { static const ptrdiff_t STACK_SLOT_SIZE = 4; +static const uint32 MAX_STACK_SLOTS = 256; +static const uint32 DOUBLE_ALIGNMENT = 8; class RegisterCodes { public: diff --git a/js/src/ion/x86/Lowering-x86.cpp b/js/src/ion/x86/Lowering-x86.cpp index e2bcd651497..3364ecb4be6 100644 --- a/js/src/ion/x86/Lowering-x86.cpp +++ b/js/src/ion/x86/Lowering-x86.cpp @@ -89,7 +89,7 @@ LIRGeneratorX86::visitUnbox(MUnbox *unbox) LUnbox *lir = new LUnbox(unbox->type()); lir->setOperand(0, usePayloadInRegister(inner)); lir->setOperand(1, useType(inner)); - if (!define(lir, unbox, LDefinition::CAN_REUSE_INPUT)) + if (!defineReuseInput(lir, unbox)) return false; return assignSnapshot(lir); } @@ -109,14 +109,14 @@ LIRGeneratorX86::visitReturn(MReturn *ret) bool LIRGeneratorX86::preparePhi(MPhi *phi) { - uint32 first_vreg = nextVirtualRegister(); + uint32 first_vreg = getVirtualRegister(); if 
(first_vreg >= MAX_VIRTUAL_REGISTERS) return false; phi->setId(first_vreg); if (phi->type() == MIRType_Value) { - uint32 payload_vreg = nextVirtualRegister(); + uint32 payload_vreg = getVirtualRegister(); if (payload_vreg >= MAX_VIRTUAL_REGISTERS) return false; JS_ASSERT(first_vreg + VREG_INCREMENT == payload_vreg); diff --git a/js/src/ion/x86/Lowering-x86.h b/js/src/ion/x86/Lowering-x86.h index 6fb3e6772ed..67675b2c03b 100644 --- a/js/src/ion/x86/Lowering-x86.h +++ b/js/src/ion/x86/Lowering-x86.h @@ -50,8 +50,8 @@ namespace ion { class LIRGeneratorX86 : public LIRGenerator { public: - LIRGeneratorX86(MIRGenerator *gen, MIRGraph &graph) - : LIRGenerator(gen, graph) + LIRGeneratorX86(MIRGenerator *gen, MIRGraph &graph, LIRGraph &lirGraph) + : LIRGenerator(gen, graph, lirGraph) { } protected: diff --git a/js/src/ion/x86/StackAssignment-x86.h b/js/src/ion/x86/StackAssignment-x86.h new file mode 100644 index 00000000000..58ceb236b9d --- /dev/null +++ b/js/src/ion/x86/StackAssignment-x86.h @@ -0,0 +1,103 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=4 sw=4 et tw=79: + * + * ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is Mozilla Communicator client code, released + * March 31, 1998. + * + * The Initial Developer of the Original Code is + * Netscape Communications Corporation. + * Portions created by the Initial Developer are Copyright (C) 1998 + * the Initial Developer. 
All Rights Reserved. + * + * Contributor(s): + * David Anderson + * + * Alternatively, the contents of this file may be used under the terms of + * either of the GNU General Public License Version 2 or later (the "GPL"), + * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + +#ifndef jsion_cpu_x86_stack_assignment_h__ +#define jsion_cpu_x86_stack_assignment_h__ +namespace js { +namespace ion { + +class StackAssignmentX86 +{ + js::Vector normalSlots; + js::Vector doubleSlots; + uint32 height_; + + public: + StackAssignmentX86() : height_(0) + { } + + void freeSlot(uint32 index) { + normalSlots.append(index); + } + void freeDoubleSlot(uint32 index) { + doubleSlots.append(index); + } + + bool allocateDoubleSlot(uint32 *index) { + if (!doubleSlots.empty()) { + *index = doubleSlots.popCopy(); + return true; + } + if (ComputeByteAlignment(height_, DOUBLE_ALIGNMENT)) { + normalSlots.append(height_++); + JS_ASSERT(!ComputeByteAlignment(height_, DOUBLE_ALIGNMENT)); + } + *index = height_; + height_ += 2; + return height_ < MAX_STACK_SLOTS; + } + + bool allocateSlot(uint32 *index) { + if (!normalSlots.empty()) { + *index = normalSlots.popCopy(); + return true; + } + if (!doubleSlots.empty()) { + *index = doubleSlots.popCopy(); + return normalSlots.append(*index + 1); + } + *index = height_++; + return height_ < 
MAX_STACK_SLOTS; + } + + uint32 stackHeight() const { + return height_; + } +}; + +typedef StackAssignmentX86 StackAssignment; + +} // namespace ion +} // namespace js + +#endif // jsion_cpu_x86_stack_assignment_h__ + diff --git a/js/src/jsutil.h b/js/src/jsutil.h index a9c65988f0f..fafd4632f38 100644 --- a/js/src/jsutil.h +++ b/js/src/jsutil.h @@ -1,4 +1,5 @@ /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=4 sw=4 tw=99 et: * * ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 @@ -674,6 +675,28 @@ PodEqual(T *one, T *two, size_t len) return !memcmp(one, two, len * sizeof(T)); } +template +static inline bool +IsPowerOfTwo(T t) +{ + return t && !(t & (t - 1)); +} + +template +static inline U +ComputeByteAlignment(T bytes, U alignment) +{ + JS_ASSERT(IsPowerOfTwo(alignment)); + return (alignment - (bytes % alignment)) % alignment; +} + +template +static inline T +AlignBytes(T bytes, U alignment) +{ + return bytes + ComputeByteAlignment(bytes, alignment); +} + } /* namespace js */ #endif /* defined(__cplusplus) */