Bug 937540 part 7 - Use placement new syntax for range analysis and some other classes. r=nbp

This commit is contained in:
Jan de Mooij 2013-12-05 13:32:13 +01:00
parent 752584ada7
commit d1c0b907ce
13 changed files with 371 additions and 297 deletions

View File

@ -165,9 +165,6 @@ class AutoIonContextAlloc
struct TempObject
{
inline void *operator new(size_t nbytes) {
return GetIonContext()->temp->allocateInfallible(nbytes);
}
inline void *operator new(size_t nbytes, TempAllocator &alloc) {
return alloc.allocateInfallible(nbytes);
}
@ -179,6 +176,18 @@ struct TempObject
}
};
// Deprecated, don't use for (new) classes. Will be removed when all classes have
// been converted to placement new/TempObject (bug 937540).
//
// Transitional shim: classes not yet converted to placement-new allocation
// derive from this instead of TempObject so they keep the old global-context
// `new Foo(...)` spelling while the migration is in progress.
struct OldTempObject
: public TempObject
{
// Re-expose TempObject's remaining operator new overload(s) — declaring the
// overload below would otherwise hide every inherited form by name hiding.
using TempObject::operator new;
// Legacy no-argument form: allocates from the ambient IonContext's temp
// allocator via allocateInfallible (so callers need not null-check).
inline void *operator new(size_t nbytes) {
return GetIonContext()->temp->allocateInfallible(nbytes);
}
};
template <typename T>
class TempObjectPool
{

View File

@ -666,7 +666,7 @@ IonBuilder::build()
// this will create an OSI point that will read the incoming argument
// values, which is nice to do before their last real use, to minimize
// register/stack pressure.
MCheckOverRecursed *check = new MCheckOverRecursed;
MCheckOverRecursed *check = MCheckOverRecursed::New(alloc());
current->add(check);
check->setResumePoint(current->entryResumePoint());
@ -2445,7 +2445,7 @@ IonBuilder::processBreak(JSOp op, jssrcnote *sn)
CFGState &cfg = cfgStack_[labels_[i].cfgEntry];
JS_ASSERT(cfg.state == CFGState::LABEL);
if (cfg.stopAt == target) {
cfg.label.breaks = new DeferredEdge(current, cfg.label.breaks);
cfg.label.breaks = new(alloc()) DeferredEdge(current, cfg.label.breaks);
found = true;
break;
}
@ -2455,7 +2455,7 @@ IonBuilder::processBreak(JSOp op, jssrcnote *sn)
CFGState &cfg = cfgStack_[loops_[i].cfgEntry];
JS_ASSERT(cfg.isLoop());
if (cfg.loop.exitpc == target) {
cfg.loop.breaks = new DeferredEdge(current, cfg.loop.breaks);
cfg.loop.breaks = new(alloc()) DeferredEdge(current, cfg.loop.breaks);
found = true;
break;
}
@ -2499,7 +2499,7 @@ IonBuilder::processContinue(JSOp op)
JS_ASSERT(found);
CFGState &state = *found;
state.loop.continues = new DeferredEdge(current, state.loop.continues);
state.loop.continues = new(alloc()) DeferredEdge(current, state.loop.continues);
setCurrent(nullptr);
pc += js_CodeSpec[op].length;
@ -2538,7 +2538,7 @@ IonBuilder::processSwitchBreak(JSOp op)
MOZ_ASSUME_UNREACHABLE("Unexpected switch state.");
}
*breaks = new DeferredEdge(current, *breaks);
*breaks = new(alloc()) DeferredEdge(current, *breaks);
setCurrent(nullptr);
pc += js_CodeSpec[op].length;
@ -5210,7 +5210,7 @@ IonBuilder::makeCallHelper(JSFunction *target, CallInfo &callInfo, bool cloneAtC
// potentially perform rearrangement.
JS_ASSERT(callInfo.thisArg()->isPassArg());
MPassArg *thisArg = callInfo.thisArg()->toPassArg();
MPrepareCall *start = new MPrepareCall;
MPrepareCall *start = MPrepareCall::New(alloc());
thisArg->block()->insertBefore(thisArg, start);
call->initPrepareCall(start);
@ -6726,10 +6726,11 @@ IonBuilder::getElemTryComplexElemOfTypedObject(bool *emitted,
loadTypedObjectData(obj, indexAsByteOffset, &owner, &ownerOffset);
// Create the derived type object.
MInstruction *derived = new MNewDerivedTypedObject(elemTypeReprs,
elemType,
owner,
ownerOffset);
MInstruction *derived = MNewDerivedTypedObject::New(alloc(),
elemTypeReprs,
elemType,
owner,
ownerOffset);
types::TemporaryTypeSet *resultTypes = bytecodeTypes(pc);
derived->setResultTypeSet(resultTypes);
@ -7701,7 +7702,7 @@ IonBuilder::jsop_length_fastPath()
current->add(elements);
// Read length.
MArrayLength *length = new MArrayLength(elements);
MArrayLength *length = MArrayLength::New(alloc(), elements);
current->add(length);
current->push(length);
return true;
@ -7849,7 +7850,7 @@ IonBuilder::jsop_not()
{
MDefinition *value = current->pop();
MNot *ins = new MNot(value);
MNot *ins = MNot::New(alloc(), value);
current->add(ins);
current->push(ins);
ins->infer();
@ -8362,10 +8363,11 @@ IonBuilder::getPropTryComplexPropOfTypedObject(bool *emitted,
&owner, &ownerOffset);
// Create the derived type object.
MInstruction *derived = new MNewDerivedTypedObject(fieldTypeReprs,
fieldType,
owner,
ownerOffset);
MInstruction *derived = MNewDerivedTypedObject::New(alloc(),
fieldTypeReprs,
fieldType,
owner,
ownerOffset);
derived->setResultTypeSet(resultTypes);
current->add(derived);
current->push(derived);
@ -9495,7 +9497,7 @@ IonBuilder::jsop_in()
current->pop();
current->pop();
MIn *ins = new MIn(id, obj);
MIn *ins = MIn::New(alloc(), id, obj);
current->add(ins);
current->push(ins);
@ -9558,7 +9560,7 @@ IonBuilder::jsop_instanceof()
rhs->setFoldedUnchecked();
MInstanceOf *ins = new MInstanceOf(obj, protoObject);
MInstanceOf *ins = MInstanceOf::New(alloc(), obj, protoObject);
current->add(ins);
current->push(ins);
@ -9566,7 +9568,7 @@ IonBuilder::jsop_instanceof()
return resumeAfter(ins);
} while (false);
MCallInstanceOf *ins = new MCallInstanceOf(obj, rhs);
MCallInstanceOf *ins = MCallInstanceOf::New(alloc(), obj, rhs);
current->add(ins);
current->push(ins);

View File

@ -129,7 +129,7 @@ LSnapshot::init(MIRGenerator *gen)
LSnapshot *
LSnapshot::New(MIRGenerator *gen, MResumePoint *mir, BailoutKind kind)
{
LSnapshot *snapshot = new LSnapshot(mir, kind);
LSnapshot *snapshot = new(gen->alloc()) LSnapshot(mir, kind);
if (!snapshot->init(gen))
return nullptr;
@ -357,7 +357,7 @@ void
LInstruction::initSafepoint(TempAllocator &alloc)
{
JS_ASSERT(!safepoint_);
safepoint_ = new LSafepoint(alloc);
safepoint_ = new(alloc) LSafepoint(alloc);
JS_ASSERT(safepoint_);
}

View File

@ -111,12 +111,12 @@ class LAllocation : public TempObject
LAllocation() : bits_(0)
{ }
static LAllocation *New() {
return new LAllocation();
static LAllocation *New(TempAllocator &alloc) {
return new(alloc) LAllocation();
}
template <typename T>
static LAllocation *New(const T &other) {
return new LAllocation(other);
static LAllocation *New(TempAllocator &alloc, const T &other) {
return new(alloc) LAllocation(other);
}
// The value pointer must be rooted in MIR and have its low bit cleared.
@ -569,7 +569,7 @@ class LSafepoint;
class LInstructionVisitor;
class LInstruction
: public TempObject,
: public OldTempObject,
public InlineListNode<LInstruction>
{
uint32_t id_;

View File

@ -381,13 +381,13 @@ LinearScanAllocator::reifyAllocations()
spillFrom = from->getAllocation();
} else {
if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
LAllocation *alloc = reg->ins()->getOperand(def->getReusedInput());
LAllocation *origAlloc = LAllocation::New(*alloc);
LAllocation *inputAlloc = reg->ins()->getOperand(def->getReusedInput());
LAllocation *origAlloc = LAllocation::New(alloc(), *inputAlloc);
JS_ASSERT(!alloc->isUse());
JS_ASSERT(!inputAlloc->isUse());
*alloc = *interval->getAllocation();
if (!moveInputAlloc(inputOf(reg->ins()), origAlloc, alloc))
*inputAlloc = *interval->getAllocation();
if (!moveInputAlloc(inputOf(reg->ins()), origAlloc, inputAlloc))
return false;
}

View File

@ -573,7 +573,7 @@ IonBuilder::inlineMathFloor(CallInfo &callInfo)
if (IsFloatingPointType(argType) && returnType == MIRType_Int32) {
callInfo.unwrapArgs();
MFloor *ins = new MFloor(callInfo.getArg(0));
MFloor *ins = MFloor::New(alloc(), callInfo.getArg(0));
current->add(ins);
current->push(ins);
return InliningStatus_Inlined;
@ -611,7 +611,7 @@ IonBuilder::inlineMathRound(CallInfo &callInfo)
if (argType == MIRType_Double && returnType == MIRType_Int32) {
callInfo.unwrapArgs();
MRound *ins = new MRound(callInfo.getArg(0));
MRound *ins = MRound::New(alloc(), callInfo.getArg(0));
current->add(ins);
current->push(ins);
return InliningStatus_Inlined;
@ -1367,7 +1367,7 @@ IonBuilder::inlineParallelArrayTail(CallInfo &callInfo,
// Place an MPrepareCall before the first passed argument, before we
// potentially perform rearrangement.
MPrepareCall *start = new MPrepareCall;
MPrepareCall *start = MPrepareCall::New(alloc());
oldThis->block()->insertBefore(oldThis, start);
call->initPrepareCall(start);
@ -1439,9 +1439,10 @@ IonBuilder::inlineNewDenseArrayForParallelExecution(CallInfo &callInfo)
callInfo.unwrapArgs();
MNewDenseArrayPar *newObject = new MNewDenseArrayPar(graph().forkJoinSlice(),
callInfo.getArg(0),
templateObject);
MNewDenseArrayPar *newObject = MNewDenseArrayPar::New(alloc(),
graph().forkJoinSlice(),
callInfo.getArg(0),
templateObject);
current->add(newObject);
current->push(newObject);

View File

@ -389,7 +389,7 @@ class MDefinition : public MNode
bool earlyAbortCheck();
// Compute an absolute or symbolic range for the value of this node.
virtual void computeRange() {
virtual void computeRange(TempAllocator &alloc) {
}
// Collect information from the pre-truncated ranges.
@ -991,7 +991,7 @@ class MConstant : public MNullaryInstruction
return true;
}
void computeRange();
void computeRange(TempAllocator &alloc);
bool truncate();
bool canProduceFloat32() const;
@ -1512,9 +1512,6 @@ class MNewPar : public MUnaryInstruction
{
CompilerRootObject templateObject_;
public:
INSTRUCTION_HEADER(NewPar);
MNewPar(MDefinition *slice, JSObject *templateObject)
: MUnaryInstruction(slice),
templateObject_(templateObject)
@ -1522,6 +1519,13 @@ class MNewPar : public MUnaryInstruction
setResultType(MIRType_Object);
}
public:
INSTRUCTION_HEADER(NewPar);
static MNewPar *New(TempAllocator &alloc, MDefinition *slice, JSObject *templateObject) {
return new(alloc) MNewPar(slice, templateObject);
}
MDefinition *forkJoinSlice() const {
return getOperand(0);
}
@ -1552,9 +1556,6 @@ class MNewDerivedTypedObject
private:
TypeRepresentationSet set_;
public:
INSTRUCTION_HEADER(NewDerivedTypedObject);
MNewDerivedTypedObject(TypeRepresentationSet set,
MDefinition *type,
MDefinition *owner,
@ -1566,6 +1567,15 @@ class MNewDerivedTypedObject
setResultType(MIRType_Object);
}
public:
INSTRUCTION_HEADER(NewDerivedTypedObject);
static MNewDerivedTypedObject *New(TempAllocator &alloc, TypeRepresentationSet set,
MDefinition *type, MDefinition *owner, MDefinition *offset)
{
return new(alloc) MNewDerivedTypedObject(set, type, owner, offset);
}
TypeRepresentationSet set() const {
return set_;
}
@ -1764,8 +1774,9 @@ class MPrepareCall : public MNullaryInstruction
public:
INSTRUCTION_HEADER(PrepareCall)
MPrepareCall()
{ }
static MPrepareCall *New(TempAllocator &alloc) {
return new(alloc) MPrepareCall();
}
// Get the vector size for the upcoming call by looking at the call.
uint32_t argc() const;
@ -2899,7 +2910,7 @@ class MToDouble
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
bool truncate();
bool isOperandTruncated(size_t index) const;
@ -2961,7 +2972,7 @@ class MToFloat32
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
bool canConsumeFloat32() const { return true; }
bool canProduceFloat32() const { return true; }
@ -3068,7 +3079,7 @@ class MToInt32
AliasSet getAliasSet() const {
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
#ifdef DEBUG
bool isConsistentFloat32Use() const { return true; }
@ -3104,7 +3115,7 @@ class MTruncateToInt32 : public MUnaryInstruction
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
bool isOperandTruncated(size_t index) const;
# ifdef DEBUG
bool isConsistentFloat32Use() const {
@ -3173,7 +3184,7 @@ class MBitNot
return AliasSet::Store(AliasSet::Any);
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
class MTypeOf
@ -3300,7 +3311,7 @@ class MBitAnd : public MBinaryBitwiseInstruction
MDefinition *foldIfEqual() {
return getOperand(0); // x & x => x;
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
class MBitOr : public MBinaryBitwiseInstruction
@ -3323,7 +3334,7 @@ class MBitOr : public MBinaryBitwiseInstruction
MDefinition *foldIfEqual() {
return getOperand(0); // x | x => x
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
class MBitXor : public MBinaryBitwiseInstruction
@ -3346,7 +3357,7 @@ class MBitXor : public MBinaryBitwiseInstruction
MDefinition *foldIfEqual() {
return this;
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
class MShiftInstruction
@ -3384,7 +3395,7 @@ class MLsh : public MShiftInstruction
return getOperand(0);
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
class MRsh : public MShiftInstruction
@ -3403,7 +3414,7 @@ class MRsh : public MShiftInstruction
// x >> 0 => x
return getOperand(0);
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
class MUrsh : public MShiftInstruction
@ -3436,7 +3447,7 @@ class MUrsh : public MShiftInstruction
bool fallible() const;
void computeRange();
void computeRange(TempAllocator &alloc);
void collectRangeInfoPreTrunc();
};
@ -3546,7 +3557,7 @@ class MMinMax
AliasSet getAliasSet() const {
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
class MAbs
@ -3590,7 +3601,7 @@ class MAbs
AliasSet getAliasSet() const {
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
bool isFloat32Commutative() const { return true; }
void trySpecializeFloat32(TempAllocator &alloc);
};
@ -3630,7 +3641,7 @@ class MSqrt
AliasSet getAliasSet() const {
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
bool isFloat32Commutative() const { return true; }
void trySpecializeFloat32(TempAllocator &alloc);
@ -3830,7 +3841,7 @@ class MRandom : public MNullaryInstruction
return true;
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
class MMathFunction
@ -3920,7 +3931,7 @@ class MMathFunction
|| function_ == ASin || function_ == ACos || function_ == Floor;
}
void trySpecializeFloat32(TempAllocator &alloc);
void computeRange();
void computeRange(TempAllocator &alloc);
};
class MAdd : public MBinaryArithInstruction
@ -3958,7 +3969,7 @@ class MAdd : public MBinaryArithInstruction
}
bool fallible() const;
void computeRange();
void computeRange(TempAllocator &alloc);
bool truncate();
bool isOperandTruncated(size_t index) const;
};
@ -3994,7 +4005,7 @@ class MSub : public MBinaryArithInstruction
bool isFloat32Commutative() const { return true; }
bool fallible() const;
void computeRange();
void computeRange(TempAllocator &alloc);
bool truncate();
bool isOperandTruncated(size_t index) const;
};
@ -4083,7 +4094,7 @@ class MMul : public MBinaryArithInstruction
bool isFloat32Commutative() const { return true; }
void computeRange();
void computeRange(TempAllocator &alloc);
bool truncate();
bool isOperandTruncated(size_t index) const;
@ -4162,7 +4173,7 @@ class MDiv : public MBinaryArithInstruction
bool isFloat32Commutative() const { return true; }
void computeRange();
void computeRange(TempAllocator &alloc);
bool fallible() const;
bool truncate();
void collectRangeInfoPreTrunc();
@ -4217,7 +4228,7 @@ class MMod : public MBinaryArithInstruction
bool fallible() const;
void computeRange();
void computeRange(TempAllocator &alloc);
bool truncate();
void collectRangeInfoPreTrunc();
};
@ -4316,7 +4327,7 @@ class MCharCodeAt
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
class MFromCharCode
@ -4527,7 +4538,7 @@ class MPhi MOZ_FINAL : public MDefinition, public InlineForwardListNode<MPhi>
AliasSet getAliasSet() const {
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
MDefinition *operandIfRedundant() {
// If this phi is redundant (e.g., phi(a,a) or b=phi(a,this)),
@ -4589,7 +4600,7 @@ class MBeta : public MUnaryInstruction
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
// MIR representation of a Value on the OSR StackFrame.
@ -4696,15 +4707,16 @@ class MCheckOverRecursed : public MNullaryInstruction
{
public:
INSTRUCTION_HEADER(CheckOverRecursed)
static MCheckOverRecursed *New(TempAllocator &alloc) {
return new(alloc) MCheckOverRecursed();
}
};
// Check the current frame for over-recursion past the global stack limit.
// Uses the per-thread recursion limit.
class MCheckOverRecursedPar : public MUnaryInstruction
{
public:
INSTRUCTION_HEADER(CheckOverRecursedPar);
MCheckOverRecursedPar(MDefinition *slice)
: MUnaryInstruction(slice)
{
@ -4713,6 +4725,13 @@ class MCheckOverRecursedPar : public MUnaryInstruction
setMovable();
}
public:
INSTRUCTION_HEADER(CheckOverRecursedPar);
static MCheckOverRecursedPar *New(TempAllocator &alloc, MDefinition *slice) {
return new(alloc) MCheckOverRecursedPar(slice);
}
MDefinition *forkJoinSlice() const {
return getOperand(0);
}
@ -4721,9 +4740,6 @@ class MCheckOverRecursedPar : public MUnaryInstruction
// Check for an interrupt (or rendezvous) in parallel mode.
class MCheckInterruptPar : public MUnaryInstruction
{
public:
INSTRUCTION_HEADER(CheckInterruptPar);
MCheckInterruptPar(MDefinition *slice)
: MUnaryInstruction(slice)
{
@ -4732,6 +4748,13 @@ class MCheckInterruptPar : public MUnaryInstruction
setMovable();
}
public:
INSTRUCTION_HEADER(CheckInterruptPar);
static MCheckInterruptPar *New(TempAllocator &alloc, MDefinition *slice) {
return new(alloc) MCheckInterruptPar(slice);
}
MDefinition *forkJoinSlice() const {
return getOperand(0);
}
@ -5234,7 +5257,7 @@ class MInitializedLength
return AliasSet::Load(AliasSet::ObjectFields);
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
// Store to the initialized length in an elements header. Note the input is an
@ -5269,7 +5292,6 @@ class MSetInitializedLength
class MArrayLength
: public MUnaryInstruction
{
public:
MArrayLength(MDefinition *elements)
: MUnaryInstruction(elements)
{
@ -5277,8 +5299,13 @@ class MArrayLength
setMovable();
}
public:
INSTRUCTION_HEADER(ArrayLength)
static MArrayLength *New(TempAllocator &alloc, MDefinition *elements) {
return new(alloc) MArrayLength(elements);
}
MDefinition *elements() const {
return getOperand(0);
}
@ -5289,7 +5316,7 @@ class MArrayLength
return AliasSet::Load(AliasSet::ObjectFields);
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
// Store to the length in an elements header. Note the input is an *index*, one
@ -5354,7 +5381,7 @@ class MTypedArrayLength
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
// Load a typed array's elements vector.
@ -5546,7 +5573,7 @@ class MBoundsCheck
virtual AliasSet getAliasSet() const {
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
// Bailout if index < minimum.
@ -5905,7 +5932,7 @@ class MArrayPush
AliasSet getAliasSet() const {
return AliasSet::Store(AliasSet::Element | AliasSet::ObjectFields);
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
// Array.prototype.concat on two dense arrays.
@ -5997,7 +6024,7 @@ class MLoadTypedArrayElement
void printOpcode(FILE *fp) const;
void computeRange();
void computeRange(TempAllocator &alloc);
bool canProduceFloat32() const { return arrayType_ == ScalarTypeRepresentation::TYPE_FLOAT32; }
};
@ -6105,7 +6132,7 @@ class MLoadTypedArrayElementStatic
return this;
}
void computeRange();
void computeRange(TempAllocator &alloc);
bool truncate();
bool canProduceFloat32() const { return typedArray_->type() == ScalarTypeRepresentation::TYPE_FLOAT32; }
};
@ -6355,7 +6382,7 @@ class MClampToUint8
AliasSet getAliasSet() const {
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
class MLoadFixedSlot
@ -8015,7 +8042,7 @@ class MStringLength
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
// Inlined version of Math.floor().
@ -8023,7 +8050,6 @@ class MFloor
: public MUnaryInstruction,
public FloatingPointPolicy<0>
{
public:
MFloor(MDefinition *num)
: MUnaryInstruction(num)
{
@ -8032,8 +8058,13 @@ class MFloor
setMovable();
}
public:
INSTRUCTION_HEADER(Floor)
static MFloor *New(TempAllocator &alloc, MDefinition *num) {
return new(alloc) MFloor(num);
}
MDefinition *num() const {
return getOperand(0);
}
@ -8059,7 +8090,6 @@ class MRound
: public MUnaryInstruction,
public DoublePolicy<0>
{
public:
MRound(MDefinition *num)
: MUnaryInstruction(num)
{
@ -8067,8 +8097,13 @@ class MRound
setMovable();
}
public:
INSTRUCTION_HEADER(Round)
static MRound *New(TempAllocator &alloc, MDefinition *num) {
return new(alloc) MRound(num);
}
MDefinition *num() const {
return getOperand(0);
}
@ -8188,15 +8223,19 @@ class MIn
: public MBinaryInstruction,
public MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >
{
public:
MIn(MDefinition *key, MDefinition *obj)
: MBinaryInstruction(key, obj)
{
setResultType(MIRType_Boolean);
}
public:
INSTRUCTION_HEADER(In)
static MIn *New(TempAllocator &alloc, MDefinition *key, MDefinition *obj) {
return new(alloc) MIn(key, obj);
}
TypePolicy *typePolicy() {
return this;
}
@ -8273,7 +8312,6 @@ class MInstanceOf
{
CompilerRootObject protoObj_;
public:
MInstanceOf(MDefinition *obj, JSObject *proto)
: MUnaryInstruction(obj),
protoObj_(proto)
@ -8281,8 +8319,13 @@ class MInstanceOf
setResultType(MIRType_Boolean);
}
public:
INSTRUCTION_HEADER(InstanceOf)
static MInstanceOf *New(TempAllocator &alloc, MDefinition *obj, JSObject *proto) {
return new(alloc) MInstanceOf(obj, proto);
}
TypePolicy *typePolicy() {
return this;
}
@ -8297,15 +8340,19 @@ class MCallInstanceOf
: public MBinaryInstruction,
public MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >
{
public:
MCallInstanceOf(MDefinition *obj, MDefinition *proto)
: MBinaryInstruction(obj, proto)
{
setResultType(MIRType_Boolean);
}
public:
INSTRUCTION_HEADER(CallInstanceOf)
static MCallInstanceOf *New(TempAllocator &alloc, MDefinition *obj, MDefinition *proto) {
return new(alloc) MCallInstanceOf(obj, proto);
}
TypePolicy *typePolicy() {
return this;
}
@ -8334,7 +8381,7 @@ class MArgumentsLength : public MNullaryInstruction
return AliasSet::None();
}
void computeRange();
void computeRange(TempAllocator &alloc);
};
// This MIR instruction is used to get an argument from the actual arguments.
@ -8915,9 +8962,6 @@ class MNewDenseArrayPar : public MBinaryInstruction
{
CompilerRootObject templateObject_;
public:
INSTRUCTION_HEADER(NewDenseArrayPar);
MNewDenseArrayPar(MDefinition *slice, MDefinition *length, JSObject *templateObject)
: MBinaryInstruction(slice, length),
templateObject_(templateObject)
@ -8925,6 +8969,15 @@ class MNewDenseArrayPar : public MBinaryInstruction
setResultType(MIRType_Object);
}
public:
INSTRUCTION_HEADER(NewDenseArrayPar);
static MNewDenseArrayPar *New(TempAllocator &alloc, MDefinition *slice, MDefinition *length,
JSObject *templateObject)
{
return new(alloc) MNewDenseArrayPar(slice, length, templateObject);
}
MDefinition *forkJoinSlice() const {
return getOperand(0);
}

View File

@ -157,7 +157,7 @@ class MoveResolver
private:
struct PendingMove
: public Move,
public TempObject,
public OldTempObject,
public InlineListNode<PendingMove>
{
PendingMove()

View File

@ -596,7 +596,7 @@ bool
ParallelSafetyVisitor::replaceWithNewPar(MInstruction *newInstruction,
JSObject *templateObject)
{
replace(newInstruction, new MNewPar(forkJoinSlice(), templateObject));
replace(newInstruction, MNewPar::New(alloc(), forkJoinSlice(), templateObject));
return true;
}
@ -739,13 +739,13 @@ ParallelSafetyVisitor::visitCall(MCall *ins)
bool
ParallelSafetyVisitor::visitCheckOverRecursed(MCheckOverRecursed *ins)
{
return replace(ins, new MCheckOverRecursedPar(forkJoinSlice()));
return replace(ins, MCheckOverRecursedPar::New(alloc(), forkJoinSlice()));
}
bool
ParallelSafetyVisitor::visitInterruptCheck(MInterruptCheck *ins)
{
return replace(ins, new MCheckInterruptPar(forkJoinSlice()));
return replace(ins, MCheckInterruptPar::New(alloc(), forkJoinSlice()));
}
/////////////////////////////////////////////////////////////////////////////

View File

@ -191,11 +191,13 @@ RangeAnalysis::addBetaNodes()
}
if (smaller && greater) {
MBeta *beta;
beta = MBeta::New(alloc(), smaller, Range::NewInt32Range(JSVAL_INT_MIN, JSVAL_INT_MAX-1));
beta = MBeta::New(alloc(), smaller,
Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX-1));
block->insertBefore(*block->begin(), beta);
replaceDominatedUsesWith(smaller, beta, block);
IonSpew(IonSpew_Range, "Adding beta node for smaller %d", smaller->id());
beta = MBeta::New(alloc(), greater, Range::NewInt32Range(JSVAL_INT_MIN+1, JSVAL_INT_MAX));
beta = MBeta::New(alloc(), greater,
Range::NewInt32Range(alloc(), JSVAL_INT_MIN+1, JSVAL_INT_MAX));
block->insertBefore(*block->begin(), beta);
replaceDominatedUsesWith(greater, beta, block);
IonSpew(IonSpew_Range, "Adding beta node for greater %d", greater->id());
@ -249,7 +251,7 @@ RangeAnalysis::addBetaNodes()
comp.dump(IonSpewFile);
}
MBeta *beta = MBeta::New(alloc(), val, new Range(comp));
MBeta *beta = MBeta::New(alloc(), val, new(alloc()) Range(comp));
block->insertBefore(*block->begin(), beta);
replaceDominatedUsesWith(val, beta, block);
}
@ -381,7 +383,7 @@ Range::dump() const
}
Range *
Range::intersect(const Range *lhs, const Range *rhs, bool *emptyRange)
Range::intersect(TempAllocator &alloc, const Range *lhs, const Range *rhs, bool *emptyRange)
{
*emptyRange = false;
@ -389,9 +391,9 @@ Range::intersect(const Range *lhs, const Range *rhs, bool *emptyRange)
return nullptr;
if (!lhs)
return new Range(*rhs);
return new(alloc) Range(*rhs);
if (!rhs)
return new Range(*lhs);
return new(alloc) Range(*lhs);
int32_t newLower = Max(lhs->lower_, rhs->lower_);
int32_t newUpper = Min(lhs->upper_, rhs->upper_);
@ -461,8 +463,8 @@ Range::intersect(const Range *lhs, const Range *rhs, bool *emptyRange)
}
}
return new Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
newFractional, newExponent);
return new(alloc) Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
newFractional, newExponent);
}
void
@ -590,7 +592,7 @@ MissingAnyInt32Bounds(const Range *lhs, const Range *rhs)
}
Range *
Range::add(const Range *lhs, const Range *rhs)
Range::add(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
int64_t l = (int64_t) lhs->lower_ + (int64_t) rhs->lower_;
if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound())
@ -610,11 +612,11 @@ Range::add(const Range *lhs, const Range *rhs)
if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
e = Range::IncludesInfinityAndNaN;
return new Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
return new(alloc) Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
}
Range *
Range::sub(const Range *lhs, const Range *rhs)
Range::sub(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
int64_t l = (int64_t) lhs->lower_ - (int64_t) rhs->upper_;
if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound())
@ -634,18 +636,18 @@ Range::sub(const Range *lhs, const Range *rhs)
if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
e = Range::IncludesInfinityAndNaN;
return new Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
return new(alloc) Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
}
Range *
Range::and_(const Range *lhs, const Range *rhs)
Range::and_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
JS_ASSERT(lhs->isInt32());
JS_ASSERT(rhs->isInt32());
// If both numbers can be negative, result can be negative in the whole range
if (lhs->lower() < 0 && rhs->lower() < 0)
return Range::NewInt32Range(INT32_MIN, Max(lhs->upper(), rhs->upper()));
return Range::NewInt32Range(alloc, INT32_MIN, Max(lhs->upper(), rhs->upper()));
// Only one of both numbers can be negative.
// - result can't be negative
@ -661,11 +663,11 @@ Range::and_(const Range *lhs, const Range *rhs)
if (rhs->lower() < 0)
upper = lhs->upper();
return Range::NewInt32Range(lower, upper);
return Range::NewInt32Range(alloc, lower, upper);
}
Range *
Range::or_(const Range *lhs, const Range *rhs)
Range::or_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
JS_ASSERT(lhs->isInt32());
JS_ASSERT(rhs->isInt32());
@ -675,15 +677,15 @@ Range::or_(const Range *lhs, const Range *rhs)
// operand or from shifting an int32_t by 32.
if (lhs->lower() == lhs->upper()) {
if (lhs->lower() == 0)
return new Range(*rhs);
return new(alloc) Range(*rhs);
if (lhs->lower() == -1)
return new Range(*lhs);;
return new(alloc) Range(*lhs);;
}
if (rhs->lower() == rhs->upper()) {
if (rhs->lower() == 0)
return new Range(*lhs);
return new(alloc) Range(*lhs);
if (rhs->lower() == -1)
return new Range(*rhs);;
return new(alloc) Range(*rhs);;
}
// The code below uses CountLeadingZeroes32, which has undefined behavior
@ -718,11 +720,11 @@ Range::or_(const Range *lhs, const Range *rhs)
}
}
return Range::NewInt32Range(lower, upper);
return Range::NewInt32Range(alloc, lower, upper);
}
Range *
Range::xor_(const Range *lhs, const Range *rhs)
Range::xor_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
JS_ASSERT(lhs->isInt32());
JS_ASSERT(rhs->isInt32());
@ -782,18 +784,18 @@ Range::xor_(const Range *lhs, const Range *rhs)
Swap(lower, upper);
}
return Range::NewInt32Range(lower, upper);
return Range::NewInt32Range(alloc, lower, upper);
}
Range *
Range::not_(const Range *op)
Range::not_(TempAllocator &alloc, const Range *op)
{
JS_ASSERT(op->isInt32());
return Range::NewInt32Range(~op->upper(), ~op->lower());
return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
}
Range *
Range::mul(const Range *lhs, const Range *rhs)
Range::mul(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
bool fractional = lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart();
@ -816,19 +818,19 @@ Range::mul(const Range *lhs, const Range *rhs)
}
if (MissingAnyInt32Bounds(lhs, rhs))
return new Range(NoInt32LowerBound, NoInt32UpperBound, fractional, exponent);
return new(alloc) Range(NoInt32LowerBound, NoInt32UpperBound, fractional, exponent);
int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
return new Range(
return new(alloc) Range(
Min( Min(a, b), Min(c, d) ),
Max( Max(a, b), Max(c, d) ),
fractional, exponent);
}
Range *
Range::lsh(const Range *lhs, int32_t c)
Range::lsh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
JS_ASSERT(lhs->isInt32());
int32_t shift = c & 0x1f;
@ -838,26 +840,26 @@ Range::lsh(const Range *lhs, int32_t c)
if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) == lhs->lower() &&
(int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) == lhs->upper())
{
return Range::NewInt32Range(
return Range::NewInt32Range(alloc,
uint32_t(lhs->lower()) << shift,
uint32_t(lhs->upper()) << shift);
}
return Range::NewInt32Range(INT32_MIN, INT32_MAX);
return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}
Range *
Range::rsh(const Range *lhs, int32_t c)
Range::rsh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
JS_ASSERT(lhs->isInt32());
int32_t shift = c & 0x1f;
return Range::NewInt32Range(
return Range::NewInt32Range(alloc,
lhs->lower() >> shift,
lhs->upper() >> shift);
}
Range *
Range::ursh(const Range *lhs, int32_t c)
Range::ursh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
// ursh's left operand is uint32, not int32, but for range analysis we
// currently approximate it as int32. We assume here that the range has
@ -869,84 +871,84 @@ Range::ursh(const Range *lhs, int32_t c)
// If the value is always non-negative or always negative, we can simply
// compute the correct range by shifting.
if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
return Range::NewUInt32Range(
return Range::NewUInt32Range(alloc,
uint32_t(lhs->lower()) >> shift,
uint32_t(lhs->upper()) >> shift);
}
// Otherwise return the most general range after the shift.
return Range::NewUInt32Range(0, UINT32_MAX >> shift);
return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
}
Range *
Range::lsh(const Range *lhs, const Range *rhs)
Range::lsh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
JS_ASSERT(lhs->isInt32());
JS_ASSERT(rhs->isInt32());
return Range::NewInt32Range(INT32_MIN, INT32_MAX);
return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}
Range *
Range::rsh(const Range *lhs, const Range *rhs)
Range::rsh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
JS_ASSERT(lhs->isInt32());
JS_ASSERT(rhs->isInt32());
return Range::NewInt32Range(Min(lhs->lower(), 0), Max(lhs->upper(), 0));
return Range::NewInt32Range(alloc, Min(lhs->lower(), 0), Max(lhs->upper(), 0));
}
Range *
Range::ursh(const Range *lhs, const Range *rhs)
Range::ursh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
// ursh's left operand is uint32, not int32, but for range analysis we
// currently approximate it as int32. We assume here that the range has
// already been adjusted accordingly by our callers.
JS_ASSERT(lhs->isInt32());
JS_ASSERT(rhs->isInt32());
return Range::NewUInt32Range(0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
return Range::NewUInt32Range(alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
}
Range *
Range::abs(const Range *op)
Range::abs(TempAllocator &alloc, const Range *op)
{
int32_t l = op->lower_;
int32_t u = op->upper_;
return new Range(Max(Max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u),
true,
Max(Max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
op->hasInt32LowerBound_ && op->hasInt32UpperBound_ && l != INT32_MIN,
op->canHaveFractionalPart_,
op->max_exponent_);
return new(alloc) Range(Max(Max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u),
true,
Max(Max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
op->hasInt32LowerBound_ && op->hasInt32UpperBound_ && l != INT32_MIN,
op->canHaveFractionalPart_,
op->max_exponent_);
}
Range *
Range::min(const Range *lhs, const Range *rhs)
Range::min(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
// If either operand is NaN, the result is NaN.
if (lhs->canBeNaN() || rhs->canBeNaN())
return nullptr;
return new Range(Min(lhs->lower_, rhs->lower_),
lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
Min(lhs->upper_, rhs->upper_),
lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
Max(lhs->max_exponent_, rhs->max_exponent_));
return new(alloc) Range(Min(lhs->lower_, rhs->lower_),
lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
Min(lhs->upper_, rhs->upper_),
lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
Max(lhs->max_exponent_, rhs->max_exponent_));
}
Range *
Range::max(const Range *lhs, const Range *rhs)
Range::max(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
// If either operand is NaN, the result is NaN.
if (lhs->canBeNaN() || rhs->canBeNaN())
return nullptr;
return new Range(Max(lhs->lower_, rhs->lower_),
lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
Max(lhs->upper_, rhs->upper_),
lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
Max(lhs->max_exponent_, rhs->max_exponent_));
return new(alloc) Range(Max(lhs->lower_, rhs->lower_),
lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
Max(lhs->upper_, rhs->upper_),
lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
Max(lhs->max_exponent_, rhs->max_exponent_));
}
bool
@ -986,7 +988,7 @@ Range::update(const Range *other)
///////////////////////////////////////////////////////////////////////////////
void
MPhi::computeRange()
MPhi::computeRange(TempAllocator &alloc)
{
if (type() != MIRType_Int32 && type() != MIRType_Double)
return;
@ -1012,19 +1014,19 @@ MPhi::computeRange()
if (range)
range->unionWith(&input);
else
range = new Range(input);
range = new(alloc) Range(input);
}
setRange(range);
}
void
MBeta::computeRange()
MBeta::computeRange(TempAllocator &alloc)
{
bool emptyRange = false;
Range opRange(getOperand(0));
Range *range = Range::intersect(&opRange, comparison_, &emptyRange);
Range *range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
if (emptyRange) {
IonSpew(IonSpew_Range, "Marking block for inst %d unexitable", id());
block()->setEarlyAbort();
@ -1034,74 +1036,74 @@ MBeta::computeRange()
}
void
MConstant::computeRange()
MConstant::computeRange(TempAllocator &alloc)
{
if (value().isNumber()) {
double d = value().toNumber();
setRange(Range::NewDoubleRange(d, d));
setRange(Range::NewDoubleRange(alloc, d, d));
} else if (value().isBoolean()) {
bool b = value().toBoolean();
setRange(Range::NewInt32Range(b, b));
setRange(Range::NewInt32Range(alloc, b, b));
}
}
void
MCharCodeAt::computeRange()
MCharCodeAt::computeRange(TempAllocator &alloc)
{
// ECMA 262 says that the integer will be non-negative and at most 65535.
setRange(Range::NewInt32Range(0, 65535));
setRange(Range::NewInt32Range(alloc, 0, 65535));
}
void
MClampToUint8::computeRange()
MClampToUint8::computeRange(TempAllocator &alloc)
{
setRange(Range::NewUInt32Range(0, 255));
setRange(Range::NewUInt32Range(alloc, 0, 255));
}
void
MBitAnd::computeRange()
MBitAnd::computeRange(TempAllocator &alloc)
{
Range left(getOperand(0));
Range right(getOperand(1));
left.wrapAroundToInt32();
right.wrapAroundToInt32();
setRange(Range::and_(&left, &right));
setRange(Range::and_(alloc, &left, &right));
}
void
MBitOr::computeRange()
MBitOr::computeRange(TempAllocator &alloc)
{
Range left(getOperand(0));
Range right(getOperand(1));
left.wrapAroundToInt32();
right.wrapAroundToInt32();
setRange(Range::or_(&left, &right));
setRange(Range::or_(alloc, &left, &right));
}
void
MBitXor::computeRange()
MBitXor::computeRange(TempAllocator &alloc)
{
Range left(getOperand(0));
Range right(getOperand(1));
left.wrapAroundToInt32();
right.wrapAroundToInt32();
setRange(Range::xor_(&left, &right));
setRange(Range::xor_(alloc, &left, &right));
}
void
MBitNot::computeRange()
MBitNot::computeRange(TempAllocator &alloc)
{
Range op(getOperand(0));
op.wrapAroundToInt32();
setRange(Range::not_(&op));
setRange(Range::not_(alloc, &op));
}
void
MLsh::computeRange()
MLsh::computeRange(TempAllocator &alloc)
{
Range left(getOperand(0));
Range right(getOperand(1));
@ -1110,16 +1112,16 @@ MLsh::computeRange()
MDefinition *rhs = getOperand(1);
if (!rhs->isConstant()) {
right.wrapAroundToShiftCount();
setRange(Range::lsh(&left, &right));
setRange(Range::lsh(alloc, &left, &right));
return;
}
int32_t c = rhs->toConstant()->value().toInt32();
setRange(Range::lsh(&left, c));
setRange(Range::lsh(alloc, &left, c));
}
void
MRsh::computeRange()
MRsh::computeRange(TempAllocator &alloc)
{
Range left(getOperand(0));
Range right(getOperand(1));
@ -1128,16 +1130,16 @@ MRsh::computeRange()
MDefinition *rhs = getOperand(1);
if (!rhs->isConstant()) {
right.wrapAroundToShiftCount();
setRange(Range::rsh(&left, &right));
setRange(Range::rsh(alloc, &left, &right));
return;
}
int32_t c = rhs->toConstant()->value().toInt32();
setRange(Range::rsh(&left, c));
setRange(Range::rsh(alloc, &left, c));
}
void
MUrsh::computeRange()
MUrsh::computeRange(TempAllocator &alloc)
{
Range left(getOperand(0));
Range right(getOperand(1));
@ -1152,67 +1154,67 @@ MUrsh::computeRange()
MDefinition *rhs = getOperand(1);
if (!rhs->isConstant()) {
setRange(Range::ursh(&left, &right));
setRange(Range::ursh(alloc, &left, &right));
} else {
int32_t c = rhs->toConstant()->value().toInt32();
setRange(Range::ursh(&left, c));
setRange(Range::ursh(alloc, &left, c));
}
JS_ASSERT(range()->lower() >= 0);
}
void
MAbs::computeRange()
MAbs::computeRange(TempAllocator &alloc)
{
if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
return;
Range other(getOperand(0));
Range *next = Range::abs(&other);
Range *next = Range::abs(alloc, &other);
if (implicitTruncate_)
next->wrapAroundToInt32();
setRange(next);
}
void
MMinMax::computeRange()
MMinMax::computeRange(TempAllocator &alloc)
{
if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
return;
Range left(getOperand(0));
Range right(getOperand(1));
setRange(isMax() ? Range::max(&left, &right) : Range::min(&left, &right));
setRange(isMax() ? Range::max(alloc, &left, &right) : Range::min(alloc, &left, &right));
}
void
MAdd::computeRange()
MAdd::computeRange(TempAllocator &alloc)
{
if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
return;
Range left(getOperand(0));
Range right(getOperand(1));
Range *next = Range::add(&left, &right);
Range *next = Range::add(alloc, &left, &right);
if (isTruncated())
next->wrapAroundToInt32();
setRange(next);
}
void
MSub::computeRange()
MSub::computeRange(TempAllocator &alloc)
{
if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
return;
Range left(getOperand(0));
Range right(getOperand(1));
Range *next = Range::sub(&left, &right);
Range *next = Range::sub(alloc, &left, &right);
if (isTruncated())
next->wrapAroundToInt32();
setRange(next);
}
void
MMul::computeRange()
MMul::computeRange(TempAllocator &alloc)
{
if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
return;
@ -1220,7 +1222,7 @@ MMul::computeRange()
Range right(getOperand(1));
if (canBeNegativeZero())
canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
Range *next = Range::mul(&left, &right);
Range *next = Range::mul(alloc, &left, &right);
// Truncated multiplications could overflow in both directions
if (isTruncated())
next->wrapAroundToInt32();
@ -1228,7 +1230,7 @@ MMul::computeRange()
}
void
MMod::computeRange()
MMod::computeRange(TempAllocator &alloc)
{
if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
return;
@ -1275,7 +1277,7 @@ MMod::computeRange()
--rhsBound;
// This gives us two upper bounds, so we can take the best one.
setRange(Range::NewUInt32Range(0, Min(lhsBound, rhsBound)));
setRange(Range::NewUInt32Range(alloc, 0, Min(lhsBound, rhsBound)));
return;
}
@ -1307,12 +1309,12 @@ MMod::computeRange()
int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
int64_t upper = lhs.upper() <= 0 ? 0 : absBound;
setRange(new Range(lower, upper, lhs.canHaveFractionalPart() || rhs.canHaveFractionalPart(),
Min(lhs.exponent(), rhs.exponent())));
setRange(new(alloc) Range(lower, upper, lhs.canHaveFractionalPart() || rhs.canHaveFractionalPart(),
Min(lhs.exponent(), rhs.exponent())));
}
void
MDiv::computeRange()
MDiv::computeRange(TempAllocator &alloc)
{
if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
return;
@ -1327,7 +1329,7 @@ MDiv::computeRange()
// Something simple for now: When dividing by a positive rhs, the result
// won't be further from zero than lhs.
if (lhs.lower() >= 0 && rhs.lower() >= 1) {
setRange(new Range(0, lhs.upper(), true, lhs.exponent()));
setRange(new(alloc) Range(0, lhs.upper(), true, lhs.exponent()));
// Also, we can optimize by converting this to an unsigned div.
if (specialization() == MIRType_Int32 &&
@ -1340,12 +1342,12 @@ MDiv::computeRange()
// fractional parts.
JS_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
// Unsigned division by a non-zero rhs will return a uint32 value.
setRange(Range::NewUInt32Range(0, UINT32_MAX));
setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
}
}
void
MSqrt::computeRange()
MSqrt::computeRange(TempAllocator &alloc)
{
Range input(getOperand(0));
@ -1360,54 +1362,54 @@ MSqrt::computeRange()
// Something simple for now: When taking the sqrt of a positive value, the
// result won't be further from zero than the input.
setRange(new Range(0, input.upper(), true, input.exponent()));
setRange(new(alloc) Range(0, input.upper(), true, input.exponent()));
}
void
MToDouble::computeRange()
MToDouble::computeRange(TempAllocator &alloc)
{
setRange(new Range(getOperand(0)));
setRange(new(alloc) Range(getOperand(0)));
}
void
MToFloat32::computeRange()
MToFloat32::computeRange(TempAllocator &alloc)
{
setRange(new Range(getOperand(0)));
setRange(new(alloc) Range(getOperand(0)));
}
void
MTruncateToInt32::computeRange()
MTruncateToInt32::computeRange(TempAllocator &alloc)
{
Range *output = new Range(getOperand(0));
Range *output = new(alloc) Range(getOperand(0));
output->wrapAroundToInt32();
setRange(output);
}
void
MToInt32::computeRange()
MToInt32::computeRange(TempAllocator &alloc)
{
Range *output = new Range(getOperand(0));
Range *output = new(alloc) Range(getOperand(0));
output->clampToInt32();
setRange(output);
}
static Range *GetTypedArrayRange(int type)
static Range *GetTypedArrayRange(TempAllocator &alloc, int type)
{
switch (type) {
case ScalarTypeRepresentation::TYPE_UINT8_CLAMPED:
case ScalarTypeRepresentation::TYPE_UINT8:
return Range::NewUInt32Range(0, UINT8_MAX);
return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
case ScalarTypeRepresentation::TYPE_UINT16:
return Range::NewUInt32Range(0, UINT16_MAX);
return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
case ScalarTypeRepresentation::TYPE_UINT32:
return Range::NewUInt32Range(0, UINT32_MAX);
return Range::NewUInt32Range(alloc, 0, UINT32_MAX);
case ScalarTypeRepresentation::TYPE_INT8:
return Range::NewInt32Range(INT8_MIN, INT8_MAX);
return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
case ScalarTypeRepresentation::TYPE_INT16:
return Range::NewInt32Range(INT16_MIN, INT16_MAX);
return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
case ScalarTypeRepresentation::TYPE_INT32:
return Range::NewInt32Range(INT32_MIN, INT32_MAX);
return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
case ScalarTypeRepresentation::TYPE_FLOAT32:
case ScalarTypeRepresentation::TYPE_FLOAT64:
@ -1418,87 +1420,87 @@ static Range *GetTypedArrayRange(int type)
}
void
MLoadTypedArrayElement::computeRange()
MLoadTypedArrayElement::computeRange(TempAllocator &alloc)
{
// We have an Int32 type and if this is a UInt32 load it may produce a value
// outside of our range, but we have a bailout to handle those cases.
setRange(GetTypedArrayRange(arrayType()));
setRange(GetTypedArrayRange(alloc, arrayType()));
}
void
MLoadTypedArrayElementStatic::computeRange()
MLoadTypedArrayElementStatic::computeRange(TempAllocator &alloc)
{
// We don't currently use MLoadTypedArrayElementStatic for uint32, so we
// don't have to worry about it returning a value outside our type.
JS_ASSERT(typedArray_->type() != ScalarTypeRepresentation::TYPE_UINT32);
setRange(GetTypedArrayRange(typedArray_->type()));
setRange(GetTypedArrayRange(alloc, typedArray_->type()));
}
void
MArrayLength::computeRange()
MArrayLength::computeRange(TempAllocator &alloc)
{
// Array lengths can go up to UINT32_MAX, but we only create MArrayLength
// nodes when the value is known to be int32 (see the
// OBJECT_FLAG_LENGTH_OVERFLOW flag).
setRange(Range::NewUInt32Range(0, INT32_MAX));
setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}
void
MInitializedLength::computeRange()
MInitializedLength::computeRange(TempAllocator &alloc)
{
setRange(Range::NewUInt32Range(0, JSObject::NELEMENTS_LIMIT));
setRange(Range::NewUInt32Range(alloc, 0, JSObject::NELEMENTS_LIMIT));
}
void
MTypedArrayLength::computeRange()
MTypedArrayLength::computeRange(TempAllocator &alloc)
{
setRange(Range::NewUInt32Range(0, INT32_MAX));
setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}
void
MStringLength::computeRange()
MStringLength::computeRange(TempAllocator &alloc)
{
static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
"NewUInt32Range requires a uint32 value");
setRange(Range::NewUInt32Range(0, JSString::MAX_LENGTH));
setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
}
void
MArgumentsLength::computeRange()
MArgumentsLength::computeRange(TempAllocator &alloc)
{
// This is is a conservative upper bound on what |TooManyArguments| checks.
// If exceeded, Ion will not be entered in the first place.
static_assert(SNAPSHOT_MAX_NARGS <= UINT32_MAX,
"NewUInt32Range requires a uint32 value");
setRange(Range::NewUInt32Range(0, SNAPSHOT_MAX_NARGS));
setRange(Range::NewUInt32Range(alloc, 0, SNAPSHOT_MAX_NARGS));
}
void
MBoundsCheck::computeRange()
MBoundsCheck::computeRange(TempAllocator &alloc)
{
// Just transfer the incoming index range to the output. The length() is
// also interesting, but it is handled as a bailout check, and we're
// computing a pre-bailout range here.
setRange(new Range(index()));
setRange(new(alloc) Range(index()));
}
void
MArrayPush::computeRange()
MArrayPush::computeRange(TempAllocator &alloc)
{
// MArrayPush returns the new array length.
setRange(Range::NewUInt32Range(0, UINT32_MAX));
setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
}
void
MMathFunction::computeRange()
MMathFunction::computeRange(TempAllocator &alloc)
{
Range opRange(getOperand(0));
switch (function()) {
case Sin:
case Cos:
if (!opRange.canBeInfiniteOrNaN())
setRange(Range::NewDoubleRange(-1.0, 1.0));
setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
break;
case Sign:
if (!opRange.canBeNaN()) {
@ -1509,7 +1511,7 @@ MMathFunction::computeRange()
lower = 0;
if (opRange.hasInt32UpperBound() && opRange.upper() <= 0)
upper = 0;
setRange(Range::NewInt32Range(lower, upper));
setRange(Range::NewInt32Range(alloc, lower, upper));
}
break;
default:
@ -1518,9 +1520,9 @@ MMathFunction::computeRange()
}
void
MRandom::computeRange()
MRandom::computeRange(TempAllocator &alloc)
{
setRange(Range::NewDoubleRange(0.0, 1.0));
setRange(Range::NewDoubleRange(alloc, 0.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
@ -1767,7 +1769,7 @@ RangeAnalysis::analyzeLoopIterationCount(MBasicBlock *header,
return nullptr;
}
return new LoopIterationBound(header, test, bound);
return new(alloc()) LoopIterationBound(header, test, bound);
}
void
@ -1798,7 +1800,7 @@ RangeAnalysis::analyzeLoopPhi(MBasicBlock *header, LoopIterationBound *loopBound
return;
if (!phi->range())
phi->setRange(new Range());
phi->setRange(new(alloc()) Range());
LinearSum initialSum(alloc());
if (!initialSum.add(initial, 1))
@ -1832,13 +1834,13 @@ RangeAnalysis::analyzeLoopPhi(MBasicBlock *header, LoopIterationBound *loopBound
if (modified.constant > 0) {
if (initRange && initRange->hasInt32LowerBound())
phi->range()->refineLower(initRange->lower());
phi->range()->setSymbolicLower(new SymbolicBound(nullptr, initialSum));
phi->range()->setSymbolicUpper(new SymbolicBound(loopBound, limitSum));
phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), nullptr, initialSum));
phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), loopBound, limitSum));
} else {
if (initRange && initRange->hasInt32UpperBound())
phi->range()->refineUpper(initRange->upper());
phi->range()->setSymbolicUpper(new SymbolicBound(nullptr, initialSum));
phi->range()->setSymbolicLower(new SymbolicBound(loopBound, limitSum));
phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), nullptr, initialSum));
phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), loopBound, limitSum));
}
IonSpew(IonSpew_Range, "added symbolic range on %d", phi->id());
@ -1875,7 +1877,7 @@ ConvertLinearSum(TempAllocator &alloc, MBasicBlock *block, const LinearSum &sum)
def = MAdd::New(alloc, def, term.term);
def->toAdd()->setInt32();
block->insertBefore(block->lastIns(), def->toInstruction());
def->computeRange();
def->computeRange(alloc);
} else {
def = term.term;
}
@ -1883,12 +1885,12 @@ ConvertLinearSum(TempAllocator &alloc, MBasicBlock *block, const LinearSum &sum)
if (!def) {
def = MConstant::New(alloc, Int32Value(0));
block->insertBefore(block->lastIns(), def->toInstruction());
def->computeRange();
def->computeRange(alloc);
}
def = MSub::New(alloc, def, term.term);
def->toSub()->setInt32();
block->insertBefore(block->lastIns(), def->toInstruction());
def->computeRange();
def->computeRange(alloc);
} else {
JS_ASSERT(term.scale != 0);
MConstant *factor = MConstant::New(alloc, Int32Value(term.scale));
@ -1896,12 +1898,12 @@ ConvertLinearSum(TempAllocator &alloc, MBasicBlock *block, const LinearSum &sum)
MMul *mul = MMul::New(alloc, term.term, factor);
mul->setInt32();
block->insertBefore(block->lastIns(), mul);
mul->computeRange();
mul->computeRange(alloc);
if (def) {
def = MAdd::New(alloc, def, mul);
def->toAdd()->setInt32();
block->insertBefore(block->lastIns(), def->toInstruction());
def->computeRange();
def->computeRange(alloc);
} else {
def = mul;
}
@ -1911,7 +1913,7 @@ ConvertLinearSum(TempAllocator &alloc, MBasicBlock *block, const LinearSum &sum)
if (!def) {
def = MConstant::New(alloc, Int32Value(0));
block->insertBefore(block->lastIns(), def->toInstruction());
def->computeRange();
def->computeRange(alloc);
}
return def;
@ -1999,7 +2001,7 @@ RangeAnalysis::analyze()
for (MDefinitionIterator iter(block); iter; iter++) {
MDefinition *def = *iter;
def->computeRange();
def->computeRange(alloc());
IonSpew(IonSpew_Range, "computing range on %d", def->id());
SpewRange(def);
}
@ -2074,7 +2076,7 @@ RangeAnalysis::addRangeAssertions()
if (ins->isPassArg())
continue;
MAssertRange *guard = MAssertRange::New(alloc(), ins, new Range(r));
MAssertRange *guard = MAssertRange::New(alloc(), ins, new(alloc()) Range(r));
// The code that removes beta nodes assumes that it can find them
// in a contiguous run at the top of each block. Don't insert

View File

@ -46,6 +46,13 @@ struct LoopIterationBound : public TempObject
// A symbolic upper or lower bound computed for a term.
struct SymbolicBound : public TempObject
{
private:
SymbolicBound(LoopIterationBound *loop, LinearSum sum)
: loop(loop), sum(sum)
{
}
public:
// Any loop iteration bound from which this was derived.
//
// If non-nullptr, then 'sum' is only valid within the loop body, at
@ -54,14 +61,13 @@ struct SymbolicBound : public TempObject
// If nullptr, then 'sum' is always valid.
LoopIterationBound *loop;
static SymbolicBound *New(TempAllocator &alloc, LoopIterationBound *loop, LinearSum sum) {
return new(alloc) SymbolicBound(loop, sum);
}
// Computed symbolic bound, see above.
LinearSum sum;
SymbolicBound(LoopIterationBound *loop, LinearSum sum)
: loop(loop), sum(sum)
{
}
void print(Sprinter &sp) const;
void dump() const;
};
@ -352,21 +358,21 @@ class Range : public TempObject {
// *after* any bailout checks.
Range(const MDefinition *def);
static Range *NewInt32Range(int32_t l, int32_t h) {
return new Range(l, h, false, MaxInt32Exponent);
static Range *NewInt32Range(TempAllocator &alloc, int32_t l, int32_t h) {
return new(alloc) Range(l, h, false, MaxInt32Exponent);
}
static Range *NewUInt32Range(uint32_t l, uint32_t h) {
static Range *NewUInt32Range(TempAllocator &alloc, uint32_t l, uint32_t h) {
// For now, just pass them to the constructor as int64_t values.
// They'll become unbounded if they're not in the int32_t range.
return new Range(l, h, false, MaxUInt32Exponent);
return new(alloc) Range(l, h, false, MaxUInt32Exponent);
}
static Range *NewDoubleRange(double l, double h) {
static Range *NewDoubleRange(TempAllocator &alloc, double l, double h) {
if (mozilla::IsNaN(l) && mozilla::IsNaN(h))
return nullptr;
Range *r = new Range();
Range *r = new(alloc) Range();
r->setDouble(l, h);
return r;
}
@ -381,23 +387,24 @@ class Range : public TempObject {
// copying when chaining together unions when handling Phi
// nodes.
void unionWith(const Range *other);
static Range * intersect(const Range *lhs, const Range *rhs, bool *emptyRange);
static Range * add(const Range *lhs, const Range *rhs);
static Range * sub(const Range *lhs, const Range *rhs);
static Range * mul(const Range *lhs, const Range *rhs);
static Range * and_(const Range *lhs, const Range *rhs);
static Range * or_(const Range *lhs, const Range *rhs);
static Range * xor_(const Range *lhs, const Range *rhs);
static Range * not_(const Range *op);
static Range * lsh(const Range *lhs, int32_t c);
static Range * rsh(const Range *lhs, int32_t c);
static Range * ursh(const Range *lhs, int32_t c);
static Range * lsh(const Range *lhs, const Range *rhs);
static Range * rsh(const Range *lhs, const Range *rhs);
static Range * ursh(const Range *lhs, const Range *rhs);
static Range * abs(const Range *op);
static Range * min(const Range *lhs, const Range *rhs);
static Range * max(const Range *lhs, const Range *rhs);
static Range *intersect(TempAllocator &alloc, const Range *lhs, const Range *rhs,
bool *emptyRange);
static Range *add(TempAllocator &alloc, const Range *lhs, const Range *rhs);
static Range *sub(TempAllocator &alloc, const Range *lhs, const Range *rhs);
static Range *mul(TempAllocator &alloc, const Range *lhs, const Range *rhs);
static Range *and_(TempAllocator &alloc, const Range *lhs, const Range *rhs);
static Range *or_(TempAllocator &alloc, const Range *lhs, const Range *rhs);
static Range *xor_(TempAllocator &alloc, const Range *lhs, const Range *rhs);
static Range *not_(TempAllocator &alloc, const Range *op);
static Range *lsh(TempAllocator &alloc, const Range *lhs, int32_t c);
static Range *rsh(TempAllocator &alloc, const Range *lhs, int32_t c);
static Range *ursh(TempAllocator &alloc, const Range *lhs, int32_t c);
static Range *lsh(TempAllocator &alloc, const Range *lhs, const Range *rhs);
static Range *rsh(TempAllocator &alloc, const Range *lhs, const Range *rhs);
static Range *ursh(TempAllocator &alloc, const Range *lhs, const Range *rhs);
static Range *abs(TempAllocator &alloc, const Range *op);
static Range *min(TempAllocator &alloc, const Range *lhs, const Range *rhs);
static Range *max(TempAllocator &alloc, const Range *lhs, const Range *rhs);
static bool negativeZeroMul(const Range *lhs, const Range *rhs);

View File

@ -28,7 +28,7 @@ StupidAllocator::stackLocation(uint32_t vreg)
if (def->policy() == LDefinition::PRESET && def->output()->isArgument())
return def->output();
return new LStackSlot(DefaultStackSlot(vreg), def->type() == LDefinition::DOUBLE);
return new(alloc()) LStackSlot(DefaultStackSlot(vreg), def->type() == LDefinition::DOUBLE);
}
StupidAllocator::RegisterIndex
@ -186,7 +186,7 @@ StupidAllocator::syncRegister(LInstruction *ins, RegisterIndex index)
{
if (registers[index].dirty) {
LMoveGroup *input = getInputMoveGroup(ins->id());
LAllocation *source = new LAllocation(registers[index].reg);
LAllocation *source = new(alloc()) LAllocation(registers[index].reg);
uint32_t existing = registers[index].vreg;
LAllocation *dest = stackLocation(existing);
@ -209,7 +209,7 @@ StupidAllocator::loadRegister(LInstruction *ins, uint32_t vreg, RegisterIndex in
// Load a vreg from its stack location to a register.
LMoveGroup *input = getInputMoveGroup(ins->id());
LAllocation *source = stackLocation(vreg);
LAllocation *dest = new LAllocation(registers[index].reg);
LAllocation *dest = new(alloc()) LAllocation(registers[index].reg);
input->addAfter(source, dest);
registers[index].set(vreg, ins);
}

View File

@ -440,7 +440,7 @@ class CodeGeneratorShared : public LInstructionVisitor
};
// An out-of-line path is generated at the end of the function.
class OutOfLineCode : public TempObject
class OutOfLineCode : public OldTempObject
{
Label entry_;
Label rejoin_;