Bug 1249482 - OdinMonkey: Remove the needsBoundsCheck flag from the frontend. r=luke
This commit is contained in:
parent f2cbec6a6f
commit deebc5f2f0
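For orientation: the change drops the per-access NeedsBoundsCheck flag that validation used to encode into the asm.js bytecode, and instead threads a single minimum-heap-length value from the validator through ModuleGenerator and MIRGenerator into EffectiveAddressAnalysis, which removes the bounds checks it can prove safe. A minimal standalone sketch of that backend-side decision, using hypothetical names rather than the SpiderMonkey API:

```cpp
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for the per-compilation state that now carries a
// minimum heap length instead of a per-access flag.
struct CompileState {
    uint32_t minHeapLength = 0;   // bumped by the validator, read by the backend
};

// A heap access at a constant index needs no bounds check when
// [index, index + size) provably fits inside minHeapLength.
bool needsBoundsCheck(const CompileState& state, uint32_t index, uint32_t byteSize) {
    uint64_t end = uint64_t(index) + byteSize;   // 64-bit to avoid overflow
    return end > state.minHeapLength;
}

int main() {
    CompileState state;
    state.minHeapLength = 64 * 1024;             // validator saw a 64 KiB heap constraint

    std::cout << needsBoundsCheck(state, 1024, 4) << '\n';    // 0: provably in bounds
    std::cout << needsBoundsCheck(state, 65532, 8) << '\n';   // 1: could run past the heap
}
```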
@@ -1793,7 +1793,12 @@ class MOZ_STACK_CLASS ModuleValidator
             return false;
         }

-        return mg_.init(Move(genData), Move(filename));
+        if (!mg_.init(Move(genData), Move(filename)))
+            return false;
+
+        mg_.bumpMinHeapLength(module_->minHeapLength);
+
+        return true;
     }

     ExclusiveContext* cx() const { return cx_; }
@@ -2094,8 +2099,10 @@ class MOZ_STACK_CLASS ModuleValidator
         if (len > uint64_t(INT32_MAX) + 1)
             return false;
         len = RoundUpToNextValidAsmJSHeapLength(len);
-        if (len > module_->minHeapLength)
+        if (len > module_->minHeapLength) {
             module_->minHeapLength = len;
+            mg_.bumpMinHeapLength(len);
+        }
         return true;
     }

@@ -3521,40 +3528,12 @@ IsLiteralOrConstInt(FunctionValidator& f, ParseNode* pn, uint32_t* u32)
     return IsLiteralInt(lit, u32);
 }

-static bool
-FoldMaskedArrayIndex(FunctionValidator& f, ParseNode** indexExpr, int32_t* mask,
-                     NeedsBoundsCheck* needsBoundsCheck)
-{
-    MOZ_ASSERT((*indexExpr)->isKind(PNK_BITAND));
-
-    ParseNode* indexNode = BitwiseLeft(*indexExpr);
-    ParseNode* maskNode = BitwiseRight(*indexExpr);
-
-    uint32_t mask2;
-    if (IsLiteralOrConstInt(f, maskNode, &mask2)) {
-        // Flag the access to skip the bounds check if the mask ensures that an
-        // 'out of bounds' access can not occur based on the current heap length
-        // constraint. The unsigned maximum of a masked index is the mask
-        // itself, so check that the mask is not negative and compare the mask
-        // to the known minimum heap length.
-        if (int32_t(mask2) >= 0 && mask2 < f.m().minHeapLength())
-            *needsBoundsCheck = NO_BOUNDS_CHECK;
-        *mask &= mask2;
-        *indexExpr = indexNode;
-        return true;
-    }
-
-    return false;
-}
-
 static const int32_t NoMask = -1;

 static bool
 CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
-                 Scalar::Type* viewType, NeedsBoundsCheck* needsBoundsCheck, int32_t* mask)
+                 Scalar::Type* viewType, int32_t* mask)
 {
-    *needsBoundsCheck = NEEDS_BOUNDS_CHECK;
-
     if (!viewName->isKind(PNK_NAME))
         return f.fail(viewName, "base of array access must be a typed array view name");

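The comment deleted from FoldMaskedArrayIndex above carries the reasoning that now lives in the backend: for an access heap[(i & mask)], the unsigned maximum of (i & mask) is the mask itself, so a non-negative mask below the known minimum heap length means the access can never go out of bounds. A hedged, self-contained illustration of that check (a hypothetical helper, not the removed function):

```cpp
#include <cstdint>
#include <cstdio>

// For an asm.js access heap[(i & mask)], the unsigned maximum of (i & mask) is
// mask itself, so a non-negative mask below the known minimum heap length means
// the access stays in bounds whatever i is at runtime.
bool maskedIndexAlwaysInBounds(int32_t mask, uint32_t minHeapLength) {
    return mask >= 0 && uint32_t(mask) < minHeapLength;
}

int main() {
    // (i & 0xFFFF) is at most 0xFFFF, so against a 64 KiB minimum heap every
    // byte-sized access through that mask stays in bounds.
    std::printf("%d\n", maskedIndexAlwaysInBounds(0xFFFF, 64 * 1024));      // 1
    // A mask equal to the heap length would allow index == length, one past the end.
    std::printf("%d\n", maskedIndexAlwaysInBounds(64 * 1024, 64 * 1024));   // 0
}
```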
@@ -3571,7 +3550,6 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr
             return f.fail(indexExpr, "constant index out of range");

         *mask = NoMask;
-        *needsBoundsCheck = NO_BOUNDS_CHECK;
         return f.writeInt32Lit(byteOffset);
     }

@@ -3593,9 +3571,6 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr

         ParseNode* pointerNode = BitwiseLeft(indexExpr);

-        if (pointerNode->isKind(PNK_BITAND))
-            FoldMaskedArrayIndex(f, &pointerNode, mask, needsBoundsCheck);
-
         Type pointerType;
         if (!CheckExpr(f, pointerNode, &pointerType))
             return false;
@@ -3612,9 +3587,6 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr

         ParseNode* pointerNode = indexExpr;

-        if (pointerNode->isKind(PNK_BITAND))
-            folded = FoldMaskedArrayIndex(f, &pointerNode, mask, needsBoundsCheck);
-
         Type pointerType;
         if (!CheckExpr(f, pointerNode, &pointerType))
             return false;
@@ -3633,13 +3605,13 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr

 static bool
 CheckAndPrepareArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
-                           Scalar::Type* viewType, NeedsBoundsCheck* needsBoundsCheck, int32_t* mask)
+                           Scalar::Type* viewType, int32_t* mask)
 {
     size_t prepareAt;
     if (!f.encoder().writePatchableExpr(&prepareAt))
         return false;

-    if (!CheckArrayAccess(f, viewName, indexExpr, viewType, needsBoundsCheck, mask))
+    if (!CheckArrayAccess(f, viewName, indexExpr, viewType, mask))
         return false;

     // Don't generate the mask op if there is no need for it which could happen for
@@ -3657,17 +3629,13 @@ static bool
 CheckLoadArray(FunctionValidator& f, ParseNode* elem, Type* type)
 {
     Scalar::Type viewType;
-    NeedsBoundsCheck needsBoundsCheck;
     int32_t mask;

     size_t opcodeAt;
-    size_t needsBoundsCheckAt;
     if (!f.encoder().writePatchableExpr(&opcodeAt))
         return false;
-    if (!f.encoder().writePatchableU8(&needsBoundsCheckAt))
-        return false;

-    if (!CheckAndPrepareArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType, &needsBoundsCheck, &mask))
+    if (!CheckAndPrepareArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType, &mask))
         return false;

     switch (viewType) {
@@ -3682,8 +3650,6 @@ CheckLoadArray(FunctionValidator& f, ParseNode* elem, Type* type)
       default: MOZ_CRASH("unexpected scalar type");
     }

-    f.encoder().patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
-
     switch (viewType) {
       case Scalar::Int8:
       case Scalar::Int16:
@@ -3709,16 +3675,12 @@ static bool
 CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type)
 {
     size_t opcodeAt;
-    size_t needsBoundsCheckAt;
     if (!f.encoder().writePatchableExpr(&opcodeAt))
         return false;
-    if (!f.encoder().writePatchableU8(&needsBoundsCheckAt))
-        return false;

     Scalar::Type viewType;
-    NeedsBoundsCheck needsBoundsCheck;
     int32_t mask;
-    if (!CheckAndPrepareArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), &viewType, &needsBoundsCheck, &mask))
+    if (!CheckAndPrepareArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), &viewType, &mask))
         return false;

     Type rhsType;
@@ -3775,8 +3737,6 @@ CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type
       default: MOZ_CRASH("unexpected scalar type");
     }

-    f.encoder().patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
-
     *type = rhsType;
     return true;
 }
@@ -4015,10 +3975,9 @@ CheckMathMinMax(FunctionValidator& f, ParseNode* callNode, bool isMax, Type* typ

 static bool
 CheckSharedArrayAtomicAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
-                             Scalar::Type* viewType, NeedsBoundsCheck* needsBoundsCheck,
-                             int32_t* mask)
+                             Scalar::Type* viewType, int32_t* mask)
 {
-    if (!CheckAndPrepareArrayAccess(f, viewName, indexExpr, viewType, needsBoundsCheck, mask))
+    if (!CheckAndPrepareArrayAccess(f, viewName, indexExpr, viewType, mask))
         return false;

     // The global will be sane, CheckArrayAccess checks it.
@@ -4054,10 +4013,9 @@ CheckAtomicsFence(FunctionValidator& f, ParseNode* call, Type* type)
 }

 static bool
-WriteAtomicOperator(FunctionValidator& f, Expr opcode, size_t* needsBoundsCheckAt, size_t* viewTypeAt)
+WriteAtomicOperator(FunctionValidator& f, Expr opcode, size_t* viewTypeAt)
 {
     return f.encoder().writeExpr(opcode) &&
-           f.encoder().writePatchableU8(needsBoundsCheckAt) &&
           f.encoder().writePatchableU8(viewTypeAt);
 }

@@ -4070,18 +4028,15 @@ CheckAtomicsLoad(FunctionValidator& f, ParseNode* call, Type* type)
     ParseNode* arrayArg = CallArgList(call);
     ParseNode* indexArg = NextNode(arrayArg);

-    size_t needsBoundsCheckAt;
     size_t viewTypeAt;
-    if (!WriteAtomicOperator(f, Expr::I32AtomicsLoad, &needsBoundsCheckAt, &viewTypeAt))
+    if (!WriteAtomicOperator(f, Expr::I32AtomicsLoad, &viewTypeAt))
         return false;

     Scalar::Type viewType;
-    NeedsBoundsCheck needsBoundsCheck;
     int32_t mask;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &needsBoundsCheck, &mask))
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &mask))
         return false;

-    f.encoder().patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
     f.encoder().patchU8(viewTypeAt, uint8_t(viewType));

     *type = Type::Int;
@@ -4098,15 +4053,13 @@ CheckAtomicsStore(FunctionValidator& f, ParseNode* call, Type* type)
     ParseNode* indexArg = NextNode(arrayArg);
     ParseNode* valueArg = NextNode(indexArg);

-    size_t needsBoundsCheckAt;
     size_t viewTypeAt;
-    if (!WriteAtomicOperator(f, Expr::I32AtomicsStore, &needsBoundsCheckAt, &viewTypeAt))
+    if (!WriteAtomicOperator(f, Expr::I32AtomicsStore, &viewTypeAt))
         return false;

     Scalar::Type viewType;
-    NeedsBoundsCheck needsBoundsCheck;
     int32_t mask;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &needsBoundsCheck, &mask))
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &mask))
         return false;

     Type rhsType;
@@ -4116,7 +4069,6 @@ CheckAtomicsStore(FunctionValidator& f, ParseNode* call, Type* type)
     if (!rhsType.isIntish())
         return f.failf(arrayArg, "%s is not a subtype of intish", rhsType.toChars());

-    f.encoder().patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
     f.encoder().patchU8(viewTypeAt, uint8_t(viewType));

     *type = rhsType;
@@ -4133,17 +4085,15 @@ CheckAtomicsBinop(FunctionValidator& f, ParseNode* call, Type* type, AtomicOp op
     ParseNode* indexArg = NextNode(arrayArg);
     ParseNode* valueArg = NextNode(indexArg);

-    size_t needsBoundsCheckAt;
     size_t viewTypeAt;
-    if (!WriteAtomicOperator(f, Expr::I32AtomicsBinOp, &needsBoundsCheckAt, &viewTypeAt))
+    if (!WriteAtomicOperator(f, Expr::I32AtomicsBinOp, &viewTypeAt))
         return false;
     if (!f.encoder().writeU8(uint8_t(op)))
         return false;

     Scalar::Type viewType;
-    NeedsBoundsCheck needsBoundsCheck;
     int32_t mask;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &needsBoundsCheck, &mask))
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &mask))
         return false;

     Type valueArgType;
@@ -4153,7 +4103,6 @@ CheckAtomicsBinop(FunctionValidator& f, ParseNode* call, Type* type, AtomicOp op
     if (!valueArgType.isIntish())
         return f.failf(valueArg, "%s is not a subtype of intish", valueArgType.toChars());

-    f.encoder().patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
     f.encoder().patchU8(viewTypeAt, uint8_t(viewType));

     *type = Type::Int;
@@ -4187,15 +4136,13 @@ CheckAtomicsCompareExchange(FunctionValidator& f, ParseNode* call, Type* type)
     ParseNode* oldValueArg = NextNode(indexArg);
     ParseNode* newValueArg = NextNode(oldValueArg);

-    size_t needsBoundsCheckAt;
     size_t viewTypeAt;
-    if (!WriteAtomicOperator(f, Expr::I32AtomicsCompareExchange, &needsBoundsCheckAt, &viewTypeAt))
+    if (!WriteAtomicOperator(f, Expr::I32AtomicsCompareExchange, &viewTypeAt))
         return false;

     Scalar::Type viewType;
-    NeedsBoundsCheck needsBoundsCheck;
     int32_t mask;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &needsBoundsCheck, &mask))
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &mask))
         return false;

     Type oldValueArgType;
@@ -4212,7 +4159,6 @@ CheckAtomicsCompareExchange(FunctionValidator& f, ParseNode* call, Type* type)
     if (!newValueArgType.isIntish())
         return f.failf(newValueArg, "%s is not a subtype of intish", newValueArgType.toChars());

-    f.encoder().patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
     f.encoder().patchU8(viewTypeAt, uint8_t(viewType));

     *type = Type::Int;
@@ -4229,15 +4175,13 @@ CheckAtomicsExchange(FunctionValidator& f, ParseNode* call, Type* type)
     ParseNode* indexArg = NextNode(arrayArg);
     ParseNode* valueArg = NextNode(indexArg);

-    size_t needsBoundsCheckAt;
     size_t viewTypeAt;
-    if (!WriteAtomicOperator(f, Expr::I32AtomicsExchange, &needsBoundsCheckAt, &viewTypeAt))
+    if (!WriteAtomicOperator(f, Expr::I32AtomicsExchange, &viewTypeAt))
         return false;

     Scalar::Type viewType;
-    NeedsBoundsCheck needsBoundsCheck;
     int32_t mask;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &needsBoundsCheck, &mask))
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &mask))
         return false;

     Type valueArgType;
@@ -4247,7 +4191,6 @@ CheckAtomicsExchange(FunctionValidator& f, ParseNode* call, Type* type)
     if (!valueArgType.isIntish())
         return f.failf(arrayArg, "%s is not a subtype of intish", valueArgType.toChars());

-    f.encoder().patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
     f.encoder().patchU8(viewTypeAt, uint8_t(viewType));

     *type = Type::Int;
@@ -5099,12 +5042,9 @@ CheckSimdLoadStoreArgs(FunctionValidator& f, ParseNode* call)
     if (IsLiteralOrConstInt(f, indexExpr, &indexLit)) {
         if (!f.m().tryConstantAccess(indexLit, Simd128DataSize))
             return f.fail(indexExpr, "constant index out of range");
-        return f.encoder().writeU8(NO_BOUNDS_CHECK) && f.writeInt32Lit(indexLit);
+        return f.writeInt32Lit(indexLit);
     }

-    if (!f.encoder().writeU8(NEEDS_BOUNDS_CHECK))
-        return false;
-
     Type indexType;
     if (!CheckExpr(f, indexExpr, &indexType))
         return false;
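With the flag gone, the validator no longer reserves and patches a bounds-check byte before each heap access; only the view-type byte keeps the write-placeholder-then-patch pattern visible in the hunks above. A rough standalone model of that encoder pattern (the Encoder type here is a made-up stand-in, and the concrete view-type value is an assumption):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Rough model of the pattern above: reserve a byte now, fill it in once the
// value is known. The commit deletes one such patchable byte (the bounds-check
// flag) from every heap access in the asm.js bytecode stream.
struct Encoder {
    std::vector<uint8_t> bytes;

    size_t writePatchableU8() {              // reserve a slot, remember where it is
        bytes.push_back(0);
        return bytes.size() - 1;
    }
    void patchU8(size_t at, uint8_t value) { bytes[at] = value; }
    void writeU8(uint8_t value) { bytes.push_back(value); }
};

int main() {
    Encoder e;
    size_t viewTypeAt = e.writePatchableU8();  // the view type is still patched in
    e.writeU8(0x2A);                           // ...other operands follow...
    e.patchU8(viewTypeAt, 3);                  // assumed numeric value for a view type
    std::printf("%zu bytes\n", e.bytes.size());
}
```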
@@ -322,12 +322,6 @@ enum class Expr : uint16_t
     Limit
 };

-enum NeedsBoundsCheck : uint8_t
-{
-    NO_BOUNDS_CHECK,
-    NEEDS_BOUNDS_CHECK
-};
-
 typedef Vector<uint8_t, 0, SystemAllocPolicy> Bytecode;
 typedef UniquePtr<Bytecode> UniqueBytecode;

@@ -651,6 +651,15 @@ ModuleGenerator::initFuncSig(uint32_t funcIndex, uint32_t sigIndex)
     return true;
 }

+void
+ModuleGenerator::bumpMinHeapLength(uint32_t newMinHeapLength)
+{
+    MOZ_ASSERT(isAsmJS());
+    MOZ_ASSERT(newMinHeapLength >= shared_->minHeapLength);
+
+    shared_->minHeapLength = newMinHeapLength;
+}
+
 const DeclaredSig&
 ModuleGenerator::funcSig(uint32_t funcIndex) const
 {
@@ -99,6 +99,7 @@ struct ModuleGeneratorData
     CompileArgs args;
     ModuleKind kind;
     uint32_t numTableElems;
+    mozilla::Atomic<uint32_t> minHeapLength;

     DeclaredSigVector sigs;
     TableModuleGeneratorDataVector sigToTable;
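minHeapLength is declared as mozilla::Atomic<uint32_t>, presumably because the main thread keeps bumping it during validation while helper compilation threads read it through ModuleGeneratorThreadView; a stale, smaller snapshot only keeps a bounds check that could have been removed, it never removes one unsafely. A rough standalone analogue using std::atomic (an assumption on my part, not mozilla::Atomic itself):

```cpp
#include <atomic>
#include <cstdint>
#include <thread>

// The writer (validation) only ever increases the value; readers (compilation
// helpers) take a snapshot, and a smaller snapshot is merely conservative.
struct SharedGeneratorData {
    std::atomic<uint32_t> minHeapLength{0};
};

int main() {
    SharedGeneratorData shared;

    std::thread validator([&] {
        for (uint32_t len : {4096u, 65536u, 1u << 20})
            shared.minHeapLength.store(len, std::memory_order_relaxed);
    });

    std::thread compiler([&] {
        // Reading a value that is still growing can only keep a bounds check,
        // never drop one incorrectly.
        uint32_t snapshot = shared.minHeapLength.load(std::memory_order_relaxed);
        (void)snapshot;
    });

    validator.join();
    compiler.join();
}
```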
@@ -111,7 +112,7 @@ struct ModuleGeneratorData
     }

     explicit ModuleGeneratorData(ExclusiveContext* cx, ModuleKind kind = ModuleKind::Wasm)
-      : args(cx), kind(kind), numTableElems(0)
+      : args(cx), kind(kind), numTableElems(0), minHeapLength(0)
     {}
 };

@@ -140,6 +141,9 @@ class ModuleGeneratorThreadView
         MOZ_ASSERT(!isAsmJS());
         return shared_.numTableElems;
     }
+    uint32_t minHeapLength() const {
+        return shared_.minHeapLength;
+    }
     const DeclaredSig& sig(uint32_t sigIndex) const {
         return shared_.sigs[sigIndex];
     }
@@ -258,6 +262,7 @@ class MOZ_STACK_CLASS ModuleGenerator
     bool initImport(uint32_t importIndex, uint32_t sigIndex);
     bool initSigTableLength(uint32_t sigIndex, uint32_t numElems);
     void initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices);
+    void bumpMinHeapLength(uint32_t newMinHeapLength);

     // asm.js global variables:
     bool allocateGlobalVar(ValType type, bool isConst, uint32_t* index);
@@ -479,53 +479,46 @@ class FunctionCompiler
         curBlock_->setSlot(info().localSlot(slot), def);
     }

-    MDefinition* loadHeap(Scalar::Type accessType, MDefinition* ptr, NeedsBoundsCheck chk)
+    MDefinition* loadHeap(Scalar::Type accessType, MDefinition* ptr)
     {
         if (inDeadCode())
             return nullptr;

-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
         MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD loads should use loadSimdHeap");
-        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, needsBoundsCheck);
+        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr);
         curBlock_->add(load);
         return load;
     }

-    MDefinition* loadSimdHeap(Scalar::Type accessType, MDefinition* ptr, NeedsBoundsCheck chk,
-                              unsigned numElems)
+    MDefinition* loadSimdHeap(Scalar::Type accessType, MDefinition* ptr, unsigned numElems)
     {
         if (inDeadCode())
             return nullptr;

-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
         MOZ_ASSERT(Scalar::isSimdType(accessType), "loadSimdHeap can only load from a SIMD view");
-        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, needsBoundsCheck,
-                                                   numElems);
+        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, numElems);
         curBlock_->add(load);
         return load;
     }

-    void storeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v, NeedsBoundsCheck chk)
+    void storeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v)
     {
         if (inDeadCode())
             return;

-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
         MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD stores should use storeSimdHeap");
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, needsBoundsCheck);
+        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v);
         curBlock_->add(store);
     }

     void storeSimdHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v,
-                       NeedsBoundsCheck chk, unsigned numElems)
+                       unsigned numElems)
     {
         if (inDeadCode())
             return;

-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
         MOZ_ASSERT(Scalar::isSimdType(accessType), "storeSimdHeap can only load from a SIMD view");
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, needsBoundsCheck,
-                                                      numElems);
+        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, numElems);
         curBlock_->add(store);
     }

@@ -537,66 +530,59 @@ class FunctionCompiler
         curBlock_->add(ins);
     }

-    MDefinition* atomicLoadHeap(Scalar::Type accessType, MDefinition* ptr, NeedsBoundsCheck chk)
+    MDefinition* atomicLoadHeap(Scalar::Type accessType, MDefinition* ptr)
     {
         if (inDeadCode())
             return nullptr;

-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
-        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, needsBoundsCheck,
-                                                   /* numElems */ 0,
+        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, /* numElems */ 0,
                                                    MembarBeforeLoad, MembarAfterLoad);
         curBlock_->add(load);
         return load;
     }

-    void atomicStoreHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v, NeedsBoundsCheck chk)
+    void atomicStoreHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v)
     {
         if (inDeadCode())
             return;

-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, needsBoundsCheck,
+        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v,
                                                       /* numElems = */ 0,
                                                       MembarBeforeStore, MembarAfterStore);
         curBlock_->add(store);
     }

     MDefinition* atomicCompareExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* oldv,
-                                           MDefinition* newv, NeedsBoundsCheck chk)
+                                           MDefinition* newv)
     {
         if (inDeadCode())
             return nullptr;

-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
         MAsmJSCompareExchangeHeap* cas =
-            MAsmJSCompareExchangeHeap::New(alloc(), accessType, ptr, oldv, newv, needsBoundsCheck);
+            MAsmJSCompareExchangeHeap::New(alloc(), accessType, ptr, oldv, newv);
         curBlock_->add(cas);
         return cas;
     }

-    MDefinition* atomicExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* value,
-                                    NeedsBoundsCheck chk)
+    MDefinition* atomicExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* value)
     {
         if (inDeadCode())
             return nullptr;

-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
         MAsmJSAtomicExchangeHeap* cas =
-            MAsmJSAtomicExchangeHeap::New(alloc(), accessType, ptr, value, needsBoundsCheck);
+            MAsmJSAtomicExchangeHeap::New(alloc(), accessType, ptr, value);
         curBlock_->add(cas);
         return cas;
     }

     MDefinition* atomicBinopHeap(js::jit::AtomicOp op, Scalar::Type accessType, MDefinition* ptr,
-                                 MDefinition* v, NeedsBoundsCheck chk)
+                                 MDefinition* v)
     {
         if (inDeadCode())
             return nullptr;

-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
         MAsmJSAtomicBinopHeap* binop =
-            MAsmJSAtomicBinopHeap::New(alloc(), op, accessType, ptr, v, needsBoundsCheck);
+            MAsmJSAtomicBinopHeap::New(alloc(), op, accessType, ptr, v);
         curBlock_->add(binop);
         return binop;
     }
@@ -1437,19 +1423,16 @@ static bool EmitExprStmt(FunctionCompiler&, MDefinition**, LabelVector* = nullpt
 static bool
 EmitLoadArray(FunctionCompiler& f, Scalar::Type scalarType, MDefinition** def)
 {
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
     MDefinition* ptr;
     if (!EmitExpr(f, ExprType::I32, &ptr))
         return false;
-    *def = f.loadHeap(scalarType, ptr, needsBoundsCheck);
+    *def = f.loadHeap(scalarType, ptr);
     return true;
 }

 static bool
 EmitStore(FunctionCompiler& f, Scalar::Type viewType, MDefinition** def)
 {
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-
     MDefinition* ptr;
     if (!EmitExpr(f, ExprType::I32, &ptr))
         return false;
@@ -1473,7 +1456,7 @@ EmitStore(FunctionCompiler& f, Scalar::Type viewType, MDefinition** def)
       default: MOZ_CRASH("unexpected scalar type");
     }

-    f.storeHeap(viewType, ptr, rhs, needsBoundsCheck);
+    f.storeHeap(viewType, ptr, rhs);
     *def = rhs;
     return true;
 }
@@ -1482,7 +1465,6 @@ static bool
 EmitStoreWithCoercion(FunctionCompiler& f, Scalar::Type rhsType, Scalar::Type viewType,
                       MDefinition **def)
 {
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
     MDefinition* ptr;
     if (!EmitExpr(f, ExprType::I32, &ptr))
         return false;
@@ -1501,7 +1483,7 @@ EmitStoreWithCoercion(FunctionCompiler& f, Scalar::Type rhsType, Scalar::Type vi
         MOZ_CRASH("unexpected coerced store");
     }

-    f.storeHeap(viewType, ptr, coerced, needsBoundsCheck);
+    f.storeHeap(viewType, ptr, coerced);
     *def = rhs;
     return true;
 }
@@ -1553,19 +1535,17 @@ EmitMathMinMax(FunctionCompiler& f, ExprType type, bool isMax, MDefinition** def
 static bool
 EmitAtomicsLoad(FunctionCompiler& f, MDefinition** def)
 {
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
     Scalar::Type viewType = Scalar::Type(f.readU8());
     MDefinition* index;
     if (!EmitExpr(f, ExprType::I32, &index))
         return false;
-    *def = f.atomicLoadHeap(viewType, index, needsBoundsCheck);
+    *def = f.atomicLoadHeap(viewType, index);
     return true;
 }

 static bool
 EmitAtomicsStore(FunctionCompiler& f, MDefinition** def)
 {
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
     Scalar::Type viewType = Scalar::Type(f.readU8());
     MDefinition* index;
     if (!EmitExpr(f, ExprType::I32, &index))
@@ -1573,7 +1553,7 @@ EmitAtomicsStore(FunctionCompiler& f, MDefinition** def)
     MDefinition* value;
     if (!EmitExpr(f, ExprType::I32, &value))
         return false;
-    f.atomicStoreHeap(viewType, index, value, needsBoundsCheck);
+    f.atomicStoreHeap(viewType, index, value);
     *def = value;
     return true;
 }
@@ -1581,7 +1561,6 @@ EmitAtomicsStore(FunctionCompiler& f, MDefinition** def)
 static bool
 EmitAtomicsBinOp(FunctionCompiler& f, MDefinition** def)
 {
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
     Scalar::Type viewType = Scalar::Type(f.readU8());
     js::jit::AtomicOp op = js::jit::AtomicOp(f.readU8());
     MDefinition* index;
@@ -1590,14 +1569,13 @@ EmitAtomicsBinOp(FunctionCompiler& f, MDefinition** def)
     MDefinition* value;
     if (!EmitExpr(f, ExprType::I32, &value))
         return false;
-    *def = f.atomicBinopHeap(op, viewType, index, value, needsBoundsCheck);
+    *def = f.atomicBinopHeap(op, viewType, index, value);
     return true;
 }

 static bool
 EmitAtomicsCompareExchange(FunctionCompiler& f, MDefinition** def)
 {
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
     Scalar::Type viewType = Scalar::Type(f.readU8());
     MDefinition* index;
     if (!EmitExpr(f, ExprType::I32, &index))
@@ -1608,14 +1586,13 @@ EmitAtomicsCompareExchange(FunctionCompiler& f, MDefinition** def)
     MDefinition* newValue;
     if (!EmitExpr(f, ExprType::I32, &newValue))
         return false;
-    *def = f.atomicCompareExchangeHeap(viewType, index, oldValue, newValue, needsBoundsCheck);
+    *def = f.atomicCompareExchangeHeap(viewType, index, oldValue, newValue);
     return true;
 }

 static bool
 EmitAtomicsExchange(FunctionCompiler& f, MDefinition** def)
 {
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
     Scalar::Type viewType = Scalar::Type(f.readU8());
     MDefinition* index;
     if (!EmitExpr(f, ExprType::I32, &index))
@@ -1623,7 +1600,7 @@ EmitAtomicsExchange(FunctionCompiler& f, MDefinition** def)
     MDefinition* value;
     if (!EmitExpr(f, ExprType::I32, &value))
         return false;
-    *def = f.atomicExchangeHeap(viewType, index, value, needsBoundsCheck);
+    *def = f.atomicExchangeHeap(viewType, index, value);
     return true;
 }

@@ -1992,13 +1969,11 @@ EmitSimdLoad(FunctionCompiler& f, ExprType type, unsigned numElems, MDefinition*
     if (!numElems)
         numElems = defaultNumElems;

-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-
     MDefinition* index;
     if (!EmitExpr(f, ExprType::I32, &index))
         return false;

-    *def = f.loadSimdHeap(viewType, index, needsBoundsCheck, numElems);
+    *def = f.loadSimdHeap(viewType, index, numElems);
     return true;
 }

@@ -2011,8 +1986,6 @@ EmitSimdStore(FunctionCompiler& f, ExprType type, unsigned numElems, MDefinition
     if (!numElems)
         numElems = defaultNumElems;

-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-
     MDefinition* index;
     if (!EmitExpr(f, ExprType::I32, &index))
         return false;
@@ -2021,7 +1994,7 @@ EmitSimdStore(FunctionCompiler& f, ExprType type, unsigned numElems, MDefinition
     if (!EmitExpr(f, type, &vec))
         return false;

-    f.storeSimdHeap(viewType, index, vec, needsBoundsCheck, numElems);
+    f.storeSimdHeap(viewType, index, vec, numElems);
     *def = vec;
     return true;
 }
@@ -3070,8 +3043,9 @@ wasm::IonCompileFunction(IonCompileTask* task)
     MIRGraph graph(&results.alloc());
     CompileInfo compileInfo(func.numLocals());
     MIRGenerator mir(nullptr, options, &results.alloc(), &graph, &compileInfo,
-                     IonOptimizations.get(OptimizationLevel::AsmJS),
-                     task->mg().args().useSignalHandlersForOOB);
+                     IonOptimizations.get(OptimizationLevel::AsmJS));
+    mir.initUsesSignalHandlersForAsmJSOOB(task->mg().args().useSignalHandlersForOOB);
+    mir.initMinAsmJSHeapLength(task->mg().minHeapLength());

     // Build MIR graph
     {
@@ -148,6 +148,14 @@ EffectiveAddressAnalysis::analyzeAsmHeapAccess(MAsmJSHeapAccessType* ins)
             ins->block()->insertBefore(ins, zero);
             ins->replacePtr(zero);
         }
+
+        // If the index is within the minimum heap length, we can optimize
+        // away the bounds check.
+        if (imm >= 0) {
+            int32_t end = (uint32_t)imm + ins->byteSize();
+            if (end >= imm && (uint32_t)end <= mir_->minAsmJSHeapLength())
+                ins->removeBoundsCheck();
+        }
     } else if (ptr->isAdd()) {
         // Look for heap[a+i] where i is a constant offset, and fold the offset.
         // Alignment masks have already been moved out of the way by the
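The added backend test mirrors the arithmetic the frontend used to do: a constant-index access touches [imm, imm + byteSize), and when that whole range provably fits under the minimum heap length the bounds check can be dropped. A small worked example with made-up numbers (not the MIR pass itself):

```cpp
#include <cstdint>
#include <cstdio>

// Same shape as the added test in analyzeAsmHeapAccess: compute the end of the
// access and compare it against the minimum heap length, watching for overflow.
bool canRemoveBoundsCheck(int32_t imm, uint32_t byteSize, uint32_t minHeapLength) {
    if (imm < 0)
        return false;
    int32_t end = int32_t(uint32_t(imm) + byteSize);        // may wrap
    return end >= imm && uint32_t(end) <= minHeapLength;    // wrap check + range check
}

int main() {
    // A Float64 load at byte offset 4088 against a 4096-byte minimum heap covers
    // bytes [4088, 4096), so the check can go away.
    std::printf("%d\n", canRemoveBoundsCheck(4088, 8, 4096));   // 1
    // The same load at 4092 would reach byte 4100, past the guaranteed heap.
    std::printf("%d\n", canRemoveBoundsCheck(4092, 8, 4096));   // 0
}
```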
@@ -13715,12 +13715,12 @@ class MAsmJSHeapAccess
     MemoryBarrierBits barrierAfter_;

   public:
-    MAsmJSHeapAccess(Scalar::Type accessType, bool needsBoundsCheck, unsigned numSimdElems = 0,
-                     MemoryBarrierBits barrierBefore = MembarNobits,
-                     MemoryBarrierBits barrierAfter = MembarNobits)
+    explicit MAsmJSHeapAccess(Scalar::Type accessType, unsigned numSimdElems = 0,
+                              MemoryBarrierBits barrierBefore = MembarNobits,
+                              MemoryBarrierBits barrierAfter = MembarNobits)
       : offset_(0),
         accessType_(accessType),
-        needsBoundsCheck_(needsBoundsCheck),
+        needsBoundsCheck_(true),
         numSimdElems_(numSimdElems),
         barrierBefore_(barrierBefore),
         barrierAfter_(barrierAfter)
@@ -13753,10 +13753,10 @@ class MAsmJSLoadHeap
     public MAsmJSHeapAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSLoadHeap(Scalar::Type accessType, MDefinition* ptr, bool needsBoundsCheck,
-                   unsigned numSimdElems, MemoryBarrierBits before, MemoryBarrierBits after)
+    MAsmJSLoadHeap(Scalar::Type accessType, MDefinition* ptr, unsigned numSimdElems,
+                   MemoryBarrierBits before, MemoryBarrierBits after)
       : MUnaryInstruction(ptr),
-        MAsmJSHeapAccess(accessType, needsBoundsCheck, numSimdElems, before, after)
+        MAsmJSHeapAccess(accessType, numSimdElems, before, after)
     {
         if (before|after)
             setGuard(); // Not removable
@@ -13794,13 +13794,12 @@ class MAsmJSLoadHeap
     INSTRUCTION_HEADER(AsmJSLoadHeap)

     static MAsmJSLoadHeap* New(TempAllocator& alloc, Scalar::Type accessType,
-                               MDefinition* ptr, bool needsBoundsCheck,
-                               unsigned numSimdElems = 0,
+                               MDefinition* ptr, unsigned numSimdElems = 0,
                                MemoryBarrierBits barrierBefore = MembarNobits,
                                MemoryBarrierBits barrierAfter = MembarNobits)
     {
-        return new(alloc) MAsmJSLoadHeap(accessType, ptr, needsBoundsCheck,
-                                         numSimdElems, barrierBefore, barrierAfter);
+        return new(alloc) MAsmJSLoadHeap(accessType, ptr, numSimdElems,
+                                         barrierBefore, barrierAfter);
     }

     MDefinition* ptr() const { return getOperand(0); }
@@ -13822,10 +13821,10 @@ class MAsmJSStoreHeap
     public MAsmJSHeapAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSStoreHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v, bool needsBoundsCheck,
+    MAsmJSStoreHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v,
                     unsigned numSimdElems, MemoryBarrierBits before, MemoryBarrierBits after)
       : MBinaryInstruction(ptr, v),
-        MAsmJSHeapAccess(accessType, needsBoundsCheck, numSimdElems, before, after)
+        MAsmJSHeapAccess(accessType, numSimdElems, before, after)
     {
         if (before|after)
             setGuard(); // Not removable
@@ -13835,13 +13834,12 @@ class MAsmJSStoreHeap
     INSTRUCTION_HEADER(AsmJSStoreHeap)

     static MAsmJSStoreHeap* New(TempAllocator& alloc, Scalar::Type accessType,
-                                MDefinition* ptr, MDefinition* v, bool needsBoundsCheck,
-                                unsigned numSimdElems = 0,
+                                MDefinition* ptr, MDefinition* v, unsigned numSimdElems = 0,
                                 MemoryBarrierBits barrierBefore = MembarNobits,
                                 MemoryBarrierBits barrierAfter = MembarNobits)
     {
-        return new(alloc) MAsmJSStoreHeap(accessType, ptr, v, needsBoundsCheck,
-                                          numSimdElems, barrierBefore, barrierAfter);
+        return new(alloc) MAsmJSStoreHeap(accessType, ptr, v, numSimdElems,
+                                          barrierBefore, barrierAfter);
     }

     MDefinition* ptr() const { return getOperand(0); }
@@ -13859,9 +13857,9 @@ class MAsmJSCompareExchangeHeap
     public NoTypePolicy::Data
 {
     MAsmJSCompareExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* oldv,
-                              MDefinition* newv, bool needsBoundsCheck)
+                              MDefinition* newv)
       : MTernaryInstruction(ptr, oldv, newv),
-        MAsmJSHeapAccess(accessType, needsBoundsCheck)
+        MAsmJSHeapAccess(accessType)
     {
         setGuard(); // Not removable
         setResultType(MIRType_Int32);
@@ -13872,9 +13870,9 @@ class MAsmJSCompareExchangeHeap

     static MAsmJSCompareExchangeHeap* New(TempAllocator& alloc, Scalar::Type accessType,
                                           MDefinition* ptr, MDefinition* oldv,
-                                          MDefinition* newv, bool needsBoundsCheck)
+                                          MDefinition* newv)
     {
-        return new(alloc) MAsmJSCompareExchangeHeap(accessType, ptr, oldv, newv, needsBoundsCheck);
+        return new(alloc) MAsmJSCompareExchangeHeap(accessType, ptr, oldv, newv);
     }

     MDefinition* ptr() const { return getOperand(0); }
@@ -13891,10 +13889,9 @@ class MAsmJSAtomicExchangeHeap
     public MAsmJSHeapAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSAtomicExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* value,
-                             bool needsBoundsCheck)
+    MAsmJSAtomicExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* value)
      : MBinaryInstruction(ptr, value),
-        MAsmJSHeapAccess(accessType, needsBoundsCheck)
+        MAsmJSHeapAccess(accessType)
     {
         setGuard(); // Not removable
         setResultType(MIRType_Int32);
@@ -13904,10 +13901,9 @@ class MAsmJSAtomicExchangeHeap
     INSTRUCTION_HEADER(AsmJSAtomicExchangeHeap)

     static MAsmJSAtomicExchangeHeap* New(TempAllocator& alloc, Scalar::Type accessType,
-                                         MDefinition* ptr, MDefinition* value,
-                                         bool needsBoundsCheck)
+                                         MDefinition* ptr, MDefinition* value)
     {
-        return new(alloc) MAsmJSAtomicExchangeHeap(accessType, ptr, value, needsBoundsCheck);
+        return new(alloc) MAsmJSAtomicExchangeHeap(accessType, ptr, value);
     }

     MDefinition* ptr() const { return getOperand(0); }
@@ -13925,10 +13921,9 @@ class MAsmJSAtomicBinopHeap
 {
     AtomicOp op_;

-    MAsmJSAtomicBinopHeap(AtomicOp op, Scalar::Type accessType, MDefinition* ptr, MDefinition* v,
-                          bool needsBoundsCheck)
+    MAsmJSAtomicBinopHeap(AtomicOp op, Scalar::Type accessType, MDefinition* ptr, MDefinition* v)
       : MBinaryInstruction(ptr, v),
-        MAsmJSHeapAccess(accessType, needsBoundsCheck),
+        MAsmJSHeapAccess(accessType),
         op_(op)
     {
         setGuard(); // Not removable
@@ -13939,9 +13934,9 @@ class MAsmJSAtomicBinopHeap
     INSTRUCTION_HEADER(AsmJSAtomicBinopHeap)

     static MAsmJSAtomicBinopHeap* New(TempAllocator& alloc, AtomicOp op, Scalar::Type accessType,
-                                      MDefinition* ptr, MDefinition* v, bool needsBoundsCheck)
+                                      MDefinition* ptr, MDefinition* v)
     {
-        return new(alloc) MAsmJSAtomicBinopHeap(op, accessType, ptr, v, needsBoundsCheck);
+        return new(alloc) MAsmJSAtomicBinopHeap(op, accessType, ptr, v);
     }

     AtomicOp operation() const { return op_; }
@@ -37,8 +37,16 @@ class MIRGenerator
   public:
     MIRGenerator(CompileCompartment* compartment, const JitCompileOptions& options,
                  TempAllocator* alloc, MIRGraph* graph,
-                 const CompileInfo* info, const OptimizationInfo* optimizationInfo,
-                 bool usesSignalHandlersForAsmJSOOB = false);
+                 const CompileInfo* info, const OptimizationInfo* optimizationInfo);
+
+    void initUsesSignalHandlersForAsmJSOOB(bool init) {
+#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
+        usesSignalHandlersForAsmJSOOB_ = init;
+#endif
+    }
+    void initMinAsmJSHeapLength(uint32_t init) {
+        minAsmJSHeapLength_ = init;
+    }

     TempAllocator& alloc() {
         return *alloc_;
@@ -141,6 +149,9 @@ class MIRGenerator
         MOZ_ASSERT(compilingAsmJS());
         maxAsmJSStackArgBytes_ = n;
     }
+    uint32_t minAsmJSHeapLength() const {
+        return minAsmJSHeapLength_;
+    }
     void setPerformsCall() {
         performsCall_ = true;
     }
@@ -197,6 +208,7 @@ class MIRGenerator
 #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
     bool usesSignalHandlersForAsmJSOOB_;
 #endif
+    uint32_t minAsmJSHeapLength_;

     void setForceAbort() {
         shouldForceAbort_ = true;
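The MIRGenerator constructor loses its asm.js-only parameter; callers construct first and then call the new init setters, as the IonCompileFunction hunk shows. A trimmed, hypothetical illustration of the same construct-then-init pattern (not the real class):

```cpp
#include <cstdint>

// Optional, asm.js-only configuration moves out of the constructor into
// explicit init calls made right after construction.
class Generator {
  public:
    void initUsesSignalHandlersForOOB(bool b) { usesSignalHandlersForOOB_ = b; }
    void initMinHeapLength(uint32_t len) { minHeapLength_ = len; }
    uint32_t minHeapLength() const { return minHeapLength_; }

  private:
    bool usesSignalHandlersForOOB_ = false;   // defaults cover non-asm.js compilations
    uint32_t minHeapLength_ = 0;
};

int main() {
    Generator gen;                        // constructed with defaults
    gen.initUsesSignalHandlersForOOB(true);
    gen.initMinHeapLength(1 << 16);       // only asm.js compilations set this
    return gen.minHeapLength() == (1 << 16) ? 0 : 1;
}
```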
@@ -19,8 +19,7 @@ using mozilla::Swap;

 MIRGenerator::MIRGenerator(CompileCompartment* compartment, const JitCompileOptions& options,
                            TempAllocator* alloc, MIRGraph* graph, const CompileInfo* info,
-                           const OptimizationInfo* optimizationInfo,
-                           bool usesSignalHandlersForAsmJSOOB)
+                           const OptimizationInfo* optimizationInfo)
   : compartment(compartment),
     info_(info),
     optimizationInfo_(optimizationInfo),
@@ -41,8 +40,9 @@ MIRGenerator::MIRGenerator(CompileCompartment* compartment, const JitCompileOpti
     instrumentedProfilingIsCached_(false),
     safeForMinorGC_(true),
 #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-    usesSignalHandlersForAsmJSOOB_(usesSignalHandlersForAsmJSOOB),
+    usesSignalHandlersForAsmJSOOB_(false),
 #endif
+    minAsmJSHeapLength_(0),
     options(options),
     gs_(alloc)
 { }