Bug 1073096 - Support for Odin and asm.js. r=luke
parent 42ede852fa
commit afc67ba730
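In short, this patch teaches Odin (the asm.js compiler) to accept a SharedArrayBuffer-backed heap and the Atomics builtins, validating them during compilation and linking and lowering them to new atomic MIR/LIR heap operations. A minimal sketch of the source pattern involved — the module and function names here are illustrative, not taken from the patch:

    function AtomicModule(stdlib, foreign, heap) {
        "use asm";
        // Imports of this shape are what the new validation code recognizes.
        var cas = stdlib.Atomics.compareExchange;
        var i32a = new stdlib.SharedInt32Array(heap);
        function lock() {
            // Spin until element 0 transitions 0 -> 1.
            while ((cas(i32a, 0, 0, 1)|0) != 0) {}
        }
        return { lock: lock };
    }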
@@ -30,6 +30,7 @@
 #include "jswrapper.h"
 
 #include "asmjs/AsmJSModule.h"
+#include "builtin/AtomicsObject.h"
 #include "builtin/SIMD.h"
 #include "frontend/BytecodeCompiler.h"
 #include "jit/Ion.h"
@@ -219,7 +220,7 @@ ValidateFFI(JSContext *cx, AsmJSModule::Global &global, HandleValue importVal,
 }
 
 static bool
-ValidateArrayView(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal)
+ValidateArrayView(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal, bool isShared)
 {
     RootedPropertyName field(cx, global.maybeViewName());
     if (!field)
@@ -229,11 +230,10 @@ ValidateArrayView(JSContext *cx, AsmJSModule::Global &global, HandleValue global
     if (!GetDataProperty(cx, globalVal, field, &v))
         return false;
 
-    if (!IsTypedArrayConstructor(v, global.viewType()) &&
-        !IsSharedTypedArrayConstructor(v, global.viewType()))
-    {
+    bool tac = IsTypedArrayConstructor(v, global.viewType());
+    bool stac = IsSharedTypedArrayConstructor(v, global.viewType());
+    if (!((tac || stac) && stac == isShared))
         return LinkFail(cx, "bad typed array constructor");
-    }
 
     return true;
 }
@@ -406,6 +406,35 @@ ValidateSimdOperation(JSContext *cx, AsmJSModule::Global &global, HandleValue gl
     return true;
 }
 
 static bool
+ValidateAtomicsBuiltinFunction(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal)
+{
+    RootedValue v(cx);
+    if (!GetDataProperty(cx, globalVal, cx->names().Atomics, &v))
+        return false;
+    RootedPropertyName field(cx, global.atomicsName());
+    if (!GetDataProperty(cx, v, field, &v))
+        return false;
+
+    Native native = nullptr;
+    switch (global.atomicsBuiltinFunction()) {
+      case AsmJSAtomicsBuiltin_compareExchange: native = atomics_compareExchange; break;
+      case AsmJSAtomicsBuiltin_load: native = atomics_load; break;
+      case AsmJSAtomicsBuiltin_store: native = atomics_store; break;
+      case AsmJSAtomicsBuiltin_fence: native = atomics_fence; break;
+      case AsmJSAtomicsBuiltin_add: native = atomics_add; break;
+      case AsmJSAtomicsBuiltin_sub: native = atomics_sub; break;
+      case AsmJSAtomicsBuiltin_and: native = atomics_and; break;
+      case AsmJSAtomicsBuiltin_or: native = atomics_or; break;
+      case AsmJSAtomicsBuiltin_xor: native = atomics_xor; break;
+    }
+
+    if (!IsNativeFunction(v, native))
+        return LinkFail(cx, "bad Atomics.* builtin function");
+
+    return true;
+}
+
+static bool
 ValidateConstant(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal)
 {
@@ -533,8 +562,9 @@ DynamicallyLinkModule(JSContext *cx, CallArgs args, AsmJSModule &module)
             return false;
         break;
       case AsmJSModule::Global::ArrayView:
+      case AsmJSModule::Global::SharedArrayView:
      case AsmJSModule::Global::ArrayViewCtor:
-        if (!ValidateArrayView(cx, global, globalVal))
+        if (!ValidateArrayView(cx, global, globalVal, module.hasArrayView() && module.isSharedView()))
             return false;
         break;
       case AsmJSModule::Global::ByteLength:
@@ -545,6 +575,10 @@ DynamicallyLinkModule(JSContext *cx, CallArgs args, AsmJSModule &module)
         if (!ValidateMathBuiltinFunction(cx, global, globalVal))
             return false;
         break;
+      case AsmJSModule::Global::AtomicsBuiltinFunction:
+        if (!ValidateAtomicsBuiltinFunction(cx, global, globalVal))
+            return false;
+        break;
       case AsmJSModule::Global::Constant:
         if (!ValidateConstant(cx, global, globalVal))
             return false;
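The link-time effect of the ValidateArrayView change above: a module validated against shared views must be handed the genuinely shared constructors, or linking fails and the code runs as ordinary JS. A hypothetical illustration, reusing the AtomicModule sketch from the top (the stdlib object is deliberately wrong):

    var buf = new SharedArrayBuffer(65536);
    // Unshared ctor under the shared name: ValidateArrayView now computes
    // tac=true, stac=false with isShared=true, so linking fails with
    // "bad typed array constructor" and the module falls back to plain JS.
    var m = AtomicModule({ SharedInt32Array: Int32Array, Atomics: Atomics }, {}, buf);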
@@ -778,12 +778,13 @@ AsmJSModule::initHeap(Handle<ArrayBufferObjectMaybeShared *> heap, JSContext *cx
         X86Assembler::setPointer(addr, (void *)(heapOffset + disp));
     }
 #elif defined(JS_CODEGEN_X64)
-    if (usesSignalHandlersForOOB())
-        return;
-    // If we cannot use the signal handlers, we need to patch the heap length
+    // Even with signal handling being used for most bounds checks, there may be
+    // atomic operations that depend on explicit checks.
+    //
+    // If we have any explicit bounds checks, we need to patch the heap length
     // checks at the right places. All accesses that have been recorded are the
     // only ones that need bound checks (see also
-    // CodeGeneratorX64::visitAsmJS{Load,Store}Heap)
+    // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,AtomicBinop}Heap)
     int32_t heapLength = int32_t(intptr_t(heap->byteLength()));
     for (size_t i = 0; i < heapAccesses_.length(); i++) {
         const jit::AsmJSHeapAccess &access = heapAccesses_[i];
@@ -66,6 +66,20 @@ enum AsmJSMathBuiltinFunction
     AsmJSMathBuiltin_clz32
 };
 
+// The asm.js spec will recognize this set of builtin Atomics functions.
+enum AsmJSAtomicsBuiltinFunction
+{
+    AsmJSAtomicsBuiltin_compareExchange,
+    AsmJSAtomicsBuiltin_load,
+    AsmJSAtomicsBuiltin_store,
+    AsmJSAtomicsBuiltin_fence,
+    AsmJSAtomicsBuiltin_add,
+    AsmJSAtomicsBuiltin_sub,
+    AsmJSAtomicsBuiltin_and,
+    AsmJSAtomicsBuiltin_or,
+    AsmJSAtomicsBuiltin_xor
+};
+
 // Set of known global object SIMD's attributes, i.e. types
 enum AsmJSSimdType
 {
@@ -200,8 +214,8 @@ class AsmJSModule
     class Global
     {
       public:
-        enum Which { Variable, FFI, ArrayView, ArrayViewCtor, MathBuiltinFunction, Constant,
-                     SimdCtor, SimdOperation, ByteLength };
+        enum Which { Variable, FFI, ArrayView, ArrayViewCtor, SharedArrayView, MathBuiltinFunction,
+                     AtomicsBuiltinFunction, Constant, SimdCtor, SimdOperation, ByteLength };
         enum VarInitKind { InitConstant, InitImport };
         enum ConstantKind { GlobalConstant, MathConstant };
 
@@ -220,6 +234,7 @@ class AsmJSModule
             uint32_t ffiIndex_;
             Scalar::Type viewType_;
             AsmJSMathBuiltinFunction mathBuiltinFunc_;
+            AsmJSAtomicsBuiltinFunction atomicsBuiltinFunc_;
             AsmJSSimdType simdCtorType_;
             struct {
                 AsmJSSimdType type_;
@@ -289,21 +304,29 @@ class AsmJSModule
         //   var i32 = new I32(buffer);
         // the second import has nothing to validate and thus has a null field.
         PropertyName *maybeViewName() const {
-            MOZ_ASSERT(pod.which_ == ArrayView || pod.which_ == ArrayViewCtor);
+            MOZ_ASSERT(pod.which_ == ArrayView || pod.which_ == SharedArrayView || pod.which_ == ArrayViewCtor);
             return name_;
         }
         Scalar::Type viewType() const {
-            MOZ_ASSERT(pod.which_ == ArrayView || pod.which_ == ArrayViewCtor);
+            MOZ_ASSERT(pod.which_ == ArrayView || pod.which_ == SharedArrayView || pod.which_ == ArrayViewCtor);
             return pod.u.viewType_;
         }
         PropertyName *mathName() const {
             MOZ_ASSERT(pod.which_ == MathBuiltinFunction);
             return name_;
         }
+        PropertyName *atomicsName() const {
+            MOZ_ASSERT(pod.which_ == AtomicsBuiltinFunction);
+            return name_;
+        }
         AsmJSMathBuiltinFunction mathBuiltinFunction() const {
             MOZ_ASSERT(pod.which_ == MathBuiltinFunction);
             return pod.u.mathBuiltinFunc_;
         }
+        AsmJSAtomicsBuiltinFunction atomicsBuiltinFunction() const {
+            MOZ_ASSERT(pod.which_ == AtomicsBuiltinFunction);
+            return pod.u.atomicsBuiltinFunc_;
+        }
         AsmJSSimdType simdCtorType() const {
             MOZ_ASSERT(pod.which_ == SimdCtor);
             return pod.u.simdCtorType_;
@@ -783,6 +806,7 @@ class AsmJSModule
         uint32_t srcLengthWithRightBrace_;
         bool strict_;
         bool hasArrayView_;
+        bool isSharedView_;
         bool hasFixedMinHeapLength_;
         bool usesSignalHandlers_;
     } pod;
@@ -982,16 +1006,20 @@ class AsmJSModule
         g.pod.u.ffiIndex_ = *ffiIndex = pod.numFFIs_++;
         return globals_.append(g);
     }
-    bool addArrayView(Scalar::Type vt, PropertyName *maybeField) {
+    bool addArrayView(Scalar::Type vt, PropertyName *maybeField, bool isSharedView) {
         MOZ_ASSERT(!isFinishedWithModulePrologue());
+        MOZ_ASSERT(!pod.hasArrayView_ || (pod.isSharedView_ == isSharedView));
         pod.hasArrayView_ = true;
+        pod.isSharedView_ = isSharedView;
         Global g(Global::ArrayView, maybeField);
         g.pod.u.viewType_ = vt;
         return globals_.append(g);
     }
-    bool addArrayViewCtor(Scalar::Type vt, PropertyName *field) {
+    bool addArrayViewCtor(Scalar::Type vt, PropertyName *field, bool isSharedView) {
         MOZ_ASSERT(!isFinishedWithModulePrologue());
         MOZ_ASSERT(field);
+        MOZ_ASSERT(!pod.isSharedView_ || isSharedView);
+        pod.isSharedView_ = isSharedView;
         Global g(Global::ArrayViewCtor, field);
         g.pod.u.viewType_ = vt;
         return globals_.append(g);
@@ -1014,6 +1042,12 @@ class AsmJSModule
         g.pod.u.constant.kind_ = Global::MathConstant;
         return globals_.append(g);
     }
+    bool addAtomicsBuiltinFunction(AsmJSAtomicsBuiltinFunction func, PropertyName *field) {
+        MOZ_ASSERT(!isFinishedWithModulePrologue());
+        Global g(Global::AtomicsBuiltinFunction, field);
+        g.pod.u.atomicsBuiltinFunc_ = func;
+        return globals_.append(g);
+    }
     bool addSimdCtor(AsmJSSimdType type, PropertyName *field) {
         Global g(Global::SimdCtor, field);
         g.pod.u.simdCtorType_ = type;
@@ -1038,6 +1072,11 @@ class AsmJSModule
     Global &global(unsigned i) {
         return globals_[i];
     }
+    bool isValidViewSharedness(bool shared) const {
+        if (pod.hasArrayView_)
+            return pod.isSharedView_ == shared;
+        return !pod.isSharedView_ || shared;
+    }
 
     /*************************************************************************/
 
@@ -1054,6 +1093,10 @@ class AsmJSModule
         MOZ_ASSERT(isFinishedWithModulePrologue());
         return pod.hasArrayView_;
     }
+    bool isSharedView() const {
+        MOZ_ASSERT(pod.hasArrayView_);
+        return pod.isSharedView_;
+    }
     void addChangeHeap(uint32_t mask, uint32_t min, uint32_t max) {
         MOZ_ASSERT(isFinishedWithModulePrologue());
         MOZ_ASSERT(!pod.hasFixedMinHeapLength_);
@@ -457,6 +457,7 @@ HandleFault(PEXCEPTION_POINTERS exception)
         if (heapAccess->isLoad())
             SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
+        *ppc += heapAccess->opLength();
 
         return true;
 # else
         return false;
@@ -848,6 +849,7 @@ HandleFault(int signum, siginfo_t *info, void *ctx)
         if (heapAccess->isLoad())
             SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
+        *ppc += heapAccess->opLength();
 
         return true;
 # else
         return false;
@@ -1064,6 +1064,7 @@ class MOZ_STACK_CLASS ModuleCompiler
             ArrayView,
             ArrayViewCtor,
             MathBuiltinFunction,
+            AtomicsBuiltinFunction,
             SimdCtor,
             SimdOperation,
             ByteLength,
@@ -1083,6 +1084,7 @@ class MOZ_STACK_CLASS ModuleCompiler
             uint32_t ffiIndex_;
             Scalar::Type viewType_;
             AsmJSMathBuiltinFunction mathBuiltinFunc_;
+            AsmJSAtomicsBuiltinFunction atomicsBuiltinFunc_;
             AsmJSSimdType simdCtorType_;
             struct {
                 AsmJSSimdType type_;
@@ -1130,8 +1132,11 @@ class MOZ_STACK_CLASS ModuleCompiler
             MOZ_ASSERT(which_ == FFI);
             return u.ffiIndex_;
         }
+        bool isAnyArrayView() const {
+            return which_ == ArrayView || which_ == ArrayViewCtor;
+        }
         Scalar::Type viewType() const {
-            MOZ_ASSERT(which_ == ArrayView || which_ == ArrayViewCtor);
+            MOZ_ASSERT(isAnyArrayView());
             return u.viewType_;
         }
         bool isMathFunction() const {
@@ -1141,6 +1146,13 @@ class MOZ_STACK_CLASS ModuleCompiler
             MOZ_ASSERT(which_ == MathBuiltinFunction);
             return u.mathBuiltinFunc_;
         }
+        bool isAtomicsFunction() const {
+            return which_ == AtomicsBuiltinFunction;
+        }
+        AsmJSAtomicsBuiltinFunction atomicsBuiltinFunction() const {
+            MOZ_ASSERT(which_ == AtomicsBuiltinFunction);
+            return u.atomicsBuiltinFunc_;
+        }
         bool isSimdCtor() const {
             return which_ == SimdCtor;
         }
@@ -1278,6 +1290,7 @@ class MOZ_STACK_CLASS ModuleCompiler
     };
 
     typedef HashMap<PropertyName*, MathBuiltin> MathNameMap;
+    typedef HashMap<PropertyName*, AsmJSAtomicsBuiltinFunction> AtomicsNameMap;
     typedef HashMap<PropertyName*, AsmJSSimdOperation> SimdOperationNameMap;
     typedef HashMap<PropertyName*, Global*> GlobalMap;
     typedef Vector<Func*> FuncVector;
@@ -1301,6 +1314,7 @@ class MOZ_STACK_CLASS ModuleCompiler
     ArrayViewVector arrayViews_;
     ExitMap exits_;
     MathNameMap standardLibraryMathNames_;
+    AtomicsNameMap standardLibraryAtomicsNames_;
     SimdOperationNameMap standardLibrarySimdOpNames_;
     NonAssertingLabel stackOverflowLabel_;
     NonAssertingLabel asyncInterruptLabel_;
@@ -1333,6 +1347,12 @@ class MOZ_STACK_CLASS ModuleCompiler
         MathBuiltin builtin(cst);
         return standardLibraryMathNames_.putNew(atom->asPropertyName(), builtin);
     }
+    bool addStandardLibraryAtomicsName(const char *name, AsmJSAtomicsBuiltinFunction func) {
+        JSAtom *atom = Atomize(cx_, name, strlen(name));
+        if (!atom)
+            return false;
+        return standardLibraryAtomicsNames_.putNew(atom->asPropertyName(), func);
+    }
     bool addStandardLibrarySimdOpName(const char *name, AsmJSSimdOperation op) {
         JSAtom *atom = Atomize(cx_, name, strlen(name));
         if (!atom)
@@ -1354,6 +1374,7 @@ class MOZ_STACK_CLASS ModuleCompiler
         arrayViews_(cx),
         exits_(cx),
         standardLibraryMathNames_(cx),
+        standardLibraryAtomicsNames_(cx),
         standardLibrarySimdOpNames_(cx),
         errorString_(nullptr),
         errorOffset_(UINT32_MAX),
@@ -1415,6 +1436,20 @@ class MOZ_STACK_CLASS ModuleCompiler
             return false;
         }
 
+        if (!standardLibraryAtomicsNames_.init() ||
+            !addStandardLibraryAtomicsName("compareExchange", AsmJSAtomicsBuiltin_compareExchange) ||
+            !addStandardLibraryAtomicsName("load", AsmJSAtomicsBuiltin_load) ||
+            !addStandardLibraryAtomicsName("store", AsmJSAtomicsBuiltin_store) ||
+            !addStandardLibraryAtomicsName("fence", AsmJSAtomicsBuiltin_fence) ||
+            !addStandardLibraryAtomicsName("add", AsmJSAtomicsBuiltin_add) ||
+            !addStandardLibraryAtomicsName("sub", AsmJSAtomicsBuiltin_sub) ||
+            !addStandardLibraryAtomicsName("and", AsmJSAtomicsBuiltin_and) ||
+            !addStandardLibraryAtomicsName("or", AsmJSAtomicsBuiltin_or) ||
+            !addStandardLibraryAtomicsName("xor", AsmJSAtomicsBuiltin_xor))
+        {
+            return false;
+        }
+
 #define ADDSTDLIBSIMDOPNAME(op) || !addStandardLibrarySimdOpName(#op, AsmJSSimdOperation_##op)
         if (!standardLibrarySimdOpNames_.init()
             FORALL_SIMD_OP(ADDSTDLIBSIMDOPNAME))
@@ -1544,6 +1579,13 @@ class MOZ_STACK_CLASS ModuleCompiler
         }
         return false;
     }
+    bool lookupStandardLibraryAtomicsName(PropertyName *name, AsmJSAtomicsBuiltinFunction *atomicsBuiltin) const {
+        if (AtomicsNameMap::Ptr p = standardLibraryAtomicsNames_.lookup(name)) {
+            *atomicsBuiltin = p->value();
+            return true;
+        }
+        return false;
+    }
     bool lookupStandardSimdOpName(PropertyName *name, AsmJSSimdOperation *op) const {
         if (SimdOperationNameMap::Ptr p = standardLibrarySimdOpNames_.lookup(name)) {
             *op = p->value();
@@ -1660,22 +1702,22 @@ class MOZ_STACK_CLASS ModuleCompiler
         global->u.ffiIndex_ = index;
         return globals_.putNew(varName, global);
     }
-    bool addArrayView(PropertyName *varName, Scalar::Type vt, PropertyName *maybeField) {
+    bool addArrayView(PropertyName *varName, Scalar::Type vt, PropertyName *maybeField, bool isSharedView) {
         if (!arrayViews_.append(ArrayView(varName, vt)))
             return false;
         Global *global = moduleLifo_.new_<Global>(Global::ArrayView);
         if (!global)
             return false;
-        if (!module_->addArrayView(vt, maybeField))
+        if (!module_->addArrayView(vt, maybeField, isSharedView))
             return false;
         global->u.viewType_ = vt;
         return globals_.putNew(varName, global);
     }
-    bool addArrayViewCtor(PropertyName *varName, Scalar::Type vt, PropertyName *fieldName) {
+    bool addArrayViewCtor(PropertyName *varName, Scalar::Type vt, PropertyName *fieldName, bool isSharedView) {
         Global *global = moduleLifo_.new_<Global>(Global::ArrayViewCtor);
         if (!global)
             return false;
-        if (!module_->addArrayViewCtor(vt, fieldName))
+        if (!module_->addArrayViewCtor(vt, fieldName, isSharedView))
             return false;
         global->u.viewType_ = vt;
         return globals_.putNew(varName, global);
@@ -1689,6 +1731,15 @@ class MOZ_STACK_CLASS ModuleCompiler
         global->u.mathBuiltinFunc_ = func;
         return globals_.putNew(varName, global);
     }
+    bool addAtomicsBuiltinFunction(PropertyName *varName, AsmJSAtomicsBuiltinFunction func, PropertyName *fieldName) {
+        if (!module_->addAtomicsBuiltinFunction(func, fieldName))
+            return false;
+        Global *global = moduleLifo_.new_<Global>(Global::AtomicsBuiltinFunction);
+        if (!global)
+            return false;
+        global->u.atomicsBuiltinFunc_ = func;
+        return globals_.putNew(varName, global);
+    }
     bool addSimdCtor(PropertyName *varName, AsmJSSimdType type, PropertyName *fieldName) {
         if (!module_->addSimdCtor(type, fieldName))
             return false;
@@ -2751,6 +2802,63 @@ class FunctionCompiler
         curBlock_->add(store);
     }
 
+    void memoryBarrier(MemoryBarrierBits type)
+    {
+        if (inDeadCode())
+            return;
+        MMemoryBarrier *ins = MMemoryBarrier::New(alloc(), type);
+        curBlock_->add(ins);
+    }
+
+    MDefinition *atomicLoadHeap(Scalar::Type vt, MDefinition *ptr, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
+        MAsmJSLoadHeap *load = MAsmJSLoadHeap::New(alloc(), vt, ptr, needsBoundsCheck,
+                                                   MembarBeforeLoad, MembarAfterLoad);
+        curBlock_->add(load);
+        return load;
+    }
+
+    void atomicStoreHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
+        MAsmJSStoreHeap *store = MAsmJSStoreHeap::New(alloc(), vt, ptr, v, needsBoundsCheck,
+                                                      MembarBeforeStore, MembarAfterStore);
+        curBlock_->add(store);
+    }
+
+    MDefinition *atomicCompareExchangeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *oldv, MDefinition *newv, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        // The code generator requires explicit bounds checking for compareExchange.
+        bool needsBoundsCheck = true;
+        MAsmJSCompareExchangeHeap *cas =
+            MAsmJSCompareExchangeHeap::New(alloc(), vt, ptr, oldv, newv, needsBoundsCheck);
+        curBlock_->add(cas);
+        return cas;
+    }
+
+    MDefinition *atomicBinopHeap(js::jit::AtomicOp op, Scalar::Type vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        // The code generator requires explicit bounds checking for the binops.
+        bool needsBoundsCheck = true;
+        MAsmJSAtomicBinopHeap *binop =
+            MAsmJSAtomicBinopHeap::New(alloc(), op, vt, ptr, v, needsBoundsCheck);
+        curBlock_->add(binop);
+        return binop;
+    }
+
     MDefinition *loadGlobalVar(const ModuleCompiler::Global &global)
     {
         if (inDeadCode())
@@ -3648,27 +3756,53 @@ CheckGlobalVariableInitImport(ModuleCompiler &m, PropertyName *varName, ParseNod
 }
 
 static bool
-IsArrayViewCtorName(ModuleCompiler &m, PropertyName *name, Scalar::Type *type)
+IsArrayViewCtorName(ModuleCompiler &m, PropertyName *name, Scalar::Type *type, bool *shared)
 {
     JSAtomState &names = m.cx()->names();
-    if (name == names.Int8Array || name == names.SharedInt8Array)
+    *shared = false;
+    if (name == names.Int8Array) {
         *type = Scalar::Int8;
-    else if (name == names.Uint8Array || name == names.SharedUint8Array)
+    } else if (name == names.Uint8Array) {
         *type = Scalar::Uint8;
-    else if (name == names.Int16Array || name == names.SharedInt16Array)
+    } else if (name == names.Int16Array) {
         *type = Scalar::Int16;
-    else if (name == names.Uint16Array || name == names.SharedUint16Array)
+    } else if (name == names.Uint16Array) {
         *type = Scalar::Uint16;
-    else if (name == names.Int32Array || name == names.SharedInt32Array)
+    } else if (name == names.Int32Array) {
         *type = Scalar::Int32;
-    else if (name == names.Uint32Array || name == names.SharedUint32Array)
+    } else if (name == names.Uint32Array) {
         *type = Scalar::Uint32;
-    else if (name == names.Float32Array || name == names.SharedFloat32Array)
+    } else if (name == names.Float32Array) {
         *type = Scalar::Float32;
-    else if (name == names.Float64Array || name == names.SharedFloat64Array)
+    } else if (name == names.Float64Array) {
         *type = Scalar::Float64;
-    else
+    } else if (name == names.SharedInt8Array) {
+        *shared = true;
+        *type = Scalar::Int8;
+    } else if (name == names.SharedUint8Array) {
+        *shared = true;
+        *type = Scalar::Uint8;
+    } else if (name == names.SharedInt16Array) {
+        *shared = true;
+        *type = Scalar::Int16;
+    } else if (name == names.SharedUint16Array) {
+        *shared = true;
+        *type = Scalar::Uint16;
+    } else if (name == names.SharedInt32Array) {
+        *shared = true;
+        *type = Scalar::Int32;
+    } else if (name == names.SharedUint32Array) {
+        *shared = true;
+        *type = Scalar::Uint32;
+    } else if (name == names.SharedFloat32Array) {
+        *shared = true;
+        *type = Scalar::Float32;
+    } else if (name == names.SharedFloat64Array) {
+        *shared = true;
+        *type = Scalar::Float64;
+    } else {
         return false;
+    }
     return true;
 }
@@ -3700,6 +3834,7 @@ CheckNewArrayView(ModuleCompiler &m, PropertyName *varName, ParseNode *newExpr)
 
     PropertyName *field;
     Scalar::Type type;
+    bool shared = false;
     if (ctorExpr->isKind(PNK_DOT)) {
         ParseNode *base = DotBase(ctorExpr);
 
@@ -3707,7 +3842,7 @@ CheckNewArrayView(ModuleCompiler &m, PropertyName *varName, ParseNode *newExpr)
             return m.failName(base, "expecting '%s.*Array", globalName);
 
         field = DotMember(ctorExpr);
-        if (!IsArrayViewCtorName(m, field, &type))
+        if (!IsArrayViewCtorName(m, field, &type, &shared))
             return m.fail(ctorExpr, "could not match typed array name");
     } else {
         if (!ctorExpr->isKind(PNK_NAME))
@@ -3728,7 +3863,10 @@ CheckNewArrayView(ModuleCompiler &m, PropertyName *varName, ParseNode *newExpr)
     if (!CheckNewArrayViewArgs(m, ctorExpr, bufferName))
         return false;
 
-    return m.addArrayView(varName, type, field);
+    if (!m.module().isValidViewSharedness(shared))
+        return m.failName(ctorExpr, "%s has different sharedness than previous view constructors", globalName);
+
+    return m.addArrayView(varName, type, field, shared);
 }
 
 static bool
@@ -3781,6 +3919,18 @@ CheckGlobalMathImport(ModuleCompiler &m, ParseNode *initNode, PropertyName *varN
         MOZ_CRASH("unexpected or uninitialized math builtin type");
 }
 
+static bool
+CheckGlobalAtomicsImport(ModuleCompiler &m, ParseNode *initNode, PropertyName *varName,
+                         PropertyName *field)
+{
+    // Atomics builtin, with the form glob.Atomics.[[builtin]]
+    AsmJSAtomicsBuiltinFunction func;
+    if (!m.lookupStandardLibraryAtomicsName(field, &func))
+        return m.failName(initNode, "'%s' is not a standard Atomics builtin", field);
+
+    return m.addAtomicsBuiltinFunction(varName, func, field);
+}
+
 static bool
 CheckGlobalSimdImport(ModuleCompiler &m, ParseNode *initNode, PropertyName *varName,
                       PropertyName *field)
@@ -3817,7 +3967,7 @@ CheckGlobalDotImport(ModuleCompiler &m, PropertyName *varName, ParseNode *initNo
 
     if (base->isKind(PNK_DOT)) {
         ParseNode *global = DotBase(base);
-        PropertyName *mathOrSimd = DotMember(base);
+        PropertyName *mathOrAtomicsOrSimd = DotMember(base);
 
         PropertyName *globalName = m.module().globalArgumentName();
         if (!globalName)
@@ -3831,9 +3981,11 @@ CheckGlobalDotImport(ModuleCompiler &m, PropertyName *varName, ParseNode *initNo
             return m.failName(base, "expecting %s.*", globalName);
         }
 
-        if (mathOrSimd == m.cx()->names().Math)
+        if (mathOrAtomicsOrSimd == m.cx()->names().Math)
             return CheckGlobalMathImport(m, initNode, varName, field);
-        if (mathOrSimd == m.cx()->names().SIMD)
+        if (mathOrAtomicsOrSimd == m.cx()->names().Atomics)
+            return CheckGlobalAtomicsImport(m, initNode, varName, field);
+        if (mathOrAtomicsOrSimd == m.cx()->names().SIMD)
             return CheckGlobalSimdImport(m, initNode, varName, field);
         return m.failName(base, "expecting %s.{Math|SIMD}", globalName);
     }
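CheckGlobalDotImport now dispatches on three namespaces hanging off the global argument. As a sketch, all of the following import forms validate (variable names are arbitrary, and the SIMD line assumes a SIMD-enabled build):

    function M(glob, ffi, heap) {
        "use asm";
        var imul = glob.Math.imul;      // Math builtin (existing path)
        var aload = glob.Atomics.load;  // Atomics builtin (new path)
        var i4 = glob.SIMD.int32x4;     // SIMD ctor (existing path)
        function f() { return 0; }
        return f;
    }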
@@ -3850,8 +4002,12 @@ CheckGlobalDotImport(ModuleCompiler &m, PropertyName *varName, ParseNode *initNo
         return m.addByteLength(varName);
 
     Scalar::Type type;
-    if (IsArrayViewCtorName(m, field, &type))
-        return m.addArrayViewCtor(varName, type, field);
+    bool shared = false;
+    if (IsArrayViewCtorName(m, field, &type, &shared)) {
+        if (!m.module().isValidViewSharedness(shared))
+            return m.failName(initNode, "'%s' has different sharedness than previous view constructors", field);
+        return m.addArrayViewCtor(varName, type, field, shared);
+    }
 
     return m.failName(initNode, "'%s' is not a standard constant or typed array name", field);
 }
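Both sharedness checks above enforce a single rule: a module may not mix shared and unshared views of its heap. A hypothetical module (invented for illustration) that now fails to validate with the new error message:

    function Mixed(stdlib, ffi, heap) {
        "use asm";
        var i32 = new stdlib.SharedInt32Array(heap);
        var u8 = new stdlib.Uint8Array(heap);  // rejected: different sharedness
        function f() { return 0; }
        return f;
    }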
@@ -4159,6 +4315,7 @@ CheckVarRef(FunctionCompiler &f, ParseNode *varRef, MDefinition **def, Type *typ
       case ModuleCompiler::Global::Function:
       case ModuleCompiler::Global::FFI:
       case ModuleCompiler::Global::MathBuiltinFunction:
+      case ModuleCompiler::Global::AtomicsBuiltinFunction:
       case ModuleCompiler::Global::FuncPtrTable:
       case ModuleCompiler::Global::ArrayView:
       case ModuleCompiler::Global::ArrayViewCtor:
@@ -4208,18 +4365,16 @@ FoldMaskedArrayIndex(FunctionCompiler &f, ParseNode **indexExpr, int32_t *mask,
 }
 
 static bool
-CheckArrayAccess(FunctionCompiler &f, ParseNode *elem, Scalar::Type *viewType,
-                 MDefinition **def, NeedsBoundsCheck *needsBoundsCheck)
+CheckArrayAccess(FunctionCompiler &f, ParseNode *viewName, ParseNode *indexExpr,
+                 Scalar::Type *viewType, MDefinition **def, NeedsBoundsCheck *needsBoundsCheck)
 {
-    ParseNode *viewName = ElemBase(elem);
-    ParseNode *indexExpr = ElemIndex(elem);
     *needsBoundsCheck = NEEDS_BOUNDS_CHECK;
 
     if (!viewName->isKind(PNK_NAME))
         return f.fail(viewName, "base of array access must be a typed array view name");
 
     const ModuleCompiler::Global *global = f.lookupGlobal(viewName->name());
-    if (!global || global->which() != ModuleCompiler::Global::ArrayView)
+    if (!global || !global->isAnyArrayView())
         return f.fail(viewName, "base of array access must be a typed array view name");
 
     *viewType = global->viewType();
@@ -4316,7 +4471,7 @@ CheckLoadArray(FunctionCompiler &f, ParseNode *elem, MDefinition **def, Type *ty
     Scalar::Type viewType;
     MDefinition *pointerDef;
     NeedsBoundsCheck needsBoundsCheck;
-    if (!CheckArrayAccess(f, elem, &viewType, &pointerDef, &needsBoundsCheck))
+    if (!CheckArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType, &pointerDef, &needsBoundsCheck))
         return false;
 
     *def = f.loadHeap(viewType, pointerDef, needsBoundsCheck);
@@ -4371,7 +4526,7 @@ CheckStoreArray(FunctionCompiler &f, ParseNode *lhs, ParseNode *rhs, MDefinition
     Scalar::Type viewType;
     MDefinition *pointerDef;
    NeedsBoundsCheck needsBoundsCheck;
-    if (!CheckArrayAccess(f, lhs, &viewType, &pointerDef, &needsBoundsCheck))
+    if (!CheckArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), &viewType, &pointerDef, &needsBoundsCheck))
        return false;
 
     f.enterHeapExpression();
@@ -4621,6 +4776,193 @@ CheckMathMinMax(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, boo
     return true;
 }
 
+static bool
+CheckSharedArrayAtomicAccess(FunctionCompiler &f, ParseNode *viewName, ParseNode *indexExpr,
+                             Scalar::Type *viewType, MDefinition** pointerDef,
+                             NeedsBoundsCheck *needsBoundsCheck)
+{
+    if (!CheckArrayAccess(f, viewName, indexExpr, viewType, pointerDef, needsBoundsCheck))
+        return false;
+
+    // Atomic accesses may be made on shared integer arrays only.
+
+    // The global will be sane, CheckArrayAccess checks it.
+    const ModuleCompiler::Global *global = f.lookupGlobal(viewName->name());
+    if (global->which() != ModuleCompiler::Global::ArrayView || !f.m().module().isSharedView())
+        return f.fail(viewName, "base of array access must be a shared typed array view name");
+
+    switch (*viewType) {
+      case Scalar::Int8:
+      case Scalar::Int16:
+      case Scalar::Int32:
+      case Scalar::Uint8:
+      case Scalar::Uint16:
+      case Scalar::Uint32:
+        return true;
+      default:
+        return f.failf(viewName, "not an integer array");
+    }
+
+    return true;
+}
+
+static bool
+CheckAtomicsFence(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
+{
+    if (CallArgListLength(call) != 0)
+        return f.fail(call, "Atomics.fence must be passed 0 arguments");
+
+    f.memoryBarrier(MembarFull);
+    *type = Type::Void;
+    return true;
+}
+
+static bool
+CheckAtomicsLoad(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
+{
+    if (CallArgListLength(call) != 2)
+        return f.fail(call, "Atomics.load must be passed 2 arguments");
+
+    ParseNode *arrayArg = CallArgList(call);
+    ParseNode *indexArg = NextNode(arrayArg);
+
+    Scalar::Type viewType;
+    MDefinition *pointerDef;
+    NeedsBoundsCheck needsBoundsCheck;
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
+        return false;
+
+    *def = f.atomicLoadHeap(viewType, pointerDef, needsBoundsCheck);
+    *type = Type::Signed;
+    return true;
+}
+
+static bool
+CheckAtomicsStore(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
+{
+    if (CallArgListLength(call) != 3)
+        return f.fail(call, "Atomics.store must be passed 3 arguments");
+
+    ParseNode *arrayArg = CallArgList(call);
+    ParseNode *indexArg = NextNode(arrayArg);
+    ParseNode *valueArg = NextNode(indexArg);
+
+    Scalar::Type viewType;
+    MDefinition *pointerDef;
+    NeedsBoundsCheck needsBoundsCheck;
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
+        return false;
+
+    MDefinition *rhsDef;
+    Type rhsType;
+    if (!CheckExpr(f, valueArg, &rhsDef, &rhsType))
+        return false;
+
+    if (!rhsType.isIntish())
+        return f.failf(arrayArg, "%s is not a subtype of intish", rhsType.toChars());
+
+    f.atomicStoreHeap(viewType, pointerDef, rhsDef, needsBoundsCheck);
+
+    *def = rhsDef;
+    *type = Type::Signed;
+    return true;
+}
+
+static bool
+CheckAtomicsBinop(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type, js::jit::AtomicOp op)
+{
+    if (CallArgListLength(call) != 3)
+        return f.fail(call, "Atomics binary operator must be passed 3 arguments");
+
+    ParseNode *arrayArg = CallArgList(call);
+    ParseNode *indexArg = NextNode(arrayArg);
+    ParseNode *valueArg = NextNode(indexArg);
+
+    Scalar::Type viewType;
+    MDefinition *pointerDef;
+    NeedsBoundsCheck needsBoundsCheck;
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
+        return false;
+
+    MDefinition *valueArgDef;
+    Type valueArgType;
+    if (!CheckExpr(f, valueArg, &valueArgDef, &valueArgType))
+        return false;
+
+    if (!valueArgType.isIntish())
+        return f.failf(valueArg, "%s is not a subtype of intish", valueArgType.toChars());
+
+    *def = f.atomicBinopHeap(op, viewType, pointerDef, valueArgDef, needsBoundsCheck);
+    *type = Type::Signed;
+    return true;
+}
+
+static bool
+CheckAtomicsCompareExchange(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
+{
+    if (CallArgListLength(call) != 4)
+        return f.fail(call, "Atomics.compareExchange must be passed 4 arguments");
+
+    ParseNode *arrayArg = CallArgList(call);
+    ParseNode *indexArg = NextNode(arrayArg);
+    ParseNode *oldValueArg = NextNode(indexArg);
+    ParseNode *newValueArg = NextNode(oldValueArg);
+
+    Scalar::Type viewType;
+    MDefinition *pointerDef;
+    NeedsBoundsCheck needsBoundsCheck;
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
+        return false;
+
+    MDefinition *oldValueArgDef;
+    Type oldValueArgType;
+    if (!CheckExpr(f, oldValueArg, &oldValueArgDef, &oldValueArgType))
+        return false;
+
+    MDefinition *newValueArgDef;
+    Type newValueArgType;
+    if (!CheckExpr(f, newValueArg, &newValueArgDef, &newValueArgType))
+        return false;
+
+    if (!oldValueArgType.isIntish())
+        return f.failf(oldValueArg, "%s is not a subtype of intish", oldValueArgType.toChars());
+
+    if (!newValueArgType.isIntish())
+        return f.failf(newValueArg, "%s is not a subtype of intish", newValueArgType.toChars());
+
+    *def = f.atomicCompareExchangeHeap(viewType, pointerDef, oldValueArgDef, newValueArgDef, needsBoundsCheck);
+    *type = Type::Signed;
+    return true;
+}
+
+static bool
+CheckAtomicsBuiltinCall(FunctionCompiler &f, ParseNode *callNode, AsmJSAtomicsBuiltinFunction func,
+                        MDefinition **resultDef, Type *resultType)
+{
+    switch (func) {
+      case AsmJSAtomicsBuiltin_compareExchange:
+        return CheckAtomicsCompareExchange(f, callNode, resultDef, resultType);
+      case AsmJSAtomicsBuiltin_load:
+        return CheckAtomicsLoad(f, callNode, resultDef, resultType);
+      case AsmJSAtomicsBuiltin_store:
+        return CheckAtomicsStore(f, callNode, resultDef, resultType);
+      case AsmJSAtomicsBuiltin_fence:
+        return CheckAtomicsFence(f, callNode, resultDef, resultType);
+      case AsmJSAtomicsBuiltin_add:
+        return CheckAtomicsBinop(f, callNode, resultDef, resultType, AtomicFetchAddOp);
+      case AsmJSAtomicsBuiltin_sub:
+        return CheckAtomicsBinop(f, callNode, resultDef, resultType, AtomicFetchSubOp);
+      case AsmJSAtomicsBuiltin_and:
+        return CheckAtomicsBinop(f, callNode, resultDef, resultType, AtomicFetchAndOp);
+      case AsmJSAtomicsBuiltin_or:
+        return CheckAtomicsBinop(f, callNode, resultDef, resultType, AtomicFetchOrOp);
+      case AsmJSAtomicsBuiltin_xor:
+        return CheckAtomicsBinop(f, callNode, resultDef, resultType, AtomicFetchXorOp);
+      default:
+        MOZ_CRASH("unexpected atomicsBuiltin function");
+    }
+}
+
 typedef bool (*CheckArgType)(FunctionCompiler &f, ParseNode *argNode, Type type);
 
 static bool
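Taken together, the Check* functions above pin down the source-level rules for Atomics calls: exact argument counts, shared integer views only, intish operands, and a Signed result. A sketch of what passes and what is rejected (the names are assumed from a module shaped like the test file below):

    // Assuming: var add = stdlib.Atomics.add;
    //           var i32a = new stdlib.SharedInt32Array(heap);
    //           var f64a = new stdlib.SharedFloat64Array(heap);
    function ok(i) {
        i = i|0;
        return add(i32a, i>>2, 1)|0;  // 3 args, int view, intish operand: validates
    }
    // add(f64a, 0, 1)  -> rejected: "not an integer array"
    // add(i32a, 0)     -> rejected: must be passed 3 arguments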
@@ -5406,6 +5748,8 @@ CheckUncoercedCall(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type
     if (IsCallToGlobal(f.m(), expr, &global)) {
         if (global->isMathFunction())
             return CheckMathBuiltinCall(f, expr, global->mathBuiltinFunction(), def, type);
+        if (global->isAtomicsFunction())
+            return CheckAtomicsBuiltinCall(f, expr, global->atomicsBuiltinFunction(), def, type);
         if (global->isSimdCtor())
             return CheckSimdCtorCall(f, expr, global, def, type);
         if (global->isSimdOperation())
@@ -5509,6 +5853,18 @@ CheckCoercedSimdCall(FunctionCompiler &f, ParseNode *call, const ModuleCompiler:
     return CoerceResult(f, call, retType, *def, *type, def, type);
 }
 
+static bool
+CheckCoercedAtomicsBuiltinCall(FunctionCompiler &f, ParseNode *callNode,
+                               AsmJSAtomicsBuiltinFunction func, RetType retType,
+                               MDefinition **resultDef, Type *resultType)
+{
+    MDefinition *operand;
+    Type actualRetType;
+    if (!CheckAtomicsBuiltinCall(f, callNode, func, &operand, &actualRetType))
+        return false;
+    return CoerceResult(f, callNode, retType, operand, actualRetType, resultDef, resultType);
+}
+
 static bool
 CheckCoercedCall(FunctionCompiler &f, ParseNode *call, RetType retType, MDefinition **def, Type *type)
 {
@@ -5541,6 +5897,8 @@ CheckCoercedCall(FunctionCompiler &f, ParseNode *call, RetType retType, MDefinit
         return CheckFFICall(f, call, global->ffiIndex(), retType, def, type);
       case ModuleCompiler::Global::MathBuiltinFunction:
         return CheckCoercedMathBuiltinCall(f, call, global->mathBuiltinFunction(), retType, def, type);
+      case ModuleCompiler::Global::AtomicsBuiltinFunction:
+        return CheckCoercedAtomicsBuiltinCall(f, call, global->atomicsBuiltinFunction(), retType, def, type);
       case ModuleCompiler::Global::ConstantLiteral:
       case ModuleCompiler::Global::ConstantImport:
       case ModuleCompiler::Global::Variable:
js/src/jit-test/tests/asm.js/testAtomics.js (new file, 253 lines)
@@ -0,0 +1,253 @@
+if (!this.SharedArrayBuffer || !this.SharedInt32Array || !this.Atomics)
+    quit();
+
+function loadModule_int32(stdlib, foreign, heap) {
+    "use asm";
+
+    var atomic_fence = stdlib.Atomics.fence;
+    var atomic_load = stdlib.Atomics.load;
+    var atomic_store = stdlib.Atomics.store;
+    var atomic_cmpxchg = stdlib.Atomics.compareExchange;
+    var atomic_add = stdlib.Atomics.add;
+    var atomic_sub = stdlib.Atomics.sub;
+    var atomic_and = stdlib.Atomics.and;
+    var atomic_or = stdlib.Atomics.or;
+    var atomic_xor = stdlib.Atomics.xor;
+
+    var i32a = new stdlib.SharedInt32Array(heap);
+
+    function do_fence() {
+        atomic_fence();
+    }
+
+    // Load element 0
+    function do_load() {
+        var v = 0;
+        v = atomic_load(i32a, 0);
+        return v|0;
+    }
+
+    // Load element i
+    function do_load_i(i) {
+        i = i|0;
+        var v = 0;
+        v = atomic_load(i32a, i>>2);
+        return v|0;
+    }
+
+    // Store 37 in element 0
+    function do_store() {
+        var v = 0;
+        v = atomic_store(i32a, 0, 37);
+        return v|0;
+    }
+
+    // Store 37 in element i
+    function do_store_i(i) {
+        i = i|0;
+        var v = 0;
+        v = atomic_store(i32a, i>>2, 37);
+        return v|0;
+    }
+
+    // Add 37 to element 10
+    function do_add() {
+        var v = 0;
+        v = atomic_add(i32a, 10, 37);
+        return v|0;
+    }
+
+    // Add 37 to element i
+    function do_add_i(i) {
+        i = i|0;
+        var v = 0;
+        v = atomic_add(i32a, i>>2, 37);
+        return v|0;
+    }
+
+    // Subtract 148 from element 20
+    function do_sub() {
+        var v = 0;
+        v = atomic_sub(i32a, 20, 148);
+        return v|0;
+    }
+
+    // Subtract 148 from element i
+    function do_sub_i(i) {
+        i = i|0;
+        var v = 0;
+        v = atomic_sub(i32a, i>>2, 148);
+        return v|0;
+    }
+
+    // AND 0x33333333 into element 30
+    function do_and() {
+        var v = 0;
+        v = atomic_and(i32a, 30, 0x33333333);
+        return v|0;
+    }
+
+    // AND 0x33333333 into element i
+    function do_and_i(i) {
+        i = i|0;
+        var v = 0;
+        v = atomic_and(i32a, i>>2, 0x33333333);
+        return v|0;
+    }
+
+    // OR 0x33333333 into element 40
+    function do_or() {
+        var v = 0;
+        v = atomic_or(i32a, 40, 0x33333333);
+        return v|0;
+    }
+
+    // OR 0x33333333 into element i
+    function do_or_i(i) {
+        i = i|0;
+        var v = 0;
+        v = atomic_or(i32a, i>>2, 0x33333333);
+        return v|0;
+    }
+
+    // XOR 0x33333333 into element 50
+    function do_xor() {
+        var v = 0;
+        v = atomic_xor(i32a, 50, 0x33333333);
+        return v|0;
+    }
+
+    // XOR 0x33333333 into element i
+    function do_xor_i(i) {
+        i = i|0;
+        var v = 0;
+        v = atomic_xor(i32a, i>>2, 0x33333333);
+        return v|0;
+    }
+
+    // CAS element 100: 0 -> -1
+    function do_cas1() {
+        var v = 0;
+        v = atomic_cmpxchg(i32a, 100, 0, -1);
+        return v|0;
+    }
+
+    // CAS element 100: -1 -> 0x5A5A5A5A
+    function do_cas2() {
+        var v = 0;
+        v = atomic_cmpxchg(i32a, 100, -1, 0x5A5A5A5A);
+        return v|0;
+    }
+
+    // CAS element i: 0 -> -1
+    function do_cas1_i(i) {
+        i = i|0;
+        var v = 0;
+        v = atomic_cmpxchg(i32a, i>>2, 0, -1);
+        return v|0;
+    }
+
+    // CAS element i: -1 -> 0x5A5A5A5A
+    function do_cas2_i(i) {
+        i = i|0;
+        var v = 0;
+        v = atomic_cmpxchg(i32a, i>>2, -1, 0x5A5A5A5A);
+        return v|0;
+    }
+
+    return { fence: do_fence,
+             load: do_load,
+             load_i: do_load_i,
+             store: do_store,
+             store_i: do_store_i,
+             add: do_add,
+             add_i: do_add_i,
+             sub: do_sub,
+             sub_i: do_sub_i,
+             and: do_and,
+             and_i: do_and_i,
+             or: do_or,
+             or_i: do_or_i,
+             xor: do_xor,
+             xor_i: do_xor_i,
+             cas1: do_cas1,
+             cas2: do_cas2,
+             cas1_i: do_cas1_i,
+             cas2_i: do_cas2_i };
+}
+
+// TODO: byte arrays
+// TODO: halfword arrays
+// TODO: signed vs unsigned; negative results
+
+var heap = new SharedArrayBuffer(65536);
+var i32a = new SharedInt32Array(heap);
+var module = loadModule_int32(this, {}, heap);
+
+var size = 4;
+
+module.fence();
+
+i32a[0] = 12345;
+assertEq(module.load(), 12345);
+assertEq(module.load_i(size*0), 12345);
+
+assertEq(module.store(), 37);
+assertEq(i32a[0], 37);
+assertEq(module.store_i(size*0), 37);
+
+i32a[10] = 18;
+assertEq(module.add(), 18);
+assertEq(i32a[10], 18+37);
+assertEq(module.add_i(size*10), 18+37);
+assertEq(i32a[10], 18+37+37);
+
+i32a[20] = 4972;
+assertEq(module.sub(), 4972);
+assertEq(i32a[20], 4972 - 148);
+assertEq(module.sub_i(size*20), 4972 - 148);
+assertEq(i32a[20], 4972 - 148 - 148);
+
+i32a[30] = 0x66666666;
+assertEq(module.and(), 0x66666666);
+assertEq(i32a[30], 0x22222222);
+i32a[30] = 0x66666666;
+assertEq(module.and_i(size*30), 0x66666666);
+assertEq(i32a[30], 0x22222222);
+
+i32a[40] = 0x22222222;
+assertEq(module.or(), 0x22222222);
+assertEq(i32a[40], 0x33333333);
+i32a[40] = 0x22222222;
+assertEq(module.or_i(size*40), 0x22222222);
+assertEq(i32a[40], 0x33333333);
+
+i32a[50] = 0x22222222;
+assertEq(module.xor(), 0x22222222);
+assertEq(i32a[50], 0x11111111);
+i32a[50] = 0x22222222;
+assertEq(module.xor_i(size*50), 0x22222222);
+assertEq(i32a[50], 0x11111111);
+
+i32a[100] = 0;
+assertEq(module.cas1(), 0);
+assertEq(module.cas2(), -1);
+assertEq(i32a[100], 0x5A5A5A5A);
+
+i32a[100] = 0;
+assertEq(module.cas1_i(size*100), 0);
+assertEq(module.cas2_i(size*100), -1);
+assertEq(i32a[100], 0x5A5A5A5A);
+
+// Out-of-bounds accesses.
+
+assertEq(module.cas1_i(size*20000), 0);
+assertEq(module.cas2_i(size*20000), 0);
+
+assertEq(module.or_i(size*20001), 0);
+assertEq(module.xor_i(size*20001), 0);
+assertEq(module.and_i(size*20001), 0);
+assertEq(module.add_i(size*20001), 0);
+assertEq(module.sub_i(size*20001), 0);
+
+print("Done");
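One way the byte-array TODO above could be filled in, following the same pattern; this variant assumes SharedUint8Array and is a sketch, not part of the commit:

    function loadModule_uint8(stdlib, foreign, heap) {
        "use asm";
        var atomic_add = stdlib.Atomics.add;
        var u8a = new stdlib.SharedUint8Array(heap);
        // Add 37 to element i; byte arrays need no index scaling.
        function do_add_i(i) {
            i = i|0;
            var v = 0;
            v = atomic_add(u8a, i, 37);
            return v|0;
        }
        return { add_i: do_add_i };
    }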
@@ -7,6 +7,7 @@
 #ifndef jit_LIR_Common_h
 #define jit_LIR_Common_h
 
+#include "jit/AtomicOp.h"
 #include "jit/shared/Assembler-shared.h"
 
 // This file declares LIR instructions that are common to every platform.
@@ -6590,6 +6591,60 @@ class LAsmJSStoreHeap : public LInstructionHelper<0, 2, 0>
     }
 };
 
+class LAsmJSCompareExchangeHeap : public LInstructionHelper<1, 3, 0>
+{
+  public:
+    LIR_HEADER(AsmJSCompareExchangeHeap);
+
+    LAsmJSCompareExchangeHeap(const LAllocation &ptr, const LAllocation &oldValue,
+                              const LAllocation &newValue)
+    {
+        setOperand(0, ptr);
+        setOperand(1, oldValue);
+        setOperand(2, newValue);
+    }
+
+    const LAllocation *ptr() {
+        return getOperand(0);
+    }
+    const LAllocation *oldValue() {
+        return getOperand(1);
+    }
+    const LAllocation *newValue() {
+        return getOperand(2);
+    }
+
+    MAsmJSCompareExchangeHeap *mir() const {
+        return mir_->toAsmJSCompareExchangeHeap();
+    }
+};
+
+class LAsmJSAtomicBinopHeap : public LInstructionHelper<1, 2, 1>
+{
+  public:
+    LIR_HEADER(AsmJSAtomicBinopHeap);
+    LAsmJSAtomicBinopHeap(const LAllocation &ptr, const LAllocation &value,
+                          const LDefinition &temp)
+    {
+        setOperand(0, ptr);
+        setOperand(1, value);
+        setTemp(0, temp);
+    }
+    const LAllocation *ptr() {
+        return getOperand(0);
+    }
+    const LAllocation *value() {
+        return getOperand(1);
+    }
+    const LDefinition *temp() {
+        return getTemp(0);
+    }
+
+    MAsmJSAtomicBinopHeap *mir() const {
+        return mir_->toAsmJSAtomicBinopHeap();
+    }
+};
+
 class LAsmJSLoadGlobalVar : public LInstructionHelper<1, 0, 0>
 {
   public:
@@ -333,6 +333,8 @@
     _(AsmJSVoidReturn) \
     _(AsmJSPassStackArg) \
     _(AsmJSCall) \
+    _(AsmJSCompareExchangeHeap) \
+    _(AsmJSAtomicBinopHeap) \
     _(InterruptCheckPar) \
     _(RecompileCheck) \
     _(MemoryBarrier) \
js/src/jit/MIR.h (111 changed lines)
@@ -12107,10 +12107,20 @@ class MAsmJSHeapAccess
 
 class MAsmJSLoadHeap : public MUnaryInstruction, public MAsmJSHeapAccess
 {
-    MAsmJSLoadHeap(Scalar::Type vt, MDefinition *ptr, bool needsBoundsCheck)
-      : MUnaryInstruction(ptr), MAsmJSHeapAccess(vt, needsBoundsCheck)
+    MemoryBarrierBits barrierBefore_;
+    MemoryBarrierBits barrierAfter_;
+
+    MAsmJSLoadHeap(Scalar::Type vt, MDefinition *ptr, bool needsBoundsCheck,
+                   MemoryBarrierBits before, MemoryBarrierBits after)
+      : MUnaryInstruction(ptr),
+        MAsmJSHeapAccess(vt, needsBoundsCheck),
+        barrierBefore_(before),
+        barrierAfter_(after)
     {
-        setMovable();
+        if (before|after)
+            setGuard();         // Not removable
+        else
+            setMovable();
         if (vt == Scalar::Float32)
             setResultType(MIRType_Float32);
         else if (vt == Scalar::Float64)
@@ -12123,12 +12133,16 @@ class MAsmJSLoadHeap : public MUnaryInstruction, public MAsmJSHeapAccess
     INSTRUCTION_HEADER(AsmJSLoadHeap);
 
     static MAsmJSLoadHeap *New(TempAllocator &alloc, Scalar::Type vt,
-                               MDefinition *ptr, bool needsBoundsCheck)
+                               MDefinition *ptr, bool needsBoundsCheck,
+                               MemoryBarrierBits barrierBefore = MembarNobits,
+                               MemoryBarrierBits barrierAfter = MembarNobits)
     {
-        return new(alloc) MAsmJSLoadHeap(vt, ptr, needsBoundsCheck);
+        return new(alloc) MAsmJSLoadHeap(vt, ptr, needsBoundsCheck, barrierBefore, barrierAfter);
     }
 
     MDefinition *ptr() const { return getOperand(0); }
+    MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
+    MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
 
     bool congruentTo(const MDefinition *ins) const;
     AliasSet getAliasSet() const {
@@ -12139,19 +12153,96 @@ class MAsmJSLoadHeap : public MUnaryInstruction, public MAsmJSHeapAccess
 
 class MAsmJSStoreHeap : public MBinaryInstruction, public MAsmJSHeapAccess
 {
-    MAsmJSStoreHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, bool needsBoundsCheck)
-      : MBinaryInstruction(ptr, v) , MAsmJSHeapAccess(vt, needsBoundsCheck)
-    {}
+    MemoryBarrierBits barrierBefore_;
+    MemoryBarrierBits barrierAfter_;
+
+    MAsmJSStoreHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
+                    MemoryBarrierBits before, MemoryBarrierBits after)
+      : MBinaryInstruction(ptr, v),
+        MAsmJSHeapAccess(vt, needsBoundsCheck),
+        barrierBefore_(before),
+        barrierAfter_(after)
+    {
+        if (before|after)
+            setGuard();         // Not removable
+    }
 
   public:
     INSTRUCTION_HEADER(AsmJSStoreHeap);
 
     static MAsmJSStoreHeap *New(TempAllocator &alloc, Scalar::Type vt,
-                                MDefinition *ptr, MDefinition *v, bool needsBoundsCheck)
+                                MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
+                                MemoryBarrierBits barrierBefore = MembarNobits,
+                                MemoryBarrierBits barrierAfter = MembarNobits)
     {
-        return new(alloc) MAsmJSStoreHeap(vt, ptr, v, needsBoundsCheck);
+        return new(alloc) MAsmJSStoreHeap(vt, ptr, v, needsBoundsCheck,
+                                          barrierBefore, barrierAfter);
     }
 
     MDefinition *ptr() const { return getOperand(0); }
     MDefinition *value() const { return getOperand(1); }
+    MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
+    MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
 
     AliasSet getAliasSet() const {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
+class MAsmJSCompareExchangeHeap : public MTernaryInstruction, public MAsmJSHeapAccess
+{
+    MAsmJSCompareExchangeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *oldv, MDefinition *newv,
+                              bool needsBoundsCheck)
+      : MTernaryInstruction(ptr, oldv, newv),
+        MAsmJSHeapAccess(vt, needsBoundsCheck)
+    {
+        setGuard();             // Not removable
+        setResultType(MIRType_Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(AsmJSCompareExchangeHeap);
+
+    static MAsmJSCompareExchangeHeap *New(TempAllocator &alloc, Scalar::Type vt,
+                                          MDefinition *ptr, MDefinition *oldv,
+                                          MDefinition *newv, bool needsBoundsCheck)
+    {
+        return new(alloc) MAsmJSCompareExchangeHeap(vt, ptr, oldv, newv, needsBoundsCheck);
+    }
+
+    MDefinition *ptr() const { return getOperand(0); }
+    MDefinition *oldValue() const { return getOperand(1); }
+    MDefinition *newValue() const { return getOperand(2); }
+
+    AliasSet getAliasSet() const {
+        return AliasSet::Store(AliasSet::AsmJSHeap);
+    }
+};
+
+class MAsmJSAtomicBinopHeap : public MBinaryInstruction, public MAsmJSHeapAccess
+{
+    AtomicOp op_;
+
+    MAsmJSAtomicBinopHeap(AtomicOp op, Scalar::Type vt, MDefinition *ptr, MDefinition *v,
+                          bool needsBoundsCheck)
+      : MBinaryInstruction(ptr, v),
+        MAsmJSHeapAccess(vt, needsBoundsCheck),
+        op_(op)
+    {
+        setGuard();             // Not removable
+        setResultType(MIRType_Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(AsmJSAtomicBinopHeap);
+
+    static MAsmJSAtomicBinopHeap *New(TempAllocator &alloc, AtomicOp op, Scalar::Type vt,
+                                      MDefinition *ptr, MDefinition *v, bool needsBoundsCheck)
+    {
+        return new(alloc) MAsmJSAtomicBinopHeap(op, vt, ptr, v, needsBoundsCheck);
+    }
+
+    AtomicOp operation() const { return op_; }
+    MDefinition *ptr() const { return getOperand(0); }
+    MDefinition *value() const { return getOperand(1); }
@@ -261,6 +261,8 @@ namespace jit {
     _(InterruptCheckPar) \
     _(RecompileCheck) \
     _(MemoryBarrier) \
+    _(AsmJSCompareExchangeHeap) \
+    _(AsmJSAtomicBinopHeap) \
     _(UnknownValue) \
     _(LexicalCheck) \
     _(ThrowUninitializedLexical) \
@@ -358,6 +358,8 @@ class ParallelSafetyVisitor : public MDefinitionVisitor
     UNSAFE_OP(CompareExchangeTypedArrayElement)
     UNSAFE_OP(AtomicTypedArrayElementBinop)
     UNSAFE_OP(MemoryBarrier)
+    UNSAFE_OP(AsmJSCompareExchangeHeap)
+    UNSAFE_OP(AsmJSAtomicBinopHeap)
     UNSAFE_OP(UnknownValue)
     UNSAFE_OP(LexicalCheck)
     UNSAFE_OP(ThrowUninitializedLexical)
@@ -2015,6 +2015,18 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
     return true;
 }
 
+bool
+CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
+{
+    MOZ_CRASH("NYI");
+}
+
+bool
+CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
+{
+    MOZ_CRASH("NYI");
+}
+
 bool
 CodeGeneratorARM::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
 {
@@ -206,6 +206,8 @@ class CodeGeneratorARM : public CodeGeneratorShared
     bool visitAsmJSCall(LAsmJSCall *ins);
     bool visitAsmJSLoadHeap(LAsmJSLoadHeap *ins);
     bool visitAsmJSStoreHeap(LAsmJSStoreHeap *ins);
+    bool visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins);
+    bool visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins);
     bool visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins);
     bool visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins);
     bool visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins);
@@ -636,3 +636,15 @@ LIRGeneratorARM::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArra
 
     return define(lir, ins);
 }
+
+bool
+LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins)
+{
+    MOZ_CRASH("NYI");
+}
+
+bool
+LIRGeneratorARM::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins)
+{
+    MOZ_CRASH("NYI");
+}
@@ -101,6 +101,8 @@ class LIRGeneratorARM : public LIRGeneratorShared
     bool visitAsmJSLoadHeap(MAsmJSLoadHeap *ins);
     bool visitAsmJSStoreHeap(MAsmJSStoreHeap *ins);
     bool visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins);
+    bool visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins);
+    bool visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins);
     bool visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins);
     bool visitForkJoinGetSlice(MForkJoinGetSlice *ins);
     bool visitSimdTernaryBitwise(MSimdTernaryBitwise *ins);
@@ -80,6 +80,8 @@ class LIRGeneratorNone : public LIRGeneratorShared
     bool visitForkJoinGetSlice(MForkJoinGetSlice *ins) { MOZ_CRASH(); }
     bool visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins) { MOZ_CRASH(); }
     bool visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins) { MOZ_CRASH(); }
+    bool visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins) { MOZ_CRASH(); }
+    bool visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins) { MOZ_CRASH(); }
 
     LTableSwitch *newLTableSwitch(LAllocation, LDefinition, MTableSwitch *) { MOZ_CRASH(); }
     LTableSwitchV *newLTableSwitchV(MTableSwitch *) { MOZ_CRASH(); }
@@ -1072,6 +1072,9 @@ class AssemblerX86Shared : public AssemblerShared
    void addl(Imm32 imm, Register dest) {
        masm.addl_ir(imm.value, dest.code());
    }
    void addl_wide(Imm32 imm, Register dest) {
        masm.addl_ir_wide(imm.value, dest.code());
    }
    void addl(Imm32 imm, const Operand &op) {
        switch (op.kind()) {
          case Operand::REG:
@@ -610,6 +610,13 @@ public:
            m_formatter.immediate32(imm);
        }
    }
    void addl_ir_wide(int imm, RegisterID dst)
    {
        // 32-bit immediate always, for patching.
        spew("addl $0x%x, %s", imm, nameIReg(4,dst));
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
        m_formatter.immediate32(imm);
    }

    void addl_im(int imm, int offset, RegisterID base)
    {
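
The _wide variant exists because this immediate is a patch site: the ordinary addl_ir is free to pick the sign-extended 8-bit encoding for small immediates, which would leave no room to write a pointer-sized value later. A hedged sketch of the intended use, mirroring the x86 code generator further down (names as in that code):

    uint32_t before = masm.size();
    masm.addl_wide(Imm32(0), ptrReg);   // placeholder: always a full imm32
    uint32_t after = masm.size();
    // Recording the range lets dynamic linking (AsmJSModule::initHeap)
    // patch the zero with the heap's base address, making ptrReg absolute.
    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
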
@@ -488,6 +488,129 @@ LIRGeneratorX86Shared::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElemen
    return fixedOutput ? defineFixed(lir, ins, LAllocation(AnyRegister(eax))) : define(lir, ins);
}

bool
LIRGeneratorX86Shared::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins)
{
    MDefinition *ptr = ins->ptr();
    MOZ_ASSERT(ptr->type() == MIRType_Int32);

    bool byteArray = false;
    switch (ins->viewType()) {
      case Scalar::Int8:
      case Scalar::Uint8:
        byteArray = true;
        break;
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Int32:
      case Scalar::Uint32:
        break;
      default:
        MOZ_CRASH("Unexpected array type");
    }

    // Register allocation:
    //
    // The output must be eax.
    //
    // oldval must be in a register (it will eventually end up in eax, so
    // ideally it is there to begin with).
    //
    // newval must be in a register. If the source is a byte array then
    // newval must be in a register that has a byte form: ebx, ecx, or
    // edx, since eax is taken for the output in this case. We pick ebx,
    // but it would be more flexible to pick whichever of the three is
    // not already in use.
    //
    // Bug #1077036 describes some optimization opportunities.

    const LAllocation newval = byteArray ? useFixed(ins->newValue(), ebx) : useRegister(ins->newValue());
    const LAllocation oldval = useRegister(ins->oldValue());

    LAsmJSCompareExchangeHeap *lir =
        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(ptr), oldval, newval);

    return defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
}
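
The fixed-register choreography above falls directly out of the x86 CMPXCHG instruction. A hedged sketch of the 32-bit sequence the masm helper used by the code generators below (compareExchangeToTypedIntArray) is expected to emit:

    //     movl  oldval, %eax            ; CMPXCHG compares *mem with eax
    //     lock cmpxchgl newval, mem     ; if equal, *mem <- newval
    //     ; either way eax now holds the value that was in *mem, which
    //     ; is exactly what compareExchange must return
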
bool
LIRGeneratorX86Shared::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins)
{
    MDefinition *ptr = ins->ptr();
    MOZ_ASSERT(ptr->type() == MIRType_Int32);

    bool byteArray = false;
    switch (ins->viewType()) {
      case Scalar::Int8:
      case Scalar::Uint8:
        byteArray = true;
        break;
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Int32:
      case Scalar::Uint32:
        break;
      default:
        MOZ_CRASH("Unexpected array type");
    }

    // Register allocation:
    //
    // For ADD and SUB we'll use XADD:
    //
    //     movl       value, output
    //     lock xaddl output, mem
    //
    // For the 8-bit variants XADD needs a byte register for the output
    // only; we can still set up with movl. Just pin the output to eax
    // (or ebx / ecx / edx).
    //
    // For AND/OR/XOR we need to use a CMPXCHG loop:
    //
    //     movl          *mem, eax
    // L:  mov           eax, temp
    //     andl          value, temp
    //     lock cmpxchg  temp, mem  ; reads eax also
    //     jnz           L
    //     ; result in eax
    //
    // Note the placement of L: CMPXCHG will update eax with *mem if
    // *mem does not have the expected value, so reloading it at the
    // top of the loop would be redundant.
    //
    // We want to fix eax as the output. We also need a temp for
    // the intermediate value.
    //
    // For the 8-bit variants the temp must have a byte register.
    //
    // There are optimization opportunities:
    // - when the result is unused, Bug #1077014.
    // - better register allocation and instruction selection, Bug #1077036.

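    // AND, OR, and XOR are the "bit ops": they take the CMPXCHG loop
    // described above and therefore need a temp; ADD and SUB take the
    // XADD path and do not.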
    bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
    LDefinition tempDef = LDefinition::BogusTemp();
    LAllocation value;

    // Optimization opportunity: "value" need not be pinned to something that
    // has a byte register unless the back-end insists on using a byte move
    // for the setup or the payload computation, which it really need not do.

    if (byteArray) {
        value = useFixed(ins->value(), ebx);
        if (bitOp)
            tempDef = tempFixed(ecx);
    } else {
        value = useRegister(ins->value());
        if (bitOp)
            tempDef = temp();
    }

    LAsmJSAtomicBinopHeap *lir =
        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(ptr), value, tempDef);

    return defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
}

bool
LIRGeneratorX86Shared::visitSimdTernaryBitwise(MSimdTernaryBitwise *ins)
{
@@ -57,6 +57,8 @@ class LIRGeneratorX86Shared : public LIRGeneratorShared
    bool visitSimdValueX4(MSimdValueX4 *ins);
    bool visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins);
    bool visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins);
    bool visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins);
    bool visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins);
};

} // namespace jit
@@ -258,6 +258,13 @@ CodeGeneratorX64::visitAsmJSCall(LAsmJSCall *ins)
    return true;
}

void
CodeGeneratorX64::memoryBarrier(MemoryBarrierBits barrier)
{
    if (barrier & MembarStoreLoad)
        masm.storeLoadFence();
}
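
A note on why these barriers compile to almost nothing; the rationale below is an inference from the x86 memory model, not spelled out in the patch:

    // x86/x64 are total-store-order machines: loads are not reordered
    // with loads, stores are not reordered with stores, and stores are
    // not reordered with earlier loads. The only reordering software
    // can observe is a store followed by a load from another location,
    // so MembarStoreLoad is the only bit that must cost a real fence;
    // storeLoadFence presumably emits mfence or an equivalent locked
    // instruction.
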
bool
CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
@@ -275,6 +282,7 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
        srcAddr = Operand(HeapReg, ToRegister(ptr), TimesOne);
    }

    memoryBarrier(ins->mir()->barrierBefore());
    OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
@@ -303,6 +311,7 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
    uint32_t after = masm.size();
    if (ool)
        masm.bind(ool->rejoin());
    memoryBarrier(ins->mir()->barrierAfter());
    masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out), maybeCmpOffset));
    return true;
}
@@ -323,6 +332,7 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
        dstAddr = Operand(HeapReg, ToRegister(ptr), TimesOne);
    }

    memoryBarrier(ins->mir()->barrierBefore());
    Label rejoin;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
@@ -358,10 +368,98 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
    uint32_t after = masm.size();
    if (rejoin.used())
        masm.bind(&rejoin);
    memoryBarrier(ins->mir()->barrierAfter());
    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
    return true;
}

bool
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
{
    MAsmJSCompareExchangeHeap *mir = ins->mir();
    Scalar::Type vt = mir->viewType();
    const LAllocation *ptr = ins->ptr();

    MOZ_ASSERT(ptr->isRegister());
    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne);

    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());

    Label rejoin;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    MOZ_ASSERT(mir->needsBoundsCheck());
    {
        maybeCmpOffset = masm.cmplWithPatch(ToRegister(ptr), Imm32(0)).offset();
        Label goahead;
        masm.j(Assembler::LessThan, &goahead);
        memoryBarrier(MembarFull);
        Register out = ToRegister(ins->output());
        masm.xorl(out, out);
        masm.jmp(&rejoin);
        masm.bind(&goahead);
    }
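    // The block above is the out-of-bounds path: cmplWithPatch records a
    // comparison whose immediate is patched with the heap length at link
    // time, and an out-of-bounds index skips the atomic operation, runs a
    // full barrier, and returns 0 in the output register.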
    masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    uint32_t after = masm.size();
    if (rejoin.used())
        masm.bind(&rejoin);
    masm.append(AsmJSHeapAccess(after, after, maybeCmpOffset));
    return true;
}

bool
CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
{
    MAsmJSAtomicBinopHeap *mir = ins->mir();
    Scalar::Type vt = mir->viewType();
    const LAllocation *ptr = ins->ptr();
    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
    const LAllocation *value = ins->value();
    AtomicOp op = mir->operation();

    MOZ_ASSERT(ptr->isRegister());
    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne);

    Label rejoin;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    MOZ_ASSERT(mir->needsBoundsCheck());
    {
        maybeCmpOffset = masm.cmplWithPatch(ToRegister(ptr), Imm32(0)).offset();
        Label goahead;
        masm.j(Assembler::LessThan, &goahead);
        memoryBarrier(MembarFull);
        Register out = ToRegister(ins->output());
        masm.xorl(out, out);
        masm.jmp(&rejoin);
        masm.bind(&goahead);
    }
    if (value->isConstant()) {
        masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        Imm32(ToInt32(value)),
                                        srcAddr,
                                        temp,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    } else {
        masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        ToRegister(value),
                                        srcAddr,
                                        temp,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    }
    uint32_t after = masm.size();
    if (rejoin.used())
        masm.bind(&rejoin);
    masm.append(AsmJSHeapAccess(after, after, maybeCmpOffset));
    return true;
}

bool
CodeGeneratorX64::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
{
@@ -25,6 +25,7 @@ class CodeGeneratorX64 : public CodeGeneratorX86Shared

    void storeUnboxedValue(const LAllocation *value, MIRType valueType,
                           Operand dest, MIRType slotType);
    void memoryBarrier(MemoryBarrierBits barrier);

  public:
    CodeGeneratorX64(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);
@@ -44,6 +45,8 @@ class CodeGeneratorX64 : public CodeGeneratorX86Shared
    bool visitAsmJSCall(LAsmJSCall *ins);
    bool visitAsmJSLoadHeap(LAsmJSLoadHeap *ins);
    bool visitAsmJSStoreHeap(LAsmJSStoreHeap *ins);
    bool visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins);
    bool visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins);
    bool visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins);
    bool visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins);
    bool visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins);
@@ -358,6 +358,13 @@ CodeGeneratorX86::visitAsmJSCall(LAsmJSCall *ins)
    return true;
}

void
CodeGeneratorX86::memoryBarrier(MemoryBarrierBits barrier)
{
    if (barrier & MembarStoreLoad)
        masm.storeLoadFence();
}

bool
CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
@@ -366,20 +373,27 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
    const LAllocation *ptr = ins->ptr();
    const LDefinition *out = ins->output();

    memoryBarrier(ins->mir()->barrierBefore());

    if (ptr->isConstant()) {
        // The constant displacement still needs to be added to the as-yet-unknown
        // base address of the heap. For now, embed the displacement as an
        // immediate in the instruction. This displacement will be fixed up when the
        // base address is known during dynamic linking (AsmJSModule::initHeap).
        PatchedAbsoluteAddress srcAddr((void *) ptr->toConstant()->toInt32());
        return loadAndNoteViewTypeElement(vt, srcAddr, out);
        loadAndNoteViewTypeElement(vt, srcAddr, out);
        memoryBarrier(ins->mir()->barrierAfter());
        return true;
    }

    Register ptrReg = ToRegister(ptr);
    Address srcAddr(ptrReg, 0);

    if (!mir->needsBoundsCheck())
        return loadAndNoteViewTypeElement(vt, srcAddr, out);
    if (!mir->needsBoundsCheck()) {
        loadAndNoteViewTypeElement(vt, srcAddr, out);
        memoryBarrier(ins->mir()->barrierAfter());
        return true;
    }

    bool isFloat32Load = vt == Scalar::Float32;
    OutOfLineLoadTypedArrayOutOfBounds *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
@@ -393,6 +407,7 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
    loadViewTypeElement(vt, srcAddr, out);
    uint32_t after = masm.size();
    masm.bind(ool->rejoin());
    memoryBarrier(ins->mir()->barrierAfter());
    masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out), cmp.offset()));
    return true;
}
@@ -454,6 +469,8 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
    const LAllocation *value = ins->value();
    const LAllocation *ptr = ins->ptr();

    memoryBarrier(ins->mir()->barrierBefore());

    if (ptr->isConstant()) {
        // The constant displacement still needs to be added to the as-yet-unknown
        // base address of the heap. For now, embed the displacement as an
@@ -461,6 +478,7 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
        // base address is known during dynamic linking (AsmJSModule::initHeap).
        PatchedAbsoluteAddress dstAddr((void *) ptr->toConstant()->toInt32());
        storeAndNoteViewTypeElement(vt, value, dstAddr);
        memoryBarrier(ins->mir()->barrierAfter());
        return true;
    }

@@ -469,6 +487,7 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)

    if (!mir->needsBoundsCheck()) {
        storeAndNoteViewTypeElement(vt, value, dstAddr);
        memoryBarrier(ins->mir()->barrierAfter());
        return true;
    }

@@ -480,10 +499,115 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
    storeViewTypeElement(vt, value, dstAddr);
    uint32_t after = masm.size();
    masm.bind(&rejoin);
    memoryBarrier(ins->mir()->barrierAfter());
    masm.append(AsmJSHeapAccess(before, after, cmp.offset()));
    return true;
}

bool
CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
{
    MAsmJSCompareExchangeHeap *mir = ins->mir();
    Scalar::Type vt = mir->viewType();
    const LAllocation *ptr = ins->ptr();
    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());

    MOZ_ASSERT(ptr->isRegister());
    // Set up the offset within the heap in the pointer reg.
    Register ptrReg = ToRegister(ptr);

    Label rejoin;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;

    if (mir->needsBoundsCheck()) {
        maybeCmpOffset = masm.cmplWithPatch(ptrReg, Imm32(0)).offset();
        Label goahead;
        masm.j(Assembler::LessThan, &goahead);
        memoryBarrier(MembarFull);
        Register out = ToRegister(ins->output());
        masm.xorl(out, out);
        masm.jmp(&rejoin);
        masm.bind(&goahead);
    }

    // Add in the actual heap pointer explicitly, to avoid opening up
    // the abstraction that is compareExchangeToTypedIntArray at this time.
    uint32_t before = masm.size();
    masm.addl_wide(Imm32(0), ptrReg);
    uint32_t after = masm.size();
    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
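
    // The addl_wide above is the patch site recorded by the
    // AsmJSHeapAccess: its 32-bit immediate is zero for now and is
    // patched with the heap's base address during dynamic linking
    // (AsmJSModule::initHeap), after which ptrReg holds an absolute
    // address into the heap.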
    Address memAddr(ToRegister(ptr), 0);
    masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        memAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    if (rejoin.used())
        masm.bind(&rejoin);

    return true;
}

bool
CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
{
    MAsmJSAtomicBinopHeap *mir = ins->mir();
    Scalar::Type vt = mir->viewType();
    const LAllocation *ptr = ins->ptr();
    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
    const LAllocation *value = ins->value();
    AtomicOp op = mir->operation();

    MOZ_ASSERT(ptr->isRegister());
    // Set up the offset within the heap in the pointer reg.
    Register ptrReg = ToRegister(ptr);

    Label rejoin;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;

    if (mir->needsBoundsCheck()) {
        maybeCmpOffset = masm.cmplWithPatch(ptrReg, Imm32(0)).offset();
        Label goahead;
        masm.j(Assembler::LessThan, &goahead);
        memoryBarrier(MembarFull);
        Register out = ToRegister(ins->output());
        masm.xorl(out, out);
        masm.jmp(&rejoin);
        masm.bind(&goahead);
    }

    // Add in the actual heap pointer explicitly, to avoid opening up
    // the abstraction that is atomicBinopToTypedIntArray at this time.
    uint32_t before = masm.size();
    masm.addl_wide(Imm32(0), ptrReg);
    uint32_t after = masm.size();
    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));

    Address memAddr(ptrReg, 0);
    if (value->isConstant()) {
        masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        Imm32(ToInt32(value)),
                                        memAddr,
                                        temp,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    } else {
        masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        ToRegister(value),
                                        memAddr,
                                        temp,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    }
    if (rejoin.used())
        masm.bind(&rejoin);

    return true;
}

bool
CodeGeneratorX86::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
{
@@ -40,6 +40,8 @@ class CodeGeneratorX86 : public CodeGeneratorX86Shared
    template<typename T>
    void storeViewTypeElement(Scalar::Type vt, const LAllocation *value,
                              const T &dstAddr);
    void memoryBarrier(MemoryBarrierBits barrier);

  public:
    CodeGeneratorX86(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);

@@ -61,6 +63,8 @@ class CodeGeneratorX86 : public CodeGeneratorX86Shared
    bool visitAsmJSCall(LAsmJSCall *ins);
    bool visitAsmJSLoadHeap(LAsmJSLoadHeap *ins);
    bool visitAsmJSStoreHeap(LAsmJSStoreHeap *ins);
    bool visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins);
    bool visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins);
    bool visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins);
    bool visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins);
    bool visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins);