mirror of
https://gitlab.winehq.org/wine/wine-gecko.git
synced 2024-09-13 09:24:08 -07:00
Bug 1141986 - Atomics.exchange on integer elements -- asm.js parts. r=bbouvier
This commit is contained in:
parent 24dd1b2639
commit d04d123c1d
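In brief: the patch teaches the asm.js pipeline about Atomics.exchange on integer heap views: the builtin/exit tables, call validation, a new I32::AtomicsExchange bytecode, MIR/LIR nodes, per-platform code generation (ARM, x86, x64), and C++ callouts for targets without sub-word atomic instructions. A minimal sketch of the usage pattern this enables (illustrative only, not part of the patch; the module shape follows the tests included below):

    function Module(stdlib, foreign, heap) {
        "use asm";
        var xchg = stdlib.Atomics.exchange;            // Atomics builtin imported from stdlib
        var i32a = new stdlib.SharedInt32Array(heap);  // shared integer view of the heap
        function f(i, v) {
            i = i|0;
            v = v|0;
            // Atomically store v into element i>>2, returning the old value.
            // The result must be coerced with |0, as the tests below verify.
            return xchg(i32a, i>>2, v|0)|0;
        }
        return { f: f };
    }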
@@ -690,6 +690,7 @@ BuiltinToName(AsmJSExit::BuiltinKind builtin)
      case AsmJSExit::Builtin_IDivMod:        return "software idivmod (in asm.js)";
      case AsmJSExit::Builtin_UDivMod:        return "software uidivmod (in asm.js)";
      case AsmJSExit::Builtin_AtomicCmpXchg:  return "Atomics.compareExchange (in asm.js)";
      case AsmJSExit::Builtin_AtomicXchg:     return "Atomics.exchange (in asm.js)";
      case AsmJSExit::Builtin_AtomicFetchAdd: return "Atomics.add (in asm.js)";
      case AsmJSExit::Builtin_AtomicFetchSub: return "Atomics.sub (in asm.js)";
      case AsmJSExit::Builtin_AtomicFetchAnd: return "Atomics.and (in asm.js)";
@@ -80,6 +80,7 @@ namespace AsmJSExit
        Builtin_IDivMod,
        Builtin_UDivMod,
        Builtin_AtomicCmpXchg,
        Builtin_AtomicXchg,
        Builtin_AtomicFetchAdd,
        Builtin_AtomicFetchSub,
        Builtin_AtomicFetchAnd,
@@ -417,6 +417,7 @@ ValidateAtomicsBuiltinFunction(JSContext* cx, AsmJSModule::Global& global, Handl
    Native native = nullptr;
    switch (global.atomicsBuiltinFunction()) {
      case AsmJSAtomicsBuiltin_compareExchange: native = atomics_compareExchange; break;
      case AsmJSAtomicsBuiltin_exchange: native = atomics_exchange; break;
      case AsmJSAtomicsBuiltin_load: native = atomics_load; break;
      case AsmJSAtomicsBuiltin_store: native = atomics_store; break;
      case AsmJSAtomicsBuiltin_fence: native = atomics_fence; break;
@@ -706,6 +706,8 @@ AddressOf(AsmJSImmKind kind, ExclusiveContext* cx)
        return RedirectCall(FuncCast(__aeabi_uidivmod), Args_General2);
      case AsmJSImm_AtomicCmpXchg:
        return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t, int32_t)>(js::atomics_cmpxchg_asm_callout), Args_General4);
      case AsmJSImm_AtomicXchg:
        return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_xchg_asm_callout), Args_General3);
      case AsmJSImm_AtomicFetchAdd:
        return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_add_asm_callout), Args_General3);
      case AsmJSImm_AtomicFetchSub:
@@ -860,7 +862,7 @@ AsmJSModule::initHeap(Handle<ArrayBufferObjectMaybeShared*> heap, JSContext* cx)
    // If we have any explicit bounds checks, we need to patch the heap length
    // checks at the right places. All accesses that have been recorded are the
    // only ones that need bound checks (see also
-   // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,AtomicBinop}Heap)
+   // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap)
    uint32_t heapLength = heap->byteLength();
    for (size_t i = 0; i < heapAccesses_.length(); i++) {
        const jit::AsmJSHeapAccess& access = heapAccesses_[i];
@@ -70,6 +70,7 @@ enum AsmJSMathBuiltinFunction
enum AsmJSAtomicsBuiltinFunction
{
    AsmJSAtomicsBuiltin_compareExchange,
    AsmJSAtomicsBuiltin_exchange,
    AsmJSAtomicsBuiltin_load,
    AsmJSAtomicsBuiltin_store,
    AsmJSAtomicsBuiltin_fence,
@@ -1547,6 +1547,7 @@ class MOZ_STACK_CLASS ModuleCompiler

        if (!standardLibraryAtomicsNames_.init() ||
            !addStandardLibraryAtomicsName("compareExchange", AsmJSAtomicsBuiltin_compareExchange) ||
            !addStandardLibraryAtomicsName("exchange", AsmJSAtomicsBuiltin_exchange) ||
            !addStandardLibraryAtomicsName("load", AsmJSAtomicsBuiltin_load) ||
            !addStandardLibraryAtomicsName("store", AsmJSAtomicsBuiltin_store) ||
            !addStandardLibraryAtomicsName("fence", AsmJSAtomicsBuiltin_fence) ||
@@ -2569,6 +2570,7 @@ enum class I32 : uint8_t {

    // Atomics opcodes
    AtomicsCompareExchange,
    AtomicsExchange,
    AtomicsLoad,
    AtomicsStore,
    AtomicsBinOp,
@@ -3597,6 +3599,19 @@ class FunctionCompiler
        return cas;
    }

    MDefinition* atomicExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* value,
                                    NeedsBoundsCheck chk)
    {
        if (inDeadCode())
            return nullptr;

        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
        MAsmJSAtomicExchangeHeap* cas =
            MAsmJSAtomicExchangeHeap::New(alloc(), accessType, ptr, value, needsBoundsCheck);
        curBlock_->add(cas);
        return cas;
    }

    MDefinition* atomicBinopHeap(js::jit::AtomicOp op, Scalar::Type accessType, MDefinition* ptr,
                                 MDefinition* v, NeedsBoundsCheck chk)
    {
@@ -6132,6 +6147,55 @@ EmitAtomicsCompareExchange(FunctionCompiler& f, MDefinition** def)
    return true;
}

static bool
CheckAtomicsExchange(FunctionBuilder& f, ParseNode* call, Type* type)
{
    if (CallArgListLength(call) != 3)
        return f.fail(call, "Atomics.exchange must be passed 3 arguments");

    ParseNode* arrayArg = CallArgList(call);
    ParseNode* indexArg = NextNode(arrayArg);
    ParseNode* valueArg = NextNode(indexArg);

    f.writeOp(I32::AtomicsExchange);
    size_t needsBoundsCheckAt = f.tempU8();
    size_t viewTypeAt = f.tempU8();

    Scalar::Type viewType;
    NeedsBoundsCheck needsBoundsCheck;
    int32_t mask;
    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &needsBoundsCheck, &mask))
        return false;

    Type valueArgType;
    if (!CheckExpr(f, valueArg, &valueArgType))
        return false;

    if (!valueArgType.isIntish())
        return f.failf(arrayArg, "%s is not a subtype of intish", valueArgType.toChars());

    f.patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
    f.patchU8(viewTypeAt, uint8_t(viewType));

    *type = Type::Intish;
    return true;
}

static bool
EmitAtomicsExchange(FunctionCompiler& f, MDefinition** def)
{
    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
    Scalar::Type viewType = Scalar::Type(f.readU8());
    MDefinition* index;
    if (!EmitI32Expr(f, &index))
        return false;
    MDefinition* value;
    if (!EmitI32Expr(f, &value))
        return false;
    *def = f.atomicExchangeHeap(viewType, index, value, needsBoundsCheck);
    return true;
}

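CheckAtomicsExchange and EmitAtomicsExchange are a matched pair: the checker writes the opcode plus two patched u8 slots (needsBoundsCheck, viewType) followed by the index and value expressions, and the emitter reads them back in exactly that order. From the asm.js side the observable rules are: exactly 3 arguments, a shared integer view, an "intish" value operand, and an intish result. A hedged illustration of the rules (not taken from the patch):

    // OK: three arguments, intish value (1+2), result coerced to int.
    v = atomic_exchange(i32a, 200, 1+2)|0;

    // Rejected by CheckAtomicsExchange:
    // "Atomics.exchange must be passed 3 arguments".
    v = atomic_exchange(i32a, 200)|0;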
static bool
CheckAtomicsBuiltinCall(FunctionBuilder& f, ParseNode* callNode, AsmJSAtomicsBuiltinFunction func,
                        Type* resultType)
@@ -6139,6 +6203,8 @@ CheckAtomicsBuiltinCall(FunctionBuilder& f, ParseNode* callNode, AsmJSAtomicsBui
    switch (func) {
      case AsmJSAtomicsBuiltin_compareExchange:
        return CheckAtomicsCompareExchange(f, callNode, resultType);
      case AsmJSAtomicsBuiltin_exchange:
        return CheckAtomicsExchange(f, callNode, resultType);
      case AsmJSAtomicsBuiltin_load:
        return CheckAtomicsLoad(f, callNode, resultType);
      case AsmJSAtomicsBuiltin_store:
@@ -9877,6 +9943,8 @@ EmitI32Expr(FunctionCompiler& f, MDefinition** def)
        return EmitComparison(f, op, def);
      case I32::AtomicsCompareExchange:
        return EmitAtomicsCompareExchange(f, def);
      case I32::AtomicsExchange:
        return EmitAtomicsExchange(f, def);
      case I32::AtomicsLoad:
        return EmitAtomicsLoad(f, def);
      case I32::AtomicsStore:
@@ -11512,6 +11580,7 @@ GenerateBuiltinThunk(ModuleCompiler& m, AsmJSExit::BuiltinKind builtin)
        argTypes.infallibleAppend(MIRType_Int32);
        argTypes.infallibleAppend(MIRType_Int32);
        break;
      case AsmJSExit::Builtin_AtomicXchg:
      case AsmJSExit::Builtin_AtomicFetchAdd:
      case AsmJSExit::Builtin_AtomicFetchSub:
      case AsmJSExit::Builtin_AtomicFetchAnd:
@@ -133,7 +133,7 @@ js::atomics_fence(JSContext* cx, unsigned argc, Value* vp)

static int32_t
CompareExchange(Scalar::Type viewType, int32_t oldCandidate, int32_t newCandidate, void* viewData,
-               uint32_t offset, bool* badArrayType)
+               uint32_t offset, bool* badArrayType=nullptr)
{
    switch (viewType) {
      case Scalar::Int8: {
@@ -179,6 +179,7 @@ CompareExchange(Scalar::Type viewType, int32_t oldCandidate, int32_t newCandidat
        return (int32_t)oldval;
      }
      default:
        if (badArrayType)
            *badArrayType = true;
        return 0;
    }
@@ -288,7 +289,7 @@ enum XchgStoreOp {
template<XchgStoreOp op>
static int32_t
ExchangeOrStore(Scalar::Type viewType, int32_t numberValue, void* viewData, uint32_t offset,
-               bool* badArrayType)
+               bool* badArrayType=nullptr)
{
#define INT_OP(ptr, value) \
    JS_BEGIN_MACRO \
@@ -335,6 +336,7 @@ ExchangeOrStore(Scalar::Type viewType, int32_t numberValue, void* viewData, uint
        return (int32_t)value;
      }
      default:
        if (badArrayType)
            *badArrayType = true;
        return 0;
    }
@@ -562,12 +564,9 @@ js::atomics_isLockFree(JSContext* cx, unsigned argc, Value* vp)
// asm.js callouts for platforms that do not have non-word-sized
// atomics where we don't want to inline the logic for the atomics.
//
// size is currently -1 (signed byte), 1 (unsigned byte), -2 (signed halfword),
// or 2 (halfword).
// ptr is the byte offset within the heap array. This will have low bit zero
// for halfword accesses.
// value (for binops) and oldval/newval (for cmpxchg) are the values
// to be operated upon.
// To test this, either run on eg Raspberry Pi Model 1, or invoke the ARM
// simulator build with ARMHWCAP=vfp set. Do not set any other flags; other
// vfp/neon flags force ARMv7 to be set.

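These callouts matter on ARMv6-class hardware: as the ARM lowering later in this patch shows, an exchange narrower than 4 bytes on a target without the LDREXB/LDREXH instructions (HasLDSTREXBHD() false) is compiled as a call to js::atomics_xchg_asm_callout instead of inline atomic code. A hedged sketch of asm.js code that would take that path on such hardware (illustrative only, not part of the patch):

    var xchg = stdlib.Atomics.exchange;
    var i16a = new stdlib.SharedInt16Array(heap);
    function f(i, v) {
        i = i|0;
        v = v|0;
        // Halfword exchange: on ARMv6 this becomes a C++ callout that
        // receives (Scalar::Int16, byte offset, value), per the comment
        // block above.
        return xchg(i16a, i>>1, v|0)|0;
    }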
static void
GetCurrentAsmJSHeap(void** heap, size_t* length)
@@ -584,7 +583,8 @@ js::atomics_add_asm_callout(int32_t vt, int32_t offset, int32_t value)
    void* heap;
    size_t heapLength;
    GetCurrentAsmJSHeap(&heap, &heapLength);
-   if ((size_t)offset >= heapLength) return 0;
+   if (size_t(offset) >= heapLength)
+       return 0;
    switch (Scalar::Type(vt)) {
      case Scalar::Int8:
        return PerformAdd::operate((int8_t*)heap + offset, value);
@@ -605,7 +605,8 @@ js::atomics_sub_asm_callout(int32_t vt, int32_t offset, int32_t value)
    void* heap;
    size_t heapLength;
    GetCurrentAsmJSHeap(&heap, &heapLength);
-   if ((size_t)offset >= heapLength) return 0;
+   if (size_t(offset) >= heapLength)
+       return 0;
    switch (Scalar::Type(vt)) {
      case Scalar::Int8:
        return PerformSub::operate((int8_t*)heap + offset, value);
@@ -626,7 +627,8 @@ js::atomics_and_asm_callout(int32_t vt, int32_t offset, int32_t value)
    void* heap;
    size_t heapLength;
    GetCurrentAsmJSHeap(&heap, &heapLength);
-   if ((size_t)offset >= heapLength) return 0;
+   if (size_t(offset) >= heapLength)
+       return 0;
    switch (Scalar::Type(vt)) {
      case Scalar::Int8:
        return PerformAnd::operate((int8_t*)heap + offset, value);
@@ -647,7 +649,8 @@ js::atomics_or_asm_callout(int32_t vt, int32_t offset, int32_t value)
    void* heap;
    size_t heapLength;
    GetCurrentAsmJSHeap(&heap, &heapLength);
-   if ((size_t)offset >= heapLength) return 0;
+   if (size_t(offset) >= heapLength)
+       return 0;
    switch (Scalar::Type(vt)) {
      case Scalar::Int8:
        return PerformOr::operate((int8_t*)heap + offset, value);
@@ -668,7 +671,8 @@ js::atomics_xor_asm_callout(int32_t vt, int32_t offset, int32_t value)
    void* heap;
    size_t heapLength;
    GetCurrentAsmJSHeap(&heap, &heapLength);
-   if ((size_t)offset >= heapLength) return 0;
+   if (size_t(offset) >= heapLength)
+       return 0;
    switch (Scalar::Type(vt)) {
      case Scalar::Int8:
        return PerformXor::operate((int8_t*)heap + offset, value);
@@ -683,23 +687,45 @@ js::atomics_xor_asm_callout(int32_t vt, int32_t offset, int32_t value)
    }
}

int32_t
js::atomics_xchg_asm_callout(int32_t vt, int32_t offset, int32_t value)
{
    void* heap;
    size_t heapLength;
    GetCurrentAsmJSHeap(&heap, &heapLength);
    if (size_t(offset) >= heapLength)
        return 0;
    switch (Scalar::Type(vt)) {
      case Scalar::Int8:
        return ExchangeOrStore<DoExchange>(Scalar::Int8, value, heap, offset);
      case Scalar::Uint8:
        return ExchangeOrStore<DoExchange>(Scalar::Uint8, value, heap, offset);
      case Scalar::Int16:
        return ExchangeOrStore<DoExchange>(Scalar::Int16, value, heap, offset>>1);
      case Scalar::Uint16:
        return ExchangeOrStore<DoExchange>(Scalar::Uint16, value, heap, offset>>1);
      default:
        MOZ_CRASH("Invalid size");
    }
}

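Note how the new callout recovers an element index from its pointer argument: per the comment block above, `offset` is a byte offset into the heap whose low bit is already zero for halfword views, so the Int16/Uint16 cases pass offset>>1 because ExchangeOrStore indexes viewData as an array of the view's element type. A small worked illustration (assumed values, not from the patch):

    var byteOffset = 400;                // low bit zero for halfword views
    var elementIndex = byteOffset >> 1;  // == 200, the element i16a[200]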
int32_t
js::atomics_cmpxchg_asm_callout(int32_t vt, int32_t offset, int32_t oldval, int32_t newval)
{
    void* heap;
    size_t heapLength;
    GetCurrentAsmJSHeap(&heap, &heapLength);
-   if ((size_t)offset >= heapLength) return 0;
-   bool badType = false;
+   if (size_t(offset) >= heapLength)
+       return 0;
    switch (Scalar::Type(vt)) {
      case Scalar::Int8:
-       return CompareExchange(Scalar::Int8, oldval, newval, heap, offset, &badType);
+       return CompareExchange(Scalar::Int8, oldval, newval, heap, offset);
      case Scalar::Uint8:
-       return CompareExchange(Scalar::Uint8, oldval, newval, heap, offset, &badType);
+       return CompareExchange(Scalar::Uint8, oldval, newval, heap, offset);
      case Scalar::Int16:
-       return CompareExchange(Scalar::Int16, oldval, newval, heap, offset>>1, &badType);
+       return CompareExchange(Scalar::Int16, oldval, newval, heap, offset>>1);
      case Scalar::Uint16:
-       return CompareExchange(Scalar::Uint16, oldval, newval, heap, offset>>1, &badType);
+       return CompareExchange(Scalar::Uint16, oldval, newval, heap, offset>>1);
      default:
        MOZ_CRASH("Invalid size");
    }
@@ -53,6 +53,7 @@ int32_t atomics_and_asm_callout(int32_t vt, int32_t offset, int32_t value);
int32_t atomics_or_asm_callout(int32_t vt, int32_t offset, int32_t value);
int32_t atomics_xor_asm_callout(int32_t vt, int32_t offset, int32_t value);
int32_t atomics_cmpxchg_asm_callout(int32_t vt, int32_t offset, int32_t oldval, int32_t newval);
int32_t atomics_xchg_asm_callout(int32_t vt, int32_t offset, int32_t value);

class FutexRuntime
{
@@ -12,6 +12,7 @@ function loadModule_int32(stdlib, foreign, heap) {
    var atomic_load = stdlib.Atomics.load;
    var atomic_store = stdlib.Atomics.store;
    var atomic_cmpxchg = stdlib.Atomics.compareExchange;
    var atomic_exchange = stdlib.Atomics.exchange;
    var atomic_add = stdlib.Atomics.add;
    var atomic_sub = stdlib.Atomics.sub;
    var atomic_and = stdlib.Atomics.and;
@@ -54,6 +55,31 @@ function loadModule_int32(stdlib, foreign, heap) {
        return v|0;
    }

    // Exchange 37 into element 200
    function do_xchg() {
        var v = 0;
        v = atomic_exchange(i32a, 200, 37)|0;
        return v|0;
    }

    // Exchange 42 into element i
    function do_xchg_i(i) {
        i = i|0;
        var v = 0;
        v = atomic_exchange(i32a, i>>2, 42)|0;
        return v|0;
    }

    // Exchange 1+2 into element 200.  This is not called; all we're
    // checking is that the compilation succeeds, since 1+2 has type
    // "intish" (asm.js spec "AdditiveExpression") and this should be
    // allowed.
    function do_xchg_intish() {
        var v = 0;
        v = atomic_exchange(i32a, 200, 1+2)|0;
        return v|0;
    }

    // Add 37 to element 10
    function do_add() {
        var v = 0;
@@ -69,6 +95,14 @@ function loadModule_int32(stdlib, foreign, heap) {
        return v|0;
    }

    // As for do_xchg_intish, above.  Given the structure of the
    // compiler, this covers all the binops.
    function do_add_intish() {
        var v = 0;
        v = atomic_add(i32a, 10, 1+2)|0;
        return v|0;
    }

    // Subtract 148 from element 20
    function do_sub() {
        var v = 0;
@@ -136,6 +170,14 @@ function loadModule_int32(stdlib, foreign, heap) {
        return v|0;
    }

    // As for do_xchg_intish, above.  Will not be called, is here just
    // to test that the compiler allows intish arguments.
    function do_cas_intish() {
        var v = 0;
        v = atomic_cmpxchg(i32a, 100, 1+2, 2+3)|0;
        return v|0;
    }

    // CAS element 100: -1 -> 0x5A5A5A5A
    function do_cas2() {
        var v = 0;
@@ -164,8 +206,12 @@ function loadModule_int32(stdlib, foreign, heap) {
             load_i: do_load_i,
             store: do_store,
             store_i: do_store_i,
             xchg: do_xchg,
             xchg_i: do_xchg_i,
             xchg_intish: do_xchg_intish,
             add: do_add,
             add_i: do_add_i,
             add_intish: do_add_intish,
             sub: do_sub,
             sub_i: do_sub_i,
             and: do_and,
@@ -176,6 +222,7 @@ function loadModule_int32(stdlib, foreign, heap) {
             xor_i: do_xor_i,
             cas1: do_cas1,
             cas2: do_cas2,
             cas_intish: do_cas_intish,
             cas1_i: do_cas1_i,
             cas2_i: do_cas2_i };
}
@@ -183,6 +230,27 @@ function loadModule_int32(stdlib, foreign, heap) {
if (isAsmJSCompilationAvailable())
    assertEq(isAsmJSModule(loadModule_int32), true);

// Test that compilation fails without a coercion on the return value.
// The module is never created, we use it only for its effect.

function loadModule_int32_return_xchg(stdlib, foreign, heap) {
    "use asm";

    var atomic_exchange = stdlib.Atomics.exchange;
    var i32a = new stdlib.SharedInt32Array(heap);

    function do_xchg() {
        var v = 0;
        v = atomic_exchange(i32a, 200, 37); // Should not be allowed without |0 at the end
        return v|0;
    }

    return { xchg: do_xchg }
}

if (isAsmJSCompilationAvailable())
    assertEq(isAsmJSModule(loadModule_int32_return_xchg), false);

function test_int32(heap) {
    var i32a = new SharedInt32Array(heap);
    var i32m = loadModule_int32(this, {}, heap);
@@ -199,6 +267,12 @@ function test_int32(heap) {
    assertEq(i32a[0], 37);
    assertEq(i32m.store_i(size*0), 37);

    i32a[200] = 78;
    assertEq(i32m.xchg(), 78);           // 37 into #200
    assertEq(i32a[0], 37);
    assertEq(i32m.xchg_i(size*200), 37); // 42 into #200
    assertEq(i32a[200], 42);

    i32a[10] = 18;
    assertEq(i32m.add(), 18);
    assertEq(i32a[10], 18+37);
@@ -263,6 +337,7 @@ function loadModule_uint32(stdlib, foreign, heap) {
    var atomic_load = stdlib.Atomics.load;
    var atomic_store = stdlib.Atomics.store;
    var atomic_cmpxchg = stdlib.Atomics.compareExchange;
    var atomic_exchange = stdlib.Atomics.exchange;
    var atomic_add = stdlib.Atomics.add;
    var atomic_sub = stdlib.Atomics.sub;
    var atomic_and = stdlib.Atomics.and;
@@ -301,6 +376,21 @@ function loadModule_uint32(stdlib, foreign, heap) {
        return +(v>>>0);
    }

    // Exchange 37 into element 200
    function do_xchg() {
        var v = 0;
        v = atomic_exchange(i32a, 200, 37)|0;
        return v|0;
    }

    // Exchange 42 into element i
    function do_xchg_i(i) {
        i = i|0;
        var v = 0;
        v = atomic_exchange(i32a, i>>2, 42)|0;
        return v|0;
    }

    // Add 37 to element 10
    function do_add() {
        var v = 0;
@@ -410,6 +500,8 @@ function loadModule_uint32(stdlib, foreign, heap) {
             load_i: do_load_i,
             store: do_store,
             store_i: do_store_i,
             xchg: do_xchg,
             xchg_i: do_xchg_i,
             add: do_add,
             add_i: do_add_i,
             sub: do_sub,
@@ -443,6 +535,12 @@ function test_uint32(heap) {
    assertEq(i32a[0], 37);
    assertEq(i32m.store_i(size*0), 37);

    i32a[200] = 78;
    assertEq(i32m.xchg(), 78);           // 37 into #200
    assertEq(i32a[0], 37);
    assertEq(i32m.xchg_i(size*200), 37); // 42 into #200
    assertEq(i32a[200], 42);

    i32a[10] = 18;
    assertEq(i32m.add(), 18);
    assertEq(i32a[10], 18+37);
@@ -507,6 +605,7 @@ function loadModule_int16(stdlib, foreign, heap) {
    var atomic_load = stdlib.Atomics.load;
    var atomic_store = stdlib.Atomics.store;
    var atomic_cmpxchg = stdlib.Atomics.compareExchange;
    var atomic_exchange = stdlib.Atomics.exchange;
    var atomic_add = stdlib.Atomics.add;
    var atomic_sub = stdlib.Atomics.sub;
    var atomic_and = stdlib.Atomics.and;
@@ -549,6 +648,21 @@ function loadModule_int16(stdlib, foreign, heap) {
        return v|0;
    }

    // Exchange 37 into element 200
    function do_xchg() {
        var v = 0;
        v = atomic_exchange(i16a, 200, 37)|0;
        return v|0;
    }

    // Exchange 42 into element i
    function do_xchg_i(i) {
        i = i|0;
        var v = 0;
        v = atomic_exchange(i16a, i>>1, 42)|0;
        return v|0;
    }

    // Add 37 to element 10
    function do_add() {
        var v = 0;
@@ -659,6 +773,8 @@ function loadModule_int16(stdlib, foreign, heap) {
             load_i: do_load_i,
             store: do_store,
             store_i: do_store_i,
             xchg: do_xchg,
             xchg_i: do_xchg_i,
             add: do_add,
             add_i: do_add_i,
             sub: do_sub,
@@ -698,6 +814,12 @@ function test_int16(heap) {
    assertEq(i16a[0], 37);
    assertEq(i16m.store_i(size*0), 37);

    i16a[200] = 78;
    assertEq(i16m.xchg(), 78);           // 37 into #200
    assertEq(i16a[0], 37);
    assertEq(i16m.xchg_i(size*200), 37); // 42 into #200
    assertEq(i16a[200], 42);

    i16a[10] = 18;
    assertEq(i16m.add(), 18);
    assertEq(i16a[10], 18+37);
@@ -765,6 +887,7 @@ function loadModule_uint16(stdlib, foreign, heap) {
    var atomic_load = stdlib.Atomics.load;
    var atomic_store = stdlib.Atomics.store;
    var atomic_cmpxchg = stdlib.Atomics.compareExchange;
    var atomic_exchange = stdlib.Atomics.exchange;
    var atomic_add = stdlib.Atomics.add;
    var atomic_sub = stdlib.Atomics.sub;
    var atomic_and = stdlib.Atomics.and;
@@ -803,6 +926,21 @@ function loadModule_uint16(stdlib, foreign, heap) {
        return v|0;
    }

    // Exchange 37 into element 200
    function do_xchg() {
        var v = 0;
        v = atomic_exchange(i16a, 200, 37)|0;
        return v|0;
    }

    // Exchange 42 into element i
    function do_xchg_i(i) {
        i = i|0;
        var v = 0;
        v = atomic_exchange(i16a, i>>1, 42)|0;
        return v|0;
    }

    // Add 37 to element 10
    function do_add() {
        var v = 0;
@@ -912,6 +1050,8 @@ function loadModule_uint16(stdlib, foreign, heap) {
             load_i: do_load_i,
             store: do_store,
             store_i: do_store_i,
             xchg: do_xchg,
             xchg_i: do_xchg_i,
             add: do_add,
             add_i: do_add_i,
             sub: do_sub,
@@ -949,6 +1089,12 @@ function test_uint16(heap) {
    assertEq(i16a[0], 37);
    assertEq(i16m.store_i(size*0), 37);

    i16a[200] = 78;
    assertEq(i16m.xchg(), 78);           // 37 into #200
    assertEq(i16a[0], 37);
    assertEq(i16m.xchg_i(size*200), 37); // 42 into #200
    assertEq(i16a[200], 42);

    i16a[10] = 18;
    assertEq(i16m.add(), 18);
    assertEq(i16a[10], 18+37);
@@ -1016,6 +1162,7 @@ function loadModule_int8(stdlib, foreign, heap) {
    var atomic_load = stdlib.Atomics.load;
    var atomic_store = stdlib.Atomics.store;
    var atomic_cmpxchg = stdlib.Atomics.compareExchange;
    var atomic_exchange = stdlib.Atomics.exchange;
    var atomic_add = stdlib.Atomics.add;
    var atomic_sub = stdlib.Atomics.sub;
    var atomic_and = stdlib.Atomics.and;
@@ -1054,6 +1201,21 @@ function loadModule_int8(stdlib, foreign, heap) {
        return v|0;
    }

    // Exchange 37 into element 200
    function do_xchg() {
        var v = 0;
        v = atomic_exchange(i8a, 200, 37)|0;
        return v|0;
    }

    // Exchange 42 into element i
    function do_xchg_i(i) {
        i = i|0;
        var v = 0;
        v = atomic_exchange(i8a, i, 42)|0;
        return v|0;
    }

    // Add 37 to element 10
    function do_add() {
        var v = 0;
@@ -1163,6 +1325,8 @@ function loadModule_int8(stdlib, foreign, heap) {
             load_i: do_load_i,
             store: do_store,
             store_i: do_store_i,
             xchg: do_xchg,
             xchg_i: do_xchg_i,
             add: do_add,
             add_i: do_add_i,
             sub: do_sub,
@@ -1199,6 +1363,12 @@ function test_int8(heap) {
    assertEq(i8a[0], 37);
    assertEq(i8m.store_i(0), 37);

    i8a[200] = 78;
    assertEq(i8m.xchg(), 78);           // 37 into #200
    assertEq(i8a[0], 37);
    assertEq(i8m.xchg_i(size*200), 37); // 42 into #200
    assertEq(i8a[200], 42);

    i8a[10] = 18;
    assertEq(i8m.add(), 18);
    assertEq(i8a[10], 18+37);
@@ -1260,6 +1430,7 @@ function loadModule_uint8(stdlib, foreign, heap) {
    var atomic_load = stdlib.Atomics.load;
    var atomic_store = stdlib.Atomics.store;
    var atomic_cmpxchg = stdlib.Atomics.compareExchange;
    var atomic_exchange = stdlib.Atomics.exchange;
    var atomic_add = stdlib.Atomics.add;
    var atomic_sub = stdlib.Atomics.sub;
    var atomic_and = stdlib.Atomics.and;
@@ -1298,6 +1469,21 @@ function loadModule_uint8(stdlib, foreign, heap) {
        return v|0;
    }

    // Exchange 37 into element 200
    function do_xchg() {
        var v = 0;
        v = atomic_exchange(i8a, 200, 37)|0;
        return v|0;
    }

    // Exchange 42 into element i
    function do_xchg_i(i) {
        i = i|0;
        var v = 0;
        v = atomic_exchange(i8a, i, 42)|0;
        return v|0;
    }

    // Add 37 to element 10
    function do_add() {
        var v = 0;
@@ -1407,6 +1593,8 @@ function loadModule_uint8(stdlib, foreign, heap) {
             load_i: do_load_i,
             store: do_store,
             store_i: do_store_i,
             xchg: do_xchg,
             xchg_i: do_xchg_i,
             add: do_add,
             add_i: do_add_i,
             sub: do_sub,
@@ -1447,6 +1635,12 @@ function test_uint8(heap) {
    assertEq(i8a[0], 37);
    assertEq(i8m.store_i(0), 37);

    i8a[200] = 78;
    assertEq(i8m.xchg(), 78);           // 37 into #200
    assertEq(i8a[0], 37);
    assertEq(i8m.xchg_i(size*200), 37); // 42 into #200
    assertEq(i8a[200], 42);

    i8a[10] = 18;
    assertEq(i8m.add(), 18);
    assertEq(i8a[10], 18+37);
@ -6658,6 +6658,37 @@ class LAsmJSCompareExchangeHeap : public LInstructionHelper<1, 3, 1>
|
||||
}
|
||||
};
|
||||
|
||||
class LAsmJSAtomicExchangeHeap : public LInstructionHelper<1, 2, 1>
|
||||
{
|
||||
public:
|
||||
LIR_HEADER(AsmJSAtomicExchangeHeap);
|
||||
|
||||
LAsmJSAtomicExchangeHeap(const LAllocation& ptr, const LAllocation& value)
|
||||
{
|
||||
setOperand(0, ptr);
|
||||
setOperand(1, value);
|
||||
setTemp(0, LDefinition::BogusTemp());
|
||||
}
|
||||
|
||||
const LAllocation* ptr() {
|
||||
return getOperand(0);
|
||||
}
|
||||
const LAllocation* value() {
|
||||
return getOperand(1);
|
||||
}
|
||||
const LDefinition* addrTemp() {
|
||||
return getTemp(0);
|
||||
}
|
||||
|
||||
void setAddrTemp(const LDefinition& addrTemp) {
|
||||
setTemp(0, addrTemp);
|
||||
}
|
||||
|
||||
MAsmJSAtomicExchangeHeap* mir() const {
|
||||
return mir_->toAsmJSAtomicExchangeHeap();
|
||||
}
|
||||
};
|
||||
|
||||
class LAsmJSAtomicBinopHeap : public LInstructionHelper<1, 2, 2>
|
||||
{
|
||||
public:
|
||||
|
@ -339,6 +339,7 @@
|
||||
_(AsmJSPassStackArg) \
|
||||
_(AsmJSCall) \
|
||||
_(AsmJSCompareExchangeHeap) \
|
||||
_(AsmJSAtomicExchangeHeap) \
|
||||
_(AsmJSAtomicBinopHeap) \
|
||||
_(AsmJSAtomicBinopHeapForEffect)\
|
||||
_(RecompileCheck) \
|
||||
|
@@ -13295,6 +13295,38 @@ class MAsmJSCompareExchangeHeap
    }
};

class MAsmJSAtomicExchangeHeap
  : public MBinaryInstruction,
    public MAsmJSHeapAccess,
    public NoTypePolicy::Data
{
    MAsmJSAtomicExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* value,
                             bool needsBoundsCheck)
        : MBinaryInstruction(ptr, value),
          MAsmJSHeapAccess(accessType, needsBoundsCheck)
    {
        setGuard();             // Not removable
        setResultType(MIRType_Int32);
    }

  public:
    INSTRUCTION_HEADER(AsmJSAtomicExchangeHeap)

    static MAsmJSAtomicExchangeHeap* New(TempAllocator& alloc, Scalar::Type accessType,
                                         MDefinition* ptr, MDefinition* value,
                                         bool needsBoundsCheck)
    {
        return new(alloc) MAsmJSAtomicExchangeHeap(accessType, ptr, value, needsBoundsCheck);
    }

    MDefinition* ptr() const { return getOperand(0); }
    MDefinition* value() const { return getOperand(1); }

    AliasSet getAliasSet() const override {
        return AliasSet::Store(AliasSet::AsmJSHeap);
    }
};

class MAsmJSAtomicBinopHeap
  : public MBinaryInstruction,
    public MAsmJSHeapAccess,
@@ -266,6 +266,7 @@ namespace jit {
    _(RecompileCheck)                \
    _(MemoryBarrier)                 \
    _(AsmJSCompareExchangeHeap)      \
    _(AsmJSAtomicExchangeHeap)       \
    _(AsmJSAtomicBinopHeap)          \
    _(UnknownValue)                  \
    _(LexicalCheck)                  \
@@ -1972,6 +1972,58 @@ CodeGeneratorARM::visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout*
    masm.callWithABI(AsmJSImm_AtomicCmpXchg);
}

void
CodeGeneratorARM::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
{
    MAsmJSAtomicExchangeHeap* mir = ins->mir();
    Scalar::Type vt = mir->accessType();
    const LAllocation* ptr = ins->ptr();
    Register ptrReg = ToRegister(ptr);
    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    Register value = ToRegister(ins->value());

    Label rejoin;
    uint32_t maybeCmpOffset = 0;
    if (mir->needsBoundsCheck()) {
        Label goahead;
        BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
        Register out = ToRegister(ins->output());
        maybeCmpOffset = bo.getOffset();
        masm.ma_b(&goahead, Assembler::Below);
        memoryBarrier(MembarFull);
        masm.as_eor(out, out, O2Reg(out));
        masm.ma_b(&rejoin, Assembler::Always);
        masm.bind(&goahead);
    }
    masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                       srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
    if (rejoin.used()) {
        masm.bind(&rejoin);
        masm.append(AsmJSHeapAccess(maybeCmpOffset));
    }
}

void
CodeGeneratorARM::visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* ins)
{
    const MAsmJSAtomicExchangeHeap* mir = ins->mir();
    Scalar::Type viewType = mir->accessType();
    Register ptr = ToRegister(ins->ptr());
    Register value = ToRegister(ins->value());

    MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);

    masm.setupAlignedABICall(3);
    masm.ma_mov(Imm32(viewType), ScratchRegister);
    masm.passABIArg(ScratchRegister);
    masm.passABIArg(ptr);
    masm.passABIArg(value);

    masm.callWithABI(AsmJSImm_AtomicXchg);
}

void
CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
{
@@ -203,6 +203,8 @@ class CodeGeneratorARM : public CodeGeneratorShared
    void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
    void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
    void visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout* ins);
    void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
    void visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* ins);
    void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
    void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
    void visitAsmJSAtomicBinopCallout(LAsmJSAtomicBinopCallout* ins);
@@ -465,7 +465,7 @@ class LAsmJSLoadFuncPtr : public LInstructionHelper<1, 1, 1>
    }
};

-class LAsmJSCompareExchangeCallout : public LInstructionHelper<1, 3, 0>
+class LAsmJSCompareExchangeCallout : public LCallInstructionHelper<1, 3, 0>
{
  public:
    LIR_HEADER(AsmJSCompareExchangeCallout)
@@ -491,7 +491,29 @@ class LAsmJSCompareExchangeCallout : public LInstructionHelper<1, 3, 0>
    }
};

-class LAsmJSAtomicBinopCallout : public LInstructionHelper<1, 2, 0>
+class LAsmJSAtomicExchangeCallout : public LCallInstructionHelper<1, 2, 0>
{
  public:
    LIR_HEADER(AsmJSAtomicExchangeCallout)

    LAsmJSAtomicExchangeCallout(const LAllocation& ptr, const LAllocation& value)
    {
        setOperand(0, ptr);
        setOperand(1, value);
    }
    const LAllocation* ptr() {
        return getOperand(0);
    }
    const LAllocation* value() {
        return getOperand(1);
    }

    const MAsmJSAtomicExchangeHeap* mir() const {
        return mir_->toAsmJSAtomicExchangeHeap();
    }
};

class LAsmJSAtomicBinopCallout : public LCallInstructionHelper<1, 2, 0>
{
  public:
    LIR_HEADER(AsmJSAtomicBinopCallout)
@@ -27,6 +27,7 @@
    _(SoftUDivOrMod)                \
    _(AsmJSLoadFuncPtr)             \
    _(AsmJSCompareExchangeCallout)  \
    _(AsmJSAtomicExchangeCallout)   \
    _(AsmJSAtomicBinopCallout)

#endif /* jit_arm_LOpcodes_arm_h */
@@ -700,6 +700,26 @@ LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
    define(lir, ins);
}

void
LIRGeneratorARM::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
{
    MOZ_ASSERT(ins->ptr()->type() == MIRType_Int32);
    MOZ_ASSERT(ins->accessType() < Scalar::Float32);

    const LAllocation ptr = useRegister(ins->ptr());
    const LAllocation value = useRegister(ins->value());

    if (byteSize(ins->accessType()) < 4 && !HasLDSTREXBHD()) {
        // Call out on ARMv6.
        defineFixed(new(alloc()) LAsmJSAtomicExchangeCallout(ptr, value),
                    ins,
                    LAllocation(AnyRegister(ReturnReg)));
        return;
    }

    define(new(alloc()) LAsmJSAtomicExchangeHeap(ptr, value), ins);
}

void
LIRGeneratorARM::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
{
@@ -96,6 +96,7 @@ class LIRGeneratorARM : public LIRGeneratorShared
    void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
    void visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr* ins);
    void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
    void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
    void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
    void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
    void visitSimdBinaryArith(MSimdBinaryArith* ins);
@@ -77,6 +77,7 @@ class LIRGeneratorNone : public LIRGeneratorShared
    void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins) { MOZ_CRASH(); }
    void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins) { MOZ_CRASH(); }
    void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins) { MOZ_CRASH(); }
    void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins) { MOZ_CRASH(); }
    void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins) { MOZ_CRASH(); }

    LTableSwitch* newLTableSwitch(LAllocation, LDefinition, MTableSwitch*) { MOZ_CRASH(); }
@@ -819,6 +819,7 @@ enum AsmJSImmKind
    AsmJSImm_aeabi_idivmod = AsmJSExit::Builtin_IDivMod,
    AsmJSImm_aeabi_uidivmod = AsmJSExit::Builtin_UDivMod,
    AsmJSImm_AtomicCmpXchg = AsmJSExit::Builtin_AtomicCmpXchg,
    AsmJSImm_AtomicXchg = AsmJSExit::Builtin_AtomicXchg,
    AsmJSImm_AtomicFetchAdd = AsmJSExit::Builtin_AtomicFetchAdd,
    AsmJSImm_AtomicFetchSub = AsmJSExit::Builtin_AtomicFetchSub,
    AsmJSImm_AtomicFetchAnd = AsmJSExit::Builtin_AtomicFetchAnd,
@@ -609,6 +609,49 @@ CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
    masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
}

void
CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
{
    MAsmJSAtomicExchangeHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    const LAllocation* ptr = ins->ptr();

    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
    MOZ_ASSERT(ptr->isRegister());
    MOZ_ASSERT(accessType <= Scalar::Uint32);

    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
    Register value = ToRegister(ins->value());

    // Note that we can't use
    // needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
    // since signal-handler bounds checking is not yet implemented for atomic accesses.
    Label rejoin;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
        maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
        Label goahead;
        masm.j(Assembler::BelowOrEqual, &goahead);
        memoryBarrier(MembarFull);
        Register out = ToRegister(ins->output());
        masm.xorl(out, out);
        masm.jmp(&rejoin);
        masm.bind(&goahead);
    }
    uint32_t before = masm.size();
    masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                       srcAddr,
                                       value,
                                       InvalidReg,
                                       ToAnyRegister(ins->output()));
    if (rejoin.used())
        masm.bind(&rejoin);
    MOZ_ASSERT(mir->offset() == 0,
               "The AsmJS signal handler doesn't yet support emulating "
               "atomic accesses in the case of a fault from an unwrapped offset");
    masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
}

void
CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
{
@@ -52,6 +52,7 @@ class CodeGeneratorX64 : public CodeGeneratorX86Shared
    void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
    void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
    void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
    void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
    void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
    void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
    void visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins);
@@ -225,6 +225,23 @@ LIRGeneratorX64::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
}

void
LIRGeneratorX64::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
{
    MOZ_ASSERT(ins->ptr()->type() == MIRType_Int32);

    const LAllocation ptr = useRegister(ins->ptr());
    const LAllocation value = useRegister(ins->value());

    // The output may not be used but will be clobbered regardless,
    // so ignore the case where we're not using the value and just
    // use the output register as a temp.

    LAsmJSAtomicExchangeHeap* lir =
        new(alloc()) LAsmJSAtomicExchangeHeap(ptr, value);
    define(lir, ins);
}

void
LIRGeneratorX64::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
{
@@ -49,6 +49,7 @@ class LIRGeneratorX64 : public LIRGeneratorX86Shared
    void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
    void visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr* ins);
    void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
    void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
    void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
    void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
    void visitSubstr(MSubstr* ins);
@@ -720,6 +720,31 @@ CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg,
    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
}

void
CodeGeneratorX86::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
{
    MAsmJSAtomicExchangeHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    const LAllocation* ptr = ins->ptr();
    Register ptrReg = ToRegister(ptr);
    Register value = ToRegister(ins->value());
    Register addrTemp = ToRegister(ins->addrTemp());
    Label rejoin;

    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir->needsBoundsCheck(), mir->offset(),
                              mir->endOffset(), ToRegister(ins->output()), rejoin);

    Address memAddr(addrTemp, mir->offset());
    masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                       memAddr,
                                       value,
                                       InvalidReg,
                                       ToAnyRegister(ins->output()));

    if (rejoin.used())
        masm.bind(&rejoin);
}

void
CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
{
@@ -61,6 +61,7 @@ class CodeGeneratorX86 : public CodeGeneratorX86Shared
    void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
    void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
    void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
    void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
    void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
    void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
    void visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins);
@@ -310,6 +310,24 @@ LIRGeneratorX86::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
}

void
LIRGeneratorX86::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
{
    MOZ_ASSERT(ins->ptr()->type() == MIRType_Int32);

    const LAllocation ptr = useRegister(ins->ptr());
    const LAllocation value = useRegister(ins->value());

    LAsmJSAtomicExchangeHeap* lir =
        new(alloc()) LAsmJSAtomicExchangeHeap(ptr, value);

    lir->setAddrTemp(temp());
    if (byteSize(ins->accessType()) == 1)
        defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
    else
        define(lir, ins);
}

void
LIRGeneratorX86::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
{
@@ -55,6 +55,7 @@ class LIRGeneratorX86 : public LIRGeneratorX86Shared
    void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
    void visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr* ins);
    void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
    void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
    void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
    void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
    void visitSubstr(MSubstr* ins);