Bug 1141986 - Atomics.exchange on integer elements -- ion parts. r=h4writer r=sstangl

This commit is contained in:
Lars T Hansen 2015-07-10 14:00:28 +02:00
parent d0c90cb110
commit 7380b0bec9
29 changed files with 781 additions and 194 deletions

View File

@ -117,7 +117,7 @@ js::atomics_fullMemoryBarrier()
}
static bool
atomics_fence_impl(JSContext* cx, MutableHandleValue r)
AtomicsFence(JSContext* cx, MutableHandleValue r)
{
atomics_fullMemoryBarrier();
r.setUndefined();
@ -128,55 +128,55 @@ bool
js::atomics_fence(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
return atomics_fence_impl(cx, args.rval());
return AtomicsFence(cx, args.rval());
}
static int32_t
do_cmpxchg(Scalar::Type viewType, int32_t oldCandidate, int32_t newCandidate, void* viewData,
uint32_t offset, bool* badArrayType)
CompareExchange(Scalar::Type viewType, int32_t oldCandidate, int32_t newCandidate, void* viewData,
uint32_t offset, bool* badArrayType)
{
switch (viewType) {
case Scalar::Int8: {
int8_t oldval = (int8_t)oldCandidate;
int8_t newval = (int8_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst((int8_t*)viewData + offset, oldval, newval);
return oldval;
int8_t oldval = (int8_t)oldCandidate;
int8_t newval = (int8_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst((int8_t*)viewData + offset, oldval, newval);
return oldval;
}
case Scalar::Uint8: {
uint8_t oldval = (uint8_t)oldCandidate;
uint8_t newval = (uint8_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst((uint8_t*)viewData + offset, oldval, newval);
return oldval;
uint8_t oldval = (uint8_t)oldCandidate;
uint8_t newval = (uint8_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst((uint8_t*)viewData + offset, oldval, newval);
return oldval;
}
case Scalar::Uint8Clamped: {
uint8_t oldval = ClampIntForUint8Array(oldCandidate);
uint8_t newval = ClampIntForUint8Array(newCandidate);
oldval = jit::AtomicOperations::compareExchangeSeqCst((uint8_t*)viewData + offset, oldval, newval);
return oldval;
uint8_t oldval = ClampIntForUint8Array(oldCandidate);
uint8_t newval = ClampIntForUint8Array(newCandidate);
oldval = jit::AtomicOperations::compareExchangeSeqCst((uint8_t*)viewData + offset, oldval, newval);
return oldval;
}
case Scalar::Int16: {
int16_t oldval = (int16_t)oldCandidate;
int16_t newval = (int16_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst((int16_t*)viewData + offset, oldval, newval);
return oldval;
int16_t oldval = (int16_t)oldCandidate;
int16_t newval = (int16_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst((int16_t*)viewData + offset, oldval, newval);
return oldval;
}
case Scalar::Uint16: {
uint16_t oldval = (uint16_t)oldCandidate;
uint16_t newval = (uint16_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst((uint16_t*)viewData + offset, oldval, newval);
return oldval;
uint16_t oldval = (uint16_t)oldCandidate;
uint16_t newval = (uint16_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst((uint16_t*)viewData + offset, oldval, newval);
return oldval;
}
case Scalar::Int32: {
int32_t oldval = oldCandidate;
int32_t newval = newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst((int32_t*)viewData + offset, oldval, newval);
return oldval;
int32_t oldval = oldCandidate;
int32_t newval = newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst((int32_t*)viewData + offset, oldval, newval);
return oldval;
}
case Scalar::Uint32: {
uint32_t oldval = (uint32_t)oldCandidate;
uint32_t newval = (uint32_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst((uint32_t*)viewData + offset, oldval, newval);
return (int32_t)oldval;
uint32_t oldval = (uint32_t)oldCandidate;
uint32_t newval = (uint32_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst((uint32_t*)viewData + offset, oldval, newval);
return (int32_t)oldval;
}
default:
*badArrayType = true;
@ -209,10 +209,10 @@ js::atomics_compareExchange(JSContext* cx, unsigned argc, Value* vp)
return false;
if (!inRange)
return atomics_fence_impl(cx, r);
return AtomicsFence(cx, r);
bool badType = false;
int32_t result = do_cmpxchg(view->type(), oldCandidate, newCandidate, view->viewData(), offset, &badType);
int32_t result = CompareExchange(view->type(), oldCandidate, newCandidate, view->viewData(), offset, &badType);
if (badType)
return ReportBadArrayType(cx);
@ -241,47 +241,109 @@ js::atomics_load(JSContext* cx, unsigned argc, Value* vp)
return false;
if (!inRange)
return atomics_fence_impl(cx, r);
return AtomicsFence(cx, r);
switch (view->type()) {
case Scalar::Uint8:
case Scalar::Uint8Clamped: {
uint8_t v = jit::AtomicOperations::loadSeqCst((uint8_t*)view->viewData() + offset);
r.setInt32(v);
return true;
uint8_t v = jit::AtomicOperations::loadSeqCst((uint8_t*)view->viewData() + offset);
r.setInt32(v);
return true;
}
case Scalar::Int8: {
int8_t v = jit::AtomicOperations::loadSeqCst((uint8_t*)view->viewData() + offset);
r.setInt32(v);
return true;
int8_t v = jit::AtomicOperations::loadSeqCst((uint8_t*)view->viewData() + offset);
r.setInt32(v);
return true;
}
case Scalar::Int16: {
int16_t v = jit::AtomicOperations::loadSeqCst((int16_t*)view->viewData() + offset);
r.setInt32(v);
return true;
int16_t v = jit::AtomicOperations::loadSeqCst((int16_t*)view->viewData() + offset);
r.setInt32(v);
return true;
}
case Scalar::Uint16: {
uint16_t v = jit::AtomicOperations::loadSeqCst((uint16_t*)view->viewData() + offset);
r.setInt32(v);
return true;
uint16_t v = jit::AtomicOperations::loadSeqCst((uint16_t*)view->viewData() + offset);
r.setInt32(v);
return true;
}
case Scalar::Int32: {
int32_t v = jit::AtomicOperations::loadSeqCst((int32_t*)view->viewData() + offset);
r.setInt32(v);
return true;
int32_t v = jit::AtomicOperations::loadSeqCst((int32_t*)view->viewData() + offset);
r.setInt32(v);
return true;
}
case Scalar::Uint32: {
uint32_t v = jit::AtomicOperations::loadSeqCst((uint32_t*)view->viewData() + offset);
r.setNumber(v);
return true;
uint32_t v = jit::AtomicOperations::loadSeqCst((uint32_t*)view->viewData() + offset);
r.setNumber(v);
return true;
}
default:
return ReportBadArrayType(cx);
return ReportBadArrayType(cx);
}
}
bool
js::atomics_store(JSContext* cx, unsigned argc, Value* vp)
// Discriminator for the shared ExchangeOrStore templates below:
// DoExchange performs an atomic exchange (result is the old value),
// DoStore performs a seq-cst store (result is the value written).
enum XchgStoreOp {
DoExchange,
DoStore
};
// Shared implementation of Atomics.store and Atomics.exchange on the raw
// view memory.  Coerces numberValue to the element type of viewType,
// performs the operation (selected at compile time by `op`) on
// viewData[offset], and returns the resulting value as int32_t: for
// DoStore that is the value written, for DoExchange the old element
// value.  Uint32 results are bit-cast to int32_t (the caller re-widens).
// For non-integer view types sets *badArrayType and returns 0.
template<XchgStoreOp op>
static int32_t
ExchangeOrStore(Scalar::Type viewType, int32_t numberValue, void* viewData, uint32_t offset,
bool* badArrayType)
{
// `op` is a template parameter, so this branch folds away at compile
// time; each instantiation contains only one of the two operations.
// For DoExchange, `value` is updated in place with the old element.
#define INT_OP(ptr, value) \
JS_BEGIN_MACRO \
if (op == DoStore) \
jit::AtomicOperations::storeSeqCst(ptr, value); \
else \
value = jit::AtomicOperations::exchangeSeqCst(ptr, value); \
JS_END_MACRO
switch (viewType) {
case Scalar::Int8: {
int8_t value = (int8_t)numberValue;
INT_OP((int8_t*)viewData + offset, value);
return value;
}
case Scalar::Uint8: {
uint8_t value = (uint8_t)numberValue;
INT_OP((uint8_t*)viewData + offset, value);
return value;
}
case Scalar::Uint8Clamped: {
// Clamped arrays clamp (not truncate) the input to [0, 255].
uint8_t value = ClampIntForUint8Array(numberValue);
INT_OP((uint8_t*)viewData + offset, value);
return value;
}
case Scalar::Int16: {
int16_t value = (int16_t)numberValue;
INT_OP((int16_t*)viewData + offset, value);
return value;
}
case Scalar::Uint16: {
uint16_t value = (uint16_t)numberValue;
INT_OP((uint16_t*)viewData + offset, value);
return value;
}
case Scalar::Int32: {
int32_t value = numberValue;
INT_OP((int32_t*)viewData + offset, value);
return value;
}
case Scalar::Uint32: {
// Bit-cast through int32_t; the JS-level caller converts back to
// an unsigned double when reporting the result.
uint32_t value = (uint32_t)numberValue;
INT_OP((uint32_t*)viewData + offset, value);
return (int32_t)value;
}
default:
*badArrayType = true;
return 0;
}
#undef INT_OP
}
template<XchgStoreOp op>
static bool
ExchangeOrStore(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
HandleValue objv = args.get(0);
@ -306,58 +368,35 @@ js::atomics_store(JSContext* cx, unsigned argc, Value* vp)
return true;
}
switch (view->type()) {
case Scalar::Int8: {
int8_t value = (int8_t)numberValue;
jit::AtomicOperations::storeSeqCst((int8_t*)view->viewData() + offset, value);
r.setInt32(value);
return true;
}
case Scalar::Uint8: {
uint8_t value = (uint8_t)numberValue;
jit::AtomicOperations::storeSeqCst((uint8_t*)view->viewData() + offset, value);
r.setInt32(value);
return true;
}
case Scalar::Uint8Clamped: {
uint8_t value = ClampIntForUint8Array(numberValue);
jit::AtomicOperations::storeSeqCst((uint8_t*)view->viewData() + offset, value);
r.setInt32(value);
return true;
}
case Scalar::Int16: {
int16_t value = (int16_t)numberValue;
jit::AtomicOperations::storeSeqCst((int16_t*)view->viewData() + offset, value);
r.setInt32(value);
return true;
}
case Scalar::Uint16: {
uint16_t value = (uint16_t)numberValue;
jit::AtomicOperations::storeSeqCst((uint16_t*)view->viewData() + offset, value);
r.setInt32(value);
return true;
}
case Scalar::Int32: {
int32_t value = numberValue;
jit::AtomicOperations::storeSeqCst((int32_t*)view->viewData() + offset, value);
r.setInt32(value);
return true;
}
case Scalar::Uint32: {
uint32_t value = (uint32_t)numberValue;
jit::AtomicOperations::storeSeqCst((uint32_t*)view->viewData() + offset, value);
r.setNumber((double)value);
return true;
}
default:
bool badType = false;
int32_t result = ExchangeOrStore<op>(view->type(), numberValue, view->viewData(), offset, &badType);
if (badType)
return ReportBadArrayType(cx);
}
if (view->type() == Scalar::Uint32)
r.setNumber((double)(uint32_t)result);
else
r.setInt32(result);
return true;
}
// Atomics.store(view, index, value): seq-cst store, implemented via the
// shared ExchangeOrStore template instantiated with DoStore.
bool
js::atomics_store(JSContext* cx, unsigned argc, Value* vp)
{
return ExchangeOrStore<DoStore>(cx, argc, vp);
}
// Atomics.exchange(view, index, value): atomically replace the element
// and return the old value, via ExchangeOrStore instantiated with
// DoExchange.
bool
js::atomics_exchange(JSContext* cx, unsigned argc, Value* vp)
{
return ExchangeOrStore<DoExchange>(cx, argc, vp);
}
template<typename T>
static bool
atomics_binop_impl(JSContext* cx, HandleValue objv, HandleValue idxv, HandleValue valv,
MutableHandleValue r)
AtomicsBinop(JSContext* cx, HandleValue objv, HandleValue idxv, HandleValue valv,
MutableHandleValue r)
{
Rooted<SharedTypedArrayObject*> view(cx, nullptr);
if (!GetSharedTypedArray(cx, objv, &view))
@ -371,58 +410,58 @@ atomics_binop_impl(JSContext* cx, HandleValue objv, HandleValue idxv, HandleValu
return false;
if (!inRange)
return atomics_fence_impl(cx, r);
return AtomicsFence(cx, r);
switch (view->type()) {
case Scalar::Int8: {
int8_t v = (int8_t)numberValue;
r.setInt32(T::operate((int8_t*)view->viewData() + offset, v));
return true;
int8_t v = (int8_t)numberValue;
r.setInt32(T::operate((int8_t*)view->viewData() + offset, v));
return true;
}
case Scalar::Uint8: {
uint8_t v = (uint8_t)numberValue;
r.setInt32(T::operate((uint8_t*)view->viewData() + offset, v));
return true;
uint8_t v = (uint8_t)numberValue;
r.setInt32(T::operate((uint8_t*)view->viewData() + offset, v));
return true;
}
case Scalar::Uint8Clamped: {
// Spec says:
// - clamp the input value
// - perform the operation
// - clamp the result
// - store the result
// This requires a CAS loop.
int32_t value = ClampIntForUint8Array(numberValue);
uint8_t* loc = (uint8_t*)view->viewData() + offset;
for (;;) {
uint8_t old = *loc;
uint8_t result = (uint8_t)ClampIntForUint8Array(T::perform(old, value));
uint8_t tmp = jit::AtomicOperations::compareExchangeSeqCst(loc, old, result);
if (tmp == old) {
r.setInt32(old);
break;
}
}
return true;
// Spec says:
// - clamp the input value
// - perform the operation
// - clamp the result
// - store the result
// This requires a CAS loop.
int32_t value = ClampIntForUint8Array(numberValue);
uint8_t* loc = (uint8_t*)view->viewData() + offset;
for (;;) {
uint8_t old = *loc;
uint8_t result = (uint8_t)ClampIntForUint8Array(T::perform(old, value));
uint8_t tmp = jit::AtomicOperations::compareExchangeSeqCst(loc, old, result);
if (tmp == old) {
r.setInt32(old);
break;
}
}
return true;
}
case Scalar::Int16: {
int16_t v = (int16_t)numberValue;
r.setInt32(T::operate((int16_t*)view->viewData() + offset, v));
return true;
int16_t v = (int16_t)numberValue;
r.setInt32(T::operate((int16_t*)view->viewData() + offset, v));
return true;
}
case Scalar::Uint16: {
uint16_t v = (uint16_t)numberValue;
r.setInt32(T::operate((uint16_t*)view->viewData() + offset, v));
return true;
uint16_t v = (uint16_t)numberValue;
r.setInt32(T::operate((uint16_t*)view->viewData() + offset, v));
return true;
}
case Scalar::Int32: {
int32_t v = numberValue;
r.setInt32(T::operate((int32_t*)view->viewData() + offset, v));
return true;
int32_t v = numberValue;
r.setInt32(T::operate((int32_t*)view->viewData() + offset, v));
return true;
}
case Scalar::Uint32: {
uint32_t v = (uint32_t)numberValue;
r.setNumber((double)T::operate((uint32_t*)view->viewData() + offset, v));
return true;
uint32_t v = (uint32_t)numberValue;
r.setNumber((double)T::operate((uint32_t*)view->viewData() + offset, v));
return true;
}
default:
return ReportBadArrayType(cx);
@ -437,7 +476,7 @@ atomics_binop_impl(JSContext* cx, HandleValue objv, HandleValue idxv, HandleValu
static int32_t operate(int32_t* addr, int32_t v) { return NAME(addr, v); } \
static uint32_t operate(uint32_t* addr, uint32_t v) { return NAME(addr, v); }
class do_add
class PerformAdd
{
public:
INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchAddSeqCst)
@ -448,10 +487,10 @@ bool
js::atomics_add(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
return atomics_binop_impl<do_add>(cx, args.get(0), args.get(1), args.get(2), args.rval());
return AtomicsBinop<PerformAdd>(cx, args.get(0), args.get(1), args.get(2), args.rval());
}
class do_sub
class PerformSub
{
public:
INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchSubSeqCst)
@ -462,10 +501,10 @@ bool
js::atomics_sub(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
return atomics_binop_impl<do_sub>(cx, args.get(0), args.get(1), args.get(2), args.rval());
return AtomicsBinop<PerformSub>(cx, args.get(0), args.get(1), args.get(2), args.rval());
}
class do_and
class PerformAnd
{
public:
INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchAndSeqCst)
@ -476,10 +515,10 @@ bool
js::atomics_and(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
return atomics_binop_impl<do_and>(cx, args.get(0), args.get(1), args.get(2), args.rval());
return AtomicsBinop<PerformAnd>(cx, args.get(0), args.get(1), args.get(2), args.rval());
}
class do_or
class PerformOr
{
public:
INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchOrSeqCst)
@ -490,10 +529,10 @@ bool
js::atomics_or(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
return atomics_binop_impl<do_or>(cx, args.get(0), args.get(1), args.get(2), args.rval());
return AtomicsBinop<PerformOr>(cx, args.get(0), args.get(1), args.get(2), args.rval());
}
class do_xor
class PerformXor
{
public:
INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchXorSeqCst)
@ -504,7 +543,7 @@ bool
js::atomics_xor(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
return atomics_binop_impl<do_xor>(cx, args.get(0), args.get(1), args.get(2), args.rval());
return AtomicsBinop<PerformXor>(cx, args.get(0), args.get(1), args.get(2), args.rval());
}
bool
@ -548,13 +587,13 @@ js::atomics_add_asm_callout(int32_t vt, int32_t offset, int32_t value)
if ((size_t)offset >= heapLength) return 0;
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return do_add::operate((int8_t*)heap + offset, value);
return PerformAdd::operate((int8_t*)heap + offset, value);
case Scalar::Uint8:
return do_add::operate((uint8_t*)heap + offset, value);
return PerformAdd::operate((uint8_t*)heap + offset, value);
case Scalar::Int16:
return do_add::operate((int16_t*)heap + (offset >> 1), value);
return PerformAdd::operate((int16_t*)heap + (offset >> 1), value);
case Scalar::Uint16:
return do_add::operate((uint16_t*)heap + (offset >> 1), value);
return PerformAdd::operate((uint16_t*)heap + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
@ -569,13 +608,13 @@ js::atomics_sub_asm_callout(int32_t vt, int32_t offset, int32_t value)
if ((size_t)offset >= heapLength) return 0;
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return do_sub::operate((int8_t*)heap + offset, value);
return PerformSub::operate((int8_t*)heap + offset, value);
case Scalar::Uint8:
return do_sub::operate((uint8_t*)heap + offset, value);
return PerformSub::operate((uint8_t*)heap + offset, value);
case Scalar::Int16:
return do_sub::operate((int16_t*)heap + (offset >> 1), value);
return PerformSub::operate((int16_t*)heap + (offset >> 1), value);
case Scalar::Uint16:
return do_sub::operate((uint16_t*)heap + (offset >> 1), value);
return PerformSub::operate((uint16_t*)heap + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
@ -590,13 +629,13 @@ js::atomics_and_asm_callout(int32_t vt, int32_t offset, int32_t value)
if ((size_t)offset >= heapLength) return 0;
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return do_and::operate((int8_t*)heap + offset, value);
return PerformAnd::operate((int8_t*)heap + offset, value);
case Scalar::Uint8:
return do_and::operate((uint8_t*)heap + offset, value);
return PerformAnd::operate((uint8_t*)heap + offset, value);
case Scalar::Int16:
return do_and::operate((int16_t*)heap + (offset >> 1), value);
return PerformAnd::operate((int16_t*)heap + (offset >> 1), value);
case Scalar::Uint16:
return do_and::operate((uint16_t*)heap + (offset >> 1), value);
return PerformAnd::operate((uint16_t*)heap + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
@ -611,13 +650,13 @@ js::atomics_or_asm_callout(int32_t vt, int32_t offset, int32_t value)
if ((size_t)offset >= heapLength) return 0;
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return do_or::operate((int8_t*)heap + offset, value);
return PerformOr::operate((int8_t*)heap + offset, value);
case Scalar::Uint8:
return do_or::operate((uint8_t*)heap + offset, value);
return PerformOr::operate((uint8_t*)heap + offset, value);
case Scalar::Int16:
return do_or::operate((int16_t*)heap + (offset >> 1), value);
return PerformOr::operate((int16_t*)heap + (offset >> 1), value);
case Scalar::Uint16:
return do_or::operate((uint16_t*)heap + (offset >> 1), value);
return PerformOr::operate((uint16_t*)heap + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
@ -632,13 +671,13 @@ js::atomics_xor_asm_callout(int32_t vt, int32_t offset, int32_t value)
if ((size_t)offset >= heapLength) return 0;
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return do_xor::operate((int8_t*)heap + offset, value);
return PerformXor::operate((int8_t*)heap + offset, value);
case Scalar::Uint8:
return do_xor::operate((uint8_t*)heap + offset, value);
return PerformXor::operate((uint8_t*)heap + offset, value);
case Scalar::Int16:
return do_xor::operate((int16_t*)heap + (offset >> 1), value);
return PerformXor::operate((int16_t*)heap + (offset >> 1), value);
case Scalar::Uint16:
return do_xor::operate((uint16_t*)heap + (offset >> 1), value);
return PerformXor::operate((uint16_t*)heap + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
@ -654,13 +693,13 @@ js::atomics_cmpxchg_asm_callout(int32_t vt, int32_t offset, int32_t oldval, int3
bool badType = false;
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return do_cmpxchg(Scalar::Int8, oldval, newval, heap, offset, &badType);
return CompareExchange(Scalar::Int8, oldval, newval, heap, offset, &badType);
case Scalar::Uint8:
return do_cmpxchg(Scalar::Uint8, oldval, newval, heap, offset, &badType);
return CompareExchange(Scalar::Uint8, oldval, newval, heap, offset, &badType);
case Scalar::Int16:
return do_cmpxchg(Scalar::Int16, oldval, newval, heap, offset>>1, &badType);
return CompareExchange(Scalar::Int16, oldval, newval, heap, offset>>1, &badType);
case Scalar::Uint16:
return do_cmpxchg(Scalar::Uint16, oldval, newval, heap, offset>>1, &badType);
return CompareExchange(Scalar::Uint16, oldval, newval, heap, offset>>1, &badType);
default:
MOZ_CRASH("Invalid size");
}
@ -1202,6 +1241,7 @@ const JSFunctionSpec AtomicsMethods[] = {
JS_FN("compareExchange", atomics_compareExchange, 4,0),
JS_FN("load", atomics_load, 2,0),
JS_FN("store", atomics_store, 3,0),
JS_FN("exchange", atomics_exchange, 3,0),
JS_FN("fence", atomics_fence, 0,0),
JS_FN("add", atomics_add, 3,0),
JS_FN("sub", atomics_sub, 3,0),

View File

@ -32,6 +32,7 @@ class AtomicsObject : public JSObject
void atomics_fullMemoryBarrier();
bool atomics_compareExchange(JSContext* cx, unsigned argc, Value* vp);
bool atomics_exchange(JSContext* cx, unsigned argc, Value* vp);
bool atomics_load(JSContext* cx, unsigned argc, Value* vp);
bool atomics_store(JSContext* cx, unsigned argc, Value* vp);
bool atomics_fence(JSContext* cx, unsigned argc, Value* vp);

View File

@ -49,6 +49,11 @@ function testMethod(a, ...indices) {
// val = 9
assertEq(Atomics.compareExchange(a, x, 5, 0), 9); // should also fail
// val = 9
assertEq(Atomics.exchange(a, x, 4), 9);
// val = 4
assertEq(Atomics.exchange(a, x, 9), 4);
// val = 9
assertEq(Atomics.load(a, x), 9);
// val = 9
@ -115,6 +120,11 @@ function testFunction(a, ...indices) {
// val = 9
assertEq(gAtomics_compareExchange(a, x, 5, 0), 9); // should also fail
// val = 9
assertEq(gAtomics_exchange(a, x, 4), 9);
// val = 4
assertEq(gAtomics_exchange(a, x, 9), 4);
// val = 9
assertEq(gAtomics_load(a, x), 9);
// val = 9
@ -246,6 +256,9 @@ function testInt8Extremes(a) {
assertEq(a[10], 0);
assertEq(Atomics.load(a, 10), 0);
Atomics.store(a, 10, 255);
assertEq(Atomics.exchange(a, 10, 0), -1);
assertEq(a[11], 0);
}
@ -279,6 +292,9 @@ function testUint8Extremes(a) {
assertEq(a[10], 0);
assertEq(Atomics.load(a, 10), 0);
Atomics.store(a, 10, 255);
assertEq(Atomics.exchange(a, 10, 0), 255);
assertEq(a[11], 0);
}
@ -333,6 +349,25 @@ function testUint32(a) {
assertEq(sum, k);
}
// This test is a reliable test of sign extension in the JIT where
// testInt8Extremes is not (because there may not be enough type
// information without a loop - see bug 1181062 for a description
// of the general problem).
// Hot loop over a 16-element window of `ta`, exchanging in 255 each time
// and accumulating the returned old values.  Run long enough (100000
// iterations) for the JIT to compile and exercise the inlined exchange.
function exchangeLoop(ta) {
var sum = 0;
for ( var i=0 ; i < 100000 ; i++ )
sum += Atomics.exchange(ta, i & 15, 255);
return sum;
}
// Sign-extension check: every element starts (and is re-stored) as 255,
// which reads back as -1 from a SharedInt8Array, so 100000 exchanges must
// sum to -100000.
function adHocExchange() {
var a = new SharedInt8Array(16)
for ( var i=0 ; i < a.length ; i++ )
a[i] = 255;
assertEq(exchangeLoop(a), -100000);
}
var sizes = [ 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12];
var answers = [ true, true, false, true, false, false, false, {},
@ -395,7 +430,7 @@ function runTests() {
assertEq(t2[0], 37 << 16);
t1[0] = 0;
// Test that invoking as Atomics.whatever() works, on correct arguments
// Test that invoking as Atomics.whatever() works, on correct arguments.
CLONE(testMethod)(new SharedInt8Array(sab), 0, 42, 4095);
CLONE(testMethod)(new SharedUint8Array(sab), 0, 42, 4095);
CLONE(testMethod)(new SharedUint8ClampedArray(sab), 0, 42, 4095);
@ -404,8 +439,9 @@ function runTests() {
CLONE(testMethod)(new SharedInt32Array(sab), 0, 42, 1023);
CLONE(testMethod)(new SharedUint32Array(sab), 0, 42, 1023);
// Test that invoking as v = Atomics.whatever; v() works, on correct arguments
// Test that invoking as v = Atomics.whatever; v() works, on correct arguments.
gAtomics_compareExchange = Atomics.compareExchange;
gAtomics_exchange = Atomics.exchange;
gAtomics_load = Atomics.load;
gAtomics_store = Atomics.store;
gAtomics_fence = Atomics.fence;
@ -452,6 +488,9 @@ function runTests() {
testInt16Extremes(new SharedInt16Array(sab));
testUint32(new SharedUint32Array(sab));
// Misc ad-hoc tests
adHocExchange();
// Misc
testIsLockFree();
}

View File

@ -9263,9 +9263,6 @@ CodeGenerator::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayE
AnyRegister output = ToAnyRegister(lir->output());
Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
MOZ_ASSERT(lir->oldval()->isRegister());
MOZ_ASSERT(lir->newval()->isRegister());
Register oldval = ToRegister(lir->oldval());
Register newval = ToRegister(lir->newval());
@ -9281,6 +9278,27 @@ CodeGenerator::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayE
}
}
// Emit code for an LAtomicExchangeTypedArrayElement: atomically exchange
// `value` with the typed-array element and leave the old value in
// `output`.  The address is formed as a constant-offset Address when the
// index is a compile-time constant, otherwise as a scaled BaseIndex.
void
CodeGenerator::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir)
{
Register elements = ToRegister(lir->elements());
AnyRegister output = ToAnyRegister(lir->output());
// temp may be a bogus placeholder; it is only allocated for Uint32
// arrays (see the LIR generator), where the result must become a double.
Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
Register value = ToRegister(lir->value());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width);
masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
} else {
BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
}
}
template <typename T>
static inline void
AtomicBinopToTypedArray(MacroAssembler& masm, AtomicOp op,

View File

@ -280,6 +280,7 @@ class CodeGenerator : public CodeGeneratorSpecific
void visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole* lir);
void visitAtomicIsLockFree(LAtomicIsLockFree* lir);
void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir);
void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir);
void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
void visitClampIToUint8(LClampIToUint8* lir);

View File

@ -778,6 +778,7 @@ class IonBuilder
// Atomics natives.
InliningStatus inlineAtomicsCompareExchange(CallInfo& callInfo);
InliningStatus inlineAtomicsExchange(CallInfo& callInfo);
InliningStatus inlineAtomicsLoad(CallInfo& callInfo);
InliningStatus inlineAtomicsStore(CallInfo& callInfo);
InliningStatus inlineAtomicsFence(CallInfo& callInfo);

View File

@ -5158,6 +5158,38 @@ class LCompareExchangeTypedArrayElement : public LInstructionHelper<1, 4, 1>
}
};
// LIR node for Atomics.exchange on a typed-array element: one definition
// (the old element value), three operands (elements, index, value), and
// one temp (used only when the result must be converted, e.g. Uint32 ->
// double; otherwise a bogus temp).
class LAtomicExchangeTypedArrayElement : public LInstructionHelper<1, 3, 1>
{
public:
LIR_HEADER(AtomicExchangeTypedArrayElement)
LAtomicExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
const LAllocation& value, const LDefinition& temp)
{
setOperand(0, elements);
setOperand(1, index);
setOperand(2, value);
setTemp(0, temp);
}
// Base pointer of the typed array's element storage.
const LAllocation* elements() {
return getOperand(0);
}
// Element index (constant or register).
const LAllocation* index() {
return getOperand(1);
}
// Value to swap into the element.
const LAllocation* value() {
return getOperand(2);
}
const LDefinition* temp() {
return getTemp(0);
}
const MAtomicExchangeTypedArrayElement* mir() const {
return mir_->toAtomicExchangeTypedArrayElement();
}
};
class LAtomicTypedArrayElementBinop : public LInstructionHelper<1, 3, 2>
{
public:

View File

@ -248,6 +248,7 @@
_(StoreTypedArrayElementStatic) \
_(AtomicIsLockFree) \
_(CompareExchangeTypedArrayElement) \
_(AtomicExchangeTypedArrayElement) \
_(AtomicTypedArrayElementBinop) \
_(AtomicTypedArrayElementBinopForEffect) \
_(EffectiveAddress) \

View File

@ -57,6 +57,8 @@ IonBuilder::inlineNativeCall(CallInfo& callInfo, JSFunction* target)
// Atomic natives.
if (native == atomics_compareExchange)
return inlineAtomicsCompareExchange(callInfo);
if (native == atomics_exchange)
return inlineAtomicsExchange(callInfo);
if (native == atomics_load)
return inlineAtomicsLoad(callInfo);
if (native == atomics_store)
@ -2815,6 +2817,46 @@ IonBuilder::inlineAtomicsCompareExchange(CallInfo& callInfo)
return InliningStatus_Inlined;
}
// Try to inline a call to Atomics.exchange(view, index, value).
// Bails out (NotInlined) on wrong arity, constructor calls, views that
// fail the shared atomics preconditions, or a value that is neither
// Int32 nor Double; doubles are truncated to int32 before the exchange.
IonBuilder::InliningStatus
IonBuilder::inlineAtomicsExchange(CallInfo& callInfo)
{
if (callInfo.argc() != 3 || callInfo.constructing()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
return InliningStatus_NotInlined;
}
Scalar::Type arrayType;
if (!atomicsMeetsPreconditions(callInfo, &arrayType))
return InliningStatus_NotInlined;
MDefinition* value = callInfo.getArg(2);
if (!(value->type() == MIRType_Int32 || value->type() == MIRType_Double))
return InliningStatus_NotInlined;
callInfo.setImplicitlyUsedUnchecked();
MInstruction* elements;
MDefinition* index;
// Emits the bounds check and loads the elements pointer / index.
atomicsCheckBounds(callInfo, &elements, &index);
MDefinition* toWrite = value;
if (value->type() == MIRType_Double) {
toWrite = MTruncateToInt32::New(alloc(), value);
current->add(toWrite->toInstruction());
}
MInstruction* exchange =
MAtomicExchangeTypedArrayElement::New(alloc(), elements, index, toWrite, arrayType);
// Result type comes from observed type info (Int32, or Double for
// Uint32 arrays).
exchange->setResultType(getInlineReturnType());
current->add(exchange);
current->push(exchange);
if (!resumeAfter(exchange))
return InliningStatus_Error;
return InliningStatus_Inlined;
}
IonBuilder::InliningStatus
IonBuilder::inlineAtomicsLoad(CallInfo& callInfo)
{

View File

@ -12846,9 +12846,9 @@ class MRecompileCheck : public MNullaryInstruction
};
// All barriered operations - MMemoryBarrier, MCompareExchangeTypedArrayElement,
// and MAtomicTypedArrayElementBinop, as well as MLoadUnboxedScalar and
// MStoreUnboxedSclaar when they are marked as requiring a memory barrer - have
// the following attributes:
// MAtomicExchangeTypedArrayElement, and MAtomicTypedArrayElementBinop, as well
// as MLoadUnboxedScalar and MStoreUnboxedScalar when they are marked as
// requiring a memory barrier - have the following attributes:
//
// - Not movable
// - Not removable
@ -12979,6 +12979,55 @@ class MCompareExchangeTypedArrayElement
}
};
// MIR node for Atomics.exchange on an integer typed-array element:
// atomically swaps `value` into elements[index] and produces the old
// value.  Only integer element types up to Uint32 are supported
// (asserted in the constructor).  Marked as a guard so it is never
// removed, and aliases unboxed-element stores.
class MAtomicExchangeTypedArrayElement
: public MAryInstruction<3>,
public Mix3Policy<ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2>>::Data
{
Scalar::Type arrayType_;
MAtomicExchangeTypedArrayElement(MDefinition* elements, MDefinition* index, MDefinition* value,
Scalar::Type arrayType)
: arrayType_(arrayType)
{
MOZ_ASSERT(arrayType <= Scalar::Uint32);
initOperand(0, elements);
initOperand(1, index);
initOperand(2, value);
setGuard(); // Not removable
}
public:
INSTRUCTION_HEADER(AtomicExchangeTypedArrayElement)
static MAtomicExchangeTypedArrayElement* New(TempAllocator& alloc, MDefinition* elements,
MDefinition* index, MDefinition* value,
Scalar::Type arrayType)
{
return new(alloc) MAtomicExchangeTypedArrayElement(elements, index, value, arrayType);
}
// True for 8-bit element types (Int8/Uint8/Uint8Clamped); backends may
// need different codegen for byte-sized accesses.
bool isByteArray() const {
return (arrayType_ == Scalar::Int8 ||
arrayType_ == Scalar::Uint8 ||
arrayType_ == Scalar::Uint8Clamped);
}
MDefinition* elements() {
return getOperand(0);
}
MDefinition* index() {
return getOperand(1);
}
MDefinition* value() {
return getOperand(2);
}
Scalar::Type arrayType() const {
return arrayType_;
}
AliasSet getAliasSet() const override {
return AliasSet::Store(AliasSet::UnboxedElement);
}
};
class MAtomicTypedArrayElementBinop
: public MAryInstruction<3>,
public Mix3Policy< ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2> >::Data

View File

@ -208,6 +208,7 @@ namespace jit {
_(StoreTypedArrayElementStatic) \
_(AtomicIsLockFree) \
_(CompareExchangeTypedArrayElement) \
_(AtomicExchangeTypedArrayElement) \
_(AtomicTypedArrayElementBinop) \
_(EffectiveAddress) \
_(ClampToUint8) \

View File

@ -491,6 +491,49 @@ MacroAssembler::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Bas
Register oldval, Register newval, Register temp,
AnyRegister output);
// Atomically exchange `value` with the integer typed-array element at
// `mem`, leaving the old element in `output`.  Dispatches on the element
// type to the width/extension-appropriate exchange primitive; signed
// types sign-extend the old value, unsigned types zero-extend.  For
// Uint32 the old value goes through `temp` and is converted to a double
// in output.fpu().  T is Address or BaseIndex (instantiated below).
template<typename T>
void
MacroAssembler::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
Register value, Register temp, AnyRegister output)
{
switch (arrayType) {
case Scalar::Int8:
atomicExchange8SignExtend(mem, value, output.gpr());
break;
case Scalar::Uint8:
atomicExchange8ZeroExtend(mem, value, output.gpr());
break;
case Scalar::Uint8Clamped:
// Clamping applies to the stored value and was done by the caller;
// the memory operation itself is a plain unsigned byte exchange.
atomicExchange8ZeroExtend(mem, value, output.gpr());
break;
case Scalar::Int16:
atomicExchange16SignExtend(mem, value, output.gpr());
break;
case Scalar::Uint16:
atomicExchange16ZeroExtend(mem, value, output.gpr());
break;
case Scalar::Int32:
atomicExchange32(mem, value, output.gpr());
break;
case Scalar::Uint32:
// At the moment, the code in MCallOptimize.cpp requires the output
// type to be double for uint32 arrays. See bug 1077305.
MOZ_ASSERT(output.isFloat());
atomicExchange32(mem, value, temp);
convertUInt32ToDouble(temp, output.fpu());
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
// Explicit instantiations for the two addressing modes used by codegen.
template void
MacroAssembler::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
Register value, Register temp, AnyRegister output);
template void
MacroAssembler::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
Register value, Register temp, AnyRegister output);
template<typename S, typename T>
void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,

View File

@ -850,6 +850,10 @@ class MacroAssembler : public MacroAssemblerSpecific
void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
Register temp, AnyRegister output);
template<typename T>
void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
Register temp, AnyRegister output);
// Generating a result.
template<typename S, typename T>
void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,

View File

@ -573,6 +573,34 @@ LIRGeneratorARM::visitSimdValueX4(MSimdValueX4* ins)
MOZ_CRASH("NYI");
}
// Lower Atomics.exchange on a typed-array element for ARM.  The byte and
// halfword exclusive load/store forms (LDREXB/H, STREXB/H) are required,
// since the ARMv6 fallback in the macro assembler is not implemented yet
// (it crashes with "NYI").
void
LIRGeneratorARM::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
{
MOZ_ASSERT(HasLDSTREXBHD());
// Only integer element types (<= Uint32) reach this lowering.
MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());
// If the target is a floating register then we need a temp at the
// CodeGenerator level for creating the result.
const LAllocation value = useRegister(ins->value());
LDefinition tempDef = LDefinition::BogusTemp();
if (ins->arrayType() == Scalar::Uint32) {
// Uint32 results are produced as doubles (bug 1077305), so the raw
// old value is exchanged into a temp GPR before conversion.
MOZ_ASSERT(ins->type() == MIRType_Double);
tempDef = temp();
}
LAtomicExchangeTypedArrayElement* lir =
new(alloc()) LAtomicExchangeTypedArrayElement(elements, index, value, tempDef);
define(lir, ins);
}
void
LIRGeneratorARM::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
{

View File

@ -103,6 +103,7 @@ class LIRGeneratorARM : public LIRGeneratorShared
void visitSimdSplatX4(MSimdSplatX4* ins);
void visitSimdValueX4(MSimdValueX4* ins);
void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
void visitSubstr(MSubstr* ins);
void visitRandom(MRandom* ins);

View File

@ -4756,11 +4756,11 @@ void
MacroAssemblerARMCompat::compareExchangeARMv7(int nbytes, bool signExtend, const T& mem,
Register oldval, Register newval, Register output)
{
Label Lagain;
Label Ldone;
Label again;
Label done;
ma_dmb(BarrierST);
Register ptr = computePointer(mem, secondScratchReg_);
bind(&Lagain);
bind(&again);
switch (nbytes) {
case 1:
as_ldrexb(output, ptr);
@ -4789,7 +4789,7 @@ MacroAssemblerARMCompat::compareExchangeARMv7(int nbytes, bool signExtend, const
as_cmp(output, O2Reg(ScratchRegister));
else
as_cmp(output, O2Reg(oldval));
as_b(&Ldone, NotEqual);
as_b(&done, NotEqual);
switch (nbytes) {
case 1:
as_strexb(ScratchRegister, newval, ptr);
@ -4802,8 +4802,8 @@ MacroAssemblerARMCompat::compareExchangeARMv7(int nbytes, bool signExtend, const
break;
}
as_cmp(ScratchRegister, Imm8(1));
as_b(&Lagain, Equal);
bind(&Ldone);
as_b(&again, Equal);
bind(&done);
ma_dmb();
}
@ -4826,6 +4826,78 @@ js::jit::MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend,
const BaseIndex& address, Register oldval,
Register newval, Register output);
template<typename T>
void
MacroAssemblerARMCompat::atomicExchange(int nbytes, bool signExtend, const T& mem,
                                        Register value, Register output)
{
    // Word-width LDREX/STREX exist on all supported cores, but the
    // byte/halfword forms (LDREXB/H, STREXB/H) do not.  Without them a
    // sub-word exchange must be synthesized from a word-wide
    // read-modify-write, which does not abstract well, so the two paths
    // are forked.
    //
    // Bug 1077321: We may further optimize for ARMv8 (AArch32) here.
    if (nbytes >= 4 || HasLDSTREXBHD())
        atomicExchangeARMv7(nbytes, signExtend, mem, value, output);
    else
        atomicExchangeARMv6(nbytes, signExtend, mem, value, output);
}
template<typename T>
void
MacroAssemblerARMCompat::atomicExchangeARMv7(int nbytes, bool signExtend, const T& mem,
                                             Register value, Register output)
{
    // Seq-cst atomic exchange via an LDREX/STREX retry loop:
    //
    //     dmb st
    //   again:
    //     ldrex{b,h,} output, [ptr]      ; old value (sign-extend if asked)
    //     strex{b,h,} scratch, value, [ptr]
    //     cmp  scratch, #1               ; 1 => lost the reservation
    //     beq  again
    //     dmb
    //
    // Note: unlike compareExchangeARMv7 there is no early-exit path, so no
    // "done" label is needed (the original carried a bound-but-unused one,
    // removed here).
    Label again;
    ma_dmb(BarrierST);
    Register ptr = computePointer(mem, secondScratchReg_);
    bind(&again);
    switch (nbytes) {
      case 1:
        as_ldrexb(output, ptr);
        if (signExtend)
            as_sxtb(output, output, 0);
        as_strexb(ScratchRegister, value, ptr);
        break;
      case 2:
        as_ldrexh(output, ptr);
        if (signExtend)
            as_sxth(output, output, 0);
        as_strexh(ScratchRegister, value, ptr);
        break;
      case 4:
        // Word-width results are never extended.
        MOZ_ASSERT(!signExtend);
        as_ldrex(output, ptr);
        as_strex(ScratchRegister, value, ptr);
        break;
      default:
        MOZ_CRASH();
    }
    as_cmp(ScratchRegister, Imm8(1));
    as_b(&again, Equal);
    ma_dmb();
}
// Pre-ARMv7 fallback for sub-word exchanges.  Deliberately unimplemented
// for now; callers guard on HasLDSTREXBHD() so this is unreachable in
// practice.
template<typename T>
void
MacroAssemblerARMCompat::atomicExchangeARMv6(int nbytes, bool signExtend, const T& mem,
Register value, Register output)
{
// Bug 1077318: Must use read-modify-write with LDREX / STREX.
MOZ_ASSERT(nbytes == 1 || nbytes == 2);
MOZ_CRASH("NYI");
}
// Explicit instantiations for the two addressing modes used by the code
// generator (constant offset and register index).
template void
js::jit::MacroAssemblerARMCompat::atomicExchange(int nbytes, bool signExtend,
const Address& address, Register value,
Register output);
template void
js::jit::MacroAssemblerARMCompat::atomicExchange(int nbytes, bool signExtend,
const BaseIndex& address, Register value,
Register output);
template<typename T>
void
MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
@ -4880,10 +4952,10 @@ void
MacroAssemblerARMCompat::atomicFetchOpARMv7(int nbytes, bool signExtend, AtomicOp op,
const Register& value, const T& mem, Register output)
{
Label Lagain;
Label again;
Register ptr = computePointer(mem, secondScratchReg_);
ma_dmb();
bind(&Lagain);
bind(&again);
switch (nbytes) {
case 1:
as_ldrexb(output, ptr);
@ -4929,7 +5001,7 @@ MacroAssemblerARMCompat::atomicFetchOpARMv7(int nbytes, bool signExtend, AtomicO
break;
}
as_cmp(ScratchRegister, Imm8(1));
as_b(&Lagain, Equal);
as_b(&again, Equal);
ma_dmb();
}
@ -4987,10 +5059,10 @@ void
MacroAssemblerARMCompat::atomicEffectOpARMv7(int nbytes, AtomicOp op, const Register& value,
const T& mem)
{
Label Lagain;
Label again;
Register ptr = computePointer(mem, secondScratchReg_);
ma_dmb();
bind(&Lagain);
bind(&again);
switch (nbytes) {
case 1:
as_ldrexb(ScratchRegister, ptr);
@ -5031,7 +5103,7 @@ MacroAssemblerARMCompat::atomicEffectOpARMv7(int nbytes, AtomicOp op, const Regi
break;
}
as_cmp(ScratchRegister, Imm8(1));
as_b(&Lagain, Equal);
as_b(&again, Equal);
ma_dmb();
}

View File

@ -1379,6 +1379,18 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void compareExchange(int nbytes, bool signExtend, const T& address, Register oldval,
Register newval, Register output);
template<typename T>
void atomicExchangeARMv6(int nbytes, bool signExtend, const T& mem, Register value,
Register output);
template<typename T>
void atomicExchangeARMv7(int nbytes, bool signExtend, const T& mem, Register value,
Register output);
template<typename T>
void atomicExchange(int nbytes, bool signExtend, const T& address, Register value,
Register output);
template<typename T>
void atomicFetchOpARMv6(int nbytes, bool signExtend, AtomicOp op, const Register& value,
const T& mem, Register temp, Register output);
@ -1436,6 +1448,31 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
compareExchange(4, false, mem, oldval, newval, output);
}
// Convenience wrappers over atomicExchange(nbytes, signExtend, ...), one
// per element width / extension combination.
template<typename T>
void atomicExchange8SignExtend(const T& mem, Register value, Register output)
{
atomicExchange(1, true, mem, value, output);
}
template<typename T>
void atomicExchange8ZeroExtend(const T& mem, Register value, Register output)
{
atomicExchange(1, false, mem, value, output);
}
template<typename T>
void atomicExchange16SignExtend(const T& mem, Register value, Register output)
{
atomicExchange(2, true, mem, value, output);
}
template<typename T>
void atomicExchange16ZeroExtend(const T& mem, Register value, Register output)
{
atomicExchange(2, false, mem, value, output);
}
template<typename T>
void atomicExchange32(const T& mem, Register value, Register output) {
atomicExchange(4, false, mem, value, output);
}
template<typename T, typename S>
void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, temp, output);

View File

@ -75,6 +75,7 @@ class LIRGeneratorNone : public LIRGeneratorShared
// The "none" backend does no code generation; every lowering hook crashes.
void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins) { MOZ_CRASH(); }
void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins) { MOZ_CRASH(); }
void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins) { MOZ_CRASH(); }
void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins) { MOZ_CRASH(); }
void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins) { MOZ_CRASH(); }
void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins) { MOZ_CRASH(); }

View File

@ -325,6 +325,11 @@ class MacroAssemblerNone : public Assembler
template <typename T> void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
template <typename T> void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
template <typename T> void compareExchange32(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
// Stub backend: the atomic-exchange assembler entry points must exist to
// satisfy the shared interface but are never reached.
template<typename T> void atomicExchange8SignExtend(const T& mem, Register value, Register output) { MOZ_CRASH(); }
template<typename T> void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) { MOZ_CRASH(); }
template<typename T> void atomicExchange16SignExtend(const T& mem, Register value, Register output) { MOZ_CRASH(); }
template<typename T> void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) { MOZ_CRASH(); }
template<typename T> void atomicExchange32(const T& mem, Register value, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd8SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd8ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd16SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }

View File

@ -127,6 +127,12 @@ LIRGeneratorX64::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArra
lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
}
// On x64 every GPR has a byte-addressable form, so the shared lowering
// does not need to pin byte operations to the i386 a/b/c/d registers.
void
LIRGeneratorX64::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
{
lowerAtomicExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
}
void
LIRGeneratorX64::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
{

View File

@ -41,6 +41,7 @@ class LIRGeneratorX64 : public LIRGeneratorX86Shared
void visitUnbox(MUnbox* unbox);
void visitReturn(MReturn* ret);
void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins);
void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins);

View File

@ -1718,6 +1718,43 @@ class AssemblerX86Shared : public AssemblerShared
}
}
// xchg between a register and memory, dispatched on addressing mode.
// NOTE: on x86, xchg with a memory operand locks the bus implicitly, so
// no explicit 'lock' prefix is emitted (see the Intel SDM); this makes
// it suitable for seq-cst atomic exchange.
void xchgb(Register src, const Operand& mem) {
switch (mem.kind()) {
case Operand::MEM_REG_DISP:
masm.xchgb_rm(src.encoding(), mem.disp(), mem.base());
break;
case Operand::MEM_SCALE:
masm.xchgb_rm(src.encoding(), mem.disp(), mem.base(), mem.index(), mem.scale());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
}
void xchgw(Register src, const Operand& mem) {
switch (mem.kind()) {
case Operand::MEM_REG_DISP:
masm.xchgw_rm(src.encoding(), mem.disp(), mem.base());
break;
case Operand::MEM_SCALE:
masm.xchgw_rm(src.encoding(), mem.disp(), mem.base(), mem.index(), mem.scale());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
}
void xchgl(Register src, const Operand& mem) {
switch (mem.kind()) {
case Operand::MEM_REG_DISP:
masm.xchgl_rm(src.encoding(), mem.disp(), mem.base());
break;
case Operand::MEM_SCALE:
masm.xchgl_rm(src.encoding(), mem.disp(), mem.base(), mem.index(), mem.scale());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
}
void lock_xaddb(Register srcdest, const Operand& mem) {
switch (mem.kind()) {
case Operand::MEM_REG_DISP:

View File

@ -1839,11 +1839,45 @@ public:
m_formatter.oneByteOp(OP_CDQ);
}
// Low-level encoders for xchg between a register and memory, in both
// base+disp and base+index*scale+disp addressing forms.
void xchgb_rm(RegisterID src, int32_t offset, RegisterID base)
{
spew("xchgb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
m_formatter.oneByteOp8(OP_XCHG_GbEb, offset, base, src);
}
void xchgb_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
{
spew("xchgb %s, " MEM_obs, GPReg8Name(src), ADDR_obs(offset, base, index, scale));
m_formatter.oneByteOp8(OP_XCHG_GbEb, offset, base, index, scale, src);
}
// The 16-bit forms need the operand-size (0x66) prefix.
void xchgw_rm(RegisterID src, int32_t offset, RegisterID base)
{
spew("xchgw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
m_formatter.prefix(PRE_OPERAND_SIZE);
m_formatter.oneByteOp(OP_XCHG_GvEv, offset, base, src);
}
void xchgw_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
{
spew("xchgw %s, " MEM_obs, GPReg16Name(src), ADDR_obs(offset, base, index, scale));
m_formatter.prefix(PRE_OPERAND_SIZE);
m_formatter.oneByteOp(OP_XCHG_GvEv, offset, base, index, scale, src);
}
void xchgl_rr(RegisterID src, RegisterID dst)
{
spew("xchgl %s, %s", GPReg32Name(src), GPReg32Name(dst));
m_formatter.oneByteOp(OP_XCHG_GvEv, src, dst);
}
void xchgl_rm(RegisterID src, int32_t offset, RegisterID base)
{
spew("xchgl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
m_formatter.oneByteOp(OP_XCHG_GvEv, offset, base, src);
}
void xchgl_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
{
spew("xchgl %s, " MEM_obs, GPReg32Name(src), ADDR_obs(offset, base, index, scale));
m_formatter.oneByteOp(OP_XCHG_GvEv, offset, base, index, scale, src);
}
#ifdef JS_CODEGEN_X64
void xchgq_rr(RegisterID src, RegisterID dst)
@ -1851,6 +1885,16 @@ public:
spew("xchgq %s, %s", GPReg64Name(src), GPReg64Name(dst));
m_formatter.oneByteOp64(OP_XCHG_GvEv, src, dst);
}
// 64-bit memory forms (REX.W encoded via oneByteOp64).
void xchgq_rm(RegisterID src, int32_t offset, RegisterID base)
{
spew("xchgq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, src);
}
void xchgq_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
{
spew("xchgq %s, " MEM_obs, GPReg64Name(src), ADDR_obs(offset, base, index, scale));
m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, index, scale, src);
}
#endif
void movl_rr(RegisterID src, RegisterID dst)

View File

@ -74,6 +74,7 @@ enum OneByteOpcodeID {
OP_TEST_EbGb = 0x84,
OP_NOP_84 = 0x84,
OP_TEST_EvGv = 0x85,
OP_XCHG_GbEb = 0x86,
OP_XCHG_GvEv = 0x87,
OP_MOV_EbGv = 0x88,
OP_MOV_EvGv = 0x89,

View File

@ -451,6 +451,45 @@ LIRGeneratorX86Shared::lowerCompareExchangeTypedArrayElement(MCompareExchangeTyp
define(lir, ins);
}
// Shared x86/x64 lowering for Atomics.exchange on a typed-array element.
// |useI386ByteRegisters| is true on x86 only, where byte operations need
// one of the byte-addressable registers (the output is pinned to eax).
void
LIRGeneratorX86Shared::lowerAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins,
bool useI386ByteRegisters)
{
// Only integer element types (<= Uint32) reach this lowering.
MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());
const LAllocation value = useRegister(ins->value());
// The underlying instruction is XCHG, which can operate on any
// register.
//
// If the target is a floating register (for Uint32) then we need
// a temp into which to exchange.
//
// If the source is a byte array then we need a register that has
// a byte size; in this case -- on x86 only -- pin the output to
// an appropriate register and use that as a temp in the back-end.
LDefinition tempDef = LDefinition::BogusTemp();
if (ins->arrayType() == Scalar::Uint32) {
// This restriction is bug 1077305.
MOZ_ASSERT(ins->type() == MIRType_Double);
tempDef = temp();
}
LAtomicExchangeTypedArrayElement* lir =
new(alloc()) LAtomicExchangeTypedArrayElement(elements, index, value, tempDef);
if (useI386ByteRegisters && ins->isByteArray())
defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
else
define(lir, ins);
}
void
LIRGeneratorX86Shared::lowerAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins,
bool useI386ByteRegisters)

View File

@ -58,6 +58,8 @@ class LIRGeneratorX86Shared : public LIRGeneratorShared
void visitSimdValueX4(MSimdValueX4* ins);
void lowerCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins,
bool useI386ByteRegisters);
void lowerAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins,
bool useI386ByteRegisters);
void lowerAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins,
bool useI386ByteRegisters);
};

View File

@ -770,6 +770,20 @@ class MacroAssemblerX86Shared : public Assembler
lock_cmpxchgb(newval, Operand(mem));
movsbl(output, output);
}
// Atomic 8-bit exchange: copy |value| into |output|, swap its low byte
// with memory (xchgb is implicitly locked), then widen the old byte to
// 32 bits.  |output| must have a byte-addressable encoding; on x86 the
// lowering pins it to eax.
template <typename T>
void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) {
if (value != output)
movl(value, output);
xchgb(output, Operand(mem));
movzbl(output, output);
}
template <typename T>
void atomicExchange8SignExtend(const T& mem, Register value, Register output) {
if (value != output)
movl(value, output);
xchgb(output, Operand(mem));
movsbl(output, output);
}
void load16ZeroExtend(const Address& src, Register dest) {
movzwl(Operand(src), dest);
}
@ -796,6 +810,20 @@ class MacroAssemblerX86Shared : public Assembler
lock_cmpxchgw(newval, Operand(mem));
movswl(output, output);
}
// Atomic 16-bit exchange: swap the low 16 bits of |output| with memory
// (xchgw is implicitly locked), then zero- or sign-extend the old value
// to 32 bits.
template <typename T>
void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) {
if (value != output)
movl(value, output);
xchgw(output, Operand(mem));
movzwl(output, output);
}
template <typename T>
void atomicExchange16SignExtend(const T& mem, Register value, Register output) {
if (value != output)
movl(value, output);
xchgw(output, Operand(mem));
movswl(output, output);
}
void load16SignExtend(const Address& src, Register dest) {
movswl(Operand(src), dest);
}
@ -822,6 +850,12 @@ class MacroAssemblerX86Shared : public Assembler
movl(oldval, output);
lock_cmpxchgl(newval, Operand(mem));
}
// Atomic 32-bit exchange: the full old value lands in |output|, so no
// extension step is needed.
template <typename T>
void atomicExchange32(const T& mem, Register value, Register output) {
if (value != output)
movl(value, output);
xchgl(output, Operand(mem));
}
template <typename S, typename T>
void store32_NoSecondScratch(const S& src, const T& dest) {
store32(src, dest);

View File

@ -177,6 +177,12 @@ LIRGeneratorX86::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArra
lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ true);
}
// On x86, byte arrays need a byte-addressable output register, so pass
// useI386ByteRegisters = true (the shared lowering pins the output to eax).
void
LIRGeneratorX86::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
{
lowerAtomicExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ true);
}
void
LIRGeneratorX86::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
{

View File

@ -47,6 +47,7 @@ class LIRGeneratorX86 : public LIRGeneratorX86Shared
void visitUnbox(MUnbox* unbox);
void visitReturn(MReturn* ret);
void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins);
void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins);