Bug 1084248 - add a cast<> method, remove the casting constructor as requested. r=waldo

Lars T Hansen 2015-10-01 00:46:45 +02:00
parent 3ed3cd7306
commit a91e31b413
6 changed files with 87 additions and 97 deletions
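In short, every call site that previously converted a SharedMem pointer with the now-removed explicit casting constructor switches to the new cast<>() member, which reinterprets the pointee type while keeping the shared/unshared tag. A minimal before/after sketch of the pattern (viewData and offset stand in for the concrete call sites shown in the hunks below):

    // Before: explicit converting constructor, removed by this patch.
    int32_t v = jit::AtomicOperations::loadSeqCst(SharedMem<int32_t*>(viewData) + offset);

    // After: member template that preserves the sharedness tag.
    int32_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<int32_t*>() + offset);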

View File

@@ -406,7 +406,7 @@ MOZ_COLD static void
SetGPRegToLoadedValueSext32(SharedMem<void*> addr, size_t size, void* gp_reg)
{
MOZ_RELEASE_ASSERT(size <= sizeof(int32_t));
int8_t msb = AtomicOperations::loadSafeWhenRacy(SharedMem<uint8_t*>(addr) + (size - 1));
int8_t msb = AtomicOperations::loadSafeWhenRacy(addr.cast<uint8_t*>() + (size - 1));
memset(gp_reg, 0, sizeof(void*));
memset(gp_reg, msb >> 7, sizeof(int32_t));
AtomicOperations::memcpySafeWhenRacy(gp_reg, addr, size);
@@ -689,13 +689,13 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
MOZ_RELEASE_ASSERT(wrappedAddress + size <= module.maybeHeap() + module.heapLength());
switch (access.kind()) {
case Disassembler::HeapAccess::Load:
SetRegisterToLoadedValue(context, SharedMem<void*>(wrappedAddress), size, access.otherOperand());
SetRegisterToLoadedValue(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
break;
case Disassembler::HeapAccess::LoadSext32:
SetRegisterToLoadedValueSext32(context, SharedMem<void*>(wrappedAddress), size, access.otherOperand());
SetRegisterToLoadedValueSext32(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
break;
case Disassembler::HeapAccess::Store:
StoreValueFromRegister(context, SharedMem<void*>(wrappedAddress), size, access.otherOperand());
StoreValueFromRegister(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
break;
case Disassembler::HeapAccess::Unknown:
MOZ_CRASH("Failed to disassemble instruction");

View File

@@ -129,49 +129,49 @@ CompareExchange(Scalar::Type viewType, int32_t oldCandidate, int32_t newCandidat
case Scalar::Int8: {
int8_t oldval = (int8_t)oldCandidate;
int8_t newval = (int8_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<int8_t*>(viewData) + offset,
oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<int8_t*>() + offset,
oldval, newval);
return oldval;
}
case Scalar::Uint8: {
uint8_t oldval = (uint8_t)oldCandidate;
uint8_t newval = (uint8_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<uint8_t*>(viewData) + offset,
oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<uint8_t*>() + offset,
oldval, newval);
return oldval;
}
case Scalar::Uint8Clamped: {
uint8_t oldval = ClampIntForUint8Array(oldCandidate);
uint8_t newval = ClampIntForUint8Array(newCandidate);
oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<uint8_t*>(viewData) + offset,
oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<uint8_t*>() + offset,
oldval, newval);
return oldval;
}
case Scalar::Int16: {
int16_t oldval = (int16_t)oldCandidate;
int16_t newval = (int16_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<int16_t*>(viewData) + offset,
oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<int16_t*>() + offset,
oldval, newval);
return oldval;
}
case Scalar::Uint16: {
uint16_t oldval = (uint16_t)oldCandidate;
uint16_t newval = (uint16_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<uint16_t*>(viewData) + offset,
oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<uint16_t*>() + offset,
oldval, newval);
return oldval;
}
case Scalar::Int32: {
int32_t oldval = oldCandidate;
int32_t newval = newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<int32_t*>(viewData) + offset,
oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<int32_t*>() + offset,
oldval, newval);
return oldval;
}
case Scalar::Uint32: {
uint32_t oldval = (uint32_t)oldCandidate;
uint32_t newval = (uint32_t)newCandidate;
oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<uint32_t*>(viewData) + offset,
oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<uint32_t*>() + offset,
oldval, newval);
return (int32_t)oldval;
}
@@ -238,32 +238,32 @@ js::atomics_load(JSContext* cx, unsigned argc, Value* vp)
switch (view->type()) {
case Scalar::Uint8:
case Scalar::Uint8Clamped: {
uint8_t v = jit::AtomicOperations::loadSeqCst(SharedMem<uint8_t*>(viewData) + offset);
uint8_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<uint8_t*>() + offset);
r.setInt32(v);
return true;
}
case Scalar::Int8: {
int8_t v = jit::AtomicOperations::loadSeqCst(SharedMem<uint8_t*>(viewData) + offset);
int8_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<uint8_t*>() + offset);
r.setInt32(v);
return true;
}
case Scalar::Int16: {
int16_t v = jit::AtomicOperations::loadSeqCst(SharedMem<int16_t*>(viewData) + offset);
int16_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<int16_t*>() + offset);
r.setInt32(v);
return true;
}
case Scalar::Uint16: {
uint16_t v = jit::AtomicOperations::loadSeqCst(SharedMem<uint16_t*>(viewData) + offset);
uint16_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<uint16_t*>() + offset);
r.setInt32(v);
return true;
}
case Scalar::Int32: {
int32_t v = jit::AtomicOperations::loadSeqCst(SharedMem<int32_t*>(viewData) + offset);
int32_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<int32_t*>() + offset);
r.setInt32(v);
return true;
}
case Scalar::Uint32: {
uint32_t v = jit::AtomicOperations::loadSeqCst(SharedMem<uint32_t*>(viewData) + offset);
uint32_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<uint32_t*>() + offset);
r.setNumber(v);
return true;
}
@@ -293,37 +293,37 @@ ExchangeOrStore(Scalar::Type viewType, int32_t numberValue, SharedMem<void*> vie
switch (viewType) {
case Scalar::Int8: {
int8_t value = (int8_t)numberValue;
INT_OP(SharedMem<int8_t*>(viewData) + offset, value);
INT_OP(viewData.cast<int8_t*>() + offset, value);
return value;
}
case Scalar::Uint8: {
uint8_t value = (uint8_t)numberValue;
INT_OP(SharedMem<uint8_t*>(viewData) + offset, value);
INT_OP(viewData.cast<uint8_t*>() + offset, value);
return value;
}
case Scalar::Uint8Clamped: {
uint8_t value = ClampIntForUint8Array(numberValue);
INT_OP(SharedMem<uint8_t*>(viewData) + offset, value);
INT_OP(viewData.cast<uint8_t*>() + offset, value);
return value;
}
case Scalar::Int16: {
int16_t value = (int16_t)numberValue;
INT_OP(SharedMem<int16_t*>(viewData) + offset, value);
INT_OP(viewData.cast<int16_t*>() + offset, value);
return value;
}
case Scalar::Uint16: {
uint16_t value = (uint16_t)numberValue;
INT_OP(SharedMem<uint16_t*>(viewData) + offset, value);
INT_OP(viewData.cast<uint16_t*>() + offset, value);
return value;
}
case Scalar::Int32: {
int32_t value = numberValue;
INT_OP(SharedMem<int32_t*>(viewData) + offset, value);
INT_OP(viewData.cast<int32_t*>() + offset, value);
return value;
}
case Scalar::Uint32: {
uint32_t value = (uint32_t)numberValue;
INT_OP(SharedMem<uint32_t*>(viewData) + offset, value);
INT_OP(viewData.cast<uint32_t*>() + offset, value);
return (int32_t)value;
}
default:
@@ -399,12 +399,12 @@ AtomicsBinop(JSContext* cx, HandleValue objv, HandleValue idxv, HandleValue valv
switch (view->type()) {
case Scalar::Int8: {
int8_t v = (int8_t)numberValue;
r.setInt32(T::operate(SharedMem<int8_t*>(viewData) + offset, v));
r.setInt32(T::operate(viewData.cast<int8_t*>() + offset, v));
return true;
}
case Scalar::Uint8: {
uint8_t v = (uint8_t)numberValue;
r.setInt32(T::operate(SharedMem<uint8_t*>(viewData) + offset, v));
r.setInt32(T::operate(viewData.cast<uint8_t*>() + offset, v));
return true;
}
case Scalar::Uint8Clamped: {
@@ -415,7 +415,7 @@ AtomicsBinop(JSContext* cx, HandleValue objv, HandleValue idxv, HandleValue valv
// - store the result
// This requires a CAS loop.
int32_t value = ClampIntForUint8Array(numberValue);
SharedMem<uint8_t*> loc = SharedMem<uint8_t*>(viewData) + offset;
SharedMem<uint8_t*> loc = viewData.cast<uint8_t*>() + offset;
for (;;) {
uint8_t old = jit::AtomicOperations::loadSafeWhenRacy(loc);
uint8_t result = (uint8_t)ClampIntForUint8Array(T::perform(old, value));
@@ -429,22 +429,22 @@ AtomicsBinop(JSContext* cx, HandleValue objv, HandleValue idxv, HandleValue valv
}
case Scalar::Int16: {
int16_t v = (int16_t)numberValue;
r.setInt32(T::operate(SharedMem<int16_t*>(viewData) + offset, v));
r.setInt32(T::operate(viewData.cast<int16_t*>() + offset, v));
return true;
}
case Scalar::Uint16: {
uint16_t v = (uint16_t)numberValue;
r.setInt32(T::operate(SharedMem<uint16_t*>(viewData) + offset, v));
r.setInt32(T::operate(viewData.cast<uint16_t*>() + offset, v));
return true;
}
case Scalar::Int32: {
int32_t v = numberValue;
r.setInt32(T::operate(SharedMem<int32_t*>(viewData) + offset, v));
r.setInt32(T::operate(viewData.cast<int32_t*>() + offset, v));
return true;
}
case Scalar::Uint32: {
uint32_t v = (uint32_t)numberValue;
r.setNumber((double)T::operate(SharedMem<uint32_t*>(viewData) + offset, v));
r.setNumber((double)T::operate(viewData.cast<uint32_t*>() + offset, v));
return true;
}
default:
@@ -558,7 +558,7 @@ GetCurrentAsmJSHeap(SharedMem<void*>* heap, size_t* length)
{
JSRuntime* rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
AsmJSModule& mod = rt->asmJSActivationStack()->module();
*heap = SharedMem<void*>(mod.maybeHeap());
*heap = mod.maybeHeap().cast<void*>();
*length = mod.heapLength();
}
@@ -572,13 +572,13 @@ js::atomics_add_asm_callout(int32_t vt, int32_t offset, int32_t value)
return 0;
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return PerformAdd::operate(SharedMem<int8_t*>(heap) + offset, value);
return PerformAdd::operate(heap.cast<int8_t*>() + offset, value);
case Scalar::Uint8:
return PerformAdd::operate(SharedMem<uint8_t*>(heap) + offset, value);
return PerformAdd::operate(heap.cast<uint8_t*>() + offset, value);
case Scalar::Int16:
return PerformAdd::operate(SharedMem<int16_t*>(heap) + (offset >> 1), value);
return PerformAdd::operate(heap.cast<int16_t*>() + (offset >> 1), value);
case Scalar::Uint16:
return PerformAdd::operate(SharedMem<uint16_t*>(heap) + (offset >> 1), value);
return PerformAdd::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
@@ -594,13 +594,13 @@ js::atomics_sub_asm_callout(int32_t vt, int32_t offset, int32_t value)
return 0;
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return PerformSub::operate(SharedMem<int8_t*>(heap) + offset, value);
return PerformSub::operate(heap.cast<int8_t*>() + offset, value);
case Scalar::Uint8:
return PerformSub::operate(SharedMem<uint8_t*>(heap) + offset, value);
return PerformSub::operate(heap.cast<uint8_t*>() + offset, value);
case Scalar::Int16:
return PerformSub::operate(SharedMem<int16_t*>(heap) + (offset >> 1), value);
return PerformSub::operate(heap.cast<int16_t*>() + (offset >> 1), value);
case Scalar::Uint16:
return PerformSub::operate(SharedMem<uint16_t*>(heap) + (offset >> 1), value);
return PerformSub::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
@@ -616,13 +616,13 @@ js::atomics_and_asm_callout(int32_t vt, int32_t offset, int32_t value)
return 0;
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return PerformAnd::operate(SharedMem<int8_t*>(heap) + offset, value);
return PerformAnd::operate(heap.cast<int8_t*>() + offset, value);
case Scalar::Uint8:
return PerformAnd::operate(SharedMem<uint8_t*>(heap) + offset, value);
return PerformAnd::operate(heap.cast<uint8_t*>() + offset, value);
case Scalar::Int16:
return PerformAnd::operate(SharedMem<int16_t*>(heap) + (offset >> 1), value);
return PerformAnd::operate(heap.cast<int16_t*>() + (offset >> 1), value);
case Scalar::Uint16:
return PerformAnd::operate(SharedMem<uint16_t*>(heap) + (offset >> 1), value);
return PerformAnd::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
@@ -638,13 +638,13 @@ js::atomics_or_asm_callout(int32_t vt, int32_t offset, int32_t value)
return 0;
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return PerformOr::operate(SharedMem<int8_t*>(heap) + offset, value);
return PerformOr::operate(heap.cast<int8_t*>() + offset, value);
case Scalar::Uint8:
return PerformOr::operate(SharedMem<uint8_t*>(heap) + offset, value);
return PerformOr::operate(heap.cast<uint8_t*>() + offset, value);
case Scalar::Int16:
return PerformOr::operate(SharedMem<int16_t*>(heap) + (offset >> 1), value);
return PerformOr::operate(heap.cast<int16_t*>() + (offset >> 1), value);
case Scalar::Uint16:
return PerformOr::operate(SharedMem<uint16_t*>(heap) + (offset >> 1), value);
return PerformOr::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
@@ -660,13 +660,13 @@ js::atomics_xor_asm_callout(int32_t vt, int32_t offset, int32_t value)
return 0;
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return PerformXor::operate(SharedMem<int8_t*>(heap) + offset, value);
return PerformXor::operate(heap.cast<int8_t*>() + offset, value);
case Scalar::Uint8:
return PerformXor::operate(SharedMem<uint8_t*>(heap) + offset, value);
return PerformXor::operate(heap.cast<uint8_t*>() + offset, value);
case Scalar::Int16:
return PerformXor::operate(SharedMem<int16_t*>(heap) + (offset >> 1), value);
return PerformXor::operate(heap.cast<int16_t*>() + (offset >> 1), value);
case Scalar::Uint16:
return PerformXor::operate(SharedMem<uint16_t*>(heap) + (offset >> 1), value);
return PerformXor::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
@@ -811,7 +811,7 @@ js::atomics_futexWait(JSContext* cx, unsigned argc, Value* vp)
// and it provides the necessary memory fence.
AutoLockFutexAPI lock;
SharedMem<int32_t*> addr = SharedMem<int32_t*>(view->viewDataShared()) + offset;
SharedMem<int32_t*> addr = view->viewDataShared().cast<int32_t*>() + offset;
if (jit::AtomicOperations::loadSafeWhenRacy(addr) != value) {
r.setInt32(AtomicsObject::FutexNotequal);
return true;
@@ -927,7 +927,7 @@ js::atomics_futexWakeOrRequeue(JSContext* cx, unsigned argc, Value* vp)
AutoLockFutexAPI lock;
SharedMem<int32_t*> addr = SharedMem<int32_t*>(view->viewDataShared()) + offset1;
SharedMem<int32_t*> addr = view->viewDataShared().cast<int32_t*>() + offset1;
if (jit::AtomicOperations::loadSafeWhenRacy(addr) != value) {
r.setInt32(AtomicsObject::FutexNotequal);
return true;

View File

@@ -1148,7 +1148,7 @@ Load(JSContext* cx, unsigned argc, Value* vp)
if (!result)
return false;
SharedMem<Elem*> src = SharedMem<Elem*>(AnyTypedArrayViewData(typedArray).addBytes(byteStart));
SharedMem<Elem*> src = AnyTypedArrayViewData(typedArray).addBytes(byteStart).cast<Elem*>();
Elem* dst = reinterpret_cast<Elem*>(result->typedMem());
jit::AtomicOperations::memcpySafeWhenRacy(dst, src, sizeof(Elem) * NumElem);
@@ -1175,7 +1175,7 @@ Store(JSContext* cx, unsigned argc, Value* vp)
return ErrorBadArgs(cx);
Elem* src = TypedObjectMemory<Elem*>(args[2]);
SharedMem<Elem*> dst = SharedMem<Elem*>(AnyTypedArrayViewData(typedArray).addBytes(byteStart));
SharedMem<Elem*> dst = AnyTypedArrayViewData(typedArray).addBytes(byteStart).cast<Elem*>();
js::jit::AtomicOperations::memcpySafeWhenRacy(dst, src, sizeof(Elem) * NumElem);
args.rval().setObject(args[2].toObject());

View File

@@ -33,6 +33,7 @@ class SharedMem
{}
public:
// Create a SharedMem<T> that is an unshared nullptr.
SharedMem()
: ptr_(nullptr)
#ifdef DEBUG
@@ -49,30 +50,6 @@ class SharedMem
#endif
{}
SharedMem(const SharedMem& that)
: ptr_(that.ptr_)
#ifdef DEBUG
, sharedness_(that.sharedness_)
#endif
{}
// Cast from U* u where static_cast<T>(u) is legal.
template<typename U>
explicit SharedMem(const SharedMem<U>& that)
: ptr_(static_cast<T>(that.unwrap()))
#ifdef DEBUG
, sharedness_((SharedMem::Sharedness)that.sharedness())
#endif
{}
#ifdef DEBUG
// Needed by the constructor directly above, no credible "friend"
// solution presents itself.
Sharedness sharedness() const {
return sharedness_;
}
#endif
// Create a SharedMem<T> that's marked as shared.
static SharedMem shared(void* p) {
return SharedMem(static_cast<T>(p), IsShared);
@@ -91,6 +68,19 @@ class SharedMem
return *this;
}
// Reinterpret-cast the pointer to type U, preserving sharedness.
template<typename U>
inline SharedMem<U> cast() const {
#ifdef DEBUG
MOZ_ASSERT(asValue() % sizeof(typename mozilla::Conditional<mozilla::IsVoid<typename mozilla::RemovePointer<U>::Type>::value,
char,
typename mozilla::RemovePointer<U>::Type>::Type) == 0);
if (sharedness_ == IsUnshared)
return SharedMem<U>::unshared(unwrap());
#endif
return SharedMem<U>::shared(unwrap());
}
explicit operator bool() { return ptr_ != nullptr; }
SharedMem operator +(size_t offset) {

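For reference, a small usage sketch of the cast<>() method added above. rawHeapPointer is a hypothetical pointer into a SharedArrayBuffer's memory; everything else is the API shown in this hunk:

    SharedMem<void*> heap = SharedMem<void*>::shared(rawHeapPointer); // tagged as shared
    SharedMem<uint8_t*> bytes = heap.cast<uint8_t*>();                // same address, sharedness preserved
    uint8_t b = jit::AtomicOperations::loadSafeWhenRacy(bytes + 1);   // SharedMem supports pointer arithmetic

In DEBUG builds the cast additionally asserts that the address is suitably aligned for the target element type, per the MOZ_ASSERT above.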
View File

@@ -368,7 +368,7 @@ class SharedTypedArrayObjectTemplate : public SharedTypedArrayObject
{
SharedTypedArrayObject& tarray = obj->as<SharedTypedArrayObject>();
MOZ_ASSERT(index < tarray.length());
return jit::AtomicOperations::loadSafeWhenRacy(SharedMem<NativeType*>(tarray.viewDataShared()) + index);
return jit::AtomicOperations::loadSafeWhenRacy(tarray.viewDataShared().template cast<NativeType*>() + index);
}
static void
@@ -400,7 +400,7 @@ class SharedTypedArrayObjectTemplate : public SharedTypedArrayObject
setIndex(SharedTypedArrayObject& tarray, uint32_t index, NativeType val)
{
MOZ_ASSERT(index < tarray.length());
jit::AtomicOperations::storeSafeWhenRacy(SharedMem<NativeType*>(tarray.viewDataShared()) + index, val);
jit::AtomicOperations::storeSafeWhenRacy(tarray.viewDataShared().template cast<NativeType*>() + index, val);
}
static Value getIndexValue(JSObject* tarray, uint32_t index);

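A note on the .template cast<NativeType*>() spelling used here (and in the TypedArrayCommon.h hunks below): inside templated code the template keyword disambiguates cast as a member template, so the following < is parsed as the start of a template argument list rather than a comparison. A hypothetical helper, not part of the patch, just to illustrate the syntax:

    template <typename NativeType>
    NativeType loadIndex(SharedMem<void*> data, uint32_t index) {
        // 'template' tells the parser that cast is a member template.
        return jit::AtomicOperations::loadSafeWhenRacy(data.template cast<NativeType*>() + index);
    }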
View File

@@ -218,11 +218,11 @@ class ElementSpecific
return setFromOverlappingTypedArray(cx, target, src, offset);
}
SharedMem<T*> dest = SharedMem<T*>(AnyTypedArrayViewData(target)) + offset;
SharedMem<T*> dest = AnyTypedArrayViewData(target).template cast<T*>() + offset;
uint32_t count = AnyTypedArrayLength(source);
if (AnyTypedArrayType(source) == target->type()) {
Ops::memcpy(SharedMem<void*>(dest), AnyTypedArrayViewData(source), count*sizeof(T));
Ops::memcpy(dest.template cast<void*>(), AnyTypedArrayViewData(source), count*sizeof(T));
return true;
}
@@ -236,50 +236,50 @@ class ElementSpecific
SharedMem<void*> data = AnyTypedArrayViewData(source);
switch (AnyTypedArrayType(source)) {
case Scalar::Int8: {
SharedMem<JS_VOLATILE_ARM int8_t*> src = SharedMem<JS_VOLATILE_ARM int8_t*>(data);
SharedMem<JS_VOLATILE_ARM int8_t*> src = data.cast<JS_VOLATILE_ARM int8_t*>();
for (uint32_t i = 0; i < count; ++i)
Ops::store(dest++, T(Ops::load(src++)));
break;
}
case Scalar::Uint8:
case Scalar::Uint8Clamped: {
SharedMem<JS_VOLATILE_ARM uint8_t*> src = SharedMem<JS_VOLATILE_ARM uint8_t*>(data);
SharedMem<JS_VOLATILE_ARM uint8_t*> src = data.cast<JS_VOLATILE_ARM uint8_t*>();
for (uint32_t i = 0; i < count; ++i)
Ops::store(dest++, T(Ops::load(src++)));
break;
}
case Scalar::Int16: {
SharedMem<JS_VOLATILE_ARM int16_t*> src = SharedMem<JS_VOLATILE_ARM int16_t*>(data);
SharedMem<JS_VOLATILE_ARM int16_t*> src = data.cast<JS_VOLATILE_ARM int16_t*>();
for (uint32_t i = 0; i < count; ++i)
Ops::store(dest++, T(Ops::load(src++)));
break;
}
case Scalar::Uint16: {
SharedMem<JS_VOLATILE_ARM uint16_t*> src = SharedMem<JS_VOLATILE_ARM uint16_t*>(data);
SharedMem<JS_VOLATILE_ARM uint16_t*> src = data.cast<JS_VOLATILE_ARM uint16_t*>();
for (uint32_t i = 0; i < count; ++i)
Ops::store(dest++, T(Ops::load(src++)));
break;
}
case Scalar::Int32: {
SharedMem<JS_VOLATILE_ARM int32_t*> src = SharedMem<JS_VOLATILE_ARM int32_t*>(data);
SharedMem<JS_VOLATILE_ARM int32_t*> src = data.cast<JS_VOLATILE_ARM int32_t*>();
for (uint32_t i = 0; i < count; ++i)
Ops::store(dest++, T(Ops::load(src++)));
break;
}
case Scalar::Uint32: {
SharedMem<JS_VOLATILE_ARM uint32_t*> src = SharedMem<JS_VOLATILE_ARM uint32_t*>(data);
SharedMem<JS_VOLATILE_ARM uint32_t*> src = data.cast<JS_VOLATILE_ARM uint32_t*>();
for (uint32_t i = 0; i < count; ++i)
Ops::store(dest++, T(Ops::load(src++)));
break;
}
case Scalar::Float32: {
SharedMem<JS_VOLATILE_ARM float*> src = SharedMem<JS_VOLATILE_ARM float*>(data);
SharedMem<JS_VOLATILE_ARM float*> src = data.cast<JS_VOLATILE_ARM float*>();
for (uint32_t i = 0; i < count; ++i)
Ops::store(dest++, T(Ops::load(src++)));
break;
}
case Scalar::Float64: {
SharedMem<JS_VOLATILE_ARM double*> src = SharedMem<JS_VOLATILE_ARM double*>(data);
SharedMem<JS_VOLATILE_ARM double*> src = data.cast<JS_VOLATILE_ARM double*>();
for (uint32_t i = 0; i < count; ++i)
Ops::store(dest++, T(Ops::load(src++)));
break;
@@ -313,7 +313,7 @@ class ElementSpecific
// the first potentially side-effectful lookup or conversion.
uint32_t bound = Min(source->as<NativeObject>().getDenseInitializedLength(), len);
SharedMem<T*> dest = SharedMem<T*>(AnyTypedArrayViewData(target)) + offset;
SharedMem<T*> dest = AnyTypedArrayViewData(target).template cast<T*>() + offset;
MOZ_ASSERT(!canConvertInfallibly(MagicValue(JS_ELEMENTS_HOLE)),
"the following loop must abort on holes");
@@ -343,7 +343,7 @@ class ElementSpecific
break;
// Compute every iteration in case getElement/valueToNative is wacky.
Ops::store(SharedMem<T*>(AnyTypedArrayViewData(target)) + offset + i, n);
Ops::store(AnyTypedArrayViewData(target).template cast<T*>() + offset + i, n);
}
return true;
@@ -365,11 +365,11 @@ class ElementSpecific
MOZ_ASSERT(offset <= target->length());
MOZ_ASSERT(source->length() <= target->length() - offset);
SharedMem<T*> dest = SharedMem<T*>(AnyTypedArrayViewData(target)) + offset;
SharedMem<T*> dest = AnyTypedArrayViewData(target).template cast<T*>() + offset;
uint32_t len = source->length();
if (source->type() == target->type()) {
Ops::memmove(dest, SharedMem<T*>(AnyTypedArrayViewData(source)), len*sizeof(T));
Ops::memmove(dest, AnyTypedArrayViewData(source).template cast<T*>(), len*sizeof(T));
return true;
}
@@ -723,7 +723,7 @@ class TypedArrayMethods
MOZ_ASSERT(byteSrc <= viewByteLength - byteSize);
#endif
SharedMem<uint8_t*> data = SharedMem<uint8_t*>(AnyTypedArrayViewData(obj));
SharedMem<uint8_t*> data = AnyTypedArrayViewData(obj).template cast<uint8_t*>();
SharedOps::memmove(data + byteDest, data + byteSrc, byteSize);
// Step 19.