Bug 1225026 - Remove support for atomics on Uint8ClampedArray. r=nbp

Lars T Hansen 2015-12-02 18:26:18 +01:00
parent c158bf9e60
commit b4189bcd19
13 changed files with 55 additions and 79 deletions
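At the user level this change means the Atomics operations keep working on Uint8Array views of shared memory but are rejected on Uint8ClampedArray views, as the updated tests below assert. A minimal shell-style sketch of the new behavior (the buffer size, index, and operand are arbitrary; assertEq is the js shell test helper already used by the tests in this patch):

var sab = new SharedArrayBuffer(4096);
Atomics.add(new Uint8Array(sab), 0, 42);            // byte-sized atomics still work
var thrown = false;
try {
    Atomics.add(new Uint8ClampedArray(sab), 0, 42); // no longer supported
} catch (e) {
    thrown = true;
    assertEq(e instanceof TypeError, true);         // rejected with a TypeError
}
assertEq(thrown, true);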

View File

@ -139,13 +139,6 @@ CompareExchange(Scalar::Type viewType, int32_t oldCandidate, int32_t newCandidat
oldval, newval);
return oldval;
}
case Scalar::Uint8Clamped: {
uint8_t oldval = ClampIntForUint8Array(oldCandidate);
uint8_t newval = ClampIntForUint8Array(newCandidate);
oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<uint8_t*>() + offset,
oldval, newval);
return oldval;
}
case Scalar::Int16: {
int16_t oldval = (int16_t)oldCandidate;
int16_t newval = (int16_t)newCandidate;
@ -235,8 +228,7 @@ js::atomics_load(JSContext* cx, unsigned argc, Value* vp)
SharedMem<void*> viewData = view->viewDataShared();
switch (view->type()) {
case Scalar::Uint8:
case Scalar::Uint8Clamped: {
case Scalar::Uint8: {
uint8_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<uint8_t*>() + offset);
r.setInt32(v);
return true;
@ -300,11 +292,6 @@ ExchangeOrStore(Scalar::Type viewType, int32_t numberValue, SharedMem<void*> vie
INT_OP(viewData.cast<uint8_t*>() + offset, value);
return value;
}
case Scalar::Uint8Clamped: {
uint8_t value = ClampIntForUint8Array(numberValue);
INT_OP(viewData.cast<uint8_t*>() + offset, value);
return value;
}
case Scalar::Int16: {
int16_t value = (int16_t)numberValue;
INT_OP(viewData.cast<int16_t*>() + offset, value);
@ -406,26 +393,6 @@ AtomicsBinop(JSContext* cx, HandleValue objv, HandleValue idxv, HandleValue valv
r.setInt32(T::operate(viewData.cast<uint8_t*>() + offset, v));
return true;
}
case Scalar::Uint8Clamped: {
// Spec says:
// - clamp the input value
// - perform the operation
// - clamp the result
// - store the result
// This requires a CAS loop.
int32_t value = ClampIntForUint8Array(numberValue);
SharedMem<uint8_t*> loc = viewData.cast<uint8_t*>() + offset;
for (;;) {
uint8_t old = jit::AtomicOperations::loadSafeWhenRacy(loc);
uint8_t result = (uint8_t)ClampIntForUint8Array(T::perform(old, value));
uint8_t tmp = jit::AtomicOperations::compareExchangeSeqCst(loc, old, result);
if (tmp == old) {
r.setInt32(old);
break;
}
}
return true;
}
case Scalar::Int16: {
int16_t v = (int16_t)numberValue;
r.setInt32(T::operate(viewData.cast<int16_t*>() + offset, v));

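The removed Uint8Clamped binop case above followed the spec algorithm spelled out in its comment: clamp the input, perform the operation, clamp the result, and store it via a compare-and-swap loop. For reference, the same pattern can still be expressed in user code against a plain Uint8Array view of the shared buffer. This is only an illustrative sketch, not part of the patch: the function name is hypothetical, and Math.round ignores the ties-to-even rounding that real Uint8ClampedArray conversion performs.

function clampedAtomicAdd(u8, i, v) {    // u8: a Uint8Array on a SharedArrayBuffer
    var value = Math.max(0, Math.min(255, Math.round(v)));    // clamp the input
    for (;;) {
        var old = Atomics.load(u8, i);
        var result = Math.max(0, Math.min(255, old + value)); // clamp the result
        if (Atomics.compareExchange(u8, i, old, result) === old)
            return old;                                       // CAS succeeded
    }
}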
View File

@ -409,6 +409,19 @@ function testIsLockFree() {
assertEq(Atomics.isLockFree(12), false);
}
function testUint8Clamped(sab) {
var ta = new Uint8ClampedArray(sab);
var thrown = false;
try {
CLONE(testMethod)(ta, 0);
}
catch (e) {
thrown = true;
assertEq(e instanceof TypeError, true);
}
assertEq(thrown, true);
}
function isLittleEndian() {
var xxx = new ArrayBuffer(2);
var xxa = new Int16Array(xxx);
@ -440,7 +453,6 @@ function runTests() {
// Test that invoking as Atomics.whatever() works, on correct arguments.
CLONE(testMethod)(new Int8Array(sab), 0, 42, 4095);
CLONE(testMethod)(new Uint8Array(sab), 0, 42, 4095);
CLONE(testMethod)(new Uint8ClampedArray(sab), 0, 42, 4095);
CLONE(testMethod)(new Int16Array(sab), 0, 42, 2047);
CLONE(testMethod)(new Uint16Array(sab), 0, 42, 2047);
CLONE(testMethod)(new Int32Array(sab), 0, 42, 1023);
@ -460,7 +472,6 @@ function runTests() {
CLONE(testFunction)(new Int8Array(sab), 0, 42, 4095);
CLONE(testFunction)(new Uint8Array(sab), 0, 42, 4095);
CLONE(testFunction)(new Uint8ClampedArray(sab), 0, 42, 4095);
CLONE(testFunction)(new Int16Array(sab), 0, 42, 2047);
CLONE(testFunction)(new Uint16Array(sab), 0, 42, 2047);
CLONE(testFunction)(new Int32Array(sab), 0, 42, 1023);
@ -497,6 +508,9 @@ function runTests() {
testInt16Extremes(new Int16Array(sab));
testUint32(new Uint32Array(sab));
// Test that Uint8ClampedArray is not accepted.
testUint8Clamped(sab);
// Misc ad-hoc tests
adHocExchange();

View File

@ -2975,7 +2975,7 @@ IonBuilder::atomicsMeetsPreconditions(CallInfo& callInfo, Scalar::Type* arrayTyp
// be.
return checkResult == DontCheckAtomicResult || getInlineReturnType() == MIRType_Double;
default:
// Excludes floating types and Uint8Clamped
// Excludes floating types and Uint8Clamped.
return false;
}
}

View File

@ -13195,8 +13195,7 @@ class MCompareExchangeTypedArrayElement
}
bool isByteArray() const {
return (arrayType_ == Scalar::Int8 ||
arrayType_ == Scalar::Uint8 ||
arrayType_ == Scalar::Uint8Clamped);
arrayType_ == Scalar::Uint8);
}
MDefinition* elements() {
return getOperand(0);
@ -13250,8 +13249,7 @@ class MAtomicExchangeTypedArrayElement
bool isByteArray() const {
return (arrayType_ == Scalar::Int8 ||
arrayType_ == Scalar::Uint8 ||
arrayType_ == Scalar::Uint8Clamped);
arrayType_ == Scalar::Uint8);
}
MDefinition* elements() {
return getOperand(0);
@ -13302,8 +13300,7 @@ class MAtomicTypedArrayElementBinop
bool isByteArray() const {
return (arrayType_ == Scalar::Int8 ||
arrayType_ == Scalar::Uint8 ||
arrayType_ == Scalar::Uint8Clamped);
arrayType_ == Scalar::Uint8);
}
AtomicOp operation() const {
return op_;

View File

@ -1742,7 +1742,6 @@ CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType
MOZ_ASSERT(flagTemp != InvalidReg);
MOZ_ASSERT_IF(arrayType == Scalar::Uint32, outTemp != InvalidReg);
// Uint8Clamped is explicitly not supported here
switch (arrayType) {
case Scalar::Int8:
switch (op) {
@ -1908,7 +1907,6 @@ CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType
{
MOZ_ASSERT(flagTemp != InvalidReg);
// Uint8Clamped is explicitly not supported here
switch (arrayType) {
case Scalar::Int8:
case Scalar::Uint8:

View File

@ -4828,9 +4828,6 @@ MacroAssemblerARMCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType,
case Scalar::Uint8:
compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
break;
case Scalar::Uint8Clamped:
compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
break;
case Scalar::Int16:
compareExchange16SignExtend(mem, oldval, newval, output.gpr());
break;
@ -4873,9 +4870,6 @@ MacroAssemblerARMCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, c
case Scalar::Uint8:
atomicExchange8ZeroExtend(mem, value, output.gpr());
break;
case Scalar::Uint8Clamped:
atomicExchange8ZeroExtend(mem, value, output.gpr());
break;
case Scalar::Int16:
atomicExchange16SignExtend(mem, value, output.gpr());
break;

View File

@ -273,9 +273,6 @@ MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, con
case Scalar::Uint8:
compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
break;
case Scalar::Uint8Clamped:
compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
break;
case Scalar::Int16:
compareExchange16SignExtend(mem, oldval, newval, output.gpr());
break;
@ -318,9 +315,6 @@ MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, cons
case Scalar::Uint8:
atomicExchange8ZeroExtend(mem, value, output.gpr());
break;
case Scalar::Uint8Clamped:
atomicExchange8ZeroExtend(mem, value, output.gpr());
break;
case Scalar::Int16:
atomicExchange16SignExtend(mem, value, output.gpr());
break;

View File

@ -2042,7 +2042,6 @@ CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type ar
MOZ_ASSERT(flagTemp != InvalidReg);
MOZ_ASSERT_IF(arrayType == Scalar::Uint32, outTemp != InvalidReg);
// Uint8Clamped is explicitly not supported here
switch (arrayType) {
case Scalar::Int8:
switch (op) {
@ -2213,7 +2212,6 @@ CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type ar
{
MOZ_ASSERT(flagTemp != InvalidReg);
// Uint8Clamped is explicitly not supported here
switch (arrayType) {
case Scalar::Int8:
case Scalar::Uint8:

View File

@ -2437,9 +2437,6 @@ MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType,
case Scalar::Uint8:
compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint8Clamped:
compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Int16:
compareExchange16SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
@ -2486,9 +2483,6 @@ MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType,
case Scalar::Uint8:
atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint8Clamped:
atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Int16:
atomicExchange16SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;

View File

@ -2540,9 +2540,6 @@ MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayTyp
case Scalar::Uint8:
compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint8Clamped:
compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Int16:
compareExchange16SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
@ -2589,9 +2586,6 @@ MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType
case Scalar::Uint8:
atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint8Clamped:
atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Int16:
atomicExchange16SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;

View File

@ -3366,7 +3366,6 @@ void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
const T& mem, Register temp1, Register temp2, AnyRegister output)
{
// Uint8Clamped is explicitly not supported here
switch (arrayType) {
case Scalar::Int8:
switch (op) {
@ -3526,7 +3525,6 @@ void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
const T& mem)
{
// Uint8Clamped is explicitly not supported here
switch (arrayType) {
case Scalar::Int8:
case Scalar::Uint8:

View File

@ -156,9 +156,6 @@ MacroAssemblerX86Shared::compareExchangeToTypedIntArray(Scalar::Type arrayType,
case Scalar::Uint8:
compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
break;
case Scalar::Uint8Clamped:
compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
break;
case Scalar::Int16:
compareExchange16SignExtend(mem, oldval, newval, output.gpr());
break;
@ -201,9 +198,6 @@ MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, c
case Scalar::Uint8:
atomicExchange8ZeroExtend(mem, value, output.gpr());
break;
case Scalar::Uint8Clamped:
atomicExchange8ZeroExtend(mem, value, output.gpr());
break;
case Scalar::Int16:
atomicExchange16SignExtend(mem, value, output.gpr());
break;

View File

@ -191,10 +191,44 @@ function testClone2() {
assertEq(ia2[10], 37);
}
function testApplicable() {
var sab = b;
var x;
// Just make sure we can create all the view types on shared memory.
x = new Int32Array(sab);
assertEq(x.length, sab.byteLength / Int32Array.BYTES_PER_ELEMENT);
x = new Uint32Array(sab);
assertEq(x.length, sab.byteLength / Uint32Array.BYTES_PER_ELEMENT);
x = new Int16Array(sab);
assertEq(x.length, sab.byteLength / Int16Array.BYTES_PER_ELEMENT);
x = new Uint16Array(sab);
assertEq(x.length, sab.byteLength / Uint16Array.BYTES_PER_ELEMENT);
x = new Int8Array(sab);
assertEq(x.length, sab.byteLength / Int8Array.BYTES_PER_ELEMENT);
x = new Uint8Array(sab);
assertEq(x.length, sab.byteLength / Uint8Array.BYTES_PER_ELEMENT);
// Though the atomic operations are illegal on Uint8ClampedArray and the
// float arrays, they can still be used to create views on shared memory.
x = new Uint8ClampedArray(sab);
assertEq(x.length, sab.byteLength / Uint8ClampedArray.BYTES_PER_ELEMENT);
x = new Float32Array(sab);
assertEq(x.length, sab.byteLength / Float32Array.BYTES_PER_ELEMENT);
x = new Float64Array(sab);
assertEq(x.length, sab.byteLength / Float64Array.BYTES_PER_ELEMENT);
}
testSharedArrayBuffer();
testSharedTypedArray();
testSharedTypedArrayMethods();
testClone1();
testClone2();
testApplicable();
reportCompare(0, 0, 'ok');