Mirror of https://gitlab.winehq.org/wine/wine-gecko.git (synced 2024-09-13 09:24:08 -07:00)
Bug 1077014 - MacroAssembler API. r=h4writer
This commit is contained in:
parent 70f238c341
commit cbb2121a90
@@ -642,6 +642,98 @@ MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                                            const Register &value, const BaseIndex &mem,
                                                            Register temp1, Register temp2, AnyRegister output);

// Binary operation for effect, result discarded.
template<typename S, typename T>
void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S &value,
                                           const T &mem)
{
    // Uint8Clamped is explicitly not supported here
    switch (arrayType) {
      case Scalar::Int8:
      case Scalar::Uint8:
        switch (op) {
          case AtomicFetchAddOp:
            atomicAdd8(value, mem);
            break;
          case AtomicFetchSubOp:
            atomicSub8(value, mem);
            break;
          case AtomicFetchAndOp:
            atomicAnd8(value, mem);
            break;
          case AtomicFetchOrOp:
            atomicOr8(value, mem);
            break;
          case AtomicFetchXorOp:
            atomicXor8(value, mem);
            break;
          default:
            MOZ_CRASH("Invalid typed array atomic operation");
        }
        break;
      case Scalar::Int16:
      case Scalar::Uint16:
        switch (op) {
          case AtomicFetchAddOp:
            atomicAdd16(value, mem);
            break;
          case AtomicFetchSubOp:
            atomicSub16(value, mem);
            break;
          case AtomicFetchAndOp:
            atomicAnd16(value, mem);
            break;
          case AtomicFetchOrOp:
            atomicOr16(value, mem);
            break;
          case AtomicFetchXorOp:
            atomicXor16(value, mem);
            break;
          default:
            MOZ_CRASH("Invalid typed array atomic operation");
        }
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
        switch (op) {
          case AtomicFetchAddOp:
            atomicAdd32(value, mem);
            break;
          case AtomicFetchSubOp:
            atomicSub32(value, mem);
            break;
          case AtomicFetchAndOp:
            atomicAnd32(value, mem);
            break;
          case AtomicFetchOrOp:
            atomicOr32(value, mem);
            break;
          case AtomicFetchXorOp:
            atomicXor32(value, mem);
            break;
          default:
            MOZ_CRASH("Invalid typed array atomic operation");
        }
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}

template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                           const Imm32 &value, const Address &mem);
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                           const Imm32 &value, const BaseIndex &mem);
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                           const Register &value, const Address &mem);
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                           const Register &value, const BaseIndex &mem);

template <typename T>
void
MacroAssembler::loadUnboxedProperty(T address, JSValueType type, TypedOrValueRegister output)
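Note on the hunk above: the new effect-only atomicBinopToTypedIntArray just switches on element width and operation and forwards to the per-width helpers (atomicAdd8/16/32 and friends). The snippet below is a minimal, self-contained sketch of that same "operate for effect, discard the result" shape using std::atomic rather than JIT-emitted code; the names applyAtomicEffect and Op are illustrative only (not part of the patch), and the template parameter T stands in for the Scalar::Type width dispatch.

// Illustrative only: the dispatch-and-discard pattern with std::atomic.
#include <atomic>
#include <cstdint>
#include <cstdio>

enum class Op { Add, Sub, And, Or, Xor };

template <typename T>
void applyAtomicEffect(Op op, std::atomic<T> &cell, T value)
{
    switch (op) {
      case Op::Add: cell.fetch_add(value); break;   // result intentionally discarded
      case Op::Sub: cell.fetch_sub(value); break;
      case Op::And: cell.fetch_and(value); break;
      case Op::Or:  cell.fetch_or(value);  break;
      case Op::Xor: cell.fetch_xor(value); break;
    }
}

int main()
{
    std::atomic<uint8_t> a{40};
    applyAtomicEffect<uint8_t>(Op::Add, a, uint8_t(2));
    std::printf("%u\n", unsigned(a.load()));          // prints 42
}

Any C++11 compiler accepts this; the point is only that the old value is never materialized anywhere in the caller.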
@@ -748,10 +748,15 @@ class MacroAssembler : public MacroAssemblerSpecific
    void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T &mem, Register oldval, Register newval,
                                        Register temp, AnyRegister output);

    // Generating a result.
    template<typename S, typename T>
    void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S &value,
                                    const T &mem, Register temp1, Register temp2, AnyRegister output);

    // Generating no result.
    template<typename S, typename T>
    void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S &value, const T &mem);

    void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex &dest);
    void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address &dest);
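The header hunk above splits the API into two overloads: one "generating a result" (it needs temps and an output register because the old value is materialized) and one "generating no result". In portable C++ terms the distinction is simply whether the fetch-op's return value is consumed; a hedged sketch, where fetchThenAdd and addForEffect are illustrative names and not MacroAssembler API:

// Plain C++ with std::atomic; not MacroAssembler code.
#include <atomic>
#include <cstdint>

uint32_t fetchThenAdd(std::atomic<uint32_t> &cell, uint32_t v)
{
    return cell.fetch_add(v);   // "generating a result": old value returned to the caller
}

void addForEffect(std::atomic<uint32_t> &cell, uint32_t v)
{
    cell.fetch_add(v);          // "generating no result": same RMW, return value dropped
}

When the result is dead, each backend can pick a cheaper instruction sequence, which is what the per-backend hunks below implement.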
@@ -1494,6 +1494,18 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register &value,
                       const T &address, Register temp, Register output);

    template<typename T>
    void atomicEffectOpARMv6(int nbytes, AtomicOp op, const Register &value, const T &address);

    template<typename T>
    void atomicEffectOpARMv7(int nbytes, AtomicOp op, const Register &value, const T &address);

    template<typename T>
    void atomicEffectOp(int nbytes, AtomicOp op, const Imm32 &value, const T &address);

    template<typename T>
    void atomicEffectOp(int nbytes, AtomicOp op, const Register &value, const T &address);

  public:
    // T in {Address,BaseIndex}
    // S in {Imm32,Register}
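The ARM hunk above only declares the per-architecture effect helpers (atomicEffectOpARMv6/ARMv7); their bodies are not shown here. On ARM such an operation is typically a load-exclusive/store-exclusive retry loop even when the result is discarded. Below is a sketch of that retry shape in plain C++, using compare_exchange_weak as a stand-in for LDREX/STREX; atomicAndForEffect is a hypothetical name, and this is not the code the assembler emits.

// Retry-loop shape of an LL/SC effect op, expressed with std::atomic.
#include <atomic>
#include <cstdint>

void atomicAndForEffect(std::atomic<uint32_t> &cell, uint32_t mask)
{
    uint32_t old = cell.load(std::memory_order_relaxed);
    // A spurious failure just refreshes 'old' and retries, which is exactly
    // what a failed STREX forces on ARM.
    while (!cell.compare_exchange_weak(old, old & mask,
                                       std::memory_order_seq_cst,
                                       std::memory_order_relaxed)) {
        // 'old' now holds the current value; loop and try again.
    }
}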
@@ -1543,6 +1555,18 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
    void atomicFetchAdd32(const S &value, const T &mem, Register temp, Register output) {
        atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, temp, output);
    }
    template <typename T, typename S>
    void atomicAdd8(const S &value, const T &mem) {
        atomicEffectOp(1, AtomicFetchAddOp, value, mem);
    }
    template <typename T, typename S>
    void atomicAdd16(const S &value, const T &mem) {
        atomicEffectOp(2, AtomicFetchAddOp, value, mem);
    }
    template <typename T, typename S>
    void atomicAdd32(const S &value, const T &mem) {
        atomicEffectOp(4, AtomicFetchAddOp, value, mem);
    }

    template<typename T, typename S>
    void atomicFetchSub8SignExtend(const S &value, const T &mem, Register temp, Register output) {
@@ -1564,6 +1588,18 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
    void atomicFetchSub32(const S &value, const T &mem, Register temp, Register output) {
        atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, temp, output);
    }
    template <typename T, typename S>
    void atomicSub8(const S &value, const T &mem) {
        atomicEffectOp(1, AtomicFetchSubOp, value, mem);
    }
    template <typename T, typename S>
    void atomicSub16(const S &value, const T &mem) {
        atomicEffectOp(2, AtomicFetchSubOp, value, mem);
    }
    template <typename T, typename S>
    void atomicSub32(const S &value, const T &mem) {
        atomicEffectOp(4, AtomicFetchSubOp, value, mem);
    }

    template<typename T, typename S>
    void atomicFetchAnd8SignExtend(const S &value, const T &mem, Register temp, Register output) {
@@ -1585,6 +1621,18 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
    void atomicFetchAnd32(const S &value, const T &mem, Register temp, Register output) {
        atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, temp, output);
    }
    template <typename T, typename S>
    void atomicAnd8(const S &value, const T &mem) {
        atomicEffectOp(1, AtomicFetchAndOp, value, mem);
    }
    template <typename T, typename S>
    void atomicAnd16(const S &value, const T &mem) {
        atomicEffectOp(2, AtomicFetchAndOp, value, mem);
    }
    template <typename T, typename S>
    void atomicAnd32(const S &value, const T &mem) {
        atomicEffectOp(4, AtomicFetchAndOp, value, mem);
    }

    template<typename T, typename S>
    void atomicFetchOr8SignExtend(const S &value, const T &mem, Register temp, Register output) {
@@ -1606,6 +1654,18 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
    void atomicFetchOr32(const S &value, const T &mem, Register temp, Register output) {
        atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, temp, output);
    }
    template <typename T, typename S>
    void atomicOr8(const S &value, const T &mem) {
        atomicEffectOp(1, AtomicFetchOrOp, value, mem);
    }
    template <typename T, typename S>
    void atomicOr16(const S &value, const T &mem) {
        atomicEffectOp(2, AtomicFetchOrOp, value, mem);
    }
    template <typename T, typename S>
    void atomicOr32(const S &value, const T &mem) {
        atomicEffectOp(4, AtomicFetchOrOp, value, mem);
    }

    template<typename T, typename S>
    void atomicFetchXor8SignExtend(const S &value, const T &mem, Register temp, Register output) {
@@ -1627,6 +1687,18 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
    void atomicFetchXor32(const S &value, const T &mem, Register temp, Register output) {
        atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, temp, output);
    }
    template <typename T, typename S>
    void atomicXor8(const S &value, const T &mem) {
        atomicEffectOp(1, AtomicFetchXorOp, value, mem);
    }
    template <typename T, typename S>
    void atomicXor16(const S &value, const T &mem) {
        atomicEffectOp(2, AtomicFetchXorOp, value, mem);
    }
    template <typename T, typename S>
    void atomicXor32(const S &value, const T &mem) {
        atomicEffectOp(4, AtomicFetchXorOp, value, mem);
    }

    void clampIntToUint8(Register reg) {
        // Look at (reg >> 8) if it is 0, then reg shouldn't be clamped if it is
@@ -324,26 +324,41 @@ class MacroAssemblerNone : public Assembler
    template <typename T, typename S> void atomicFetchAdd16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchAdd16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchAdd32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicAdd8(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicAdd16(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicAdd32(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchSub8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchSub8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchSub16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchSub16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchSub32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicSub8(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicSub16(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicSub32(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchAnd8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchAnd8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchAnd16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchAnd16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchAnd32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicAnd8(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicAnd16(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicAnd32(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchOr8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchOr8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchOr16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchOr16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchOr32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicOr8(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicOr16(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicOr32(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchXor8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchXor8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchXor16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchXor16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicFetchXor32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicXor8(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicXor16(const T &value, const S &mem) { MOZ_CRASH(); }
    template <typename T, typename S> void atomicXor32(const T &value, const S &mem) { MOZ_CRASH(); }

    void clampIntToUint8(Register) { MOZ_CRASH(); }
@@ -500,6 +500,69 @@ class MacroAssemblerX86Shared : public Assembler

#undef ATOMIC_BITOP_BODY

    // S is Register or Imm32; T is Address or BaseIndex.

    template <typename S, typename T>
    void atomicAdd8(const S &src, const T &mem) {
        lock_addb(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicAdd16(const S &src, const T &mem) {
        lock_addw(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicAdd32(const S &src, const T &mem) {
        lock_addl(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicSub8(const S &src, const T &mem) {
        lock_subb(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicSub16(const S &src, const T &mem) {
        lock_subw(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicSub32(const S &src, const T &mem) {
        lock_subl(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicAnd8(const S &src, const T &mem) {
        lock_andb(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicAnd16(const S &src, const T &mem) {
        lock_andw(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicAnd32(const S &src, const T &mem) {
        lock_andl(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicOr8(const S &src, const T &mem) {
        lock_orb(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicOr16(const S &src, const T &mem) {
        lock_orw(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicOr32(const S &src, const T &mem) {
        lock_orl(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicXor8(const S &src, const T &mem) {
        lock_xorb(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicXor16(const S &src, const T &mem) {
        lock_xorw(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicXor32(const S &src, const T &mem) {
        lock_xorl(src, Operand(mem));
    }

    void storeLoadFence() {
        // This implementation follows Linux.
        if (HasSSE2())
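In the x86 hunk above, the effect-only helpers lower directly to lock-prefixed memory-destination instructions (lock_addb/addw/addl and so on), which is why they need no temp or output register. For comparison, a one-line portable sketch; bumpCounter is an illustrative name, and the note about the generated instruction reflects common compiler behaviour rather than a guarantee.

// Discarded-result RMW: the moral equivalent of the lock_addl path above.
#include <atomic>
#include <cstdint>

void bumpCounter(std::atomic<uint32_t> &counter)
{
    counter.fetch_add(1);   // result unused; commonly lowered to: lock add dword ptr [mem], 1
}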