Mirror of https://gitlab.winehq.org/wine/wine-gecko.git (synced 2024-09-13 09:24:08 -07:00)
Bug 1154104 - Clean up atomics code for old Visual Studio versions. r=nfroyd
parent 796e712243
commit a7c442b84d
mfbt/Atomics.h: 392 lines changed
@@ -500,398 +500,6 @@ struct AtomicIntrinsics<T*, Order> : public IntrinsicMemoryOps<T*, Order>,
} // namespace detail
} // namespace mozilla

#elif defined(_MSC_VER)

/*
 * Windows comes with a full complement of atomic operations.
 * Unfortunately, most of those aren't available for Windows XP (even if
 * the compiler supports intrinsics for them), which is the oldest
 * version of Windows we support.  Therefore, we only provide operations
 * on 32-bit datatypes for 32-bit Windows versions; for 64-bit Windows
 * versions, we support 64-bit datatypes as well.
 */

# include <intrin.h>

# pragma intrinsic(_InterlockedExchangeAdd)
# pragma intrinsic(_InterlockedOr)
# pragma intrinsic(_InterlockedXor)
# pragma intrinsic(_InterlockedAnd)
# pragma intrinsic(_InterlockedExchange)
# pragma intrinsic(_InterlockedCompareExchange)

namespace mozilla {
namespace detail {

# if !defined(_M_IX86) && !defined(_M_X64)
/*
 * The implementations below are optimized for x86ish systems.  You
 * will have to modify them if you are porting to Windows on a
 * different architecture.
 */
# error "Unknown CPU type"
# endif

/*
 * The PrimitiveIntrinsics template should define |Type|, the datatype of size
 * DataSize upon which we operate, and the following eight functions.
 *
 * static Type add(Type* aPtr, Type aVal);
 * static Type sub(Type* aPtr, Type aVal);
 * static Type or_(Type* aPtr, Type aVal);
 * static Type xor_(Type* aPtr, Type aVal);
 * static Type and_(Type* aPtr, Type aVal);
 *
 * These functions perform the obvious operation on the value contained in
 * |*aPtr| combined with |aVal| and return the value previously stored in
 * |*aPtr|.
 *
 * static void store(Type* aPtr, Type aVal);
 *
 * This function atomically stores |aVal| into |*aPtr| and must provide a full
 * memory fence after the store to prevent compiler and hardware instruction
 * reordering.  It should also act as a compiler barrier to prevent reads and
 * writes from moving to after the store.
 *
 * static Type exchange(Type* aPtr, Type aVal);
 *
 * This function atomically stores |aVal| into |*aPtr| and returns the
 * previous contents of |*aPtr|;
 *
 * static bool compareExchange(Type* aPtr, Type aOldVal, Type aNewVal);
 *
 * This function atomically performs the following operation:
 *
 *     if (*aPtr == aOldVal) {
 *       *aPtr = aNewVal;
 *       return true;
 *     } else {
 *       return false;
 *     }
 *
 */
template<size_t DataSize> struct PrimitiveIntrinsics;

template<>
struct PrimitiveIntrinsics<4>
{
  typedef long Type;

  static Type add(Type* aPtr, Type aVal)
  {
    return _InterlockedExchangeAdd(aPtr, aVal);
  }

  static Type sub(Type* aPtr, Type aVal)
  {
    /*
     * _InterlockedExchangeSubtract isn't available before Windows 7,
     * and we must support Windows XP.
     */
    return _InterlockedExchangeAdd(aPtr, -aVal);
  }

  static Type or_(Type* aPtr, Type aVal)
  {
    return _InterlockedOr(aPtr, aVal);
  }

  static Type xor_(Type* aPtr, Type aVal)
  {
    return _InterlockedXor(aPtr, aVal);
  }

  static Type and_(Type* aPtr, Type aVal)
  {
    return _InterlockedAnd(aPtr, aVal);
  }

  static void store(Type* aPtr, Type aVal)
  {
    _InterlockedExchange(aPtr, aVal);
  }

  static Type exchange(Type* aPtr, Type aVal)
  {
    return _InterlockedExchange(aPtr, aVal);
  }

  static bool compareExchange(Type* aPtr, Type aOldVal, Type aNewVal)
  {
    return _InterlockedCompareExchange(aPtr, aNewVal, aOldVal) == aOldVal;
  }
};

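/*
 * [Editorial illustration -- not part of the original header.]  A minimal
 * sketch of how the compareExchange contract above is typically consumed:
 * a retry loop that atomically raises a 32-bit value to a new maximum.
 * The function name and the direct use of PrimitiveIntrinsics<4> are
 * assumptions made only for this example.
 */
inline long
ExampleFetchMax(long* aPtr, long aVal)
{
  long observed = *aPtr;
  // Retry until either the stored value is already >= aVal or the exchange
  // succeeds against the value we last observed.
  while (observed < aVal &&
         !PrimitiveIntrinsics<4>::compareExchange(aPtr, observed, aVal)) {
    observed = *aPtr;
  }
  return observed;
}
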
# if defined(_M_X64)

# pragma intrinsic(_InterlockedExchangeAdd64)
# pragma intrinsic(_InterlockedOr64)
# pragma intrinsic(_InterlockedXor64)
# pragma intrinsic(_InterlockedAnd64)
# pragma intrinsic(_InterlockedExchange64)
# pragma intrinsic(_InterlockedCompareExchange64)

template <>
struct PrimitiveIntrinsics<8>
{
  typedef __int64 Type;

  static Type add(Type* aPtr, Type aVal)
  {
    return _InterlockedExchangeAdd64(aPtr, aVal);
  }

  static Type sub(Type* aPtr, Type aVal)
  {
    /*
     * There is no _InterlockedExchangeSubtract64.
     */
    return _InterlockedExchangeAdd64(aPtr, -aVal);
  }

  static Type or_(Type* aPtr, Type aVal)
  {
    return _InterlockedOr64(aPtr, aVal);
  }

  static Type xor_(Type* aPtr, Type aVal)
  {
    return _InterlockedXor64(aPtr, aVal);
  }

  static Type and_(Type* aPtr, Type aVal)
  {
    return _InterlockedAnd64(aPtr, aVal);
  }

  static void store(Type* aPtr, Type aVal)
  {
    _InterlockedExchange64(aPtr, aVal);
  }

  static Type exchange(Type* aPtr, Type aVal)
  {
    return _InterlockedExchange64(aPtr, aVal);
  }

  static bool compareExchange(Type* aPtr, Type aOldVal, Type aNewVal)
  {
    return _InterlockedCompareExchange64(aPtr, aNewVal, aOldVal) == aOldVal;
  }
};

# endif

# pragma intrinsic(_ReadWriteBarrier)

template<MemoryOrdering Order> struct Barrier;

/*
 * We do not provide an afterStore method in Barrier, as Relaxed and
 * ReleaseAcquire orderings do not require one, and the required barrier
 * for SequentiallyConsistent is handled by PrimitiveIntrinsics.
 */

template<>
struct Barrier<Relaxed>
{
  static void beforeLoad() {}
  static void afterLoad() {}
  static void beforeStore() {}
};

template<>
struct Barrier<ReleaseAcquire>
{
  static void beforeLoad() {}
  static void afterLoad() { _ReadWriteBarrier(); }
  static void beforeStore() { _ReadWriteBarrier(); }
};

template<>
struct Barrier<SequentiallyConsistent>
{
  static void beforeLoad() { _ReadWriteBarrier(); }
  static void afterLoad() { _ReadWriteBarrier(); }
  static void beforeStore() { _ReadWriteBarrier(); }
};

template<typename PrimType, typename T>
struct CastHelper
{
  static PrimType toPrimType(T aVal) { return static_cast<PrimType>(aVal); }
  static T fromPrimType(PrimType aVal) { return static_cast<T>(aVal); }
};

template<typename PrimType, typename T>
struct CastHelper<PrimType, T*>
{
  static PrimType toPrimType(T* aVal) { return reinterpret_cast<PrimType>(aVal); }
  static T* fromPrimType(PrimType aVal) { return reinterpret_cast<T*>(aVal); }
};

template<typename T>
struct IntrinsicBase
{
  typedef T ValueType;
  typedef PrimitiveIntrinsics<sizeof(T)> Primitives;
  typedef typename Primitives::Type PrimType;
  static_assert(sizeof(PrimType) == sizeof(T),
                "Selection of PrimitiveIntrinsics was wrong");
  typedef CastHelper<PrimType, T> Cast;
};

template<typename T, MemoryOrdering Order>
struct IntrinsicMemoryOps : public IntrinsicBase<T>
{
  typedef typename IntrinsicBase<T>::ValueType ValueType;
  typedef typename IntrinsicBase<T>::Primitives Primitives;
  typedef typename IntrinsicBase<T>::PrimType PrimType;
  typedef typename IntrinsicBase<T>::Cast Cast;

  static ValueType load(const ValueType& aPtr)
  {
    Barrier<Order>::beforeLoad();
    ValueType val = aPtr;
    Barrier<Order>::afterLoad();
    return val;
  }

  static void store(ValueType& aPtr, ValueType aVal)
  {
    // For SequentiallyConsistent, Primitives::store() will generate the
    // proper memory fence.  Everything else just needs a barrier before
    // the store.
    if (Order == SequentiallyConsistent) {
      Primitives::store(reinterpret_cast<PrimType*>(&aPtr),
                        Cast::toPrimType(aVal));
    } else {
      Barrier<Order>::beforeStore();
      aPtr = aVal;
    }
  }

  static ValueType exchange(ValueType& aPtr, ValueType aVal)
  {
    PrimType oldval =
      Primitives::exchange(reinterpret_cast<PrimType*>(&aPtr),
                           Cast::toPrimType(aVal));
    return Cast::fromPrimType(oldval);
  }

  static bool compareExchange(ValueType& aPtr, ValueType aOldVal,
                              ValueType aNewVal)
  {
    return Primitives::compareExchange(reinterpret_cast<PrimType*>(&aPtr),
                                       Cast::toPrimType(aOldVal),
                                       Cast::toPrimType(aNewVal));
  }
};

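/*
 * [Editorial illustration -- not part of the original header.]  A small
 * sketch showing which store() path above each ordering selects; the
 * function and parameter names are assumptions made only for this example.
 */
inline void
ExampleStores(unsigned int& aFlag)
{
  // ReleaseAcquire: Barrier<ReleaseAcquire>::beforeStore() is a compiler
  // barrier, then a plain store follows -- enough on x86, where stores are
  // not reordered with earlier stores.
  IntrinsicMemoryOps<unsigned int, ReleaseAcquire>::store(aFlag, 1);

  // SequentiallyConsistent: routed through Primitives::store(), i.e.
  // _InterlockedExchange, which supplies the full memory fence that the
  // PrimitiveIntrinsics contract requires.
  IntrinsicMemoryOps<unsigned int, SequentiallyConsistent>::store(aFlag, 2);
}
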
template<typename T>
struct IntrinsicApplyHelper : public IntrinsicBase<T>
{
  typedef typename IntrinsicBase<T>::ValueType ValueType;
  typedef typename IntrinsicBase<T>::PrimType PrimType;
  typedef typename IntrinsicBase<T>::Cast Cast;
  typedef PrimType (*BinaryOp)(PrimType*, PrimType);
  typedef PrimType (*UnaryOp)(PrimType*);

  static ValueType applyBinaryFunction(BinaryOp aOp, ValueType& aPtr,
                                       ValueType aVal)
  {
    PrimType* primTypePtr = reinterpret_cast<PrimType*>(&aPtr);
    PrimType primTypeVal = Cast::toPrimType(aVal);
    return Cast::fromPrimType(aOp(primTypePtr, primTypeVal));
  }

  static ValueType applyUnaryFunction(UnaryOp aOp, ValueType& aPtr)
  {
    PrimType* primTypePtr = reinterpret_cast<PrimType*>(&aPtr);
    return Cast::fromPrimType(aOp(primTypePtr));
  }
};

template<typename T>
struct IntrinsicAddSub : public IntrinsicApplyHelper<T>
{
  typedef typename IntrinsicApplyHelper<T>::ValueType ValueType;
  typedef typename IntrinsicBase<T>::Primitives Primitives;

  static ValueType add(ValueType& aPtr, ValueType aVal)
  {
    return applyBinaryFunction(&Primitives::add, aPtr, aVal);
  }

  static ValueType sub(ValueType& aPtr, ValueType aVal)
  {
    return applyBinaryFunction(&Primitives::sub, aPtr, aVal);
  }
};

template<typename T>
struct IntrinsicAddSub<T*> : public IntrinsicApplyHelper<T*>
{
  typedef typename IntrinsicApplyHelper<T*>::ValueType ValueType;
  typedef typename IntrinsicBase<T*>::Primitives Primitives;

  static ValueType add(ValueType& aPtr, ptrdiff_t aAmount)
  {
    return applyBinaryFunction(&Primitives::add, aPtr,
                               (ValueType)(aAmount * sizeof(T)));
  }

  static ValueType sub(ValueType& aPtr, ptrdiff_t aAmount)
  {
    return applyBinaryFunction(&Primitives::sub, aPtr,
                               (ValueType)(aAmount * sizeof(T)));
  }
};

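/*
 * [Editorial illustration -- not part of the original header.]  The T*
 * specialization above scales the operand by sizeof(T) because the
 * intrinsics only see a raw integer, so ordinary pointer-arithmetic
 * semantics have to be reproduced by hand.  The function name below is an
 * assumption made only for this example.
 */
inline int*
ExampleAdvance(int*& aCursor)
{
  // Atomically performs "aCursor += 3": the stored address grows by
  // 3 * sizeof(int) bytes.  Returns the previous pointer value.
  return IntrinsicAddSub<int*>::add(aCursor, 3);
}
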
template<typename T>
struct IntrinsicIncDec : public IntrinsicAddSub<T>
{
  typedef typename IntrinsicAddSub<T>::ValueType ValueType;
  static ValueType inc(ValueType& aPtr) { return add(aPtr, 1); }
  static ValueType dec(ValueType& aPtr) { return sub(aPtr, 1); }
};

template<typename T, MemoryOrdering Order>
struct AtomicIntrinsics : public IntrinsicMemoryOps<T, Order>,
                          public IntrinsicIncDec<T>
{
  typedef typename IntrinsicIncDec<T>::ValueType ValueType;
  typedef typename IntrinsicBase<T>::Primitives Primitives;

  static ValueType or_(ValueType& aPtr, T aVal)
  {
    return applyBinaryFunction(&Primitives::or_, aPtr, aVal);
  }

  static ValueType xor_(ValueType& aPtr, T aVal)
  {
    return applyBinaryFunction(&Primitives::xor_, aPtr, aVal);
  }

  static ValueType and_(ValueType& aPtr, T aVal)
  {
    return applyBinaryFunction(&Primitives::and_, aPtr, aVal);
  }
};

template<typename T, MemoryOrdering Order>
struct AtomicIntrinsics<T*, Order> : public IntrinsicMemoryOps<T*, Order>,
                                     public IntrinsicIncDec<T*>
{
  typedef typename IntrinsicMemoryOps<T*, Order>::ValueType ValueType;
  // This is required to make us be able to build with MSVC10, for unknown
  // reasons.
  typedef typename IntrinsicBase<T*>::Primitives Primitives;
};

} // namespace detail
} // namespace mozilla

#else
# error "Atomic compiler intrinsics are not supported on your platform"
#endif
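The commit message ("Clean up atomics code for old Visual Studio versions") points at why the whole _MSC_VER block above could go: once the supported MSVC versions ship a usable <atomic>, the hand-rolled intrinsics are redundant. The sketch below shows the kind of standard-library equivalent; it is illustrative only, is not the code this commit adds, and its names are assumptions.

#include <atomic>
#include <cstdint>

// Illustrative std::atomic counterpart to the removed MSVC machinery.
std::atomic<uint32_t> gCounter{0};

uint32_t ExampleStdAtomic()
{
  gCounter.fetch_add(1, std::memory_order_seq_cst);   // add()
  gCounter.fetch_or(0x4, std::memory_order_seq_cst);  // or_()
  gCounter.store(7, std::memory_order_release);       // release store
  uint32_t expected = 7;
  gCounter.compare_exchange_strong(expected, 9);      // compareExchange()
  return gCounter.load(std::memory_order_acquire);    // acquire load
}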