Bug 1026319 - Convert the second quarter of MFBT to Gecko style. r=froydnj.

--HG--
extra : rebase_source : 98d2557c7fe4648d79143c654e7e31767fca2e65
This commit is contained in:
Nicholas Nethercote 2014-06-12 23:34:08 -07:00
parent 2ff2a27904
commit 386e3bffeb
30 changed files with 1794 additions and 1672 deletions

View File

@ -8814,7 +8814,7 @@ CodeGenerator::emitAssertRangeD(const Range *r, FloatRegister input, FloatRegist
// assembler interfaces to make rounding instructions available.
if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
r->exponent() < FloatingPoint<double>::ExponentBias)
r->exponent() < FloatingPoint<double>::kExponentBias)
{
// Check the bounds implied by the maximum exponent.
Label exponentLoOk;

View File

@ -120,10 +120,10 @@ class Range : public TempObject {
// Maximal exponent under which we have no precision loss on double
// operations. Double has 52 bits of mantissa, so 2^52+1 cannot be
// represented without loss.
static const uint16_t MaxTruncatableExponent = mozilla::FloatingPoint<double>::ExponentShift;
static const uint16_t MaxTruncatableExponent = mozilla::FloatingPoint<double>::kExponentShift;
// Maximum exponent for finite values.
static const uint16_t MaxFiniteExponent = mozilla::FloatingPoint<double>::ExponentBias;
static const uint16_t MaxFiniteExponent = mozilla::FloatingPoint<double>::kExponentBias;
// A special exponent value representing all non-NaN values. This
// includes finite values and the infinities.

View File

@ -538,7 +538,7 @@ CodeGeneratorX86Shared::visitAbsD(LAbsD *ins)
FloatRegister input = ToFloatRegister(ins->input());
JS_ASSERT(input == ToFloatRegister(ins->output()));
// Load a value which is all ones except for the sign bit.
masm.loadConstantDouble(SpecificNaN<double>(0, FloatingPoint<double>::SignificandBits),
masm.loadConstantDouble(SpecificNaN<double>(0, FloatingPoint<double>::kSignificandBits),
ScratchFloatReg);
masm.andpd(ScratchFloatReg, input);
return true;
@ -550,7 +550,7 @@ CodeGeneratorX86Shared::visitAbsF(LAbsF *ins)
FloatRegister input = ToFloatRegister(ins->input());
JS_ASSERT(input == ToFloatRegister(ins->output()));
// Same trick as visitAbsD above.
masm.loadConstantFloat32(SpecificNaN<float>(0, FloatingPoint<float>::SignificandBits),
masm.loadConstantFloat32(SpecificNaN<float>(0, FloatingPoint<float>::kSignificandBits),
ScratchFloatReg);
masm.andps(ScratchFloatReg, input);
return true;

View File

@ -705,8 +705,8 @@ CodeGeneratorX86::visitOutOfLineTruncate(OutOfLineTruncate *ool)
masm.storeDouble(input, Operand(esp, 0));
static const uint32_t EXPONENT_MASK = 0x7ff00000;
static const uint32_t EXPONENT_SHIFT = FloatingPoint<double>::ExponentShift - 32;
static const uint32_t TOO_BIG_EXPONENT = (FloatingPoint<double>::ExponentBias + 63)
static const uint32_t EXPONENT_SHIFT = FloatingPoint<double>::kExponentShift - 32;
static const uint32_t TOO_BIG_EXPONENT = (FloatingPoint<double>::kExponentBias + 63)
<< EXPONENT_SHIFT;
// Check exponent to avoid fp exceptions.
@ -793,10 +793,10 @@ CodeGeneratorX86::visitOutOfLineTruncateFloat32(OutOfLineTruncateFloat32 *ool)
masm.subl(Imm32(sizeof(uint64_t)), esp);
masm.storeFloat32(input, Operand(esp, 0));
static const uint32_t EXPONENT_MASK = FloatingPoint<float>::ExponentBits;
static const uint32_t EXPONENT_SHIFT = FloatingPoint<float>::ExponentShift;
static const uint32_t EXPONENT_MASK = FloatingPoint<float>::kExponentBits;
static const uint32_t EXPONENT_SHIFT = FloatingPoint<float>::kExponentShift;
// Integers are still 64 bits long, so we can still test for an exponent > 63.
static const uint32_t TOO_BIG_EXPONENT = (FloatingPoint<float>::ExponentBias + 63)
static const uint32_t TOO_BIG_EXPONENT = (FloatingPoint<float>::kExponentBias + 63)
<< EXPONENT_SHIFT;
// Check exponent to avoid fp exceptions.

View File

@ -784,7 +784,7 @@ js::math_round_impl(double x)
return x;
/* Some numbers are so big that adding 0.5 would give the wrong number. */
if (ExponentComponent(x) >= int_fast16_t(FloatingPoint<double>::ExponentShift))
if (ExponentComponent(x) >= int_fast16_t(FloatingPoint<double>::kExponentShift))
return x;
return js_copysign(floor(x + 0.5), x);
@ -798,7 +798,7 @@ js::math_roundf_impl(float x)
return x;
/* Some numbers are so big that adding 0.5 would give the wrong number. */
if (ExponentComponent(x) >= int_fast16_t(FloatingPoint<float>::ExponentShift))
if (ExponentComponent(x) >= int_fast16_t(FloatingPoint<float>::kExponentShift))
return x;
return js_copysign(floorf(x + 0.5f), x);

View File

@ -39,13 +39,13 @@ ToUintWidth(double d)
"ResultType must be an unsigned type");
uint64_t bits = mozilla::BitwiseCast<uint64_t>(d);
unsigned DoubleExponentShift = mozilla::FloatingPoint<double>::ExponentShift;
unsigned DoubleExponentShift = mozilla::FloatingPoint<double>::kExponentShift;
// Extract the exponent component. (Be careful here! It's not technically
// the exponent in NaN, infinities, and subnormals.)
int_fast16_t exp =
int_fast16_t((bits & mozilla::FloatingPoint<double>::ExponentBits) >> DoubleExponentShift) -
int_fast16_t(mozilla::FloatingPoint<double>::ExponentBias);
int_fast16_t((bits & mozilla::FloatingPoint<double>::kExponentBits) >> DoubleExponentShift) -
int_fast16_t(mozilla::FloatingPoint<double>::kExponentBias);
// If the exponent's less than zero, abs(d) < 1, so the result is 0. (This
// also handles subnormals.)
@ -104,7 +104,7 @@ ToUintWidth(double d)
}
// Compute the congruent value in the signed range.
return (bits & mozilla::FloatingPoint<double>::SignBit) ? ~result + 1 : result;
return (bits & mozilla::FloatingPoint<double>::kSignBit) ? ~result + 1 : result;
}
template<typename ResultType>

View File

@ -409,8 +409,9 @@ void ValidateAssertConditionType()
#ifdef DEBUG
# define MOZ_ASSERT_IF(cond, expr) \
do { \
if (cond) \
if (cond) { \
MOZ_ASSERT(expr); \
} \
} while (0)
#else
# define MOZ_ASSERT_IF(cond, expr) do { } while (0)

View File

@ -273,8 +273,8 @@
* struct NonCopyable
* {
* private:
* NonCopyable(const NonCopyable& other) MOZ_DELETE;
* void operator=(const NonCopyable& other) MOZ_DELETE;
* NonCopyable(const NonCopyable& aOther) MOZ_DELETE;
* void operator=(const NonCopyable& aOther) MOZ_DELETE;
* };
*
* If MOZ_DELETE can't be implemented for the current compiler, use of the

View File

@ -197,8 +197,8 @@ BloomFilter<KeySize, T>::add(uint32_t aHash)
if (MOZ_LIKELY(!full(slot1))) {
++slot1;
}
uint8_t& slot2 = secondSlot(aHash); {
if (MOZ_LIKELY(!full(slot2)))
uint8_t& slot2 = secondSlot(aHash);
if (MOZ_LIKELY(!full(slot2))) {
++slot2;
}
}

View File

@ -42,21 +42,22 @@
*
* class ExampleHeader
* {
* private:
* uint32_t magic;
* uint32_t length;
* uint32_t totalRecords;
* uint64_t checksum;
* private:
* uint32_t mMagic;
* uint32_t mLength;
* uint32_t mTotalRecords;
* uint64_t mChecksum;
*
* public:
* ExampleHeader(const void* data) {
* const uint8_t* ptr = static_cast<const uint8_t*>(data);
* magic = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
* length = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
* totalRecords = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
* checksum = BigEndian::readUint64(ptr);
* }
* ...
* public:
* ExampleHeader(const void* data)
* {
* const uint8_t* ptr = static_cast<const uint8_t*>(data);
* mMagic = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
* mLength = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
* mTotalRecords = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
* mChecksum = BigEndian::readUint64(ptr);
* }
* ...
* };
*/
@ -169,12 +170,12 @@ struct Swapper;
template<typename T>
struct Swapper<T, 2>
{
static T swap(T value)
static T swap(T aValue)
{
#if defined(MOZ_HAVE_BUILTIN_BYTESWAP16)
return MOZ_HAVE_BUILTIN_BYTESWAP16(value);
return MOZ_HAVE_BUILTIN_BYTESWAP16(aValue);
#else
return T(((value & 0x00ff) << 8) | ((value & 0xff00) >> 8));
return T(((aValue & 0x00ff) << 8) | ((aValue & 0xff00) >> 8));
#endif
}
};
@ -182,17 +183,17 @@ struct Swapper<T, 2>
template<typename T>
struct Swapper<T, 4>
{
static T swap(T value)
static T swap(T aValue)
{
#if defined(__clang__) || defined(__GNUC__)
return T(__builtin_bswap32(value));
return T(__builtin_bswap32(aValue));
#elif defined(_MSC_VER)
return T(_byteswap_ulong(value));
return T(_byteswap_ulong(aValue));
#else
return T(((value & 0x000000ffU) << 24) |
((value & 0x0000ff00U) << 8) |
((value & 0x00ff0000U) >> 8) |
((value & 0xff000000U) >> 24));
return T(((aValue & 0x000000ffU) << 24) |
((aValue & 0x0000ff00U) << 8) |
((aValue & 0x00ff0000U) >> 8) |
((aValue & 0xff000000U) >> 24));
#endif
}
};
@ -200,21 +201,21 @@ struct Swapper<T, 4>
template<typename T>
struct Swapper<T, 8>
{
static inline T swap(T value)
static inline T swap(T aValue)
{
#if defined(__clang__) || defined(__GNUC__)
return T(__builtin_bswap64(value));
return T(__builtin_bswap64(aValue));
#elif defined(_MSC_VER)
return T(_byteswap_uint64(value));
return T(_byteswap_uint64(aValue));
#else
return T(((value & 0x00000000000000ffULL) << 56) |
((value & 0x000000000000ff00ULL) << 40) |
((value & 0x0000000000ff0000ULL) << 24) |
((value & 0x00000000ff000000ULL) << 8) |
((value & 0x000000ff00000000ULL) >> 8) |
((value & 0x0000ff0000000000ULL) >> 24) |
((value & 0x00ff000000000000ULL) >> 40) |
((value & 0xff00000000000000ULL) >> 56));
return T(((aValue & 0x00000000000000ffULL) << 56) |
((aValue & 0x000000000000ff00ULL) << 40) |
((aValue & 0x0000000000ff0000ULL) << 24) |
((aValue & 0x00000000ff000000ULL) << 8) |
((aValue & 0x000000ff00000000ULL) >> 8) |
((aValue & 0x0000ff0000000000ULL) >> 24) |
((aValue & 0x00ff000000000000ULL) >> 40) |
((aValue & 0xff00000000000000ULL) >> 56));
#endif
}
};
@ -229,360 +230,414 @@ enum Endianness { Little, Big };
class EndianUtils
{
/**
* Assert that the memory regions [dest, dest+count) and [src, src+count)
* do not overlap. count is given in bytes.
*/
static void assertNoOverlap(const void* dest, const void* src, size_t count)
{
DebugOnly<const uint8_t*> byteDestPtr = static_cast<const uint8_t*>(dest);
DebugOnly<const uint8_t*> byteSrcPtr = static_cast<const uint8_t*>(src);
MOZ_ASSERT((byteDestPtr <= byteSrcPtr &&
byteDestPtr + count <= byteSrcPtr) ||
(byteSrcPtr <= byteDestPtr &&
byteSrcPtr + count <= byteDestPtr));
/**
* Assert that the memory regions [aDest, aDest+aCount) and
* [aSrc, aSrc+aCount) do not overlap. aCount is given in bytes.
*/
static void assertNoOverlap(const void* aDest, const void* aSrc,
size_t aCount)
{
DebugOnly<const uint8_t*> byteDestPtr = static_cast<const uint8_t*>(aDest);
DebugOnly<const uint8_t*> byteSrcPtr = static_cast<const uint8_t*>(aSrc);
MOZ_ASSERT((byteDestPtr <= byteSrcPtr &&
byteDestPtr + aCount <= byteSrcPtr) ||
(byteSrcPtr <= byteDestPtr &&
byteSrcPtr + aCount <= byteDestPtr));
}
template<typename T>
static void assertAligned(T* aPtr)
{
MOZ_ASSERT((uintptr_t(aPtr) % sizeof(T)) == 0, "Unaligned pointer!");
}
protected:
/**
* Return |aValue| converted from SourceEndian encoding to DestEndian
* encoding.
*/
template<Endianness SourceEndian, Endianness DestEndian, typename T>
static inline T maybeSwap(T aValue)
{
if (SourceEndian == DestEndian) {
return aValue;
}
return Swapper<T>::swap(aValue);
}
/**
* Convert |aCount| elements at |aPtr| from SourceEndian encoding to
* DestEndian encoding.
*/
template<Endianness SourceEndian, Endianness DestEndian, typename T>
static inline void maybeSwapInPlace(T* aPtr, size_t aCount)
{
assertAligned(aPtr);
if (SourceEndian == DestEndian) {
return;
}
for (size_t i = 0; i < aCount; i++) {
aPtr[i] = Swapper<T>::swap(aPtr[i]);
}
}
/**
* Write |aCount| elements to the unaligned address |aDest| in DestEndian
* format, using elements found at |aSrc| in SourceEndian format.
*/
template<Endianness SourceEndian, Endianness DestEndian, typename T>
static void copyAndSwapTo(void* aDest, const T* aSrc, size_t aCount)
{
assertNoOverlap(aDest, aSrc, aCount * sizeof(T));
assertAligned(aSrc);
if (SourceEndian == DestEndian) {
memcpy(aDest, aSrc, aCount * sizeof(T));
return;
}
template<typename T>
static void assertAligned(T* ptr)
{
MOZ_ASSERT((uintptr_t(ptr) % sizeof(T)) == 0, "Unaligned pointer!");
uint8_t* byteDestPtr = static_cast<uint8_t*>(aDest);
for (size_t i = 0; i < aCount; ++i) {
union
{
T mVal;
uint8_t mBuffer[sizeof(T)];
} u;
u.mVal = maybeSwap<SourceEndian, DestEndian>(aSrc[i]);
memcpy(byteDestPtr, u.mBuffer, sizeof(T));
byteDestPtr += sizeof(T);
}
}
/**
* Write |aCount| elements to |aDest| in DestEndian format, using elements
* found at the unaligned address |aSrc| in SourceEndian format.
*/
template<Endianness SourceEndian, Endianness DestEndian, typename T>
static void copyAndSwapFrom(T* aDest, const void* aSrc, size_t aCount)
{
assertNoOverlap(aDest, aSrc, aCount * sizeof(T));
assertAligned(aDest);
if (SourceEndian == DestEndian) {
memcpy(aDest, aSrc, aCount * sizeof(T));
return;
}
protected:
/**
* Return |value| converted from SourceEndian encoding to DestEndian
* encoding.
*/
template<Endianness SourceEndian, Endianness DestEndian, typename T>
static inline T maybeSwap(T value)
{
if (SourceEndian == DestEndian)
return value;
return Swapper<T>::swap(value);
}
/**
* Convert |count| elements at |ptr| from SourceEndian encoding to
* DestEndian encoding.
*/
template<Endianness SourceEndian, Endianness DestEndian, typename T>
static inline void maybeSwapInPlace(T* ptr, size_t count)
{
assertAligned(ptr);
if (SourceEndian == DestEndian)
return;
for (size_t i = 0; i < count; i++)
ptr[i] = Swapper<T>::swap(ptr[i]);
}
/**
* Write |count| elements to the unaligned address |dest| in DestEndian
* format, using elements found at |src| in SourceEndian format.
*/
template<Endianness SourceEndian, Endianness DestEndian, typename T>
static void copyAndSwapTo(void* dest, const T* src, size_t count)
{
assertNoOverlap(dest, src, count * sizeof(T));
assertAligned(src);
if (SourceEndian == DestEndian) {
memcpy(dest, src, count * sizeof(T));
return;
}
uint8_t* byteDestPtr = static_cast<uint8_t*>(dest);
for (size_t i = 0; i < count; ++i) {
union {
T val;
uint8_t buffer[sizeof(T)];
} u;
u.val = maybeSwap<SourceEndian, DestEndian>(src[i]);
memcpy(byteDestPtr, u.buffer, sizeof(T));
byteDestPtr += sizeof(T);
}
}
/**
* Write |count| elements to |dest| in DestEndian format, using elements
* found at the unaligned address |src| in SourceEndian format.
*/
template<Endianness SourceEndian, Endianness DestEndian, typename T>
static void copyAndSwapFrom(T* dest, const void* src, size_t count)
{
assertNoOverlap(dest, src, count * sizeof(T));
assertAligned(dest);
if (SourceEndian == DestEndian) {
memcpy(dest, src, count * sizeof(T));
return;
}
const uint8_t* byteSrcPtr = static_cast<const uint8_t*>(src);
for (size_t i = 0; i < count; ++i) {
union {
T val;
uint8_t buffer[sizeof(T)];
} u;
memcpy(u.buffer, byteSrcPtr, sizeof(T));
dest[i] = maybeSwap<SourceEndian, DestEndian>(u.val);
byteSrcPtr += sizeof(T);
}
const uint8_t* byteSrcPtr = static_cast<const uint8_t*>(aSrc);
for (size_t i = 0; i < aCount; ++i) {
union
{
T mVal;
uint8_t mBuffer[sizeof(T)];
} u;
memcpy(u.mBuffer, byteSrcPtr, sizeof(T));
aDest[i] = maybeSwap<SourceEndian, DestEndian>(u.mVal);
byteSrcPtr += sizeof(T);
}
}
};
template<Endianness ThisEndian>
class Endian : private EndianUtils
{
protected:
/** Read a uint16_t in ThisEndian endianness from |p| and return it. */
static MOZ_WARN_UNUSED_RESULT uint16_t readUint16(const void* p) {
return read<uint16_t>(p);
}
protected:
/** Read a uint16_t in ThisEndian endianness from |aPtr| and return it. */
static MOZ_WARN_UNUSED_RESULT uint16_t readUint16(const void* aPtr)
{
return read<uint16_t>(aPtr);
}
/** Read a uint32_t in ThisEndian endianness from |p| and return it. */
static MOZ_WARN_UNUSED_RESULT uint32_t readUint32(const void* p) {
return read<uint32_t>(p);
}
/** Read a uint32_t in ThisEndian endianness from |aPtr| and return it. */
static MOZ_WARN_UNUSED_RESULT uint32_t readUint32(const void* aPtr)
{
return read<uint32_t>(aPtr);
}
/** Read a uint64_t in ThisEndian endianness from |p| and return it. */
static MOZ_WARN_UNUSED_RESULT uint64_t readUint64(const void* p) {
return read<uint64_t>(p);
}
/** Read a uint64_t in ThisEndian endianness from |aPtr| and return it. */
static MOZ_WARN_UNUSED_RESULT uint64_t readUint64(const void* aPtr)
{
return read<uint64_t>(aPtr);
}
/** Read an int16_t in ThisEndian endianness from |p| and return it. */
static MOZ_WARN_UNUSED_RESULT int16_t readInt16(const void* p) {
return read<int16_t>(p);
}
/** Read an int16_t in ThisEndian endianness from |aPtr| and return it. */
static MOZ_WARN_UNUSED_RESULT int16_t readInt16(const void* aPtr)
{
return read<int16_t>(aPtr);
}
/** Read an int32_t in ThisEndian endianness from |p| and return it. */
static MOZ_WARN_UNUSED_RESULT int32_t readInt32(const void* p) {
return read<uint32_t>(p);
}
/** Read an int32_t in ThisEndian endianness from |aPtr| and return it. */
static MOZ_WARN_UNUSED_RESULT int32_t readInt32(const void* aPtr)
{
return read<uint32_t>(aPtr);
}
/** Read an int64_t in ThisEndian endianness from |p| and return it. */
static MOZ_WARN_UNUSED_RESULT int64_t readInt64(const void* p) {
return read<int64_t>(p);
}
/** Read an int64_t in ThisEndian endianness from |aPtr| and return it. */
static MOZ_WARN_UNUSED_RESULT int64_t readInt64(const void* aPtr)
{
return read<int64_t>(aPtr);
}
/** Write |val| to |p| using ThisEndian endianness. */
static void writeUint16(void* p, uint16_t val) {
write(p, val);
}
/** Write |val| to |p| using ThisEndian endianness. */
static void writeUint32(void* p, uint32_t val) {
write(p, val);
}
/** Write |val| to |p| using ThisEndian endianness. */
static void writeUint64(void* p, uint64_t val) {
write(p, val);
}
/** Write |aValue| to |aPtr| using ThisEndian endianness. */
static void writeUint16(void* aPtr, uint16_t aValue)
{
write(aPtr, aValue);
}
/** Write |val| to |p| using ThisEndian endianness. */
static void writeInt16(void* p, int16_t val) {
write(p, val);
}
/** Write |val| to |p| using ThisEndian endianness. */
static void writeInt32(void* p, int32_t val) {
write(p, val);
}
/** Write |val| to |p| using ThisEndian endianness. */
static void writeInt64(void* p, int64_t val) {
write(p, val);
}
/** Write |aValue| to |aPtr| using ThisEndian endianness. */
static void writeUint32(void* aPtr, uint32_t aValue)
{
write(aPtr, aValue);
}
/*
* Converts a value of type T to little-endian format.
*
* This function is intended for cases where you have data in your
* native-endian format and you need it to appear in little-endian
* format for transmission.
*/
template<typename T>
MOZ_WARN_UNUSED_RESULT static T swapToLittleEndian(T value) {
return maybeSwap<ThisEndian, Little>(value);
}
/*
* Copies count values of type T starting at src to dest, converting
* them to little-endian format if ThisEndian is Big.
* As with memcpy, dest and src must not overlap.
*/
template<typename T>
static void copyAndSwapToLittleEndian(void* dest, const T* src,
size_t count) {
copyAndSwapTo<ThisEndian, Little>(dest, src, count);
}
/*
* Likewise, but converts values in place.
*/
template<typename T>
static void swapToLittleEndianInPlace(T* p, size_t count) {
maybeSwapInPlace<ThisEndian, Little>(p, count);
}
/** Write |aValue| to |aPtr| using ThisEndian endianness. */
static void writeUint64(void* aPtr, uint64_t aValue)
{
write(aPtr, aValue);
}
/*
* Converts a value of type T to big-endian format.
*/
template<typename T>
MOZ_WARN_UNUSED_RESULT static T swapToBigEndian(T value) {
return maybeSwap<ThisEndian, Big>(value);
}
/*
* Copies count values of type T starting at src to dest, converting
* them to big-endian format if ThisEndian is Little.
* As with memcpy, dest and src must not overlap.
*/
template<typename T>
static void copyAndSwapToBigEndian(void* dest, const T* src, size_t count) {
copyAndSwapTo<ThisEndian, Big>(dest, src, count);
}
/*
* Likewise, but converts values in place.
*/
template<typename T>
static void swapToBigEndianInPlace(T* p, size_t count) {
maybeSwapInPlace<ThisEndian, Big>(p, count);
}
/** Write |aValue| to |aPtr| using ThisEndian endianness. */
static void writeInt16(void* aPtr, int16_t aValue)
{
write(aPtr, aValue);
}
/*
* Synonyms for the big-endian functions, for better readability
* in network code.
*/
template<typename T>
MOZ_WARN_UNUSED_RESULT static T swapToNetworkOrder(T value) {
return swapToBigEndian(value);
}
template<typename T>
static void
copyAndSwapToNetworkOrder(void* dest, const T* src, size_t count) {
copyAndSwapToBigEndian(dest, src, count);
}
template<typename T>
static void
swapToNetworkOrderInPlace(T* p, size_t count) {
swapToBigEndianInPlace(p, count);
}
/** Write |aValue| to |aPtr| using ThisEndian endianness. */
static void writeInt32(void* aPtr, int32_t aValue)
{
write(aPtr, aValue);
}
/*
* Converts a value of type T from little-endian format.
*/
template<typename T>
MOZ_WARN_UNUSED_RESULT static T swapFromLittleEndian(T value) {
return maybeSwap<Little, ThisEndian>(value);
}
/*
* Copies count values of type T starting at src to dest, converting
* them to little-endian format if ThisEndian is Big.
* As with memcpy, dest and src must not overlap.
*/
template<typename T>
static void copyAndSwapFromLittleEndian(T* dest, const void* src,
size_t count) {
copyAndSwapFrom<Little, ThisEndian>(dest, src, count);
}
/*
* Likewise, but converts values in place.
*/
template<typename T>
static void swapFromLittleEndianInPlace(T* p, size_t count) {
maybeSwapInPlace<Little, ThisEndian>(p, count);
}
/** Write |aValue| to |aPtr| using ThisEndian endianness. */
static void writeInt64(void* aPtr, int64_t aValue)
{
write(aPtr, aValue);
}
/*
* Converts a value of type T from big-endian format.
*/
template<typename T>
MOZ_WARN_UNUSED_RESULT static T swapFromBigEndian(T value) {
return maybeSwap<Big, ThisEndian>(value);
}
/*
* Copies count values of type T starting at src to dest, converting
* them to big-endian format if ThisEndian is Little.
* As with memcpy, dest and src must not overlap.
*/
template<typename T>
static void copyAndSwapFromBigEndian(T* dest, const void* src,
size_t count) {
copyAndSwapFrom<Big, ThisEndian>(dest, src, count);
}
/*
* Likewise, but converts values in place.
*/
template<typename T>
static void swapFromBigEndianInPlace(T* p, size_t count) {
maybeSwapInPlace<Big, ThisEndian>(p, count);
}
/*
* Converts a value of type T to little-endian format.
*
* This function is intended for cases where you have data in your
* native-endian format and you need it to appear in little-endian
* format for transmission.
*/
template<typename T>
MOZ_WARN_UNUSED_RESULT static T swapToLittleEndian(T aValue)
{
return maybeSwap<ThisEndian, Little>(aValue);
}
/*
* Synonyms for the big-endian functions, for better readability
* in network code.
*/
template<typename T>
MOZ_WARN_UNUSED_RESULT static T swapFromNetworkOrder(T value) {
return swapFromBigEndian(value);
}
template<typename T>
static void copyAndSwapFromNetworkOrder(T* dest, const void* src,
size_t count) {
copyAndSwapFromBigEndian(dest, src, count);
}
template<typename T>
static void swapFromNetworkOrderInPlace(T* p, size_t count) {
swapFromBigEndianInPlace(p, count);
}
/*
* Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
* them to little-endian format if ThisEndian is Big.
* As with memcpy, |aDest| and |aSrc| must not overlap.
*/
template<typename T>
static void copyAndSwapToLittleEndian(void* aDest, const T* aSrc,
size_t aCount)
{
copyAndSwapTo<ThisEndian, Little>(aDest, aSrc, aCount);
}
private:
/**
* Read a value of type T, encoded in endianness ThisEndian from |p|.
* Return that value encoded in native endianness.
*/
template<typename T>
static T read(const void* p) {
union {
T val;
uint8_t buffer[sizeof(T)];
} u;
memcpy(u.buffer, p, sizeof(T));
return maybeSwap<ThisEndian, MOZ_NATIVE_ENDIANNESS>(u.val);
}
/*
* Likewise, but converts values in place.
*/
template<typename T>
static void swapToLittleEndianInPlace(T* aPtr, size_t aCount)
{
maybeSwapInPlace<ThisEndian, Little>(aPtr, aCount);
}
/**
* Write a value of type T, in native endianness, to |p|, in ThisEndian
* endianness.
*/
template<typename T>
static void write(void* p, T value) {
T tmp = maybeSwap<MOZ_NATIVE_ENDIANNESS, ThisEndian>(value);
memcpy(p, &tmp, sizeof(T));
}
/*
* Converts a value of type T to big-endian format.
*/
template<typename T>
MOZ_WARN_UNUSED_RESULT static T swapToBigEndian(T aValue)
{
return maybeSwap<ThisEndian, Big>(aValue);
}
Endian() MOZ_DELETE;
Endian(const Endian& other) MOZ_DELETE;
void operator=(const Endian& other) MOZ_DELETE;
/*
* Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
* them to big-endian format if ThisEndian is Little.
* As with memcpy, |aDest| and |aSrc| must not overlap.
*/
template<typename T>
static void copyAndSwapToBigEndian(void* aDest, const T* aSrc,
size_t aCount)
{
copyAndSwapTo<ThisEndian, Big>(aDest, aSrc, aCount);
}
/*
* Likewise, but converts values in place.
*/
template<typename T>
static void swapToBigEndianInPlace(T* aPtr, size_t aCount) {
maybeSwapInPlace<ThisEndian, Big>(aPtr, aCount);
}
/*
* Synonyms for the big-endian functions, for better readability
* in network code.
*/
template<typename T>
MOZ_WARN_UNUSED_RESULT static T swapToNetworkOrder(T aValue)
{
return swapToBigEndian(aValue);
}
template<typename T>
static void
copyAndSwapToNetworkOrder(void* aDest, const T* aSrc, size_t aCount)
{
copyAndSwapToBigEndian(aDest, aSrc, aCount);
}
template<typename T>
static void
swapToNetworkOrderInPlace(T* aPtr, size_t aCount)
{
swapToBigEndianInPlace(aPtr, aCount);
}
/*
* Converts a value of type T from little-endian format.
*/
template<typename T>
MOZ_WARN_UNUSED_RESULT static T swapFromLittleEndian(T aValue)
{
return maybeSwap<Little, ThisEndian>(aValue);
}
/*
* Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
* them to little-endian format if ThisEndian is Big.
* As with memcpy, |aDest| and |aSrc| must not overlap.
*/
template<typename T>
static void copyAndSwapFromLittleEndian(T* aDest, const void* aSrc,
size_t aCount)
{
copyAndSwapFrom<Little, ThisEndian>(aDest, aSrc, aCount);
}
/*
* Likewise, but converts values in place.
*/
template<typename T>
static void swapFromLittleEndianInPlace(T* aPtr, size_t aCount)
{
maybeSwapInPlace<Little, ThisEndian>(aPtr, aCount);
}
/*
* Converts a value of type T from big-endian format.
*/
template<typename T>
MOZ_WARN_UNUSED_RESULT static T swapFromBigEndian(T aValue)
{
return maybeSwap<Big, ThisEndian>(aValue);
}
/*
* Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
* them to big-endian format if ThisEndian is Little.
* As with memcpy, |aDest| and |aSrc| must not overlap.
*/
template<typename T>
static void copyAndSwapFromBigEndian(T* aDest, const void* aSrc,
size_t aCount)
{
copyAndSwapFrom<Big, ThisEndian>(aDest, aSrc, aCount);
}
/*
* Likewise, but converts values in place.
*/
template<typename T>
static void swapFromBigEndianInPlace(T* aPtr, size_t aCount)
{
maybeSwapInPlace<Big, ThisEndian>(aPtr, aCount);
}
/*
* Synonyms for the big-endian functions, for better readability
* in network code.
*/
template<typename T>
MOZ_WARN_UNUSED_RESULT static T swapFromNetworkOrder(T aValue)
{
return swapFromBigEndian(aValue);
}
template<typename T>
static void copyAndSwapFromNetworkOrder(T* aDest, const void* aSrc,
size_t aCount)
{
copyAndSwapFromBigEndian(aDest, aSrc, aCount);
}
template<typename T>
static void swapFromNetworkOrderInPlace(T* aPtr, size_t aCount)
{
swapFromBigEndianInPlace(aPtr, aCount);
}
private:
/**
* Read a value of type T, encoded in endianness ThisEndian from |aPtr|.
* Return that value encoded in native endianness.
*/
template<typename T>
static T read(const void* aPtr)
{
union
{
T mVal;
uint8_t mBuffer[sizeof(T)];
} u;
memcpy(u.mBuffer, aPtr, sizeof(T));
return maybeSwap<ThisEndian, MOZ_NATIVE_ENDIANNESS>(u.mVal);
}
/**
* Write a value of type T, in native endianness, to |aPtr|, in ThisEndian
* endianness.
*/
template<typename T>
static void write(void* aPtr, T aValue)
{
T tmp = maybeSwap<MOZ_NATIVE_ENDIANNESS, ThisEndian>(aValue);
memcpy(aPtr, &tmp, sizeof(T));
}
Endian() MOZ_DELETE;
Endian(const Endian& aOther) MOZ_DELETE;
void operator=(const Endian& aOther) MOZ_DELETE;
};
template<Endianness ThisEndian>
class EndianReadWrite : public Endian<ThisEndian>
{
private:
typedef Endian<ThisEndian> super;
private:
typedef Endian<ThisEndian> super;
public:
using super::readUint16;
using super::readUint32;
using super::readUint64;
using super::readInt16;
using super::readInt32;
using super::readInt64;
using super::writeUint16;
using super::writeUint32;
using super::writeUint64;
using super::writeInt16;
using super::writeInt32;
using super::writeInt64;
public:
using super::readUint16;
using super::readUint32;
using super::readUint64;
using super::readInt16;
using super::readInt32;
using super::readInt64;
using super::writeUint16;
using super::writeUint32;
using super::writeUint64;
using super::writeInt16;
using super::writeInt32;
using super::writeInt64;
};
} /* namespace detail */
@ -597,39 +652,39 @@ typedef BigEndian NetworkEndian;
class NativeEndian MOZ_FINAL : public detail::Endian<MOZ_NATIVE_ENDIANNESS>
{
private:
typedef detail::Endian<MOZ_NATIVE_ENDIANNESS> super;
private:
typedef detail::Endian<MOZ_NATIVE_ENDIANNESS> super;
public:
/*
* These functions are intended for cases where you have data in your
* native-endian format and you need the data to appear in the appropriate
* endianness for transmission, serialization, etc.
*/
using super::swapToLittleEndian;
using super::copyAndSwapToLittleEndian;
using super::swapToLittleEndianInPlace;
using super::swapToBigEndian;
using super::copyAndSwapToBigEndian;
using super::swapToBigEndianInPlace;
using super::swapToNetworkOrder;
using super::copyAndSwapToNetworkOrder;
using super::swapToNetworkOrderInPlace;
public:
/*
* These functions are intended for cases where you have data in your
* native-endian format and you need the data to appear in the appropriate
* endianness for transmission, serialization, etc.
*/
using super::swapToLittleEndian;
using super::copyAndSwapToLittleEndian;
using super::swapToLittleEndianInPlace;
using super::swapToBigEndian;
using super::copyAndSwapToBigEndian;
using super::swapToBigEndianInPlace;
using super::swapToNetworkOrder;
using super::copyAndSwapToNetworkOrder;
using super::swapToNetworkOrderInPlace;
/*
* These functions are intended for cases where you have data in the
* given endianness (e.g. reading from disk or a file-format) and you
* need the data to appear in native-endian format for processing.
*/
using super::swapFromLittleEndian;
using super::copyAndSwapFromLittleEndian;
using super::swapFromLittleEndianInPlace;
using super::swapFromBigEndian;
using super::copyAndSwapFromBigEndian;
using super::swapFromBigEndianInPlace;
using super::swapFromNetworkOrder;
using super::copyAndSwapFromNetworkOrder;
using super::swapFromNetworkOrderInPlace;
/*
* These functions are intended for cases where you have data in the
* given endianness (e.g. reading from disk or a file-format) and you
* need the data to appear in native-endian format for processing.
*/
using super::swapFromLittleEndian;
using super::copyAndSwapFromLittleEndian;
using super::swapFromLittleEndianInPlace;
using super::swapFromBigEndian;
using super::copyAndSwapFromBigEndian;
using super::swapFromBigEndianInPlace;
using super::swapFromNetworkOrder;
using super::copyAndSwapFromNetworkOrder;
using super::swapFromNetworkOrderInPlace;
};
#undef MOZ_NATIVE_ENDIANNESS

View File

@ -24,165 +24,181 @@ namespace mozilla {
template<typename T>
class EnumSet
{
public:
EnumSet()
: mBitField(0)
{ }
public:
EnumSet()
: mBitField(0)
{ }
MOZ_IMPLICIT EnumSet(T aEnum)
: mBitField(bitFor(aEnum))
{ }
MOZ_IMPLICIT EnumSet(T aEnum)
: mBitField(bitFor(aEnum))
{ }
EnumSet(T aEnum1, T aEnum2)
: mBitField(bitFor(aEnum1) |
bitFor(aEnum2))
{ }
EnumSet(T aEnum1, T aEnum2)
: mBitField(bitFor(aEnum1) |
bitFor(aEnum2))
{ }
EnumSet(T aEnum1, T aEnum2, T aEnum3)
: mBitField(bitFor(aEnum1) |
bitFor(aEnum2) |
bitFor(aEnum3))
{ }
EnumSet(T aEnum1, T aEnum2, T aEnum3)
: mBitField(bitFor(aEnum1) |
bitFor(aEnum2) |
bitFor(aEnum3))
{ }
EnumSet(T aEnum1, T aEnum2, T aEnum3, T aEnum4)
: mBitField(bitFor(aEnum1) |
bitFor(aEnum2) |
bitFor(aEnum3) |
bitFor(aEnum4))
{ }
EnumSet(T aEnum1, T aEnum2, T aEnum3, T aEnum4)
: mBitField(bitFor(aEnum1) |
bitFor(aEnum2) |
bitFor(aEnum3) |
bitFor(aEnum4))
{ }
EnumSet(const EnumSet& aEnumSet)
: mBitField(aEnumSet.mBitField)
{ }
EnumSet(const EnumSet& aEnumSet)
: mBitField(aEnumSet.mBitField)
{ }
/**
* Add an element
*/
void operator+=(T aEnum) {
mBitField |= bitFor(aEnum);
}
/**
* Add an element
*/
void operator+=(T aEnum)
{
mBitField |= bitFor(aEnum);
}
/**
* Add an element
*/
EnumSet<T> operator+(T aEnum) const {
EnumSet<T> result(*this);
result += aEnum;
return result;
}
/**
* Add an element
*/
EnumSet<T> operator+(T aEnum) const
{
EnumSet<T> result(*this);
result += aEnum;
return result;
}
/**
* Union
*/
void operator+=(const EnumSet<T> aEnumSet) {
mBitField |= aEnumSet.mBitField;
}
/**
* Union
*/
void operator+=(const EnumSet<T> aEnumSet)
{
mBitField |= aEnumSet.mBitField;
}
/**
* Union
*/
EnumSet<T> operator+(const EnumSet<T> aEnumSet) const {
EnumSet<T> result(*this);
result += aEnumSet;
return result;
}
/**
* Union
*/
EnumSet<T> operator+(const EnumSet<T> aEnumSet) const
{
EnumSet<T> result(*this);
result += aEnumSet;
return result;
}
/**
* Remove an element
*/
void operator-=(T aEnum) {
mBitField &= ~(bitFor(aEnum));
}
/**
* Remove an element
*/
void operator-=(T aEnum)
{
mBitField &= ~(bitFor(aEnum));
}
/**
* Remove an element
*/
EnumSet<T> operator-(T aEnum) const {
EnumSet<T> result(*this);
result -= aEnum;
return result;
}
/**
* Remove an element
*/
EnumSet<T> operator-(T aEnum) const
{
EnumSet<T> result(*this);
result -= aEnum;
return result;
}
/**
* Remove a set of elements
*/
void operator-=(const EnumSet<T> aEnumSet) {
mBitField &= ~(aEnumSet.mBitField);
}
/**
* Remove a set of elements
*/
void operator-=(const EnumSet<T> aEnumSet)
{
mBitField &= ~(aEnumSet.mBitField);
}
/**
* Remove a set of elements
*/
EnumSet<T> operator-(const EnumSet<T> aEnumSet) const {
EnumSet<T> result(*this);
result -= aEnumSet;
return result;
}
/**
* Remove a set of elements
*/
EnumSet<T> operator-(const EnumSet<T> aEnumSet) const
{
EnumSet<T> result(*this);
result -= aEnumSet;
return result;
}
/**
* Intersection
*/
void operator&=(const EnumSet<T> aEnumSet) {
mBitField &= aEnumSet.mBitField;
}
/**
* Intersection
*/
void operator&=(const EnumSet<T> aEnumSet)
{
mBitField &= aEnumSet.mBitField;
}
/**
* Intersection
*/
EnumSet<T> operator&(const EnumSet<T> aEnumSet) const {
EnumSet<T> result(*this);
result &= aEnumSet;
return result;
}
/**
* Intersection
*/
EnumSet<T> operator&(const EnumSet<T> aEnumSet) const
{
EnumSet<T> result(*this);
result &= aEnumSet;
return result;
}
/**
* Equality
*/
/**
* Equality
*/
bool operator==(const EnumSet<T> aEnumSet) const
{
return mBitField == aEnumSet.mBitField;
}
bool operator==(const EnumSet<T> aEnumSet) const {
return mBitField == aEnumSet.mBitField;
}
/**
* Test is an element is contained in the set.
*/
bool contains(T aEnum) const
{
return mBitField & bitFor(aEnum);
}
/**
* Test is an element is contained in the set
*/
bool contains(T aEnum) const {
return mBitField & bitFor(aEnum);
}
/**
* Return the number of elements in the set
*/
uint8_t size() {
uint8_t count = 0;
for (uint32_t bitField = mBitField; bitField; bitField >>= 1) {
if (bitField & 1)
count++;
/**
* Return the number of elements in the set.
*/
uint8_t size()
{
uint8_t count = 0;
for (uint32_t bitField = mBitField; bitField; bitField >>= 1) {
if (bitField & 1) {
count++;
}
return count;
}
return count;
}
bool isEmpty() const {
return mBitField == 0;
}
bool isEmpty() const
{
return mBitField == 0;
}
uint32_t serialize() const {
return mBitField;
}
uint32_t serialize() const
{
return mBitField;
}
void deserialize(uint32_t aValue) {
mBitField = aValue;
}
void deserialize(uint32_t aValue)
{
mBitField = aValue;
}
private:
static uint32_t bitFor(T aEnum) {
uint32_t bitNumber = (uint32_t)aEnum;
MOZ_ASSERT(bitNumber < 32);
return 1U << bitNumber;
}
private:
static uint32_t bitFor(T aEnum)
{
uint32_t bitNumber = (uint32_t)aEnum;
MOZ_ASSERT(bitNumber < 32);
return 1U << bitNumber;
}
uint32_t mBitField;
uint32_t mBitField;
};
} // namespace mozilla

View File

@ -44,30 +44,31 @@ template<typename IndexType,
typename ValueType>
class EnumeratedArray
{
public:
static const size_t Size = size_t(SizeAsEnumValue);
public:
static const size_t kSize = size_t(SizeAsEnumValue);
private:
Array<ValueType, Size> mArray;
private:
Array<ValueType, kSize> mArray;
public:
EnumeratedArray() {}
public:
EnumeratedArray() {}
explicit EnumeratedArray(const EnumeratedArray& aOther)
{
for (size_t i = 0; i < Size; i++)
mArray[i] = aOther.mArray[i];
explicit EnumeratedArray(const EnumeratedArray& aOther)
{
for (size_t i = 0; i < kSize; i++) {
mArray[i] = aOther.mArray[i];
}
}
ValueType& operator[](IndexType aIndex)
{
return mArray[size_t(aIndex)];
}
ValueType& operator[](IndexType aIndex)
{
return mArray[size_t(aIndex)];
}
const ValueType& operator[](IndexType aIndex) const
{
return mArray[size_t(aIndex)];
}
const ValueType& operator[](IndexType aIndex) const
{
return mArray[size_t(aIndex)];
}
};
} // namespace mozilla

View File

@ -10,11 +10,11 @@
namespace mozilla {
bool
IsFloat32Representable(double x)
IsFloat32Representable(double aFloat32)
{
float asFloat = static_cast<float>(x);
double floatAsDouble = static_cast<double>(asFloat);
return floatAsDouble == x;
float asFloat = static_cast<float>(aFloat32);
double floatAsDouble = static_cast<double>(asFloat);
return floatAsDouble == aFloat32;
}
} /* namespace mozilla */

View File

@ -36,26 +36,26 @@ namespace mozilla {
struct FloatTypeTraits
{
typedef uint32_t Bits;
typedef uint32_t Bits;
static const unsigned ExponentBias = 127;
static const unsigned ExponentShift = 23;
static const unsigned kExponentBias = 127;
static const unsigned kExponentShift = 23;
static const Bits SignBit = 0x80000000UL;
static const Bits ExponentBits = 0x7F800000UL;
static const Bits SignificandBits = 0x007FFFFFUL;
static const Bits kSignBit = 0x80000000UL;
static const Bits kExponentBits = 0x7F800000UL;
static const Bits kSignificandBits = 0x007FFFFFUL;
};
struct DoubleTypeTraits
{
typedef uint64_t Bits;
typedef uint64_t Bits;
static const unsigned ExponentBias = 1023;
static const unsigned ExponentShift = 52;
static const unsigned kExponentBias = 1023;
static const unsigned kExponentShift = 52;
static const Bits SignBit = 0x8000000000000000ULL;
static const Bits ExponentBits = 0x7ff0000000000000ULL;
static const Bits SignificandBits = 0x000fffffffffffffULL;
static const Bits kSignBit = 0x8000000000000000ULL;
static const Bits kExponentBits = 0x7ff0000000000000ULL;
static const Bits kSignificandBits = 0x000fffffffffffffULL;
};
template<typename T> struct SelectTrait;
@ -71,139 +71,141 @@ template<> struct SelectTrait<double> : public DoubleTypeTraits {};
* as T: uint32_t for float and uint64_t for double (static assertions
* double-check these assumptions).
*
* ExponentBias is the offset that is subtracted from the exponent when
* kExponentBias is the offset that is subtracted from the exponent when
* computing the value, i.e. one plus the opposite of the mininum possible
* exponent.
* ExponentShift is the shift that one needs to apply to retrieve the exponent
* component of the value.
* kExponentShift is the shift that one needs to apply to retrieve the
* exponent component of the value.
*
* SignBit contains a bits mask. Bit-and-ing with this mask will result in
* kSignBit contains a bits mask. Bit-and-ing with this mask will result in
* obtaining the sign bit.
* ExponentBits contains the mask needed for obtaining the exponent bits and
* SignificandBits contains the mask needed for obtaining the significand bits.
* kExponentBits contains the mask needed for obtaining the exponent bits and
* kSignificandBits contains the mask needed for obtaining the significand
* bits.
*
* Full details of how floating point number formats are encoded are beyond the
* scope of this comment. For more information, see
* Full details of how floating point number formats are encoded are beyond
* the scope of this comment. For more information, see
* http://en.wikipedia.org/wiki/IEEE_floating_point
* http://en.wikipedia.org/wiki/Floating_point#IEEE_754:_floating_point_in_modern_computers
*/
template<typename T>
struct FloatingPoint : public SelectTrait<T>
{
typedef SelectTrait<T> Base;
typedef typename Base::Bits Bits;
typedef SelectTrait<T> Base;
typedef typename Base::Bits Bits;
static_assert((Base::SignBit & Base::ExponentBits) == 0,
"sign bit shouldn't overlap exponent bits");
static_assert((Base::SignBit & Base::SignificandBits) == 0,
"sign bit shouldn't overlap significand bits");
static_assert((Base::ExponentBits & Base::SignificandBits) == 0,
"exponent bits shouldn't overlap significand bits");
static_assert((Base::kSignBit & Base::kExponentBits) == 0,
"sign bit shouldn't overlap exponent bits");
static_assert((Base::kSignBit & Base::kSignificandBits) == 0,
"sign bit shouldn't overlap significand bits");
static_assert((Base::kExponentBits & Base::kSignificandBits) == 0,
"exponent bits shouldn't overlap significand bits");
static_assert((Base::SignBit | Base::ExponentBits | Base::SignificandBits) ==
~Bits(0),
"all bits accounted for");
static_assert((Base::kSignBit | Base::kExponentBits | Base::kSignificandBits) ==
~Bits(0),
"all bits accounted for");
/*
* These implementations assume float/double are 32/64-bit single/double format
* number types compatible with the IEEE-754 standard. C++ don't require this
* to be the case. But we required this in implementations of these algorithms
* that preceded this header, so we shouldn't break anything if we keep doing so.
*/
static_assert(sizeof(T) == sizeof(Bits), "Bits must be same size as T");
/*
* These implementations assume float/double are 32/64-bit single/double
* format number types compatible with the IEEE-754 standard. C++ don't
* require this to be the case. But we required this in implementations of
* these algorithms that preceded this header, so we shouldn't break anything
* if we keep doing so.
*/
static_assert(sizeof(T) == sizeof(Bits), "Bits must be same size as T");
};
/** Determines whether a double is NaN. */
template<typename T>
static MOZ_ALWAYS_INLINE bool
IsNaN(T t)
IsNaN(T aValue)
{
/*
* A float/double is NaN if all exponent bits are 1 and the significand contains at
* least one non-zero bit.
* A float/double is NaN if all exponent bits are 1 and the significand
* contains at least one non-zero bit.
*/
typedef FloatingPoint<T> Traits;
typedef typename Traits::Bits Bits;
Bits bits = BitwiseCast<Bits>(t);
return (bits & Traits::ExponentBits) == Traits::ExponentBits &&
(bits & Traits::SignificandBits) != 0;
Bits bits = BitwiseCast<Bits>(aValue);
return (bits & Traits::kExponentBits) == Traits::kExponentBits &&
(bits & Traits::kSignificandBits) != 0;
}
/** Determines whether a float/double is +Infinity or -Infinity. */
template<typename T>
static MOZ_ALWAYS_INLINE bool
IsInfinite(T t)
IsInfinite(T aValue)
{
/* Infinities have all exponent bits set to 1 and an all-0 significand. */
typedef FloatingPoint<T> Traits;
typedef typename Traits::Bits Bits;
Bits bits = BitwiseCast<Bits>(t);
return (bits & ~Traits::SignBit) == Traits::ExponentBits;
Bits bits = BitwiseCast<Bits>(aValue);
return (bits & ~Traits::kSignBit) == Traits::kExponentBits;
}
/** Determines whether a float/double is not NaN or infinite. */
template<typename T>
static MOZ_ALWAYS_INLINE bool
IsFinite(T t)
IsFinite(T aValue)
{
/*
* NaN and Infinities are the only non-finite floats/doubles, and both have all
* exponent bits set to 1.
* NaN and Infinities are the only non-finite floats/doubles, and both have
* all exponent bits set to 1.
*/
typedef FloatingPoint<T> Traits;
typedef typename Traits::Bits Bits;
Bits bits = BitwiseCast<Bits>(t);
return (bits & Traits::ExponentBits) != Traits::ExponentBits;
Bits bits = BitwiseCast<Bits>(aValue);
return (bits & Traits::kExponentBits) != Traits::kExponentBits;
}
/**
* Determines whether a float/double is negative. It is an error to call this method
* on a float/double which is NaN.
* Determines whether a float/double is negative. It is an error to call this
* method on a float/double which is NaN.
*/
template<typename T>
static MOZ_ALWAYS_INLINE bool
IsNegative(T t)
IsNegative(T aValue)
{
MOZ_ASSERT(!IsNaN(t), "NaN does not have a sign");
MOZ_ASSERT(!IsNaN(aValue), "NaN does not have a sign");
/* The sign bit is set if the double is negative. */
typedef FloatingPoint<T> Traits;
typedef typename Traits::Bits Bits;
Bits bits = BitwiseCast<Bits>(t);
return (bits & Traits::SignBit) != 0;
Bits bits = BitwiseCast<Bits>(aValue);
return (bits & Traits::kSignBit) != 0;
}
/** Determines whether a float/double represents -0. */
template<typename T>
static MOZ_ALWAYS_INLINE bool
IsNegativeZero(T t)
IsNegativeZero(T aValue)
{
/* Only the sign bit is set if the value is -0. */
typedef FloatingPoint<T> Traits;
typedef typename Traits::Bits Bits;
Bits bits = BitwiseCast<Bits>(t);
return bits == Traits::SignBit;
Bits bits = BitwiseCast<Bits>(aValue);
return bits == Traits::kSignBit;
}
/**
* Returns the exponent portion of the float/double.
*
* Zero is not special-cased, so ExponentComponent(0.0) is
* -int_fast16_t(Traits::ExponentBias).
* -int_fast16_t(Traits::kExponentBias).
*/
template<typename T>
static MOZ_ALWAYS_INLINE int_fast16_t
ExponentComponent(T t)
ExponentComponent(T aValue)
{
/*
* The exponent component of a float/double is an unsigned number, biased from its
* actual value. Subtract the bias to retrieve the actual exponent.
* The exponent component of a float/double is an unsigned number, biased
* from its actual value. Subtract the bias to retrieve the actual exponent.
*/
typedef FloatingPoint<T> Traits;
typedef typename Traits::Bits Bits;
Bits bits = BitwiseCast<Bits>(t);
return int_fast16_t((bits & Traits::ExponentBits) >> Traits::ExponentShift) -
int_fast16_t(Traits::ExponentBias);
Bits bits = BitwiseCast<Bits>(aValue);
return int_fast16_t((bits & Traits::kExponentBits) >> Traits::kExponentShift) -
int_fast16_t(Traits::kExponentBias);
}
/** Returns +Infinity. */
@ -216,7 +218,7 @@ PositiveInfinity()
* significand.
*/
typedef FloatingPoint<T> Traits;
return BitwiseCast<T>(Traits::ExponentBits);
return BitwiseCast<T>(Traits::kExponentBits);
}
/** Returns -Infinity. */
@ -229,7 +231,7 @@ NegativeInfinity()
* significand.
*/
typedef FloatingPoint<T> Traits;
return BitwiseCast<T>(Traits::SignBit | Traits::ExponentBits);
return BitwiseCast<T>(Traits::kSignBit | Traits::kExponentBits);
}
@ -240,11 +242,11 @@ SpecificNaN(int signbit, typename FloatingPoint<T>::Bits significand)
{
typedef FloatingPoint<T> Traits;
MOZ_ASSERT(signbit == 0 || signbit == 1);
MOZ_ASSERT((significand & ~Traits::SignificandBits) == 0);
MOZ_ASSERT(significand & Traits::SignificandBits);
MOZ_ASSERT((significand & ~Traits::kSignificandBits) == 0);
MOZ_ASSERT(significand & Traits::kSignificandBits);
T t = BitwiseCast<T>((signbit ? Traits::SignBit : 0) |
Traits::ExponentBits |
T t = BitwiseCast<T>((signbit ? Traits::kSignBit : 0) |
Traits::kExponentBits |
significand);
MOZ_ASSERT(IsNaN(t));
return t;
@ -261,15 +263,15 @@ MinNumberValue()
}
/**
* If t is equal to some int32_t value, set *i to that value and return true;
* otherwise return false.
* If aValue is equal to some int32_t value, set *aInt32 to that value and
* return true; otherwise return false.
*
* Note that negative zero is "equal" to zero here. To test whether a value can
* be losslessly converted to int32_t and back, use NumberIsInt32 instead.
*/
template<typename T>
static MOZ_ALWAYS_INLINE bool
NumberEqualsInt32(T t, int32_t* i)
NumberEqualsInt32(T aValue, int32_t* aInt32)
{
/*
* XXX Casting a floating-point value that doesn't truncate to int32_t, to
@ -277,21 +279,21 @@ NumberEqualsInt32(T t, int32_t* i)
* (bug 744965), but as apparently it "works" in practice, it's not a
* pressing concern now.
*/
return t == (*i = int32_t(t));
return aValue == (*aInt32 = int32_t(aValue));
}
/**
* If d can be converted to int32_t and back to an identical double value,
* set *i to that value and return true; otherwise return false.
* set *aInt32 to that value and return true; otherwise return false.
*
* The difference between this and NumberEqualsInt32 is that this method returns
* false for negative zero.
*/
template<typename T>
static MOZ_ALWAYS_INLINE bool
NumberIsInt32(T t, int32_t* i)
NumberIsInt32(T aValue, int32_t* aInt32)
{
return !IsNegativeZero(t) && NumberEqualsInt32(t, i);
return !IsNegativeZero(aValue) && NumberEqualsInt32(aValue, aInt32);
}
/**
@ -309,7 +311,7 @@ UnspecifiedNaN()
* it to be stored to memory in a single instruction).
*/
typedef FloatingPoint<T> Traits;
return SpecificNaN<T>(1, Traits::SignificandBits);
return SpecificNaN<T>(1, Traits::kSignificandBits);
}
/**
@ -319,13 +321,14 @@ UnspecifiedNaN()
*/
template<typename T>
static inline bool
NumbersAreIdentical(T t1, T t2)
NumbersAreIdentical(T aValue1, T aValue2)
{
typedef FloatingPoint<T> Traits;
typedef typename Traits::Bits Bits;
if (IsNaN(t1))
return IsNaN(t2);
return BitwiseCast<Bits>(t1) == BitwiseCast<Bits>(t2);
if (IsNaN(aValue1)) {
return IsNaN(aValue2);
}
return BitwiseCast<Bits>(aValue1) == BitwiseCast<Bits>(aValue2);
}
namespace detail {
@ -336,16 +339,14 @@ struct FuzzyEqualsEpsilon;
template<>
struct FuzzyEqualsEpsilon<float>
{
// A number near 1e-5 that is exactly representable in
// floating point
// A number near 1e-5 that is exactly representable in a float.
static float value() { return 1.0f / (1 << 17); }
};
template<>
struct FuzzyEqualsEpsilon<double>
{
// A number near 1e-12 that is exactly representable in
// a double
// A number near 1e-12 that is exactly representable in a double.
static double value() { return 1.0 / (1LL << 40); }
};
@ -354,42 +355,45 @@ struct FuzzyEqualsEpsilon<double>
/**
* Compare two floating point values for equality, modulo rounding error. That
* is, the two values are considered equal if they are both not NaN and if they
* are less than or equal to epsilon apart. The default value of epsilon is near
* 1e-5.
* are less than or equal to aEpsilon apart. The default value of aEpsilon is
* near 1e-5.
*
* For most scenarios you will want to use FuzzyEqualsMultiplicative instead,
* as it is more reasonable over the entire range of floating point numbers.
* This additive version should only be used if you know the range of the numbers
* you are dealing with is bounded and stays around the same order of magnitude.
* This additive version should only be used if you know the range of the
* numbers you are dealing with is bounded and stays around the same order of
* magnitude.
*/
template<typename T>
static MOZ_ALWAYS_INLINE bool
FuzzyEqualsAdditive(T val1, T val2, T epsilon = detail::FuzzyEqualsEpsilon<T>::value())
FuzzyEqualsAdditive(T aValue1, T aValue2,
T aEpsilon = detail::FuzzyEqualsEpsilon<T>::value())
{
static_assert(IsFloatingPoint<T>::value, "floating point type required");
return Abs(val1 - val2) <= epsilon;
return Abs(aValue1 - aValue2) <= aEpsilon;
}
/**
* Compare two floating point values for equality, allowing for rounding error
* relative to the magnitude of the values. That is, the two values are
* considered equal if they are both not NaN and they are less than or equal to
* some epsilon apart, where the epsilon is scaled by the smaller of the two
* some aEpsilon apart, where the aEpsilon is scaled by the smaller of the two
* argument values.
*
* In most cases you will want to use this rather than FuzzyEqualsAdditive, as
* this function effectively masks out differences in the bottom few bits of
* the floating point numbers being compared, regardless of what order of magnitude
* those numbers are at.
* the floating point numbers being compared, regardless of what order of
* magnitude those numbers are at.
*/
template<typename T>
static MOZ_ALWAYS_INLINE bool
FuzzyEqualsMultiplicative(T val1, T val2, T epsilon = detail::FuzzyEqualsEpsilon<T>::value())
FuzzyEqualsMultiplicative(T aValue1, T aValue2,
T aEpsilon = detail::FuzzyEqualsEpsilon<T>::value())
{
static_assert(IsFloatingPoint<T>::value, "floating point type required");
// can't use std::min because of bug 965340
T smaller = Abs(val1) < Abs(val2) ? Abs(val1) : Abs(val2);
return Abs(val1 - val2) <= epsilon * smaller;
T smaller = Abs(aValue1) < Abs(aValue2) ? Abs(aValue1) : Abs(aValue2);
return Abs(aValue1 - aValue2) <= aEpsilon * smaller;
}
/**
@ -402,7 +406,7 @@ FuzzyEqualsMultiplicative(T val1, T val2, T epsilon = detail::FuzzyEqualsEpsilon
*/
MOZ_WARN_UNUSED_RESULT
extern MFBT_API bool
IsFloat32Representable(double x);
IsFloat32Representable(double aFloat32);
} /* namespace mozilla */

View File

@ -70,46 +70,47 @@ namespace detail {
*/
class GuardObjectNotifier
{
private:
bool* statementDone;
private:
bool* mStatementDone;
public:
GuardObjectNotifier() : statementDone(nullptr) { }
public:
GuardObjectNotifier() : mStatementDone(nullptr) { }
~GuardObjectNotifier() {
*statementDone = true;
}
~GuardObjectNotifier() { *mStatementDone = true; }
void setStatementDone(bool* statementIsDone) {
statementDone = statementIsDone;
}
void setStatementDone(bool* aStatementIsDone)
{
mStatementDone = aStatementIsDone;
}
};
class GuardObjectNotificationReceiver
{
private:
bool statementDone;
private:
bool mStatementDone;
public:
GuardObjectNotificationReceiver() : statementDone(false) { }
public:
GuardObjectNotificationReceiver() : mStatementDone(false) { }
~GuardObjectNotificationReceiver() {
/*
* Assert that the guard object was not used as a temporary. (Note that
* this assert might also fire if init is not called because the guard
* object's implementation is not using the above macros correctly.)
*/
MOZ_ASSERT(statementDone);
}
~GuardObjectNotificationReceiver() {
/*
* Assert that the guard object was not used as a temporary. (Note that
* this assert might also fire if init is not called because the guard
* object's implementation is not using the above macros correctly.)
*/
MOZ_ASSERT(mStatementDone);
}
void init(const GuardObjectNotifier& constNotifier) {
/*
* constNotifier is passed as a const reference so that we can pass a
* temporary, but we really intend it as non-const.
*/
GuardObjectNotifier& notifier = const_cast<GuardObjectNotifier&>(constNotifier);
notifier.setStatementDone(&statementDone);
}
void init(const GuardObjectNotifier& aConstNotifier)
{
/*
* aConstNotifier is passed as a const reference so that we can pass a
* temporary, but we really intend it as non-const.
*/
GuardObjectNotifier& notifier =
const_cast<GuardObjectNotifier&>(aConstNotifier);
notifier.setStatementDone(&mStatementDone);
}
};
} /* namespace detail */

View File

@ -13,14 +13,14 @@
namespace mozilla {
uint32_t
HashBytes(const void* bytes, size_t length)
HashBytes(const void* aBytes, size_t aLength)
{
uint32_t hash = 0;
const char* b = reinterpret_cast<const char*>(bytes);
const char* b = reinterpret_cast<const char*>(aBytes);
/* Walk word by word. */
size_t i = 0;
for (; i < length - (length % sizeof(size_t)); i += sizeof(size_t)) {
for (; i < aLength - (aLength % sizeof(size_t)); i += sizeof(size_t)) {
/* Do an explicitly unaligned load of the data. */
size_t data;
memcpy(&data, b + i, sizeof(size_t));
@ -29,9 +29,9 @@ HashBytes(const void* bytes, size_t length)
}
/* Get the remaining bytes. */
for (; i < length; i++)
for (; i < aLength; i++) {
hash = AddToHash(hash, b[i]);
}
return hash;
}

View File

@ -27,16 +27,17 @@
*
* class ComplexObject
* {
* char* str;
* uint32_t uint1, uint2;
* void (*callbackFn)();
* char* mStr;
* uint32_t mUint1, mUint2;
* void (*mCallbackFn)();
*
* public:
* uint32_t hash() {
* uint32_t hash = HashString(str);
* hash = AddToHash(hash, uint1, uint2);
* return AddToHash(hash, callbackFn);
* }
* public:
* uint32_t hash()
* {
* uint32_t hash = HashString(mStr);
* hash = AddToHash(hash, mUint1, mUint2);
* return AddToHash(hash, mCallbackFn);
* }
* };
*
* If you want to hash an nsAString or nsACString, use the HashString functions
@ -59,19 +60,19 @@ namespace mozilla {
/**
* The golden ratio as a 32-bit fixed-point value.
*/
static const uint32_t GoldenRatioU32 = 0x9E3779B9U;
static const uint32_t kGoldenRatioU32 = 0x9E3779B9U;
inline uint32_t
RotateBitsLeft32(uint32_t value, uint8_t bits)
RotateBitsLeft32(uint32_t aValue, uint8_t aBits)
{
MOZ_ASSERT(bits < 32);
return (value << bits) | (value >> (32 - bits));
MOZ_ASSERT(aBits < 32);
return (aValue << aBits) | (aValue >> (32 - aBits));
}
namespace detail {
inline uint32_t
AddU32ToHash(uint32_t hash, uint32_t value)
AddU32ToHash(uint32_t aHash, uint32_t aValue)
{
/*
* This is the meat of all our hash routines. This hash function is not
@ -91,12 +92,12 @@ AddU32ToHash(uint32_t hash, uint32_t value)
* preferable so our hash explores the whole universe of possible rotations.
*
* Finally, we multiply by the golden ratio *after* xor'ing, not before.
* Otherwise, if |hash| is 0 (as it often is for the beginning of a message),
* the expression
* Otherwise, if |aHash| is 0 (as it often is for the beginning of a
* message), the expression
*
* (GoldenRatioU32 * RotateBitsLeft(hash, 5)) |xor| value
* (kGoldenRatioU32 * RotateBitsLeft(aHash, 5)) |xor| aValue
*
* evaluates to |value|.
* evaluates to |aValue|.
*
* (Number-theoretic aside: Because any odd number |m| is relatively prime to
* our modulus (2^32), the list
@ -112,7 +113,7 @@ AddU32ToHash(uint32_t hash, uint32_t value)
* multiplicative effect. Our golden ratio constant has order 2^29, which is
* more than enough for our purposes.)
*/
return GoldenRatioU32 * (RotateBitsLeft32(hash, 5) ^ value);
return kGoldenRatioU32 * (RotateBitsLeft32(aHash, 5) ^ aValue);
}
/**
@ -120,29 +121,29 @@ AddU32ToHash(uint32_t hash, uint32_t value)
*/
template<size_t PtrSize>
inline uint32_t
AddUintptrToHash(uint32_t hash, uintptr_t value);
AddUintptrToHash(uint32_t aHash, uintptr_t aValue);
template<>
inline uint32_t
AddUintptrToHash<4>(uint32_t hash, uintptr_t value)
AddUintptrToHash<4>(uint32_t aHash, uintptr_t aValue)
{
return AddU32ToHash(hash, static_cast<uint32_t>(value));
return AddU32ToHash(aHash, static_cast<uint32_t>(aValue));
}
template<>
inline uint32_t
AddUintptrToHash<8>(uint32_t hash, uintptr_t value)
AddUintptrToHash<8>(uint32_t aHash, uintptr_t aValue)
{
/*
* The static cast to uint64_t below is necessary because this function
* sometimes gets compiled on 32-bit platforms (yes, even though it's a
* template and we never call this particular override in a 32-bit build). If
* we do value >> 32 on a 32-bit machine, we're shifting a 32-bit uintptr_t
* we do aValue >> 32 on a 32-bit machine, we're shifting a 32-bit uintptr_t
* right 32 bits, and the compiler throws an error.
*/
uint32_t v1 = static_cast<uint32_t>(value);
uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(value) >> 32);
return AddU32ToHash(AddU32ToHash(hash, v1), v2);
uint32_t v1 = static_cast<uint32_t>(aValue);
uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32);
return AddU32ToHash(AddU32ToHash(aHash, v1), v2);
}
} /* namespace detail */
@ -155,71 +156,63 @@ AddUintptrToHash<8>(uint32_t hash, uintptr_t value)
* convert to uint32_t, data pointers, and function pointers.
*/
template<typename A>
MOZ_WARN_UNUSED_RESULT
inline uint32_t
AddToHash(uint32_t hash, A a)
MOZ_WARN_UNUSED_RESULT inline uint32_t
AddToHash(uint32_t aHash, A aA)
{
/*
* Try to convert |A| to uint32_t implicitly. If this works, great. If not,
* we'll error out.
*/
return detail::AddU32ToHash(hash, a);
return detail::AddU32ToHash(aHash, aA);
}
template<typename A>
MOZ_WARN_UNUSED_RESULT
inline uint32_t
AddToHash(uint32_t hash, A* a)
MOZ_WARN_UNUSED_RESULT inline uint32_t
AddToHash(uint32_t aHash, A* aA)
{
/*
* You might think this function should just take a void*. But then we'd only
* catch data pointers and couldn't handle function pointers.
*/
static_assert(sizeof(a) == sizeof(uintptr_t),
"Strange pointer!");
static_assert(sizeof(aA) == sizeof(uintptr_t), "Strange pointer!");
return detail::AddUintptrToHash<sizeof(uintptr_t)>(hash, uintptr_t(a));
return detail::AddUintptrToHash<sizeof(uintptr_t)>(aHash, uintptr_t(aA));
}
template<>
MOZ_WARN_UNUSED_RESULT
inline uint32_t
AddToHash(uint32_t hash, uintptr_t a)
MOZ_WARN_UNUSED_RESULT inline uint32_t
AddToHash(uint32_t aHash, uintptr_t aA)
{
return detail::AddUintptrToHash<sizeof(uintptr_t)>(hash, a);
return detail::AddUintptrToHash<sizeof(uintptr_t)>(aHash, aA);
}
template<typename A, typename B>
MOZ_WARN_UNUSED_RESULT
uint32_t
AddToHash(uint32_t hash, A a, B b)
MOZ_WARN_UNUSED_RESULT uint32_t
AddToHash(uint32_t aHash, A aA, B aB)
{
return AddToHash(AddToHash(hash, a), b);
return AddToHash(AddToHash(aHash, aA), aB);
}
template<typename A, typename B, typename C>
MOZ_WARN_UNUSED_RESULT
uint32_t
AddToHash(uint32_t hash, A a, B b, C c)
MOZ_WARN_UNUSED_RESULT uint32_t
AddToHash(uint32_t aHash, A aA, B aB, C aC)
{
return AddToHash(AddToHash(hash, a, b), c);
return AddToHash(AddToHash(aHash, aA, aB), aC);
}
template<typename A, typename B, typename C, typename D>
MOZ_WARN_UNUSED_RESULT
uint32_t
AddToHash(uint32_t hash, A a, B b, C c, D d)
MOZ_WARN_UNUSED_RESULT uint32_t
AddToHash(uint32_t aHash, A aA, B aB, C aC, D aD)
{
return AddToHash(AddToHash(hash, a, b, c), d);
return AddToHash(AddToHash(aHash, aA, aB, aC), aD);
}
template<typename A, typename B, typename C, typename D, typename E>
MOZ_WARN_UNUSED_RESULT
uint32_t
AddToHash(uint32_t hash, A a, B b, C c, D d, E e)
MOZ_WARN_UNUSED_RESULT uint32_t
AddToHash(uint32_t aHash, A aA, B aB, C aC, D aD, E aE)
{
return AddToHash(AddToHash(hash, a, b, c, d), e);
return AddToHash(AddToHash(aHash, aA, aB, aC, aD), aE);
}
/**
@ -230,64 +223,61 @@ AddToHash(uint32_t hash, A a, B b, C c, D d, E e)
* that x has already been hashed.
*/
template<typename A>
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashGeneric(A a)
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashGeneric(A aA)
{
return AddToHash(0, a);
return AddToHash(0, aA);
}
template<typename A, typename B>
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashGeneric(A a, B b)
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashGeneric(A aA, B aB)
{
return AddToHash(0, a, b);
return AddToHash(0, aA, aB);
}
template<typename A, typename B, typename C>
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashGeneric(A a, B b, C c)
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashGeneric(A aA, B aB, C aC)
{
return AddToHash(0, a, b, c);
return AddToHash(0, aA, aB, aC);
}
template<typename A, typename B, typename C, typename D>
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashGeneric(A a, B b, C c, D d)
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashGeneric(A aA, B aB, C aC, D aD)
{
return AddToHash(0, a, b, c, d);
return AddToHash(0, aA, aB, aC, aD);
}
template<typename A, typename B, typename C, typename D, typename E>
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashGeneric(A a, B b, C c, D d, E e)
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashGeneric(A aA, B aB, C aC, D aD, E aE)
{
return AddToHash(0, a, b, c, d, e);
return AddToHash(0, aA, aB, aC, aD, aE);
}
namespace detail {
template<typename T>
uint32_t
HashUntilZero(const T* str)
HashUntilZero(const T* aStr)
{
uint32_t hash = 0;
for (T c; (c = *str); str++)
for (T c; (c = *aStr); aStr++) {
hash = AddToHash(hash, c);
}
return hash;
}
template<typename T>
uint32_t
HashKnownLength(const T* str, size_t length)
HashKnownLength(const T* aStr, size_t aLength)
{
uint32_t hash = 0;
for (size_t i = 0; i < length; i++)
hash = AddToHash(hash, str[i]);
for (size_t i = 0; i < aLength; i++) {
hash = AddToHash(hash, aStr[i]);
}
return hash;
}
@ -299,54 +289,48 @@ HashKnownLength(const T* str, size_t length)
* If you have the string's length, you might as well call the overload which
* includes the length. It may be marginally faster.
*/
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashString(const char* str)
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashString(const char* aStr)
{
return detail::HashUntilZero(str);
return detail::HashUntilZero(aStr);
}
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashString(const char* aStr, size_t aLength)
{
return detail::HashKnownLength(aStr, aLength);
}
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashString(const char* str, size_t length)
HashString(const unsigned char* aStr, size_t aLength)
{
return detail::HashKnownLength(str, length);
return detail::HashKnownLength(aStr, aLength);
}
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashString(const unsigned char* str, size_t length)
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashString(const uint16_t* aStr)
{
return detail::HashKnownLength(str, length);
return detail::HashUntilZero(aStr);
}
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashString(const uint16_t* str)
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashString(const uint16_t* aStr, size_t aLength)
{
return detail::HashUntilZero(str);
}
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashString(const uint16_t* str, size_t length)
{
return detail::HashKnownLength(str, length);
return detail::HashKnownLength(aStr, aLength);
}
#ifdef MOZ_CHAR16_IS_NOT_WCHAR
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashString(const char16_t* str)
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashString(const char16_t* aStr)
{
return detail::HashUntilZero(str);
return detail::HashUntilZero(aStr);
}
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashString(const char16_t* str, size_t length)
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashString(const char16_t* aStr, size_t aLength)
{
return detail::HashKnownLength(str, length);
return detail::HashKnownLength(aStr, aLength);
}
#endif
@ -355,18 +339,16 @@ HashString(const char16_t* str, size_t length)
* the same width!
*/
#ifdef WIN32
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashString(const wchar_t* str)
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashString(const wchar_t* aStr)
{
return detail::HashUntilZero(str);
return detail::HashUntilZero(aStr);
}
MOZ_WARN_UNUSED_RESULT
inline uint32_t
HashString(const wchar_t* str, size_t length)
MOZ_WARN_UNUSED_RESULT inline uint32_t
HashString(const wchar_t* aStr, size_t aLength)
{
return detail::HashKnownLength(str, length);
return detail::HashKnownLength(aStr, aLength);
}
#endif
@ -376,9 +358,8 @@ HashString(const wchar_t* str, size_t length)
* This hash walks word-by-word, rather than byte-by-byte, so you won't get the
* same result out of HashBytes as you would out of HashString.
*/
MOZ_WARN_UNUSED_RESULT
extern MFBT_API uint32_t
HashBytes(const void* bytes, size_t length);
MOZ_WARN_UNUSED_RESULT extern MFBT_API uint32_t
HashBytes(const void* bytes, size_t aLength);
} /* namespace mozilla */
#endif /* __cplusplus */

View File

@ -26,35 +26,51 @@ struct StdintTypeForSizeAndSignedness;
template<>
struct StdintTypeForSizeAndSignedness<1, true>
{ typedef int8_t Type; };
{
typedef int8_t Type;
};
template<>
struct StdintTypeForSizeAndSignedness<1, false>
{ typedef uint8_t Type; };
{
typedef uint8_t Type;
};
template<>
struct StdintTypeForSizeAndSignedness<2, true>
{ typedef int16_t Type; };
{
typedef int16_t Type;
};
template<>
struct StdintTypeForSizeAndSignedness<2, false>
{ typedef uint16_t Type; };
{
typedef uint16_t Type;
};
template<>
struct StdintTypeForSizeAndSignedness<4, true>
{ typedef int32_t Type; };
{
typedef int32_t Type;
};
template<>
struct StdintTypeForSizeAndSignedness<4, false>
{ typedef uint32_t Type; };
{
typedef uint32_t Type;
};
template<>
struct StdintTypeForSizeAndSignedness<8, true>
{ typedef int64_t Type; };
{
typedef int64_t Type;
};
template<>
struct StdintTypeForSizeAndSignedness<8, false>
{ typedef uint64_t Type; };
{
typedef uint64_t Type;
};
} // namespace detail
@ -79,23 +95,23 @@ struct PositionOfSignBit
template<typename IntegerType>
struct MinValue
{
private:
static_assert(IsIntegral<IntegerType>::value, "MinValue is only for integral types");
private:
static_assert(IsIntegral<IntegerType>::value, "MinValue is only for integral types");
typedef typename MakeUnsigned<IntegerType>::Type UnsignedIntegerType;
static const size_t PosOfSignBit = PositionOfSignBit<IntegerType>::value;
typedef typename MakeUnsigned<IntegerType>::Type UnsignedIntegerType;
static const size_t PosOfSignBit = PositionOfSignBit<IntegerType>::value;
public:
// Bitwise ops may return a larger type, that's why we cast explicitly.
// In C++, left bit shifts on signed values is undefined by the standard
// unless the shifted value is representable.
// Notice that signed-to-unsigned conversions are always well-defined in
// the standard as the value congruent to 2**n, as expected. By contrast,
// unsigned-to-signed is only well-defined if the value is representable.
static const IntegerType value =
IsSigned<IntegerType>::value
? IntegerType(UnsignedIntegerType(1) << PosOfSignBit)
: IntegerType(0);
public:
// Bitwise ops may return a larger type, that's why we cast explicitly.
// In C++, left bit shifts on signed values is undefined by the standard
// unless the shifted value is representable.
// Notice that signed-to-unsigned conversions are always well-defined in
// the standard as the value congruent to 2**n, as expected. By contrast,
// unsigned-to-signed is only well-defined if the value is representable.
static const IntegerType value =
IsSigned<IntegerType>::value
? IntegerType(UnsignedIntegerType(1) << PosOfSignBit)
: IntegerType(0);
};
/**
@ -106,12 +122,12 @@ struct MinValue
template<typename IntegerType>
struct MaxValue
{
static_assert(IsIntegral<IntegerType>::value, "MaxValue is only for integral types");
static_assert(IsIntegral<IntegerType>::value, "MaxValue is only for integral types");
// Tricksy, but covered by the CheckedInt unit test.
// Relies on the type of MinValue<IntegerType>::value
// being IntegerType.
static const IntegerType value = ~MinValue<IntegerType>::value;
// Tricksy, but covered by the CheckedInt unit test.
// Relies on the type of MinValue<IntegerType>::value
// being IntegerType.
static const IntegerType value = ~MinValue<IntegerType>::value;
};
} // namespace mozilla

View File

@ -23,32 +23,36 @@
*
* class Observer : public LinkedListElement<Observer>
* {
* public:
* void observe(char* topic) { ... }
* public:
* void observe(char* aTopic) { ... }
* };
*
* class ObserverContainer
* {
* private:
* LinkedList<Observer> list;
* private:
* LinkedList<Observer> list;
*
* public:
* void addObserver(Observer* observer) {
* // Will assert if |observer| is part of another list.
* list.insertBack(observer);
* }
* public:
* void addObserver(Observer* aObserver)
* {
* // Will assert if |aObserver| is part of another list.
* list.insertBack(aObserver);
* }
*
* void removeObserver(Observer* observer) {
* // Will assert if |observer| is not part of some list.
* observer.remove();
* // Or, will assert if |observer| is not part of |list| specifically.
* // observer.removeFrom(list);
* }
* void removeObserver(Observer* aObserver)
* {
* // Will assert if |aObserver| is not part of some list.
* aObserver.remove();
* // Or, will assert if |aObserver| is not part of |list| specifically.
* // aObserver.removeFrom(list);
* }
*
* void notifyObservers(char* topic) {
* for (Observer* o = list.getFirst(); o != nullptr; o = o->getNext())
* o->observe(topic);
* void notifyObservers(char* aTopic)
* {
* for (Observer* o = list.getFirst(); o != nullptr; o = o->getNext()) {
* o->observe(aTopic);
* }
* }
* };
*
*/
@ -72,411 +76,406 @@ class LinkedList;
template<typename T>
class LinkedListElement
{
/*
* It's convenient that we return nullptr when getNext() or getPrevious()
* hits the end of the list, but doing so costs an extra word of storage in
* each linked list node (to keep track of whether |this| is the sentinel
* node) and a branch on this value in getNext/getPrevious.
*
* We could get rid of the extra word of storage by shoving the "is
* sentinel" bit into one of the pointers, although this would, of course,
* have performance implications of its own.
*
* But the goal here isn't to win an award for the fastest or slimmest
* linked list; rather, we want a *convenient* linked list. So we won't
* waste time guessing which micro-optimization strategy is best.
*
*
* Speaking of unnecessary work, it's worth addressing here why we wrote
* mozilla::LinkedList in the first place, instead of using stl::list.
*
* The key difference between mozilla::LinkedList and stl::list is that
* mozilla::LinkedList stores the prev/next pointers in the object itself,
* while stl::list stores the prev/next pointers in a list element which
* itself points to the object being stored.
*
* mozilla::LinkedList's approach makes it harder to store an object in more
* than one list. But the upside is that you can call next() / prev() /
* remove() directly on the object. With stl::list, you'd need to store a
* pointer to its iterator in the object in order to accomplish this. Not
* only would this waste space, but you'd have to remember to update that
* pointer every time you added or removed the object from a list.
*
* In-place, constant-time removal is a killer feature of doubly-linked
* lists, and supporting this painlessly was a key design criterion.
*/
/*
* It's convenient that we return nullptr when getNext() or getPrevious()
* hits the end of the list, but doing so costs an extra word of storage in
* each linked list node (to keep track of whether |this| is the sentinel
* node) and a branch on this value in getNext/getPrevious.
*
* We could get rid of the extra word of storage by shoving the "is
* sentinel" bit into one of the pointers, although this would, of course,
* have performance implications of its own.
*
* But the goal here isn't to win an award for the fastest or slimmest
* linked list; rather, we want a *convenient* linked list. So we won't
* waste time guessing which micro-optimization strategy is best.
*
*
* Speaking of unnecessary work, it's worth addressing here why we wrote
* mozilla::LinkedList in the first place, instead of using stl::list.
*
* The key difference between mozilla::LinkedList and stl::list is that
* mozilla::LinkedList stores the mPrev/mNext pointers in the object itself,
* while stl::list stores the mPrev/mNext pointers in a list element which
* itself points to the object being stored.
*
* mozilla::LinkedList's approach makes it harder to store an object in more
* than one list. But the upside is that you can call next() / prev() /
* remove() directly on the object. With stl::list, you'd need to store a
* pointer to its iterator in the object in order to accomplish this. Not
* only would this waste space, but you'd have to remember to update that
* pointer every time you added or removed the object from a list.
*
* In-place, constant-time removal is a killer feature of doubly-linked
* lists, and supporting this painlessly was a key design criterion.
*/
private:
LinkedListElement* next;
LinkedListElement* prev;
const bool isSentinel;
private:
LinkedListElement* mNext;
LinkedListElement* mPrev;
const bool mIsSentinel;
public:
LinkedListElement()
: next(MOZ_THIS_IN_INITIALIZER_LIST()),
prev(MOZ_THIS_IN_INITIALIZER_LIST()),
isSentinel(false)
{ }
public:
LinkedListElement()
: mNext(MOZ_THIS_IN_INITIALIZER_LIST()),
mPrev(MOZ_THIS_IN_INITIALIZER_LIST()),
mIsSentinel(false)
{ }
LinkedListElement(LinkedListElement<T>&& other)
: isSentinel(other.isSentinel)
{
if (!other.isInList()) {
next = this;
prev = this;
return;
}
MOZ_ASSERT(other.next->prev == &other);
MOZ_ASSERT(other.prev->next == &other);
/*
* Initialize |this| with |other|'s prev/next pointers, and adjust those
* element to point to this one.
*/
next = other.next;
prev = other.prev;
next->prev = this;
prev->next = this;
/*
* Adjust |other| so it doesn't think it's in a list. This makes it
* safely destructable.
*/
other.next = &other;
other.prev = &other;
LinkedListElement(LinkedListElement<T>&& other)
: mIsSentinel(other.mIsSentinel)
{
if (!other.isInList()) {
mNext = this;
mPrev = this;
return;
}
~LinkedListElement() {
if (!isSentinel && isInList())
remove();
}
MOZ_ASSERT(other.mNext->mPrev == &other);
MOZ_ASSERT(other.mPrev->mNext == &other);
/*
* Get the next element in the list, or nullptr if this is the last element
* in the list.
* Initialize |this| with |other|'s mPrev/mNext pointers, and adjust those
* element to point to this one.
*/
T* getNext() {
return next->asT();
}
const T* getNext() const {
return next->asT();
}
mNext = other.mNext;
mPrev = other.mPrev;
mNext->mPrev = this;
mPrev->mNext = this;
/*
* Get the previous element in the list, or nullptr if this is the first
* element in the list.
* Adjust |other| so it doesn't think it's in a list. This makes it
* safely destructable.
*/
T* getPrevious() {
return prev->asT();
}
const T* getPrevious() const {
return prev->asT();
}
other.mNext = &other;
other.mPrev = &other;
}
/*
* Insert elem after this element in the list. |this| must be part of a
* linked list when you call setNext(); otherwise, this method will assert.
*/
void setNext(T* elem) {
MOZ_ASSERT(isInList());
setNextUnsafe(elem);
}
/*
* Insert elem before this element in the list. |this| must be part of a
* linked list when you call setPrevious(); otherwise, this method will
* assert.
*/
void setPrevious(T* elem) {
MOZ_ASSERT(isInList());
setPreviousUnsafe(elem);
}
/*
* Remove this element from the list which contains it. If this element is
* not currently part of a linked list, this method asserts.
*/
void remove() {
MOZ_ASSERT(isInList());
prev->next = next;
next->prev = prev;
next = this;
prev = this;
}
/*
* Identical to remove(), but also asserts in debug builds that this element
* is in list.
*/
void removeFrom(const LinkedList<T>& list) {
list.assertContains(asT());
~LinkedListElement()
{
if (!mIsSentinel && isInList()) {
remove();
}
}
/*
* Return true if |this| part is of a linked list, and false otherwise.
*/
bool isInList() const {
MOZ_ASSERT((next == this) == (prev == this));
return next != this;
}
/*
* Get the next element in the list, or nullptr if this is the last element
* in the list.
*/
T* getNext() { return mNext->asT(); }
const T* getNext() const { return mNext->asT(); }
private:
friend class LinkedList<T>;
/*
* Get the previous element in the list, or nullptr if this is the first
* element in the list.
*/
T* getPrevious() { return mPrev->asT(); }
const T* getPrevious() const { return mPrev->asT(); }
enum NodeKind {
NODE_KIND_NORMAL,
NODE_KIND_SENTINEL
};
/*
* Insert aElem after this element in the list. |this| must be part of a
* linked list when you call setNext(); otherwise, this method will assert.
*/
void setNext(T* aElem)
{
MOZ_ASSERT(isInList());
setNextUnsafe(aElem);
}
explicit LinkedListElement(NodeKind nodeKind)
: next(MOZ_THIS_IN_INITIALIZER_LIST()),
prev(MOZ_THIS_IN_INITIALIZER_LIST()),
isSentinel(nodeKind == NODE_KIND_SENTINEL)
{ }
/*
* Insert aElem before this element in the list. |this| must be part of a
* linked list when you call setPrevious(); otherwise, this method will
* assert.
*/
void setPrevious(T* aElem)
{
MOZ_ASSERT(isInList());
setPreviousUnsafe(aElem);
}
/*
* Return |this| cast to T* if we're a normal node, or return nullptr if
* we're a sentinel node.
*/
T* asT() {
if (isSentinel)
return nullptr;
/*
* Remove this element from the list which contains it. If this element is
* not currently part of a linked list, this method asserts.
*/
void remove()
{
MOZ_ASSERT(isInList());
return static_cast<T*>(this);
}
const T* asT() const {
if (isSentinel)
return nullptr;
mPrev->mNext = mNext;
mNext->mPrev = mPrev;
mNext = this;
mPrev = this;
}
return static_cast<const T*>(this);
}
/*
* Identical to remove(), but also asserts in debug builds that this element
* is in aList.
*/
void removeFrom(const LinkedList<T>& aList)
{
aList.assertContains(asT());
remove();
}
/*
* Insert elem after this element, but don't check that this element is in
* the list. This is called by LinkedList::insertFront().
*/
void setNextUnsafe(T* elem) {
LinkedListElement *listElem = static_cast<LinkedListElement*>(elem);
MOZ_ASSERT(!listElem->isInList());
/*
* Return true if |this| part is of a linked list, and false otherwise.
*/
bool isInList() const
{
MOZ_ASSERT((mNext == this) == (mPrev == this));
return mNext != this;
}
listElem->next = this->next;
listElem->prev = this;
this->next->prev = listElem;
this->next = listElem;
}
private:
friend class LinkedList<T>;
/*
* Insert elem before this element, but don't check that this element is in
* the list. This is called by LinkedList::insertBack().
*/
void setPreviousUnsafe(T* elem) {
LinkedListElement<T>* listElem = static_cast<LinkedListElement<T>*>(elem);
MOZ_ASSERT(!listElem->isInList());
enum NodeKind {
NODE_KIND_NORMAL,
NODE_KIND_SENTINEL
};
listElem->next = this;
listElem->prev = this->prev;
this->prev->next = listElem;
this->prev = listElem;
}
explicit LinkedListElement(NodeKind nodeKind)
: mNext(MOZ_THIS_IN_INITIALIZER_LIST()),
mPrev(MOZ_THIS_IN_INITIALIZER_LIST()),
mIsSentinel(nodeKind == NODE_KIND_SENTINEL)
{ }
private:
LinkedListElement& operator=(const LinkedListElement<T>& other) MOZ_DELETE;
LinkedListElement(const LinkedListElement<T>& other) MOZ_DELETE;
/*
* Return |this| cast to T* if we're a normal node, or return nullptr if
* we're a sentinel node.
*/
T* asT()
{
return mIsSentinel ? nullptr : static_cast<T*>(this);
}
const T* asT() const
{
return mIsSentinel ? nullptr : static_cast<const T*>(this);
}
/*
* Insert aElem after this element, but don't check that this element is in
* the list. This is called by LinkedList::insertFront().
*/
void setNextUnsafe(T* aElem)
{
LinkedListElement *listElem = static_cast<LinkedListElement*>(aElem);
MOZ_ASSERT(!listElem->isInList());
listElem->mNext = this->mNext;
listElem->mPrev = this;
this->mNext->mPrev = listElem;
this->mNext = listElem;
}
/*
* Insert aElem before this element, but don't check that this element is in
* the list. This is called by LinkedList::insertBack().
*/
void setPreviousUnsafe(T* aElem)
{
LinkedListElement<T>* listElem = static_cast<LinkedListElement<T>*>(aElem);
MOZ_ASSERT(!listElem->isInList());
listElem->mNext = this;
listElem->mPrev = this->mPrev;
this->mPrev->mNext = listElem;
this->mPrev = listElem;
}
private:
LinkedListElement& operator=(const LinkedListElement<T>& aOther) MOZ_DELETE;
LinkedListElement(const LinkedListElement<T>& aOther) MOZ_DELETE;
};
template<typename T>
class LinkedList
{
private:
LinkedListElement<T> sentinel;
private:
LinkedListElement<T> sentinel;
public:
LinkedList() : sentinel(LinkedListElement<T>::NODE_KIND_SENTINEL) { }
public:
LinkedList() : sentinel(LinkedListElement<T>::NODE_KIND_SENTINEL) { }
LinkedList(LinkedList<T>&& other)
: sentinel(mozilla::Move(other.sentinel))
{ }
LinkedList(LinkedList<T>&& aOther)
: sentinel(mozilla::Move(aOther.sentinel))
{ }
~LinkedList() {
MOZ_ASSERT(isEmpty());
~LinkedList() { MOZ_ASSERT(isEmpty()); }
/*
* Add aElem to the front of the list.
*/
void insertFront(T* aElem)
{
/* Bypass setNext()'s this->isInList() assertion. */
sentinel.setNextUnsafe(aElem);
}
/*
* Add aElem to the back of the list.
*/
void insertBack(T* aElem)
{
sentinel.setPreviousUnsafe(aElem);
}
/*
* Get the first element of the list, or nullptr if the list is empty.
*/
T* getFirst() { return sentinel.getNext(); }
const T* getFirst() const { return sentinel.getNext(); }
/*
* Get the last element of the list, or nullptr if the list is empty.
*/
T* getLast() { return sentinel.getPrevious(); }
const T* getLast() const { return sentinel.getPrevious(); }
/*
* Get and remove the first element of the list. If the list is empty,
* return nullptr.
*/
T* popFirst()
{
T* ret = sentinel.getNext();
if (ret) {
static_cast<LinkedListElement<T>*>(ret)->remove();
}
return ret;
}
/*
* Add elem to the front of the list.
*/
void insertFront(T* elem) {
/* Bypass setNext()'s this->isInList() assertion. */
sentinel.setNextUnsafe(elem);
/*
* Get and remove the last element of the list. If the list is empty,
* return nullptr.
*/
T* popLast()
{
T* ret = sentinel.getPrevious();
if (ret) {
static_cast<LinkedListElement<T>*>(ret)->remove();
}
return ret;
}
/*
* Add elem to the back of the list.
*/
void insertBack(T* elem) {
sentinel.setPreviousUnsafe(elem);
}
/*
* Return true if the list is empty, or false otherwise.
*/
bool isEmpty() const
{
return !sentinel.isInList();
}
/*
* Get the first element of the list, or nullptr if the list is empty.
*/
T* getFirst() {
return sentinel.getNext();
}
const T* getFirst() const {
return sentinel.getNext();
/*
* Remove all the elements from the list.
*
* This runs in time linear to the list's length, because we have to mark
* each element as not in the list.
*/
void clear()
{
while (popFirst()) {
continue;
}
}
/*
* Get the last element of the list, or nullptr if the list is empty.
*/
T* getLast() {
return sentinel.getPrevious();
}
const T* getLast() const {
return sentinel.getPrevious();
/*
* Measures the memory consumption of the list excluding |this|. Note that
* it only measures the list elements themselves. If the list elements
* contain pointers to other memory blocks, those blocks must be measured
* separately during a subsequent iteration over the list.
*/
size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
size_t n = 0;
for (const T* t = getFirst(); t; t = t->getNext()) {
n += aMallocSizeOf(t);
}
return n;
}
/*
* Get and remove the first element of the list. If the list is empty,
* return nullptr.
*/
T* popFirst() {
T* ret = sentinel.getNext();
if (ret)
static_cast<LinkedListElement<T>*>(ret)->remove();
return ret;
}
/*
* Like sizeOfExcludingThis(), but measures |this| as well.
*/
size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf);
}
/*
* Get and remove the last element of the list. If the list is empty,
* return nullptr.
*/
T* popLast() {
T* ret = sentinel.getPrevious();
if (ret)
static_cast<LinkedListElement<T>*>(ret)->remove();
return ret;
}
/*
* Return true if the list is empty, or false otherwise.
*/
bool isEmpty() const {
return !sentinel.isInList();
}
/*
* Remove all the elements from the list.
*
* This runs in time linear to the list's length, because we have to mark
* each element as not in the list.
*/
void clear() {
while (popFirst())
continue;
}
/*
* Measures the memory consumption of the list excluding |this|. Note that
* it only measures the list elements themselves. If the list elements
* contain pointers to other memory blocks, those blocks must be measured
* separately during a subsequent iteration over the list.
*/
size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
size_t n = 0;
for (const T* t = getFirst(); t; t = t->getNext())
n += mallocSizeOf(t);
return n;
}
/*
* Like sizeOfExcludingThis(), but measures |this| as well.
*/
size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const {
return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
}
/*
* In a debug build, make sure that the list is sane (no cycles, consistent
* next/prev pointers, only one sentinel). Has no effect in release builds.
*/
void debugAssertIsSane() const {
/*
* In a debug build, make sure that the list is sane (no cycles, consistent
* mNext/mPrev pointers, only one sentinel). Has no effect in release builds.
*/
void debugAssertIsSane() const
{
#ifdef DEBUG
const LinkedListElement<T>* slow;
const LinkedListElement<T>* fast1;
const LinkedListElement<T>* fast2;
const LinkedListElement<T>* slow;
const LinkedListElement<T>* fast1;
const LinkedListElement<T>* fast2;
/*
* Check for cycles in the forward singly-linked list using the
* tortoise/hare algorithm.
*/
for (slow = sentinel.next,
fast1 = sentinel.next->next,
fast2 = sentinel.next->next->next;
slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel;
slow = slow->next, fast1 = fast2->next, fast2 = fast1->next)
{
MOZ_ASSERT(slow != fast1);
MOZ_ASSERT(slow != fast2);
}
/*
* Check for cycles in the forward singly-linked list using the
* tortoise/hare algorithm.
*/
for (slow = sentinel.mNext,
fast1 = sentinel.mNext->mNext,
fast2 = sentinel.mNext->mNext->mNext;
slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel;
slow = slow->mNext, fast1 = fast2->mNext, fast2 = fast1->mNext) {
MOZ_ASSERT(slow != fast1);
MOZ_ASSERT(slow != fast2);
}
/* Check for cycles in the backward singly-linked list. */
for (slow = sentinel.prev,
fast1 = sentinel.prev->prev,
fast2 = sentinel.prev->prev->prev;
slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel;
slow = slow->prev, fast1 = fast2->prev, fast2 = fast1->prev)
{
MOZ_ASSERT(slow != fast1);
MOZ_ASSERT(slow != fast2);
}
/* Check for cycles in the backward singly-linked list. */
for (slow = sentinel.mPrev,
fast1 = sentinel.mPrev->mPrev,
fast2 = sentinel.mPrev->mPrev->mPrev;
slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel;
slow = slow->mPrev, fast1 = fast2->mPrev, fast2 = fast1->mPrev) {
MOZ_ASSERT(slow != fast1);
MOZ_ASSERT(slow != fast2);
}
/*
* Check that |sentinel| is the only node in the list with
* isSentinel == true.
*/
for (const LinkedListElement<T>* elem = sentinel.next;
elem != &sentinel;
elem = elem->next)
{
MOZ_ASSERT(!elem->isSentinel);
}
/*
* Check that |sentinel| is the only node in the list with
* mIsSentinel == true.
*/
for (const LinkedListElement<T>* elem = sentinel.mNext;
elem != &sentinel;
elem = elem->mNext) {
MOZ_ASSERT(!elem->mIsSentinel);
}
/* Check that the next/prev pointers match up. */
const LinkedListElement<T>* prev = &sentinel;
const LinkedListElement<T>* cur = sentinel.next;
do {
MOZ_ASSERT(cur->prev == prev);
MOZ_ASSERT(prev->next == cur);
/* Check that the mNext/mPrev pointers match up. */
const LinkedListElement<T>* prev = &sentinel;
const LinkedListElement<T>* cur = sentinel.mNext;
do {
MOZ_ASSERT(cur->mPrev == prev);
MOZ_ASSERT(prev->mNext == cur);
prev = cur;
cur = cur->next;
} while (cur != &sentinel);
prev = cur;
cur = cur->mNext;
} while (cur != &sentinel);
#endif /* ifdef DEBUG */
}
}
private:
friend class LinkedListElement<T>;
private:
friend class LinkedListElement<T>;
void assertContains(const T* t) const {
void assertContains(const T* aValue) const {
#ifdef DEBUG
for (const T* elem = getFirst();
elem;
elem = elem->getNext())
{
if (elem == t)
return;
for (const T* elem = getFirst(); elem; elem = elem->getNext()) {
if (elem == aValue) {
return;
}
MOZ_CRASH("element wasn't found in this list!");
#endif
}
MOZ_CRASH("element wasn't found in this list!");
#endif
}
LinkedList& operator=(const LinkedList<T>& other) MOZ_DELETE;
LinkedList(const LinkedList<T>& other) MOZ_DELETE;
LinkedList& operator=(const LinkedList<T>& aOther) MOZ_DELETE;
LinkedList(const LinkedList<T>& aOther) MOZ_DELETE;
};
} /* namespace mozilla */

View File

@ -1,32 +1,32 @@
// ISO C9x compliant inttypes.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006 Alexander Chemeris
//
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [

View File

@ -21,31 +21,31 @@ namespace mozilla {
// Greatest Common Divisor
template<typename IntegerType>
MOZ_ALWAYS_INLINE IntegerType
EuclidGCD(IntegerType a, IntegerType b)
EuclidGCD(IntegerType aA, IntegerType aB)
{
// Euclid's algorithm; O(N) in the worst case. (There are better
// ways, but we don't need them for the current use of this algo.)
MOZ_ASSERT(a > IntegerType(0));
MOZ_ASSERT(b > IntegerType(0));
MOZ_ASSERT(aA > IntegerType(0));
MOZ_ASSERT(aB > IntegerType(0));
while (a != b) {
if (a > b) {
a = a - b;
while (aA != aB) {
if (aA > aB) {
aA = aA - aB;
} else {
b = b - a;
aB = aB - aA;
}
}
return a;
return aA;
}
// Least Common Multiple
template<typename IntegerType>
MOZ_ALWAYS_INLINE IntegerType
EuclidLCM(IntegerType a, IntegerType b)
EuclidLCM(IntegerType aA, IntegerType aB)
{
// Divide first to reduce overflow risk.
return (a / EuclidGCD(a, b)) * b;
return (aA / EuclidGCD(aA, aB)) * aB;
}
namespace detail {
@ -68,7 +68,7 @@ template<> struct AllowDeprecatedAbs<long> : TrueType {};
// to Abs below, and it will be removed when all callers have been changed.
template<typename T>
inline typename mozilla::EnableIf<detail::AllowDeprecatedAbs<T>::value, T>::Type
DeprecatedAbs(const T t)
DeprecatedAbs(const T aValue)
{
// The absolute value of the smallest possible value of a signed-integer type
// won't fit in that type (on twos-complement systems -- and we're blithely
@ -79,10 +79,10 @@ DeprecatedAbs(const T t)
// value in the range [-maxvalue, 0]), then negating (giving a value in the
// range [0, maxvalue]), doesn't produce maxvalue (because in twos-complement,
// (minvalue + 1) == -maxvalue).
MOZ_ASSERT(t >= 0 ||
-(t + 1) != T((1ULL << (CHAR_BIT * sizeof(T) - 1)) - 1),
MOZ_ASSERT(aValue >= 0 ||
-(aValue + 1) != T((1ULL << (CHAR_BIT * sizeof(T) - 1)) - 1),
"You can't negate the smallest possible negative integer!");
return t >= 0 ? t : -t;
return aValue >= 0 ? aValue : -aValue;
}
namespace detail {
@ -116,31 +116,31 @@ template<> struct AbsReturnType<long double> { typedef long double Type; };
template<typename T>
inline typename detail::AbsReturnType<T>::Type
Abs(const T t)
Abs(const T aValue)
{
typedef typename detail::AbsReturnType<T>::Type ReturnType;
return t >= 0 ? ReturnType(t) : ~ReturnType(t) + 1;
return aValue >= 0 ? ReturnType(aValue) : ~ReturnType(aValue) + 1;
}
template<>
inline float
Abs<float>(const float f)
Abs<float>(const float aFloat)
{
return std::fabs(f);
return std::fabs(aFloat);
}
template<>
inline double
Abs<double>(const double d)
Abs<double>(const double aDouble)
{
return std::fabs(d);
return std::fabs(aDouble);
}
template<>
inline long double
Abs<long double>(const long double d)
Abs<long double>(const long double aLongDouble)
{
return std::fabs(d);
return std::fabs(aLongDouble);
}
} // namespace mozilla
@ -164,60 +164,62 @@ namespace detail {
#if defined(MOZ_BITSCAN_WINDOWS)
inline uint_fast8_t
CountLeadingZeroes32(uint32_t u)
{
unsigned long index;
_BitScanReverse(&index, static_cast<unsigned long>(u));
return uint_fast8_t(31 - index);
}
inline uint_fast8_t
CountLeadingZeroes32(uint32_t aValue)
{
unsigned long index;
_BitScanReverse(&index, static_cast<unsigned long>(aValue));
return uint_fast8_t(31 - index);
}
inline uint_fast8_t
CountTrailingZeroes32(uint32_t u)
{
unsigned long index;
_BitScanForward(&index, static_cast<unsigned long>(u));
return uint_fast8_t(index);
}
inline uint_fast8_t
CountTrailingZeroes32(uint32_t aValue)
{
unsigned long index;
_BitScanForward(&index, static_cast<unsigned long>(aValue));
return uint_fast8_t(index);
}
inline uint_fast8_t
CountPopulation32(uint32_t u)
{
uint32_t x = u - ((u >> 1) & 0x55555555);
x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
return (((x + (x >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
}
inline uint_fast8_t
CountPopulation32(uint32_t aValue)
{
uint32_t x = aValue - ((aValue >> 1) & 0x55555555);
x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
return (((x + (x >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
}
inline uint_fast8_t
CountLeadingZeroes64(uint64_t u)
{
# if defined(MOZ_BITSCAN_WINDOWS64)
unsigned long index;
_BitScanReverse64(&index, static_cast<unsigned __int64>(u));
return uint_fast8_t(63 - index);
# else
uint32_t hi = uint32_t(u >> 32);
if (hi != 0)
return CountLeadingZeroes32(hi);
return 32u + CountLeadingZeroes32(uint32_t(u));
# endif
inline uint_fast8_t
CountLeadingZeroes64(uint64_t aValue)
{
#if defined(MOZ_BITSCAN_WINDOWS64)
unsigned long index;
_BitScanReverse64(&index, static_cast<unsigned __int64>(aValue));
return uint_fast8_t(63 - index);
#else
uint32_t hi = uint32_t(aValue >> 32);
if (hi != 0) {
return CountLeadingZeroes32(hi);
}
return 32u + CountLeadingZeroes32(uint32_t(aValue));
#endif
}
inline uint_fast8_t
CountTrailingZeroes64(uint64_t u)
{
# if defined(MOZ_BITSCAN_WINDOWS64)
unsigned long index;
_BitScanForward64(&index, static_cast<unsigned __int64>(u));
return uint_fast8_t(index);
# else
uint32_t lo = uint32_t(u);
if (lo != 0)
return CountTrailingZeroes32(lo);
return 32u + CountTrailingZeroes32(uint32_t(u >> 32));
# endif
inline uint_fast8_t
CountTrailingZeroes64(uint64_t aValue)
{
#if defined(MOZ_BITSCAN_WINDOWS64)
unsigned long index;
_BitScanForward64(&index, static_cast<unsigned __int64>(aValue));
return uint_fast8_t(index);
#else
uint32_t lo = uint32_t(aValue);
if (lo != 0) {
return CountTrailingZeroes32(lo);
}
return 32u + CountTrailingZeroes32(uint32_t(aValue >> 32));
#endif
}
# ifdef MOZ_HAVE_BITSCAN64
# undef MOZ_HAVE_BITSCAN64
@ -233,52 +235,52 @@ namespace detail {
// gcc has had __builtin_clz and friends since 3.4: no need to check.
# endif
inline uint_fast8_t
CountLeadingZeroes32(uint32_t u)
{
return __builtin_clz(u);
}
inline uint_fast8_t
CountLeadingZeroes32(uint32_t aValue)
{
return __builtin_clz(aValue);
}
inline uint_fast8_t
CountTrailingZeroes32(uint32_t u)
{
return __builtin_ctz(u);
}
inline uint_fast8_t
CountTrailingZeroes32(uint32_t aValue)
{
return __builtin_ctz(aValue);
}
inline uint_fast8_t
CountPopulation32(uint32_t u)
{
return __builtin_popcount(u);
}
inline uint_fast8_t
CountPopulation32(uint32_t aValue)
{
return __builtin_popcount(aValue);
}
inline uint_fast8_t
CountLeadingZeroes64(uint64_t u)
{
return __builtin_clzll(u);
}
inline uint_fast8_t
CountLeadingZeroes64(uint64_t aValue)
{
return __builtin_clzll(aValue);
}
inline uint_fast8_t
CountTrailingZeroes64(uint64_t u)
{
return __builtin_ctzll(u);
}
inline uint_fast8_t
CountTrailingZeroes64(uint64_t aValue)
{
return __builtin_ctzll(aValue);
}
#else
# error "Implement these!"
inline uint_fast8_t CountLeadingZeroes32(uint32_t u) MOZ_DELETE;
inline uint_fast8_t CountTrailingZeroes32(uint32_t u) MOZ_DELETE;
inline uint_fast8_t CountPopulation32(uint32_t u) MOZ_DELETE;
inline uint_fast8_t CountLeadingZeroes64(uint64_t u) MOZ_DELETE;
inline uint_fast8_t CountTrailingZeroes64(uint64_t u) MOZ_DELETE;
inline uint_fast8_t CountLeadingZeroes32(uint32_t aValue) MOZ_DELETE;
inline uint_fast8_t CountTrailingZeroes32(uint32_t aValue) MOZ_DELETE;
inline uint_fast8_t CountPopulation32(uint32_t aValue) MOZ_DELETE;
inline uint_fast8_t CountLeadingZeroes64(uint64_t aValue) MOZ_DELETE;
inline uint_fast8_t CountTrailingZeroes64(uint64_t aValue) MOZ_DELETE;
#endif
} // namespace detail
/**
* Compute the number of high-order zero bits in the NON-ZERO number |u|. That
* is, looking at the bitwise representation of the number, with the highest-
* valued bits at the start, return the number of zeroes before the first one
* is observed.
* Compute the number of high-order zero bits in the NON-ZERO number |aValue|.
* That is, looking at the bitwise representation of the number, with the
* highest- valued bits at the start, return the number of zeroes before the
* first one is observed.
*
* CountLeadingZeroes32(0xF0FF1000) is 0;
* CountLeadingZeroes32(0x7F8F0001) is 1;
@ -286,17 +288,17 @@ namespace detail {
* CountLeadingZeroes32(0x1FF50010) is 3; and so on.
*/
inline uint_fast8_t
CountLeadingZeroes32(uint32_t u)
CountLeadingZeroes32(uint32_t aValue)
{
MOZ_ASSERT(u != 0);
return detail::CountLeadingZeroes32(u);
MOZ_ASSERT(aValue != 0);
return detail::CountLeadingZeroes32(aValue);
}
/**
* Compute the number of low-order zero bits in the NON-ZERO number |u|. That
* is, looking at the bitwise representation of the number, with the lowest-
* valued bits at the start, return the number of zeroes before the first one
* is observed.
* Compute the number of low-order zero bits in the NON-ZERO number |aValue|.
* That is, looking at the bitwise representation of the number, with the
* lowest- valued bits at the start, return the number of zeroes before the
* first one is observed.
*
* CountTrailingZeroes32(0x0100FFFF) is 0;
* CountTrailingZeroes32(0x7000FFFE) is 1;
@ -304,35 +306,35 @@ CountLeadingZeroes32(uint32_t u)
* CountTrailingZeroes32(0x0080FFF8) is 3; and so on.
*/
inline uint_fast8_t
CountTrailingZeroes32(uint32_t u)
CountTrailingZeroes32(uint32_t aValue)
{
MOZ_ASSERT(u != 0);
return detail::CountTrailingZeroes32(u);
MOZ_ASSERT(aValue != 0);
return detail::CountTrailingZeroes32(aValue);
}
/**
* Compute the number of one bits in the number |u|,
* Compute the number of one bits in the number |aValue|,
*/
inline uint_fast8_t
CountPopulation32(uint32_t u)
CountPopulation32(uint32_t aValue)
{
return detail::CountPopulation32(u);
return detail::CountPopulation32(aValue);
}
/** Analogous to CountLeadingZeroes32, but for 64-bit numbers. */
inline uint_fast8_t
CountLeadingZeroes64(uint64_t u)
CountLeadingZeroes64(uint64_t aValue)
{
MOZ_ASSERT(u != 0);
return detail::CountLeadingZeroes64(u);
MOZ_ASSERT(aValue != 0);
return detail::CountLeadingZeroes64(aValue);
}
/** Analogous to CountTrailingZeroes32, but for 64-bit numbers. */
inline uint_fast8_t
CountTrailingZeroes64(uint64_t u)
CountTrailingZeroes64(uint64_t aValue)
{
MOZ_ASSERT(u != 0);
return detail::CountTrailingZeroes64(u);
MOZ_ASSERT(aValue != 0);
return detail::CountTrailingZeroes64(aValue);
}
namespace detail {
@ -343,27 +345,29 @@ class CeilingLog2;
template<typename T>
class CeilingLog2<T, 4>
{
public:
static uint_fast8_t compute(const T t) {
// Check for <= 1 to avoid the == 0 undefined case.
return t <= 1 ? 0u : 32u - CountLeadingZeroes32(t - 1);
}
public:
static uint_fast8_t compute(const T aValue)
{
// Check for <= 1 to avoid the == 0 undefined case.
return aValue <= 1 ? 0u : 32u - CountLeadingZeroes32(aValue - 1);
}
};
template<typename T>
class CeilingLog2<T, 8>
{
public:
static uint_fast8_t compute(const T t) {
// Check for <= 1 to avoid the == 0 undefined case.
return t <= 1 ? 0 : 64 - CountLeadingZeroes64(t - 1);
}
public:
static uint_fast8_t compute(const T aValue)
{
// Check for <= 1 to avoid the == 0 undefined case.
return aValue <= 1 ? 0 : 64 - CountLeadingZeroes64(aValue - 1);
}
};
} // namespace detail
/**
* Compute the log of the least power of 2 greater than or equal to |t|.
* Compute the log of the least power of 2 greater than or equal to |aValue|.
*
* CeilingLog2(0..1) is 0;
* CeilingLog2(2) is 1;
@ -373,16 +377,16 @@ class CeilingLog2<T, 8>
*/
template<typename T>
inline uint_fast8_t
CeilingLog2(const T t)
CeilingLog2(const T aValue)
{
return detail::CeilingLog2<T>::compute(t);
return detail::CeilingLog2<T>::compute(aValue);
}
/** A CeilingLog2 variant that accepts only size_t. */
inline uint_fast8_t
CeilingLog2Size(size_t n)
CeilingLog2Size(size_t aValue)
{
return CeilingLog2(n);
return CeilingLog2(aValue);
}
namespace detail {
@ -393,25 +397,27 @@ class FloorLog2;
template<typename T>
class FloorLog2<T, 4>
{
public:
static uint_fast8_t compute(const T t) {
return 31u - CountLeadingZeroes32(t | 1);
}
public:
static uint_fast8_t compute(const T aValue)
{
return 31u - CountLeadingZeroes32(aValue | 1);
}
};
template<typename T>
class FloorLog2<T, 8>
{
public:
static uint_fast8_t compute(const T t) {
return 63u - CountLeadingZeroes64(t | 1);
}
public:
static uint_fast8_t compute(const T aValue)
{
return 63u - CountLeadingZeroes64(aValue | 1);
}
};
} // namespace detail
/**
* Compute the log of the greatest power of 2 less than or equal to |t|.
* Compute the log of the greatest power of 2 less than or equal to |aValue|.
*
* FloorLog2(0..1) is 0;
* FloorLog2(2..3) is 1;
@ -420,16 +426,16 @@ class FloorLog2<T, 8>
*/
template<typename T>
inline uint_fast8_t
FloorLog2(const T t)
FloorLog2(const T aValue)
{
return detail::FloorLog2<T>::compute(t);
return detail::FloorLog2<T>::compute(aValue);
}
/** A FloorLog2 variant that accepts only size_t. */
inline uint_fast8_t
FloorLog2Size(size_t n)
FloorLog2Size(size_t aValue)
{
return FloorLog2(n);
return FloorLog2(aValue);
}
/*
@ -437,11 +443,11 @@ FloorLog2Size(size_t n)
* be so great that the computed value would overflow |size_t|.
*/
inline size_t
RoundUpPow2(size_t x)
RoundUpPow2(size_t aValue)
{
MOZ_ASSERT(x <= (size_t(1) << (sizeof(size_t) * CHAR_BIT - 1)),
MOZ_ASSERT(aValue <= (size_t(1) << (sizeof(size_t) * CHAR_BIT - 1)),
"can't round up -- will overflow!");
return size_t(1) << CeilingLog2(x);
return size_t(1) << CeilingLog2(aValue);
}
/**
@ -449,11 +455,11 @@ RoundUpPow2(size_t x)
*/
template<typename T>
inline T
RotateLeft(const T t, uint_fast8_t shift)
RotateLeft(const T aValue, uint_fast8_t aShift)
{
MOZ_ASSERT(shift < sizeof(T) * CHAR_BIT, "Shift value is too large!");
MOZ_ASSERT(aShift < sizeof(T) * CHAR_BIT, "Shift value is too large!");
static_assert(IsUnsigned<T>::value, "Rotates require unsigned values");
return (t << shift) | (t >> (sizeof(T) * CHAR_BIT - shift));
return (aValue << aShift) | (aValue >> (sizeof(T) * CHAR_BIT - aShift));
}
/**
@ -461,11 +467,11 @@ RotateLeft(const T t, uint_fast8_t shift)
*/
template<typename T>
inline T
RotateRight(const T t, uint_fast8_t shift)
RotateRight(const T aValue, uint_fast8_t aShift)
{
MOZ_ASSERT(shift < sizeof(T) * CHAR_BIT, "Shift value is too large!");
MOZ_ASSERT(aShift < sizeof(T) * CHAR_BIT, "Shift value is too large!");
static_assert(IsUnsigned<T>::value, "Rotates require unsigned values");
return (t >> shift) | (t << (sizeof(T) * CHAR_BIT - shift));
return (aValue >> aShift) | (aValue << (sizeof(T) * CHAR_BIT - aShift));
}
} /* namespace mozilla */

View File

@ -31,131 +31,150 @@ namespace mozilla {
template<class T>
class Maybe
{
AlignedStorage2<T> storage;
bool constructed;
AlignedStorage2<T> storage;
bool constructed;
T& asT() { return *storage.addr(); }
T& asT() { return *storage.addr(); }
public:
Maybe() { constructed = false; }
~Maybe() { if (constructed) asT().~T(); }
public:
Maybe() { constructed = false; }
~Maybe() { if (constructed) { asT().~T(); } }
bool empty() const { return !constructed; }
bool empty() const { return !constructed; }
void construct() {
MOZ_ASSERT(!constructed);
::new (storage.addr()) T();
constructed = true;
void construct()
{
MOZ_ASSERT(!constructed);
::new (storage.addr()) T();
constructed = true;
}
template<class T1>
void construct(const T1& aT1)
{
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(aT1);
constructed = true;
}
template<class T1, class T2>
void construct(const T1& aT1, const T2& aT2)
{
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(aT1, aT2);
constructed = true;
}
template<class T1, class T2, class T3>
void construct(const T1& aT1, const T2& aT2, const T3& aT3)
{
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(aT1, aT2, aT3);
constructed = true;
}
template<class T1, class T2, class T3, class T4>
void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4)
{
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(aT1, aT2, aT3, aT4);
constructed = true;
}
template<class T1, class T2, class T3, class T4, class T5>
void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4,
const T5& aT5)
{
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(aT1, aT2, aT3, aT4, aT5);
constructed = true;
}
template<class T1, class T2, class T3, class T4, class T5, class T6>
void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4,
const T5& aT5, const T6& aT6)
{
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(aT1, aT2, aT3, aT4, aT5, aT6);
constructed = true;
}
template<class T1, class T2, class T3, class T4, class T5, class T6,
class T7>
void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4,
const T5& aT5, const T6& aT6, const T7& aT7)
{
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(aT1, aT2, aT3, aT4, aT5, aT6, aT7);
constructed = true;
}
template<class T1, class T2, class T3, class T4, class T5, class T6,
class T7, class T8>
void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4,
const T5& aT5, const T6& aT6, const T7& aT7, const T8& aT8)
{
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(aT1, aT2, aT3, aT4, aT5, aT6, aT7, aT8);
constructed = true;
}
template<class T1, class T2, class T3, class T4, class T5, class T6,
class T7, class T8, class T9>
void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4,
const T5& aT5, const T6& aT6, const T7& aT7, const T8& aT8,
const T9& aT9)
{
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(aT1, aT2, aT3, aT4, aT5, aT6, aT7, aT8, aT9);
constructed = true;
}
template<class T1, class T2, class T3, class T4, class T5, class T6,
class T7, class T8, class T9, class T10>
void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4,
const T5& aT5, const T6& aT6, const T7& aT7, const T8& aT8,
const T9& aT9, const T10& aT10)
{
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(aT1, aT2, aT3, aT4, aT5, aT6, aT7, aT8, aT9, aT10);
constructed = true;
}
T* addr()
{
MOZ_ASSERT(constructed);
return &asT();
}
T& ref()
{
MOZ_ASSERT(constructed);
return asT();
}
const T& ref() const
{
MOZ_ASSERT(constructed);
return const_cast<Maybe*>(this)->asT();
}
void destroy()
{
ref().~T();
constructed = false;
}
void destroyIfConstructed()
{
if (!empty()) {
destroy();
}
}
template<class T1>
void construct(const T1& t1) {
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(t1);
constructed = true;
}
template<class T1, class T2>
void construct(const T1& t1, const T2& t2) {
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(t1, t2);
constructed = true;
}
template<class T1, class T2, class T3>
void construct(const T1& t1, const T2& t2, const T3& t3) {
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(t1, t2, t3);
constructed = true;
}
template<class T1, class T2, class T3, class T4>
void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4) {
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(t1, t2, t3, t4);
constructed = true;
}
template<class T1, class T2, class T3, class T4, class T5>
void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5) {
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(t1, t2, t3, t4, t5);
constructed = true;
}
template<class T1, class T2, class T3, class T4, class T5,
class T6>
void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5,
const T6& t6) {
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(t1, t2, t3, t4, t5, t6);
constructed = true;
}
template<class T1, class T2, class T3, class T4, class T5,
class T6, class T7>
void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5,
const T6& t6, const T7& t7) {
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(t1, t2, t3, t4, t5, t6, t7);
constructed = true;
}
template<class T1, class T2, class T3, class T4, class T5,
class T6, class T7, class T8>
void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5,
const T6& t6, const T7& t7, const T8& t8) {
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(t1, t2, t3, t4, t5, t6, t7, t8);
constructed = true;
}
template<class T1, class T2, class T3, class T4, class T5,
class T6, class T7, class T8, class T9>
void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5,
const T6& t6, const T7& t7, const T8& t8, const T9& t9) {
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(t1, t2, t3, t4, t5, t6, t7, t8, t9);
constructed = true;
}
template<class T1, class T2, class T3, class T4, class T5,
class T6, class T7, class T8, class T9, class T10>
void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5,
const T6& t6, const T7& t7, const T8& t8, const T9& t9, const T10& t10) {
MOZ_ASSERT(!constructed);
::new (storage.addr()) T(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10);
constructed = true;
}
T* addr() {
MOZ_ASSERT(constructed);
return &asT();
}
T& ref() {
MOZ_ASSERT(constructed);
return asT();
}
const T& ref() const {
MOZ_ASSERT(constructed);
return const_cast<Maybe*>(this)->asT();
}
void destroy() {
ref().~T();
constructed = false;
}
void destroyIfConstructed() {
if (!empty())
destroy();
}
private:
Maybe(const Maybe& other) MOZ_DELETE;
const Maybe& operator=(const Maybe& other) MOZ_DELETE;
private:
Maybe(const Maybe& aOther) MOZ_DELETE;
const Maybe& operator=(const Maybe& aOther) MOZ_DELETE;
};
} // namespace mozilla

View File

@ -46,7 +46,7 @@ class MaybeOneOf
return *(T*)storage.addr();
}
public:
public:
MaybeOneOf() : state(None) {}
~MaybeOneOf() { destroyIfConstructed(); }
@ -56,60 +56,70 @@ class MaybeOneOf
bool constructed() const { return state == Type2State<T>::result; }
template <class T>
void construct() {
void construct()
{
MOZ_ASSERT(state == None);
state = Type2State<T>::result;
::new (storage.addr()) T();
}
template <class T, class U>
void construct(U&& u) {
void construct(U&& aU)
{
MOZ_ASSERT(state == None);
state = Type2State<T>::result;
::new (storage.addr()) T(Move(u));
::new (storage.addr()) T(Move(aU));
}
template <class T, class U1>
void construct(const U1& u1) {
void construct(const U1& aU1)
{
MOZ_ASSERT(state == None);
state = Type2State<T>::result;
::new (storage.addr()) T(u1);
::new (storage.addr()) T(aU1);
}
template <class T, class U1, class U2>
void construct(const U1& u1, const U2& u2) {
void construct(const U1& aU1, const U2& aU2)
{
MOZ_ASSERT(state == None);
state = Type2State<T>::result;
::new (storage.addr()) T(u1, u2);
::new (storage.addr()) T(aU1, aU2);
}
template <class T>
T& ref() {
T& ref()
{
return as<T>();
}
template <class T>
const T& ref() const {
const T& ref() const
{
return as<T>();
}
void destroy() {
void destroy()
{
MOZ_ASSERT(state == SomeT1 || state == SomeT2);
if (state == SomeT1)
if (state == SomeT1) {
as<T1>().~T1();
else if (state == SomeT2)
} else if (state == SomeT2) {
as<T2>().~T2();
}
state = None;
}
void destroyIfConstructed() {
if (!empty())
void destroyIfConstructed()
{
if (!empty()) {
destroy();
}
}
private:
MaybeOneOf(const MaybeOneOf& other) MOZ_DELETE;
const MaybeOneOf& operator=(const MaybeOneOf& other) MOZ_DELETE;
private:
MaybeOneOf(const MaybeOneOf& aOther) MOZ_DELETE;
const MaybeOneOf& operator=(const MaybeOneOf& aOther) MOZ_DELETE;
};
template <class T1, class T2>

View File

@ -5,7 +5,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* Provides a common interface to the ASan (AddressSanitizer) and Valgrind
* Provides a common interface to the ASan (AddressSanitizer) and Valgrind
* functions used to mark memory in certain ways. In detail, the following
* three macros are provided:
*
@ -35,13 +35,13 @@
#include <stddef.h>
extern "C" {
/* These definitions are usually provided through the
* sanitizer/asan_interface.h header installed by ASan.
*/
void __asan_poison_memory_region(void const volatile *addr, size_t size)
__attribute__((visibility("default")));
void __asan_unpoison_memory_region(void const volatile *addr, size_t size)
__attribute__((visibility("default")));
/* These definitions are usually provided through the
* sanitizer/asan_interface.h header installed by ASan.
*/
void __asan_poison_memory_region(void const volatile *addr, size_t size)
__attribute__((visibility("default")));
void __asan_unpoison_memory_region(void const volatile *addr, size_t size)
__attribute__((visibility("default")));
#define MOZ_MAKE_MEM_NOACCESS(addr, size) \
__asan_poison_memory_region((addr), (size))

View File

@ -210,9 +210,9 @@ namespace mozilla {
*/
template<typename T>
inline typename RemoveReference<T>::Type&&
Move(T&& a)
Move(T&& aX)
{
return static_cast<typename RemoveReference<T>::Type&&>(a);
return static_cast<typename RemoveReference<T>::Type&&>(aX);
}
/**
@ -221,28 +221,28 @@ Move(T&& a)
*/
template<typename T>
inline T&&
Forward(typename RemoveReference<T>::Type& a)
Forward(typename RemoveReference<T>::Type& aX)
{
return static_cast<T&&>(a);
return static_cast<T&&>(aX);
}
template<typename T>
inline T&&
Forward(typename RemoveReference<T>::Type&& t)
Forward(typename RemoveReference<T>::Type&& aX)
{
static_assert(!IsLvalueReference<T>::value,
"misuse of Forward detected! try the other overload");
return static_cast<T&&>(t);
return static_cast<T&&>(aX);
}
/** Swap |t| and |u| using move-construction if possible. */
/** Swap |aX| and |aY| using move-construction if possible. */
template<typename T>
inline void
Swap(T& t, T& u)
Swap(T& aX, T& aY)
{
T tmp(Move(t));
t = Move(u);
u = Move(tmp);
T tmp(Move(aX));
aX = Move(aY);
aY = Move(tmp);
}
} // namespace mozilla

View File

@ -92,19 +92,19 @@ struct IsNullPointer { static const bool value = false; };
* (Currently b2g is the only impediment to this.)
*/
#ifdef MOZ_HAVE_CXX11_NULLPTR
// decltype does the right thing for actual nullptr.
namespace mozilla {
typedef decltype(nullptr) NullptrT;
template<>
struct IsNullPointer<decltype(nullptr)> { static const bool value = true; };
}
// decltype does the right thing for actual nullptr.
namespace mozilla {
typedef decltype(nullptr) NullptrT;
template<>
struct IsNullPointer<decltype(nullptr)> { static const bool value = true; };
}
# undef MOZ_HAVE_CXX11_NULLPTR
#elif MOZ_IS_GCC
# define nullptr __null
// void* sweeps up more than just nullptr, but compilers supporting true
// nullptr are the majority now, so they should detect mistakes. If you're
// feeling paranoid, check/assert that your NullptrT equals nullptr.
namespace mozilla { typedef void* NullptrT; }
// void* sweeps up more than just nullptr, but compilers supporting true
// nullptr are the majority now, so they should detect mistakes. If you're
// feeling paranoid, check/assert that your NullptrT equals nullptr.
namespace mozilla { typedef void* NullptrT; }
#else
# error "No compiler support for nullptr or its emulation."
#endif

View File

@ -17,10 +17,10 @@
namespace mozilla {
/**
* The NumericLimits class provides a compatibility layer with std::numeric_limits
* for char16_t, otherwise it is exactly the same as std::numeric_limits.
* Code which does not need std::numeric_limits<char16_t> should avoid using
* NumericLimits.
* The NumericLimits class provides a compatibility layer with
* std::numeric_limits for char16_t, otherwise it is exactly the same as
* std::numeric_limits. Code which does not need std::numeric_limits<char16_t>
* should avoid using NumericLimits.
*/
template<typename T>
class NumericLimits : public std::numeric_limits<T>

View File

@ -24,26 +24,27 @@
namespace mozilla {
/** Set the contents of |t| to 0. */
/** Set the contents of |aT| to 0. */
template<typename T>
static MOZ_ALWAYS_INLINE void
PodZero(T* t)
PodZero(T* aT)
{
memset(t, 0, sizeof(T));
memset(aT, 0, sizeof(T));
}
/** Set the contents of |nelem| elements starting at |t| to 0. */
/** Set the contents of |aNElem| elements starting at |aT| to 0. */
template<typename T>
static MOZ_ALWAYS_INLINE void
PodZero(T* t, size_t nelem)
PodZero(T* aT, size_t aNElem)
{
/*
* This function is often called with 'nelem' small; we use an inline loop
* This function is often called with 'aNElem' small; we use an inline loop
* instead of calling 'memset' with a non-constant length. The compiler
* should inline the memset call with constant size, though.
*/
for (T* end = t + nelem; t < end; t++)
memset(t, 0, sizeof(T));
for (T* end = aT + aNElem; aT < end; aT++) {
memset(aT, 0, sizeof(T));
}
}
/*
@ -54,107 +55,116 @@ PodZero(T* t, size_t nelem)
* compile error involving PodZero and array types, use PodArrayZero instead.
*/
template<typename T, size_t N>
static void PodZero(T (&t)[N]) MOZ_DELETE;
static void PodZero(T (&aT)[N]) MOZ_DELETE;
template<typename T, size_t N>
static void PodZero(T (&t)[N], size_t nelem) MOZ_DELETE;
static void PodZero(T (&aT)[N], size_t aNElem) MOZ_DELETE;
/** Set the contents of the array |t| to zero. */
/** Set the contents of the array |aT| to zero. */
template <class T, size_t N>
static MOZ_ALWAYS_INLINE void
PodArrayZero(T (&t)[N])
PodArrayZero(T (&aT)[N])
{
memset(t, 0, N * sizeof(T));
memset(aT, 0, N * sizeof(T));
}
template <typename T, size_t N>
static MOZ_ALWAYS_INLINE void
PodArrayZero(Array<T, N>& arr)
PodArrayZero(Array<T, N>& aArr)
{
memset(&arr[0], 0, N * sizeof(T));
memset(&aArr[0], 0, N * sizeof(T));
}
/**
* Assign |*src| to |*dst|. The locations must not be the same and must not
* Assign |*aSrc| to |*aDst|. The locations must not be the same and must not
* overlap.
*/
template<typename T>
static MOZ_ALWAYS_INLINE void
PodAssign(T* dst, const T* src)
PodAssign(T* aDst, const T* aSrc)
{
MOZ_ASSERT(dst != src);
MOZ_ASSERT_IF(src < dst, PointerRangeSize(src, static_cast<const T*>(dst)) >= 1);
MOZ_ASSERT_IF(dst < src, PointerRangeSize(static_cast<const T*>(dst), src) >= 1);
memcpy(reinterpret_cast<char*>(dst), reinterpret_cast<const char*>(src), sizeof(T));
MOZ_ASSERT(aDst != aSrc);
MOZ_ASSERT_IF(aSrc < aDst,
PointerRangeSize(aSrc, static_cast<const T*>(aDst)) >= 1);
MOZ_ASSERT_IF(aDst < aSrc,
PointerRangeSize(static_cast<const T*>(aDst), aSrc) >= 1);
memcpy(reinterpret_cast<char*>(aDst), reinterpret_cast<const char*>(aSrc),
sizeof(T));
}
/**
* Copy |nelem| T elements from |src| to |dst|. The two memory ranges must not
* overlap!
* Copy |aNElem| T elements from |aSrc| to |aDst|. The two memory ranges must
* not overlap!
*/
template<typename T>
static MOZ_ALWAYS_INLINE void
PodCopy(T* dst, const T* src, size_t nelem)
PodCopy(T* aDst, const T* aSrc, size_t aNElem)
{
MOZ_ASSERT(dst != src);
MOZ_ASSERT_IF(src < dst, PointerRangeSize(src, static_cast<const T*>(dst)) >= nelem);
MOZ_ASSERT_IF(dst < src, PointerRangeSize(static_cast<const T*>(dst), src) >= nelem);
MOZ_ASSERT(aDst != aSrc);
MOZ_ASSERT_IF(aSrc < aDst,
PointerRangeSize(aSrc, static_cast<const T*>(aDst)) >= aNElem);
MOZ_ASSERT_IF(aDst < aSrc,
PointerRangeSize(static_cast<const T*>(aDst), aSrc) >= aNElem);
if (nelem < 128) {
if (aNElem < 128) {
/*
* Avoid using operator= in this loop, as it may have been
* intentionally deleted by the POD type.
*/
for (const T* srcend = src + nelem; src < srcend; src++, dst++)
PodAssign(dst, src);
for (const T* srcend = aSrc + aNElem; aSrc < srcend; aSrc++, aDst++) {
PodAssign(aDst, aSrc);
}
} else {
memcpy(dst, src, nelem * sizeof(T));
memcpy(aDst, aSrc, aNElem * sizeof(T));
}
}
template<typename T>
static MOZ_ALWAYS_INLINE void
PodCopy(volatile T* dst, const volatile T* src, size_t nelem)
PodCopy(volatile T* aDst, const volatile T* aSrc, size_t aNElem)
{
MOZ_ASSERT(dst != src);
MOZ_ASSERT_IF(src < dst,
PointerRangeSize(src, static_cast<const volatile T*>(dst)) >= nelem);
MOZ_ASSERT_IF(dst < src,
PointerRangeSize(static_cast<const volatile T*>(dst), src) >= nelem);
MOZ_ASSERT(aDst != aSrc);
MOZ_ASSERT_IF(aSrc < aDst,
PointerRangeSize(aSrc, static_cast<const volatile T*>(aDst)) >= aNElem);
MOZ_ASSERT_IF(aDst < aSrc,
PointerRangeSize(static_cast<const volatile T*>(aDst), aSrc) >= aNElem);
/*
* Volatile |dst| requires extra work, because it's undefined behavior to
* Volatile |aDst| requires extra work, because it's undefined behavior to
* modify volatile objects using the mem* functions. Just write out the
* loops manually, using operator= rather than memcpy for the same reason,
* and let the compiler optimize to the extent it can.
*/
for (const volatile T* srcend = src + nelem; src < srcend; src++, dst++)
*dst = *src;
for (const volatile T* srcend = aSrc + aNElem;
aSrc < srcend;
aSrc++, aDst++) {
*aDst = *aSrc;
}
}
/*
* Copy the contents of the array |src| into the array |dst|, both of size N.
* Copy the contents of the array |aSrc| into the array |aDst|, both of size N.
* The arrays must not overlap!
*/
template <class T, size_t N>
static MOZ_ALWAYS_INLINE void
PodArrayCopy(T (&dst)[N], const T (&src)[N])
PodArrayCopy(T (&aDst)[N], const T (&aSrc)[N])
{
PodCopy(dst, src, N);
PodCopy(aDst, aSrc, N);
}
/**
* Copy the memory for |nelem| T elements from |src| to |dst|. If the two
* memory ranges overlap, then the effect is as if the |nelem| elements are
* first copied from |src| to a temporary array, and then from the temporary
* array to |dst|.
* Copy the memory for |aNElem| T elements from |aSrc| to |aDst|. If the two
* memory ranges overlap, then the effect is as if the |aNElem| elements are
* first copied from |aSrc| to a temporary array, and then from the temporary
* array to |aDst|.
*/
template<typename T>
static MOZ_ALWAYS_INLINE void
PodMove(T* dst, const T* src, size_t nelem)
PodMove(T* aDst, const T* aSrc, size_t aNElem)
{
MOZ_ASSERT(nelem <= SIZE_MAX / sizeof(T),
MOZ_ASSERT(aNElem <= SIZE_MAX / sizeof(T),
"trying to move an impossible number of elements");
memmove(dst, src, nelem * sizeof(T));
memmove(aDst, aSrc, aNElem * sizeof(T));
}
/**
@ -170,8 +180,9 @@ PodEqual(const T* one, const T* two, size_t len)
const T* p1 = one;
const T* p2 = two;
for (; p1 < p1end; p1++, p2++) {
if (*p1 != *p2)
if (*p1 != *p2) {
return false;
}
}
return true;
}

View File

@ -42,24 +42,24 @@ uintptr_t gMozillaPoisonSize;
#ifdef _WIN32
static void *
ReserveRegion(uintptr_t region, uintptr_t size)
ReserveRegion(uintptr_t aRegion, uintptr_t aSize)
{
return VirtualAlloc((void *)region, size, MEM_RESERVE, PAGE_NOACCESS);
return VirtualAlloc((void *)aRegion, aSize, MEM_RESERVE, PAGE_NOACCESS);
}
static void
ReleaseRegion(void *region, uintptr_t size)
ReleaseRegion(void *aRegion, uintptr_t aSize)
{
VirtualFree(region, size, MEM_RELEASE);
VirtualFree(aRegion, aSize, MEM_RELEASE);
}
static bool
ProbeRegion(uintptr_t region, uintptr_t size)
ProbeRegion(uintptr_t aRegion, uintptr_t aSize)
{
SYSTEM_INFO sinfo;
GetSystemInfo(&sinfo);
if (region >= (uintptr_t)sinfo.lpMaximumApplicationAddress &&
region + size >= (uintptr_t)sinfo.lpMaximumApplicationAddress) {
if (aRegion >= (uintptr_t)sinfo.lpMaximumApplicationAddress &&
aRegion + aSize >= (uintptr_t)sinfo.lpMaximumApplicationAddress) {
return true;
} else {
return false;
@ -78,7 +78,7 @@ GetDesiredRegionSize()
#elif defined(__OS2__)
static void *
ReserveRegion(uintptr_t region, uintptr_t size)
ReserveRegion(uintptr_t aRegion, uintptr_t aSize)
{
// OS/2 doesn't support allocation at an arbitrary address,
// so return an address that is known to be invalid.
@ -86,13 +86,13 @@ ReserveRegion(uintptr_t region, uintptr_t size)
}
static void
ReleaseRegion(void *region, uintptr_t size)
ReleaseRegion(void *aRegion, uintptr_t aSize)
{
return;
}
static bool
ProbeRegion(uintptr_t region, uintptr_t size)
ProbeRegion(uintptr_t aRegion, uintptr_t aSize)
{
// There's no reliable way to probe an address in the system
// arena other than by touching it and seeing if a trap occurs.
@ -113,21 +113,23 @@ GetDesiredRegionSize()
#include "mozilla/TaggedAnonymousMemory.h"
static void *
ReserveRegion(uintptr_t region, uintptr_t size)
ReserveRegion(uintptr_t aRegion, uintptr_t aSize)
{
return MozTaggedAnonymousMmap(reinterpret_cast<void*>(region), size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0, "poison");
return MozTaggedAnonymousMmap(reinterpret_cast<void*>(aRegion), aSize,
PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0,
"poison");
}
static void
ReleaseRegion(void *region, uintptr_t size)
ReleaseRegion(void *aRegion, uintptr_t aSize)
{
munmap(region, size);
munmap(aRegion, aSize);
}
static bool
ProbeRegion(uintptr_t region, uintptr_t size)
ProbeRegion(uintptr_t aRegion, uintptr_t aSize)
{
if (madvise(reinterpret_cast<void*>(region), size, MADV_NORMAL)) {
if (madvise(reinterpret_cast<void*>(aRegion), aSize, MADV_NORMAL)) {
return true;
} else {
return false;
@ -157,42 +159,42 @@ ReservePoisonArea(uintptr_t rgnsize)
return
(((uintptr_t(0x7FFFFFFFu) << 31) << 1 | uintptr_t(0xF0DEAFFFu))
& ~(rgnsize-1));
} else {
// First see if we can allocate the preferred poison address from the OS.
uintptr_t candidate = (0xF0DEAFFF & ~(rgnsize-1));
void *result = ReserveRegion(candidate, rgnsize);
if (result == (void *)candidate) {
// success - inaccessible page allocated
return candidate;
}
// That didn't work, so see if the preferred address is within a range
// of permanently inacessible memory.
if (ProbeRegion(candidate, rgnsize)) {
// success - selected page cannot be usable memory
if (result != RESERVE_FAILED)
ReleaseRegion(result, rgnsize);
return candidate;
}
// The preferred address is already in use. Did the OS give us a
// consolation prize?
if (result != RESERVE_FAILED) {
return uintptr_t(result);
}
// It didn't, so try to allocate again, without any constraint on
// the address.
result = ReserveRegion(0, rgnsize);
if (result != RESERVE_FAILED) {
return uintptr_t(result);
}
// no usable poison region identified
MOZ_CRASH();
return 0;
}
// First see if we can allocate the preferred poison address from the OS.
uintptr_t candidate = (0xF0DEAFFF & ~(rgnsize-1));
void *result = ReserveRegion(candidate, rgnsize);
if (result == (void *)candidate) {
// success - inaccessible page allocated
return candidate;
}
// That didn't work, so see if the preferred address is within a range
// of permanently inacessible memory.
if (ProbeRegion(candidate, rgnsize)) {
// success - selected page cannot be usable memory
if (result != RESERVE_FAILED) {
ReleaseRegion(result, rgnsize);
}
return candidate;
}
// The preferred address is already in use. Did the OS give us a
// consolation prize?
if (result != RESERVE_FAILED) {
return uintptr_t(result);
}
// It didn't, so try to allocate again, without any constraint on
// the address.
result = ReserveRegion(0, rgnsize);
if (result != RESERVE_FAILED) {
return uintptr_t(result);
}
// no usable poison region identified
MOZ_CRASH();
return 0;
}
void
@ -201,8 +203,8 @@ mozPoisonValueInit()
gMozillaPoisonSize = GetDesiredRegionSize();
gMozillaPoisonBase = ReservePoisonArea(gMozillaPoisonSize);
if (gMozillaPoisonSize == 0) // can't happen
if (gMozillaPoisonSize == 0) { // can't happen
return;
gMozillaPoisonValue = gMozillaPoisonBase + gMozillaPoisonSize/2 - 1;
}
gMozillaPoisonValue = gMozillaPoisonBase + gMozillaPoisonSize / 2 - 1;
}

View File

@ -190,33 +190,33 @@ TestAreIdentical()
static void
TestDoubleExponentComponent()
{
MOZ_RELEASE_ASSERT(ExponentComponent(0.0) == -int_fast16_t(FloatingPoint<double>::ExponentBias));
MOZ_RELEASE_ASSERT(ExponentComponent(-0.0) == -int_fast16_t(FloatingPoint<double>::ExponentBias));
MOZ_RELEASE_ASSERT(ExponentComponent(0.0) == -int_fast16_t(FloatingPoint<double>::kExponentBias));
MOZ_RELEASE_ASSERT(ExponentComponent(-0.0) == -int_fast16_t(FloatingPoint<double>::kExponentBias));
MOZ_RELEASE_ASSERT(ExponentComponent(0.125) == -3);
MOZ_RELEASE_ASSERT(ExponentComponent(0.5) == -1);
MOZ_RELEASE_ASSERT(ExponentComponent(1.0) == 0);
MOZ_RELEASE_ASSERT(ExponentComponent(1.5) == 0);
MOZ_RELEASE_ASSERT(ExponentComponent(2.0) == 1);
MOZ_RELEASE_ASSERT(ExponentComponent(7.0) == 2);
MOZ_RELEASE_ASSERT(ExponentComponent(PositiveInfinity<double>()) == FloatingPoint<double>::ExponentBias + 1);
MOZ_RELEASE_ASSERT(ExponentComponent(NegativeInfinity<double>()) == FloatingPoint<double>::ExponentBias + 1);
MOZ_RELEASE_ASSERT(ExponentComponent(UnspecifiedNaN<double>()) == FloatingPoint<double>::ExponentBias + 1);
MOZ_RELEASE_ASSERT(ExponentComponent(PositiveInfinity<double>()) == FloatingPoint<double>::kExponentBias + 1);
MOZ_RELEASE_ASSERT(ExponentComponent(NegativeInfinity<double>()) == FloatingPoint<double>::kExponentBias + 1);
MOZ_RELEASE_ASSERT(ExponentComponent(UnspecifiedNaN<double>()) == FloatingPoint<double>::kExponentBias + 1);
}
static void
TestFloatExponentComponent()
{
MOZ_RELEASE_ASSERT(ExponentComponent(0.0f) == -int_fast16_t(FloatingPoint<float>::ExponentBias));
MOZ_RELEASE_ASSERT(ExponentComponent(-0.0f) == -int_fast16_t(FloatingPoint<float>::ExponentBias));
MOZ_RELEASE_ASSERT(ExponentComponent(0.0f) == -int_fast16_t(FloatingPoint<float>::kExponentBias));
MOZ_RELEASE_ASSERT(ExponentComponent(-0.0f) == -int_fast16_t(FloatingPoint<float>::kExponentBias));
MOZ_RELEASE_ASSERT(ExponentComponent(0.125f) == -3);
MOZ_RELEASE_ASSERT(ExponentComponent(0.5f) == -1);
MOZ_RELEASE_ASSERT(ExponentComponent(1.0f) == 0);
MOZ_RELEASE_ASSERT(ExponentComponent(1.5f) == 0);
MOZ_RELEASE_ASSERT(ExponentComponent(2.0f) == 1);
MOZ_RELEASE_ASSERT(ExponentComponent(7.0f) == 2);
MOZ_RELEASE_ASSERT(ExponentComponent(PositiveInfinity<float>()) == FloatingPoint<float>::ExponentBias + 1);
MOZ_RELEASE_ASSERT(ExponentComponent(NegativeInfinity<float>()) == FloatingPoint<float>::ExponentBias + 1);
MOZ_RELEASE_ASSERT(ExponentComponent(UnspecifiedNaN<float>()) == FloatingPoint<float>::ExponentBias + 1);
MOZ_RELEASE_ASSERT(ExponentComponent(PositiveInfinity<float>()) == FloatingPoint<float>::kExponentBias + 1);
MOZ_RELEASE_ASSERT(ExponentComponent(NegativeInfinity<float>()) == FloatingPoint<float>::kExponentBias + 1);
MOZ_RELEASE_ASSERT(ExponentComponent(UnspecifiedNaN<float>()) == FloatingPoint<float>::kExponentBias + 1);
}
static void