Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
This commit is contained in:
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions

View File

@@ -0,0 +1,72 @@
# Build rules for the Scudo hardened allocator runtime (compiler-rt component).
add_compiler_rt_component(scudo)

include_directories(..)

set(SCUDO_CFLAGS ${SANITIZER_COMMON_CFLAGS})
# SANITIZER_COMMON_CFLAGS include -fno-builtin, but we actually want builtins!
list(APPEND SCUDO_CFLAGS -fbuiltin)
# Like the other sanitizer runtimes, Scudo is built without RTTI.
append_rtti_flag(OFF SCUDO_CFLAGS)

# Sources for the C runtimes (static and shared).
set(SCUDO_SOURCES
  scudo_allocator.cpp
  scudo_flags.cpp
  scudo_crc32.cpp
  scudo_interceptors.cpp
  scudo_termination.cpp
  scudo_tsd_exclusive.cpp
  scudo_tsd_shared.cpp
  scudo_utils.cpp)
# C++ sources, built into the separate clang_rt.scudo_cxx static runtime below.
set(SCUDO_CXX_SOURCES
  scudo_new_delete.cpp)

# Enable the SSE 4.2 instruction set for scudo_crc32.cpp, if available.
if (COMPILER_RT_HAS_MSSE4_2_FLAG)
  set_source_files_properties(scudo_crc32.cpp PROPERTIES COMPILE_FLAGS -msse4.2)
endif()

# Enable the AArch64 CRC32 feature for scudo_crc32.cpp, if available.
# Note that it is enabled by default starting with armv8.1-a.
if (COMPILER_RT_HAS_MCRC_FLAG)
  set_source_files_properties(scudo_crc32.cpp PROPERTIES COMPILE_FLAGS -mcrc)
endif()

if(COMPILER_RT_HAS_SCUDO)
  # System libraries the shared runtime links against (when present).
  set(SCUDO_DYNAMIC_LIBS ${SANITIZER_COMMON_LINK_LIBS})
  append_list_if(COMPILER_RT_HAS_LIBDL dl SCUDO_DYNAMIC_LIBS)
  append_list_if(COMPILER_RT_HAS_LIBRT rt SCUDO_DYNAMIC_LIBS)
  append_list_if(COMPILER_RT_HAS_LIBPTHREAD pthread SCUDO_DYNAMIC_LIBS)
  append_list_if(COMPILER_RT_HAS_LIBLOG log SCUDO_DYNAMIC_LIBS)

  # Static C runtime.
  add_compiler_rt_runtime(clang_rt.scudo
    STATIC
    ARCHS ${SCUDO_SUPPORTED_ARCH}
    SOURCES ${SCUDO_SOURCES}
    OBJECT_LIBS RTSanitizerCommonNoTermination
                RTSanitizerCommonLibc
                RTInterception
                RTUbsan
    CFLAGS ${SCUDO_CFLAGS}
    PARENT_TARGET scudo)

  # Static C++ runtime (the new/delete interceptors only).
  add_compiler_rt_runtime(clang_rt.scudo_cxx
    STATIC
    ARCHS ${SCUDO_SUPPORTED_ARCH}
    SOURCES ${SCUDO_CXX_SOURCES}
    OBJECT_LIBS RTUbsan_cxx
    CFLAGS ${SCUDO_CFLAGS}
    PARENT_TARGET scudo)

  # Shared runtime, bundling both the C and C++ parts.
  add_compiler_rt_runtime(clang_rt.scudo
    SHARED
    ARCHS ${SCUDO_SUPPORTED_ARCH}
    SOURCES ${SCUDO_SOURCES} ${SCUDO_CXX_SOURCES}
    OBJECT_LIBS RTSanitizerCommonNoTermination
                RTSanitizerCommonLibc
                RTInterception
                RTUbsan
                RTUbsan_cxx
    CFLAGS ${SCUDO_CFLAGS}
    LINK_LIBS ${SCUDO_DYNAMIC_LIBS}
    PARENT_TARGET scudo)
endif()

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,129 @@
//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Header for scudo_allocator.cpp.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_ALLOCATOR_H_
#define SCUDO_ALLOCATOR_H_

#include "scudo_platform.h"

namespace __scudo {

// Origin of an allocation, recorded in the chunk header; used to detect
// deallocation type mismatches (see the DeallocationTypeMismatch flag).
enum AllocType : u8 {
  FromMalloc = 0,    // Memory block came from malloc, realloc, calloc, etc.
  FromNew = 1,       // Memory block came from operator new.
  FromNewArray = 2,  // Memory block came from operator new [].
  FromMemalign = 3,  // Memory block came from memalign, posix_memalign, etc.
};

// Lifecycle state of a chunk, stored in its header.
enum ChunkState : u8 {
  ChunkAvailable = 0,
  ChunkAllocated = 1,
  ChunkQuarantine = 2
};

// Our header requires 64 bits of storage. Having the offset saves us from
// using functions such as GetBlockBegin, that is fairly costly. Our first
// implementation used the MetaData as well, which offers the advantage of
// being stored away from the chunk itself, but accessing it was costly as
// well. The header will be atomically loaded and stored.
typedef u64 PackedHeader;
// Bit-field layout of the packed header; the widths below sum to exactly 64
// bits, which is enforced by the COMPILER_CHECK that follows.
struct UnpackedHeader {
  u64 Checksum : 16;
  u64 ClassId : 8;
  u64 SizeOrUnusedBytes : 20;  // Size for Primary backed allocations, amount of
                               // unused bytes in the chunk for Secondary ones.
  u64 State : 2;               // available, allocated, or quarantined
  u64 AllocType : 2;           // malloc, new, new[], or memalign
  u64 Offset : 16;             // Offset from the beginning of the backend
                               // allocation to the beginning of the chunk
                               // itself, in multiples of MinAlignment. See
                               // comment about its maximum value and in init().
};

typedef atomic_uint64_t AtomicPackedHeader;
COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));

// Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
const uptr MaxAlignmentLog = 24;  // 16 MB
const uptr MinAlignment = 1 << MinAlignmentLog;
const uptr MaxAlignment = 1 << MaxAlignmentLog;

// Header size rounded up to a multiple of MinAlignment, so that the user
// pointer following the header stays suitably aligned.
const uptr ChunkHeaderSize = sizeof(PackedHeader);
const uptr AlignedChunkHeaderSize =
    (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1);

#if SANITIZER_CAN_USE_ALLOCATOR64
// NOTE(review): a kSpaceBeg of ~0ULL appears to request a dynamically chosen
// base for the primary region — confirm against sanitizer_allocator_primary64.
const uptr AllocatorSpace = ~0ULL;
struct AP64 {
  static const uptr kSpaceBeg = AllocatorSpace;
  static const uptr kSpaceSize = AllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else
// 32-bit primary: the mmap range is carved into regions of 2^RegionSizeLog
// bytes, tracked with a byte map.
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<NumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
# endif  // SANITIZER_WORDSIZE
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = RegionSizeLog;
  typedef __scudo::ByteMap ByteMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

// __sanitizer::RoundUp has a CHECK that is extraneous for us. Use our own.
// Boundary must be a power of two for the mask arithmetic to be correct.
INLINE uptr RoundUpTo(uptr Size, uptr Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

// These headers rely on the typedefs and constants above, hence the
// mid-file includes.
#include "scudo_allocator_secondary.h"
#include "scudo_allocator_combined.h"

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
typedef ScudoCombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> ScudoBackendAllocator;

void initScudo();

// Frontend entry points implemented in scudo_allocator.cpp, called from the
// interceptors and the new/delete replacements.
void *scudoMalloc(uptr Size, AllocType Type);
void scudoFree(void *Ptr, AllocType Type);
void scudoSizedFree(void *Ptr, uptr Size, AllocType Type);
void *scudoRealloc(void *Ptr, uptr Size);
void *scudoCalloc(uptr NMemB, uptr Size);
void *scudoMemalign(uptr Alignment, uptr Size);
void *scudoValloc(uptr Size);
void *scudoPvalloc(uptr Size);
int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
void *scudoAlignedAlloc(uptr Alignment, uptr Size);
uptr scudoMallocUsableSize(void *Ptr);

}  // namespace __scudo

#endif  // SCUDO_ALLOCATOR_H_

View File

@@ -0,0 +1,76 @@
//===-- scudo_allocator_combined.h ------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Combined Allocator, dispatches allocation & deallocation requests to
/// the Primary or the Secondary backend allocators.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_ALLOCATOR_COMBINED_H_
#define SCUDO_ALLOCATOR_COMBINED_H_
#ifndef SCUDO_ALLOCATOR_H_
#error "This file must be included inside scudo_allocator.h."
#endif
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>
class ScudoCombinedAllocator {
 public:
  // Sets up both backends and the global stats. The release-to-OS interval
  // is only forwarded to the Primary.
  void init(s32 ReleaseToOSIntervalMs) {
    Primary.Init(ReleaseToOSIntervalMs);
    Secondary.Init();
    Stats.Init();
  }

  // Thread-local cache management, registered against the global stats.
  void initCache(AllocatorCache *Cache) {
    Cache->Init(&Stats);
  }

  void destroyCache(AllocatorCache *Cache) {
    Cache->Destroy(&Primary, &Stats);
  }

  // Primary allocations are always MinAlignment aligned, so no Alignment
  // parameter is needed; the size is implied by the class id.
  void *allocatePrimary(AllocatorCache *Cache, uptr ClassId) {
    return Cache->Allocate(&Primary, ClassId);
  }

  // Secondary allocations bypass the cache but honor an explicit Alignment.
  void *allocateSecondary(uptr Size, uptr Alignment) {
    return Secondary.Allocate(&Stats, Size, Alignment);
  }

  void deallocatePrimary(AllocatorCache *Cache, void *Ptr, uptr ClassId) {
    Cache->Deallocate(&Primary, ClassId, Ptr);
  }

  void deallocateSecondary(void *Ptr) {
    Secondary.Deallocate(&Stats, Ptr);
  }

  // A non-zero ClassId denotes a Primary backed chunk, whose usable size is
  // a function of its size class alone; otherwise ask the Secondary.
  uptr getActuallyAllocatedSize(void *Ptr, uptr ClassId) {
    if (!ClassId)
      return Secondary.GetActuallyAllocatedSize(Ptr);
    return PrimaryAllocator::ClassIdToSize(ClassId);
  }

  void getStats(AllocatorStatCounters StatType) const {
    Stats.Get(StatType);
  }

 private:
  PrimaryAllocator Primary;
  SecondaryAllocator Secondary;
  AllocatorGlobalStats Stats;
};
#endif // SCUDO_ALLOCATOR_COMBINED_H_

View File

@@ -0,0 +1,140 @@
//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocation that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
#define SCUDO_ALLOCATOR_SECONDARY_H_
#ifndef SCUDO_ALLOCATOR_H_
# error "This file must be included inside scudo_allocator.h."
#endif
// Secondary backend: each allocation gets its own mapping, surrounded by two
// guard pages, with the ReservedAddressRange bookkeeping stored immediately
// before the chunk header.
class ScudoLargeMmapAllocator {
 public:
  void Init() {
    PageSizeCached = GetPageSizeCached();
  }

  // Reserves a range large enough for Size bytes plus headers and guard
  // pages, maps the interior, and returns a pointer such that
  // Ptr + AlignedChunkHeaderSize satisfies Alignment.
  void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
    const uptr UserSize = Size - AlignedChunkHeaderSize;
    // The Scudo frontend prevents us from allocating more than
    // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
    uptr MapSize = Size + AlignedReservedAddressRangeSize;
    // Over-reserve so a suitably aligned sub-range is guaranteed to exist.
    if (Alignment > MinAlignment)
      MapSize += Alignment;
    const uptr PageSize = PageSizeCached;
    MapSize = RoundUpTo(MapSize, PageSize);
    // Account for 2 guard pages, one before and one after the chunk.
    MapSize += 2 * PageSize;

    ReservedAddressRange AddressRange;
    uptr MapBeg = AddressRange.Init(MapSize);
    if (MapBeg == ~static_cast<uptr>(0))
      return ReturnNullOrDieOnFailure::OnOOM();
    // A page-aligned pointer is assumed after that, so check it now.
    CHECK(IsAligned(MapBeg, PageSize));
    uptr MapEnd = MapBeg + MapSize;
    // The beginning of the user area for that allocation comes after the
    // initial guard page, and both headers. This is the pointer that has to
    // abide by alignment requirements.
    uptr UserBeg = MapBeg + PageSize + HeadersSize;
    uptr UserEnd = UserBeg + UserSize;

    // In the rare event of larger alignments, we will attempt to fit the mmap
    // area better and unmap extraneous memory. This will also ensure that the
    // offset and unused bytes field of the header stay small.
    if (Alignment > MinAlignment) {
      if (!IsAligned(UserBeg, Alignment)) {
        UserBeg = RoundUpTo(UserBeg, Alignment);
        CHECK_GE(UserBeg, MapBeg);
        uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) -
            PageSize;
        CHECK_GE(NewMapBeg, MapBeg);
        // Trim the unused pages at the front of the reservation.
        if (NewMapBeg != MapBeg) {
          AddressRange.Unmap(MapBeg, NewMapBeg - MapBeg);
          MapBeg = NewMapBeg;
        }
        UserEnd = UserBeg + UserSize;
      }
      // Trim the unused pages at the back, keeping one trailing guard page.
      uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
      if (NewMapEnd != MapEnd) {
        AddressRange.Unmap(NewMapEnd, MapEnd - NewMapEnd);
        MapEnd = NewMapEnd;
      }
      MapSize = MapEnd - MapBeg;
    }

    CHECK_LE(UserEnd, MapEnd - PageSize);
    // Actually mmap the memory, preserving the guard pages on either side
    CHECK_EQ(MapBeg + PageSize,
             AddressRange.Map(MapBeg + PageSize, MapSize - 2 * PageSize));
    const uptr Ptr = UserBeg - AlignedChunkHeaderSize;
    // Stash the range bookkeeping right before the returned chunk so that
    // Deallocate and GetActuallyAllocatedSize can recover it from Ptr.
    ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
    *StoredRange = AddressRange;

    // The primary adds the whole class size to the stats when allocating a
    // chunk, so we will do something similar here. But we will not account for
    // the guard pages.
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
      Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
    }

    return reinterpret_cast<void *>(Ptr);
  }

  void Deallocate(AllocatorStats *Stats, void *Ptr) {
    // Since we're unmapping the entirety of where the ReservedAddressRange
    // actually is, copy onto the stack.
    const uptr PageSize = PageSizeCached;
    ReservedAddressRange AddressRange = *getReservedAddressRange(Ptr);
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Sub(AllocatorStatAllocated, AddressRange.size() - 2 * PageSize);
      Stats->Sub(AllocatorStatMapped, AddressRange.size() - 2 * PageSize);
    }
    AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
                       AddressRange.size());
  }

  // Returns the usable bytes from Ptr up to the trailing guard page.
  uptr GetActuallyAllocatedSize(void *Ptr) {
    ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
    // Deduct PageSize as ReservedAddressRange size includes the trailing guard
    // page.
    uptr MapEnd = reinterpret_cast<uptr>(StoredRange->base()) +
        StoredRange->size() - PageSizeCached;
    return MapEnd - reinterpret_cast<uptr>(Ptr);
  }

 private:
  // The ReservedAddressRange copy lives immediately before the chunk header.
  ReservedAddressRange *getReservedAddressRange(uptr Ptr) {
    return reinterpret_cast<ReservedAddressRange*>(
        Ptr - sizeof(ReservedAddressRange));
  }
  ReservedAddressRange *getReservedAddressRange(const void *Ptr) {
    return getReservedAddressRange(reinterpret_cast<uptr>(Ptr));
  }

  // Rounded up to MinAlignment multiples so the user pointer stays aligned.
  static constexpr uptr AlignedReservedAddressRangeSize =
      (sizeof(ReservedAddressRange) + MinAlignment - 1) & ~(MinAlignment - 1);
  static constexpr uptr HeadersSize =
      AlignedReservedAddressRangeSize + AlignedChunkHeaderSize;

  uptr PageSizeCached;
  SpinMutex StatsMutex;  // Guards the updates to the global Stats counters.
};
#endif // SCUDO_ALLOCATOR_SECONDARY_H_

View File

@@ -0,0 +1,25 @@
//===-- scudo_crc32.cpp -----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// CRC32 function leveraging hardware specific instructions. This has to be
/// kept separated to restrict the use of compiler specific flags to this file.
///
//===----------------------------------------------------------------------===//
#include "scudo_crc32.h"

namespace __scudo {

#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
// Hardware accelerated CRC32, only compiled when the relevant instruction set
// is enabled (see the per-file compile flags in CMakeLists.txt); this is the
// strong definition for the weak declaration in scudo_crc32.h.
u32 computeHardwareCRC32(u32 Crc, uptr Data) {
  return CRC32_INTRINSIC(Crc, Data);
}
#endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)

}  // namespace __scudo

View File

@@ -0,0 +1,101 @@
//===-- scudo_crc32.h -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo chunk header checksum related definitions.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_CRC32_H_
#define SCUDO_CRC32_H_
#include "sanitizer_common/sanitizer_internal_defs.h"
// Hardware CRC32 is supported at compilation via the following:
// - for i386 & x86_64: -msse4.2
// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
// An additional check must be performed at runtime as well to make sure the
// emitted instructions are valid on the target host.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
# ifdef __SSE4_2__
# include <smmintrin.h>
# define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
# endif
# ifdef __ARM_FEATURE_CRC32
# include <arm_acle.h>
# define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
# endif
#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
namespace __scudo {

// Checksum implementation selected at runtime: table-driven software fallback
// or the hardware CRC32 instructions (subject to a runtime CPU check, per the
// note above).
enum : u8 {
  CRC32Software = 0,
  CRC32Hardware = 1,
};

// 256-entry lookup table driving the byte-at-a-time software CRC32 below.
static const u32 CRC32Table[] = {
  0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
  0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
  0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
  0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
  0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
  0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
  0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
  0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
  0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
  0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
  0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
  0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
  0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
  0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
  0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
  0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
  0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
  0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
  0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
  0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
  0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
  0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
  0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
  0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
  0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
  0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
  0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
  0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
  0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
  0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
  0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
  0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
  0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
  0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
  0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
  0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
  0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
  0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
  0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
  0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
  0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
  0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
  0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};
// Software fallback: standard table-driven CRC32, folding the pointer-sized
// Data value into the checksum one byte at a time, least significant first.
INLINE u32 computeSoftwareCRC32(u32 Crc, uptr Data) {
  uptr BytesLeft = sizeof(Data);
  while (BytesLeft--) {
    Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
    Data >>= 8;
  }
  return Crc;
}

SANITIZER_WEAK_ATTRIBUTE u32 computeHardwareCRC32(u32 Crc, uptr Data);

}  // namespace __scudo

#endif  // SCUDO_CRC32_H_

View File

@@ -0,0 +1,121 @@
//===-- scudo_flags.cpp -----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Hardened Allocator flag parsing logic.
///
//===----------------------------------------------------------------------===//
#include "scudo_flags.h"
#include "scudo_utils.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
// Weak definition that instrumented programs may override to provide their
// own default options string (see getScudoDefaultOptions below).
SANITIZER_INTERFACE_WEAK_DEF(const char*, __scudo_default_options, void);

namespace __scudo {

static Flags ScudoFlags;  // Use via getFlags().

// Resets every flag declared in scudo_flags.inc to its compile-time default.
void Flags::setDefaults() {
#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "scudo_flags.inc"
#undef SCUDO_FLAG
}

// Registers every Scudo flag with the given parser so they can be set from
// an options string.
static void RegisterScudoFlags(FlagParser *parser, Flags *f) {
#define SCUDO_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "scudo_flags.inc"
#undef SCUDO_FLAG
}

// Returns the application-provided defaults, or "" when the weak
// __scudo_default_options symbol was not overridden (its address may be null,
// hence the check).
static const char *getScudoDefaultOptions() {
  return (&__scudo_default_options) ? __scudo_default_options() : "";
}
// Parses and validates the Scudo runtime flags.
// Precedence, lowest to highest: compile-time defaults, the (weak)
// __scudo_default_options() string, then the SCUDO_OPTIONS environment
// variable. Dies on inconsistent or out-of-range Quarantine settings.
void initFlags() {
  SetCommonFlagsDefaults();
  {
    CommonFlags cf;
    cf.CopyFrom(*common_flags());
    cf.exitcode = 1;
    OverrideCommonFlags(cf);
  }
  Flags *f = getFlags();
  f->setDefaults();

  FlagParser ScudoParser;
  RegisterScudoFlags(&ScudoParser, f);
  RegisterCommonFlags(&ScudoParser);

  // Override from user-specified string.
  ScudoParser.ParseString(getScudoDefaultOptions());
  // Override from environment.
  ScudoParser.ParseString(GetEnv("SCUDO_OPTIONS"));

  InitializeCommonFlags();

  // Sanity checks and default settings for the Quarantine parameters.

  if (f->QuarantineSizeMb >= 0) {
    // Backward compatible logic if QuarantineSizeMb is set.
    if (f->QuarantineSizeKb >= 0) {
      dieWithMessage("ERROR: please use either QuarantineSizeMb (deprecated) "
                     "or QuarantineSizeKb, but not both\n");
    }
    if (f->QuarantineChunksUpToSize >= 0) {
      // Note: leading space removed from the second literal; the previous
      // concatenation produced "used in  conjunction" (double space).
      dieWithMessage("ERROR: QuarantineChunksUpToSize cannot be used in "
                     "conjunction with the deprecated QuarantineSizeMb option\n");
    }
    // If everything is in order, update QuarantineSizeKb accordingly.
    f->QuarantineSizeKb = f->QuarantineSizeMb * 1024;
  } else {
    // Otherwise proceed with the new options.
    if (f->QuarantineSizeKb < 0) {
      const int DefaultQuarantineSizeKb = FIRST_32_SECOND_64(64, 256);
      f->QuarantineSizeKb = DefaultQuarantineSizeKb;
    }
    if (f->QuarantineChunksUpToSize < 0) {
      const int DefaultQuarantineChunksUpToSize = FIRST_32_SECOND_64(512, 2048);
      f->QuarantineChunksUpToSize = DefaultQuarantineChunksUpToSize;
    }
  }

  // We enforce an upper limit for the chunk quarantine threshold of 4Mb.
  if (f->QuarantineChunksUpToSize > (4 * 1024 * 1024)) {
    dieWithMessage("ERROR: the chunk quarantine threshold is too large\n");
  }

  // We enforce an upper limit for the quarantine size of 32Mb.
  if (f->QuarantineSizeKb > (32 * 1024)) {
    dieWithMessage("ERROR: the quarantine size is too large\n");
  }

  if (f->ThreadLocalQuarantineSizeKb < 0) {
    const int DefaultThreadLocalQuarantineSizeKb = FIRST_32_SECOND_64(16, 64);
    f->ThreadLocalQuarantineSizeKb = DefaultThreadLocalQuarantineSizeKb;
  }
  // And an upper limit of 8Mb for the thread quarantine cache.
  if (f->ThreadLocalQuarantineSizeKb > (8 * 1024)) {
    dieWithMessage("ERROR: the per thread quarantine cache size is too "
                   "large\n");
  }
  // A zero per-thread cache would silently drop quarantined chunks, so it is
  // only allowed when the quarantine itself is disabled.
  if (f->ThreadLocalQuarantineSizeKb == 0 && f->QuarantineSizeKb > 0) {
    dieWithMessage("ERROR: ThreadLocalQuarantineSizeKb can be set to 0 only "
                   "when QuarantineSizeKb is set to 0\n");
  }
}
// Returns the singleton Flags instance (populated by initFlags).
Flags *getFlags() {
  return &ScudoFlags;
}
} // namespace __scudo

View File

@@ -0,0 +1,33 @@
//===-- scudo_flags.h -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Header for scudo_flags.cpp.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_FLAGS_H_
#define SCUDO_FLAGS_H_

namespace __scudo {

// Holder for the runtime flags: one field per SCUDO_FLAG entry declared in
// scudo_flags.inc.
struct Flags {
#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "scudo_flags.inc"
#undef SCUDO_FLAG
  // Resets every field to its scudo_flags.inc default.
  void setDefaults();
};

// Returns the singleton Flags instance.
Flags *getFlags();

// Parses defaults, __scudo_default_options() and SCUDO_OPTIONS, then
// validates the result (see scudo_flags.cpp).
void initFlags();

}  // namespace __scudo

#endif  // SCUDO_FLAGS_H_

View File

@@ -0,0 +1,47 @@
//===-- scudo_flags.inc -----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Hardened Allocator runtime flags.
///
//===----------------------------------------------------------------------===//
// X-macro flag declarations: each entry is SCUDO_FLAG(Type, Name, Default,
// Description), expanded by scudo_flags.h and scudo_flags.cpp.
#ifndef SCUDO_FLAG
# error "Define SCUDO_FLAG prior to including this file!"
#endif

SCUDO_FLAG(int, QuarantineSizeMb, -1,
           "Deprecated. Please use QuarantineSizeKb.")

// Default value is set in scudo_flags.cpp based on architecture.
SCUDO_FLAG(int, QuarantineSizeKb, -1,
           "Size in KB of quarantine used to delay the actual deallocation of "
           "chunks. Lower value may reduce memory usage but decrease the "
           "effectiveness of the mitigation. Defaults to 64KB (32-bit) or "
           "256KB (64-bit)")

// Default value is set in scudo_flags.cpp based on architecture.
SCUDO_FLAG(int, ThreadLocalQuarantineSizeKb, -1,
           "Size in KB of per-thread cache used to offload the global "
           "quarantine. Lower value may reduce memory usage but might increase "
           "the contention on the global quarantine. Defaults to 16KB (32-bit) "
           "or 64KB (64-bit)")

// Default value is set in scudo_flags.cpp based on architecture.
// Note: the literal split below previously rendered as "lower thanor equal
// to" (missing space at the join); fixed.
SCUDO_FLAG(int, QuarantineChunksUpToSize, -1,
           "Size in bytes up to which chunks will be quarantined (if lower "
           "than or equal to). Defaults to 256 (32-bit) or 2048 (64-bit)")

SCUDO_FLAG(bool, DeallocationTypeMismatch, true,
           "Report errors on malloc/delete, new/free, new/delete[], etc.")

SCUDO_FLAG(bool, DeleteSizeMismatch, true,
           "Report errors on mismatch between size of new and delete.")

SCUDO_FLAG(bool, ZeroContents, false,
           "Zero chunk contents on allocation and deallocation.")

View File

@@ -0,0 +1,75 @@
//===-- scudo_interceptors.cpp ----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Linux specific malloc interception functions.
///
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"

#if SANITIZER_LINUX

#include "scudo_allocator.h"

#include "interception/interception.h"

using namespace __scudo;

// Linux malloc-family interceptors: each forwards to the corresponding Scudo
// frontend entry point, tagging the allocation origin (AllocType) so that
// deallocation type mismatches can be reported.

INTERCEPTOR(void, free, void *ptr) {
  scudoFree(ptr, FromMalloc);
}

// cfree is an obsolete glibc alias for free.
INTERCEPTOR(void, cfree, void *ptr) {
  scudoFree(ptr, FromMalloc);
}

INTERCEPTOR(void*, malloc, uptr size) {
  return scudoMalloc(size, FromMalloc);
}

INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
  return scudoRealloc(ptr, size);
}

INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
  return scudoCalloc(nmemb, size);
}

INTERCEPTOR(void*, valloc, uptr size) {
  return scudoValloc(size);
}

INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
  return scudoMemalign(alignment, size);
}

INTERCEPTOR(void*, __libc_memalign, uptr alignment, uptr size) {
  return scudoMemalign(alignment, size);
}

INTERCEPTOR(void*, pvalloc, uptr size) {
  return scudoPvalloc(size);
}

INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
  return scudoAlignedAlloc(alignment, size);
}

INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
  return scudoPosixMemalign(memptr, alignment, size);
}

INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
  return scudoMallocUsableSize(ptr);
}

// mallopt is not supported; nothing is changed and a failure value is
// returned. NOTE(review): glibc's mallopt reports failure with 0, not -1 —
// confirm the -1 here is intentional.
INTERCEPTOR(int, mallopt, int cmd, int value) {
  return -1;
}

#endif  // SANITIZER_LINUX

View File

@@ -0,0 +1,22 @@
//===-- scudo_interface_internal.h ------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Private Scudo interface header.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_INTERFACE_INTERNAL_H_
#define SCUDO_INTERFACE_INTERNAL_H_

extern "C" {
// Sets the RSS limit (in Mb) used by the runtime; HardLimit selects between
// hard and soft enforcement. NOTE(review): semantics inferred from the
// signature only — confirm against the definition in the runtime sources.
SANITIZER_INTERFACE_ATTRIBUTE
void __scudo_set_rss_limit(unsigned long LimitMb, int HardLimit); // NOLINT
}  // extern "C"

#endif  // SCUDO_INTERFACE_INTERNAL_H_

View File

@@ -0,0 +1,74 @@
//===-- scudo_new_delete.cpp ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Interceptors for operators new and delete.
///
//===----------------------------------------------------------------------===//
#include "scudo_allocator.h"
#include "interception/interception.h"
#include <stddef.h>
using namespace __scudo;
// The replacement operators carry the same visibility attribute as the other
// interceptors.
#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE

// Fake std::nothrow_t to avoid including <new>.
namespace std {
struct nothrow_t {};
}  // namespace std

// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
// Throwing operator new variants: on allocation failure the process is
// aborted via DieOnFailure (see the TODO above regarding std::bad_alloc).
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size) {
  void *Ptr = scudoMalloc(size, FromNew);
  if (UNLIKELY(!Ptr))
    DieOnFailure::OnOOM();
  return Ptr;
}

CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size) {
  void *Ptr = scudoMalloc(size, FromNewArray);
  if (UNLIKELY(!Ptr))
    DieOnFailure::OnOOM();
  return Ptr;
}

// Nothrow variants: a null return is part of the contract, so the result of
// scudoMalloc is passed through unchanged.
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size, std::nothrow_t const&) {
  return scudoMalloc(size, FromNew);
}

CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&) {
  return scudoMalloc(size, FromNewArray);
}

// Matching deallocation functions; the FromNew/FromNewArray tags allow the
// runtime to flag deallocation type mismatches.
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT {
  scudoFree(ptr, FromNew);
}

CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr) NOEXCEPT {
  scudoFree(ptr, FromNewArray);
}

CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) NOEXCEPT {
  scudoFree(ptr, FromNew);
}

CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&) NOEXCEPT {
  scudoFree(ptr, FromNewArray);
}

// Sized deallocation: the caller-supplied size is forwarded to scudoSizedFree.
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, size_t size) NOEXCEPT {
  scudoSizedFree(ptr, size, FromNew);
}

CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size) NOEXCEPT {
  scudoSizedFree(ptr, size, FromNewArray);
}

View File

@@ -0,0 +1,80 @@
//===-- scudo_platform.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo platform specific definitions.
/// TODO(kostyak): add tests for the compile time defines.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_PLATFORM_H_
#define SCUDO_PLATFORM_H_

#include "sanitizer_common/sanitizer_allocator.h"

#if !SANITIZER_LINUX && !SANITIZER_FUCHSIA
# error "The Scudo hardened allocator is not supported on this platform."
#endif

// Whether this platform can use one exclusive TSD per thread (not available
// on Android or Fuchsia).
#define SCUDO_TSD_EXCLUSIVE_SUPPORTED (!SANITIZER_ANDROID && !SANITIZER_FUCHSIA)

#ifndef SCUDO_TSD_EXCLUSIVE
// SCUDO_TSD_EXCLUSIVE wasn't defined, use a default TSD model for the platform.
# if SANITIZER_ANDROID || SANITIZER_FUCHSIA
// Android and Fuchsia use a pool of TSDs shared between threads.
# define SCUDO_TSD_EXCLUSIVE 0
# elif SANITIZER_LINUX && !SANITIZER_ANDROID
// Non-Android Linux use an exclusive TSD per thread.
# define SCUDO_TSD_EXCLUSIVE 1
# else
# error "No default TSD model defined for this platform."
# endif  // SANITIZER_ANDROID || SANITIZER_FUCHSIA
#endif  // SCUDO_TSD_EXCLUSIVE

// If the exclusive TSD model is chosen, make sure the platform supports it.
#if SCUDO_TSD_EXCLUSIVE && !SCUDO_TSD_EXCLUSIVE_SUPPORTED
# error "The exclusive TSD model is not supported on this platform."
#endif

// Maximum number of TSDs that can be created for the Shared model.
#ifndef SCUDO_SHARED_TSD_POOL_SIZE
# define SCUDO_SHARED_TSD_POOL_SIZE 32U
#endif  // SCUDO_SHARED_TSD_POOL_SIZE

// The following allows the public interface functions to be disabled.
#ifndef SCUDO_CAN_USE_PUBLIC_INTERFACE
# define SCUDO_CAN_USE_PUBLIC_INTERFACE 1
#endif

namespace __scudo {

// Size of the 64-bit Primary's space (used by AP64 in scudo_allocator.h),
// picked per platform.
#if SANITIZER_CAN_USE_ALLOCATOR64
# if defined(__aarch64__) && SANITIZER_ANDROID
const uptr AllocatorSize = 0x4000000000ULL;   // 256G.
# elif defined(__aarch64__)
const uptr AllocatorSize = 0x10000000000ULL;  // 1T.
# else
const uptr AllocatorSize = 0x40000000000ULL;  // 4T.
# endif
#else
// 32-bit Primary region size log (used by AP32 in scudo_allocator.h).
const uptr RegionSizeLog = SANITIZER_ANDROID ? 19 : 20;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

// Expands SCUDO_SIZE_CLASS_MAP (e.g. Default) into the corresponding
// <Name>SizeClassMap type; the double indirection makes sure the macro
// argument is expanded before token pasting.
#if !defined(SCUDO_SIZE_CLASS_MAP)
# define SCUDO_SIZE_CLASS_MAP Default
#endif
#define SIZE_CLASS_MAP_TYPE SIZE_CLASS_MAP_TYPE_(SCUDO_SIZE_CLASS_MAP)
#define SIZE_CLASS_MAP_TYPE_(T) SIZE_CLASS_MAP_TYPE__(T)
#define SIZE_CLASS_MAP_TYPE__(T) T##SizeClassMap

typedef SIZE_CLASS_MAP_TYPE SizeClassMap;

}  // namespace __scudo

#endif  // SCUDO_PLATFORM_H_

View File

@@ -0,0 +1,42 @@
//===-- scudo_termination.cpp -----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// This file contains bare-bones termination functions to replace the
/// __sanitizer ones, in order to avoid any potential abuse of the callbacks
/// functionality.
///
//===----------------------------------------------------------------------===//
#include "scudo_utils.h"
#include "sanitizer_common/sanitizer_common.h"
namespace __sanitizer {
// Deliberate no-ops: Scudo ignores death callbacks entirely. Success is still
// reported so callers in sanitizer_common proceed normally.
bool AddDieCallback(DieCallbackType Callback) { return true; }
bool RemoveDieCallback(DieCallbackType Callback) { return true; }
void SetUserDieCallback(DieCallbackType Callback) {}
// Terminates the process: aborts if abort_on_error is set in the common
// flags, otherwise exits with the configured exit code.
void NORETURN Die() {
  if (common_flags()->abort_on_error)
    Abort();
  internal__exit(common_flags()->exitcode);
}
// No-op: check-failure callbacks are not supported either.
void SetCheckFailedCallback(CheckFailedCallbackType callback) {}
// CHECK failure handler: reports the failed condition, its location and both
// operand values through Scudo's own fatal error path, then dies.
void NORETURN CheckFailed(const char *File, int Line, const char *Condition,
                          u64 Value1, u64 Value2) {
  __scudo::dieWithMessage("Scudo CHECK failed: %s:%d %s (%lld, %lld)\n",
                          File, Line, Condition, Value1, Value2);
}
} // namespace __sanitizer

View File

@@ -0,0 +1,72 @@
//===-- scudo_tsd.h ---------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo thread specific data definition.
/// Implementation will differ based on the thread local storage primitives
/// offered by the underlying platform.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_TSD_H_
#define SCUDO_TSD_H_
#include "scudo_allocator.h"
#include "scudo_utils.h"
#include <pthread.h>
namespace __scudo {
// Per-thread (exclusive model) or pooled (shared model) allocator state.
// Aligned to 64 bytes, presumably to keep distinct TSDs on separate cache
// lines -- confirm.
struct ALIGNED(64) ScudoTSD {
  // Thread-local cache for the combined allocator.
  AllocatorCache Cache;
  // Backing storage for the quarantine cache.
  // NOTE(review): actual usage lives outside this file -- confirm.
  uptr QuarantineCachePlaceHolder[4];
  void init(bool Shared);
  void commitBack();
  // Attempts to acquire the TSD without blocking. On contention, records the
  // time of the first failed attempt in Precedence (if not already set); the
  // shared model uses this to pick a victim TSD in getTSDAndLockSlow().
  INLINE bool tryLock() {
    if (Mutex.TryLock()) {
      atomic_store_relaxed(&Precedence, 0);
      return true;
    }
    if (atomic_load_relaxed(&Precedence) == 0)
      atomic_store_relaxed(&Precedence, MonotonicNanoTime());
    return false;
  }
  // Blocking acquire; resets Precedence since the contention is resolved.
  INLINE void lock() {
    Mutex.Lock();
    atomic_store_relaxed(&Precedence, 0);
  }
  // Releases the TSD. When UnlockRequired is false nothing is done -- set in
  // init() (not visible here); the exclusive fastpath returns its TSD without
  // locking, so unlock must be a no-op for it -- confirm.
  INLINE void unlock() {
    if (!UnlockRequired)
      return;
    Mutex.Unlock();
  }
  // Timestamp of the first failed tryLock() since last acquisition, or 0.
  INLINE u64 getPrecedence() {
    return atomic_load_relaxed(&Precedence);
  }
 private:
  bool UnlockRequired;
  StaticSpinMutex Mutex;
  atomic_uint64_t Precedence;
};
void initThread(bool MinimalInit);
// TSD model specific fastpath functions definitions.
#include "scudo_tsd_exclusive.inc"
#include "scudo_tsd_shared.inc"
} // namespace __scudo
#endif // SCUDO_TSD_H_

View File

@@ -0,0 +1,68 @@
//===-- scudo_tsd_exclusive.cpp ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo exclusive TSD implementation: each thread owns a thread-local TSD,
/// with a shared, locked fallback TSD for uninitialized or torn-down threads.
///
//===----------------------------------------------------------------------===//
#include "scudo_tsd.h"
#if SCUDO_TSD_EXCLUSIVE
namespace __scudo {
static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
// Key used solely so that teardownThread() runs at thread exit.
static pthread_key_t PThreadKey;
__attribute__((tls_model("initial-exec")))
THREADLOCAL ThreadState ScudoThreadState = ThreadNotInitialized;
__attribute__((tls_model("initial-exec")))
THREADLOCAL ScudoTSD TSD;
// Fallback TSD for when the thread isn't initialized yet or is torn down. It
// can be shared between multiple threads and as such must be locked.
ScudoTSD FallbackTSD;
// pthread TSD destructor. The stored value acts as a countdown: while it is
// greater than 1, the key is re-armed with value - 1 so this destructor runs
// again on the next destruction pass, delaying the real teardown.
static void teardownThread(void *Ptr) {
  uptr I = reinterpret_cast<uptr>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (I > 1) {
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(PThreadKey,
                                   reinterpret_cast<void *>(I - 1)) == 0))
      return;
  }
  // Final pass (or setspecific failure): release the thread's resources and
  // mark the thread torn down so later operations use the fallback TSD.
  TSD.commitBack();
  ScudoThreadState = ThreadTornDown;
}
// One-time global initialization: registers the teardown destructor, then
// initializes the allocator and the shared fallback TSD.
static void initOnce() {
  CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0);
  initScudo();
  FallbackTSD.init(/*Shared=*/true);
}
// Per-thread initialization. With MinimalInit only the global initialization
// runs; the thread keeps ThreadNotInitialized and uses the fallback TSD.
void initThread(bool MinimalInit) {
  CHECK_EQ(pthread_once(&GlobalInitialized, initOnce), 0);
  if (UNLIKELY(MinimalInit))
    return;
  // Arm the destructor countdown so teardownThread fires as late as possible.
  CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
      GetPthreadDestructorIterations())), 0);
  TSD.init(/*Shared=*/false);
  ScudoThreadState = ThreadInitialized;
}
} // namespace __scudo
#endif // SCUDO_TSD_EXCLUSIVE

View File

@@ -0,0 +1,46 @@
//===-- scudo_tsd_exclusive.inc ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo exclusive TSD fastpath functions implementation.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_TSD_H_
# error "This file must be included inside scudo_tsd.h."
#endif // SCUDO_TSD_H_
#if SCUDO_TSD_EXCLUSIVE
// Lifecycle of a thread's exclusive TSD (see scudo_tsd_exclusive.cpp).
enum ThreadState : u8 {
  ThreadNotInitialized = 0,
  ThreadInitialized,
  ThreadTornDown,
};
// Defined in scudo_tsd_exclusive.cpp; the initial-exec TLS model is requested
// for cheaper thread-local access on the allocation fastpath.
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL ThreadState ScudoThreadState;
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL ScudoTSD TSD;
extern ScudoTSD FallbackTSD;
// Fastpath: runs the per-thread initialization on first use only.
ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
  if (LIKELY(ScudoThreadState != ThreadNotInitialized))
    return;
  initThread(MinimalInit);
}
// Returns the TSD to use for the current operation. The thread's exclusive
// TSD is returned without locking (it is owned by this thread); the shared
// FallbackTSD is locked here and released by the caller via unlock().
ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
  if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
    FallbackTSD.lock();
    return &FallbackTSD;
  }
  return &TSD;
}
#endif // SCUDO_TSD_EXCLUSIVE

View File

@@ -0,0 +1,87 @@
//===-- scudo_tsd_shared.cpp ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo shared TSD implementation.
///
//===----------------------------------------------------------------------===//
#include "scudo_tsd.h"
#if !SCUDO_TSD_EXCLUSIVE
namespace __scudo {
static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
pthread_key_t PThreadKey;
// Round-robin cursor for the initial thread-to-TSD assignment.
static atomic_uint32_t CurrentIndex;
// Pool of TSDs shared between all threads; allocated in initOnce().
static ScudoTSD *TSDs;
static u32 NumberOfTSDs;
// One-time initialization: creates the pthread key (no destructor, the key
// only stores the current TSD pointer), initializes the allocator, then maps
// and initializes one TSD per CPU, capped at SCUDO_SHARED_TSD_POOL_SIZE.
static void initOnce() {
  CHECK_EQ(pthread_key_create(&PThreadKey, NULL), 0);
  initScudo();
  NumberOfTSDs = Min(Max(1U, GetNumberOfCPUsCached()),
                     static_cast<u32>(SCUDO_SHARED_TSD_POOL_SIZE));
  TSDs = reinterpret_cast<ScudoTSD *>(
      MmapOrDie(sizeof(ScudoTSD) * NumberOfTSDs, "ScudoTSDs"));
  for (u32 i = 0; i < NumberOfTSDs; i++)
    TSDs[i].init(/*Shared=*/true);
}
// Records the TSD associated with the calling thread: a fast TLS slot on
// Android, pthread thread-specific data everywhere else.
ALWAYS_INLINE void setCurrentTSD(ScudoTSD *TSD) {
#if SANITIZER_ANDROID
  *get_android_tls_ptr() = reinterpret_cast<uptr>(TSD);
#else
  CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(TSD)), 0);
#endif // SANITIZER_ANDROID
}
void initThread(bool MinimalInit) {
  pthread_once(&GlobalInitialized, initOnce);
  // Initial context assignment is done in a plain round-robin fashion.
  u32 Index = atomic_fetch_add(&CurrentIndex, 1, memory_order_relaxed);
  setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
}
// Slow path taken when the thread's current TSD is contended: scan the pool
// for an unlocked TSD first, otherwise block on a chosen victim.
ScudoTSD *getTSDAndLockSlow() {
  ScudoTSD *TSD;
  if (NumberOfTSDs > 1) {
    // Go through all the contexts and find the first unlocked one.
    for (u32 i = 0; i < NumberOfTSDs; i++) {
      TSD = &TSDs[i];
      if (TSD->tryLock()) {
        setCurrentTSD(TSD);
        return TSD;
      }
    }
    // No luck, find the one with the lowest Precedence, and slow lock it.
    // TSDs with a zero Precedence (no recorded contention) are skipped.
    u64 LowestPrecedence = UINT64_MAX;
    for (u32 i = 0; i < NumberOfTSDs; i++) {
      u64 Precedence = TSDs[i].getPrecedence();
      if (Precedence && Precedence < LowestPrecedence) {
        TSD = &TSDs[i];
        LowestPrecedence = Precedence;
      }
    }
    if (LIKELY(LowestPrecedence != UINT64_MAX)) {
      TSD->lock();
      setCurrentTSD(TSD);
      return TSD;
    }
  }
  // Last resort, stick with the current one.
  TSD = getCurrentTSD();
  TSD->lock();
  return TSD;
}
} // namespace __scudo
#endif // !SCUDO_TSD_EXCLUSIVE

View File

@@ -0,0 +1,48 @@
//===-- scudo_tsd_shared.inc ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo shared TSD fastpath functions implementation.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_TSD_H_
# error "This file must be included inside scudo_tsd.h."
#endif // SCUDO_TSD_H_
#if !SCUDO_TSD_EXCLUSIVE
// Defined in scudo_tsd_shared.cpp.
extern pthread_key_t PThreadKey;
// Returns the TSD currently assigned to this thread (null until initThread
// has run -- see initThreadMaybe below).
ALWAYS_INLINE ScudoTSD* getCurrentTSD() {
#if SANITIZER_ANDROID
  return reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr());
#else
  return reinterpret_cast<ScudoTSD *>(pthread_getspecific(PThreadKey));
#endif // SANITIZER_ANDROID
}
// Fastpath: a thread counts as initialized once a TSD has been assigned.
ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
  if (LIKELY(getCurrentTSD()))
    return;
  initThread(MinimalInit);
}
ScudoTSD *getTSDAndLockSlow();
// Returns a locked TSD: tries the thread's assigned TSD first, and falls
// back to scanning the pool (getTSDAndLockSlow) on contention.
ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
  ScudoTSD *TSD = getCurrentTSD();
  CHECK(TSD && "No TSD associated with the current thread!");
  // Try to lock the currently associated context.
  if (TSD->tryLock())
    return TSD;
  // If it failed, go the slow path.
  return getTSDAndLockSlow();
}
#endif // !SCUDO_TSD_EXCLUSIVE

Some files were not shown because too many files have changed in this diff Show More