Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions

.clang-format

@@ -0,0 +1 @@
BasedOnStyle: Google

CMakeLists.txt

@@ -0,0 +1,64 @@
include_directories(..)
set(LSAN_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_rtti_flag(OFF LSAN_CFLAGS)
set(LSAN_COMMON_SOURCES
lsan_common.cc
lsan_common_linux.cc
lsan_common_mac.cc)
set(LSAN_SOURCES
lsan.cc
lsan_allocator.cc
lsan_linux.cc
lsan_interceptors.cc
lsan_mac.cc
lsan_malloc_mac.cc
lsan_preinit.cc
lsan_thread.cc)
set(LSAN_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR})
add_compiler_rt_object_libraries(RTLSanCommon
OS ${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${LSAN_COMMON_SUPPORTED_ARCH}
SOURCES ${LSAN_COMMON_SOURCES}
CFLAGS ${LSAN_CFLAGS})
if(COMPILER_RT_HAS_LSAN)
add_compiler_rt_component(lsan)
if(APPLE)
set(LSAN_LINK_LIBS ${SANITIZER_COMMON_LINK_LIBS})
add_weak_symbols("lsan" WEAK_SYMBOL_LINK_FLAGS)
add_weak_symbols("sanitizer_common" WEAK_SYMBOL_LINK_FLAGS)
add_compiler_rt_runtime(clang_rt.lsan
SHARED
OS ${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${LSAN_SUPPORTED_ARCH}
SOURCES ${LSAN_SOURCES}
OBJECT_LIBS RTLSanCommon
RTInterception
RTSanitizerCommon
RTSanitizerCommonLibc
CFLAGS ${LSAN_CFLAGS}
LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS} ${WEAK_SYMBOL_LINK_FLAGS}
LINK_LIBS ${LSAN_LINK_LIBS}
PARENT_TARGET lsan)
else()
foreach(arch ${LSAN_SUPPORTED_ARCH})
add_compiler_rt_runtime(clang_rt.lsan
STATIC
ARCHS ${arch}
SOURCES ${LSAN_SOURCES}
$<TARGET_OBJECTS:RTInterception.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
$<TARGET_OBJECTS:RTLSanCommon.${arch}>
CFLAGS ${LSAN_CFLAGS}
PARENT_TARGET lsan)
endforeach()
endif()
endif()
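
# Example usage of the runtimes built above (assuming a Linux x86_64 build of
# this tree): standalone leak checking links the static archive
# clang_rt.lsan-x86_64.a via
#   clang -fsanitize=leak test.c && ./a.out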

lsan.cc

@@ -0,0 +1,116 @@
//=-- lsan.cc -------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Standalone LSan RTL.
//
//===----------------------------------------------------------------------===//
#include "lsan.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"
bool lsan_inited;
bool lsan_init_is_running;
namespace __lsan {
///// Interface to the common LSan module. /////
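// Standalone LSan has no shadow memory and thus no notion of poisoned words,
// so this trivially returns false; a hosting tool such as ASan supplies a
// real implementation when LSan runs on top of it.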
bool WordIsPoisoned(uptr addr) {
return false;
}
} // namespace __lsan
using namespace __lsan; // NOLINT
static void InitializeFlags() {
// Set all the default values.
SetCommonFlagsDefaults();
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
cf.malloc_context_size = 30;
cf.intercept_tls_get_addr = true;
cf.detect_leaks = true;
cf.exitcode = 23;
OverrideCommonFlags(cf);
}
Flags *f = flags();
f->SetDefaults();
FlagParser parser;
RegisterLsanFlags(&parser, f);
RegisterCommonFlags(&parser);
// Override from user-specified string.
const char *lsan_default_options = MaybeCallLsanDefaultOptions();
parser.ParseString(lsan_default_options);
parser.ParseString(GetEnv("LSAN_OPTIONS"));
SetVerbosity(common_flags()->verbosity);
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) parser.PrintFlagDescriptions();
}
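// Example of the resulting precedence (assuming a flag set on both sides):
//   LSAN_OPTIONS=verbosity=1:log_threads=1 ./a.out
// Since the environment is parsed after MaybeCallLsanDefaultOptions(), it
// overrides both the hardcoded defaults above and __lsan_default_options().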
static void OnStackUnwind(const SignalContext &sig, const void *,
BufferedStackTrace *stack) {
GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context,
common_flags()->fast_unwind_on_fatal);
}
static void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind,
nullptr);
}
extern "C" void __lsan_init() {
CHECK(!lsan_init_is_running);
if (lsan_inited)
return;
lsan_init_is_running = true;
SanitizerToolName = "LeakSanitizer";
CacheBinaryName();
AvoidCVE_2016_2143();
InitializeFlags();
InitCommonLsan();
InitializeAllocator();
ReplaceSystemMalloc();
InitTlsSize();
InitializeInterceptors();
InitializeThreadRegistry();
InstallDeadlySignalHandlers(LsanOnDeadlySignal);
u32 tid = ThreadCreate(0, 0, true);
CHECK_EQ(tid, 0);
ThreadStart(tid, GetTid());
SetCurrentThread(tid);
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
Atexit(DoLeakCheck);
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
lsan_inited = true;
lsan_init_is_running = false;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
GET_STACK_TRACE_FATAL;
stack.Print();
}

lsan.h

@@ -0,0 +1,67 @@
//=-- lsan.h --------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private header for standalone LSan RTL.
//
//===----------------------------------------------------------------------===//
#include "lsan_thread.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#define GET_STACK_TRACE(max_size, fast) \
__sanitizer::BufferedStackTrace stack; \
GetStackTrace(&stack, max_size, StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), nullptr, fast);
#define GET_STACK_TRACE_FATAL \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_MALLOC \
GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
common_flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_THREAD GET_STACK_TRACE(kStackTraceMax, true)
namespace __lsan {
void InitializeInterceptors();
void ReplaceSystemMalloc();
#define ENSURE_LSAN_INITED do { \
CHECK(!lsan_init_is_running); \
if (!lsan_inited) \
__lsan_init(); \
} while (0)
// Get the stack trace with the given pc and bp.
// The pc will be in position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
ALWAYS_INLINE
void GetStackTrace(__sanitizer::BufferedStackTrace *stack,
__sanitizer::uptr max_depth, __sanitizer::uptr pc,
__sanitizer::uptr bp, void *context, bool fast) {
uptr stack_top = 0, stack_bottom = 0;
ThreadContext *t;
if (fast && (t = CurrentThreadContext())) {
stack_top = t->stack_end();
stack_bottom = t->stack_begin();
}
if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) {
stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
}
}
} // namespace __lsan
extern bool lsan_inited;
extern bool lsan_init_is_running;
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __lsan_init();

lsan_allocator.cc

@@ -0,0 +1,290 @@
//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//
#include "lsan_allocator.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"
extern "C" void *memset(void *ptr, int value, uptr num);
namespace __lsan {
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
static Allocator allocator;
void InitializeAllocator() {
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator.InitLinkerInitialized(
common_flags()->allocator_release_to_os_interval_ms);
}
void AllocatorThreadFinish() {
allocator.SwallowCache(GetAllocatorCache());
}
static ChunkMetadata *Metadata(const void *p) {
return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
if (!p) return;
ChunkMetadata *m = Metadata(p);
CHECK(m);
m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
m->stack_trace_id = StackDepotPut(stack);
m->requested_size = size;
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}
static void RegisterDeallocation(void *p) {
if (!p) return;
ChunkMetadata *m = Metadata(p);
CHECK(m);
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}
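// Note: the atomic stores above flip ChunkMetadata::allocated (deliberately
// the first byte of the metadata), so a chunk only becomes visible to the
// leak scanner once its tag, stack id and size have been recorded, and is
// marked dead before the allocator can reuse it.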
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
bool cleared) {
if (size == 0)
size = 1;
if (size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
return Allocator::FailureHandler::OnBadRequest();
}
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
// Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
RegisterAllocation(stack, p, size);
if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
RunMallocHooks(p, size);
return p;
}
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
return Allocator::FailureHandler::OnBadRequest();
size *= nmemb;
return Allocate(stack, size, 1, true);
}
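// e.g. calloc((uptr)1 << 40, (uptr)1 << 40) would overflow nmemb * size on
// 64-bit targets; CheckForCallocOverflow() reports that to OnBadRequest(),
// which returns null or dies (per allocator_may_return_null) instead of
// allocating a silently truncated buffer.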
void Deallocate(void *p) {
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RunFreeHooks(p);
RegisterDeallocation(p);
allocator.Deallocate(GetAllocatorCache(), p);
}
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment) {
RegisterDeallocation(p);
if (new_size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
allocator.Deallocate(GetAllocatorCache(), p);
return Allocator::FailureHandler::OnBadRequest();
}
p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
RegisterAllocation(stack, p, new_size);
return p;
}
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
*begin = (uptr)GetAllocatorCache();
*end = *begin + sizeof(AllocatorCache);
}
uptr GetMallocUsableSize(const void *p) {
ChunkMetadata *m = Metadata(p);
if (!m) return 0;
return m->requested_size;
}
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
return Allocator::FailureHandler::OnBadRequest();
}
return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}
void *lsan_malloc(uptr size, const StackTrace &stack) {
return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}
void lsan_free(void *p) {
Deallocate(p);
}
void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}
void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
return SetErrnoOnNull(Calloc(nmemb, size, stack));
}
void *lsan_valloc(uptr size, const StackTrace &stack) {
return SetErrnoOnNull(
Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}
uptr lsan_mz_size(const void *p) {
return GetMallocUsableSize(p);
}
///// Interface to the common LSan module. /////
void LockAllocator() {
allocator.ForceLock();
}
void UnlockAllocator() {
allocator.ForceUnlock();
}
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
*begin = (uptr)&allocator;
*end = *begin + sizeof(allocator);
}
uptr PointsIntoChunk(void* p) {
uptr addr = reinterpret_cast<uptr>(p);
uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
if (!chunk) return 0;
// LargeMmapAllocator considers pointers to the meta-region of a chunk to be
// valid, but we don't want that.
if (addr < chunk) return 0;
ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
CHECK(m);
if (!m->allocated)
return 0;
if (addr < chunk + m->requested_size)
return chunk;
if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
return chunk;
return 0;
}
uptr GetUserBegin(uptr chunk) {
return chunk;
}
LsanMetadata::LsanMetadata(uptr chunk) {
metadata_ = Metadata(reinterpret_cast<void *>(chunk));
CHECK(metadata_);
}
bool LsanMetadata::allocated() const {
return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}
ChunkTag LsanMetadata::tag() const {
return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}
void LsanMetadata::set_tag(ChunkTag value) {
reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}
uptr LsanMetadata::requested_size() const {
return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}
u32 LsanMetadata::stack_trace_id() const {
return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
allocator.ForEachChunk(callback, arg);
}
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
void *chunk = allocator.GetBlockBegin(p);
if (!chunk || p < chunk) return kIgnoreObjectInvalid;
ChunkMetadata *m = Metadata(chunk);
CHECK(m);
if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
if (m->tag == kIgnored)
return kIgnoreObjectAlreadyIgnored;
m->tag = kIgnored;
return kIgnoreObjectSuccess;
} else {
return kIgnoreObjectInvalid;
}
}
} // namespace __lsan
using namespace __lsan;
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
uptr stats[AllocatorStatCount];
allocator.GetStats(stats);
return stats[AllocatorStatAllocated];
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
uptr stats[AllocatorStatCount];
allocator.GetStats(stats);
return stats[AllocatorStatMapped];
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
return GetMallocUsableSize(p);
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
#endif
} // extern "C"

lsan_allocator.h

@@ -0,0 +1,103 @@
//=-- lsan_allocator.h ----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Allocator for standalone LSan.
//
//===----------------------------------------------------------------------===//
#ifndef LSAN_ALLOCATOR_H
#define LSAN_ALLOCATOR_H
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "lsan_common.h"
namespace __lsan {
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
bool cleared);
void Deallocate(void *p);
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment);
uptr GetMallocUsableSize(const void *p);
template<typename Callable>
void ForEachChunk(const Callable &callback);
void GetAllocatorCacheRange(uptr *begin, uptr *end);
void AllocatorThreadFinish();
void InitializeAllocator();
const bool kAlwaysClearMemory = true;
struct ChunkMetadata {
u8 allocated : 8; // Must be first.
ChunkTag tag : 2;
#if SANITIZER_WORDSIZE == 64
uptr requested_size : 54;
#else
uptr requested_size : 32;
uptr padding : 22;
#endif
u32 stack_trace_id;
};
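// Layout note: on 64-bit targets the bitfields above (8 + 2 + 54 bits) pack
// into a single word, and with the trailing u32 the whole struct should fit
// in 16 bytes of per-chunk metadata (kMetadataSize below).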
#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
defined(__arm__)
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = sizeof(ChunkMetadata);
typedef __sanitizer::CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = __lsan::kRegionSizeLog;
typedef __lsan::ByteMap ByteMap;
typedef NoOpMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__) || defined(__powerpc64__)
# if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# endif
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = sizeof(ChunkMetadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef NoOpMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
AllocatorCache *GetAllocatorCache();
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
void *lsan_malloc(uptr size, const StackTrace &stack);
void lsan_free(void *p);
void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
void *lsan_valloc(uptr size, const StackTrace &stack);
uptr lsan_mz_size(const void *p);
} // namespace __lsan
#endif // LSAN_ALLOCATOR_H

lsan_common.cc: diff suppressed because it is too large

lsan_common.h

@@ -0,0 +1,264 @@
//=-- lsan_common.h -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private LSan header.
//
//===----------------------------------------------------------------------===//
#ifndef LSAN_COMMON_H
#define LSAN_COMMON_H
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
// LeakSanitizer relies on some of glibc's internals (e.g. the TLS machinery)
// and is thus mainly supported on Linux. Also, LSan doesn't work well on
// 32-bit architectures, because their "small" (4-byte) pointers lead to a
// high false-negative ratio on large leaks. But we still want to have it for
// some 32-bit arches (e.g. x86), see
// https://github.com/google/sanitizers/issues/403.
// To enable LeakSanitizer on a new architecture, one needs to implement the
// internal_clone function as well as (probably) adjust the TLS machinery for
// the new architecture inside the sanitizer library.
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
(SANITIZER_WORDSIZE == 64) && \
(defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
defined(__powerpc64__))
#define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && \
(SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
#define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && \
SANITIZER_LINUX && !SANITIZER_ANDROID
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
#endif
namespace __sanitizer {
class FlagParser;
struct DTLS;
}
namespace __lsan {
// Chunk tags.
enum ChunkTag {
kDirectlyLeaked = 0, // default
kIndirectlyLeaked = 1,
kReachable = 2,
kIgnored = 3
};
const u32 kInvalidTid = (u32) -1;
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG
void SetDefaults();
uptr pointer_alignment() const {
return use_unaligned ? 1 : sizeof(uptr);
}
};
extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);
struct Leak {
u32 id;
uptr hit_count;
uptr total_size;
u32 stack_trace_id;
bool is_directly_leaked;
bool is_suppressed;
};
struct LeakedObject {
u32 leak_id;
uptr addr;
uptr size;
};
// Aggregates leaks by stack trace prefix.
class LeakReport {
public:
LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
ChunkTag tag);
void ReportTopLeaks(uptr max_leaks);
void PrintSummary();
void ApplySuppressions();
uptr UnsuppressedLeakCount();
private:
void PrintReportForLeak(uptr index);
void PrintLeakedObjectsForLeak(uptr index);
u32 next_id_;
InternalMmapVector<Leak> leaks_;
InternalMmapVector<LeakedObject> leaked_objects_;
};
typedef InternalMmapVector<uptr> Frontier;
// Platform-specific functions.
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
struct RootRegion {
uptr begin;
uptr size;
};
InternalMmapVector<RootRegion> const *GetRootRegions();
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
uptr region_begin, uptr region_end, bool is_readable);
// Run stoptheworld while holding any platform-specific locks.
void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag);
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
enum IgnoreObjectResult {
kIgnoreObjectSuccess,
kIgnoreObjectAlreadyIgnored,
kIgnoreObjectInvalid
};
// Functions called from the parent tool.
const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
void DoLeakCheck();
void DoRecoverableLeakCheckVoid();
void DisableCounterUnderflow();
bool DisabledInThisThread();
// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
// Can be used to ignore memory allocated by an intercepted
// function.
struct ScopedInterceptorDisabler {
ScopedInterceptorDisabler() { DisableInThisThread(); }
~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
// According to the Itanium C++ ABI, the array cookie is a single word
// containing the size of the allocated array.
static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
uptr addr) {
return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
*reinterpret_cast<uptr *>(chunk_beg) == 0;
}
// According to the ARM C++ ABI, the array cookie consists of two words:
// struct array_cookie {
// std::size_t element_size; // element_size != 0
// std::size_t element_count;
// };
static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
uptr addr) {
return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
*reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
}
// Special case for "new T[0]" where T is a type with a destructor.
// new T[0] allocates a cookie (one or two words) holding the array size (0)
// and returns a pointer to the end of the allocated chunk. The actual cookie
// layout varies between platforms according to their C++ ABI implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
uptr addr) {
#if defined(__arm__)
return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
#else
return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
#endif
}
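// Worked example (Itanium ABI): for "T *p = new T[0]" the compiler allocates
// a sizeof(uptr) chunk, stores the element count 0 in it, and returns
// chunk_beg + sizeof(uptr), i.e. one past the end of the chunk. Such a
// pointer fails the usual "addr < chunk + requested_size" test in
// PointsIntoChunk(), so this cookie check is what keeps the chunk reachable.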
// The following must be implemented in the parent tool.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls);
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle cases
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);
// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();
// Return true if LSan has finished leak checking and reported leaks.
bool HasReportedLeaks();
// Run platform-specific leak handlers.
void HandleLeaks();
// Wrapper for chunk metadata operations.
class LsanMetadata {
public:
// Constructor accepts address of user-visible chunk.
explicit LsanMetadata(uptr chunk);
bool allocated() const;
ChunkTag tag() const;
void set_tag(ChunkTag value);
uptr requested_size() const;
u32 stack_trace_id() const;
private:
void *metadata_;
};
} // namespace __lsan
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();
} // extern "C"
#endif // LSAN_COMMON_H

lsan_common_linux.cc

@@ -0,0 +1,140 @@
//=-- lsan_common_linux.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Linux-specific code.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#include "lsan_common.h"
#if CAN_SANITIZE_LEAKS && SANITIZER_LINUX
#include <link.h>
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_getauxval.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __lsan {
static const char kLinkerName[] = "ld";
static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = nullptr;
static bool IsLinker(const LoadedModule& module) {
#if SANITIZER_USE_GETAUXVAL
return module.base_address() == getauxval(AT_BASE);
#else
return LibraryNameIs(module.full_name(), kLinkerName);
#endif // SANITIZER_USE_GETAUXVAL
}
__attribute__((tls_model("initial-exec")))
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
void EnableInThisThread() {
if (disable_counter == 0) {
DisableCounterUnderflow();
}
disable_counter--;
}
void InitializePlatformSpecificModules() {
ListOfModules modules;
modules.init();
for (LoadedModule &module : modules) {
if (!IsLinker(module))
continue;
if (linker == nullptr) {
linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
*linker = module;
module = LoadedModule();
} else {
VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
"TLS and other allocations originating from linker might be "
"falsely reported as leaks.\n", kLinkerName);
linker->clear();
linker = nullptr;
return;
}
}
if (linker == nullptr) {
VReport(1, "LeakSanitizer: Dynamic linker not found. TLS and other "
"allocations originating from linker might be falsely reported "
"as leaks.\n");
}
}
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
void *data) {
Frontier *frontier = reinterpret_cast<Frontier *>(data);
for (uptr j = 0; j < info->dlpi_phnum; j++) {
const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
// We're looking for .data and .bss sections, which reside in writeable,
// loadable segments.
if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
(phdr->p_memsz == 0))
continue;
uptr begin = info->dlpi_addr + phdr->p_vaddr;
uptr end = begin + phdr->p_memsz;
ScanGlobalRange(begin, end, frontier);
}
return 0;
}
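// e.g. for a typical ELF module, the writable PT_LOAD segment holding .data
// and .bss passes the filter above, while the read-only, executable text
// segment is skipped.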
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
if (!flags()->use_globals) return;
dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}
LoadedModule *GetLinker() { return linker; }
void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
struct DoStopTheWorldParam {
StopTheWorldCallback callback;
void *argument;
};
// While calling Die() here is undefined behavior and can potentially
// cause race conditions, it isn't possible to intercept exit on Linux,
// so we have no choice but to call Die() from the atexit handler.
void HandleLeaks() {
if (common_flags()->exitcode) Die();
}
static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
void *data) {
DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
StopTheWorld(param->callback, param->argument);
return 1;
}
// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
// of the threads is frozen while holding the libdl lock, the tracer will hang
// in dl_iterate_phdr() forever.
// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
// tracer task and the thread that spawned it. Thus, if we run the tracer task
// while holding the libdl lock in the parent thread, we can safely reenter it
// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
// callback in the parent thread.
void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
DoStopTheWorldParam param = {callback, argument};
dl_iterate_phdr(DoStopTheWorldCallback, &param);
}
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX

lsan_common_mac.cc

@@ -0,0 +1,199 @@
//=-- lsan_common_mac.cc --------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Darwin-specific code.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#include "lsan_common.h"
#if CAN_SANITIZE_LEAKS && SANITIZER_MAC
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "lsan_allocator.h"
#include <pthread.h>
#include <mach/mach.h>
namespace __lsan {
typedef struct {
int disable_counter;
u32 current_thread_id;
AllocatorCache cache;
} thread_local_data_t;
static pthread_key_t key;
static pthread_once_t key_once = PTHREAD_ONCE_INIT;
// The main thread's destructor requires the current thread id, so we can't
// destroy the TLS data until it has been used and reset to the invalid tid.
void restore_tid_data(void *ptr) {
thread_local_data_t *data = (thread_local_data_t *)ptr;
if (data->current_thread_id != kInvalidTid)
pthread_setspecific(key, data);
}
static void make_tls_key() {
CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
}
static thread_local_data_t *get_tls_val(bool alloc) {
pthread_once(&key_once, make_tls_key);
thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
if (ptr == NULL && alloc) {
ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
ptr->disable_counter = 0;
ptr->current_thread_id = kInvalidTid;
ptr->cache = AllocatorCache();
pthread_setspecific(key, ptr);
}
return ptr;
}
bool DisabledInThisThread() {
thread_local_data_t *data = get_tls_val(false);
return data ? data->disable_counter > 0 : false;
}
void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }
void EnableInThisThread() {
int *disable_counter = &get_tls_val(true)->disable_counter;
if (*disable_counter == 0) {
DisableCounterUnderflow();
}
--*disable_counter;
}
u32 GetCurrentThread() {
thread_local_data_t *data = get_tls_val(false);
return data ? data->current_thread_id : kInvalidTid;
}
void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
LoadedModule *GetLinker() { return nullptr; }
// Required on Linux for initialization of TLS behavior, but should not be
// required on Darwin.
void InitializePlatformSpecificModules() {}
// Sections which can't contain global pointers. This list errs on the
// side of caution to avoid false positives, at the expense of performance.
//
// Other potentially safe sections include:
// __all_image_info, __crash_info, __const, __got, __interpose, __objc_msg_break
//
// Sections which definitely cannot be included here are:
// __objc_data, __objc_const, __data, __bss, __common, __thread_data,
// __thread_bss, __thread_vars, __objc_opt_rw, __objc_opt_ptrs
static const char *kSkippedSecNames[] = {
"__cfstring", "__la_symbol_ptr", "__mod_init_func",
"__mod_term_func", "__nl_symbol_ptr", "__objc_classlist",
"__objc_classrefs", "__objc_imageinfo", "__objc_nlclslist",
"__objc_protolist", "__objc_selrefs", "__objc_superrefs"};
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
// "name" is a pointer, so ARRAY_SIZE() can't validate it; check the length.
for (auto name : kSkippedSecNames)
  CHECK(internal_strnlen(name, kMaxSegName + 1) <= kMaxSegName);
MemoryMappingLayout memory_mapping(false);
InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128);
memory_mapping.DumpListOfModules(&modules);
for (uptr i = 0; i < modules.size(); ++i) {
// Even when global scanning is disabled, we still need to scan
// system libraries for stashed pointers.
if (!flags()->use_globals && modules[i].instrumented()) continue;
for (const __sanitizer::LoadedModule::AddressRange &range :
modules[i].ranges()) {
// Sections storing global variables are writable and non-executable
if (range.executable || !range.writable) continue;
bool is_skipped_sec = false;
for (auto name : kSkippedSecNames)
  if (!internal_strcmp(range.name, name)) { is_skipped_sec = true; break; }
if (is_skipped_sec) continue;
ScanGlobalRange(range.beg, range.end, frontier);
}
}
}
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
mach_port_name_t port;
if (task_for_pid(mach_task_self(), internal_getpid(), &port)
!= KERN_SUCCESS) {
return;
}
unsigned depth = 1;
vm_size_t size = 0;
vm_address_t address = 0;
kern_return_t err = KERN_SUCCESS;
mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
InternalMmapVector<RootRegion> const *root_regions = GetRootRegions();
while (err == KERN_SUCCESS) {
struct vm_region_submap_info_64 info;
err = vm_region_recurse_64(port, &address, &size, &depth,
(vm_region_info_t)&info, &count);
uptr end_address = address + size;
// libxpc stashes some pointers in the Kernel Alloc Once page,
// make sure not to report those as leaks.
if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
kReachable);
// Recursing over the full memory map is very slow, break out
// early if we don't need the full iteration.
if (!flags()->use_root_regions || !root_regions->size())
break;
}
// This additional root region scan is required on Darwin in order to
// detect root regions contained within mmap'd memory regions, because
// the Darwin implementation of sanitizer_procmaps traverses images
// as loaded by dyld, and not the complete set of all memory regions.
//
// TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
// behavior as sanitizer_procmaps_linux and traverses all memory regions
if (flags()->use_root_regions) {
for (uptr i = 0; i < root_regions->size(); i++) {
ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
info.protection & kProtectionRead);
}
}
address = end_address;
}
}
// On Darwin, we can intercept _exit gracefully, and return a failing exit code
// if required at that point. Calling Die() here is undefined behavior and
// causes rare race conditions.
void HandleLeaks() {}
void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
StopTheWorld(callback, argument);
}
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS && SANITIZER_MAC

lsan_flags.inc

@@ -0,0 +1,47 @@
//===-- lsan_flags.inc ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// LSan runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef LSAN_FLAG
# error "Define LSAN_FLAG prior to including this file!"
#endif
// LSAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
LSAN_FLAG(bool, report_objects, false,
"Print addresses of leaked objects after main leak report.")
LSAN_FLAG(
int, resolution, 0,
"Aggregate two objects into one leak if this many stack frames match. If "
"zero, the entire stack trace must match.")
LSAN_FLAG(int, max_leaks, 0, "If nonzero, the maximum number of leaks to be reported.")
// Flags controlling the root set of reachable memory.
LSAN_FLAG(bool, use_globals, true,
"Root set: include global variables (.data and .bss)")
LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
LSAN_FLAG(bool, use_tls, true,
"Root set: include TLS and thread-specific storage")
LSAN_FLAG(bool, use_root_regions, true,
"Root set: include regions added via __lsan_register_root_region().")
LSAN_FLAG(bool, use_ld_allocations, true,
"Root set: mark as reachable all allocations made from dynamic "
"linker. This was the old way to handle dynamic TLS, and will "
"be removed soon. Do not use this flag.")
LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
LSAN_FLAG(bool, use_poisoned, false,
"Consider pointers found in poisoned memory to be valid.")
LSAN_FLAG(bool, log_pointers, false, "Debug logging")
LSAN_FLAG(bool, log_threads, false, "Debug logging")
LSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
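// Example (hypothetical file names): to suppress all leaks attributed to a
// given library, run with
//   LSAN_OPTIONS=suppressions=supp.txt ./a.out
// where supp.txt contains a line such as:
//   leak:libfoo.so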

lsan_interceptors.cc

@@ -0,0 +1,455 @@
//=-- lsan_interceptors.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Interceptors for standalone LSan.
//
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"
#include <stddef.h>
using namespace __lsan;
extern "C" {
int pthread_attr_init(void *attr);
int pthread_attr_destroy(void *attr);
int pthread_attr_getdetachstate(void *attr, int *v);
int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
///// Malloc/free interceptors. /////
namespace std {
struct nothrow_t;
enum class align_val_t: size_t;
}
#if !SANITIZER_MAC
INTERCEPTOR(void*, malloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return lsan_malloc(size, stack);
}
INTERCEPTOR(void, free, void *p) {
ENSURE_LSAN_INITED;
lsan_free(p);
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (lsan_init_is_running) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const uptr kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
static uptr allocated;
uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
void *mem = (void*)&calloc_memory_for_dlsym[allocated];
allocated += size_in_words;
CHECK(allocated < kCallocPoolSize);
return mem;
}
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return lsan_calloc(nmemb, size, stack);
}
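// Note: the dlsym pool above is never freed; at 1024 words it only needs to
// survive the few small calloc calls dlsym performs during startup, and the
// CHECK aborts if that assumption is ever violated.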
INTERCEPTOR(void*, realloc, void *q, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return lsan_realloc(q, size, stack);
}
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
*memptr = lsan_memalign(alignment, size, stack);
// FIXME: Return ENOMEM if user requested more than max alloc size.
return 0;
}
INTERCEPTOR(void*, valloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return lsan_valloc(size, stack);
}
#endif
#if SANITIZER_INTERCEPT_MEMALIGN
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return lsan_memalign(alignment, size, stack);
}
#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
void *res = lsan_memalign(alignment, size, stack);
DTLS_on_libc_memalign(res, size);
return res;
}
#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign)
#else
#define LSAN_MAYBE_INTERCEPT_MEMALIGN
#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
#endif // SANITIZER_INTERCEPT_MEMALIGN
#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return lsan_memalign(alignment, size, stack);
}
#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
#else
#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC
#endif
#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
ENSURE_LSAN_INITED;
return GetMallocUsableSize(ptr);
}
#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \
INTERCEPT_FUNCTION(malloc_usable_size)
#else
#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
#endif
#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
struct fake_mallinfo {
int x[10];
};
INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
struct fake_mallinfo res;
internal_memset(&res, 0, sizeof(res));
return res;
}
#define LSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
INTERCEPTOR(int, mallopt, int cmd, int value) {
return -1;
}
#define LSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt)
#else
#define LSAN_MAYBE_INTERCEPT_MALLINFO
#define LSAN_MAYBE_INTERCEPT_MALLOPT
#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
#if SANITIZER_INTERCEPT_PVALLOC
INTERCEPTOR(void*, pvalloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
size = PageSize;
}
return Allocate(stack, size, PageSize, kAlwaysClearMemory);
}
#define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
#else
#define LSAN_MAYBE_INTERCEPT_PVALLOC
#endif // SANITIZER_INTERCEPT_PVALLOC
#if SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
#define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
#else
#define LSAN_MAYBE_INTERCEPT_CFREE
#endif // SANITIZER_INTERCEPT_CFREE
#if SANITIZER_INTERCEPT_MCHECK_MPROBE
INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) {
return 0;
}
INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) {
return 0;
}
INTERCEPTOR(int, mprobe, void *ptr) {
return 0;
}
#endif // SANITIZER_INTERCEPT_MCHECK_MPROBE
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
#define OPERATOR_NEW_BODY(nothrow) \
ENSURE_LSAN_INITED; \
GET_STACK_TRACE_MALLOC; \
void *res = lsan_malloc(size, stack); \
if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \
return res;
#define OPERATOR_NEW_BODY_ALIGN(nothrow) \
ENSURE_LSAN_INITED; \
GET_STACK_TRACE_MALLOC; \
void *res = lsan_memalign((uptr)align, size, stack); \
if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \
return res;
#define OPERATOR_DELETE_BODY \
ENSURE_LSAN_INITED; \
lsan_free(ptr);
// On OS X it's not enough to just provide our own 'operator new' and
// 'operator delete' implementations, because they're going to be in the
// runtime dylib, and the main executable will depend on both the runtime
// dylib and libstdc++, each of which has its own implementation of new and
// delete.
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
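// (For reference: _Znwm/_Znam below are the Itanium manglings of
// "operator new(unsigned long)" and "operator new[](unsigned long)";
// _ZdlPv/_ZdaPv are the matching operator delete forms.)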
#if !SANITIZER_MAC
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(true /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(true /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size, std::align_val_t align)
{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size, std::align_val_t align)
{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const &)
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, size_t size) NOEXCEPT
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size) NOEXCEPT
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::align_val_t) NOEXCEPT
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::align_val_t) NOEXCEPT
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT
{ OPERATOR_DELETE_BODY; }
#else // SANITIZER_MAC
INTERCEPTOR(void *, _Znwm, size_t size)
{ OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR(void *, _Znam, size_t size)
{ OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(true /*nothrow*/); }
INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(true /*nothrow*/); }
INTERCEPTOR(void, _ZdlPv, void *ptr)
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR(void, _ZdaPv, void *ptr)
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY; }
#endif // !SANITIZER_MAC
///// Thread initialization and finalization. /////
#if !SANITIZER_NETBSD
static unsigned g_thread_finalize_key;
static void thread_finalize(void *v) {
uptr iter = (uptr)v;
if (iter > 1) {
if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
Report("LeakSanitizer: failed to set thread key.\n");
Die();
}
return;
}
ThreadFinish();
}
#endif
#if SANITIZER_NETBSD
INTERCEPTOR(void, _lwp_exit) {
ENSURE_LSAN_INITED;
ThreadFinish();
REAL(_lwp_exit)();
}
#define LSAN_MAYBE_INTERCEPT__LWP_EXIT INTERCEPT_FUNCTION(_lwp_exit)
#else
#define LSAN_MAYBE_INTERCEPT__LWP_EXIT
#endif
struct ThreadParam {
void *(*callback)(void *arg);
void *param;
atomic_uintptr_t tid;
};
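// Handshake protocol: after REAL(pthread_create) succeeds, the parent
// publishes the registry tid into ThreadParam::tid; the child spins until it
// observes a non-zero tid, registers itself, then stores 0 back so the parent
// knows this stack-allocated ThreadParam can safely go out of scope.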
extern "C" void *__lsan_thread_start_func(void *arg) {
ThreadParam *p = (ThreadParam*)arg;
void* (*callback)(void *arg) = p->callback;
void *param = p->param;
// Wait until the last iteration to maximize the chance that we are the last
// destructor to run.
#if !SANITIZER_NETBSD
if (pthread_setspecific(g_thread_finalize_key,
(void*)GetPthreadDestructorIterations())) {
Report("LeakSanitizer: failed to set thread key.\n");
Die();
}
#endif
int tid = 0;
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
internal_sched_yield();
SetCurrentThread(tid);
ThreadStart(tid, GetTid());
atomic_store(&p->tid, 0, memory_order_release);
return callback(param);
}
INTERCEPTOR(int, pthread_create, void *th, void *attr,
void *(*callback)(void *), void *param) {
ENSURE_LSAN_INITED;
EnsureMainThreadIDIsCorrect();
__sanitizer_pthread_attr_t myattr;
if (!attr) {
pthread_attr_init(&myattr);
attr = &myattr;
}
AdjustStackSize(attr);
int detached = 0;
pthread_attr_getdetachstate(attr, &detached);
ThreadParam p;
p.callback = callback;
p.param = param;
atomic_store(&p.tid, 0, memory_order_relaxed);
int res;
{
// Ignore all allocations made by pthread_create: thread stack/TLS may be
// stored by pthread for future reuse even after thread destruction, and
// the linked list it's stored in doesn't even hold valid pointers to the
// objects, the latter are calculated by obscure pointer arithmetic.
ScopedInterceptorDisabler disabler;
res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
}
if (res == 0) {
int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
IsStateDetached(detached));
CHECK_NE(tid, 0);
atomic_store(&p.tid, tid, memory_order_release);
while (atomic_load(&p.tid, memory_order_acquire) != 0)
internal_sched_yield();
}
if (attr == &myattr)
pthread_attr_destroy(&myattr);
return res;
}
INTERCEPTOR(int, pthread_join, void *th, void **ret) {
ENSURE_LSAN_INITED;
int tid = ThreadTid((uptr)th);
int res = REAL(pthread_join)(th, ret);
if (res == 0)
ThreadJoin(tid);
return res;
}
INTERCEPTOR(void, _exit, int status) {
if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
REAL(_exit)(status);
}
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
namespace __lsan {
void InitializeInterceptors() {
InitializeSignalInterceptors();
INTERCEPT_FUNCTION(malloc);
INTERCEPT_FUNCTION(free);
LSAN_MAYBE_INTERCEPT_CFREE;
INTERCEPT_FUNCTION(calloc);
INTERCEPT_FUNCTION(realloc);
LSAN_MAYBE_INTERCEPT_MEMALIGN;
LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN;
LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC;
INTERCEPT_FUNCTION(posix_memalign);
INTERCEPT_FUNCTION(valloc);
LSAN_MAYBE_INTERCEPT_PVALLOC;
LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
LSAN_MAYBE_INTERCEPT_MALLINFO;
LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
INTERCEPT_FUNCTION(pthread_join);
INTERCEPT_FUNCTION(_exit);
LSAN_MAYBE_INTERCEPT__LWP_EXIT;
#if !SANITIZER_NETBSD
if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
Report("LeakSanitizer: failed to create thread key.\n");
Die();
}
#endif
}
} // namespace __lsan

lsan_linux.cc

@@ -0,0 +1,33 @@
//=-- lsan_linux.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer. Linux-specific code.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX
#include "lsan_allocator.h"
namespace __lsan {
static THREADLOCAL u32 current_thread_tid = kInvalidTid;
u32 GetCurrentThread() { return current_thread_tid; }
void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
static THREADLOCAL AllocatorCache allocator_cache;
AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
void ReplaceSystemMalloc() {}
} // namespace __lsan
#endif // SANITIZER_LINUX

lsan_mac.cc

@@ -0,0 +1,192 @@
//===-- lsan_mac.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer, a memory leak checker.
//
// Mac-specific details.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
#include "interception/interception.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_thread.h"
#include <pthread.h>
namespace __lsan {
// Support for the following functions from libdispatch on Mac OS:
// dispatch_async_f()
// dispatch_async()
// dispatch_sync_f()
// dispatch_sync()
// dispatch_after_f()
// dispatch_after()
// dispatch_group_async_f()
// dispatch_group_async()
// TODO(glider): libdispatch API contains other functions that we don't support
// yet.
//
// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
// they can cause jobs to run on a thread different from the current one.
// TODO(glider): if so, we need a test for this (otherwise we should remove
// them).
//
// The following functions use dispatch_barrier_async_f() (which isn't a library
// function but is exported) and are thus supported:
// dispatch_source_set_cancel_handler_f()
// dispatch_source_set_cancel_handler()
// dispatch_source_set_event_handler_f()
// dispatch_source_set_event_handler()
//
// The reference manual for Grand Central Dispatch is available at
// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
// The implementation details are at
// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
typedef void *dispatch_group_t;
typedef void *dispatch_queue_t;
typedef void *dispatch_source_t;
typedef u64 dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
typedef void *(*worker_t)(void *block);
// A wrapper for the ObjC blocks used to support libdispatch.
typedef struct {
void *block;
dispatch_function_t func;
u32 parent_tid;
} lsan_block_context_t;
ALWAYS_INLINE
void lsan_register_worker_thread(int parent_tid) {
if (GetCurrentThread() == kInvalidTid) {
u32 tid = ThreadCreate(parent_tid, 0, true);
ThreadStart(tid, GetTid());
SetCurrentThread(tid);
}
}
// For use only by those functions that allocate the context via
// alloc_lsan_context().
extern "C" void lsan_dispatch_call_block_and_release(void *block) {
lsan_block_context_t *context = (lsan_block_context_t *)block;
VReport(2,
"lsan_dispatch_call_block_and_release(): "
"context: %p, pthread_self: %p\n",
block, pthread_self());
lsan_register_worker_thread(context->parent_tid);
// Call the original dispatcher for the block.
context->func(context->block);
lsan_free(context);
}
} // namespace __lsan
using namespace __lsan; // NOLINT
// Wrap |ctxt| and |func| into an lsan_block_context_t. Ownership of the
// allocated context passes to lsan_dispatch_call_block_and_release(), which
// frees it after invoking |func|.
extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
dispatch_function_t func) {
GET_STACK_TRACE_THREAD;
lsan_block_context_t *lsan_ctxt =
(lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
lsan_ctxt->block = ctxt;
lsan_ctxt->func = func;
lsan_ctxt->parent_tid = GetCurrentThread();
return lsan_ctxt;
}
// Define interceptor for dispatch_*_f function with the three most common
// parameters: dispatch_queue_t, context, dispatch_function_t.
#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
dispatch_function_t func) { \
lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); \
return REAL(dispatch_x_f)(dq, (void *)lsan_ctxt, \
lsan_dispatch_call_block_and_release); \
}
INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f)
INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)
INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_queue_t dq,
void *ctxt, dispatch_function_t func) {
lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
return REAL(dispatch_after_f)(when, dq, (void *)lsan_ctxt,
lsan_dispatch_call_block_and_release);
}
INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
dispatch_queue_t dq, void *ctxt, dispatch_function_t func) {
lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
REAL(dispatch_group_async_f)
(group, dq, (void *)lsan_ctxt, lsan_dispatch_call_block_and_release);
}
#if !defined(MISSING_BLOCKS_SUPPORT)
extern "C" {
void dispatch_async(dispatch_queue_t dq, void (^work)(void));
void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
void (^work)(void));
void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
void (^work)(void));
void dispatch_source_set_cancel_handler(dispatch_source_t ds,
void (^work)(void));
void dispatch_source_set_event_handler(dispatch_source_t ds,
void (^work)(void));
}
#define GET_LSAN_BLOCK(work) \
void (^lsan_block)(void); \
int parent_tid = GetCurrentThread(); \
lsan_block = ^(void) { \
lsan_register_worker_thread(parent_tid); \
work(); \
}
INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) {
GET_LSAN_BLOCK(work);
REAL(dispatch_async)(dq, lsan_block);
}
INTERCEPTOR(void, dispatch_group_async, dispatch_group_t dg,
dispatch_queue_t dq, void (^work)(void)) {
GET_LSAN_BLOCK(work);
REAL(dispatch_group_async)(dg, dq, lsan_block);
}
INTERCEPTOR(void, dispatch_after, dispatch_time_t when, dispatch_queue_t queue,
void (^work)(void)) {
GET_LSAN_BLOCK(work);
REAL(dispatch_after)(when, queue, lsan_block);
}
INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds,
void (^work)(void)) {
if (!work) {
REAL(dispatch_source_set_cancel_handler)(ds, work);
return;
}
GET_LSAN_BLOCK(work);
REAL(dispatch_source_set_cancel_handler)(ds, lsan_block);
}
INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t ds,
void (^work)(void)) {
GET_LSAN_BLOCK(work);
REAL(dispatch_source_set_event_handler)(ds, lsan_block);
}
#endif
#endif // SANITIZER_MAC
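
For reference, here is a minimal macOS-only program that exercises the intercepted path; it is ordinary libdispatch usage, not LSan code. Built under standalone LSan (-fsanitize=leak), the dispatch_async_f() call is routed through the wrapper above, so the worker thread gets registered and the deliberately leaked buffer can be attributed to it:

#include <dispatch/dispatch.h>
#include <cstdio>
#include <cstdlib>

static void work(void *ctxt) {
  void *leaked = std::malloc(64);  // deliberate leak on a dispatch worker
  std::printf("worker got context %p, leaked %p\n", ctxt, leaked);
  dispatch_semaphore_signal(static_cast<dispatch_semaphore_t>(ctxt));
}

int main() {
  dispatch_semaphore_t done = dispatch_semaphore_create(0);
  dispatch_queue_t q =
      dispatch_queue_create("lsan.example", DISPATCH_QUEUE_SERIAL);
  dispatch_async_f(q, done, work);  // context + function, as intercepted above
  dispatch_semaphore_wait(done, DISPATCH_TIME_FOREVER);
  return 0;
}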

View File

@@ -0,0 +1,55 @@
//===-- lsan_malloc_mac.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer (LSan), a memory leak detector.
//
// Mac-specific malloc interception.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_thread.h"
using namespace __lsan;
#define COMMON_MALLOC_ZONE_NAME "lsan"
#define COMMON_MALLOC_ENTER() ENSURE_LSAN_INITED
#define COMMON_MALLOC_SANITIZER_INITIALIZED lsan_inited
#define COMMON_MALLOC_FORCE_LOCK()
#define COMMON_MALLOC_FORCE_UNLOCK()
#define COMMON_MALLOC_MEMALIGN(alignment, size) \
GET_STACK_TRACE_MALLOC; \
void *p = lsan_memalign(alignment, size, stack)
#define COMMON_MALLOC_MALLOC(size) \
GET_STACK_TRACE_MALLOC; \
void *p = lsan_malloc(size, stack)
#define COMMON_MALLOC_REALLOC(ptr, size) \
GET_STACK_TRACE_MALLOC; \
void *p = lsan_realloc(ptr, size, stack)
#define COMMON_MALLOC_CALLOC(count, size) \
GET_STACK_TRACE_MALLOC; \
void *p = lsan_calloc(count, size, stack)
#define COMMON_MALLOC_VALLOC(size) \
GET_STACK_TRACE_MALLOC; \
void *p = lsan_valloc(size, stack)
#define COMMON_MALLOC_FREE(ptr) \
lsan_free(ptr)
#define COMMON_MALLOC_SIZE(ptr) \
uptr size = lsan_mz_size(ptr)
#define COMMON_MALLOC_FILL_STATS(zone, stats)
#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
(void)zone_name; \
Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
#define COMMON_MALLOC_NAMESPACE __lsan
#include "sanitizer_common/sanitizer_malloc_mac.inc"
#endif // SANITIZER_MAC
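
Almost everything in this file is a macro definition because sanitizer_malloc_mac.inc is written against this fixed protocol: the includer defines the COMMON_MALLOC_* hooks first, then pulls in the shared malloc-zone implementation, which expands them in place. A self-contained toy version of the technique (illustrative names; the real protocol is richer):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// The "includer" side: define the hooks the shared body expects. Note that
// COMMON_ALLOC deliberately declares `p` for the code that follows it, just
// as COMMON_MALLOC_MALLOC does above.
#define COMMON_ALLOC(size) void *p = std::malloc(size)
#define COMMON_FREE(ptr) std::free(ptr)

// The "shared" side: in compiler-rt this body would live in an .inc file and
// be textually included after the macros; it is inlined here for brevity.
void *zone_malloc(std::size_t size) {
  COMMON_ALLOC(size);  // expands to: void *p = std::malloc(size);
  return p;
}
void zone_free(void *ptr) { COMMON_FREE(ptr); }

int main() {
  void *p = zone_malloc(32);
  std::printf("allocated %p\n", p);
  zone_free(p);
  return 0;
}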

View File

@@ -0,0 +1,22 @@
//===-- lsan_preinit.cc ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
//
// Call __lsan_init at the very early stage of process startup.
//===----------------------------------------------------------------------===//
#include "lsan.h"
#if SANITIZER_CAN_USE_PREINIT_ARRAY
// We force __lsan_init to be called before anyone else by placing it into the
// .preinit_array section.
__attribute__((section(".preinit_array"), used))
void (*__local_lsan_preinit)(void) = __lsan_init;
#endif
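
The trick is easy to see in isolation. In the Linux/glibc-only sketch below (a toy example, not LSan code), the dynamic loader runs entries in .preinit_array before any ELF constructors, so the hook fires even before the executable's static initializers; glibc ignores .preinit_array in shared objects, which is why the mechanism only suits the main binary:

#include <cstdio>

static int global = (std::puts("static initializer"), 1);

extern "C" void early_init() { std::puts("preinit: before everything else"); }

// The loader walks .preinit_array before running any constructors, so
// early_init() executes before the static initializer above and before main().
__attribute__((section(".preinit_array"), used))
static void (*preinit_entry)(void) = early_init;

int main() {
  std::printf("main (global=%d)\n", global);
  return 0;
}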

View File

@@ -0,0 +1,158 @@
//=-- lsan_thread.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_thread.h for details.
//
//===----------------------------------------------------------------------===//
#include "lsan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
namespace __lsan {
static ThreadRegistry *thread_registry;
static ThreadContextBase *CreateThreadContext(u32 tid) {
void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
return new(mem) ThreadContext(tid);
}
static const uptr kMaxThreads = 1 << 13;
static const uptr kThreadQuarantineSize = 64;
void InitializeThreadRegistry() {
static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
thread_registry = new(thread_registry_placeholder)
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}
ThreadContext::ThreadContext(int tid)
: ThreadContextBase(tid),
stack_begin_(0),
stack_end_(0),
cache_begin_(0),
cache_end_(0),
tls_begin_(0),
tls_end_(0),
dtls_(nullptr) {}
struct OnStartedArgs {
uptr stack_begin, stack_end,
cache_begin, cache_end,
tls_begin, tls_end;
DTLS *dtls;
};
void ThreadContext::OnStarted(void *arg) {
OnStartedArgs *args = reinterpret_cast<OnStartedArgs *>(arg);
stack_begin_ = args->stack_begin;
stack_end_ = args->stack_end;
tls_begin_ = args->tls_begin;
tls_end_ = args->tls_end;
cache_begin_ = args->cache_begin;
cache_end_ = args->cache_end;
dtls_ = args->dtls;
}
void ThreadContext::OnFinished() {
AllocatorThreadFinish();
DTLS_Destroy();
}
u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
return thread_registry->CreateThread(user_id, detached, parent_tid,
/* arg */ nullptr);
}
void ThreadStart(u32 tid, tid_t os_id, bool workerthread) {
OnStartedArgs args;
uptr stack_size = 0;
uptr tls_size = 0;
GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size,
&args.tls_begin, &tls_size);
args.stack_end = args.stack_begin + stack_size;
args.tls_end = args.tls_begin + tls_size;
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
args.dtls = DTLS_Get();
thread_registry->StartThread(tid, os_id, workerthread, &args);
}
void ThreadFinish() {
thread_registry->FinishThread(GetCurrentThread());
SetCurrentThread(kInvalidTid);
}
ThreadContext *CurrentThreadContext() {
if (!thread_registry) return nullptr;
if (GetCurrentThread() == kInvalidTid)
return nullptr;
// No lock needed when getting current thread.
return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
}
static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  return tctx->user_id == uid && tctx->status != ThreadStatusInvalid;
}
u32 ThreadTid(uptr uid) {
return thread_registry->FindThread(FindThreadByUid, (void*)uid);
}
void ThreadJoin(u32 tid) {
CHECK_NE(tid, kInvalidTid);
thread_registry->JoinThread(tid, /* arg */nullptr);
}
void EnsureMainThreadIDIsCorrect() {
if (GetCurrentThread() == 0)
CurrentThreadContext()->os_id = GetTid();
}
///// Interface to the common LSan module. /////
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
ThreadContext *context = static_cast<ThreadContext *>(
thread_registry->FindThreadContextByOsIDLocked(os_id));
if (!context) return false;
*stack_begin = context->stack_begin();
*stack_end = context->stack_end();
*tls_begin = context->tls_begin();
*tls_end = context->tls_end();
*cache_begin = context->cache_begin();
*cache_end = context->cache_end();
*dtls = context->dtls();
return true;
}
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
void *arg) {
}
void LockThreadRegistry() {
thread_registry->Lock();
}
void UnlockThreadRegistry() {
thread_registry->Unlock();
}
} // namespace __lsan
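
A detail worth noting above: InitializeThreadRegistry() placement-news the registry into a static, 64-byte-aligned buffer instead of heap-allocating it, likely because the runtime cannot safely go through its own intercepted malloc this early, and a plain global ThreadRegistry would be subject to static-constructor ordering. A stripped-down, self-contained sketch of that pattern (illustrative type, not LSan's):

#include <cstdio>
#include <new>

struct Registry {
  int threads = 0;
};

// Raw static storage, aligned like the real placeholder; nothing is
// constructed here at load time.
alignas(64) static char registry_placeholder[sizeof(Registry)];
static Registry *registry = nullptr;

void InitializeRegistry() {
  // Construct in place, exactly when we choose: no heap, no static ctor.
  registry = new (registry_placeholder) Registry();
}

int main() {
  InitializeRegistry();
  registry->threads++;
  std::printf("threads=%d\n", registry->threads);
  return 0;
}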

View File

@@ -0,0 +1,60 @@
//=-- lsan_thread.h -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Thread registry for standalone LSan.
//
//===----------------------------------------------------------------------===//
#ifndef LSAN_THREAD_H
#define LSAN_THREAD_H
#include "sanitizer_common/sanitizer_thread_registry.h"
namespace __sanitizer {
struct DTLS;
}
namespace __lsan {
class ThreadContext : public ThreadContextBase {
public:
explicit ThreadContext(int tid);
void OnStarted(void *arg) override;
void OnFinished() override;
uptr stack_begin() { return stack_begin_; }
uptr stack_end() { return stack_end_; }
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
uptr cache_begin() { return cache_begin_; }
uptr cache_end() { return cache_end_; }
DTLS *dtls() { return dtls_; }
private:
uptr stack_begin_, stack_end_,
cache_begin_, cache_end_,
tls_begin_, tls_end_;
DTLS *dtls_;
};
void InitializeThreadRegistry();
void ThreadStart(u32 tid, tid_t os_id, bool workerthread = false);
void ThreadFinish();
u32 ThreadCreate(u32 tid, uptr uid, bool detached);
void ThreadJoin(u32 tid);
u32 ThreadTid(uptr uid);
u32 GetCurrentThread();
void SetCurrentThread(u32 tid);
ThreadContext *CurrentThreadContext();
void EnsureMainThreadIDIsCorrect();
} // namespace __lsan
#endif // LSAN_THREAD_H

View File

@@ -0,0 +1,3 @@
___lsan_default_options
___lsan_default_suppressions
___lsan_is_turned_off