Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
Author: Xamarin Public Jenkins (auto-signing)
Date:   2020-01-16 16:38:04 +00:00
Parent: d94e79959b
Commit: 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions


@@ -0,0 +1 @@
BasedOnStyle: Google


@@ -0,0 +1,290 @@
# Build for the AddressSanitizer runtime support library.
set(ASAN_SOURCES
asan_allocator.cc
asan_activation.cc
asan_debugging.cc
asan_descriptions.cc
asan_errors.cc
asan_fake_stack.cc
asan_flags.cc
asan_fuchsia.cc
asan_globals.cc
asan_globals_win.cc
asan_interceptors.cc
asan_interceptors_memintrinsics.cc
asan_linux.cc
asan_mac.cc
asan_malloc_linux.cc
asan_malloc_mac.cc
asan_malloc_win.cc
asan_memory_profile.cc
asan_poisoning.cc
asan_posix.cc
asan_premap_shadow.cc
asan_report.cc
asan_rtl.cc
asan_shadow_setup.cc
asan_stack.cc
asan_stats.cc
asan_suppressions.cc
asan_thread.cc
asan_win.cc)
set(ASAN_CXX_SOURCES
asan_new_delete.cc)
set(ASAN_PREINIT_SOURCES
asan_preinit.cc)
include_directories(..)
set(ASAN_CFLAGS ${SANITIZER_COMMON_CFLAGS})
set(ASAN_COMMON_DEFINITIONS ${COMPILER_RT_ASAN_SHADOW_SCALE_DEFINITION})
append_rtti_flag(OFF ASAN_CFLAGS)
set(ASAN_DYNAMIC_LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS})
if(ANDROID)
# On Android, -z global does not do what it is documented to do.
# On Android, -z global moves the library ahead in the lookup order,
# placing it right after the LD_PRELOADs. This is used to compensate for the fact
# that the Android linker does not look at the dependencies of the main executable
# that aren't dependencies of the current DSO when resolving symbols from said DSO.
# As a net result, this allows running ASan executables without LD_PRELOAD-ing the
# ASan runtime library.
# The above is applicable to L MR1 or newer.
if (COMPILER_RT_HAS_Z_GLOBAL)
list(APPEND ASAN_DYNAMIC_LINK_FLAGS -Wl,-z,global)
endif()
endif()
set(ASAN_DYNAMIC_DEFINITIONS
${ASAN_COMMON_DEFINITIONS} ASAN_DYNAMIC=1)
append_list_if(WIN32 INTERCEPTION_DYNAMIC_CRT ASAN_DYNAMIC_DEFINITIONS)
set(ASAN_DYNAMIC_CFLAGS ${ASAN_CFLAGS})
append_list_if(COMPILER_RT_HAS_FTLS_MODEL_INITIAL_EXEC
-ftls-model=initial-exec ASAN_DYNAMIC_CFLAGS)
append_list_if(MSVC /DEBUG ASAN_DYNAMIC_LINK_FLAGS)
set(ASAN_DYNAMIC_LIBS ${SANITIZER_CXX_ABI_LIBRARY} ${SANITIZER_COMMON_LINK_LIBS})
append_list_if(COMPILER_RT_HAS_LIBDL dl ASAN_DYNAMIC_LIBS)
append_list_if(COMPILER_RT_HAS_LIBRT rt ASAN_DYNAMIC_LIBS)
append_list_if(COMPILER_RT_HAS_LIBM m ASAN_DYNAMIC_LIBS)
append_list_if(COMPILER_RT_HAS_LIBPTHREAD pthread ASAN_DYNAMIC_LIBS)
append_list_if(COMPILER_RT_HAS_LIBLOG log ASAN_DYNAMIC_LIBS)
# Compile ASan sources into an object library.
add_compiler_rt_object_libraries(RTAsan_dynamic
OS ${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${ASAN_SUPPORTED_ARCH}
SOURCES ${ASAN_SOURCES} ${ASAN_CXX_SOURCES}
CFLAGS ${ASAN_DYNAMIC_CFLAGS}
DEFS ${ASAN_DYNAMIC_DEFINITIONS})
if(NOT APPLE)
add_compiler_rt_object_libraries(RTAsan
ARCHS ${ASAN_SUPPORTED_ARCH}
SOURCES ${ASAN_SOURCES} CFLAGS ${ASAN_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS})
add_compiler_rt_object_libraries(RTAsan_cxx
ARCHS ${ASAN_SUPPORTED_ARCH}
SOURCES ${ASAN_CXX_SOURCES} CFLAGS ${ASAN_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS})
add_compiler_rt_object_libraries(RTAsan_preinit
ARCHS ${ASAN_SUPPORTED_ARCH}
SOURCES ${ASAN_PREINIT_SOURCES} CFLAGS ${ASAN_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS})
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/dummy.cc "")
add_compiler_rt_object_libraries(RTAsan_dynamic_version_script_dummy
ARCHS ${ASAN_SUPPORTED_ARCH}
SOURCES ${CMAKE_CURRENT_BINARY_DIR}/dummy.cc
CFLAGS ${ASAN_DYNAMIC_CFLAGS}
DEFS ${ASAN_DYNAMIC_DEFINITIONS})
endif()
# Build ASan runtimes shipped with Clang.
add_compiler_rt_component(asan)
if(APPLE)
add_weak_symbols("asan" WEAK_SYMBOL_LINK_FLAGS)
add_weak_symbols("lsan" WEAK_SYMBOL_LINK_FLAGS)
add_weak_symbols("ubsan" WEAK_SYMBOL_LINK_FLAGS)
add_weak_symbols("sanitizer_common" WEAK_SYMBOL_LINK_FLAGS)
add_compiler_rt_runtime(clang_rt.asan
SHARED
OS ${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${ASAN_SUPPORTED_ARCH}
OBJECT_LIBS RTAsan_dynamic
RTInterception
RTSanitizerCommon
RTSanitizerCommonLibc
RTLSanCommon
RTUbsan
CFLAGS ${ASAN_DYNAMIC_CFLAGS}
LINK_FLAGS ${WEAK_SYMBOL_LINK_FLAGS}
DEFS ${ASAN_DYNAMIC_DEFINITIONS}
PARENT_TARGET asan)
else()
# Build separate libraries for each target.
set(ASAN_COMMON_RUNTIME_OBJECT_LIBS
RTInterception
RTSanitizerCommon
RTSanitizerCommonLibc
RTLSanCommon
RTUbsan)
add_compiler_rt_runtime(clang_rt.asan
STATIC
ARCHS ${ASAN_SUPPORTED_ARCH}
OBJECT_LIBS RTAsan_preinit
RTAsan
${ASAN_COMMON_RUNTIME_OBJECT_LIBS}
CFLAGS ${ASAN_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS}
PARENT_TARGET asan)
add_compiler_rt_runtime(clang_rt.asan_cxx
STATIC
ARCHS ${ASAN_SUPPORTED_ARCH}
OBJECT_LIBS RTAsan_cxx
RTUbsan_cxx
CFLAGS ${ASAN_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS}
PARENT_TARGET asan)
add_compiler_rt_runtime(clang_rt.asan-preinit
STATIC
ARCHS ${ASAN_SUPPORTED_ARCH}
OBJECT_LIBS RTAsan_preinit
CFLAGS ${ASAN_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS}
PARENT_TARGET asan)
foreach(arch ${ASAN_SUPPORTED_ARCH})
if (UNIX)
add_sanitizer_rt_version_list(clang_rt.asan-dynamic-${arch}
LIBS clang_rt.asan-${arch} clang_rt.asan_cxx-${arch}
EXTRA asan.syms.extra)
set(VERSION_SCRIPT_FLAG
-Wl,--version-script,${CMAKE_CURRENT_BINARY_DIR}/clang_rt.asan-dynamic-${arch}.vers)
# The Solaris 11.4 linker supports a subset of GNU ld version scripts,
# but requires a special option to enable it.
if (OS_NAME MATCHES "SunOS")
list(APPEND VERSION_SCRIPT_FLAG -Wl,-z,gnu-version-script-compat)
endif()
set_property(SOURCE
${CMAKE_CURRENT_BINARY_DIR}/dummy.cc
APPEND PROPERTY
OBJECT_DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/clang_rt.asan-dynamic-${arch}.vers)
else()
set(VERSION_SCRIPT_FLAG)
endif()
set(ASAN_DYNAMIC_WEAK_INTERCEPTION)
if (MSVC)
add_compiler_rt_object_libraries(AsanWeakInterception
${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${arch}
SOURCES asan_win_weak_interception.cc
CFLAGS ${ASAN_CFLAGS} -DSANITIZER_DYNAMIC
DEFS ${ASAN_COMMON_DEFINITIONS})
set(ASAN_DYNAMIC_WEAK_INTERCEPTION
AsanWeakInterception
UbsanWeakInterception
SancovWeakInterception
SanitizerCommonWeakInterception)
endif()
add_compiler_rt_runtime(clang_rt.asan
SHARED
ARCHS ${arch}
OBJECT_LIBS ${ASAN_COMMON_RUNTIME_OBJECT_LIBS}
RTAsan_dynamic
# The only purpose of RTAsan_dynamic_version_script_dummy is to
# carry a dependency of the shared runtime on the version script.
# Replacing it with a straightforward
# add_dependencies(clang_rt.asan-dynamic-${arch} clang_rt.asan-dynamic-${arch}-version-list)
# generates an order-only dependency in ninja.
RTAsan_dynamic_version_script_dummy
RTUbsan_cxx
${ASAN_DYNAMIC_WEAK_INTERCEPTION}
CFLAGS ${ASAN_DYNAMIC_CFLAGS}
LINK_FLAGS ${ASAN_DYNAMIC_LINK_FLAGS}
${VERSION_SCRIPT_FLAG}
LINK_LIBS ${ASAN_DYNAMIC_LIBS}
DEFS ${ASAN_DYNAMIC_DEFINITIONS}
PARENT_TARGET asan)
if (UNIX AND NOT ${arch} STREQUAL "i386")
add_sanitizer_rt_symbols(clang_rt.asan_cxx
ARCHS ${arch})
add_dependencies(asan clang_rt.asan_cxx-${arch}-symbols)
add_sanitizer_rt_symbols(clang_rt.asan
ARCHS ${arch}
EXTRA asan.syms.extra)
add_dependencies(asan clang_rt.asan-${arch}-symbols)
endif()
if (WIN32)
add_compiler_rt_object_libraries(AsanDllThunk
${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${arch}
SOURCES asan_globals_win.cc
asan_win_dll_thunk.cc
CFLAGS ${ASAN_CFLAGS} -DSANITIZER_DLL_THUNK
DEFS ${ASAN_COMMON_DEFINITIONS})
add_compiler_rt_runtime(clang_rt.asan_dll_thunk
STATIC
ARCHS ${arch}
OBJECT_LIBS AsanDllThunk
UbsanDllThunk
SancovDllThunk
SanitizerCommonDllThunk
SOURCES $<TARGET_OBJECTS:RTInterception.${arch}>
PARENT_TARGET asan)
set(DYNAMIC_RUNTIME_THUNK_CFLAGS "-DSANITIZER_DYNAMIC_RUNTIME_THUNK")
if(MSVC)
list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-Zl")
elseif(CMAKE_C_COMPILER_ID MATCHES Clang)
list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-nodefaultlibs")
endif()
add_compiler_rt_object_libraries(AsanDynamicRuntimeThunk
${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${arch}
SOURCES asan_globals_win.cc
asan_win_dynamic_runtime_thunk.cc
CFLAGS ${ASAN_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS})
add_compiler_rt_runtime(clang_rt.asan_dynamic_runtime_thunk
STATIC
ARCHS ${arch}
OBJECT_LIBS AsanDynamicRuntimeThunk
UbsanDynamicRuntimeThunk
SancovDynamicRuntimeThunk
SanitizerCommonDynamicRuntimeThunk
CFLAGS ${ASAN_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS}
PARENT_TARGET asan)
endif()
endforeach()
endif()
add_compiler_rt_resource_file(asan_blacklist asan_blacklist.txt asan)
add_subdirectory(scripts)
if(COMPILER_RT_INCLUDE_TESTS)
add_subdirectory(tests)
endif()


@@ -0,0 +1,26 @@
AddressSanitizer RT
================================
This directory contains sources of the AddressSanitizer (ASan) runtime library.
Directory structure:
README.txt : This file.
Makefile.mk : File for make-based build.
CMakeLists.txt : File for cmake-based build.
asan_*.{cc,h} : Sources of the asan runtime library.
scripts/* : Helper scripts.
tests/* : ASan unit tests.
Also ASan runtime needs the following libraries:
lib/interception/ : Machinery used to intercept function calls.
lib/sanitizer_common/ : Code shared between various sanitizers.
ASan runtime currently also embeds part of LeakSanitizer runtime for
leak detection (lib/lsan/lsan_common.{cc,h}).
ASan runtime can only be built by CMake. You can run ASan tests
from the root of your CMake build tree:
make check-asan
For more instructions see:
https://github.com/google/sanitizers/wiki/AddressSanitizerHowToBuild
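
As a quick smoke test of a freshly built runtime (besides running check-asan), one can compile a deliberately buggy program with the sanitizer enabled and check that a report is produced. The snippet below is illustration only and not part of the runtime; the clang++ invocation in the comment assumes the just-built Clang is used.

// smoke_test.cc -- minimal check that the ASan runtime reports errors.
// Assumed build line: clang++ -fsanitize=address -g smoke_test.cc
// Running the binary should print a heap-buffer-overflow report and abort.
#include <cstdio>

int main() {
  int *array = new int[16];
  int value = array[16];  // One element past the end of the allocation.
  std::printf("%d\n", value);
  delete[] array;
  return 0;
}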


@@ -0,0 +1,4 @@
__asan_*
__lsan_*
__ubsan_*
__sancov_*


@@ -0,0 +1,144 @@
//===-- asan_activation.cc --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan activation/deactivation logic.
//===----------------------------------------------------------------------===//
#include "asan_activation.h"
#include "asan_allocator.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
static struct AsanDeactivatedFlags {
AllocatorOptions allocator_options;
int malloc_context_size;
bool poison_heap;
bool coverage;
const char *coverage_dir;
void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) {
#define ASAN_ACTIVATION_FLAG(Type, Name) \
RegisterFlag(parser, #Name, "", &f->Name);
#define COMMON_ACTIVATION_FLAG(Type, Name) \
RegisterFlag(parser, #Name, "", &cf->Name);
#include "asan_activation_flags.inc"
#undef ASAN_ACTIVATION_FLAG
#undef COMMON_ACTIVATION_FLAG
RegisterIncludeFlags(parser, cf);
}
void OverrideFromActivationFlags() {
Flags f;
CommonFlags cf;
FlagParser parser;
RegisterActivationFlags(&parser, &f, &cf);
cf.SetDefaults();
// Copy the current activation flags.
allocator_options.CopyTo(&f, &cf);
cf.malloc_context_size = malloc_context_size;
f.poison_heap = poison_heap;
cf.coverage = coverage;
cf.coverage_dir = coverage_dir;
cf.verbosity = Verbosity();
cf.help = false; // this is activation-specific help
// Check if activation flags need to be overridden.
if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) {
parser.ParseString(env);
}
InitializeCommonFlags(&cf);
if (Verbosity()) ReportUnrecognizedFlags();
if (cf.help) parser.PrintFlagDescriptions();
allocator_options.SetFrom(&f, &cf);
malloc_context_size = cf.malloc_context_size;
poison_heap = f.poison_heap;
coverage = cf.coverage;
coverage_dir = cf.coverage_dir;
}
void Print() {
Report(
"quarantine_size_mb %d, thread_local_quarantine_size_kb %d, "
"max_redzone %d, poison_heap %d, malloc_context_size %d, "
"alloc_dealloc_mismatch %d, allocator_may_return_null %d, coverage %d, "
"coverage_dir %s, allocator_release_to_os_interval_ms %d\n",
allocator_options.quarantine_size_mb,
allocator_options.thread_local_quarantine_size_kb,
allocator_options.max_redzone, poison_heap, malloc_context_size,
allocator_options.alloc_dealloc_mismatch,
allocator_options.may_return_null, coverage, coverage_dir,
allocator_options.release_to_os_interval_ms);
}
} asan_deactivated_flags;
static bool asan_is_deactivated;
void AsanDeactivate() {
CHECK(!asan_is_deactivated);
VReport(1, "Deactivating ASan\n");
// Stash runtime state.
GetAllocatorOptions(&asan_deactivated_flags.allocator_options);
asan_deactivated_flags.malloc_context_size = GetMallocContextSize();
asan_deactivated_flags.poison_heap = CanPoisonMemory();
asan_deactivated_flags.coverage = common_flags()->coverage;
asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir;
// Deactivate the runtime.
SetCanPoisonMemory(false);
SetMallocContextSize(1);
AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
disabled.quarantine_size_mb = 0;
disabled.thread_local_quarantine_size_kb = 0;
// Redzone must be at least Max(16, granularity) bytes long.
disabled.min_redzone = Max(16, (int)SHADOW_GRANULARITY);
disabled.max_redzone = disabled.min_redzone;
disabled.alloc_dealloc_mismatch = false;
disabled.may_return_null = true;
ReInitializeAllocator(disabled);
asan_is_deactivated = true;
}
void AsanActivate() {
if (!asan_is_deactivated) return;
VReport(1, "Activating ASan\n");
UpdateProcessName();
asan_deactivated_flags.OverrideFromActivationFlags();
SetCanPoisonMemory(asan_deactivated_flags.poison_heap);
SetMallocContextSize(asan_deactivated_flags.malloc_context_size);
ReInitializeAllocator(asan_deactivated_flags.allocator_options);
asan_is_deactivated = false;
if (Verbosity()) {
Report("Activated with flags:\n");
asan_deactivated_flags.Print();
}
}
} // namespace __asan


@@ -0,0 +1,23 @@
//===-- asan_activation.h ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan activation/deactivation logic.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ACTIVATION_H
#define ASAN_ACTIVATION_H
namespace __asan {
void AsanDeactivate();
void AsanActivate();
} // namespace __asan
#endif // ASAN_ACTIVATION_H


@@ -0,0 +1,37 @@
//===-- asan_activation_flags.inc -------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// A subset of ASan (and common) runtime flags supported at activation time.
//
//===----------------------------------------------------------------------===//
#ifndef ASAN_ACTIVATION_FLAG
# error "Define ASAN_ACTIVATION_FLAG prior to including this file!"
#endif
#ifndef COMMON_ACTIVATION_FLAG
# error "Define COMMON_ACTIVATION_FLAG prior to including this file!"
#endif
// ASAN_ACTIVATION_FLAG(Type, Name)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
ASAN_ACTIVATION_FLAG(int, redzone)
ASAN_ACTIVATION_FLAG(int, max_redzone)
ASAN_ACTIVATION_FLAG(int, quarantine_size_mb)
ASAN_ACTIVATION_FLAG(int, thread_local_quarantine_size_kb)
ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch)
ASAN_ACTIVATION_FLAG(bool, poison_heap)
COMMON_ACTIVATION_FLAG(bool, allocator_may_return_null)
COMMON_ACTIVATION_FLAG(int, malloc_context_size)
COMMON_ACTIVATION_FLAG(bool, coverage)
COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
COMMON_ACTIVATION_FLAG(int, verbosity)
COMMON_ACTIVATION_FLAG(bool, help)
COMMON_ACTIVATION_FLAG(s32, allocator_release_to_os_interval_ms)
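
As the #error guards above require, this file must be included with ASAN_ACTIVATION_FLAG and COMMON_ACTIVATION_FLAG already defined; RegisterActivationFlags() in asan_activation.cc expands each entry into a RegisterFlag() call. The toy translation unit below (illustration only, not part of the runtime) shows the same X-macro pattern by simply printing the flag names.

// print_activation_flags.cc -- illustrative use of the X-macro include.
#include <cstdio>

void PrintActivationFlagNames() {
  // Each entry in asan_activation_flags.inc expands to one printf here; the
  // real consumer expands each entry to a RegisterFlag() call instead.
#define ASAN_ACTIVATION_FLAG(Type, Name) \
  std::printf("asan activation flag:   %s\n", #Name);
#define COMMON_ACTIVATION_FLAG(Type, Name) \
  std::printf("common activation flag: %s\n", #Name);
#include "asan_activation_flags.inc"
#undef ASAN_ACTIVATION_FLAG
#undef COMMON_ACTIVATION_FLAG
}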

File diff suppressed because it is too large


@@ -0,0 +1,222 @@
//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cc.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
namespace __asan {
enum AllocType {
FROM_MALLOC = 1, // Memory block came from malloc, calloc, realloc, etc.
FROM_NEW = 2, // Memory block came from operator new.
FROM_NEW_BR = 3 // Memory block came from operator new [].
};
struct AsanChunk;
struct AllocatorOptions {
u32 quarantine_size_mb;
u32 thread_local_quarantine_size_kb;
u16 min_redzone;
u16 max_redzone;
u8 may_return_null;
u8 alloc_dealloc_mismatch;
s32 release_to_os_interval_ms;
void SetFrom(const Flags *f, const CommonFlags *cf);
void CopyTo(Flags *f, CommonFlags *cf);
};
void InitializeAllocator(const AllocatorOptions &options);
void ReInitializeAllocator(const AllocatorOptions &options);
void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView {
public:
explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
bool IsValid() const; // Checks if AsanChunkView points to a valid
// allocated or quarantined chunk.
bool IsAllocated() const; // Checks if the memory is currently allocated.
bool IsQuarantined() const; // Checks if the memory is currently quarantined.
uptr Beg() const; // First byte of user memory.
uptr End() const; // Last byte of user memory.
uptr UsedSize() const; // Size requested by the user.
u32 UserRequestedAlignment() const; // Originally requested alignment.
uptr AllocTid() const;
uptr FreeTid() const;
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
u32 GetAllocStackId() const;
u32 GetFreeStackId() const;
StackTrace GetAllocStack() const;
StackTrace GetFreeStack() const;
AllocType GetAllocType() const;
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
if (addr >= Beg() && (addr + access_size) <= End()) {
*offset = addr - Beg();
return true;
}
return false;
}
bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) const {
(void)access_size;
if (addr < Beg()) {
*offset = Beg() - addr;
return true;
}
return false;
}
bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) const {
if (addr + access_size > End()) {
*offset = addr - End();
return true;
}
return false;
}
private:
AsanChunk *const chunk_;
};
AsanChunkView FindHeapChunkByAddress(uptr address);
AsanChunkView FindHeapChunkByAllocBeg(uptr address);
// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
public:
explicit AsanChunkFifoList(LinkerInitialized) { }
AsanChunkFifoList() { clear(); }
void Push(AsanChunk *n);
void PushList(AsanChunkFifoList *q);
AsanChunk *Pop();
uptr size() { return size_; }
void clear() {
IntrusiveList<AsanChunk>::clear();
size_ = 0;
}
private:
uptr size_;
};
struct AsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const;
void OnUnmap(uptr p, uptr size) const;
};
#if SANITIZER_CAN_USE_ALLOCATOR64
# if SANITIZER_FUCHSIA
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef DefaultSizeClassMap SizeClassMap;
# elif defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
# elif defined(__aarch64__) && SANITIZER_ANDROID
const uptr kAllocatorSpace = 0x3000000000ULL;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only used for 42-bit VMA,
// so there is no need for different values for different VMAs.
const uptr kAllocatorSpace = 0x10000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
typedef DefaultSizeClassMap SizeClassMap;
# elif SANITIZER_WINDOWS
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x8000000000ULL; // 512G
typedef DefaultSizeClassMap SizeClassMap;
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef DefaultSizeClassMap SizeClassMap;
# endif
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = 0;
typedef __asan::SizeClassMap SizeClassMap;
typedef AsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else // Fallback to SizeClassAllocator32.
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<kNumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
# endif
typedef CompactSizeClassMap SizeClassMap;
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = 16;
typedef __asan::SizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = __asan::kRegionSizeLog;
typedef __asan::ByteMap ByteMap;
typedef AsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif // SANITIZER_CAN_USE_ALLOCATOR64
static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> AsanAllocator;
struct AsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
AllocatorCache allocator_cache;
void CommitBack();
private:
// These objects are allocated via mmap() and are zero-initialized.
AsanThreadLocalMallocStorage() {}
};
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
AllocType alloc_type);
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
void asan_delete(void *ptr, uptr size, uptr alignment,
BufferedStackTrace *stack, AllocType alloc_type);
void *asan_malloc(uptr size, BufferedStackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack);
void *asan_valloc(uptr size, BufferedStackTrace *stack);
void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
BufferedStackTrace *stack);
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);
uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
void asan_mz_force_unlock();
void PrintInternalAllocatorStats();
void AsanSoftRssLimitExceededCallback(bool exceeded);
} // namespace __asan
#endif // ASAN_ALLOCATOR_H
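
The AllocType recorded at allocation time and passed back to asan_free()/asan_delete() above is what lets the runtime detect mismatched allocation/deallocation pairs, reported as alloc-dealloc-mismatch by ErrorAllocTypeMismatch in asan_errors.cc. A user-level illustration, assuming a program built with -fsanitize=address and the alloc_dealloc_mismatch check enabled:

// mismatch_demo.cc -- illustration only.
// With ASAN_OPTIONS=alloc_dealloc_mismatch=1 the report reads
// "alloc-dealloc-mismatch (operator new [] vs operator delete)".
int main() {
  int *p = new int[4];  // recorded as FROM_NEW_BR
  delete p;             // deallocated as FROM_NEW -> mismatch reported here
  return 0;
}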


@@ -0,0 +1,13 @@
# Blacklist for AddressSanitizer. Turns off instrumentation of particular
# functions or sources. Use with care. You may set the location of the
# blacklist at compile time using the -fsanitize-blacklist=<path> flag.
# Example usage:
# fun:*bad_function_name*
# src:file_with_tricky_code.cc
# global:*global_with_bad_access_or_initialization*
# global:*global_with_initialization_issues*=init
# type:*Namespace::ClassName*=init
# Stack buffer overflow in VC/INCLUDE/xlocnum, see http://goo.gl/L4qqUG
fun:*_Find_elem@*@std*


@@ -0,0 +1,146 @@
//===-- asan_debugging.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file contains various functions that are generally useful to call when
// using a debugger (LLDB, GDB).
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_descriptions.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
namespace {
using namespace __asan;
static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset,
char *name, uptr name_size,
uptr &region_address, uptr &region_size) {
InternalMmapVector<StackVarDescr> vars(16);
if (!ParseFrameDescription(frame_descr, &vars)) {
return;
}
for (uptr i = 0; i < vars.size(); i++) {
if (offset <= vars[i].beg + vars[i].size) {
// internal_strlcpy's size argument includes the terminating '\0', so we pass
// name_len + 1 to copy the whole name; capping the size at name_size keeps the
// copy within the caller's buffer, and strlcpy always null-terminates.
internal_strlcpy(name, vars[i].name_pos,
Min(name_size, vars[i].name_len + 1));
region_address = addr - (offset - vars[i].beg);
region_size = vars[i].size;
return;
}
}
}
uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
bool alloc_stack) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return 0;
StackTrace stack(nullptr, 0);
if (alloc_stack) {
if (chunk.AllocTid() == kInvalidTid) return 0;
stack = chunk.GetAllocStack();
if (thread_id) *thread_id = chunk.AllocTid();
} else {
if (chunk.FreeTid() == kInvalidTid) return 0;
stack = chunk.GetFreeStack();
if (thread_id) *thread_id = chunk.FreeTid();
}
if (trace && size) {
size = Min(size, Min(stack.size, kStackTraceMax));
for (uptr i = 0; i < size; i++)
trace[i] = StackTrace::GetPreviousInstructionPc(stack.trace[i]);
return size;
}
return 0;
}
} // namespace
SANITIZER_INTERFACE_ATTRIBUTE
const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
uptr *region_address_ptr,
uptr *region_size_ptr) {
AddressDescription descr(addr);
uptr region_address = 0;
uptr region_size = 0;
const char *region_kind = nullptr;
if (name && name_size > 0) name[0] = 0;
if (auto shadow = descr.AsShadow()) {
// region_{address,size} are already 0
switch (shadow->kind) {
case kShadowKindLow:
region_kind = "low shadow";
break;
case kShadowKindGap:
region_kind = "shadow gap";
break;
case kShadowKindHigh:
region_kind = "high shadow";
break;
}
} else if (auto heap = descr.AsHeap()) {
region_kind = "heap";
region_address = heap->chunk_access.chunk_begin;
region_size = heap->chunk_access.chunk_size;
} else if (auto stack = descr.AsStack()) {
region_kind = "stack";
if (!stack->frame_descr) {
// region_{address,size} are already 0
} else {
FindInfoForStackVar(addr, stack->frame_descr, stack->offset, name,
name_size, region_address, region_size);
}
} else if (auto global = descr.AsGlobal()) {
region_kind = "global";
auto &g = global->globals[0];
internal_strlcpy(name, g.name, name_size);
region_address = g.beg;
region_size = g.size;
} else {
// region_{address,size} are already 0
region_kind = "heap-invalid";
}
CHECK(region_kind);
if (region_address_ptr) *region_address_ptr = region_address;
if (region_size_ptr) *region_size_ptr = region_size;
return region_kind;
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ true);
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ false);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) {
if (shadow_scale)
*shadow_scale = SHADOW_SCALE;
if (shadow_offset)
*shadow_offset = SHADOW_OFFSET;
}
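
The functions above are exported with SANITIZER_INTERFACE_ATTRIBUTE and are also declared in the public <sanitizer/asan_interface.h> header, so a test program or a debugger helper can call them directly. A minimal sketch follows; the buffer sizes and the malloc'd object are arbitrary choices for illustration.

// locate_demo.cc -- illustration only; build with -fsanitize=address -g.
#include <sanitizer/asan_interface.h>
#include <cstdio>
#include <cstdlib>

int main() {
  void *p = std::malloc(32);

  char name[128];
  void *region_begin = nullptr;
  size_t region_size = 0;
  // Calls the __asan_locate_address defined above; a heap pointer yields "heap".
  const char *kind =
      __asan_locate_address(p, name, sizeof(name), &region_begin, &region_size);
  std::printf("%p: kind=%s region=%p size=%zu\n", p, kind, region_begin,
              region_size);

  void *frames[16];
  int tid = -1;
  // Retrieves the recorded malloc stack trace and allocating thread for p.
  size_t n = __asan_get_alloc_stack(p, frames, 16, &tid);
  std::printf("allocated by thread T%d, %zu frame(s) recorded\n", tid, n);

  std::free(p);
  return 0;
}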

File diff suppressed because it is too large


@@ -0,0 +1,252 @@
//===-- asan_descriptions.h -------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_descriptions.cc.
// TODO(filcab): Most struct definitions should move to the interface headers.
//===----------------------------------------------------------------------===//
#ifndef ASAN_DESCRIPTIONS_H
#define ASAN_DESCRIPTIONS_H
#include "asan_allocator.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
namespace __asan {
void DescribeThread(AsanThreadContext *context);
static inline void DescribeThread(AsanThread *t) {
if (t) DescribeThread(t->context());
}
const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
uptr buff_len);
const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len);
class Decorator : public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() {}
const char *Access() { return Blue(); }
const char *Location() { return Green(); }
const char *Allocation() { return Magenta(); }
const char *ShadowByte(u8 byte) {
switch (byte) {
case kAsanHeapLeftRedzoneMagic:
case kAsanArrayCookieMagic:
return Red();
case kAsanHeapFreeMagic:
return Magenta();
case kAsanStackLeftRedzoneMagic:
case kAsanStackMidRedzoneMagic:
case kAsanStackRightRedzoneMagic:
return Red();
case kAsanStackAfterReturnMagic:
return Magenta();
case kAsanInitializationOrderMagic:
return Cyan();
case kAsanUserPoisonedMemoryMagic:
case kAsanContiguousContainerOOBMagic:
case kAsanAllocaLeftMagic:
case kAsanAllocaRightMagic:
return Blue();
case kAsanStackUseAfterScopeMagic:
return Magenta();
case kAsanGlobalRedzoneMagic:
return Red();
case kAsanInternalHeapMagic:
return Yellow();
case kAsanIntraObjectRedzone:
return Yellow();
default:
return Default();
}
}
};
enum ShadowKind : u8 {
kShadowKindLow,
kShadowKindGap,
kShadowKindHigh,
};
static const char *const ShadowNames[] = {"low shadow", "shadow gap",
"high shadow"};
struct ShadowAddressDescription {
uptr addr;
ShadowKind kind;
u8 shadow_byte;
void Print() const;
};
bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr);
bool DescribeAddressIfShadow(uptr addr);
enum AccessType {
kAccessTypeLeft,
kAccessTypeRight,
kAccessTypeInside,
kAccessTypeUnknown, // This means we have an AddressSanitizer bug!
};
struct ChunkAccess {
uptr bad_addr;
sptr offset;
uptr chunk_begin;
uptr chunk_size;
u32 user_requested_alignment : 12;
u32 access_type : 2;
u32 alloc_type : 2;
};
struct HeapAddressDescription {
uptr addr;
uptr alloc_tid;
uptr free_tid;
u32 alloc_stack_id;
u32 free_stack_id;
ChunkAccess chunk_access;
void Print() const;
};
bool GetHeapAddressInformation(uptr addr, uptr access_size,
HeapAddressDescription *descr);
bool DescribeAddressIfHeap(uptr addr, uptr access_size = 1);
struct StackAddressDescription {
uptr addr;
uptr tid;
uptr offset;
uptr frame_pc;
uptr access_size;
const char *frame_descr;
void Print() const;
};
bool GetStackAddressInformation(uptr addr, uptr access_size,
StackAddressDescription *descr);
struct GlobalAddressDescription {
uptr addr;
// Assume address is close to at most four globals.
static const int kMaxGlobals = 4;
__asan_global globals[kMaxGlobals];
u32 reg_sites[kMaxGlobals];
uptr access_size;
u8 size;
void Print(const char *bug_type = "") const;
// Returns true when this description points inside the same global variable
// as the other one. The two descriptions may point at different addresses
// within the variable.
bool PointsInsideTheSameVariable(const GlobalAddressDescription &other) const;
};
bool GetGlobalAddressInformation(uptr addr, uptr access_size,
GlobalAddressDescription *descr);
bool DescribeAddressIfGlobal(uptr addr, uptr access_size, const char *bug_type);
// General function to describe an address. Will try to describe the address as
// a shadow, global (variable), stack, or heap address.
// bug_type is optional and is used to check whether we're reporting an
// initialization-order-fiasco.
// The proper access_size should be passed for stack, global, and heap
// addresses. Defaults to 1.
// Each of the *AddressDescription functions has its own Print() member, which
// may take access_size and bug_type parameters if needed.
void PrintAddressDescription(uptr addr, uptr access_size = 1,
const char *bug_type = "");
enum AddressKind {
kAddressKindWild,
kAddressKindShadow,
kAddressKindHeap,
kAddressKindStack,
kAddressKindGlobal,
};
class AddressDescription {
struct AddressDescriptionData {
AddressKind kind;
union {
ShadowAddressDescription shadow;
HeapAddressDescription heap;
StackAddressDescription stack;
GlobalAddressDescription global;
uptr addr;
};
};
AddressDescriptionData data;
public:
AddressDescription() = default;
// shouldLockThreadRegistry allows us to skip locking if we're sure we already
// have done it.
AddressDescription(uptr addr, bool shouldLockThreadRegistry = true)
: AddressDescription(addr, 1, shouldLockThreadRegistry) {}
AddressDescription(uptr addr, uptr access_size,
bool shouldLockThreadRegistry = true);
uptr Address() const {
switch (data.kind) {
case kAddressKindWild:
return data.addr;
case kAddressKindShadow:
return data.shadow.addr;
case kAddressKindHeap:
return data.heap.addr;
case kAddressKindStack:
return data.stack.addr;
case kAddressKindGlobal:
return data.global.addr;
}
UNREACHABLE("AddressInformation kind is invalid");
}
void Print(const char *bug_descr = nullptr) const {
switch (data.kind) {
case kAddressKindWild:
Printf("Address %p is a wild pointer.\n", data.addr);
return;
case kAddressKindShadow:
return data.shadow.Print();
case kAddressKindHeap:
return data.heap.Print();
case kAddressKindStack:
return data.stack.Print();
case kAddressKindGlobal:
// initialization-order-fiasco has a special Print()
return data.global.Print(bug_descr);
}
UNREACHABLE("AddressInformation kind is invalid");
}
void StoreTo(AddressDescriptionData *dst) const { *dst = data; }
const ShadowAddressDescription *AsShadow() const {
return data.kind == kAddressKindShadow ? &data.shadow : nullptr;
}
const HeapAddressDescription *AsHeap() const {
return data.kind == kAddressKindHeap ? &data.heap : nullptr;
}
const StackAddressDescription *AsStack() const {
return data.kind == kAddressKindStack ? &data.stack : nullptr;
}
const GlobalAddressDescription *AsGlobal() const {
return data.kind == kAddressKindGlobal ? &data.global : nullptr;
}
};
} // namespace __asan
#endif // ASAN_DESCRIPTIONS_H


@@ -0,0 +1,481 @@
//===-- asan_errors.cc ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan implementation for error structures.
//===----------------------------------------------------------------------===//
#include "asan_errors.h"
#include "asan_descriptions.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
static void OnStackUnwind(const SignalContext &sig,
const void *callback_context,
BufferedStackTrace *stack) {
bool fast = common_flags()->fast_unwind_on_fatal;
#if SANITIZER_FREEBSD || SANITIZER_NETBSD
// On FreeBSD the slow unwinding that leverages _Unwind_Backtrace()
// yields the call stack of the signal's handler and not of the code
// that raised the signal (as it does on Linux).
fast = true;
#endif
// Tests and maybe some users expect that the scariness score is printed just
// before the stack. Since only ASan has a scariness score, there is no
// corresponding code in sanitizer_common, so we use this callback to print it.
static_cast<const ScarinessScoreBase *>(callback_context)->Print();
GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context, fast);
}
void ErrorDeadlySignal::Print() {
ReportDeadlySignal(signal, tid, &OnStackUnwind, &scariness);
}
void ErrorDoubleFree::Print() {
Decorator d;
Printf("%s", d.Warning());
char tname[128];
Report(
"ERROR: AddressSanitizer: attempting %s on %p in "
"thread T%d%s:\n",
scariness.GetDescription(), addr_description.addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
Printf("%s", d.Default());
scariness.Print();
GET_STACK_TRACE_FATAL(second_free_stack->trace[0],
second_free_stack->top_frame_bp);
stack.Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
}
void ErrorNewDeleteTypeMismatch::Print() {
Decorator d;
Printf("%s", d.Warning());
char tname[128];
Report(
"ERROR: AddressSanitizer: %s on %p in thread "
"T%d%s:\n",
scariness.GetDescription(), addr_description.addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
Printf("%s object passed to delete has wrong type:\n", d.Default());
if (delete_size != 0) {
Printf(
" size of the allocated type: %zd bytes;\n"
" size of the deallocated type: %zd bytes.\n",
addr_description.chunk_access.chunk_size, delete_size);
}
const uptr user_alignment =
addr_description.chunk_access.user_requested_alignment;
if (delete_alignment != user_alignment) {
char user_alignment_str[32];
char delete_alignment_str[32];
internal_snprintf(user_alignment_str, sizeof(user_alignment_str),
"%zd bytes", user_alignment);
internal_snprintf(delete_alignment_str, sizeof(delete_alignment_str),
"%zd bytes", delete_alignment);
static const char *kDefaultAlignment = "default-aligned";
Printf(
" alignment of the allocated type: %s;\n"
" alignment of the deallocated type: %s.\n",
user_alignment > 0 ? user_alignment_str : kDefaultAlignment,
delete_alignment > 0 ? delete_alignment_str : kDefaultAlignment);
}
CHECK_GT(free_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
stack.Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=new_delete_type_mismatch=0\n");
}
void ErrorFreeNotMalloced::Print() {
Decorator d;
Printf("%s", d.Warning());
char tname[128];
Report(
"ERROR: AddressSanitizer: attempting free on address "
"which was not malloc()-ed: %p in thread T%d%s\n",
addr_description.Address(), tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
Printf("%s", d.Default());
CHECK_GT(free_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
stack.Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
}
void ErrorAllocTypeMismatch::Print() {
static const char *alloc_names[] = {"INVALID", "malloc", "operator new",
"operator new []"};
static const char *dealloc_names[] = {"INVALID", "free", "operator delete",
"operator delete []"};
CHECK_NE(alloc_type, dealloc_type);
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n",
scariness.GetDescription(),
alloc_names[alloc_type], dealloc_names[dealloc_type],
addr_description.addr);
Printf("%s", d.Default());
CHECK_GT(dealloc_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp);
stack.Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
}
void ErrorMallocUsableSizeNotOwned::Print() {
Decorator d;
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: attempting to call malloc_usable_size() for "
"pointer which is not owned: %p\n",
addr_description.Address());
Printf("%s", d.Default());
stack->Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
Decorator d;
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: attempting to call "
"__sanitizer_get_allocated_size() for pointer which is not owned: %p\n",
addr_description.Address());
Printf("%s", d.Default());
stack->Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorStringFunctionMemoryRangesOverlap::Print() {
Decorator d;
char bug_type[100];
internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) "
"overlap\n",
bug_type, addr1_description.Address(),
addr1_description.Address() + length1, addr2_description.Address(),
addr2_description.Address() + length2);
Printf("%s", d.Default());
scariness.Print();
stack->Print();
addr1_description.Print();
addr2_description.Print();
ReportErrorSummary(bug_type, stack);
}
void ErrorStringFunctionSizeOverflow::Print() {
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s: (size=%zd)\n",
scariness.GetDescription(), size);
Printf("%s", d.Default());
scariness.Print();
stack->Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorBadParamsToAnnotateContiguousContainer::Print() {
Report(
"ERROR: AddressSanitizer: bad parameters to "
"__sanitizer_annotate_contiguous_container:\n"
" beg : %p\n"
" end : %p\n"
" old_mid : %p\n"
" new_mid : %p\n",
beg, end, old_mid, new_mid);
uptr granularity = SHADOW_GRANULARITY;
if (!IsAligned(beg, granularity))
Report("ERROR: beg is not aligned by %d\n", granularity);
stack->Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorODRViolation::Print() {
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
global1.beg);
Printf("%s", d.Default());
InternalScopedString g1_loc(256), g2_loc(256);
PrintGlobalLocation(&g1_loc, global1);
PrintGlobalLocation(&g2_loc, global2);
Printf(" [1] size=%zd '%s' %s\n", global1.size,
MaybeDemangleGlobalName(global1.name), g1_loc.data());
Printf(" [2] size=%zd '%s' %s\n", global2.size,
MaybeDemangleGlobalName(global2.name), g2_loc.data());
if (stack_id1 && stack_id2) {
Printf("These globals were registered at these points:\n");
Printf(" [1]:\n");
StackDepotGet(stack_id1).Print();
Printf(" [2]:\n");
StackDepotGet(stack_id2).Print();
}
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=detect_odr_violation=0\n");
InternalScopedString error_msg(256);
error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
MaybeDemangleGlobalName(global1.name), g1_loc.data());
ReportErrorSummary(error_msg.data());
}
void ErrorInvalidPointerPair::Print() {
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(),
addr1_description.Address(), addr2_description.Address());
Printf("%s", d.Default());
GET_STACK_TRACE_FATAL(pc, bp);
stack.Print();
addr1_description.Print();
addr2_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
}
static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) {
return s[-1] > 127 && s[1] > 127;
}
ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr,
bool is_write_, uptr access_size_)
: ErrorBase(tid),
addr_description(addr, access_size_, /*shouldLockThreadRegistry=*/false),
pc(pc_),
bp(bp_),
sp(sp_),
access_size(access_size_),
is_write(is_write_),
shadow_val(0) {
scariness.Clear();
if (access_size) {
if (access_size <= 9) {
char desr[] = "?-byte";
desr[0] = '0' + access_size;
scariness.Scare(access_size + access_size / 2, desr);
} else if (access_size >= 10) {
scariness.Scare(15, "multi-byte");
}
is_write ? scariness.Scare(20, "write") : scariness.Scare(1, "read");
// Determine the error type.
bug_descr = "unknown-crash";
if (AddrIsInMem(addr)) {
u8 *shadow_addr = (u8 *)MemToShadow(addr);
// If we are accessing 16 bytes, look at the second shadow byte.
if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++;
// If we are in the partial right redzone, look at the next shadow byte.
if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++;
bool far_from_bounds = false;
shadow_val = *shadow_addr;
int bug_type_score = 0;
// For use-after-frees reads are almost as bad as writes.
int read_after_free_bonus = 0;
switch (shadow_val) {
case kAsanHeapLeftRedzoneMagic:
case kAsanArrayCookieMagic:
bug_descr = "heap-buffer-overflow";
bug_type_score = 10;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
case kAsanHeapFreeMagic:
bug_descr = "heap-use-after-free";
bug_type_score = 20;
if (!is_write) read_after_free_bonus = 18;
break;
case kAsanStackLeftRedzoneMagic:
bug_descr = "stack-buffer-underflow";
bug_type_score = 25;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
case kAsanInitializationOrderMagic:
bug_descr = "initialization-order-fiasco";
bug_type_score = 1;
break;
case kAsanStackMidRedzoneMagic:
case kAsanStackRightRedzoneMagic:
bug_descr = "stack-buffer-overflow";
bug_type_score = 25;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
case kAsanStackAfterReturnMagic:
bug_descr = "stack-use-after-return";
bug_type_score = 30;
if (!is_write) read_after_free_bonus = 18;
break;
case kAsanUserPoisonedMemoryMagic:
bug_descr = "use-after-poison";
bug_type_score = 20;
break;
case kAsanContiguousContainerOOBMagic:
bug_descr = "container-overflow";
bug_type_score = 10;
break;
case kAsanStackUseAfterScopeMagic:
bug_descr = "stack-use-after-scope";
bug_type_score = 10;
break;
case kAsanGlobalRedzoneMagic:
bug_descr = "global-buffer-overflow";
bug_type_score = 10;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
case kAsanIntraObjectRedzone:
bug_descr = "intra-object-overflow";
bug_type_score = 10;
break;
case kAsanAllocaLeftMagic:
case kAsanAllocaRightMagic:
bug_descr = "dynamic-stack-buffer-overflow";
bug_type_score = 25;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
}
scariness.Scare(bug_type_score + read_after_free_bonus, bug_descr);
if (far_from_bounds) scariness.Scare(10, "far-from-bounds");
}
}
}
static void PrintContainerOverflowHint() {
Printf("HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=detect_container_overflow=0.\n"
"If you suspect a false positive see also: "
"https://github.com/google/sanitizers/wiki/"
"AddressSanitizerContainerOverflow.\n");
}
static void PrintShadowByte(InternalScopedString *str, const char *before,
u8 byte, const char *after = "\n") {
PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
}
static void PrintLegend(InternalScopedString *str) {
str->append(
"Shadow byte legend (one shadow byte represents %d "
"application bytes):\n",
(int)SHADOW_GRANULARITY);
PrintShadowByte(str, " Addressable: ", 0);
str->append(" Partially addressable: ");
for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
str->append("\n");
PrintShadowByte(str, " Heap left redzone: ",
kAsanHeapLeftRedzoneMagic);
PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
PrintShadowByte(str, " Stack left redzone: ",
kAsanStackLeftRedzoneMagic);
PrintShadowByte(str, " Stack mid redzone: ",
kAsanStackMidRedzoneMagic);
PrintShadowByte(str, " Stack right redzone: ",
kAsanStackRightRedzoneMagic);
PrintShadowByte(str, " Stack after return: ",
kAsanStackAfterReturnMagic);
PrintShadowByte(str, " Stack use after scope: ",
kAsanStackUseAfterScopeMagic);
PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
PrintShadowByte(str, " Global init order: ",
kAsanInitializationOrderMagic);
PrintShadowByte(str, " Poisoned by user: ",
kAsanUserPoisonedMemoryMagic);
PrintShadowByte(str, " Container overflow: ",
kAsanContiguousContainerOOBMagic);
PrintShadowByte(str, " Array cookie: ",
kAsanArrayCookieMagic);
PrintShadowByte(str, " Intra object redzone: ",
kAsanIntraObjectRedzone);
PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
}
static void PrintShadowBytes(InternalScopedString *str, const char *before,
u8 *bytes, u8 *guilty, uptr n) {
Decorator d;
if (before) str->append("%s%p:", before, bytes);
for (uptr i = 0; i < n; i++) {
u8 *p = bytes + i;
const char *before =
p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
const char *after = p == guilty ? "]" : "";
PrintShadowByte(str, before, *p, after);
}
str->append("\n");
}
static void PrintShadowMemoryForAddress(uptr addr) {
if (!AddrIsInMem(addr)) return;
uptr shadow_addr = MemToShadow(addr);
const uptr n_bytes_per_row = 16;
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
InternalScopedString str(4096 * 8);
str.append("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) {
uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row;
// Skip rows that would be outside the shadow range. This can happen when
// the user address is near the bottom, top, or shadow gap of the address
// space.
if (!AddrIsInShadow(row_shadow_addr)) continue;
const char *prefix = (i == 0) ? "=>" : " ";
PrintShadowBytes(&str, prefix, (u8 *)row_shadow_addr, (u8 *)shadow_addr,
n_bytes_per_row);
}
if (flags()->print_legend) PrintLegend(&str);
Printf("%s", str.data());
}
void ErrorGeneric::Print() {
Decorator d;
Printf("%s", d.Warning());
uptr addr = addr_description.Address();
Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n",
bug_descr, (void *)addr, pc, bp, sp);
Printf("%s", d.Default());
char tname[128];
Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(),
access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size,
(void *)addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.Default());
scariness.Print();
GET_STACK_TRACE_FATAL(pc, bp);
stack.Print();
// Pass bug_descr because we have a special case for
// initialization-order-fiasco
addr_description.Print(bug_descr);
if (shadow_val == kAsanContiguousContainerOOBMagic)
PrintContainerOverflowHint();
ReportErrorSummary(bug_descr, &stack);
PrintShadowMemoryForAddress(addr);
}
} // namespace __asan
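
ErrorGeneric above classifies a fault by reading the shadow byte for the faulting address, and PrintShadowMemoryForAddress() dumps the shadow rows around it. The underlying mapping is Shadow = (Mem >> SHADOW_SCALE) + SHADOW_OFFSET, with one shadow byte covering SHADOW_GRANULARITY application bytes. The standalone sketch below reproduces that arithmetic using the public __asan_get_shadow_mapping() entry point (defined in asan_debugging.cc above), so no platform-specific scale or offset constants have to be assumed.

// shadow_math_demo.cc -- illustration only; build with -fsanitize=address.
#include <sanitizer/asan_interface.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
  size_t scale = 0, offset = 0;
  __asan_get_shadow_mapping(&scale, &offset);

  char *p = static_cast<char *>(std::malloc(13));
  uintptr_t addr = reinterpret_cast<uintptr_t>(p);

  // Same formula as MemToShadow(): one shadow byte describes 2^scale
  // application bytes. 0 means fully addressable, 1..(2^scale - 1) partially
  // addressable, and values >= 0x80 are the magic bytes from PrintLegend().
  uintptr_t shadow_addr = (addr >> scale) + offset;
  std::printf("granularity = %zu bytes\n", (size_t)1 << scale);
  std::printf("mem %p -> shadow byte at 0x%zx\n", (void *)p,
              (size_t)shadow_addr);

  std::free(p);
  return 0;
}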


@@ -0,0 +1,358 @@
//===-- asan_errors.h -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for error structures.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ERRORS_H
#define ASAN_ERRORS_H
#include "asan_descriptions.h"
#include "asan_scariness_score.h"
#include "sanitizer_common/sanitizer_common.h"
namespace __asan {
struct ErrorBase {
ErrorBase() = default;
explicit ErrorBase(u32 tid_) : tid(tid_) {}
ScarinessScoreBase scariness;
u32 tid;
};
struct ErrorDeadlySignal : ErrorBase {
SignalContext signal;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorDeadlySignal() = default;
ErrorDeadlySignal(u32 tid, const SignalContext &sig)
: ErrorBase(tid), signal(sig) {
scariness.Clear();
if (signal.IsStackOverflow()) {
scariness.Scare(10, "stack-overflow");
} else if (!signal.is_memory_access) {
scariness.Scare(10, "signal");
} else if (signal.addr < GetPageSizeCached()) {
scariness.Scare(10, "null-deref");
} else if (signal.addr == signal.pc) {
scariness.Scare(60, "wild-jump");
} else if (signal.write_flag == SignalContext::WRITE) {
scariness.Scare(30, "wild-addr-write");
} else if (signal.write_flag == SignalContext::READ) {
scariness.Scare(20, "wild-addr-read");
} else {
scariness.Scare(25, "wild-addr");
}
}
void Print();
};
struct ErrorDoubleFree : ErrorBase {
// ErrorDoubleFree doesn't own the stack trace.
const BufferedStackTrace *second_free_stack;
HeapAddressDescription addr_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorDoubleFree() = default;
ErrorDoubleFree(u32 tid, BufferedStackTrace *stack, uptr addr)
: ErrorBase(tid), second_free_stack(stack) {
CHECK_GT(second_free_stack->size, 0);
GetHeapAddressInformation(addr, 1, &addr_description);
scariness.Clear();
scariness.Scare(42, "double-free");
}
void Print();
};
struct ErrorNewDeleteTypeMismatch : ErrorBase {
// ErrorNewDeleteTypeMismatch doesn't own the stack trace.
const BufferedStackTrace *free_stack;
HeapAddressDescription addr_description;
uptr delete_size;
uptr delete_alignment;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorNewDeleteTypeMismatch() = default;
ErrorNewDeleteTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
uptr delete_size_, uptr delete_alignment_)
: ErrorBase(tid), free_stack(stack), delete_size(delete_size_),
delete_alignment(delete_alignment_) {
GetHeapAddressInformation(addr, 1, &addr_description);
scariness.Clear();
scariness.Scare(10, "new-delete-type-mismatch");
}
void Print();
};
struct ErrorFreeNotMalloced : ErrorBase {
// ErrorFreeNotMalloced doesn't own the stack trace.
const BufferedStackTrace *free_stack;
AddressDescription addr_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorFreeNotMalloced() = default;
ErrorFreeNotMalloced(u32 tid, BufferedStackTrace *stack, uptr addr)
: ErrorBase(tid),
free_stack(stack),
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
scariness.Scare(40, "bad-free");
}
void Print();
};
struct ErrorAllocTypeMismatch : ErrorBase {
// ErrorAllocTypeMismatch doesn't own the stack trace.
const BufferedStackTrace *dealloc_stack;
HeapAddressDescription addr_description;
AllocType alloc_type, dealloc_type;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorAllocTypeMismatch() = default;
ErrorAllocTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
AllocType alloc_type_, AllocType dealloc_type_)
: ErrorBase(tid),
dealloc_stack(stack),
alloc_type(alloc_type_),
dealloc_type(dealloc_type_) {
GetHeapAddressInformation(addr, 1, &addr_description);
scariness.Clear();
scariness.Scare(10, "alloc-dealloc-mismatch");
};
void Print();
};
struct ErrorMallocUsableSizeNotOwned : ErrorBase {
// ErrorMallocUsableSizeNotOwned doesn't own the stack trace.
const BufferedStackTrace *stack;
AddressDescription addr_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorMallocUsableSizeNotOwned() = default;
ErrorMallocUsableSizeNotOwned(u32 tid, BufferedStackTrace *stack_, uptr addr)
: ErrorBase(tid),
stack(stack_),
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
scariness.Scare(10, "bad-malloc_usable_size");
}
void Print();
};
struct ErrorSanitizerGetAllocatedSizeNotOwned : ErrorBase {
// ErrorSanitizerGetAllocatedSizeNotOwned doesn't own the stack trace.
const BufferedStackTrace *stack;
AddressDescription addr_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorSanitizerGetAllocatedSizeNotOwned() = default;
ErrorSanitizerGetAllocatedSizeNotOwned(u32 tid, BufferedStackTrace *stack_,
uptr addr)
: ErrorBase(tid),
stack(stack_),
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
scariness.Scare(10, "bad-__sanitizer_get_allocated_size");
}
void Print();
};
struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase {
// ErrorStringFunctionMemoryRangesOverlap doesn't own the stack trace.
const BufferedStackTrace *stack;
uptr length1, length2;
AddressDescription addr1_description;
AddressDescription addr2_description;
const char *function;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorStringFunctionMemoryRangesOverlap() = default;
ErrorStringFunctionMemoryRangesOverlap(u32 tid, BufferedStackTrace *stack_,
uptr addr1, uptr length1_, uptr addr2,
uptr length2_, const char *function_)
: ErrorBase(tid),
stack(stack_),
length1(length1_),
length2(length2_),
addr1_description(addr1, length1, /*shouldLockThreadRegistry=*/false),
addr2_description(addr2, length2, /*shouldLockThreadRegistry=*/false),
function(function_) {
char bug_type[100];
internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
scariness.Clear();
scariness.Scare(10, bug_type);
}
void Print();
};
struct ErrorStringFunctionSizeOverflow : ErrorBase {
// ErrorStringFunctionSizeOverflow doesn't own the stack trace.
const BufferedStackTrace *stack;
AddressDescription addr_description;
uptr size;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorStringFunctionSizeOverflow() = default;
ErrorStringFunctionSizeOverflow(u32 tid, BufferedStackTrace *stack_,
uptr addr, uptr size_)
: ErrorBase(tid),
stack(stack_),
addr_description(addr, /*shouldLockThreadRegistry=*/false),
size(size_) {
scariness.Clear();
scariness.Scare(10, "negative-size-param");
}
void Print();
};
struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase {
// ErrorBadParamsToAnnotateContiguousContainer doesn't own the stack trace.
const BufferedStackTrace *stack;
uptr beg, end, old_mid, new_mid;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorBadParamsToAnnotateContiguousContainer() = default;
// PS4: Do we want an AddressDescription for beg?
ErrorBadParamsToAnnotateContiguousContainer(u32 tid,
BufferedStackTrace *stack_,
uptr beg_, uptr end_,
uptr old_mid_, uptr new_mid_)
: ErrorBase(tid),
stack(stack_),
beg(beg_),
end(end_),
old_mid(old_mid_),
new_mid(new_mid_) {
scariness.Clear();
scariness.Scare(10, "bad-__sanitizer_annotate_contiguous_container");
}
void Print();
};
struct ErrorODRViolation : ErrorBase {
__asan_global global1, global2;
u32 stack_id1, stack_id2;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorODRViolation() = default;
ErrorODRViolation(u32 tid, const __asan_global *g1, u32 stack_id1_,
const __asan_global *g2, u32 stack_id2_)
: ErrorBase(tid),
global1(*g1),
global2(*g2),
stack_id1(stack_id1_),
stack_id2(stack_id2_) {
scariness.Clear();
scariness.Scare(10, "odr-violation");
}
void Print();
};
struct ErrorInvalidPointerPair : ErrorBase {
uptr pc, bp, sp;
AddressDescription addr1_description;
AddressDescription addr2_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorInvalidPointerPair() = default;
ErrorInvalidPointerPair(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr p1,
uptr p2)
: ErrorBase(tid),
pc(pc_),
bp(bp_),
sp(sp_),
addr1_description(p1, 1, /*shouldLockThreadRegistry=*/false),
addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
scariness.Scare(10, "invalid-pointer-pair");
}
void Print();
};
struct ErrorGeneric : ErrorBase {
AddressDescription addr_description;
uptr pc, bp, sp;
uptr access_size;
const char *bug_descr;
bool is_write;
u8 shadow_val;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorGeneric() = default;
ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_,
uptr access_size_);
void Print();
};
// clang-format off
#define ASAN_FOR_EACH_ERROR_KIND(macro) \
macro(DeadlySignal) \
macro(DoubleFree) \
macro(NewDeleteTypeMismatch) \
macro(FreeNotMalloced) \
macro(AllocTypeMismatch) \
macro(MallocUsableSizeNotOwned) \
macro(SanitizerGetAllocatedSizeNotOwned) \
macro(StringFunctionMemoryRangesOverlap) \
macro(StringFunctionSizeOverflow) \
macro(BadParamsToAnnotateContiguousContainer) \
macro(ODRViolation) \
macro(InvalidPointerPair) \
macro(Generic)
// clang-format on
#define ASAN_DEFINE_ERROR_KIND(name) kErrorKind##name,
#define ASAN_ERROR_DESCRIPTION_MEMBER(name) Error##name name;
#define ASAN_ERROR_DESCRIPTION_CONSTRUCTOR(name) \
ErrorDescription(Error##name const &e) : kind(kErrorKind##name), name(e) {}
#define ASAN_ERROR_DESCRIPTION_PRINT(name) \
case kErrorKind##name: \
return name.Print();
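// Editorial illustration (not in the original header): for the FreeNotMalloced
// kind, the helpers above expand to
//   kErrorKindFreeNotMalloced,                 // enum value
//   ErrorFreeNotMalloced FreeNotMalloced;      // union member
//   ErrorDescription(ErrorFreeNotMalloced const &e)
//       : kind(kErrorKindFreeNotMalloced), FreeNotMalloced(e) {}
//   case kErrorKindFreeNotMalloced: return FreeNotMalloced.Print();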
enum ErrorKind {
kErrorKindInvalid = 0,
ASAN_FOR_EACH_ERROR_KIND(ASAN_DEFINE_ERROR_KIND)
};
struct ErrorDescription {
ErrorKind kind;
  // We're using a tagged union because it allows us to have a trivially
  // copyable type and use the same structures as the public interface.
  //
  // We could add a wrapper around it to make it "more C++-like", but that
  // would add a lot of code and the benefit wouldn't be that big.
union {
ErrorBase Base;
ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_MEMBER)
};
ErrorDescription() { internal_memset(this, 0, sizeof(*this)); }
ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_CONSTRUCTOR)
bool IsValid() { return kind != kErrorKindInvalid; }
void Print() {
switch (kind) {
ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_PRINT)
case kErrorKindInvalid:
CHECK(0);
}
CHECK(0);
}
};
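// Editorial usage sketch (not in the original header), relying only on the
// declarations above:
//   ErrorFreeNotMalloced error(tid, stack, addr);  // concrete error
//   ErrorDescription description(error);           // implicit tagged wrapper
//   if (description.IsValid()) description.Print();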
#undef ASAN_FOR_EACH_ERROR_KIND
#undef ASAN_DEFINE_ERROR_KIND
#undef ASAN_ERROR_DESCRIPTION_MEMBER
#undef ASAN_ERROR_DESCRIPTION_CONSTRUCTOR
#undef ASAN_ERROR_DESCRIPTION_PRINT
} // namespace __asan
#endif // ASAN_ERRORS_H


@ -0,0 +1,283 @@
//===-- asan_fake_stack.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"
namespace __asan {
static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
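// Editorial note (not in the original source), assuming the usual definition
// kAsanStackAfterReturnMagic == 0xf5 from asan_internal.h: the constants above
// replicate that byte, so kMagic2 == 0xf5f5, kMagic4 == 0xf5f5f5f5 and
// kMagic8 == 0xf5f5f5f5f5f5f5f5, letting SetShadow() poison eight shadow bytes
// per u64 store.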
static const u64 kAllocaRedzoneSize = 32UL;
static const u64 kAllocaRedzoneMask = 31UL;
// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
if (SHADOW_SCALE == 3 && class_id <= 6) {
// This code expects SHADOW_SCALE=3.
for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
shadow[i] = magic;
// Make sure this does not become memset.
SanitizerBreakOptimization(nullptr);
}
} else {
    // The size class is too big; it's cheaper to poison only 'size' bytes.
PoisonShadow(ptr, size, static_cast<u8>(magic));
}
}
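// Editorial worked example (not in the original source), assuming
// SHADOW_SCALE == 3: a class-c frame spans 1 << (c + 6) bytes (see
// BytesInSizeClass in asan_fake_stack.h), i.e. 1 << (c + 3) shadow bytes,
// i.e. exactly 1 << c u64 stores -- the loop bound used above for the small
// classes, while larger classes fall back to PoisonShadow over 'size' bytes.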
FakeStack *FakeStack::Create(uptr stack_size_log) {
static uptr kMinStackSizeLog = 16;
static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
if (stack_size_log < kMinStackSizeLog)
stack_size_log = kMinStackSizeLog;
if (stack_size_log > kMaxStackSizeLog)
stack_size_log = kMaxStackSizeLog;
uptr size = RequiredSize(stack_size_log);
FakeStack *res = reinterpret_cast<FakeStack *>(
flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
: MmapOrDie(size, "FakeStack"));
res->stack_size_log_ = stack_size_log;
u8 *p = reinterpret_cast<u8 *>(res);
VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
"mmapped %zdK, noreserve=%d \n",
GetCurrentTidOrInvalid(), p,
p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
size >> 10, flags()->uar_noreserve);
return res;
}
void FakeStack::Destroy(int tid) {
PoisonAll(0);
if (Verbosity() >= 2) {
InternalScopedString str(kNumberOfSizeClasses * 50);
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
NumberOfFrames(stack_size_log(), class_id));
Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
}
uptr size = RequiredSize(stack_size_log_);
FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
UnmapOrDie(this, size);
}
void FakeStack::PoisonAll(u8 magic) {
PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
magic);
}
#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
uptr real_stack) {
CHECK_LT(class_id, kNumberOfSizeClasses);
if (needs_gc_)
GC(real_stack);
uptr &hint_position = hint_position_[class_id];
const int num_iter = NumberOfFrames(stack_size_log, class_id);
u8 *flags = GetFlags(stack_size_log, class_id);
for (int i = 0; i < num_iter; i++) {
uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
// This part is tricky. On one hand, checking and setting flags[pos]
// should be atomic to ensure async-signal safety. But on the other hand,
// if the signal arrives between checking and setting flags[pos], the
// signal handler's fake stack will start from a different hint_position
// and so will not touch this particular byte. So, it is safe to do this
// with regular non-atomic load and store (at least I was not able to make
// this code crash).
if (flags[pos]) continue;
flags[pos] = 1;
FakeFrame *res = reinterpret_cast<FakeFrame *>(
GetFrame(stack_size_log, class_id, pos));
res->real_stack = real_stack;
*SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
return res;
}
return nullptr; // We are out of fake stack.
}
uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
uptr stack_size_log = this->stack_size_log();
uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
if (ptr < beg || ptr >= end) return 0;
uptr class_id = (ptr - beg) >> stack_size_log;
uptr base = beg + (class_id << stack_size_log);
CHECK_LE(base, ptr);
CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
uptr res = base + pos * BytesInSizeClass(class_id);
*frame_end = res + BytesInSizeClass(class_id);
*frame_beg = res + sizeof(FakeFrame);
return res;
}
void FakeStack::HandleNoReturn() {
needs_gc_ = true;
}
// When a throw, longjmp or some such happens, we don't call OnFree() and as a
// result may leak one or more fake frames, but the good news is that we are
// notified about all such events by HandleNoReturn().
// If we recently had such no-return event we need to collect garbage frames.
// We do it based on their 'real_stack' values -- everything that is lower
// than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
uptr collected = 0;
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
u8 *flags = GetFlags(stack_size_log(), class_id);
for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
i++) {
if (flags[i] == 0) continue; // not allocated.
FakeFrame *ff = reinterpret_cast<FakeFrame *>(
GetFrame(stack_size_log(), class_id, i));
if (ff->real_stack < real_stack) {
flags[i] = 0;
collected++;
}
}
}
needs_gc_ = false;
}
void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
u8 *flags = GetFlags(stack_size_log(), class_id);
for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
i++) {
if (flags[i] == 0) continue; // not allocated.
FakeFrame *ff = reinterpret_cast<FakeFrame *>(
GetFrame(stack_size_log(), class_id, i));
uptr begin = reinterpret_cast<uptr>(ff);
callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
}
}
}
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static THREADLOCAL FakeStack *fake_stack_tls;
FakeStack *GetTLSFakeStack() {
return fake_stack_tls;
}
void SetTLSFakeStack(FakeStack *fs) {
fake_stack_tls = fs;
}
#else
FakeStack *GetTLSFakeStack() { return 0; }
void SetTLSFakeStack(FakeStack *fs) { }
#endif // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static FakeStack *GetFakeStack() {
AsanThread *t = GetCurrentThread();
if (!t) return nullptr;
return t->fake_stack();
}
static FakeStack *GetFakeStackFast() {
if (FakeStack *fs = GetTLSFakeStack())
return fs;
if (!__asan_option_detect_stack_use_after_return)
return nullptr;
return GetFakeStack();
}
ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
FakeStack *fs = GetFakeStackFast();
if (!fs) return 0;
uptr local_stack;
uptr real_stack = reinterpret_cast<uptr>(&local_stack);
FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
if (!ff) return 0; // Out of fake stack.
uptr ptr = reinterpret_cast<uptr>(ff);
SetShadow(ptr, size, class_id, 0);
return ptr;
}
ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
FakeStack::Deallocate(ptr, class_id);
SetShadow(ptr, size, class_id, kMagic8);
}
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
__asan_stack_malloc_##class_id(uptr size) { \
return OnMalloc(class_id, size); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
uptr ptr, uptr size) { \
OnFree(ptr, class_id, size); \
}
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
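// Editorial illustration (not in the original source): for class_id == 0 the
// macro above defines (interface attributes omitted)
//   uptr __asan_stack_malloc_0(uptr size);
//   void __asan_stack_free_0(uptr ptr, uptr size);
// which instrumented code uses to obtain and retire fake frames when
// stack-use-after-return detection is enabled.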
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
void **end) {
FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
if (!fs) return nullptr;
uptr frame_beg, frame_end;
FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
if (!frame) return nullptr;
if (frame->magic != kCurrentStackFrameMagic)
return nullptr;
if (beg) *beg = reinterpret_cast<void*>(frame_beg);
if (end) *end = reinterpret_cast<void*>(frame_end);
return reinterpret_cast<void*>(frame->real_stack);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_alloca_poison(uptr addr, uptr size) {
uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
uptr PartialRzAddr = addr + size;
uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
FastPoisonShadowPartialRightRedzone(
PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}
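// Editorial worked example (not in the original source): for an alloca at
// addr == 0x1000 with size == 20 and SHADOW_GRANULARITY == 8, the left redzone
// covers [0xfe0, 0x1000); PartialRzAddr == 0x1014, PartialRzAligned == 0x1010,
// RightRzAddr == 0x1020, so the granule at 0x1010 is marked as having 4
// addressable bytes and [0x1018, 0x1040) is poisoned as right redzone.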
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
if ((!top) || (top > bottom)) return;
REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
(bottom - top) / SHADOW_GRANULARITY);
}
} // extern "C"


@ -0,0 +1,176 @@
//===-- asan_fake_stack.h ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_fake_stack.cc, implements FakeStack.
//===----------------------------------------------------------------------===//
#ifndef ASAN_FAKE_STACK_H
#define ASAN_FAKE_STACK_H
#include "sanitizer_common/sanitizer_common.h"
namespace __asan {
// Fake stack frame contains local variables of one function.
struct FakeFrame {
uptr magic; // Modified by the instrumented code.
uptr descr; // Modified by the instrumented code.
uptr pc; // Modified by the instrumented code.
uptr real_stack;
};
// For each thread we create a fake stack and place stack objects on this fake
// stack instead of the real stack. The fake stack is not really a stack but
// a fast malloc-like allocator so that when a function exits the fake stack
// is not popped but remains there for quite some time until it gets used again.
// So, we poison the objects on the fake stack when the function returns.
// It helps us find use-after-return bugs.
//
// The FakeStack object is allocated by a single mmap call and has no other
// pointers. The size of the fake stack depends on the actual thread stack size
// and thus can not be a constant.
// stack_size is a power of two greater or equal to the thread's stack size;
// we store it as its logarithm (stack_size_log).
// FakeStack has kNumberOfSizeClasses (11) size classes, each size class
// is a power of two, starting from 64 bytes. Each size class occupies
// stack_size bytes and thus can allocate
// NumberOfFrames=(stack_size/BytesInSizeClass) fake frames (also a power of 2).
// For each size class we have NumberOfFrames allocation flags,
// each flag indicates whether the given frame is currently allocated.
// All flags for size classes 0 .. 10 are stored in a single contiguous region
// followed by another contiguous region which contains the actual memory for
// size classes. The addresses are computed by GetFlags and GetFrame without
// any memory accesses solely based on 'this' and stack_size_log.
// Allocate() flips the appropriate allocation flag atomically, thus achieving
// async-signal safety.
// This allocator does not have quarantine per se, but it tries to allocate the
// frames in a round-robin fashion to maximize the delay between a deallocation
// and the next allocation.
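// Editorial worked example (not part of the original comment): with
// stack_size_log == 20, SizeRequiredForFlags() == 1 << 15 == 32K one-byte
// flags, SizeRequiredForFrames() == 11 << 20 == 11Mb of frames, and
// RequiredSize() adds the 4K kFlagsOffset header. Size class 0 then provides
// 16384 frames of 64 bytes each, while class 10 provides 16 frames of 64K.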
class FakeStack {
static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B.
static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K.
public:
static const uptr kNumberOfSizeClasses =
kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
// CTOR: create the FakeStack as a single mmap-ed object.
static FakeStack *Create(uptr stack_size_log);
void Destroy(int tid);
// stack_size_log is at least 15 (stack_size >= 32K).
static uptr SizeRequiredForFlags(uptr stack_size_log) {
return ((uptr)1) << (stack_size_log + 1 - kMinStackFrameSizeLog);
}
// Each size class occupies stack_size bytes.
static uptr SizeRequiredForFrames(uptr stack_size_log) {
return (((uptr)1) << stack_size_log) * kNumberOfSizeClasses;
}
  // Number of bytes required for the whole object.
static uptr RequiredSize(uptr stack_size_log) {
return kFlagsOffset + SizeRequiredForFlags(stack_size_log) +
SizeRequiredForFrames(stack_size_log);
}
// Offset of the given flag from the first flag.
// The flags for class 0 begin at offset 000000000
// The flags for class 1 begin at offset 100000000
// ....................2................ 110000000
// ....................3................ 111000000
// and so on.
static uptr FlagsOffset(uptr stack_size_log, uptr class_id) {
uptr t = kNumberOfSizeClasses - 1 - class_id;
const uptr all_ones = (((uptr)1) << (kNumberOfSizeClasses - 1)) - 1;
return ((all_ones >> t) << t) << (stack_size_log - 15);
}
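  // Editorial worked example (not in the original source): with
  // stack_size_log == 20, FlagsOffset(20, 0) == 0, FlagsOffset(20, 1) == 16384
  // and FlagsOffset(20, 2) == 24576, i.e. each class's flags start right after
  // the NumberOfFrames() one-byte flags of the previous class.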
static uptr NumberOfFrames(uptr stack_size_log, uptr class_id) {
return ((uptr)1) << (stack_size_log - kMinStackFrameSizeLog - class_id);
}
// Divide n by the number of frames in size class.
static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
return n & (NumberOfFrames(stack_size_log, class_id) - 1);
}
// The pointer to the flags of the given class_id.
u8 *GetFlags(uptr stack_size_log, uptr class_id) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
FlagsOffset(stack_size_log, class_id);
}
// Get frame by class_id and pos.
u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
SizeRequiredForFlags(stack_size_log) +
(((uptr)1) << stack_size_log) * class_id +
BytesInSizeClass(class_id) * pos;
}
// Allocate the fake frame.
FakeFrame *Allocate(uptr stack_size_log, uptr class_id, uptr real_stack);
// Deallocate the fake frame: read the saved flag address and write 0 there.
static void Deallocate(uptr x, uptr class_id) {
**SavedFlagPtr(x, class_id) = 0;
}
// Poison the entire FakeStack's shadow with the magic value.
void PoisonAll(u8 magic);
// Return the beginning of the FakeFrame or 0 if the address is not ours.
uptr AddrIsInFakeStack(uptr addr, uptr *frame_beg, uptr *frame_end);
USED uptr AddrIsInFakeStack(uptr addr) {
uptr t1, t2;
return AddrIsInFakeStack(addr, &t1, &t2);
}
// Number of bytes in a fake frame of this size class.
static uptr BytesInSizeClass(uptr class_id) {
return ((uptr)1) << (class_id + kMinStackFrameSizeLog);
}
// The fake frame is guaranteed to have a right redzone.
// We use the last word of that redzone to store the address of the flag
// that corresponds to the current frame to make faster deallocation.
static u8 **SavedFlagPtr(uptr x, uptr class_id) {
return reinterpret_cast<u8 **>(x + BytesInSizeClass(class_id) - sizeof(x));
}
uptr stack_size_log() const { return stack_size_log_; }
void HandleNoReturn();
void GC(uptr real_stack);
void ForEachFakeFrame(RangeIteratorCallback callback, void *arg);
private:
FakeStack() { }
  static const uptr kFlagsOffset = 4096; // This is where the flags begin.
// Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
COMPILER_CHECK(kNumberOfSizeClasses == 11);
static const uptr kMaxStackMallocSize = ((uptr)1) << kMaxStackFrameSizeLog;
uptr hint_position_[kNumberOfSizeClasses];
uptr stack_size_log_;
  // Set by HandleNoReturn(); the next Allocate() call then garbage-collects
  // stale fake frames.
bool needs_gc_;
};
FakeStack *GetTLSFakeStack();
void SetTLSFakeStack(FakeStack *fs);
} // namespace __asan
#endif // ASAN_FAKE_STACK_H


@ -0,0 +1,214 @@
//===-- asan_flags.cc -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan flag parsing logic.
//===----------------------------------------------------------------------===//
#include "asan_activation.h"
#include "asan_flags.h"
#include "asan_interface_internal.h"
#include "asan_stack.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "ubsan/ubsan_flags.h"
#include "ubsan/ubsan_platform.h"
namespace __asan {
Flags asan_flags_dont_use_directly; // use via flags().
static const char *MaybeCallAsanDefaultOptions() {
return (&__asan_default_options) ? __asan_default_options() : "";
}
static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
// Stringize the macro value.
# define ASAN_STRINGIZE(x) #x
# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
#else
return "";
#endif
}
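// Editorial illustration (not in the original source): building the runtime
// with, e.g., -DASAN_DEFAULT_OPTIONS=verbosity=1:halt_on_error=0 makes the
// stringized text "verbosity=1:halt_on_error=0" the baseline that later
// sources (__asan_default_options(), ASAN_OPTIONS) can still override.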
void Flags::SetDefaults() {
#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "asan_flags.inc"
#undef ASAN_FLAG
}
static void RegisterAsanFlags(FlagParser *parser, Flags *f) {
#define ASAN_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
#include "asan_flags.inc"
#undef ASAN_FLAG
}
void InitializeFlags() {
// Set the default values and prepare for parsing ASan and common flags.
SetCommonFlagsDefaults();
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.detect_leaks = cf.detect_leaks && CAN_SANITIZE_LEAKS;
cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
cf.malloc_context_size = kDefaultMallocContextSize;
cf.intercept_tls_get_addr = true;
cf.exitcode = 1;
OverrideCommonFlags(cf);
}
Flags *f = flags();
f->SetDefaults();
FlagParser asan_parser;
RegisterAsanFlags(&asan_parser, f);
RegisterCommonFlags(&asan_parser);
// Set the default values and prepare for parsing LSan and UBSan flags
// (which can also overwrite common flags).
#if CAN_SANITIZE_LEAKS
__lsan::Flags *lf = __lsan::flags();
lf->SetDefaults();
FlagParser lsan_parser;
__lsan::RegisterLsanFlags(&lsan_parser, lf);
RegisterCommonFlags(&lsan_parser);
#endif
#if CAN_SANITIZE_UB
__ubsan::Flags *uf = __ubsan::flags();
uf->SetDefaults();
FlagParser ubsan_parser;
__ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
RegisterCommonFlags(&ubsan_parser);
#endif
if (SANITIZER_MAC) {
// Support macOS MallocScribble and MallocPreScribble:
// <https://developer.apple.com/library/content/documentation/Performance/
// Conceptual/ManagingMemory/Articles/MallocDebug.html>
if (GetEnv("MallocScribble")) {
f->max_free_fill_size = 0x1000;
}
if (GetEnv("MallocPreScribble")) {
f->malloc_fill_byte = 0xaa;
}
}
// Override from ASan compile definition.
const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition();
asan_parser.ParseString(asan_compile_def);
// Override from user-specified string.
const char *asan_default_options = MaybeCallAsanDefaultOptions();
asan_parser.ParseString(asan_default_options);
#if CAN_SANITIZE_UB
const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
ubsan_parser.ParseString(ubsan_default_options);
#endif
#if CAN_SANITIZE_LEAKS
const char *lsan_default_options = __lsan::MaybeCallLsanDefaultOptions();
lsan_parser.ParseString(lsan_default_options);
#endif
// Override from command line.
asan_parser.ParseString(GetEnv("ASAN_OPTIONS"));
#if CAN_SANITIZE_LEAKS
lsan_parser.ParseString(GetEnv("LSAN_OPTIONS"));
#endif
#if CAN_SANITIZE_UB
ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS"));
#endif
InitializeCommonFlags();
// TODO(eugenis): dump all flags at verbosity>=2?
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) {
// TODO(samsonov): print all of the flags (ASan, LSan, common).
asan_parser.PrintFlagDescriptions();
}
// Flag validation:
if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
Report("%s: detect_leaks is not supported on this platform.\n",
SanitizerToolName);
Die();
}
// Ensure that redzone is at least SHADOW_GRANULARITY.
if (f->redzone < (int)SHADOW_GRANULARITY)
f->redzone = SHADOW_GRANULARITY;
// Make "strict_init_order" imply "check_initialization_order".
// TODO(samsonov): Use a single runtime flag for an init-order checker.
if (f->strict_init_order) {
f->check_initialization_order = true;
}
CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax);
CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log);
CHECK_GE(f->redzone, 16);
CHECK_GE(f->max_redzone, f->redzone);
CHECK_LE(f->max_redzone, 2048);
CHECK(IsPowerOfTwo(f->redzone));
CHECK(IsPowerOfTwo(f->max_redzone));
// quarantine_size is deprecated but we still honor it.
// quarantine_size can not be used together with quarantine_size_mb.
if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) {
Report("%s: please use either 'quarantine_size' (deprecated) or "
"quarantine_size_mb, but not both\n", SanitizerToolName);
Die();
}
if (f->quarantine_size >= 0)
f->quarantine_size_mb = f->quarantine_size >> 20;
if (f->quarantine_size_mb < 0) {
const int kDefaultQuarantineSizeMb =
(ASAN_LOW_MEMORY) ? 1UL << 4 : 1UL << 8;
f->quarantine_size_mb = kDefaultQuarantineSizeMb;
}
if (f->thread_local_quarantine_size_kb < 0) {
const u32 kDefaultThreadLocalQuarantineSizeKb =
        // It is not advised to go lower than 64Kb, otherwise quarantine batches
        // pushed from the thread-local quarantine to the global one will create
        // too much overhead. One quarantine batch is 8Kb and holds up to
        // 1021 chunks, which amounts to 1/8 memory overhead per batch when the
        // thread-local quarantine is set to 64Kb.
(ASAN_LOW_MEMORY) ? 1 << 6 : FIRST_32_SECOND_64(1 << 8, 1 << 10);
f->thread_local_quarantine_size_kb = kDefaultThreadLocalQuarantineSizeKb;
}
if (f->thread_local_quarantine_size_kb == 0 && f->quarantine_size_mb > 0) {
Report("%s: thread_local_quarantine_size_kb can be set to 0 only when "
"quarantine_size_mb is set to 0\n", SanitizerToolName);
Die();
}
if (!f->replace_str && common_flags()->intercept_strlen) {
Report("WARNING: strlen interceptor is enabled even though replace_str=0. "
"Use intercept_strlen=0 to disable it.");
}
if (!f->replace_str && common_flags()->intercept_strchr) {
Report("WARNING: strchr* interceptors are enabled even though "
"replace_str=0. Use intercept_strchr=0 to disable them.");
}
if (!f->replace_str && common_flags()->intercept_strndup) {
Report("WARNING: strndup* interceptors are enabled even though "
"replace_str=0. Use intercept_strndup=0 to disable them.");
}
}
} // namespace __asan
SANITIZER_INTERFACE_WEAK_DEF(const char*, __asan_default_options, void) {
return "";
}
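// Editorial illustration (user code, not part of this file): a program may
// supply its own defaults by defining a strong version of this function, e.g.
//   extern "C" const char *__asan_default_options() {
//     return "detect_stack_use_after_return=1:strict_init_order=1";
//   }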


@ -0,0 +1,49 @@
//===-- asan_flags.h -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan runtime flags.
//===----------------------------------------------------------------------===//
#ifndef ASAN_FLAGS_H
#define ASAN_FLAGS_H
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
// ASan flag values can be defined in five ways:
// 1) initialized with default values at startup.
// 2) overridden during compilation of the ASan runtime by providing the
//    compile definition ASAN_DEFAULT_OPTIONS.
// 3) overridden from the string returned by the user-specified function
//    __asan_default_options().
// 4) overridden from the env variable ASAN_OPTIONS.
// 5) overridden during ASan activation (for now used on Android only).
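// Editorial illustration (not in the original header): at run time, way 4
// looks like, e.g.,
//   ASAN_OPTIONS=quarantine_size_mb=64:detect_stack_use_after_return=1 ./app
// where './app' stands for any instrumented binary.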
namespace __asan {
struct Flags {
#define ASAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "asan_flags.inc"
#undef ASAN_FLAG
void SetDefaults();
};
extern Flags asan_flags_dont_use_directly;
inline Flags *flags() {
return &asan_flags_dont_use_directly;
}
void InitializeFlags();
} // namespace __asan
#endif // ASAN_FLAGS_H


@ -0,0 +1,162 @@
//===-- asan_flags.inc ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// ASan runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef ASAN_FLAG
# error "Define ASAN_FLAG prior to including this file!"
#endif
// ASAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
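// Editorial note (not in the original file): each entry below expands both to
// a `Type Name;` member of __asan::Flags (see asan_flags.h) and to a parser
// registration (see RegisterAsanFlags in asan_flags.cc), so the runtime reads
// a value as, e.g., flags()->redzone or flags()->quarantine_size_mb.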
ASAN_FLAG(int, quarantine_size, -1,
"Deprecated, please use quarantine_size_mb.")
ASAN_FLAG(int, quarantine_size_mb, -1,
"Size (in Mb) of quarantine used to detect use-after-free "
"errors. Lower value may reduce memory usage but increase the "
"chance of false negatives.")
ASAN_FLAG(int, thread_local_quarantine_size_kb, -1,
"Size (in Kb) of thread local quarantine used to detect "
"use-after-free errors. Lower value may reduce memory usage but "
"increase the chance of false negatives. It is not advised to go "
"lower than 64Kb, otherwise frequent transfers to global quarantine "
"might affect performance.")
ASAN_FLAG(int, redzone, 16,
"Minimal size (in bytes) of redzones around heap objects. "
"Requirement: redzone >= 16, is a power of two.")
ASAN_FLAG(int, max_redzone, 2048,
"Maximal size (in bytes) of redzones around heap objects.")
ASAN_FLAG(
bool, debug, false,
"If set, prints some debugging information and does additional checks.")
ASAN_FLAG(
int, report_globals, 1,
"Controls the way to handle globals (0 - don't detect buffer overflow on "
"globals, 1 - detect buffer overflow, 2 - print data about registered "
"globals).")
ASAN_FLAG(bool, check_initialization_order, false,
"If set, attempts to catch initialization order issues.")
ASAN_FLAG(
bool, replace_str, true,
"If set, uses custom wrappers and replacements for libc string functions "
"to find more errors.")
ASAN_FLAG(bool, replace_intrin, true,
"If set, uses custom wrappers for memset/memcpy/memmove intrinsics.")
ASAN_FLAG(bool, detect_stack_use_after_return, false,
"Enables stack-use-after-return checking at run-time.")
ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
"Minimum fake stack size log.")
ASAN_FLAG(int, max_uar_stack_size_log,
20, // 1Mb per size class, i.e. ~11Mb per thread
"Maximum fake stack size log.")
ASAN_FLAG(bool, uar_noreserve, false,
"Use mmap with 'noreserve' flag to allocate fake stack.")
ASAN_FLAG(
int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K.
"ASan allocator flag. max_malloc_fill_size is the maximal amount of "
"bytes that will be filled with malloc_fill_byte on malloc.")
ASAN_FLAG(
int, max_free_fill_size, 0,
"ASan allocator flag. max_free_fill_size is the maximal amount of "
"bytes that will be filled with free_fill_byte during free.")
ASAN_FLAG(int, malloc_fill_byte, 0xbe,
"Value used to fill the newly allocated memory.")
ASAN_FLAG(int, free_fill_byte, 0x55,
"Value used to fill deallocated memory.")
ASAN_FLAG(bool, allow_user_poisoning, true,
"If set, user may manually mark memory regions as poisoned or "
"unpoisoned.")
ASAN_FLAG(
int, sleep_before_dying, 0,
"Number of seconds to sleep between printing an error report and "
"terminating the program. Useful for debugging purposes (e.g. when one "
"needs to attach gdb).")
ASAN_FLAG(
int, sleep_after_init, 0,
"Number of seconds to sleep after AddressSanitizer is initialized. "
"Useful for debugging purposes (e.g. when one needs to attach gdb).")
ASAN_FLAG(bool, check_malloc_usable_size, true,
"Allows the users to work around the bug in Nvidia drivers prior to "
"295.*.")
ASAN_FLAG(bool, unmap_shadow_on_exit, false,
"If set, explicitly unmaps the (huge) shadow at exit.")
ASAN_FLAG(bool, protect_shadow_gap, true, "If set, mprotect the shadow gap")
ASAN_FLAG(bool, print_stats, false,
"Print various statistics after printing an error message or if "
"atexit=1.")
ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
ASAN_FLAG(bool, print_scariness, false,
"Print the scariness score. Experimental.")
ASAN_FLAG(bool, atexit, false,
"If set, prints ASan exit stats even after program terminates "
"successfully.")
ASAN_FLAG(
bool, print_full_thread_history, true,
"If set, prints thread creation stacks for the threads involved in the "
"report and their ancestors up to the main thread.")
ASAN_FLAG(
bool, poison_heap, true,
"Poison (or not) the heap memory on [de]allocation. Zero value is useful "
"for benchmarking the allocator or instrumentator.")
ASAN_FLAG(bool, poison_partial, true,
"If true, poison partially addressable 8-byte aligned words "
"(default=true). This flag affects heap and global buffers, but not "
"stack buffers.")
ASAN_FLAG(bool, poison_array_cookie, true,
"Poison (or not) the array cookie after operator new[].")
// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
// https://github.com/google/sanitizers/issues/131
// https://github.com/google/sanitizers/issues/309
// TODO(glider,timurrrr): Fix known issues and enable this back.
ASAN_FLAG(bool, alloc_dealloc_mismatch,
!SANITIZER_MAC && !SANITIZER_WINDOWS && !SANITIZER_ANDROID,
"Report errors on malloc/delete, new/free, new/delete[], etc.")
ASAN_FLAG(bool, new_delete_type_mismatch, true,
"Report errors on mismatch between size of new and delete.")
ASAN_FLAG(
bool, strict_init_order, false,
"If true, assume that dynamic initializers can never access globals from "
"other modules, even if the latter are already initialized.")
ASAN_FLAG(
bool, start_deactivated, false,
"If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
"poisoning) to reduce memory consumption as much as possible, and "
"restores them to original values when the first instrumented module is "
"loaded into the process. This is mainly intended to be used on "
"Android. ")
ASAN_FLAG(
int, detect_invalid_pointer_pairs, 0,
"If non-zero, try to detect operations like <, <=, >, >= and - on "
"invalid pointer pairs (e.g. when pointers belong to different objects). "
"The bigger the value the harder we try.")
ASAN_FLAG(
bool, detect_container_overflow, true,
"If true, honor the container overflow annotations. See "
"https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow")
ASAN_FLAG(int, detect_odr_violation, 2,
"If >=2, detect violation of One-Definition-Rule (ODR); "
"If ==1, detect ODR-violation only if the two variables "
"have different sizes")
ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
ASAN_FLAG(bool, halt_on_error, true,
"Crash the program after printing the first error report "
"(WARNING: USE AT YOUR OWN RISK!)")
ASAN_FLAG(bool, use_odr_indicator, false,
"Use special ODR indicator symbol for ODR violation detection")
ASAN_FLAG(bool, allocator_frees_and_returns_null_on_realloc_zero, true,
"realloc(p, 0) is equivalent to free(p) by default (Same as the "
"POSIX standard). If set to false, realloc(p, 0) will return a "
"pointer to an allocated space which can not be used.")
ASAN_FLAG(bool, verify_asan_link_order, true,
"Check position of ASan runtime in library list (needs to be disabled"
" when other library has to be preloaded system-wide)")

Some files were not shown because too many files have changed in this diff.