Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions


@@ -0,0 +1 @@
BasedOnStyle: Google


@@ -0,0 +1,66 @@
include_directories(..)
# Runtime library sources and build flags.
set(MSAN_RTL_SOURCES
msan.cc
msan_allocator.cc
msan_chained_origin_depot.cc
msan_interceptors.cc
msan_linux.cc
msan_report.cc
msan_thread.cc
msan_poisoning.cc
)
set(MSAN_RTL_CXX_SOURCES
msan_new_delete.cc)
set(MSAN_RTL_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_rtti_flag(OFF MSAN_RTL_CFLAGS)
append_list_if(COMPILER_RT_HAS_FPIE_FLAG -fPIE MSAN_RTL_CFLAGS)
# Prevent clang from generating libc calls.
append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding MSAN_RTL_CFLAGS)
set(MSAN_RUNTIME_LIBRARIES)
# Static runtime library.
add_compiler_rt_component(msan)
foreach(arch ${MSAN_SUPPORTED_ARCH})
add_compiler_rt_runtime(clang_rt.msan
STATIC
ARCHS ${arch}
SOURCES ${MSAN_RTL_SOURCES}
$<TARGET_OBJECTS:RTInterception.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
$<TARGET_OBJECTS:RTUbsan.${arch}>
CFLAGS ${MSAN_RTL_CFLAGS}
PARENT_TARGET msan)
add_compiler_rt_runtime(clang_rt.msan_cxx
STATIC
ARCHS ${arch}
SOURCES ${MSAN_RTL_CXX_SOURCES}
$<TARGET_OBJECTS:RTUbsan_cxx.${arch}>
CFLAGS ${MSAN_RTL_CFLAGS}
PARENT_TARGET msan)
list(APPEND MSAN_RUNTIME_LIBRARIES clang_rt.msan-${arch}
clang_rt.msan_cxx-${arch})
if(UNIX)
add_sanitizer_rt_symbols(clang_rt.msan
ARCHS ${arch}
EXTRA msan.syms.extra)
add_sanitizer_rt_symbols(clang_rt.msan_cxx
ARCHS ${arch}
EXTRA msan.syms.extra)
add_dependencies(msan clang_rt.msan-${arch}-symbols
clang_rt.msan_cxx-${arch}-symbols)
endif()
endforeach()
add_compiler_rt_resource_file(msan_blacklist msan_blacklist.txt msan)
if(COMPILER_RT_INCLUDE_TESTS)
add_subdirectory(tests)
endif()
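# Usage note (illustrative, not part of the original file): the archives built
# above are what the compiler driver links when MSan is enabled, e.g.
#   clang++ -fsanitize=memory -fsanitize-memory-track-origins=2 test.cc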

File diff suppressed because it is too large.


@@ -0,0 +1,402 @@
//===-- msan.h --------------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Private MSan header.
//===----------------------------------------------------------------------===//
#ifndef MSAN_H
#define MSAN_H
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "msan_interface_internal.h"
#include "msan_flags.h"
#include "ubsan/ubsan_platform.h"
#ifndef MSAN_REPLACE_OPERATORS_NEW_AND_DELETE
# define MSAN_REPLACE_OPERATORS_NEW_AND_DELETE 1
#endif
#ifndef MSAN_CONTAINS_UBSAN
# define MSAN_CONTAINS_UBSAN CAN_SANITIZE_UB
#endif
struct MappingDesc {
uptr start;
uptr end;
enum Type {
INVALID, APP, SHADOW, ORIGIN
} type;
const char *name;
};
#if SANITIZER_LINUX && defined(__mips64)
// MIPS64 maps:
// - 0x0000000000-0x0200000000: the program's own segments
// - 0xa200000000-0xc000000000: PIE program segments
// - 0xe200000000-0xffffffffff: library segments.
const MappingDesc kMemoryLayout[] = {
{0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "app-1"},
{0x000200000000ULL, 0x002200000000ULL, MappingDesc::INVALID, "invalid"},
{0x002200000000ULL, 0x004000000000ULL, MappingDesc::SHADOW, "shadow-2"},
{0x004000000000ULL, 0x004200000000ULL, MappingDesc::INVALID, "invalid"},
{0x004200000000ULL, 0x006000000000ULL, MappingDesc::ORIGIN, "origin-2"},
{0x006000000000ULL, 0x006200000000ULL, MappingDesc::INVALID, "invalid"},
{0x006200000000ULL, 0x008000000000ULL, MappingDesc::SHADOW, "shadow-3"},
{0x008000000000ULL, 0x008200000000ULL, MappingDesc::SHADOW, "shadow-1"},
{0x008200000000ULL, 0x00a000000000ULL, MappingDesc::ORIGIN, "origin-3"},
{0x00a000000000ULL, 0x00a200000000ULL, MappingDesc::ORIGIN, "origin-1"},
{0x00a200000000ULL, 0x00c000000000ULL, MappingDesc::APP, "app-2"},
{0x00c000000000ULL, 0x00e200000000ULL, MappingDesc::INVALID, "invalid"},
{0x00e200000000ULL, 0x00ffffffffffULL, MappingDesc::APP, "app-3"}};
#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x8000000000ULL)
#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x2000000000ULL)
#elif SANITIZER_LINUX && defined(__aarch64__)
// The mapping describes the 39-, 42-, and 48-bit VMA layouts. AArch64
// maps:
// - 0x0000000000000-0x0000010000000: 39/42/48-bit program's own segments
// - 0x0005500000000-0x0005600000000: 39-bit PIE program segments
// - 0x0007f80000000-0x0007fffffffff: 39-bit library segments
// - 0x002aa00000000-0x002ab00000000: 42-bit PIE program segments
// - 0x003ff00000000-0x003ffffffffff: 42-bit library segments
// - 0x0aaaaa0000000-0x0aaab00000000: 48-bit PIE program segments
// - 0xffff000000000-0x1000000000000: 48-bit library segments
// The layout is fragmented into multiple segments to increase the memory
// available on the 42-bit VMA (12.21% of the total VMA is available for
// 42 bits, and 13.28% for 39 bits). The 48-bit segments only cover the usual
// PIE/default segments plus some more segments (262144GB total, 0.39% of the
// total VMA).
const MappingDesc kMemoryLayout[] = {
{0x00000000000ULL, 0x01000000000ULL, MappingDesc::INVALID, "invalid"},
{0x01000000000ULL, 0x02000000000ULL, MappingDesc::SHADOW, "shadow-2"},
{0x02000000000ULL, 0x03000000000ULL, MappingDesc::ORIGIN, "origin-2"},
{0x03000000000ULL, 0x04000000000ULL, MappingDesc::SHADOW, "shadow-1"},
{0x04000000000ULL, 0x05000000000ULL, MappingDesc::ORIGIN, "origin-1"},
{0x05000000000ULL, 0x06000000000ULL, MappingDesc::APP, "app-1"},
{0x06000000000ULL, 0x07000000000ULL, MappingDesc::INVALID, "invalid"},
{0x07000000000ULL, 0x08000000000ULL, MappingDesc::APP, "app-2"},
{0x08000000000ULL, 0x09000000000ULL, MappingDesc::INVALID, "invalid"},
// The mappings below are used only for the 42-bit VMA.
{0x09000000000ULL, 0x0A000000000ULL, MappingDesc::SHADOW, "shadow-3"},
{0x0A000000000ULL, 0x0B000000000ULL, MappingDesc::ORIGIN, "origin-3"},
{0x0B000000000ULL, 0x0F000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0F000000000ULL, 0x10000000000ULL, MappingDesc::APP, "app-3"},
{0x10000000000ULL, 0x11000000000ULL, MappingDesc::INVALID, "invalid"},
{0x11000000000ULL, 0x12000000000ULL, MappingDesc::APP, "app-4"},
{0x12000000000ULL, 0x17000000000ULL, MappingDesc::INVALID, "invalid"},
{0x17000000000ULL, 0x18000000000ULL, MappingDesc::SHADOW, "shadow-4"},
{0x18000000000ULL, 0x19000000000ULL, MappingDesc::ORIGIN, "origin-4"},
{0x19000000000ULL, 0x20000000000ULL, MappingDesc::INVALID, "invalid"},
{0x20000000000ULL, 0x21000000000ULL, MappingDesc::APP, "app-5"},
{0x21000000000ULL, 0x26000000000ULL, MappingDesc::INVALID, "invalid"},
{0x26000000000ULL, 0x27000000000ULL, MappingDesc::SHADOW, "shadow-5"},
{0x27000000000ULL, 0x28000000000ULL, MappingDesc::ORIGIN, "origin-5"},
{0x28000000000ULL, 0x29000000000ULL, MappingDesc::SHADOW, "shadow-7"},
{0x29000000000ULL, 0x2A000000000ULL, MappingDesc::ORIGIN, "origin-7"},
{0x2A000000000ULL, 0x2B000000000ULL, MappingDesc::APP, "app-6"},
{0x2B000000000ULL, 0x2C000000000ULL, MappingDesc::INVALID, "invalid"},
{0x2C000000000ULL, 0x2D000000000ULL, MappingDesc::SHADOW, "shadow-6"},
{0x2D000000000ULL, 0x2E000000000ULL, MappingDesc::ORIGIN, "origin-6"},
{0x2E000000000ULL, 0x2F000000000ULL, MappingDesc::APP, "app-7"},
{0x2F000000000ULL, 0x39000000000ULL, MappingDesc::INVALID, "invalid"},
{0x39000000000ULL, 0x3A000000000ULL, MappingDesc::SHADOW, "shadow-9"},
{0x3A000000000ULL, 0x3B000000000ULL, MappingDesc::ORIGIN, "origin-9"},
{0x3B000000000ULL, 0x3C000000000ULL, MappingDesc::APP, "app-8"},
{0x3C000000000ULL, 0x3D000000000ULL, MappingDesc::INVALID, "invalid"},
{0x3D000000000ULL, 0x3E000000000ULL, MappingDesc::SHADOW, "shadow-8"},
{0x3E000000000ULL, 0x3F000000000ULL, MappingDesc::ORIGIN, "origin-8"},
{0x3F000000000ULL, 0x40000000000ULL, MappingDesc::APP, "app-9"},
// The mappings below are used only for the 48-bit VMA.
// TODO(unknown): the 48-bit mapping only covers the usual PIE and non-PIE
// segments plus some more segments, totaling 262144GB of VMA (which covers
// only 0.32% of the entire 48-bit VMA). Memory availability can be increased
// by adding multiple application segments as in the 39- and 42-bit mappings.
{0x0040000000000ULL, 0x0041000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0041000000000ULL, 0x0042000000000ULL, MappingDesc::APP, "app-10"},
{0x0042000000000ULL, 0x0047000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0047000000000ULL, 0x0048000000000ULL, MappingDesc::SHADOW, "shadow-10"},
{0x0048000000000ULL, 0x0049000000000ULL, MappingDesc::ORIGIN, "origin-10"},
{0x0049000000000ULL, 0x0050000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0050000000000ULL, 0x0051000000000ULL, MappingDesc::APP, "app-11"},
{0x0051000000000ULL, 0x0056000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0056000000000ULL, 0x0057000000000ULL, MappingDesc::SHADOW, "shadow-11"},
{0x0057000000000ULL, 0x0058000000000ULL, MappingDesc::ORIGIN, "origin-11"},
{0x0058000000000ULL, 0x0059000000000ULL, MappingDesc::APP, "app-12"},
{0x0059000000000ULL, 0x005E000000000ULL, MappingDesc::INVALID, "invalid"},
{0x005E000000000ULL, 0x005F000000000ULL, MappingDesc::SHADOW, "shadow-12"},
{0x005F000000000ULL, 0x0060000000000ULL, MappingDesc::ORIGIN, "origin-12"},
{0x0060000000000ULL, 0x0061000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0061000000000ULL, 0x0062000000000ULL, MappingDesc::APP, "app-13"},
{0x0062000000000ULL, 0x0067000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0067000000000ULL, 0x0068000000000ULL, MappingDesc::SHADOW, "shadow-13"},
{0x0068000000000ULL, 0x0069000000000ULL, MappingDesc::ORIGIN, "origin-13"},
{0x0069000000000ULL, 0x0AAAAA0000000ULL, MappingDesc::INVALID, "invalid"},
{0x0AAAAA0000000ULL, 0x0AAAB00000000ULL, MappingDesc::APP, "app-14"},
{0x0AAAB00000000ULL, 0x0AACAA0000000ULL, MappingDesc::INVALID, "invalid"},
{0x0AACAA0000000ULL, 0x0AACB00000000ULL, MappingDesc::SHADOW, "shadow-14"},
{0x0AACB00000000ULL, 0x0AADAA0000000ULL, MappingDesc::INVALID, "invalid"},
{0x0AADAA0000000ULL, 0x0AADB00000000ULL, MappingDesc::ORIGIN, "origin-14"},
{0x0AADB00000000ULL, 0x0FF9F00000000ULL, MappingDesc::INVALID, "invalid"},
{0x0FF9F00000000ULL, 0x0FFA000000000ULL, MappingDesc::SHADOW, "shadow-15"},
{0x0FFA000000000ULL, 0x0FFAF00000000ULL, MappingDesc::INVALID, "invalid"},
{0x0FFAF00000000ULL, 0x0FFB000000000ULL, MappingDesc::ORIGIN, "origin-15"},
{0x0FFB000000000ULL, 0x0FFFF00000000ULL, MappingDesc::INVALID, "invalid"},
{0x0FFFF00000000ULL, 0x1000000000000ULL, MappingDesc::APP, "app-15"},
};
# define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x6000000000ULL)
# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x1000000000ULL)
#elif SANITIZER_LINUX && SANITIZER_PPC64
const MappingDesc kMemoryLayout[] = {
{0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "low memory"},
{0x000200000000ULL, 0x080000000000ULL, MappingDesc::INVALID, "invalid"},
{0x080000000000ULL, 0x180200000000ULL, MappingDesc::SHADOW, "shadow"},
{0x180200000000ULL, 0x1C0000000000ULL, MappingDesc::INVALID, "invalid"},
{0x1C0000000000ULL, 0x2C0200000000ULL, MappingDesc::ORIGIN, "origin"},
{0x2C0200000000ULL, 0x300000000000ULL, MappingDesc::INVALID, "invalid"},
{0x300000000000ULL, 0x800000000000ULL, MappingDesc::APP, "high memory"}};
// Various kernels use different low end ranges but we can combine them into one
// big range. They also use different high end ranges but we can map them all to
// one range.
// Maps low and high app ranges to contiguous space with zero base:
// Low: 0000 0000 0000 - 0001 ffff ffff -> 1000 0000 0000 - 1001 ffff ffff
// High: 3000 0000 0000 - 3fff ffff ffff -> 0000 0000 0000 - 0fff ffff ffff
// High: 4000 0000 0000 - 4fff ffff ffff -> 0000 0000 0000 - 0fff ffff ffff
// High: 7000 0000 0000 - 7fff ffff ffff -> 0000 0000 0000 - 0fff ffff ffff
#define LINEARIZE_MEM(mem) \
(((uptr)(mem) & ~0xE00000000000ULL) ^ 0x100000000000ULL)
#define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x080000000000ULL)
#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x140000000000ULL)
#elif SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 64
// Low memory: main binary, MAP_32BIT mappings and modules
// High memory: heap, modules and main thread stack
const MappingDesc kMemoryLayout[] = {
{0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "low memory"},
{0x010000000000ULL, 0x100000000000ULL, MappingDesc::INVALID, "invalid"},
{0x100000000000ULL, 0x310000000000ULL, MappingDesc::SHADOW, "shadow"},
{0x310000000000ULL, 0x380000000000ULL, MappingDesc::INVALID, "invalid"},
{0x380000000000ULL, 0x590000000000ULL, MappingDesc::ORIGIN, "origin"},
{0x590000000000ULL, 0x600000000000ULL, MappingDesc::INVALID, "invalid"},
{0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "high memory"}};
// Maps low and high app ranges to contiguous space with zero base:
// Low: 0000 0000 0000 - 00ff ffff ffff -> 2000 0000 0000 - 20ff ffff ffff
// High: 6000 0000 0000 - 7fff ffff ffff -> 0000 0000 0000 - 1fff ffff ffff
#define LINEARIZE_MEM(mem) \
(((uptr)(mem) & ~0xc00000000000ULL) ^ 0x200000000000ULL)
#define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x100000000000ULL)
#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x280000000000ULL)
#elif SANITIZER_NETBSD || (SANITIZER_LINUX && SANITIZER_WORDSIZE == 64)
#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
// Requires a PIE binary with ASLR enabled.
// Main thread stack and DSOs at 0x7f0000000000 (sometimes 0x7e0000000000).
// Heap at 0x600000000000.
const MappingDesc kMemoryLayout[] = {
{0x000000000000ULL, 0x200000000000ULL, MappingDesc::INVALID, "invalid"},
{0x200000000000ULL, 0x400000000000ULL, MappingDesc::SHADOW, "shadow"},
{0x400000000000ULL, 0x600000000000ULL, MappingDesc::ORIGIN, "origin"},
{0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app"}};
#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x400000000000ULL)
#define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x200000000000ULL)
#else // MSAN_LINUX_X86_64_OLD_MAPPING
// All of the following configurations are supported.
// ASLR disabled: main executable and DSOs at 0x555550000000
// PIE and ASLR: main executable and DSOs at 0x7f0000000000
// non-PIE: main executable below 0x100000000, DSOs at 0x7f0000000000
// Heap at 0x700000000000.
const MappingDesc kMemoryLayout[] = {
{0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app-1"},
{0x010000000000ULL, 0x100000000000ULL, MappingDesc::SHADOW, "shadow-2"},
{0x100000000000ULL, 0x110000000000ULL, MappingDesc::INVALID, "invalid"},
{0x110000000000ULL, 0x200000000000ULL, MappingDesc::ORIGIN, "origin-2"},
{0x200000000000ULL, 0x300000000000ULL, MappingDesc::SHADOW, "shadow-3"},
{0x300000000000ULL, 0x400000000000ULL, MappingDesc::ORIGIN, "origin-3"},
{0x400000000000ULL, 0x500000000000ULL, MappingDesc::INVALID, "invalid"},
{0x500000000000ULL, 0x510000000000ULL, MappingDesc::SHADOW, "shadow-1"},
{0x510000000000ULL, 0x600000000000ULL, MappingDesc::APP, "app-2"},
{0x600000000000ULL, 0x610000000000ULL, MappingDesc::ORIGIN, "origin-1"},
{0x610000000000ULL, 0x700000000000ULL, MappingDesc::INVALID, "invalid"},
{0x700000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app-3"}};
#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x500000000000ULL)
#define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x100000000000ULL)
#endif // MSAN_LINUX_X86_64_OLD_MAPPING
#else
#error "Unsupported platform"
#endif
const uptr kMemoryLayoutSize = sizeof(kMemoryLayout) / sizeof(kMemoryLayout[0]);
#define MEM_TO_ORIGIN(mem) (SHADOW_TO_ORIGIN(MEM_TO_SHADOW((mem))))
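// A minimal sketch (illustrative, not part of the original header): on the
// default x86_64 Linux mapping above, an "app-3" address maps into "shadow-3"
// and then "origin-3". The helper name is hypothetical.
#if SANITIZER_LINUX && defined(__x86_64__) && \
    !defined(MSAN_LINUX_X86_64_OLD_MAPPING)
inline void ExampleShadowMapping() {
  uptr app = 0x700000001000ULL;      // inside "app-3"
  uptr shadow = MEM_TO_SHADOW(app);  // 0x200000001000, inside "shadow-3"
  uptr origin = MEM_TO_ORIGIN(app);  // 0x300000001000, inside "origin-3"
  (void)shadow;
  (void)origin;
}
#endif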
#ifndef __clang__
__attribute__((optimize("unroll-loops")))
#endif
inline bool addr_is_type(uptr addr, MappingDesc::Type mapping_type) {
// It is critical for performance that this loop is unrolled (because then it is
// simplified into just a few constant comparisons).
#ifdef __clang__
#pragma unroll
#endif
for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
if (kMemoryLayout[i].type == mapping_type &&
addr >= kMemoryLayout[i].start && addr < kMemoryLayout[i].end)
return true;
return false;
}
#define MEM_IS_APP(mem) addr_is_type((uptr)(mem), MappingDesc::APP)
#define MEM_IS_SHADOW(mem) addr_is_type((uptr)(mem), MappingDesc::SHADOW)
#define MEM_IS_ORIGIN(mem) addr_is_type((uptr)(mem), MappingDesc::ORIGIN)
// These constants must be kept in sync with the ones in MemorySanitizer.cc.
const int kMsanParamTlsSize = 800;
const int kMsanRetvalTlsSize = 800;
namespace __msan {
extern int msan_inited;
extern bool msan_init_is_running;
extern int msan_report_count;
bool ProtectRange(uptr beg, uptr end);
bool InitShadow(bool init_origins);
char *GetProcSelfMaps();
void InitializeInterceptors();
void MsanAllocatorInit();
void MsanAllocatorThreadFinish();
void MsanDeallocate(StackTrace *stack, void *ptr);
void *msan_malloc(uptr size, StackTrace *stack);
void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack);
void *msan_realloc(void *ptr, uptr size, StackTrace *stack);
void *msan_valloc(uptr size, StackTrace *stack);
void *msan_pvalloc(uptr size, StackTrace *stack);
void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack);
void *msan_memalign(uptr alignment, uptr size, StackTrace *stack);
int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack);
void InstallTrapHandler();
void InstallAtExitHandler();
const char *GetStackOriginDescr(u32 id, uptr *pc);
void EnterSymbolizer();
void ExitSymbolizer();
bool IsInSymbolizer();
struct SymbolizerScope {
SymbolizerScope() { EnterSymbolizer(); }
~SymbolizerScope() { ExitSymbolizer(); }
};
void PrintWarning(uptr pc, uptr bp);
void PrintWarningWithOrigin(uptr pc, uptr bp, u32 origin);
void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp,
void *context, bool request_fast_unwind);
void ReportUMR(StackTrace *stack, u32 origin);
void ReportExpectedUMRNotFound(StackTrace *stack);
void ReportStats();
void ReportAtExitStatistics();
void DescribeMemoryRange(const void *x, uptr size);
void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size,
uptr offset);
// Unpoison first n function arguments.
void UnpoisonParam(uptr n);
void UnpoisonThreadLocalState();
// Returns a "chained" origin id, pointing to the given stack trace followed by
// the previous origin id.
u32 ChainOrigin(u32 id, StackTrace *stack);
const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
#define GET_MALLOC_STACK_TRACE \
BufferedStackTrace stack; \
if (__msan_get_track_origins() && msan_inited) \
GetStackTrace(&stack, common_flags()->malloc_context_size, \
StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, \
common_flags()->fast_unwind_on_malloc)
// On platforms that only support the slow unwinder, we restrict the store
// context size to 1, basically only storing the current pc. We do this because
// the slow unwinder, which is based on libunwind, is not async-signal-safe and
// causes random freezes in forking applications as well as in signal handlers.
#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
BufferedStackTrace stack; \
if (__msan_get_track_origins() > 1 && msan_inited) { \
if (!SANITIZER_CAN_FAST_UNWIND) \
GetStackTrace(&stack, Min(1, flags()->store_context_size), pc, bp, \
nullptr, false); \
else \
GetStackTrace(&stack, flags()->store_context_size, pc, bp, nullptr, \
common_flags()->fast_unwind_on_malloc); \
}
#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
BufferedStackTrace stack; \
if (msan_inited) \
GetStackTrace(&stack, kStackTraceMax, pc, bp, nullptr, \
common_flags()->fast_unwind_on_fatal)
#define GET_STORE_STACK_TRACE \
GET_STORE_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
class ScopedThreadLocalStateBackup {
public:
ScopedThreadLocalStateBackup() { Backup(); }
~ScopedThreadLocalStateBackup() { Restore(); }
void Backup();
void Restore();
private:
u64 va_arg_overflow_size_tls;
};
void MsanTSDInit(void (*destructor)(void *tsd));
void *MsanTSDGet();
void MsanTSDSet(void *tsd);
void MsanTSDDtor(void *tsd);
} // namespace __msan
#define MSAN_MALLOC_HOOK(ptr, size) \
do { \
if (&__sanitizer_malloc_hook) { \
UnpoisonParam(2); \
__sanitizer_malloc_hook(ptr, size); \
} \
RunMallocHooks(ptr, size); \
} while (false)
#define MSAN_FREE_HOOK(ptr) \
do { \
if (&__sanitizer_free_hook) { \
UnpoisonParam(1); \
__sanitizer_free_hook(ptr); \
} \
RunFreeHooks(ptr); \
} while (false)
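// Usage sketch (illustrative): the hooks above forward to the weak
// __sanitizer_malloc_hook/__sanitizer_free_hook symbols, which user code may
// define to observe heap activity, e.g.
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) { /* ... */ }
//   extern "C" void __sanitizer_free_hook(void *ptr) { /* ... */ }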
#endif // MSAN_H


@@ -0,0 +1,2 @@
__msan_*
__ubsan_*


@@ -0,0 +1,322 @@
//===-- msan_allocator.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"
namespace __msan {
struct Metadata {
uptr requested_size;
};
struct MsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const {}
void OnUnmap(uptr p, uptr size) const {
__msan_unpoison((void *)p, size);
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
uptr shadow_p = MEM_TO_SHADOW(p);
ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
if (__msan_get_track_origins()) {
uptr origin_p = MEM_TO_ORIGIN(p);
ReleaseMemoryPagesToOS(origin_p, origin_p + size);
}
}
};
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = sizeof(Metadata);
typedef __sanitizer::CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
typedef __msan::ByteMap ByteMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || \
(SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING))
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = 0x40000000000; // 4T.
static const uptr kMetadataSize = sizeof(Metadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = 0x300000000000;
static const uptr kSpaceSize = 0x020000000000; // 2T.
static const uptr kMetadataSize = sizeof(Metadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = sizeof(Metadata);
typedef __sanitizer::CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
typedef __msan::ByteMap ByteMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
void MsanAllocatorInit() {
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
}
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
CHECK(ms);
CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}
void MsanThreadLocalMallocStorage::CommitBack() {
allocator.SwallowCache(GetAllocatorCache(this));
}
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
bool zeroise) {
if (size > kMaxAllowedMallocSize) {
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
(void *)size);
return Allocator::FailureHandler::OnBadRequest();
}
MsanThread *t = GetCurrentThread();
void *allocated;
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
allocated = allocator.Allocate(cache, size, alignment);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, size, alignment);
}
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
meta->requested_size = size;
if (zeroise) {
__msan_clear_and_unpoison(allocated, size);
} else if (flags()->poison_in_malloc) {
__msan_poison(allocated, size);
if (__msan_get_track_origins()) {
stack->tag = StackTrace::TAG_ALLOC;
Origin o = Origin::CreateHeapOrigin(stack);
__msan_set_origin(allocated, size, o.raw_id());
}
}
MSAN_MALLOC_HOOK(allocated, size);
return allocated;
}
void MsanDeallocate(StackTrace *stack, void *p) {
CHECK(p);
MSAN_FREE_HOOK(p);
Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
uptr size = meta->requested_size;
meta->requested_size = 0;
// This memory will not be reused by anyone else, so we are free to keep it
// poisoned.
if (flags()->poison_in_free) {
__msan_poison(p, size);
if (__msan_get_track_origins()) {
stack->tag = StackTrace::TAG_DEALLOC;
Origin o = Origin::CreateHeapOrigin(stack);
__msan_set_origin(p, size, o.raw_id());
}
}
MsanThread *t = GetCurrentThread();
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
allocator.Deallocate(cache, p);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
allocator.Deallocate(cache, p);
}
}
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
uptr alignment) {
Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
uptr old_size = meta->requested_size;
uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
if (new_size <= actually_allocated_size) {
// We are not reallocating here.
meta->requested_size = new_size;
if (new_size > old_size) {
if (flags()->poison_in_malloc) {
stack->tag = StackTrace::TAG_ALLOC;
PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
}
}
return old_p;
}
uptr memcpy_size = Min(new_size, old_size);
void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
if (new_p) {
CopyMemory(new_p, old_p, memcpy_size, stack);
MsanDeallocate(stack, old_p);
}
return new_p;
}
static uptr AllocationSize(const void *p) {
if (!p) return 0;
const void *beg = allocator.GetBlockBegin(p);
if (beg != p) return 0;
Metadata *b = (Metadata *)allocator.GetMetaData(p);
return b->requested_size;
}
void *msan_malloc(uptr size, StackTrace *stack) {
return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}
void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
return SetErrnoOnNull(MsanAllocate(stack, nmemb * size, sizeof(u64), true));
}
void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
if (!ptr)
return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
if (size == 0) {
MsanDeallocate(stack, ptr);
return nullptr;
}
return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}
void *msan_valloc(uptr size, StackTrace *stack) {
return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}
void *msan_pvalloc(uptr size, StackTrace *stack) {
uptr PageSize = GetPageSizeCached();
if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
errno = errno_ENOMEM;
return Allocator::FailureHandler::OnBadRequest();
}
// pvalloc(0) should allocate one page.
size = size ? RoundUpTo(size, PageSize) : PageSize;
return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}
void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
errno = errno_EINVAL;
return Allocator::FailureHandler::OnBadRequest();
}
return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}
void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
return Allocator::FailureHandler::OnBadRequest();
}
return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}
int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack) {
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
Allocator::FailureHandler::OnBadRequest();
return errno_EINVAL;
}
void *ptr = MsanAllocate(stack, size, alignment, false);
if (UNLIKELY(!ptr))
return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;
return 0;
}
} // namespace __msan
using namespace __msan;
uptr __sanitizer_get_current_allocated_bytes() {
uptr stats[AllocatorStatCount];
allocator.GetStats(stats);
return stats[AllocatorStatAllocated];
}
uptr __sanitizer_get_heap_size() {
uptr stats[AllocatorStatCount];
allocator.GetStats(stats);
return stats[AllocatorStatMapped];
}
uptr __sanitizer_get_free_bytes() { return 1; }
uptr __sanitizer_get_unmapped_bytes() { return 1; }
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }


@@ -0,0 +1,33 @@
//===-- msan_allocator.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#ifndef MSAN_ALLOCATOR_H
#define MSAN_ALLOCATOR_H
#include "sanitizer_common/sanitizer_common.h"
namespace __msan {
struct MsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
// The allocator cache contains an atomic_uint64_t, which must be 8-byte
// aligned.
ALIGNED(8) uptr allocator_cache[96 * (512 * 8 + 16)]; // Opaque.
void CommitBack();
private:
// These objects are allocated via mmap() and are zero-initialized.
MsanThreadLocalMallocStorage() {}
};
} // namespace __msan
#endif // MSAN_ALLOCATOR_H


@@ -0,0 +1,7 @@
# Blacklist for MemorySanitizer. Turns off instrumentation of particular
# functions or sources. Use with care. You may set the location of the
# blacklist at compile time using the -fsanitize-blacklist=<path> flag.
# Example usage:
# fun:*bad_function_name*
# src:file_with_tricky_code.cc


@@ -0,0 +1,132 @@
//===-- msan_chained_origin_depot.cc -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// A storage for chained origins.
//===----------------------------------------------------------------------===//
#include "msan_chained_origin_depot.h"
#include "sanitizer_common/sanitizer_stackdepotbase.h"
namespace __msan {
struct ChainedOriginDepotDesc {
u32 here_id;
u32 prev_id;
};
struct ChainedOriginDepotNode {
ChainedOriginDepotNode *link;
u32 id;
u32 here_id;
u32 prev_id;
typedef ChainedOriginDepotDesc args_type;
bool eq(u32 hash, const args_type &args) const {
return here_id == args.here_id && prev_id == args.prev_id;
}
static uptr storage_size(const args_type &args) {
return sizeof(ChainedOriginDepotNode);
}
/* This is a murmur2 hash for the 64->32 bit case.
It does not behave all that well because the keys have a very biased
distribution (I've seen 7-element buckets with the table only 14% full).
here_id is built of
* (1 bit) Reserved, zero.
* (8 bits) Part id = bits 13..20 of the hash value of here_id's key.
* (23 bits) Sequential number (each part has its own sequence).
prev_id has either the same distribution as here_id (but with a 3:8:21
split), or one of two reserved values (-1) or (-2). Either case can
dominate depending on the workload.
*/
static u32 hash(const args_type &args) {
const u32 m = 0x5bd1e995;
const u32 seed = 0x9747b28c;
const u32 r = 24;
u32 h = seed;
u32 k = args.here_id;
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
k = args.prev_id;
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
static bool is_valid(const args_type &args) { return true; }
void store(const args_type &args, u32 other_hash) {
here_id = args.here_id;
prev_id = args.prev_id;
}
args_type load() const {
args_type ret = {here_id, prev_id};
return ret;
}
struct Handle {
ChainedOriginDepotNode *node_;
Handle() : node_(nullptr) {}
explicit Handle(ChainedOriginDepotNode *node) : node_(node) {}
bool valid() { return node_; }
u32 id() { return node_->id; }
int here_id() { return node_->here_id; }
int prev_id() { return node_->prev_id; }
};
Handle get_handle() { return Handle(this); }
typedef Handle handle_type;
};
static StackDepotBase<ChainedOriginDepotNode, 4, 20> chainedOriginDepot;
StackDepotStats *ChainedOriginDepotGetStats() {
return chainedOriginDepot.GetStats();
}
bool ChainedOriginDepotPut(u32 here_id, u32 prev_id, u32 *new_id) {
ChainedOriginDepotDesc desc = {here_id, prev_id};
bool inserted;
ChainedOriginDepotNode::Handle h = chainedOriginDepot.Put(desc, &inserted);
*new_id = h.valid() ? h.id() : 0;
return inserted;
}
// Retrieves a stored stack trace by the id.
u32 ChainedOriginDepotGet(u32 id, u32 *other) {
ChainedOriginDepotDesc desc = chainedOriginDepot.Get(id);
*other = desc.prev_id;
return desc.here_id;
}
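// Round-trip sketch (illustrative, hypothetical values): chain a stack id onto
// a previous origin id and read it back.
//   u32 id;
//   ChainedOriginDepotPut(/*here_id=*/42, /*prev_id=*/7, &id);
//   u32 prev;
//   u32 here = ChainedOriginDepotGet(id, &prev);  // here == 42, prev == 7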
void ChainedOriginDepotLockAll() {
chainedOriginDepot.LockAll();
}
void ChainedOriginDepotUnlockAll() {
chainedOriginDepot.UnlockAll();
}
} // namespace __msan


@@ -0,0 +1,29 @@
//===-- msan_chained_origin_depot.h --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// A storage for chained origins.
//===----------------------------------------------------------------------===//
#ifndef MSAN_CHAINED_ORIGIN_DEPOT_H
#define MSAN_CHAINED_ORIGIN_DEPOT_H
#include "sanitizer_common/sanitizer_common.h"
namespace __msan {
StackDepotStats *ChainedOriginDepotGetStats();
bool ChainedOriginDepotPut(u32 here_id, u32 prev_id, u32 *new_id);
// Retrieves a stored stack trace by the id.
u32 ChainedOriginDepotGet(u32 id, u32 *other);
void ChainedOriginDepotLockAll();
void ChainedOriginDepotUnlockAll();
} // namespace __msan
#endif // MSAN_CHAINED_ORIGIN_DEPOT_H


@@ -0,0 +1,30 @@
//===-- msan_flags.h --------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#ifndef MSAN_FLAGS_H
#define MSAN_FLAGS_H
namespace __msan {
struct Flags {
#define MSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "msan_flags.inc"
#undef MSAN_FLAG
void SetDefaults();
};
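// For example, the MSAN_FLAG(bool, poison_in_malloc, true, "") entry in
// msan_flags.inc expands to a "bool poison_in_malloc;" member above.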
Flags *flags();
} // namespace __msan
#endif // MSAN_FLAGS_H


@@ -0,0 +1,35 @@
//===-- msan_flags.inc ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MSan runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef MSAN_FLAG
# error "Define MSAN_FLAG prior to including this file!"
#endif
// MSAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
MSAN_FLAG(int, exit_code, -1,
"DEPRECATED. Use exitcode from common flags instead.")
MSAN_FLAG(int, origin_history_size, Origin::kMaxDepth, "")
MSAN_FLAG(int, origin_history_per_stack_limit, 20000, "")
MSAN_FLAG(bool, poison_heap_with_zeroes, false, "")
MSAN_FLAG(bool, poison_stack_with_zeroes, false, "")
MSAN_FLAG(bool, poison_in_malloc, true, "")
MSAN_FLAG(bool, poison_in_free, true, "")
MSAN_FLAG(bool, poison_in_dtor, false, "")
MSAN_FLAG(bool, report_umrs, true, "")
MSAN_FLAG(bool, wrap_signals, true, "")
MSAN_FLAG(bool, print_stats, false, "")
MSAN_FLAG(bool, halt_on_error, !&__msan_keep_going, "")
MSAN_FLAG(bool, atexit, false, "")
MSAN_FLAG(int, store_context_size, 20,
"Like malloc_context_size, but for uninit stores.")

File diff suppressed because it is too large.


@@ -0,0 +1,179 @@
//===-- msan_interface_internal.h -------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Private MSan interface header.
//===----------------------------------------------------------------------===//
#ifndef MSAN_INTERFACE_INTERNAL_H
#define MSAN_INTERFACE_INTERNAL_H
#include "sanitizer_common/sanitizer_internal_defs.h"
extern "C" {
// FIXME: document all interface functions.
SANITIZER_INTERFACE_ATTRIBUTE
int __msan_get_track_origins();
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_init();
// Print a warning and maybe return.
// This function can die based on common_flags()->exitcode.
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_warning();
// Print a warning and die.
// Instrumentation inserts calls to this function when building in "fast" mode
// (i.e. -mllvm -msan-keep-going).
SANITIZER_INTERFACE_ATTRIBUTE __attribute__((noreturn))
void __msan_warning_noreturn();
using __sanitizer::uptr;
using __sanitizer::sptr;
using __sanitizer::uu64;
using __sanitizer::uu32;
using __sanitizer::uu16;
using __sanitizer::u64;
using __sanitizer::u32;
using __sanitizer::u16;
using __sanitizer::u8;
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_maybe_warning_1(u8 s, u32 o);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_maybe_warning_2(u16 s, u32 o);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_maybe_warning_4(u32 s, u32 o);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_maybe_warning_8(u64 s, u32 o);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_maybe_store_origin_1(u8 s, void *p, u32 o);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_maybe_store_origin_2(u16 s, void *p, u32 o);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_maybe_store_origin_4(u32 s, void *p, u32 o);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_maybe_store_origin_8(u64 s, void *p, u32 o);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_unpoison(const void *a, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_unpoison_string(const char *s);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_clear_and_unpoison(void *a, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void* __msan_memcpy(void *dst, const void *src, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void* __msan_memset(void *s, int c, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
void* __msan_memmove(void* dest, const void* src, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_poison(const void *a, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_poison_stack(void *a, uptr size);
// Copy size bytes from src to dst and unpoison the result.
// Useful to implement unsafe loads.
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_load_unpoisoned(void *src, uptr size, void *dst);
// Returns the offset of the first (at least partially) poisoned byte,
// or -1 if the whole range is good.
SANITIZER_INTERFACE_ATTRIBUTE
sptr __msan_test_shadow(const void *x, uptr size);
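// Usage sketch (illustrative, in instrumented code):
//   char buf[8];
//   buf[0] = 1;
//   sptr off = __msan_test_shadow(buf, sizeof(buf));  // off == 1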
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_check_mem_is_initialized(const void *x, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_set_origin(const void *a, uptr size, u32 origin);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_set_alloca_origin(void *a, uptr size, char *descr);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_set_alloca_origin4(void *a, uptr size, char *descr, uptr pc);
SANITIZER_INTERFACE_ATTRIBUTE
u32 __msan_chain_origin(u32 id);
SANITIZER_INTERFACE_ATTRIBUTE
u32 __msan_get_origin(const void *a);
// Test that this_id is a descendant of prev_id (or they are simply equal).
// "descendant" here means they are part of the same chain, created with
// __msan_chain_origin.
SANITIZER_INTERFACE_ATTRIBUTE
int __msan_origin_is_descendant_or_same(u32 this_id, u32 prev_id);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_clear_on_return();
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_set_keep_going(int keep_going);
SANITIZER_INTERFACE_ATTRIBUTE
int __msan_set_poison_in_malloc(int do_poison);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __msan_default_options();
// For testing.
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_set_expect_umr(int expect_umr);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_print_shadow(const void *x, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_dump_shadow(const void *x, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
int __msan_has_dynamic_component();
// For testing.
SANITIZER_INTERFACE_ATTRIBUTE
u32 __msan_get_umr_origin();
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_partial_poison(const void* data, void* shadow, uptr size);
// Tell MSan about newly allocated memory (e.g. by a custom allocator).
// Memory will be marked uninitialized, with origin at the call site.
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_allocated_memory(const void* data, uptr size);
// Tell MSan about newly destroyed memory. Memory will be marked
// uninitialized.
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_dtor_callback(const void* data, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p);
SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p);
SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_set_death_callback(void (*callback)(void));
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_copy_shadow(void *dst, const void *src, uptr size);
} // extern "C"
#endif // MSAN_INTERFACE_INTERNAL_H


@@ -0,0 +1,216 @@
//===-- msan_linux.cc -----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Linux-, NetBSD- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
#include "msan.h"
#include "msan_thread.h"
#include <elf.h>
#include <link.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <unwind.h>
#include <execinfo.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"
namespace __msan {
void ReportMapRange(const char *descr, uptr beg, uptr size) {
if (size > 0) {
uptr end = beg + size - 1;
VPrintf(1, "%s : %p - %p\n", descr, beg, end);
}
}
static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
if (size > 0) {
uptr end = beg + size - 1;
if (!MemoryRangeIsAvailable(beg, end)) {
Printf("FATAL: Memory range %p - %p is not available.\n", beg, end);
return false;
}
}
return true;
}
static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
if (size > 0) {
void *addr = MmapFixedNoAccess(beg, size, name);
if (beg == 0 && addr) {
// Depending on the kernel configuration, we may not be able to protect
// the page at address zero.
uptr gap = 16 * GetPageSizeCached();
beg += gap;
size -= gap;
addr = MmapFixedNoAccess(beg, size, name);
}
if ((uptr)addr != beg) {
uptr end = beg + size - 1;
Printf("FATAL: Cannot protect memory range %p - %p (%s).\n", beg, end,
name);
return false;
}
}
return true;
}
static void CheckMemoryLayoutSanity() {
uptr prev_end = 0;
for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
uptr start = kMemoryLayout[i].start;
uptr end = kMemoryLayout[i].end;
MappingDesc::Type type = kMemoryLayout[i].type;
CHECK_LT(start, end);
CHECK_EQ(prev_end, start);
CHECK(addr_is_type(start, type));
CHECK(addr_is_type((start + end) / 2, type));
CHECK(addr_is_type(end - 1, type));
if (type == MappingDesc::APP) {
uptr addr = start;
CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
addr = (start + end) / 2;
CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
addr = end - 1;
CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
}
prev_end = end;
}
}
bool InitShadow(bool init_origins) {
// Let user know mapping parameters first.
VPrintf(1, "__msan_init %p\n", &__msan_init);
for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
kMemoryLayout[i].end - 1);
CheckMemoryLayoutSanity();
if (!MEM_IS_APP(&__msan_init)) {
Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
(uptr)&__msan_init);
return false;
}
const uptr maxVirtualAddress = GetMaxUserVirtualAddress();
for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
uptr start = kMemoryLayout[i].start;
uptr end = kMemoryLayout[i].end;
uptr size = end - start;
MappingDesc::Type type = kMemoryLayout[i].type;
// Check if the segment should be mapped based on platform constraints.
if (start >= maxVirtualAddress)
continue;
bool map = type == MappingDesc::SHADOW ||
(init_origins && type == MappingDesc::ORIGIN);
bool protect = type == MappingDesc::INVALID ||
(!init_origins && type == MappingDesc::ORIGIN);
CHECK(!(map && protect));
if (!map && !protect)
CHECK(type == MappingDesc::APP);
if (map) {
if (!CheckMemoryRangeAvailability(start, size))
return false;
if ((uptr)MmapFixedNoReserve(start, size, kMemoryLayout[i].name) != start)
return false;
if (common_flags()->use_madv_dontdump)
DontDumpShadowMemory(start, size);
}
if (protect) {
if (!CheckMemoryRangeAvailability(start, size))
return false;
if (!ProtectMemoryRange(start, size, kMemoryLayout[i].name))
return false;
}
}
return true;
}
static void MsanAtExit(void) {
if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
ReportStats();
if (msan_report_count > 0) {
ReportAtExitStatistics();
if (common_flags()->exitcode)
internal__exit(common_flags()->exitcode);
}
}
void InstallAtExitHandler() {
atexit(MsanAtExit);
}
// ---------------------- TSD ---------------- {{{1
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;
void MsanTSDInit(void (*destructor)(void *tsd)) {
CHECK(!tsd_key_inited);
tsd_key_inited = true;
CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}
static THREADLOCAL MsanThread* msan_current_thread;
MsanThread *GetCurrentThread() {
return msan_current_thread;
}
void SetCurrentThread(MsanThread *t) {
// Make sure we do not reset the current MsanThread.
CHECK_EQ(0, msan_current_thread);
msan_current_thread = t;
// Make sure that MsanTSDDtor gets called at the end.
CHECK(tsd_key_inited);
pthread_setspecific(tsd_key, (void *)t);
}
void MsanTSDDtor(void *tsd) {
MsanThread *t = (MsanThread*)tsd;
if (t->destructor_iterations_ > 1) {
t->destructor_iterations_--;
CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
return;
}
msan_current_thread = nullptr;
// Make sure that the signal handler cannot see a stale current thread pointer.
atomic_signal_fence(memory_order_seq_cst);
MsanThread::TSDDtor(tsd);
}
} // namespace __msan
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD


@@ -0,0 +1,108 @@
//===-- msan_new_delete.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Interceptors for operators new and delete.
//===----------------------------------------------------------------------===//
#include "msan.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
#if MSAN_REPLACE_OPERATORS_NEW_AND_DELETE
#include <stddef.h>
using namespace __msan; // NOLINT
// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
namespace std {
struct nothrow_t {};
enum class align_val_t: size_t {};
} // namespace std
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
#define OPERATOR_NEW_BODY(nothrow) \
GET_MALLOC_STACK_TRACE; \
void *res = msan_malloc(size, &stack);\
if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\
return res
#define OPERATOR_NEW_BODY_ALIGN(nothrow) \
GET_MALLOC_STACK_TRACE;\
void *res = msan_memalign((uptr)align, size, &stack);\
if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\
return res;
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(true /*nothrow*/);
}
INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(true /*nothrow*/);
}
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size, std::align_val_t align)
{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size, std::align_val_t align)
{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
#define OPERATOR_DELETE_BODY \
GET_MALLOC_STACK_TRACE; \
if (ptr) MsanDeallocate(&stack, ptr)
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, size_t size) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size) NOEXCEPT
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::align_val_t align) NOEXCEPT
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT
{ OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
{ OPERATOR_DELETE_BODY; }
#endif // MSAN_REPLACE_OPERATORS_NEW_AND_DELETE


@@ -0,0 +1,169 @@
//===-- msan_origin.h ----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Origin id utils.
//===----------------------------------------------------------------------===//
#ifndef MSAN_ORIGIN_H
#define MSAN_ORIGIN_H
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "msan_chained_origin_depot.h"
namespace __msan {
// Origin handling.
//
// Origin is a 32-bit identifier that is attached to any uninitialized value in
// the program and describes, more or less exactly, how this memory came to be
// uninitialized.
//
// There are 3 kinds of origin ids:
// 0xxx xxxx xxxx xxxx heap origin id
// 1000 xxxx xxxx xxxx stack origin id
// 1zzz xxxx xxxx xxxx chained origin id
//
// Heap origin id describes a heap memory allocation and contains (in the xxx
// part) a value of StackDepot.
//
// Stack origin id describes a stack memory allocation and contains (in the xxx
// part) an index into StackOriginDescr and StackOriginPC. We don't store a
// stack trace for such origins for performance reasons.
//
// Chained origin id describes an event of storing an uninitialized value to
// memory. The xxx part is a value of ChainedOriginDepot, which is a mapping of
// (stack_id, prev_id) -> id, where
// * stack_id describes the event.
// StackDepot keeps a mapping between those and corresponding stack traces.
// * prev_id is another origin id that describes the earlier part of the
// uninitialized value history.
// Following a chain of prev_id provides the full recorded history of an
// uninitialized value.
//
// This, effectively, defines a tree (or 2 trees, see below) where nodes are
// points in value history marked with origin ids, and edges are events that are
// marked with stack_id.
//
// The "zzz" bits of chained origin id are used to store the length (or depth)
// of the origin chain.
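// Worked example (illustrative): per CreateChainedOrigin below, a chained
// origin id with depth 2 and a ChainedOriginDepot value of 0x1234 is encoded
// as
//   (1u << 31) | (2u << 28) | 0x1234 == 0xA0001234  // 1 010 <28-bit id>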
class Origin {
public:
static bool isValidId(u32 id) { return id != 0 && id != (u32)-1; }
u32 raw_id() const { return raw_id_; }
bool isHeapOrigin() const {
// 0xxx xxxx xxxx xxxx
return raw_id_ >> kHeapShift == 0;
}
bool isStackOrigin() const {
// 1000 xxxx xxxx xxxx
return (raw_id_ >> kDepthShift) == (1 << kDepthBits);
}
bool isChainedOrigin() const {
// 1zzz xxxx xxxx xxxx, zzz != 000
return (raw_id_ >> kDepthShift) > (1 << kDepthBits);
}
u32 getChainedId() const {
CHECK(isChainedOrigin());
return raw_id_ & kChainedIdMask;
}
u32 getStackId() const {
CHECK(isStackOrigin());
return raw_id_ & kChainedIdMask;
}
u32 getHeapId() const {
CHECK(isHeapOrigin());
return raw_id_ & kHeapIdMask;
}
// Returns the next origin in the chain and the current stack trace.
Origin getNextChainedOrigin(StackTrace *stack) const {
CHECK(isChainedOrigin());
u32 prev_id;
u32 stack_id = ChainedOriginDepotGet(getChainedId(), &prev_id);
if (stack) *stack = StackDepotGet(stack_id);
return Origin(prev_id);
}
StackTrace getStackTraceForHeapOrigin() const {
return StackDepotGet(getHeapId());
}
static Origin CreateStackOrigin(u32 id) {
CHECK((id & kStackIdMask) == id);
return Origin((1 << kHeapShift) | id);
}
static Origin CreateHeapOrigin(StackTrace *stack) {
u32 stack_id = StackDepotPut(*stack);
CHECK(stack_id);
CHECK((stack_id & kHeapIdMask) == stack_id);
return Origin(stack_id);
}
static Origin CreateChainedOrigin(Origin prev, StackTrace *stack) {
int depth = prev.isChainedOrigin() ? prev.depth() : 0;
// depth is the length of the chain minus 1.
// origin_history_size of 0 means unlimited depth.
if (flags()->origin_history_size > 0) {
if (depth + 1 >= flags()->origin_history_size) {
return prev;
} else {
++depth;
CHECK(depth < (1 << kDepthBits));
}
}
StackDepotHandle h = StackDepotPut_WithHandle(*stack);
if (!h.valid()) return prev;
if (flags()->origin_history_per_stack_limit > 0) {
int use_count = h.use_count();
if (use_count > flags()->origin_history_per_stack_limit) return prev;
}
u32 chained_id;
bool inserted = ChainedOriginDepotPut(h.id(), prev.raw_id(), &chained_id);
CHECK((chained_id & kChainedIdMask) == chained_id);
if (inserted && flags()->origin_history_per_stack_limit > 0)
h.inc_use_count_unsafe();
return Origin((1 << kHeapShift) | (depth << kDepthShift) | chained_id);
}
static Origin FromRawId(u32 id) {
return Origin(id);
}
private:
static const int kDepthBits = 3;
static const int kDepthShift = 32 - kDepthBits - 1;
static const int kHeapShift = 31;
static const u32 kChainedIdMask = ((u32)-1) >> (32 - kDepthShift);
static const u32 kStackIdMask = ((u32)-1) >> (32 - kDepthShift);
static const u32 kHeapIdMask = ((u32)-1) >> (32 - kHeapShift);
u32 raw_id_;
explicit Origin(u32 raw_id) : raw_id_(raw_id) {}
int depth() const {
CHECK(isChainedOrigin());
return (raw_id_ >> kDepthShift) & ((1 << kDepthBits) - 1);
}
public:
static const int kMaxDepth = (1 << kDepthBits) - 1;
};
} // namespace __msan
#endif // MSAN_ORIGIN_H
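The encoding documented at the top of this header can be checked in isolation. The following self-contained sketch re-derives kDepthShift and classifies three hand-built ids the same way isHeapOrigin/isStackOrigin/isChainedOrigin do; the constants mirror the class above, and the sample ids are made up for illustration.
#include <cassert>
#include <cstdint>
int main() {
  const int kDepthBits = 3;
  const int kDepthShift = 32 - kDepthBits - 1;  // 28, as in Origin
  // Hand-built sample ids following the documented layout.
  const uint32_t heap_id = 0x12345678u;                                   // 0xxx ...
  const uint32_t stack_id = (1u << 31) | 0x123u;                          // 1000 ...
  const uint32_t chained_id = (1u << 31) | (2u << kDepthShift) | 0x123u;  // 1zzz ...
  assert((heap_id >> 31) == 0);                              // heap: top bit 0
  assert((stack_id >> kDepthShift) == (1u << kDepthBits));   // stack: top 4 bits 1000
  assert((chained_id >> kDepthShift) > (1u << kDepthBits));  // chained: 1zzz, zzz != 000
  return 0;
}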

View File

@@ -0,0 +1,174 @@
//===-- msan_poisoning.cc ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "msan_poisoning.h"
#include "interception/interception.h"
#include "msan_origin.h"
#include "sanitizer_common/sanitizer_common.h"
DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
DECLARE_REAL(void *, memmove, void *dest, const void *src, uptr n)
namespace __msan {
u32 GetOriginIfPoisoned(uptr addr, uptr size) {
unsigned char *s = (unsigned char *)MEM_TO_SHADOW(addr);
for (uptr i = 0; i < size; ++i)
if (s[i]) return *(u32 *)SHADOW_TO_ORIGIN(((uptr)s + i) & ~3UL);
return 0;
}
void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size,
u32 src_origin) {
uptr dst_s = MEM_TO_SHADOW(addr);
uptr src_s = src_shadow;
uptr src_s_end = src_s + size;
for (; src_s < src_s_end; ++dst_s, ++src_s)
if (*(u8 *)src_s) *(u32 *)SHADOW_TO_ORIGIN(dst_s & ~3UL) = src_origin;
}
void CopyOrigin(const void *dst, const void *src, uptr size,
StackTrace *stack) {
if (!MEM_IS_APP(dst) || !MEM_IS_APP(src)) return;
uptr d = (uptr)dst;
uptr beg = d & ~3UL;
// Copy left unaligned origin if that memory is poisoned.
if (beg < d) {
u32 o = GetOriginIfPoisoned((uptr)src, d - beg);
if (o) {
if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
*(u32 *)MEM_TO_ORIGIN(beg) = o;
}
beg += 4;
}
uptr end = (d + size) & ~3UL;
// If both ends fall into the same 4-byte slot, we are done.
if (end < beg) return;
// Copy right unaligned origin if that memory is poisoned.
if (end < d + size) {
u32 o = GetOriginIfPoisoned((uptr)src + (end - d), (d + size) - end);
if (o) {
if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
*(u32 *)MEM_TO_ORIGIN(end) = o;
}
}
if (beg < end) {
// Align src up.
uptr s = ((uptr)src + 3) & ~3UL;
// FIXME: factor out to msan_copy_origin_aligned
if (__msan_get_track_origins() > 1) {
u32 *src = (u32 *)MEM_TO_ORIGIN(s);
u32 *src_s = (u32 *)MEM_TO_SHADOW(s);
u32 *src_end = (u32 *)MEM_TO_ORIGIN(s + (end - beg));
u32 *dst = (u32 *)MEM_TO_ORIGIN(beg);
u32 src_o = 0;
u32 dst_o = 0;
for (; src < src_end; ++src, ++src_s, ++dst) {
if (!*src_s) continue;
if (*src != src_o) {
src_o = *src;
dst_o = ChainOrigin(src_o, stack);
}
*dst = dst_o;
}
} else {
REAL(memcpy)((void *)MEM_TO_ORIGIN(beg), (void *)MEM_TO_ORIGIN(s),
end - beg);
}
}
}
void MoveShadowAndOrigin(const void *dst, const void *src, uptr size,
StackTrace *stack) {
if (!MEM_IS_APP(dst)) return;
if (!MEM_IS_APP(src)) return;
if (src == dst) return;
REAL(memmove)((void *)MEM_TO_SHADOW((uptr)dst),
(void *)MEM_TO_SHADOW((uptr)src), size);
if (__msan_get_track_origins()) CopyOrigin(dst, src, size, stack);
}
void CopyShadowAndOrigin(const void *dst, const void *src, uptr size,
StackTrace *stack) {
if (!MEM_IS_APP(dst)) return;
if (!MEM_IS_APP(src)) return;
REAL(memcpy)((void *)MEM_TO_SHADOW((uptr)dst),
(void *)MEM_TO_SHADOW((uptr)src), size);
if (__msan_get_track_origins()) CopyOrigin(dst, src, size, stack);
}
void CopyMemory(void *dst, const void *src, uptr size, StackTrace *stack) {
REAL(memcpy)(dst, src, size);
CopyShadowAndOrigin(dst, src, size, stack);
}
void SetShadow(const void *ptr, uptr size, u8 value) {
uptr PageSize = GetPageSizeCached();
uptr shadow_beg = MEM_TO_SHADOW(ptr);
uptr shadow_end = shadow_beg + size;
if (value ||
shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
REAL(memset)((void *)shadow_beg, value, shadow_end - shadow_beg);
} else {
uptr page_beg = RoundUpTo(shadow_beg, PageSize);
uptr page_end = RoundDownTo(shadow_end, PageSize);
if (page_beg >= page_end) {
REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
} else {
if (page_beg != shadow_beg) {
REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
}
if (page_end != shadow_end) {
REAL(memset)((void *)page_end, 0, shadow_end - page_end);
}
MmapFixedNoReserve(page_beg, page_end - page_beg);
}
}
}
void SetOrigin(const void *dst, uptr size, u32 origin) {
// Origin mapping is 4 bytes per 4 bytes of application memory.
// Here we extend the range such that its left and right bounds are both
// 4-byte aligned.
uptr x = MEM_TO_ORIGIN((uptr)dst);
uptr beg = x & ~3UL; // align down.
uptr end = (x + size + 3) & ~3UL; // align up.
u64 origin64 = ((u64)origin << 32) | origin;
// This is like memset, but the value is 32-bit. We unroll by 2 to write
// 64 bits at once. May want to unroll further to get 128-bit stores.
if (beg & 7ULL) {
*(u32 *)beg = origin;
beg += 4;
}
for (uptr addr = beg; addr < (end & ~7UL); addr += 8) *(u64 *)addr = origin64;
if (end & 7ULL) *(u32 *)(end - 4) = origin;
}
void PoisonMemory(const void *dst, uptr size, StackTrace *stack) {
SetShadow(dst, size, (u8)-1);
if (__msan_get_track_origins()) {
Origin o = Origin::CreateHeapOrigin(stack);
SetOrigin(dst, size, o.raw_id());
}
}
} // namespace __msan
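SetOrigin above is effectively a 32-bit memset with an unroll-by-2 body. The same scheme can be sketched over an ordinary buffer; Fill32 and the test buffer below are assumptions for illustration, not runtime names.
#include <cstdint>
#include <cstdio>
// Fill [beg, end) -- both 4-byte aligned -- with a 32-bit pattern, writing
// 64 bits per iteration where possible, mirroring SetOrigin's unrolling.
static void Fill32(uintptr_t beg, uintptr_t end, uint32_t v) {
  uint64_t v64 = ((uint64_t)v << 32) | v;
  if (beg & 7) { *(uint32_t *)beg = v; beg += 4; }   // 4-byte head
  for (uintptr_t p = beg; p < (end & ~7UL); p += 8)  // 8-byte body
    *(uint64_t *)p = v64;
  if (end & 7) *(uint32_t *)(end - 4) = v;           // 4-byte tail
}
int main() {
  alignas(8) uint32_t buf[9];
  Fill32((uintptr_t)buf, (uintptr_t)(buf + 9), 0xdeadbeefu);
  printf("%x %x\n", buf[0], buf[8]);  // deadbeef deadbeef
  return 0;
}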

View File

@@ -0,0 +1,59 @@
//===-- msan_poisoning.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#ifndef MSAN_POISONING_H
#define MSAN_POISONING_H
#include "msan.h"
namespace __msan {
// Return origin for the first poisoned byte in the memory range, or 0.
u32 GetOriginIfPoisoned(uptr addr, uptr size);
// Walk the [addr, addr+size) app memory region; wherever the corresponding
// shadow byte in [src_shadow, src_shadow+size) is non-zero, set the origin
// of that position to the single origin id src_origin.
void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size, u32 src_origin);
// Copy origin from src (app address) to dst (app address), creating chained
// origin ids as necessary, without overriding origin for fully initialized
// quads.
void CopyOrigin(const void *dst, const void *src, uptr size, StackTrace *stack);
// memmove() shadow and origin. Dst and src are application addresses.
// See CopyOrigin() for the origin copying logic.
void MoveShadowAndOrigin(const void *dst, const void *src, uptr size,
StackTrace *stack);
// memcpy() shadow and origin. Dst and src are application addresses.
// See CopyOrigin() for the origin copying logic.
void CopyShadowAndOrigin(const void *dst, const void *src, uptr size,
StackTrace *stack);
// memcpy() app memory, and do "the right thing" to the corresponding shadow and
// origin regions.
void CopyMemory(void *dst, const void *src, uptr size, StackTrace *stack);
// Fill shadow with the given value. Ptr is an application address.
void SetShadow(const void *ptr, uptr size, u8 value);
// Set origin for the memory region.
void SetOrigin(const void *dst, uptr size, u32 origin);
// Mark memory region uninitialized, with origins.
void PoisonMemory(const void *dst, uptr size, StackTrace *stack);
} // namespace __msan
#endif // MSAN_POISONING_H
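These internal entry points back the public <sanitizer/msan_interface.h> functions; __msan_poison and __msan_unpoison, for example, ultimately perform the same shadow updates as SetShadow. A hedged usage sketch from the application side follows (it must be built with -fsanitize=memory to link against the runtime).
#include <sanitizer/msan_interface.h>
#include <cstdint>
int main() {
  char buf[16];
  __msan_unpoison(buf, sizeof(buf));  // mark the whole buffer initialized
  __msan_poison(buf, 8);              // re-mark the first 8 bytes uninitialized
  // Offset of the first poisoned byte in the range, or -1 if none:
  intptr_t idx = __msan_test_shadow(buf, sizeof(buf));
  return idx == 0 ? 0 : 1;
}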

View File

@@ -0,0 +1,272 @@
//===-- msan_report.cc ----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//
#include "msan.h"
#include "msan_chained_origin_depot.h"
#include "msan_origin.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
using namespace __sanitizer;
namespace __msan {
class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() { }
const char *Origin() { return Magenta(); }
const char *Name() { return Green(); }
};
static void DescribeStackOrigin(const char *so, uptr pc) {
Decorator d;
char *s = internal_strdup(so);
char *sep = internal_strchr(s, '@');
CHECK(sep);
*sep = '\0';
Printf("%s", d.Origin());
Printf(
" %sUninitialized value was created by an allocation of '%s%s%s'"
" in the stack frame of function '%s%s%s'%s\n",
d.Origin(), d.Name(), s, d.Origin(), d.Name(), sep + 1, d.Origin(),
d.Default());
InternalFree(s);
if (pc) {
// For some reason the function address in LLVM IR is 1 less than the address
// of the first instruction.
pc = StackTrace::GetNextInstructionPc(pc);
StackTrace(&pc, 1).Print();
}
}
static void DescribeOrigin(u32 id) {
VPrintf(1, " raw origin id: %d\n", id);
Decorator d;
Origin o = Origin::FromRawId(id);
while (o.isChainedOrigin()) {
StackTrace stack;
o = o.getNextChainedOrigin(&stack);
Printf(" %sUninitialized value was stored to memory at%s\n", d.Origin(),
d.Default());
stack.Print();
}
if (o.isStackOrigin()) {
uptr pc;
const char *so = GetStackOriginDescr(o.getStackId(), &pc);
DescribeStackOrigin(so, pc);
} else {
StackTrace stack = o.getStackTraceForHeapOrigin();
switch (stack.tag) {
case StackTrace::TAG_ALLOC:
Printf(" %sUninitialized value was created by a heap allocation%s\n",
d.Origin(), d.Default());
break;
case StackTrace::TAG_DEALLOC:
Printf(" %sUninitialized value was created by a heap deallocation%s\n",
d.Origin(), d.Default());
break;
case STACK_TRACE_TAG_POISON:
Printf(" %sMemory was marked as uninitialized%s\n", d.Origin(),
d.Default());
break;
default:
Printf(" %sUninitialized value was created%s\n", d.Origin(),
d.Default());
break;
}
stack.Print();
}
}
void ReportUMR(StackTrace *stack, u32 origin) {
if (!__msan::flags()->report_umrs) return;
ScopedErrorReportLock l;
Decorator d;
Printf("%s", d.Warning());
Report("WARNING: MemorySanitizer: use-of-uninitialized-value\n");
Printf("%s", d.Default());
stack->Print();
if (origin) {
DescribeOrigin(origin);
}
ReportErrorSummary("use-of-uninitialized-value", stack);
}
void ReportExpectedUMRNotFound(StackTrace *stack) {
ScopedErrorReportLock l;
Printf("WARNING: Expected use of uninitialized value not found\n");
stack->Print();
}
void ReportStats() {
ScopedErrorReportLock l;
if (__msan_get_track_origins() > 0) {
StackDepotStats *stack_depot_stats = StackDepotGetStats();
// FIXME: we want this at normal exit, too!
// FIXME: but only with verbosity=1 or something
Printf("Unique heap origins: %zu\n", stack_depot_stats->n_uniq_ids);
Printf("Stack depot allocated bytes: %zu\n", stack_depot_stats->allocated);
StackDepotStats *chained_origin_depot_stats = ChainedOriginDepotGetStats();
Printf("Unique origin histories: %zu\n",
chained_origin_depot_stats->n_uniq_ids);
Printf("History depot allocated bytes: %zu\n",
chained_origin_depot_stats->allocated);
}
}
void ReportAtExitStatistics() {
ScopedErrorReportLock l;
if (msan_report_count > 0) {
Decorator d;
Printf("%s", d.Warning());
Printf("MemorySanitizer: %d warnings reported.\n", msan_report_count);
Printf("%s", d.Default());
}
}
class OriginSet {
public:
OriginSet() : next_id_(0) {}
int insert(u32 o) {
// Scan from the end for better locality.
for (int i = next_id_ - 1; i >= 0; --i)
if (origins_[i] == o) return i;
if (next_id_ == kMaxSize_) return OVERFLOW;
int id = next_id_++;
origins_[id] = o;
return id;
}
int size() { return next_id_; }
u32 get(int id) { return origins_[id]; }
static char asChar(int id) {
switch (id) {
case MISSING:
return '.';
case OVERFLOW:
return '*';
default:
return 'A' + id;
}
}
static const int OVERFLOW = -1;
static const int MISSING = -2;
private:
static const int kMaxSize_ = 'Z' - 'A' + 1;
u32 origins_[kMaxSize_];
int next_id_;
};
void DescribeMemoryRange(const void *x, uptr size) {
// Real limits.
uptr start = MEM_TO_SHADOW(x);
uptr end = start + size;
// Scan limits: align start down to 4; align size up to 16.
uptr s = start & ~3UL;
size = end - s;
size = (size + 15) & ~15UL;
uptr e = s + size;
// Single letter names to origin id mapping.
OriginSet origin_set;
uptr pos = 0; // Offset from aligned start.
bool with_origins = __msan_get_track_origins();
// True if there is at least 1 poisoned bit in the last 4-byte group.
bool last_quad_poisoned;
int origin_ids[4]; // Single letter origin ids for the current line.
Decorator d;
Printf("%s", d.Warning());
Printf("Shadow map of [%p, %p), %zu bytes:\n", start, end, end - start);
Printf("%s", d.Default());
while (s < e) {
// Line start.
if (pos % 16 == 0) {
for (int i = 0; i < 4; ++i) origin_ids[i] = -1;
Printf("%p:", s);
}
// Group start.
if (pos % 4 == 0) {
Printf(" ");
last_quad_poisoned = false;
}
// Print shadow byte.
if (s < start || s >= end) {
Printf("..");
} else {
unsigned char v = *(unsigned char *)s;
if (v) last_quad_poisoned = true;
Printf("%x%x", v >> 4, v & 0xf);
}
// Group end.
if (pos % 4 == 3 && with_origins) {
int id = OriginSet::MISSING;
if (last_quad_poisoned) {
u32 o = *(u32 *)SHADOW_TO_ORIGIN(s - 3);
id = origin_set.insert(o);
}
origin_ids[(pos % 16) / 4] = id;
}
// Line end.
if (pos % 16 == 15) {
if (with_origins) {
Printf(" |");
for (int i = 0; i < 4; ++i) {
char c = OriginSet::asChar(origin_ids[i]);
Printf("%c", c);
if (i != 3) Printf(" ");
}
Printf("|");
}
Printf("\n");
}
size--;
s++;
pos++;
}
Printf("\n");
for (int i = 0; i < origin_set.size(); ++i) {
u32 o = origin_set.get(i);
Printf("Origin %c (origin_id %x):\n", OriginSet::asChar(i), o);
DescribeOrigin(o);
}
}
void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size,
uptr offset) {
Decorator d;
Printf("%s", d.Warning());
Printf("%sUninitialized bytes in %s%s%s at offset %zu inside [%p, %zu)%s\n",
d.Warning(), d.Name(), what, d.Warning(), offset, start, size,
d.Default());
if (__sanitizer::Verbosity())
DescribeMemoryRange(start, size);
}
} // namespace __msan
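The range dump produced by DescribeMemoryRange is reachable from user code through __msan_print_shadow, which makes the 16-bytes-per-line shadow layout and the origin-letter legend above easy to inspect. A small sketch (build with -fsanitize=memory -fsanitize-memory-track-origins=2):
#include <sanitizer/msan_interface.h>
int main() {
  int v[4];
  __msan_unpoison(&v[1], sizeof(int));  // leave v[0], v[2], v[3] poisoned
  // Dumps the shadow (and origin letters) for the range via DescribeMemoryRange:
  __msan_print_shadow(v, sizeof(v));
  return 0;
}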

Some files were not shown because too many files have changed in this diff.