Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions


@@ -0,0 +1,26 @@
__tsan_init
__tsan_flush_memory
__tsan_read*
__tsan_write*
__tsan_vptr*
__tsan_func*
__tsan_atomic*
__tsan_java*
__tsan_unaligned*
__tsan_release
__tsan_acquire
__tsan_mutex_create
__tsan_mutex_destroy
__tsan_mutex_pre_lock
__tsan_mutex_post_lock
__tsan_mutex_pre_unlock
__tsan_mutex_post_unlock
__tsan_mutex_pre_signal
__tsan_mutex_post_signal
__tsan_mutex_pre_divert
__tsan_mutex_post_divert
__ubsan_*
Annotate*
WTFAnnotate*
RunningOnValgrind
ValgrindSlowdown

File diff suppressed because it is too large.


@@ -0,0 +1,226 @@
//===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_CLOCK_H
#define TSAN_CLOCK_H
#include "tsan_defs.h"
#include "tsan_dense_alloc.h"
namespace __tsan {
typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
typedef DenseSlabAllocCache ClockCache;
// The clock that lives in sync variables (mutexes, atomics, etc).
class SyncClock {
public:
SyncClock();
~SyncClock();
uptr size() const;
// These are used only in tests.
u64 get(unsigned tid) const;
u64 get_clean(unsigned tid) const;
void Resize(ClockCache *c, uptr nclk);
void Reset(ClockCache *c);
void DebugDump(int(*printf)(const char *s, ...));
// Clock element iterator.
// Note: it iterates only over the table without regard to dirty entries.
class Iter {
public:
explicit Iter(SyncClock* parent);
Iter& operator++();
bool operator!=(const Iter& other);
ClockElem &operator*();
private:
SyncClock *parent_;
// [pos_, end_) is the current continuous range of clock elements.
ClockElem *pos_;
ClockElem *end_;
int block_; // Index of the current second-level block.
NOINLINE void Next();
};
Iter begin();
Iter end();
private:
friend class ThreadClock;
friend class Iter;
static const uptr kDirtyTids = 2;
struct Dirty {
u64 epoch : kClkBits;
u64 tid : 64 - kClkBits; // kInvalidTid if not active
};
unsigned release_store_tid_;
unsigned release_store_reused_;
Dirty dirty_[kDirtyTids];
// If size_ is 0, tab_ is nullptr.
// If size_ <= 64 (kClockCount), tab_ contains a pointer to an array with
// 64 ClockElem's (ClockBlock::clock).
// Otherwise, tab_ points to an array with up to 127 u32 elements,
// each pointing to a second-level 512-byte block with 64 ClockElem's.
// Unused space in the first level ClockBlock is used to store additional
// clock elements.
// The last u32 element in the first level ClockBlock is always used as
// reference counter.
//
// See the following scheme for details.
// All memory blocks are 512 bytes (allocated from ClockAlloc).
// Clock (clk) elements are 64 bits.
// Idx and ref are 32 bits.
//
// tab_
// |
// \/
// +----------------------------------------------------+
// | clk128 | clk129 | ...unused... | idx1 | idx0 | ref |
// +----------------------------------------------------+
// | |
// | \/
// | +----------------+
// | | clk0 ... clk63 |
// | +----------------+
// \/
// +------------------+
// | clk64 ... clk127 |
// +------------------+
//
// Note: dirty entries, if active, always override what's stored in the clock.
ClockBlock *tab_;
u32 tab_idx_;
u16 size_;
u16 blocks_; // Number of second level blocks.
void Unshare(ClockCache *c);
bool IsShared() const;
bool Cachable() const;
void ResetImpl();
void FlushDirty();
uptr capacity() const;
u32 get_block(uptr bi) const;
void append_block(u32 idx);
ClockElem &elem(unsigned tid) const;
};
// The clock that lives in threads.
class ThreadClock {
public:
typedef DenseSlabAllocCache Cache;
explicit ThreadClock(unsigned tid, unsigned reused = 0);
u64 get(unsigned tid) const;
void set(ClockCache *c, unsigned tid, u64 v);
void set(u64 v);
void tick();
uptr size() const;
void acquire(ClockCache *c, SyncClock *src);
void release(ClockCache *c, SyncClock *dst);
void acq_rel(ClockCache *c, SyncClock *dst);
void ReleaseStore(ClockCache *c, SyncClock *dst);
void ResetCached(ClockCache *c);
void DebugReset();
void DebugDump(int(*printf)(const char *s, ...));
private:
static const uptr kDirtyTids = SyncClock::kDirtyTids;
// Index of the thread associated with the clock ("current thread").
const unsigned tid_;
const unsigned reused_; // tid_ reuse count.
// Current thread time when it acquired something from other threads.
u64 last_acquire_;
// Cached SyncClock (without dirty entries and release_store_tid_).
// We reuse it for subsequent store-release operations without intervening
// acquire operations. Since it is shared (and thus constant), clock value
// for the current thread is then stored in dirty entries in the SyncClock.
// We hold a reference to the table while it is cached here.
u32 cached_idx_;
u16 cached_size_;
u16 cached_blocks_;
// Number of active elements in the clk_ table (the rest is zeros).
uptr nclk_;
u64 clk_[kMaxTidInClock]; // Fixed size vector clock.
bool IsAlreadyAcquired(const SyncClock *src) const;
void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
};
ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const {
DCHECK_LT(tid, kMaxTidInClock);
return clk_[tid];
}
ALWAYS_INLINE void ThreadClock::set(u64 v) {
DCHECK_GE(v, clk_[tid_]);
clk_[tid_] = v;
}
ALWAYS_INLINE void ThreadClock::tick() {
clk_[tid_]++;
}
ALWAYS_INLINE uptr ThreadClock::size() const {
return nclk_;
}
ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
return Iter(this);
}
ALWAYS_INLINE SyncClock::Iter SyncClock::end() {
return Iter(nullptr);
}
ALWAYS_INLINE uptr SyncClock::size() const {
return size_;
}
ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent)
: parent_(parent)
, pos_(nullptr)
, end_(nullptr)
, block_(-1) {
if (parent)
Next();
}
ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() {
pos_++;
if (UNLIKELY(pos_ >= end_))
Next();
return *this;
}
ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) {
return parent_ != other.parent_;
}
ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() {
return *pos_;
}
} // namespace __tsan
#endif // TSAN_CLOCK_H
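
The SyncClock/ThreadClock pair above implements vector clocks: a release publishes a thread's clock into a sync object, and an acquire folds the sync object's clock back into the acquiring thread. A minimal standalone sketch of just that rule (kThreads is illustrative and fixed; the real classes add dirty entries, caching, and reference counting on top):

// Standalone sketch, not runtime code: the happens-before rule behind
// ThreadClock::acquire/release, on plain fixed-size arrays.
#include <algorithm>
#include <cstdint>
#include <cstdio>

constexpr unsigned kThreads = 4;  // illustrative; the runtime sizes clocks dynamically

struct VectorClock {
  uint64_t clk[kThreads] = {};
  // release: dst = max(dst, src) element-wise (the thread publishes its view).
  void ReleaseTo(VectorClock &dst) const {
    for (unsigned i = 0; i < kThreads; i++)
      dst.clk[i] = std::max(dst.clk[i], clk[i]);
  }
  // acquire: fold the sync object's view into the acquiring thread's clock.
  void AcquireFrom(const VectorClock &src) { src.ReleaseTo(*this); }
};

int main() {
  VectorClock t0, t1, sync;  // 'sync' plays the role of a SyncClock
  t0.clk[0] = 1;             // thread 0 ticks...
  t0.ReleaseTo(sync);        // ...and releases (e.g. a mutex unlock)
  t1.AcquireFrom(sync);      // thread 1 acquires (e.g. the matching lock)
  std::printf("t1 sees t0 at epoch %llu\n", (unsigned long long)t1.clk[0]);
  return 0;
}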


@@ -0,0 +1,250 @@
//===-- tsan_debugging.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// TSan debugging API implementation.
//===----------------------------------------------------------------------===//
#include "tsan_interface.h"
#include "tsan_report.h"
#include "tsan_rtl.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
using namespace __tsan;
static const char *ReportTypeDescription(ReportType typ) {
if (typ == ReportTypeRace) return "data-race";
if (typ == ReportTypeVptrRace) return "data-race-vptr";
if (typ == ReportTypeUseAfterFree) return "heap-use-after-free";
if (typ == ReportTypeVptrUseAfterFree) return "heap-use-after-free-vptr";
if (typ == ReportTypeExternalRace) return "external-race";
if (typ == ReportTypeThreadLeak) return "thread-leak";
if (typ == ReportTypeMutexDestroyLocked) return "locked-mutex-destroy";
if (typ == ReportTypeMutexDoubleLock) return "mutex-double-lock";
if (typ == ReportTypeMutexInvalidAccess) return "mutex-invalid-access";
if (typ == ReportTypeMutexBadUnlock) return "mutex-bad-unlock";
if (typ == ReportTypeMutexBadReadLock) return "mutex-bad-read-lock";
if (typ == ReportTypeMutexBadReadUnlock) return "mutex-bad-read-unlock";
if (typ == ReportTypeSignalUnsafe) return "signal-unsafe-call";
if (typ == ReportTypeErrnoInSignal) return "errno-in-signal-handler";
if (typ == ReportTypeDeadlock) return "lock-order-inversion";
return "";
}
static const char *ReportLocationTypeDescription(ReportLocationType typ) {
if (typ == ReportLocationGlobal) return "global";
if (typ == ReportLocationHeap) return "heap";
if (typ == ReportLocationStack) return "stack";
if (typ == ReportLocationTLS) return "tls";
if (typ == ReportLocationFD) return "fd";
return "";
}
static void CopyTrace(SymbolizedStack *first_frame, void **trace,
uptr trace_size) {
uptr i = 0;
for (SymbolizedStack *frame = first_frame; frame != nullptr;
frame = frame->next) {
trace[i++] = (void *)frame->info.address;
if (i >= trace_size) break;
}
}
// Meant to be called by the debugger.
SANITIZER_INTERFACE_ATTRIBUTE
void *__tsan_get_current_report() {
return const_cast<ReportDesc*>(cur_thread()->current_report);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_data(void *report, const char **description, int *count,
int *stack_count, int *mop_count, int *loc_count,
int *mutex_count, int *thread_count,
int *unique_tid_count, void **sleep_trace,
uptr trace_size) {
const ReportDesc *rep = (ReportDesc *)report;
*description = ReportTypeDescription(rep->typ);
*count = rep->count;
*stack_count = rep->stacks.Size();
*mop_count = rep->mops.Size();
*loc_count = rep->locs.Size();
*mutex_count = rep->mutexes.Size();
*thread_count = rep->threads.Size();
*unique_tid_count = rep->unique_tids.Size();
if (rep->sleep) CopyTrace(rep->sleep->frames, sleep_trace, trace_size);
return 1;
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_stack(void *report, uptr idx, void **trace,
uptr trace_size) {
const ReportDesc *rep = (ReportDesc *)report;
CHECK_LT(idx, rep->stacks.Size());
ReportStack *stack = rep->stacks[idx];
if (stack) CopyTrace(stack->frames, trace, trace_size);
return stack ? 1 : 0;
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
int *size, int *write, int *atomic, void **trace,
uptr trace_size) {
const ReportDesc *rep = (ReportDesc *)report;
CHECK_LT(idx, rep->mops.Size());
ReportMop *mop = rep->mops[idx];
*tid = mop->tid;
*addr = (void *)mop->addr;
*size = mop->size;
*write = mop->write ? 1 : 0;
*atomic = mop->atomic ? 1 : 0;
if (mop->stack) CopyTrace(mop->stack->frames, trace, trace_size);
return 1;
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_loc(void *report, uptr idx, const char **type,
void **addr, uptr *start, uptr *size, int *tid,
int *fd, int *suppressable, void **trace,
uptr trace_size) {
const ReportDesc *rep = (ReportDesc *)report;
CHECK_LT(idx, rep->locs.Size());
ReportLocation *loc = rep->locs[idx];
*type = ReportLocationTypeDescription(loc->type);
*addr = (void *)loc->global.start;
*start = loc->heap_chunk_start;
*size = loc->heap_chunk_size;
*tid = loc->tid;
*fd = loc->fd;
*suppressable = loc->suppressable;
if (loc->stack) CopyTrace(loc->stack->frames, trace, trace_size);
return 1;
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_loc_object_type(void *report, uptr idx,
const char **object_type) {
const ReportDesc *rep = (ReportDesc *)report;
CHECK_LT(idx, rep->locs.Size());
ReportLocation *loc = rep->locs[idx];
*object_type = GetObjectTypeFromTag(loc->external_tag);
return 1;
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
int *destroyed, void **trace, uptr trace_size) {
const ReportDesc *rep = (ReportDesc *)report;
CHECK_LT(idx, rep->mutexes.Size());
ReportMutex *mutex = rep->mutexes[idx];
*mutex_id = mutex->id;
*addr = (void *)mutex->addr;
*destroyed = mutex->destroyed;
if (mutex->stack) CopyTrace(mutex->stack->frames, trace, trace_size);
return 1;
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id,
int *running, const char **name, int *parent_tid,
void **trace, uptr trace_size) {
const ReportDesc *rep = (ReportDesc *)report;
CHECK_LT(idx, rep->threads.Size());
ReportThread *thread = rep->threads[idx];
*tid = thread->id;
*os_id = thread->os_id;
*running = thread->running;
*name = thread->name;
*parent_tid = thread->parent_tid;
if (thread->stack) CopyTrace(thread->stack->frames, trace, trace_size);
return 1;
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid) {
const ReportDesc *rep = (ReportDesc *)report;
CHECK_LT(idx, rep->unique_tids.Size());
*tid = rep->unique_tids[idx];
return 1;
}
SANITIZER_INTERFACE_ATTRIBUTE
const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
uptr *region_address_ptr,
uptr *region_size_ptr) {
uptr region_address = 0;
uptr region_size = 0;
const char *region_kind = nullptr;
if (name && name_size > 0) name[0] = 0;
if (IsMetaMem(addr)) {
region_kind = "meta shadow";
} else if (IsShadowMem(addr)) {
region_kind = "shadow";
} else {
bool is_stack = false;
MBlock *b = 0;
Allocator *a = allocator();
if (a->PointerIsMine((void *)addr)) {
void *block_begin = a->GetBlockBegin((void *)addr);
if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
}
if (b != 0) {
region_address = (uptr)allocator()->GetBlockBegin((void *)addr);
region_size = b->siz;
region_kind = "heap";
} else {
// TODO(kuba.brecka): We should not lock. This is supposed to be called
// from within the debugger when other threads are stopped.
ctx->thread_registry->Lock();
ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack);
ctx->thread_registry->Unlock();
if (tctx) {
region_kind = is_stack ? "stack" : "tls";
} else {
region_kind = "global";
DataInfo info;
if (Symbolizer::GetOrInit()->SymbolizeData(addr, &info)) {
internal_strncpy(name, info.name, name_size);
region_address = info.start;
region_size = info.size;
}
}
}
}
CHECK(region_kind);
if (region_address_ptr) *region_address_ptr = region_address;
if (region_size_ptr) *region_size_ptr = region_size;
return region_kind;
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
tid_t *os_id) {
MBlock *b = 0;
Allocator *a = allocator();
if (a->PointerIsMine((void *)addr)) {
void *block_begin = a->GetBlockBegin((void *)addr);
if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
}
if (b == 0) return 0;
*thread_id = b->tid;
// No locking. This is supposed to be called from within the debugger when
// other threads are stopped.
ThreadContextBase *tctx = ctx->thread_registry->GetThreadLocked(b->tid);
*os_id = tctx->os_id;
StackTrace stack = StackDepotGet(b->stk);
size = Min(size, (uptr)stack.size);
for (uptr i = 0; i < size; i++) trace[i] = stack.trace[stack.size - i - 1];
return size;
}
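
The functions above are plain C entry points, so a debugger script or a test harness can walk a report directly. A hedged sketch of the call sequence (declarations copied from this file, with uptr spelled as unsigned long, which matches LP64 targets; data is only available while a report is being formed):

// Hedged sketch of driving the debugging API above.
extern "C" {
void *__tsan_get_current_report();
int __tsan_get_report_data(void *report, const char **description, int *count,
                           int *stack_count, int *mop_count, int *loc_count,
                           int *mutex_count, int *thread_count,
                           int *unique_tid_count, void **sleep_trace,
                           unsigned long trace_size);
int __tsan_get_report_mop(void *report, unsigned long idx, int *tid,
                          void **addr, int *size, int *write, int *atomic,
                          void **trace, unsigned long trace_size);
}

void DumpCurrentReport() {
  void *rep = __tsan_get_current_report();
  const char *desc;
  int count, stacks, mops, locs, mutexes, threads, utids;
  void *sleep[16] = {};
  __tsan_get_report_data(rep, &desc, &count, &stacks, &mops, &locs, &mutexes,
                         &threads, &utids, sleep, 16);
  // For a typical race, desc == "data-race" and mops == 2.
  for (int i = 0; i < mops; i++) {
    int tid, size, write, atomic;
    void *addr, *trace[64] = {};
    __tsan_get_report_mop(rep, i, &tid, &addr, &size, &write, &atomic,
                          trace, 64);
  }
}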


@@ -0,0 +1,196 @@
//===-- tsan_defs.h ---------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_DEFS_H
#define TSAN_DEFS_H
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_stat.h"
#include "ubsan/ubsan_platform.h"
// Setup defaults for compile definitions.
#ifndef TSAN_NO_HISTORY
# define TSAN_NO_HISTORY 0
#endif
#ifndef TSAN_COLLECT_STATS
# define TSAN_COLLECT_STATS 0
#endif
#ifndef TSAN_CONTAINS_UBSAN
# if CAN_SANITIZE_UB && !SANITIZER_GO
# define TSAN_CONTAINS_UBSAN 1
# else
# define TSAN_CONTAINS_UBSAN 0
# endif
#endif
namespace __tsan {
const int kClkBits = 42;
const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
struct ClockElem {
u64 epoch : kClkBits;
u64 reused : 64 - kClkBits; // tid reuse count
};
struct ClockBlock {
static const uptr kSize = 512;
static const uptr kTableSize = kSize / sizeof(u32);
static const uptr kClockCount = kSize / sizeof(ClockElem);
static const uptr kRefIdx = kTableSize - 1;
static const uptr kBlockIdx = kTableSize - 2;
union {
u32 table[kTableSize];
ClockElem clock[kClockCount];
};
ClockBlock() {
}
};
const int kTidBits = 13;
// Reduce kMaxTid by kClockCount because one slot in the ClockBlock table is
// occupied by the reference counter, so the total number of elements we can
// store in SyncClock is kClockCount * (kTableSize - 1).
const unsigned kMaxTid = (1 << kTidBits) - ClockBlock::kClockCount;
#if !SANITIZER_GO
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
#else
const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
#endif
const uptr kShadowStackSize = 64 * 1024;
// Count of shadow values in a shadow cell.
const uptr kShadowCnt = 4;
// That many user bytes are mapped onto a single shadow cell.
const uptr kShadowCell = 8;
// Size of a single shadow value (u64).
const uptr kShadowSize = 8;
// Shadow memory is kShadowMultiplier times larger than user memory.
const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
// That many user bytes are mapped onto a single meta shadow cell.
// Must be less than or equal to the minimal memory allocator alignment.
const uptr kMetaShadowCell = 8;
// Size of a single meta shadow value (u32).
const uptr kMetaShadowSize = 4;
#if TSAN_NO_HISTORY
const bool kCollectHistory = false;
#else
const bool kCollectHistory = true;
#endif
const u16 kInvalidTid = kMaxTid + 1;
// The following "build consistency" machinery ensures that all source files
// are built in the same configuration. Inconsistent builds lead to
// hard-to-debug crashes.
#if SANITIZER_DEBUG
void build_consistency_debug();
#else
void build_consistency_release();
#endif
#if TSAN_COLLECT_STATS
void build_consistency_stats();
#else
void build_consistency_nostats();
#endif
static inline void USED build_consistency() {
#if SANITIZER_DEBUG
build_consistency_debug();
#else
build_consistency_release();
#endif
#if TSAN_COLLECT_STATS
build_consistency_stats();
#else
build_consistency_nostats();
#endif
}
template<typename T>
T min(T a, T b) {
return a < b ? a : b;
}
template<typename T>
T max(T a, T b) {
return a > b ? a : b;
}
template<typename T>
T RoundUp(T p, u64 align) {
DCHECK_EQ(align & (align - 1), 0);
return (T)(((u64)p + align - 1) & ~(align - 1));
}
template<typename T>
T RoundDown(T p, u64 align) {
DCHECK_EQ(align & (align - 1), 0);
return (T)((u64)p & ~(align - 1));
}
// Zeroes the high part; returns the 'bits' least significant bits.
template<typename T>
T GetLsb(T v, int bits) {
return (T)((u64)v & ((1ull << bits) - 1));
}
struct MD5Hash {
u64 hash[2];
bool operator==(const MD5Hash &other) const;
};
MD5Hash md5_hash(const void *data, uptr size);
struct Processor;
struct ThreadState;
class ThreadContext;
struct Context;
struct ReportStack;
class ReportDesc;
class RegionAlloc;
// Descriptor of user's memory block.
struct MBlock {
u64 siz : 48;
u64 tag : 16;
u32 stk;
u16 tid;
};
COMPILER_CHECK(sizeof(MBlock) == 16);
enum ExternalTag : uptr {
kExternalTagNone = 0,
kExternalTagSwiftModifyingAccess = 1,
kExternalTagFirstUserAvailable = 2,
kExternalTagMax = 1024,
// Don't set kExternalTagMax over 65,536, since MBlock only stores tags
// as 16-bit values, see tsan_defs.h.
};
} // namespace __tsan
#endif // TSAN_DEFS_H
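
A standalone check of the bit budgets and helpers defined above, with the pieces re-implemented so the snippet compiles on its own (the layout assertion assumes a typical LP64/Itanium ABI):

#include <cassert>
#include <cstdint>

// Re-implementations of the helpers above, for a self-contained snippet.
template <typename T> T RoundUp(T p, uint64_t align) {
  return (T)(((uint64_t)p + align - 1) & ~(align - 1));
}
template <typename T> T GetLsb(T v, int bits) {
  return (T)((uint64_t)v & ((1ull << bits) - 1));
}

// Mirror of MBlock: 48 + 16 bits pack into one u64, then u32 + u16 plus
// two bytes of tail padding under 8-byte alignment give 16 bytes total.
struct MBlockMirror {
  uint64_t siz : 48;
  uint64_t tag : 16;
  uint32_t stk;
  uint16_t tid;
};
static_assert(sizeof(MBlockMirror) == 16, "same check as COMPILER_CHECK above");

int main() {
  // kClkBits = 42 epoch bits leave 64 - 42 = 22 bits for the reuse count.
  constexpr int kClkBits = 42;
  constexpr unsigned kMaxTidReuse = (1u << (64 - kClkBits)) - 1;
  static_assert(kMaxTidReuse == 4194303, "2^22 - 1");
  assert(RoundUp<uint64_t>(13, 8) == 16);       // next multiple of the 8-byte shadow cell
  assert(GetLsb<uint64_t>(0x1234, 8) == 0x34);  // keep only the low 8 bits
  return 0;
}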


@@ -0,0 +1,142 @@
//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference from traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides functionality to map
// the index onto the real pointer. The index is u32, that is, 2 times smaller
// than uptr (hence the Dense prefix).
//===----------------------------------------------------------------------===//
#ifndef TSAN_DENSE_ALLOC_H
#define TSAN_DENSE_ALLOC_H
#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"
#include "tsan_mutex.h"
namespace __tsan {
class DenseSlabAllocCache {
static const uptr kSize = 128;
typedef u32 IndexT;
uptr pos;
IndexT cache[kSize];
template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
};
template<typename T, uptr kL1Size, uptr kL2Size>
class DenseSlabAlloc {
public:
typedef DenseSlabAllocCache Cache;
typedef typename Cache::IndexT IndexT;
explicit DenseSlabAlloc(const char *name) {
// Check that kL1Size and kL2Size are sane.
CHECK_EQ(kL1Size & (kL1Size - 1), 0);
CHECK_EQ(kL2Size & (kL2Size - 1), 0);
CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
// Check that it makes sense to use the dense alloc.
CHECK_GE(sizeof(T), sizeof(IndexT));
internal_memset(map_, 0, sizeof(map_));
freelist_ = 0;
fillpos_ = 0;
name_ = name;
}
~DenseSlabAlloc() {
for (uptr i = 0; i < kL1Size; i++) {
if (map_[i] != 0)
UnmapOrDie(map_[i], kL2Size * sizeof(T));
}
}
IndexT Alloc(Cache *c) {
if (c->pos == 0)
Refill(c);
return c->cache[--c->pos];
}
void Free(Cache *c, IndexT idx) {
DCHECK_NE(idx, 0);
if (c->pos == Cache::kSize)
Drain(c);
c->cache[c->pos++] = idx;
}
T *Map(IndexT idx) {
DCHECK_NE(idx, 0);
DCHECK_LE(idx, kL1Size * kL2Size);
return &map_[idx / kL2Size][idx % kL2Size];
}
void FlushCache(Cache *c) {
SpinMutexLock lock(&mtx_);
while (c->pos) {
IndexT idx = c->cache[--c->pos];
*(IndexT*)Map(idx) = freelist_;
freelist_ = idx;
}
}
void InitCache(Cache *c) {
c->pos = 0;
internal_memset(c->cache, 0, sizeof(c->cache));
}
private:
T *map_[kL1Size];
SpinMutex mtx_;
IndexT freelist_;
uptr fillpos_;
const char *name_;
void Refill(Cache *c) {
SpinMutexLock lock(&mtx_);
if (freelist_ == 0) {
if (fillpos_ == kL1Size) {
Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
name_, kL1Size, kL2Size);
Die();
}
VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
name_, fillpos_, kL1Size, kL2Size);
T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
// Reserve 0 as invalid index.
IndexT start = fillpos_ == 0 ? 1 : 0;
for (IndexT i = start; i < kL2Size; i++) {
new(batch + i) T;
*(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
}
*(IndexT*)(batch + kL2Size - 1) = 0;
freelist_ = fillpos_ * kL2Size + start;
map_[fillpos_++] = batch;
}
for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
IndexT idx = freelist_;
c->cache[c->pos++] = idx;
freelist_ = *(IndexT*)Map(idx);
}
}
void Drain(Cache *c) {
SpinMutexLock lock(&mtx_);
for (uptr i = 0; i < Cache::kSize / 2; i++) {
IndexT idx = c->cache[--c->pos];
*(IndexT*)Map(idx) = freelist_;
freelist_ = idx;
}
}
};
} // namespace __tsan
#endif // TSAN_DENSE_ALLOC_H
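
A self-contained toy version of the same idea: hand out u32 indices instead of pointers and map them back on demand. The real DenseSlabAlloc adds two-level lazily mmap'ed storage, a spin lock, per-thread caches, and stores the freelist links inside the freed objects themselves; here a separate vector stands in for that:

#include <cassert>
#include <cstdint>
#include <vector>

template <typename T>
class ToyDenseAlloc {
 public:
  using IndexT = uint32_t;
  ToyDenseAlloc() : slots_(1), next_(1, 0) {}  // slot 0 reserved as "invalid"
  IndexT Alloc() {
    if (freelist_) {                 // pop a recycled index
      IndexT idx = freelist_;
      freelist_ = next_[idx];
      return idx;
    }
    slots_.emplace_back();
    next_.push_back(0);
    return (IndexT)(slots_.size() - 1);
  }
  void Free(IndexT idx) { next_[idx] = freelist_; freelist_ = idx; }
  T *Map(IndexT idx) { assert(idx != 0); return &slots_[idx]; }
 private:
  std::vector<T> slots_;
  std::vector<IndexT> next_;  // freelist links (real code reuses freed object storage)
  IndexT freelist_ = 0;
};

int main() {
  ToyDenseAlloc<uint64_t> a;
  ToyDenseAlloc<uint64_t>::IndexT i = a.Alloc();  // a u32 handle, half a pointer wide
  *a.Map(i) = 42;
  assert(*a.Map(i) == 42);
  a.Free(i);
  assert(a.Alloc() == i);  // freed indices are recycled LIFO
  return 0;
}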


@@ -0,0 +1,125 @@
//===-- tsan_external.cc --------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_rtl.h"
#include "tsan_interceptors.h"
namespace __tsan {
#define CALLERPC ((uptr)__builtin_return_address(0))
struct TagData {
const char *object_type;
const char *header;
};
static TagData registered_tags[kExternalTagMax] = {
{},
{"Swift variable", "Swift access race"},
};
static atomic_uint32_t used_tags{kExternalTagFirstUserAvailable}; // NOLINT.
static TagData *GetTagData(uptr tag) {
// Invalid/corrupted tag? Better return NULL and let the caller deal with it.
if (tag >= atomic_load(&used_tags, memory_order_relaxed)) return nullptr;
return &registered_tags[tag];
}
const char *GetObjectTypeFromTag(uptr tag) {
TagData *tag_data = GetTagData(tag);
return tag_data ? tag_data->object_type : nullptr;
}
const char *GetReportHeaderFromTag(uptr tag) {
TagData *tag_data = GetTagData(tag);
return tag_data ? tag_data->header : nullptr;
}
void InsertShadowStackFrameForTag(ThreadState *thr, uptr tag) {
FuncEntry(thr, (uptr)&registered_tags[tag]);
}
uptr TagFromShadowStackFrame(uptr pc) {
uptr tag_count = atomic_load(&used_tags, memory_order_relaxed);
void *pc_ptr = (void *)pc;
if (pc_ptr < GetTagData(0) || pc_ptr > GetTagData(tag_count - 1))
return 0;
return (TagData *)pc_ptr - GetTagData(0);
}
#if !SANITIZER_GO
typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
void ExternalAccess(void *addr, void *caller_pc, void *tag, AccessFunc access) {
CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
ThreadState *thr = cur_thread();
if (caller_pc) FuncEntry(thr, (uptr)caller_pc);
InsertShadowStackFrameForTag(thr, (uptr)tag);
bool in_ignored_lib;
if (!caller_pc || !libignore()->IsIgnored((uptr)caller_pc, &in_ignored_lib)) {
access(thr, CALLERPC, (uptr)addr, kSizeLog1);
}
FuncExit(thr);
if (caller_pc) FuncExit(thr);
}
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void *__tsan_external_register_tag(const char *object_type) {
uptr new_tag = atomic_fetch_add(&used_tags, 1, memory_order_relaxed);
CHECK_LT(new_tag, kExternalTagMax);
GetTagData(new_tag)->object_type = internal_strdup(object_type);
char header[127] = {0};
internal_snprintf(header, sizeof(header), "race on %s", object_type);
GetTagData(new_tag)->header = internal_strdup(header);
return (void *)new_tag;
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_register_header(void *tag, const char *header) {
CHECK_GE((uptr)tag, kExternalTagFirstUserAvailable);
CHECK_LT((uptr)tag, kExternalTagMax);
atomic_uintptr_t *header_ptr =
(atomic_uintptr_t *)&GetTagData((uptr)tag)->header;
header = internal_strdup(header);
char *old_header =
(char *)atomic_exchange(header_ptr, (uptr)header, memory_order_seq_cst);
if (old_header) internal_free(old_header);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_assign_tag(void *addr, void *tag) {
CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
Allocator *a = allocator();
MBlock *b = nullptr;
if (a->PointerIsMine((void *)addr)) {
void *block_begin = a->GetBlockBegin((void *)addr);
if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
}
if (b) {
b->tag = (uptr)tag;
}
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
ExternalAccess(addr, caller_pc, tag, MemoryRead);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
ExternalAccess(addr, caller_pc, tag, MemoryWrite);
}
} // extern "C"
#endif // !SANITIZER_GO
} // namespace __tsan
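
A sketch of how a library might use the external API above to get tagged reports such as "race on MyTable" instead of raw data races (MyTable, kMyTag, MyLibInit, and MyTableRead/Write are made up for illustration; the symbols only resolve when the program is built with -fsanitize=thread):

extern "C" {
void *__tsan_external_register_tag(const char *object_type);
void __tsan_external_assign_tag(void *addr, void *tag);
void __tsan_external_read(void *addr, void *caller_pc, void *tag);
void __tsan_external_write(void *addr, void *caller_pc, void *tag);
}

static void *kMyTag;

void MyLibInit() {
  kMyTag = __tsan_external_register_tag("MyTable");  // reports say "race on MyTable"
}

void MyTableRead(void *table) {
  __tsan_external_read(table, __builtin_return_address(0), kMyTag);
  // ... the actual read of *table ...
}

void MyTableWrite(void *table) {
  __tsan_external_write(table, __builtin_return_address(0), kMyTag);
  // ... the actual write of *table ...
}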


@@ -0,0 +1,316 @@
//===-- tsan_fd.cc --------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_fd.h"
#include "tsan_rtl.h"
#include <sanitizer_common/sanitizer_atomic.h>
namespace __tsan {
const int kTableSizeL1 = 1024;
const int kTableSizeL2 = 1024;
const int kTableSize = kTableSizeL1 * kTableSizeL2;
struct FdSync {
atomic_uint64_t rc;
};
struct FdDesc {
FdSync *sync;
int creation_tid;
u32 creation_stack;
};
struct FdContext {
atomic_uintptr_t tab[kTableSizeL1];
// Addresses used for synchronization.
FdSync globsync;
FdSync filesync;
FdSync socksync;
u64 connectsync;
};
static FdContext fdctx;
static bool bogusfd(int fd) {
// Apparently a bogus fd value.
return fd < 0 || fd >= kTableSize;
}
static FdSync *allocsync(ThreadState *thr, uptr pc) {
FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
kDefaultAlignment, false);
atomic_store(&s->rc, 1, memory_order_relaxed);
return s;
}
static FdSync *ref(FdSync *s) {
if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
return s;
}
static void unref(ThreadState *thr, uptr pc, FdSync *s) {
if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
CHECK_NE(s, &fdctx.globsync);
CHECK_NE(s, &fdctx.filesync);
CHECK_NE(s, &fdctx.socksync);
user_free(thr, pc, s, false);
}
}
}
static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
CHECK_GE(fd, 0);
CHECK_LT(fd, kTableSize);
atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
uptr l1 = atomic_load(pl1, memory_order_consume);
if (l1 == 0) {
uptr size = kTableSizeL2 * sizeof(FdDesc);
// We need this to reside in user memory to properly catch races on it.
void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
internal_memset(p, 0, size);
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
l1 = (uptr)p;
else
user_free(thr, pc, p, false);
}
return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT
}
// s must already be ref'ed.
static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
bool write = true) {
FdDesc *d = fddesc(thr, pc, fd);
// As a matter of fact, we don't intercept all close calls.
// See e.g. libc __res_iclose().
if (d->sync) {
unref(thr, pc, d->sync);
d->sync = 0;
}
if (flags()->io_sync == 0) {
unref(thr, pc, s);
} else if (flags()->io_sync == 1) {
d->sync = s;
} else if (flags()->io_sync == 2) {
unref(thr, pc, s);
d->sync = &fdctx.globsync;
}
d->creation_tid = thr->tid;
d->creation_stack = CurrentStackId(thr, pc);
if (write) {
// To catch races between fd usage and open.
MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
} else {
// See the dup-related comment in FdClose.
MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}
}
void FdInit() {
atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
}
void FdOnFork(ThreadState *thr, uptr pc) {
// On fork() we need to reset all fd's, because the child is going to
// close all of them, and that will cause races between previous reads/writes
// and the close.
for (int l1 = 0; l1 < kTableSizeL1; l1++) {
FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
if (tab == 0)
break;
for (int l2 = 0; l2 < kTableSizeL2; l2++) {
FdDesc *d = &tab[l2];
MemoryResetRange(thr, pc, (uptr)d, 8);
}
}
}
bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
for (int l1 = 0; l1 < kTableSizeL1; l1++) {
FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
if (tab == 0)
break;
if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
FdDesc *d = &tab[l2];
*fd = l1 * kTableSizeL2 + l2;  // inverse of the fd -> (l1, l2) split in fddesc()
*tid = d->creation_tid;
*stack = d->creation_stack;
return true;
}
}
return false;
}
void FdAcquire(ThreadState *thr, uptr pc, int fd) {
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
FdSync *s = d->sync;
DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
MemoryRead(thr, pc, (uptr)d, kSizeLog8);
if (s)
Acquire(thr, pc, (uptr)s);
}
void FdRelease(ThreadState *thr, uptr pc, int fd) {
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
FdSync *s = d->sync;
DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
MemoryRead(thr, pc, (uptr)d, kSizeLog8);
if (s)
Release(thr, pc, (uptr)s);
}
void FdAccess(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}
void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
if (write) {
// To catch races between fd usage and close.
MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
} else {
// This path is used only by dup2/dup3 calls.
// We do a read instead of a write because there are a number of legitimate
// cases where write would lead to false positives:
// 1. Some software dups a closed pipe in place of a socket before closing
// the socket (to prevent races actually).
// 2. Some daemons dup /dev/null in place of stdin/stdout.
// On the other hand we have not seen cases when write here catches real
// bugs.
MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}
// We need to clear it, because if we do not intercept any call out there
// that creates fd, we will hit false positives.
MemoryResetRange(thr, pc, (uptr)d, 8);
unref(thr, pc, d->sync);
d->sync = 0;
d->creation_tid = 0;
d->creation_stack = 0;
}
void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
init(thr, pc, fd, &fdctx.filesync);
}
void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
if (bogusfd(oldfd) || bogusfd(newfd))
return;
// Ignore the case when the user dups a not-yet-connected socket.
FdDesc *od = fddesc(thr, pc, oldfd);
MemoryRead(thr, pc, (uptr)od, kSizeLog8);
FdClose(thr, pc, newfd, write);
init(thr, pc, newfd, ref(od->sync), write);
}
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
FdSync *s = allocsync(thr, pc);
init(thr, pc, rfd, ref(s));
init(thr, pc, wfd, ref(s));
unref(thr, pc, s);
}
void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
init(thr, pc, fd, allocsync(thr, pc));
}
void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
init(thr, pc, fd, 0);
}
void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
init(thr, pc, fd, 0);
}
void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
init(thr, pc, fd, allocsync(thr, pc));
}
void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
// It can be a UDP socket.
init(thr, pc, fd, &fdctx.socksync);
}
void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
if (bogusfd(fd))
return;
// Synchronize connect->accept.
Acquire(thr, pc, (uptr)&fdctx.connectsync);
init(thr, pc, newfd, &fdctx.socksync);
}
void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
// Synchronize connect->accept.
Release(thr, pc, (uptr)&fdctx.connectsync);
}
void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
init(thr, pc, fd, &fdctx.socksync);
}
uptr File2addr(const char *path) {
(void)path;
static u64 addr;
return (uptr)&addr;
}
uptr Dir2addr(const char *path) {
(void)path;
static u64 addr;
return (uptr)&addr;
}
} // namespace __tsan


@@ -0,0 +1,65 @@
//===-- tsan_fd.h -----------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// This file handles synchronization via IO.
// People use IO for synchronization along the lines of:
//
// int X;
// int client_socket; // initialized elsewhere
// int server_socket; // initialized elsewhere
//
// Thread 1:
// X = 42;
// send(client_socket, ...);
//
// Thread 2:
// if (recv(server_socket, ...) > 0)
// assert(X == 42);
//
// This file determines the scope of the file descriptor (pipe, socket,
// all local files, etc) and executes acquire and release operations on
// the scope as necessary. Some scopes are very fine grained (e.g. pipe
// operations synchronize only with operations on the same pipe), while
// others are coarse-grained (e.g. all operations on local files synchronize
// with each other).
//===----------------------------------------------------------------------===//
#ifndef TSAN_FD_H
#define TSAN_FD_H
#include "tsan_rtl.h"
namespace __tsan {
void FdInit();
void FdAcquire(ThreadState *thr, uptr pc, int fd);
void FdRelease(ThreadState *thr, uptr pc, int fd);
void FdAccess(ThreadState *thr, uptr pc, int fd);
void FdClose(ThreadState *thr, uptr pc, int fd, bool write = true);
void FdFileCreate(ThreadState *thr, uptr pc, int fd);
void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write);
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd);
void FdEventCreate(ThreadState *thr, uptr pc, int fd);
void FdSignalCreate(ThreadState *thr, uptr pc, int fd);
void FdInotifyCreate(ThreadState *thr, uptr pc, int fd);
void FdPollCreate(ThreadState *thr, uptr pc, int fd);
void FdSocketCreate(ThreadState *thr, uptr pc, int fd);
void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd);
void FdSocketConnecting(ThreadState *thr, uptr pc, int fd);
void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
void FdOnFork(ThreadState *thr, uptr pc);
uptr File2addr(const char *path);
uptr Dir2addr(const char *path);
} // namespace __tsan
#endif // TSAN_FD_H
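
A runnable version of the pattern from the header comment above: with the default io_sync=1, TSan treats the write()->read() pair on a pipe as a release/acquire edge (both ends share one FdSync, see FdPipeCreate), so the plain access to X is race-free. Assumes POSIX; compile with -fsanitize=thread:

#include <cassert>
#include <pthread.h>
#include <unistd.h>

static int X;
static int fds[2];

static void *producer(void *) {
  X = 42;                       // plain write...
  char c = 0;
  (void)write(fds[1], &c, 1);   // ...published by the pipe write (release)
  return nullptr;
}

int main() {
  (void)pipe(fds);
  pthread_t t;
  pthread_create(&t, nullptr, producer, nullptr);
  char c;
  if (read(fds[0], &c, 1) == 1)  // the pipe read acquires, so...
    assert(X == 42);             // ...this access is not reported as a race
  pthread_join(t, nullptr);
  return 0;
}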


@@ -0,0 +1,127 @@
//===-- tsan_flags.cc -----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "ubsan/ubsan_flags.h"
namespace __tsan {
// Can be overridden by the frontend.
#ifdef TSAN_EXTERNAL_HOOKS
extern "C" const char* __tsan_default_options();
#else
SANITIZER_WEAK_DEFAULT_IMPL
const char *__tsan_default_options() {
return "";
}
#endif
void Flags::SetDefaults() {
#define TSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "tsan_flags.inc"
#undef TSAN_FLAG
// DDFlags
second_deadlock_stack = false;
}
void RegisterTsanFlags(FlagParser *parser, Flags *f) {
#define TSAN_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
#include "tsan_flags.inc"
#undef TSAN_FLAG
// DDFlags
RegisterFlag(parser, "second_deadlock_stack",
"Report where each mutex is locked in deadlock reports",
&f->second_deadlock_stack);
}
void InitializeFlags(Flags *f, const char *env) {
SetCommonFlagsDefaults();
{
// Override some common flags defaults.
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.allow_addr2line = true;
if (SANITIZER_GO) {
// Does not work as expected for Go: runtime handles SIGABRT and crashes.
cf.abort_on_error = false;
// Go does not have mutexes.
} else {
cf.detect_deadlocks = true;
}
cf.print_suppressions = false;
cf.stack_trace_format = " #%n %f %S %M";
cf.exitcode = 66;
cf.intercept_tls_get_addr = true;
OverrideCommonFlags(cf);
}
f->SetDefaults();
FlagParser parser;
RegisterTsanFlags(&parser, f);
RegisterCommonFlags(&parser);
#if TSAN_CONTAINS_UBSAN
__ubsan::Flags *uf = __ubsan::flags();
uf->SetDefaults();
FlagParser ubsan_parser;
__ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
RegisterCommonFlags(&ubsan_parser);
#endif
// Let a frontend override.
parser.ParseString(__tsan_default_options());
#if TSAN_CONTAINS_UBSAN
const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
ubsan_parser.ParseString(ubsan_default_options);
#endif
// Override from command line.
parser.ParseString(env);
#if TSAN_CONTAINS_UBSAN
ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS"));
#endif
// Sanity check.
if (!f->report_bugs) {
f->report_thread_leaks = false;
f->report_destroy_locked = false;
f->report_signal_unsafe = false;
}
InitializeCommonFlags();
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) parser.PrintFlagDescriptions();
if (f->history_size < 0 || f->history_size > 7) {
Printf("ThreadSanitizer: incorrect value for history_size"
" (must be [0..7])\n");
Die();
}
if (f->io_sync < 0 || f->io_sync > 2) {
Printf("ThreadSanitizer: incorrect value for io_sync"
" (must be [0..2])\n");
Die();
}
}
} // namespace __tsan
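
The weak default above can be overridden simply by defining the symbol in the instrumented program; it is parsed before the TSAN_OPTIONS environment variable (see the ParseString calls above), so the environment still wins. The option values here are just an example:

extern "C" const char *__tsan_default_options() {
  return "history_size=7:io_sync=2";
}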


@@ -0,0 +1,34 @@
//===-- tsan_flags.h --------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
// NOTE: This file may be included into user code.
//===----------------------------------------------------------------------===//
#ifndef TSAN_FLAGS_H
#define TSAN_FLAGS_H
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
namespace __tsan {
struct Flags : DDFlags {
#define TSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "tsan_flags.inc"
#undef TSAN_FLAG
void SetDefaults();
void ParseFromString(const char *str);
};
void InitializeFlags(Flags *flags, const char *env);
} // namespace __tsan
#endif // TSAN_FLAGS_H


@@ -0,0 +1,86 @@
//===-- tsan_flags.inc ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// TSan runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_FLAG
# error "Define TSAN_FLAG prior to including this file!"
#endif
// TSAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
TSAN_FLAG(bool, enable_annotations, true,
"Enable dynamic annotations, otherwise they are no-ops.")
// Suppress a race report if we've already output another race report
// with the same stack.
TSAN_FLAG(bool, suppress_equal_stacks, true,
"Suppress a race report if we've already output another race report "
"with the same stack.")
TSAN_FLAG(bool, suppress_equal_addresses, true,
"Suppress a race report if we've already output another race report "
"on the same address.")
TSAN_FLAG(bool, report_bugs, true,
"Turns off bug reporting entirely (useful for benchmarking).")
TSAN_FLAG(bool, report_thread_leaks, true, "Report thread leaks at exit?")
TSAN_FLAG(bool, report_destroy_locked, true,
"Report destruction of a locked mutex?")
TSAN_FLAG(bool, report_mutex_bugs, true,
"Report incorrect usages of mutexes and mutex annotations?")
TSAN_FLAG(bool, report_signal_unsafe, true,
"Report violations of async signal-safety "
"(e.g. malloc() call from a signal handler).")
TSAN_FLAG(bool, report_atomic_races, true,
"Report races between atomic and plain memory accesses.")
TSAN_FLAG(
bool, force_seq_cst_atomics, false,
"If set, all atomics are effectively sequentially consistent (seq_cst), "
"regardless of what user actually specified.")
TSAN_FLAG(bool, print_benign, false, "Print matched \"benign\" races at exit.")
TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.")
TSAN_FLAG(int, atexit_sleep_ms, 1000,
"Sleep in main thread before exiting for that many ms "
"(useful to catch \"at exit\" races).")
TSAN_FLAG(const char *, profile_memory, "",
"If set, periodically write memory profile to that file.")
TSAN_FLAG(int, flush_memory_ms, 0, "Flush shadow memory every X ms.")
TSAN_FLAG(int, flush_symbolizer_ms, 5000, "Flush symbolizer caches every X ms.")
TSAN_FLAG(
int, memory_limit_mb, 0,
"Resident memory limit in MB to aim at."
"If the process consumes more memory, then TSan will flush shadow memory.")
TSAN_FLAG(bool, stop_on_start, false,
"Stops on start until __tsan_resume() is called (for debugging).")
TSAN_FLAG(bool, running_on_valgrind, false,
"Controls whether RunningOnValgrind() returns true or false.")
// There are a lot of goroutines in Go, so we use smaller history.
TSAN_FLAG(
int, history_size, SANITIZER_GO ? 1 : 3,
"Per-thread history size, controls how many previous memory accesses "
"are remembered per thread. Possible values are [0..7]. "
"history_size=0 amounts to 32K memory accesses. Each next value doubles "
"the amount of memory accesses, up to history_size=7 that amounts to "
"4M memory accesses. The default value is 2 (128K memory accesses).")
TSAN_FLAG(int, io_sync, 1,
"Controls level of synchronization implied by IO operations. "
"0 - no synchronization "
"1 - reasonable level of synchronization (write->read)"
"2 - global synchronization of all IO operations.")
TSAN_FLAG(bool, die_after_fork, true,
"Die after multi-threaded fork if the child creates new threads.")
TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
TSAN_FLAG(bool, ignore_interceptors_accesses, false,
"Ignore reads and writes from all interceptors.")
TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false,
"Interceptors should only detect races when called from instrumented "
"modules.")
TSAN_FLAG(bool, shared_ptr_interceptor, true,
"Track atomic reference counting in libc++ shared_ptr and weak_ptr.")


@@ -0,0 +1,47 @@
//===-- tsan_ignoreset.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_ignoreset.h"
namespace __tsan {
const uptr IgnoreSet::kMaxSize;
IgnoreSet::IgnoreSet()
: size_() {
}
void IgnoreSet::Add(u32 stack_id) {
if (size_ == kMaxSize)
return;
for (uptr i = 0; i < size_; i++) {
if (stacks_[i] == stack_id)
return;
}
stacks_[size_++] = stack_id;
}
void IgnoreSet::Reset() {
size_ = 0;
}
uptr IgnoreSet::Size() const {
return size_;
}
u32 IgnoreSet::At(uptr i) const {
CHECK_LT(i, size_);
CHECK_LE(size_, kMaxSize);
return stacks_[i];
}
} // namespace __tsan


@@ -0,0 +1,38 @@
//===-- tsan_ignoreset.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// IgnoreSet holds a set of stack traces where ignores were enabled.
//===----------------------------------------------------------------------===//
#ifndef TSAN_IGNORESET_H
#define TSAN_IGNORESET_H
#include "tsan_defs.h"
namespace __tsan {
class IgnoreSet {
public:
static const uptr kMaxSize = 16;
IgnoreSet();
void Add(u32 stack_id);
void Reset();
uptr Size() const;
u32 At(uptr i) const;
private:
uptr size_;
u32 stacks_[kMaxSize];
};
} // namespace __tsan
#endif // TSAN_IGNORESET_H

File diff suppressed because it is too large.


@@ -0,0 +1,64 @@
#ifndef TSAN_INTERCEPTORS_H
#define TSAN_INTERCEPTORS_H
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_rtl.h"
namespace __tsan {
class ScopedInterceptor {
public:
ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
~ScopedInterceptor();
void DisableIgnores();
void EnableIgnores();
private:
ThreadState *const thr_;
const uptr pc_;
bool in_ignored_lib_;
bool ignoring_;
};
LibIgnore *libignore();
} // namespace __tsan
#define SCOPED_INTERCEPTOR_RAW(func, ...) \
ThreadState *thr = cur_thread(); \
const uptr caller_pc = GET_CALLER_PC(); \
ScopedInterceptor si(thr, #func, caller_pc); \
const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
/**/
#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
if (REAL(func) == 0) { \
Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
Die(); \
} \
if (!thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib) \
return REAL(func)(__VA_ARGS__); \
/**/
#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \
si.DisableIgnores();
#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END() \
si.EnableIgnores();
#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
#if SANITIZER_NETBSD
# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...) \
TSAN_INTERCEPTOR(ret, __libc_##func, __VA_ARGS__) \
ALIAS(WRAPPER_NAME(pthread_##func));
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...) \
TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
ALIAS(WRAPPER_NAME(pthread_##func));
#else
# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...)
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...)
#endif
#endif // TSAN_INTERCEPTORS_H
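
For illustration, the shape of a typical interceptor built from these macros ('foo' is hypothetical and this snippet only compiles inside the runtime with this header included; real interceptors live in tsan_interceptors.cc):

TSAN_INTERCEPTOR(int, foo, int fd) {
  SCOPED_TSAN_INTERCEPTOR(foo, fd);  // declares thr/pc; bails out early when not inited
  int res = REAL(foo)(fd);           // always defer to the real function
  if (res == 0)
    FdAcquire(thr, pc, fd);          // hypothetical: model a successful foo(fd) as an acquire
  return res;
}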


@@ -0,0 +1,388 @@
//===-- tsan_interceptors_mac.cc ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Mac-specific interceptors.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_interface_ann.h"
#include <libkern/OSAtomic.h>
#if defined(__has_include) && __has_include(<xpc/xpc.h>)
#include <xpc/xpc.h>
#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
typedef long long_t; // NOLINT
namespace __tsan {
// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
// actually aliases of each other, and we cannot have different interceptors for
// them, because they're actually the same function. Thus, we have to stay
// conservative and treat the non-barrier versions as mo_acq_rel.
static const morder kMacOrderBarrier = mo_acq_rel;
static const morder kMacOrderNonBarrier = mo_acq_rel;
#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \
}
#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \
}
#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
SCOPED_TSAN_INTERCEPTOR(f, ptr); \
return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \
}
#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
mo) \
TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
SCOPED_TSAN_INTERCEPTOR(f, ptr); \
return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \
}
#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \
m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
kMacOrderNonBarrier) \
m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
kMacOrderBarrier) \
m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \
kMacOrderNonBarrier) \
m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \
kMacOrderBarrier)
#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \
m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
kMacOrderNonBarrier) \
m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
kMacOrderBarrier) \
m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
kMacOrderNonBarrier) \
m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \
__tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
OSATOMIC_INTERCEPTOR_PLUS_X)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
OSATOMIC_INTERCEPTOR_PLUS_1)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub,
OSATOMIC_INTERCEPTOR_MINUS_1)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X,
OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and,
OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t) \
TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \
SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \
return tsan_atomic_f##_compare_exchange_strong( \
(volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
kMacOrderNonBarrier, kMacOrderNonBarrier); \
} \
\
TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \
t volatile *ptr) { \
SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \
return tsan_atomic_f##_compare_exchange_strong( \
(volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
kMacOrderBarrier, kMacOrderNonBarrier); \
}
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
long_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64,
void *)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
int32_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
int64_t)
#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \
TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \
volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \
char bit = 0x80u >> (n & 7); \
char mask = clear ? ~bit : bit; \
char orig_byte = op((volatile a8 *)byte_ptr, mask, mo); \
return orig_byte & bit; \
}
#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \
OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
true)
TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
size_t offset) {
SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset);
__tsan_release(item);
REAL(OSAtomicEnqueue)(list, item, offset);
}
TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
void *item = REAL(OSAtomicDequeue)(list, offset);
if (item) __tsan_acquire(item);
return item;
}
// OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X.
#if !SANITIZER_IOS
TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
size_t offset) {
SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset);
__tsan_release(item);
REAL(OSAtomicFifoEnqueue)(list, item, offset);
}
TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
size_t offset) {
SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
void *item = REAL(OSAtomicFifoDequeue)(list, offset);
if (item) __tsan_acquire(item);
return item;
}
#endif
TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
CHECK(!cur_thread()->is_dead);
if (!cur_thread()->is_inited) {
return REAL(OSSpinLockLock)(lock);
}
SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock);
REAL(OSSpinLockLock)(lock);
Acquire(thr, pc, (uptr)lock);
}
TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) {
CHECK(!cur_thread()->is_dead);
if (!cur_thread()->is_inited) {
return REAL(OSSpinLockTry)(lock);
}
SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock);
bool result = REAL(OSSpinLockTry)(lock);
if (result)
Acquire(thr, pc, (uptr)lock);
return result;
}
TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) {
CHECK(!cur_thread()->is_dead);
if (!cur_thread()->is_inited) {
return REAL(OSSpinLockUnlock)(lock);
}
SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock);
Release(thr, pc, (uptr)lock);
REAL(OSSpinLockUnlock)(lock);
}
TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) {
CHECK(!cur_thread()->is_dead);
if (!cur_thread()->is_inited) {
return REAL(os_lock_lock)(lock);
}
SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock);
REAL(os_lock_lock)(lock);
Acquire(thr, pc, (uptr)lock);
}
TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) {
CHECK(!cur_thread()->is_dead);
if (!cur_thread()->is_inited) {
return REAL(os_lock_trylock)(lock);
}
SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock);
bool result = REAL(os_lock_trylock)(lock);
if (result)
Acquire(thr, pc, (uptr)lock);
return result;
}
TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
CHECK(!cur_thread()->is_dead);
if (!cur_thread()->is_inited) {
return REAL(os_lock_unlock)(lock);
}
SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock);
Release(thr, pc, (uptr)lock);
REAL(os_lock_unlock)(lock);
}
#if defined(__has_include) && __has_include(<xpc/xpc.h>)
TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
xpc_connection_t connection, xpc_handler_t handler) {
SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
handler);
Release(thr, pc, (uptr)connection);
xpc_handler_t new_handler = ^(xpc_object_t object) {
{
SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler);
Acquire(thr, pc, (uptr)connection);
}
handler(object);
};
REAL(xpc_connection_set_event_handler)(connection, new_handler);
}
TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection,
dispatch_block_t barrier) {
SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier);
Release(thr, pc, (uptr)connection);
dispatch_block_t new_barrier = ^() {
{
SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier);
Acquire(thr, pc, (uptr)connection);
}
barrier();
};
REAL(xpc_connection_send_barrier)(connection, new_barrier);
}
TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply,
xpc_connection_t connection, xpc_object_t message,
dispatch_queue_t replyq, xpc_handler_t handler) {
SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection,
message, replyq, handler);
Release(thr, pc, (uptr)connection);
xpc_handler_t new_handler = ^(xpc_object_t object) {
{
SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply);
Acquire(thr, pc, (uptr)connection);
}
handler(object);
};
REAL(xpc_connection_send_message_with_reply)
(connection, message, replyq, new_handler);
}
TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection);
Release(thr, pc, (uptr)connection);
REAL(xpc_connection_cancel)(connection);
}
#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
// On macOS, libc++ is always linked dynamically, so intercepting works the
// usual way.
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
namespace {
struct fake_shared_weak_count {
volatile a64 shared_owners;
volatile a64 shared_weak_owners;
virtual void _unused_0x0() = 0;
virtual void _unused_0x8() = 0;
virtual void on_zero_shared() = 0;
virtual void _unused_0x18() = 0;
virtual void on_zero_shared_weak() = 0;
};
} // namespace
// The following code adds libc++ interceptors for:
// void __shared_weak_count::__release_shared() _NOEXCEPT;
// bool __shared_count::__release_shared() _NOEXCEPT;
// Shared and weak pointers in C++ maintain reference counts via atomics in
// libc++.dylib, which are TSan-invisible, and this leads to false positives in
// destructor code. These interceptors re-implement the functions in their
// entirety so that the mo_acq_rel semantics of the atomic decrement are
// visible to TSan.
//
// Unfortunately, the interceptors cannot simply Acquire/Release some sync
// object and call the original function, because it would have a race between
// the sync and the destruction of the object. Calling both under a lock will
// not work because the destructor can invoke this interceptor again (and even
// in a different thread, so recursive locks don't help). A standalone sketch
// of the release pattern follows the two interceptors below.
STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
fake_shared_weak_count *o) {
if (!flags()->shared_ptr_interceptor)
return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o);
SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
o);
if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
Acquire(thr, pc, (uptr)&o->shared_owners);
o->on_zero_shared();
if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
0) {
Acquire(thr, pc, (uptr)&o->shared_weak_owners);
o->on_zero_shared_weak();
}
}
}
STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
fake_shared_weak_count *o) {
if (!flags()->shared_ptr_interceptor)
return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);
SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
Acquire(thr, pc, (uptr)&o->shared_owners);
o->on_zero_shared();
return true;
}
return false;
}
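// A minimal standalone sketch (illustration only; the names below are not
// part of the runtime) of the release pattern the two interceptors above
// re-implement: a release decrement, where only the thread that drops the
// count to zero performs the matching acquire before running destruction.
namespace {
struct RefCountSketch {
  volatile a64 owners;  // 0 means one owner, as in libc++
};
inline bool ReleaseSharedSketch(RefCountSketch *o) {
  if (__tsan_atomic64_fetch_add(&o->owners, -1, mo_release) == 0) {
    __tsan_atomic_thread_fence(mo_acquire);  // matching acquire
    return true;  // the caller may now safely run on_zero_shared()
  }
  return false;
}
}  // namespace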
namespace {
struct call_once_callback_args {
void (*orig_func)(void *arg);
void *orig_arg;
void *flag;
};
void call_once_callback_wrapper(void *arg) {
call_once_callback_args *new_args = (call_once_callback_args *)arg;
new_args->orig_func(new_args->orig_arg);
__tsan_release(new_args->flag);
}
} // namespace
// This adds a libc++ interceptor for:
// void __call_once(volatile unsigned long&, void*, void(*)(void*));
// C++11 call_once is implemented via an internal function __call_once which is
// inside libc++.dylib, and the atomic release store inside it is thus
// TSan-invisible. To avoid false positives, this interceptor wraps the callback
// function and performs an explicit Release after the user code has run. A
// usage sketch follows the interceptor below.
STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
void *arg, void (*func)(void *arg)) {
call_once_callback_args new_args = {func, arg, flag};
REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args,
call_once_callback_wrapper);
}
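// For illustration (hypothetical user code, not part of the runtime), the
// happens-before edge the wrapper restores looks like this to the program:
//
//   static std::once_flag g_flag;  // hypothetical names
//   static int g_value;
//   int GetValue() {
//     std::call_once(g_flag, [] { g_value = 42; });  // callback gets wrapped
//     return g_value;  // no false positive: the Release/Acquire is visible
//   }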
} // namespace __tsan
#endif // SANITIZER_MAC


@@ -0,0 +1,135 @@
//===-- tsan_interface.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_interface.h"
#include "tsan_interface_ann.h"
#include "tsan_rtl.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#define CALLERPC ((uptr)__builtin_return_address(0))
using namespace __tsan; // NOLINT
typedef u16 uint16_t;
typedef u32 uint32_t;
typedef u64 uint64_t;
void __tsan_init() {
Initialize(cur_thread());
}
void __tsan_flush_memory() {
FlushShadowMemory();
}
void __tsan_read16(void *addr) {
MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
}
void __tsan_write16(void *addr) {
MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
}
void __tsan_read16_pc(void *addr, void *pc) {
MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
MemoryRead(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8);
}
void __tsan_write16_pc(void *addr, void *pc) {
MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8);
}
// __tsan_unaligned_read/write calls are emitted by compiler.
void __tsan_unaligned_read2(const void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
}
void __tsan_unaligned_read4(const void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
}
void __tsan_unaligned_read8(const void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
}
void __tsan_unaligned_read16(const void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, false, false);
}
void __tsan_unaligned_write2(void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
}
void __tsan_unaligned_write4(void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
}
void __tsan_unaligned_write8(void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
}
void __tsan_unaligned_write16(void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, true, false);
}
// __sanitizer_unaligned_load/store are for user instrumentation.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *addr) {
__tsan_unaligned_read2(addr);
return *addr;
}
SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *addr) {
__tsan_unaligned_read4(addr);
return *addr;
}
SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *addr) {
__tsan_unaligned_read8(addr);
return *addr;
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *addr, u16 v) {
__tsan_unaligned_write2(addr);
*addr = v;
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *addr, u32 v) {
__tsan_unaligned_write4(addr);
*addr = v;
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *addr, u64 v) {
__tsan_unaligned_write8(addr);
*addr = v;
}
} // extern "C"
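// A minimal usage sketch (illustration only; ReadPackedU32 is not part of the
// runtime): user code can route a potentially unaligned access through these
// helpers so that TSan checks the access while performing it safely.
static inline u32 ReadPackedU32(const char *buf, uptr off) {
  return __sanitizer_unaligned_load32((const uu32 *)(buf + off));
}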
void __tsan_acquire(void *addr) {
Acquire(cur_thread(), CALLERPC, (uptr)addr);
}
void __tsan_release(void *addr) {
Release(cur_thread(), CALLERPC, (uptr)addr);
}


@@ -0,0 +1,405 @@
//===-- tsan_interface.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// The functions declared in this header will be inserted by the instrumentation
// module.
// This header can be included by the instrumented program or by TSan tests.
//===----------------------------------------------------------------------===//
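// For example (illustration only), for instrumented code such as
//   int v = *p;   // 4-byte load
//   *q = v;       // 4-byte store
// the instrumentation emits calls roughly equivalent to
//   __tsan_read4(p);  int v = *p;
//   __tsan_write4(q); *q = v;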
#ifndef TSAN_INTERFACE_H
#define TSAN_INTERFACE_H
#include <sanitizer_common/sanitizer_internal_defs.h>
using __sanitizer::uptr;
using __sanitizer::tid_t;
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __tsan_.
#ifdef __cplusplus
extern "C" {
#endif
#if !SANITIZER_GO
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_init();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_flush_memory();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read8(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read16(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write1(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write2(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read2(const void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read4(const void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read8(const void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read16(const void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write2(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write4(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write8(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write16(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1_pc(void *addr, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2_pc(void *addr, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4_pc(void *addr, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read8_pc(void *addr, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read16_pc(void *addr, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write1_pc(void *addr, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write2_pc(void *addr, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4_pc(void *addr, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8_pc(void *addr, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16_pc(void *addr, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_vptr_update(void **vptr_p, void *new_val);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_begin();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_end();
SANITIZER_INTERFACE_ATTRIBUTE
void *__tsan_external_register_tag(const char *object_type);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_register_header(void *tag, const char *header);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_assign_tag(void *addr, void *tag);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_read(void *addr, void *caller_pc, void *tag);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_write(void *addr, void *caller_pc, void *tag);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_read_range(void *addr, unsigned long size); // NOLINT
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_write_range(void *addr, unsigned long size); // NOLINT
// User may provide function that would be called right when TSan detects
// an error. The argument 'report' is an opaque pointer that can be used to
// gather additional information using other TSan report API functions.
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_on_report(void *report);
// If TSan is currently reporting a detected issue on the current thread,
// returns an opaque pointer to the current report. Otherwise returns NULL.
SANITIZER_INTERFACE_ATTRIBUTE
void *__tsan_get_current_report();
// Returns a report's description (issue type), number of duplicate issues
// found, counts of array data (stack traces, memory operations, locations,
// mutexes, threads, unique thread IDs) and a stack trace of a sleep() call (if
// one was involved in the issue).
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_data(void *report, const char **description, int *count,
int *stack_count, int *mop_count, int *loc_count,
int *mutex_count, int *thread_count,
int *unique_tid_count, void **sleep_trace,
uptr trace_size);
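// For illustration (hypothetical client code, not part of this header), a
// program could override __tsan_on_report to log a one-line summary:
//
//   extern "C" void __tsan_on_report(void *report) {
//     const char *desc = nullptr;
//     int count = 0, stack_count = 0, mop_count = 0, loc_count = 0;
//     int mutex_count = 0, thread_count = 0, unique_tid_count = 0;
//     void *sleep_trace[16] = {};
//     __tsan_get_report_data(report, &desc, &count, &stack_count, &mop_count,
//                            &loc_count, &mutex_count, &thread_count,
//                            &unique_tid_count, sleep_trace, 16);
//     fprintf(stderr, "tsan: %s (%d mops, %d threads)\n", desc, mop_count,
//             thread_count);
//   }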
// Returns information about stack traces included in the report.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_stack(void *report, uptr idx, void **trace,
uptr trace_size);
// Returns information about memory operations included in the report.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
int *size, int *write, int *atomic, void **trace,
uptr trace_size);
// Returns information about locations included in the report.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_loc(void *report, uptr idx, const char **type,
void **addr, uptr *start, uptr *size, int *tid,
int *fd, int *suppressable, void **trace,
uptr trace_size);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_loc_object_type(void *report, uptr idx,
const char **object_type);
// Returns information about mutexes included in the report.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
int *destroyed, void **trace, uptr trace_size);
// Returns information about threads included in the report.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id,
int *running, const char **name, int *parent_tid,
void **trace, uptr trace_size);
// Returns information about unique thread IDs included in the report.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid);
// Returns the type of the pointer (heap, stack, global, ...) and if possible
// also the starting address (e.g. of a heap allocation) and size.
SANITIZER_INTERFACE_ATTRIBUTE
const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
uptr *region_address, uptr *region_size);
// Returns the allocation stack for a heap pointer.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
tid_t *os_id);
#endif // SANITIZER_GO
#ifdef __cplusplus
} // extern "C"
#endif
namespace __tsan {
// These should match declarations from public tsan_interface_atomic.h header.
typedef unsigned char a8;
typedef unsigned short a16; // NOLINT
typedef unsigned int a32;
typedef unsigned long long a64; // NOLINT
#if !SANITIZER_GO && (defined(__SIZEOF_INT128__) \
|| (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
# define __TSAN_HAS_INT128 0
#endif
// Part of ABI, do not change.
// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
typedef enum {
mo_relaxed,
mo_consume,
mo_acquire,
mo_release,
mo_acq_rel,
mo_seq_cst
} morder;
struct ThreadState;
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo,
morder fmo);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo,
morder fmo);
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
morder mo, morder fmo);
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
morder mo, morder fmo);
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
morder mo, morder fmo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
morder mo, morder fmo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
u8 *a);
} // extern "C"
} // namespace __tsan
#endif // TSAN_INTERFACE_H

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff