Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
This commit is contained in:
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions

View File

@@ -0,0 +1,69 @@
# Make the TSan runtime headers (tsan_interface.h etc.) visible to every
# test target configured below.  Directory-scoped on purpose: the actual
# test targets are created by shared compiler-rt helper macros, not here.
include_directories(../rtl)
# Umbrella target that aggregates all TSan unit-test targets.
add_custom_target(TsanUnitTests)
set_target_properties(TsanUnitTests PROPERTIES
FOLDER "Compiler-RT Tests")
# Compile flags for the unit tests: runtime flags + common unittest/gtest
# flags + include paths into the compiler-rt tree.  RTTI is disabled to
# match how the runtime itself is built.
set(TSAN_UNITTEST_CFLAGS
${TSAN_CFLAGS}
${COMPILER_RT_UNITTEST_CFLAGS}
${COMPILER_RT_GTEST_CFLAGS}
-I${COMPILER_RT_SOURCE_DIR}/include
-I${COMPILER_RT_SOURCE_DIR}/lib
-I${COMPILER_RT_SOURCE_DIR}/lib/tsan/rtl
-DGTEST_HAS_RTTI=0)
# Default: test every architecture the runtime supports; Darwin narrows
# this to the host archs below.
set(TSAN_TEST_ARCH ${TSAN_SUPPORTED_ARCH})
if(APPLE)
# Create a static library for test dependencies.
set(TSAN_TEST_RUNTIME_OBJECTS
$<TARGET_OBJECTS:RTTsan_dynamic.osx>
$<TARGET_OBJECTS:RTInterception.osx>
$<TARGET_OBJECTS:RTSanitizerCommon.osx>
$<TARGET_OBJECTS:RTSanitizerCommonLibc.osx>
$<TARGET_OBJECTS:RTUbsan.osx>)
set(TSAN_TEST_RUNTIME RTTsanTest)
add_library(${TSAN_TEST_RUNTIME} STATIC ${TSAN_TEST_RUNTIME_OBJECTS})
set_target_properties(${TSAN_TEST_RUNTIME} PROPERTIES
ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
# Only build/run the tests for architectures the host can execute.
darwin_filter_host_archs(TSAN_SUPPORTED_ARCH TSAN_TEST_ARCH)
list(APPEND TSAN_UNITTEST_CFLAGS ${DARWIN_osx_CFLAGS})
# On Darwin the runtime objects are linked in directly, so only libc++
# plus the weak interface symbols are needed at link time.
set(LINK_FLAGS "-lc++")
add_weak_symbols("ubsan" LINK_FLAGS)
add_weak_symbols("sanitizer_common" LINK_FLAGS)
else()
# Elsewhere the driver pulls the runtime in via -fsanitize=thread.
set(LINK_FLAGS "-fsanitize=thread;-lstdc++;-lm")
endif()
# Expand each runtime header name into an absolute path so the unit tests
# are rebuilt whenever any TSan runtime header changes.
set(TSAN_RTL_HEADERS)
foreach(header IN LISTS TSAN_HEADERS)
  list(APPEND TSAN_RTL_HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/../${header}")
endforeach()
# add_tsan_unittest(<name>
# SOURCES <sources list>
# HEADERS <extra headers list>)
#
# Registers one unit-test binary per architecture in TSAN_TEST_ARCH.
# Deliberately a macro: generate_compiler_rt_tests mutates variables in
# the caller's scope (compiler-rt convention).
macro(add_tsan_unittest testname)
cmake_parse_arguments(TEST "" "" "SOURCES;HEADERS" ${ARGN})
if(UNIX)
foreach(arch ${TSAN_TEST_ARCH})
# Reset the per-arch object list before each generation pass.
set(TsanUnitTestsObjects)
generate_compiler_rt_tests(TsanUnitTestsObjects TsanUnitTests
"${testname}-${arch}-Test" ${arch}
SOURCES ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE}
RUNTIME ${TSAN_TEST_RUNTIME}
COMPILE_DEPS ${TEST_HEADERS} ${TSAN_RTL_HEADERS}
DEPS gtest tsan
CFLAGS ${TSAN_UNITTEST_CFLAGS}
LINK_FLAGS ${LINK_FLAGS})
endforeach()
endif()
endmacro()
# Unit tests run on the build host, so only descend when the host can
# execute them; Android targets are cross-compiled and excluded.
if(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID)
add_subdirectory(rtl)
add_subdirectory(unit)
endif()

View File

@@ -0,0 +1,19 @@
# Sources for the TSan runtime-level unit tests.
set(TSAN_RTL_TEST_SOURCES
tsan_bench.cc
tsan_mop.cc
tsan_mutex.cc
tsan_posix.cc
tsan_string.cc
tsan_test.cc
tsan_thread.cc)
if(UNIX)
# POSIX implementation of the test utilities (threads, mutexes, accesses).
list(APPEND TSAN_RTL_TEST_SOURCES tsan_test_util_posix.cc)
endif()
set(TSAN_RTL_TEST_HEADERS
tsan_test_util.h)
add_tsan_unittest(TsanRtlTest
SOURCES ${TSAN_RTL_TEST_SOURCES}
HEADERS ${TSAN_RTL_TEST_HEADERS})

View File

@@ -0,0 +1,105 @@
//===-- tsan_bench.cc -----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_test_util.h"
#include "tsan_interface.h"
#include "tsan_defs.h"
#include "gtest/gtest.h"
#include <stdint.h>
// Buffer length (elements) touched by each benchmark iteration.
const int kSize = 128;
// Outer repetition count; sized so the instrumented loop dominates runtime.
const int kRepeat = 2*1024*1024;
// No-op stand-in for a __tsan_read/write entry point; measures bare loop cost.
void noinstr(void *p) {}
// Runs kRepeat passes over a kSize-element volatile buffer of T, invoking
// the given instrumentation hook before each element increment.  The
// volatile qualifier keeps the compiler from optimizing the accesses away.
template<typename T, void(*__tsan_mop)(void *p)>
static void Benchmark() {
volatile T data[kSize];
for (int i = 0; i < kRepeat; i++) {
for (int j = 0; j < kSize; j++) {
__tsan_mop((void*)&data[j]);
data[j]++;
}
}
}
// Benchmark matrix over access sizes 1/2/4/8 bytes.  For each size there is
// an uninstrumented baseline (noinstr) plus the __tsan_readN/__tsan_writeN
// instrumented variants, so the per-access instrumentation cost can be
// isolated.  All are DISABLED_ so they only run when explicitly requested.
TEST(DISABLED_BENCH, Mop1) {
  Benchmark<uint8_t, noinstr>();
}
TEST(DISABLED_BENCH, Mop1Read) {
  Benchmark<uint8_t, __tsan_read1>();
}
TEST(DISABLED_BENCH, Mop1Write) {
  Benchmark<uint8_t, __tsan_write1>();
}
TEST(DISABLED_BENCH, Mop2) {
  Benchmark<uint16_t, noinstr>();
}
TEST(DISABLED_BENCH, Mop2Read) {
  Benchmark<uint16_t, __tsan_read2>();
}
TEST(DISABLED_BENCH, Mop2Write) {
  Benchmark<uint16_t, __tsan_write2>();
}
TEST(DISABLED_BENCH, Mop4) {
  Benchmark<uint32_t, noinstr>();
}
TEST(DISABLED_BENCH, Mop4Read) {
  Benchmark<uint32_t, __tsan_read4>();
}
TEST(DISABLED_BENCH, Mop4Write) {
  Benchmark<uint32_t, __tsan_write4>();
}
TEST(DISABLED_BENCH, Mop8) {
  // Fix: was Benchmark<uint8_t, noinstr>() — a copy-paste slip.  The 8-byte
  // baseline must iterate 64-bit elements to be comparable with
  // Mop8Read/Mop8Write below.
  Benchmark<uint64_t, noinstr>();
}
TEST(DISABLED_BENCH, Mop8Read) {
  Benchmark<uint64_t, __tsan_read8>();
}
TEST(DISABLED_BENCH, Mop8Write) {
  Benchmark<uint64_t, __tsan_write8>();
}
// Measures the cost of function entry/exit instrumentation: kSize balanced
// __tsan_func_entry/__tsan_func_exit pairs per outer iteration.
TEST(DISABLED_BENCH, FuncCall) {
for (int i = 0; i < kRepeat; i++) {
for (int j = 0; j < kSize; j++)
__tsan_func_entry((void*)(uintptr_t)j);
for (int j = 0; j < kSize; j++)
__tsan_func_exit();
}
}
// Measures lock/unlock cost on a mutex that is mostly uncontended and
// local to the main thread after a brief warm-up from other threads.
TEST(DISABLED_BENCH, MutexLocal) {
Mutex m;
ScopedThread().Create(m);
// Let a few short-lived threads touch the mutex first so its sync state
// is not trivially thread-local from the start.
for (int i = 0; i < 50; i++) {
ScopedThread t;
t.Lock(m);
t.Unlock(m);
}
for (int i = 0; i < 16*1024*1024; i++) {
m.Lock();
m.Unlock();
}
ScopedThread().Destroy(m);
}

View File

@@ -0,0 +1,233 @@
//===-- tsan_mop.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_interface.h"
#include "tsan_test_util.h"
#include "gtest/gtest.h"
#include <stddef.h>
#include <stdint.h>
// Basic memory-operation matrix.  The boolean argument to ReadN/WriteN is
// expect_race: true means the checker must report a race on that access.
// Single access — no race possible.
TEST(ThreadSanitizer, SimpleWrite) {
ScopedThread t;
MemLoc l;
t.Write1(l);
}
// Two threads writing to distinct locations — no race.
TEST(ThreadSanitizer, SimpleWriteWrite) {
ScopedThread t1, t2;
MemLoc l1, l2;
t1.Write1(l1);
t2.Write1(l2);
}
// Unsynchronized write/write on the same byte — race expected.
TEST(ThreadSanitizer, WriteWriteRace) {
ScopedThread t1, t2;
MemLoc l;
t1.Write1(l);
t2.Write1(l, true);
}
// Read then concurrent write — race expected on the write.
TEST(ThreadSanitizer, ReadWriteRace) {
ScopedThread t1, t2;
MemLoc l;
t1.Read1(l);
t2.Write1(l, true);
}
// Write then concurrent read — race expected on the read.
TEST(ThreadSanitizer, WriteReadRace) {
ScopedThread t1, t2;
MemLoc l;
t1.Write1(l);
t2.Read1(l, true);
}
// Concurrent reads never race.
TEST(ThreadSanitizer, ReadReadNoRace) {
ScopedThread t1, t2;
MemLoc l;
t1.Read1(l);
t2.Read1(l);
}
// A thread's own read after its write is fine; the other thread's read
// still races with the initial write.
TEST(ThreadSanitizer, WriteThenRead) {
MemLoc l;
ScopedThread t1, t2;
t1.Write1(l);
t1.Read1(l);
t2.Read1(l, true);
}
// Taking a lock around a later read does not retroactively synchronize an
// earlier unlocked write: t2's unlocked read still races with t1's write.
TEST(ThreadSanitizer, WriteThenLockedRead) {
Mutex m(Mutex::RW);
MainThread t0;
t0.Create(m);
MemLoc l;
{
ScopedThread t1, t2;
t1.Write8(l);
t1.Lock(m);
t1.Read8(l);
t1.Unlock(m);
t2.Read8(l, true);
}
t0.Destroy(m);
}
// Symmetric case: a locked write followed by t2's unlocked read still
// races because t2 never acquires the mutex.
TEST(ThreadSanitizer, LockedWriteThenRead) {
Mutex m(Mutex::RW);
MainThread t0;
t0.Create(m);
MemLoc l;
{
ScopedThread t1, t2;
t1.Lock(m);
t1.Write8(l);
t1.Unlock(m);
t1.Read8(l);
t2.Read8(l, true);
}
t0.Destroy(m);
}
// Overlapping accesses at different offsets/sizes within the same MemLoc
// must still be detected as races.  Access(addr, is_write, size, expect_race).
TEST(ThreadSanitizer, RaceWithOffset) {
ScopedThread t1, t2;
{
MemLoc l;
t1.Access(l.loc(), true, 8, false);
t2.Access((char*)l.loc() + 4, true, 4, true);
}
{
MemLoc l;
t1.Access(l.loc(), true, 8, false);
t2.Access((char*)l.loc() + 7, true, 1, true);
}
{
MemLoc l;
t1.Access((char*)l.loc() + 4, true, 4, false);
t2.Access((char*)l.loc() + 4, true, 2, true);
}
{
MemLoc l;
t1.Access((char*)l.loc() + 4, true, 4, false);
t2.Access((char*)l.loc() + 6, true, 2, true);
}
{
// Unaligned pair straddling a boundary: [3,5) vs byte 4.
MemLoc l;
t1.Access((char*)l.loc() + 3, true, 2, false);
t2.Access((char*)l.loc() + 4, true, 1, true);
}
{
MemLoc l;
t1.Access((char*)l.loc() + 1, true, 8, false);
t2.Access((char*)l.loc() + 3, true, 1, true);
}
}
// Same idea, with the small access issued before/after the large one.
TEST(ThreadSanitizer, RaceWithOffset2) {
ScopedThread t1, t2;
{
MemLoc l;
t1.Access((char*)l.loc(), true, 4, false);
t2.Access((char*)l.loc() + 2, true, 1, true);
}
{
MemLoc l;
t1.Access((char*)l.loc() + 2, true, 1, false);
t2.Access((char*)l.loc(), true, 4, true);
}
}
// Adjacent but non-overlapping ranges must NOT be reported.
TEST(ThreadSanitizer, NoRaceWithOffset) {
ScopedThread t1, t2;
{
MemLoc l;
t1.Access(l.loc(), true, 4, false);
t2.Access((char*)l.loc() + 4, true, 4, false);
}
{
// [3,5) does not overlap [1,3) or [5,7).
MemLoc l;
t1.Access((char*)l.loc() + 3, true, 2, false);
t2.Access((char*)l.loc() + 1, true, 2, false);
t2.Access((char*)l.loc() + 5, true, 2, false);
}
}
// A race must still be reported even when the other accessor has already
// exited (the temporary ScopedThread is joined before t.Write1 runs).
TEST(ThreadSanitizer, RaceWithDeadThread) {
MemLoc l;
ScopedThread t;
ScopedThread().Write1(l);
t.Write1(l, true);
}
// A vptr store racing with a read of the SAME value is treated as benign
// by the special vptr instrumentation.
TEST(ThreadSanitizer, BenignRaceOnVptr) {
void *vptr_storage;
MemLoc vptr(&vptr_storage), val;
vptr_storage = val.loc();
ScopedThread t1, t2;
t1.VptrUpdate(vptr, val);
t2.Read8(vptr);
}
// Storing a DIFFERENT value through the vptr update is a real race.
TEST(ThreadSanitizer, HarmfulRaceOnVptr) {
void *vptr_storage;
MemLoc vptr(&vptr_storage), val1, val2;
vptr_storage = val1.loc();
ScopedThread t1, t2;
t1.VptrUpdate(vptr, val2);
t2.Read8(vptr, true);
}
// Trivial functions used only to push frames onto the reported call stack;
// the volatile locals keep them from being optimized to nothing.
static void foo() {
volatile int x = 42;
int x2 = x;
(void)x2;
}
static void bar() {
volatile int x = 43;
int x2 = x;
(void)x2;
}
// The report for a race with a finished thread must include that thread's
// recorded stack (foo/bar frames pushed via Call before the write).
TEST(ThreadSanitizer, ReportDeadThread) {
MemLoc l;
ScopedThread t1;
{
ScopedThread t2;
t2.Call(&foo);
t2.Call(&bar);
t2.Write1(l);
}
t1.Write1(l, true);
}
struct ClassWithStatic {
static int Data[4];
};
int ClassWithStatic::Data[4];
static void foobarbaz() {}
// Race on a global (static class member) between the main thread and a
// test thread; exercises report generation for global data.
TEST(ThreadSanitizer, ReportRace) {
ScopedThread t1;
MainThread().Access(&ClassWithStatic::Data, true, 4, false);
t1.Call(&foobarbaz);
t1.Access(&ClassWithStatic::Data, true, 2, true);
t1.Return();
}

View File

@@ -0,0 +1,221 @@
//===-- tsan_mutex.cc -----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_atomic.h"
#include "tsan_interface.h"
#include "tsan_interface_ann.h"
#include "tsan_test_util.h"
#include "gtest/gtest.h"
#include <stdint.h>
namespace __tsan {
// Single-thread state-machine checks for each mutex flavor: lock/unlock,
// trylock on a free mutex succeeds, trylock on a held mutex fails.
TEST(ThreadSanitizer, BasicMutex) {
ScopedThread t;
Mutex m;
t.Create(m);
t.Lock(m);
t.Unlock(m);
CHECK(t.TryLock(m));
t.Unlock(m);
t.Lock(m);
CHECK(!t.TryLock(m));
t.Unlock(m);
t.Destroy(m);
}
// Same transitions for a spin mutex (aliased to Normal on Darwin).
TEST(ThreadSanitizer, BasicSpinMutex) {
ScopedThread t;
Mutex m(Mutex::Spin);
t.Create(m);
t.Lock(m);
t.Unlock(m);
CHECK(t.TryLock(m));
t.Unlock(m);
t.Lock(m);
CHECK(!t.TryLock(m));
t.Unlock(m);
t.Destroy(m);
}
// RW mutex: additionally checks reader/writer exclusion — try-write fails
// under a read lock, try-read fails under a write lock, and two read
// locks may be held at once.
TEST(ThreadSanitizer, BasicRwMutex) {
ScopedThread t;
Mutex m(Mutex::RW);
t.Create(m);
t.Lock(m);
t.Unlock(m);
CHECK(t.TryLock(m));
t.Unlock(m);
t.Lock(m);
CHECK(!t.TryLock(m));
t.Unlock(m);
t.ReadLock(m);
t.ReadUnlock(m);
CHECK(t.TryReadLock(m));
t.ReadUnlock(m);
t.Lock(m);
CHECK(!t.TryReadLock(m));
t.Unlock(m);
t.ReadLock(m);
CHECK(!t.TryLock(m));
t.ReadUnlock(m);
t.ReadLock(m);
CHECK(t.TryReadLock(m));
t.ReadUnlock(m);
t.ReadUnlock(m);
t.Destroy(m);
}
// Cross-thread synchronization: accesses to l are serialized by the mutex,
// so no race may be reported in any of the following three tests.
TEST(ThreadSanitizer, Mutex) {
Mutex m;
MainThread t0;
t0.Create(m);
ScopedThread t1, t2;
MemLoc l;
t1.Lock(m);
t1.Write1(l);
t1.Unlock(m);
t2.Lock(m);
t2.Write1(l);
t2.Unlock(m);
t2.Destroy(m);
}
TEST(ThreadSanitizer, SpinMutex) {
Mutex m(Mutex::Spin);
MainThread t0;
t0.Create(m);
ScopedThread t1, t2;
MemLoc l;
t1.Lock(m);
t1.Write1(l);
t1.Unlock(m);
t2.Lock(m);
t2.Write1(l);
t2.Unlock(m);
t2.Destroy(m);
}
// RW variant: the shared-read section in the middle must also be race-free.
TEST(ThreadSanitizer, RwMutex) {
Mutex m(Mutex::RW);
MainThread t0;
t0.Create(m);
ScopedThread t1, t2, t3;
MemLoc l;
t1.Lock(m);
t1.Write1(l);
t1.Unlock(m);
t2.Lock(m);
t2.Write1(l);
t2.Unlock(m);
t1.ReadLock(m);
t3.ReadLock(m);
t1.Read1(l);
t3.Read1(l);
t1.ReadUnlock(m);
t3.ReadUnlock(m);
t2.Lock(m);
t2.Write1(l);
t2.Unlock(m);
t2.Destroy(m);
}
TEST(ThreadSanitizer, StaticMutex) {
// Emulates statically initialized mutex.
// StaticInit bypasses the runtime's mutex-create event, mimicking
// PTHREAD_MUTEX_INITIALIZER, so the runtime must cope with a lock it
// never saw created.
Mutex m;
m.StaticInit();
{
ScopedThread t1, t2;
t1.Lock(m);
t1.Unlock(m);
t2.Lock(m);
t2.Unlock(m);
}
MainThread().Destroy(m);
}
// Reader loop for the Singleton benchmark: repeatedly acquire-loads the
// singleton pointer and reads through it, annotating each step for TSan.
static void *singleton_thread(void *param) {
atomic_uintptr_t *singleton = (atomic_uintptr_t *)param;
for (int i = 0; i < 4*1024*1024; i++) {
int *val = (int *)atomic_load(singleton, memory_order_acquire);
__tsan_acquire(singleton);
__tsan_read4(val);
CHECK_EQ(*val, 42);
}
return 0;
}
// Benchmarks the common publish-once/read-many singleton pattern with
// kThreadCount concurrent readers.
TEST(DISABLED_BENCH_ThreadSanitizer, Singleton) {
const int kClockSize = 100;
const int kThreadCount = 8;
// Inflate the main thread's vector clock by creating and joining
// kClockSize short-lived threads, so the benchmark runs with a
// realistically large clock.
for (int i = 0; i < kClockSize; i++) {
ScopedThread t1;
(void)t1;
}
// Create the singleton.
int val = 42;
__tsan_write4(&val);
atomic_uintptr_t singleton;
__tsan_release(&singleton);
atomic_store(&singleton, (uintptr_t)&val, memory_order_release);
// Create reader threads.
pthread_t threads[kThreadCount];
for (int t = 0; t < kThreadCount; t++)
pthread_create(&threads[t], 0, singleton_thread, &singleton);
for (int t = 0; t < kThreadCount; t++)
pthread_join(threads[t], 0);
}
// Benchmarks polling an acquire-loaded stop flag in a tight loop.
TEST(DISABLED_BENCH_ThreadSanitizer, StopFlag) {
const int kClockSize = 100;
const int kIters = 16*1024*1024;
// Inflate the thread's vector clock as above.
for (int i = 0; i < kClockSize; i++) {
ScopedThread t1;
(void)t1;
}
// Create the stop flag.
atomic_uintptr_t flag;
__tsan_release(&flag);
atomic_store(&flag, 0, memory_order_release);
// Read it a lot.
for (int i = 0; i < kIters; i++) {
uptr v = atomic_load(&flag, memory_order_acquire);
__tsan_acquire(&flag);
CHECK_EQ(v, 0);
}
}
} // namespace __tsan

View File

@@ -0,0 +1,155 @@
//===-- tsan_posix.cc -----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_interface.h"
#include "tsan_posix_util.h"
#include "tsan_test_util.h"
#include "gtest/gtest.h"
#include <pthread.h>
// Bundle passed to each TSD-destructor test thread: the pthread key, a
// mutex guarding the shared counter, a per-thread control value (42/43/44
// selects how many times the destructor re-registers itself), and a
// pointer to the shared invocation counter.
struct thread_key {
pthread_key_t key;
pthread_mutex_t *mtx;
int val;
int *cnt;
thread_key(pthread_key_t key, pthread_mutex_t *mtx, int val, int *cnt)
: key(key)
, mtx(mtx)
, val(val)
, cnt(cnt) {
}
};
// TSD destructor under test.  Counts invocations under the mutex; values
// 43 and 44 re-register the key so the destructor runs again (val 44 runs
// 3 times, 43 runs 2, 42 runs once — 6 calls total across the 3 threads).
// Renamed from the original "thread_secific_dtor" typo; defined and used
// only within this file.
static void thread_specific_dtor(void *v) {
  thread_key *k = (thread_key *)v;
  EXPECT_EQ(__interceptor_pthread_mutex_lock(k->mtx), 0);
  (*k->cnt)++;
  // Annotate the 4-byte write to the shared counter itself.  The original
  // passed &k->cnt (the address of the pointer field), which is not the
  // location the increment above actually writes.
  __tsan_write4(k->cnt);
  EXPECT_EQ(__interceptor_pthread_mutex_unlock(k->mtx), 0);
  if (k->val == 42) {
    // Okay.
  } else if (k->val == 43 || k->val == 44) {
    k->val--;
    EXPECT_EQ(pthread_setspecific(k->key, k), 0);
  } else {
    ASSERT_TRUE(false);
  }
}

// Thread body: just installs its thread_key so the destructor fires at exit.
static void *dtors_thread(void *p) {
  thread_key *k = (thread_key *)p;
  EXPECT_EQ(pthread_setspecific(k->key, k), 0);
  return 0;
}

// Verifies that pthread TSD destructors run (including re-registration up
// to the implementation's iteration limit) and are properly synchronized:
// expects exactly 1 + 2 + 3 = 6 destructor invocations.
TEST(Posix, ThreadSpecificDtors) {
  int cnt = 0;
  pthread_key_t key;
  EXPECT_EQ(pthread_key_create(&key, thread_specific_dtor), 0);
  pthread_mutex_t mtx;
  EXPECT_EQ(__interceptor_pthread_mutex_init(&mtx, 0), 0);
  pthread_t th[3];
  thread_key k1 = thread_key(key, &mtx, 42, &cnt);
  thread_key k2 = thread_key(key, &mtx, 43, &cnt);
  thread_key k3 = thread_key(key, &mtx, 44, &cnt);
  EXPECT_EQ(__interceptor_pthread_create(&th[0], 0, dtors_thread, &k1), 0);
  EXPECT_EQ(__interceptor_pthread_create(&th[1], 0, dtors_thread, &k2), 0);
  EXPECT_EQ(__interceptor_pthread_join(th[0], 0), 0);
  EXPECT_EQ(__interceptor_pthread_create(&th[2], 0, dtors_thread, &k3), 0);
  EXPECT_EQ(__interceptor_pthread_join(th[1], 0), 0);
  EXPECT_EQ(__interceptor_pthread_join(th[2], 0), 0);
  EXPECT_EQ(pthread_key_delete(key), 0);
  EXPECT_EQ(6, cnt);
}
#if !defined(__aarch64__) && !defined(__APPLE__)
static __thread int local_var;
// Recursively spawns a tree of threads ((long)p levels deep, kThreads wide),
// each writing its own __thread variable; checks TLS accesses from many
// threads don't produce false reports.
static void *local_thread(void *p) {
  __tsan_write1(&local_var);
  __tsan_write1(&p);
  if (p == 0)
    return 0;
  const int kThreads = 4;
  pthread_t th[kThreads];
  for (int i = 0; i < kThreads; i++)
    EXPECT_EQ(__interceptor_pthread_create(&th[i], 0, local_thread,
              (void*)((long)p - 1)), 0);  // NOLINT
  for (int i = 0; i < kThreads; i++)
    EXPECT_EQ(__interceptor_pthread_join(th[i], 0), 0);
  return 0;
}
#endif

TEST(Posix, ThreadLocalAccesses) {
  // The test is failing with high thread count for aarch64.
  // FIXME: track down the issue and re-enable the test.
  // On Darwin, we're running unit tests without interceptors and __thread is
  // using malloc and free, which causes false data race reports. On rare
  // occasions on powerpc64le this test also fails.
  // Fix: the original guard tested !defined(powerpc64le), but no compiler
  // predefines a "powerpc64le" macro, so that clause was always true and
  // the test still ran there.  __powerpc64__ is the macro compilers define.
#if !defined(__aarch64__) && !defined(__APPLE__) && !defined(__powerpc64__)
  local_thread((void*)2);
#endif
}
// Shared state for the condition-variable handshake: mutex, condvar, and a
// phase counter (0 -> 1 child signals, 2 main resumes child, 3 child done).
struct CondContext {
pthread_mutex_t m;
pthread_cond_t c;
int data;
};
// Child side of the three-phase handshake described above.
static void *cond_thread(void *p) {
CondContext &ctx = *static_cast<CondContext*>(p);
EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
EXPECT_EQ(ctx.data, 0);
ctx.data = 1;
EXPECT_EQ(__interceptor_pthread_cond_signal(&ctx.c), 0);
EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
while (ctx.data != 2)
EXPECT_EQ(__interceptor_pthread_cond_wait(&ctx.c, &ctx.m), 0);
EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
ctx.data = 3;
EXPECT_EQ(pthread_cond_broadcast(&ctx.c), 0);
EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
return 0;
}
// Drives the handshake from the main thread and verifies the intercepted
// pthread_cond_* entry points synchronize correctly (no false reports).
TEST(Posix, CondBasic) {
CondContext ctx;
EXPECT_EQ(__interceptor_pthread_mutex_init(&ctx.m, 0), 0);
EXPECT_EQ(__interceptor_pthread_cond_init(&ctx.c, 0), 0);
ctx.data = 0;
pthread_t th;
EXPECT_EQ(__interceptor_pthread_create(&th, 0, cond_thread, &ctx), 0);
EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
while (ctx.data != 1)
EXPECT_EQ(__interceptor_pthread_cond_wait(&ctx.c, &ctx.m), 0);
ctx.data = 2;
EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
EXPECT_EQ(pthread_cond_broadcast(&ctx.c), 0);
EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
while (ctx.data != 3)
EXPECT_EQ(__interceptor_pthread_cond_wait(&ctx.c, &ctx.m), 0);
EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
EXPECT_EQ(__interceptor_pthread_join(th, 0), 0);
EXPECT_EQ(__interceptor_pthread_cond_destroy(&ctx.c), 0);
EXPECT_EQ(__interceptor_pthread_mutex_destroy(&ctx.m), 0);
}

View File

@@ -0,0 +1,77 @@
//===-- tsan_posix_util.h ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Test POSIX utils.
//===----------------------------------------------------------------------===//
#ifndef TSAN_POSIX_UTIL_H
#define TSAN_POSIX_UTIL_H
#include <pthread.h>
// On Darwin the TSan interceptors are exported under a wrap_ prefix rather
// than __interceptor_, so map the names used by the tests accordingly.
#ifdef __APPLE__
#define __interceptor_memcpy wrap_memcpy
#define __interceptor_memset wrap_memset
#define __interceptor_pthread_create wrap_pthread_create
#define __interceptor_pthread_join wrap_pthread_join
#define __interceptor_pthread_detach wrap_pthread_detach
#define __interceptor_pthread_mutex_init wrap_pthread_mutex_init
#define __interceptor_pthread_mutex_lock wrap_pthread_mutex_lock
#define __interceptor_pthread_mutex_unlock wrap_pthread_mutex_unlock
#define __interceptor_pthread_mutex_destroy wrap_pthread_mutex_destroy
#define __interceptor_pthread_mutex_trylock wrap_pthread_mutex_trylock
#define __interceptor_pthread_rwlock_init wrap_pthread_rwlock_init
#define __interceptor_pthread_rwlock_destroy wrap_pthread_rwlock_destroy
#define __interceptor_pthread_rwlock_trywrlock wrap_pthread_rwlock_trywrlock
#define __interceptor_pthread_rwlock_wrlock wrap_pthread_rwlock_wrlock
#define __interceptor_pthread_rwlock_unlock wrap_pthread_rwlock_unlock
#define __interceptor_pthread_rwlock_rdlock wrap_pthread_rwlock_rdlock
#define __interceptor_pthread_rwlock_tryrdlock wrap_pthread_rwlock_tryrdlock
#define __interceptor_pthread_cond_init wrap_pthread_cond_init
#define __interceptor_pthread_cond_signal wrap_pthread_cond_signal
#define __interceptor_pthread_cond_broadcast wrap_pthread_cond_broadcast
#define __interceptor_pthread_cond_wait wrap_pthread_cond_wait
#define __interceptor_pthread_cond_destroy wrap_pthread_cond_destroy
#endif
// Direct declarations of the interceptor entry points; calling these
// bypasses the usual symbol interposition, which lets the tests use
// pthread primitives before __tsan_init has run.
// NOTE(review): uptr is not declared in this header — includers are
// expected to have the sanitizer definitions in scope first.
extern "C" void *__interceptor_memcpy(void *, const void *, uptr);
extern "C" void *__interceptor_memset(void *, int, uptr);
extern "C" int __interceptor_pthread_create(pthread_t *thread,
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg);
extern "C" int __interceptor_pthread_join(pthread_t thread, void **value_ptr);
extern "C" int __interceptor_pthread_detach(pthread_t thread);
extern "C" int __interceptor_pthread_mutex_init(
pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
extern "C" int __interceptor_pthread_mutex_lock(pthread_mutex_t *mutex);
extern "C" int __interceptor_pthread_mutex_unlock(pthread_mutex_t *mutex);
extern "C" int __interceptor_pthread_mutex_destroy(pthread_mutex_t *mutex);
extern "C" int __interceptor_pthread_mutex_trylock(pthread_mutex_t *mutex);
extern "C" int __interceptor_pthread_rwlock_init(
pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
extern "C" int __interceptor_pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
extern "C" int __interceptor_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
extern "C" int __interceptor_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
extern "C" int __interceptor_pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
extern "C" int __interceptor_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
extern "C" int __interceptor_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
extern "C" int __interceptor_pthread_cond_init(pthread_cond_t *cond,
const pthread_condattr_t *attr);
extern "C" int __interceptor_pthread_cond_signal(pthread_cond_t *cond);
extern "C" int __interceptor_pthread_cond_broadcast(pthread_cond_t *cond);
extern "C" int __interceptor_pthread_cond_wait(pthread_cond_t *cond,
pthread_mutex_t *mutex);
extern "C" int __interceptor_pthread_cond_destroy(pthread_cond_t *cond);
#endif  // #ifndef TSAN_POSIX_UTIL_H

View File

@@ -0,0 +1,82 @@
//===-- tsan_string.cc ----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_test_util.h"
#include "gtest/gtest.h"
#include <string.h>
namespace __tsan {
// Functional check of the intercepted memcpy/memset: only bytes [1,6) are
// copied/filled; the sentinel bytes at both ends must stay 42.
TEST(ThreadSanitizer, Memcpy) {
char data0[7] = {1, 2, 3, 4, 5, 6, 7};
char data[7] = {42, 42, 42, 42, 42, 42, 42};
MainThread().Memcpy(data+1, data0+1, 5);
EXPECT_EQ(data[0], 42);
EXPECT_EQ(data[1], 2);
EXPECT_EQ(data[2], 3);
EXPECT_EQ(data[3], 4);
EXPECT_EQ(data[4], 5);
EXPECT_EQ(data[5], 6);
EXPECT_EQ(data[6], 42);
MainThread().Memset(data+1, 13, 5);
EXPECT_EQ(data[0], 42);
EXPECT_EQ(data[1], 13);
EXPECT_EQ(data[2], 13);
EXPECT_EQ(data[3], 13);
EXPECT_EQ(data[4], 13);
EXPECT_EQ(data[5], 13);
EXPECT_EQ(data[6], 42);
}
// Race-detection tests for the memcpy/memset interceptors.  The heap
// buffers are deliberately never freed: the racing thread may still be
// accessing them when the test body ends, and the small leak is harmless
// in a test process.
// Two memcpys writing the same destination — race.
TEST(ThreadSanitizer, MemcpyRace1) {
char *data = new char[10];
char *data1 = new char[10];
char *data2 = new char[10];
ScopedThread t1, t2;
t1.Memcpy(data, data1, 10);
t2.Memcpy(data, data2, 10, true);
}
// Partially overlapping destination ranges ([5,6) vs [3,7)) — race.
TEST(ThreadSanitizer, MemcpyRace2) {
char *data = new char[10];
char *data1 = new char[10];
char *data2 = new char[10];
ScopedThread t1, t2;
t1.Memcpy(data+5, data1, 1);
t2.Memcpy(data+3, data2, 4, true);
}
// One memcpy's source is the other's destination — race on data1.
TEST(ThreadSanitizer, MemcpyRace3) {
char *data = new char[10];
char *data1 = new char[10];
char *data2 = new char[10];
ScopedThread t1, t2;
t1.Memcpy(data, data1, 10);
t2.Memcpy(data1, data2, 10, true);
}
// Identical memcpy from two threads — still a race on the destination.
TEST(ThreadSanitizer, MemcpyStack) {
char *data = new char[10];
char *data1 = new char[10];
ScopedThread t1, t2;
t1.Memcpy(data, data1, 10);
t2.Memcpy(data, data1, 10, true);
}
// Concurrent memsets of the same range — race.
TEST(ThreadSanitizer, MemsetRace1) {
char *data = new char[10];
ScopedThread t1, t2;
t1.Memset(data, 1, 10);
t2.Memset(data, 2, 10, true);
}
}  // namespace __tsan

View File

@@ -0,0 +1,66 @@
//===-- tsan_test.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_interface.h"
#include "tsan_test_util.h"
#include "gtest/gtest.h"
// Empty functions used only as distinct program counters for Call().
static void foo() {}
static void bar() {}
// A race report involving nested Call frames must unwind cleanly; the two
// Return() calls pop the foo/bar frames pushed above.
TEST(ThreadSanitizer, FuncCall) {
ScopedThread t1, t2;
MemLoc l;
t1.Write1(l);
t2.Call(foo);
t2.Call(bar);
t2.Write1(l, true);
t2.Return();
t2.Return();
}
// We use this function instead of main, as ISO C++ forbids taking the address
// of main, which we need to pass inside __tsan_func_entry.
//
// Wraps the whole gtest run in two synthetic stack frames (the caller's
// return address and a PC inside run_tests) so every report produced by the
// tests has a plausible call stack; balanced by the two __tsan_func_exit
// calls before returning.
int run_tests(int argc, char **argv) {
TestMutexBeforeInit();  // Mutexes must be usable before __tsan_init();
__tsan_init();
__tsan_func_entry(__builtin_return_address(0));
__tsan_func_entry((void*)((intptr_t)&run_tests + 1));
testing::GTEST_FLAG(death_test_style) = "threadsafe";
testing::InitGoogleTest(&argc, argv);
int res = RUN_ALL_TESTS();
__tsan_func_exit();
__tsan_func_exit();
return res;
}
// Program name; not read here — presumably consumed elsewhere in the test
// runtime (TODO confirm against the other test-util sources).
const char *argv0;
#ifdef __APPLE__
// On Darwin, turns off symbolication and crash logs to make tests faster.
extern "C" const char* __tsan_default_options() {
return "symbolize=false:abort_on_error=0";
}
#endif
namespace __sanitizer {
// Overrides the runtime's re-exec logic: the test binary must not re-exec
// itself on startup.
bool ReexecDisabled() {
return true;
}
}
int main(int argc, char **argv) {
argv0 = argv[0];
return run_tests(argc, argv);
}

View File

@@ -0,0 +1,130 @@
//===-- tsan_test_util.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Test utils.
//===----------------------------------------------------------------------===//
#ifndef TSAN_TEST_UTIL_H
#define TSAN_TEST_UTIL_H
// Exercises mutexes before __tsan_init runs; implemented per-platform.
void TestMutexBeforeInit();
// A location of memory on which a race may be detected.
class MemLoc {
public:
// Allocates a fresh address; offset_from_aligned shifts it off the
// allocator's alignment boundary to test unaligned accesses.
explicit MemLoc(int offset_from_aligned = 0);
// Wraps an existing address instead of allocating one.
explicit MemLoc(void *const real_addr) : loc_(real_addr) { }
~MemLoc();
void *loc() const { return loc_; }
private:
void *const loc_;
// Non-copyable: the identity of the address is the point.
MemLoc(const MemLoc&);
void operator = (const MemLoc&);
};
// Test wrapper over a platform mutex of the selected flavor.
class Mutex {
public:
enum Type {
Normal,
RW,
// Spin locks don't exist on Darwin's pthread API, so Spin degrades to
// Normal there.
#ifndef __APPLE__
Spin
#else
Spin = Normal
#endif
};
explicit Mutex(Type type = Normal);
~Mutex();
void Init();
void StaticInit();  // Emulates static initialization (tsan invisible).
void Destroy();
void Lock();
bool TryLock();
void Unlock();
// Read-side operations are only valid for type RW.
void ReadLock();
bool TryReadLock();
void ReadUnlock();
private:
// Placeholder for pthread_mutex_t, CRITICAL_SECTION or whatever.
void *mtx_[128];
// True between Init/StaticInit and Destroy; guards against misuse.
bool alive_;
const Type type_;
// Non-copyable.
Mutex(const Mutex&);
void operator = (const Mutex&);
};
// A thread is started in CTOR and joined in DTOR.
// All operations are shipped to the worker thread and executed there; the
// expect_race flags tell the harness whether the runtime must report a
// race for that particular access.
class ScopedThread {
public:
explicit ScopedThread(bool detached = false, bool main = false);
~ScopedThread();
void Detach();
// Perform a size-byte read or write at addr on this thread.
void Access(void *addr, bool is_write, int size, bool expect_race);
void Read(const MemLoc &ml, int size, bool expect_race = false) {
Access(ml.loc(), false, size, expect_race);
}
void Write(const MemLoc &ml, int size, bool expect_race = false) {
Access(ml.loc(), true, size, expect_race);
}
// Fixed-size convenience wrappers.
void Read1(const MemLoc &ml, bool expect_race = false) {
Read(ml, 1, expect_race); }
void Read2(const MemLoc &ml, bool expect_race = false) {
Read(ml, 2, expect_race); }
void Read4(const MemLoc &ml, bool expect_race = false) {
Read(ml, 4, expect_race); }
void Read8(const MemLoc &ml, bool expect_race = false) {
Read(ml, 8, expect_race); }
void Write1(const MemLoc &ml, bool expect_race = false) {
Write(ml, 1, expect_race); }
void Write2(const MemLoc &ml, bool expect_race = false) {
Write(ml, 2, expect_race); }
void Write4(const MemLoc &ml, bool expect_race = false) {
Write(ml, 4, expect_race); }
void Write8(const MemLoc &ml, bool expect_race = false) {
Write(ml, 8, expect_race); }
// Store new_val through vptr using the vptr-update instrumentation.
void VptrUpdate(const MemLoc &vptr, const MemLoc &new_val,
bool expect_race = false);
// Push/pop a synthetic stack frame for race reports.
void Call(void(*pc)());
void Return();
// Mutex operations executed on this thread.
void Create(const Mutex &m);
void Destroy(const Mutex &m);
void Lock(const Mutex &m);
bool TryLock(const Mutex &m);
void Unlock(const Mutex &m);
void ReadLock(const Mutex &m);
bool TryReadLock(const Mutex &m);
void ReadUnlock(const Mutex &m);
// Intercepted libc routines executed on this thread.
void Memcpy(void *dst, const void *src, int size, bool expect_race = false);
void Memset(void *dst, int val, int size, bool expect_race = false);
private:
struct Impl;
Impl *impl_;
ScopedThread(const ScopedThread&);  // Not implemented.
void operator = (const ScopedThread&);  // Not implemented.
};
// Executes the same operations, but on the caller's (main) thread rather
// than a spawned one.
class MainThread : public ScopedThread {
public:
MainThread()
: ScopedThread(false, true) {
}
};
#endif // #ifndef TSAN_TEST_UTIL_H

View File

@@ -0,0 +1,478 @@
//===-- tsan_test_util_posix.cc -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Test utils, Linux, FreeBSD, NetBSD and Darwin implementation.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_atomic.h"
#include "tsan_interface.h"
#include "tsan_posix_util.h"
#include "tsan_test_util.h"
#include "tsan_report.h"
#include "gtest/gtest.h"
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
using namespace __tsan;  // NOLINT
// Per-thread report expectations set by the test harness and checked in
// OnReport below.
static __thread bool expect_report;
static __thread bool expect_report_reported;
static __thread ReportType expect_report_type;
// Trivial thread body for the pre-init pthread_create check below.
static void *BeforeInitThread(void *param) {
(void)param;
return 0;
}
// Registered with atexit() purely to exercise the atexit interceptor.
static void AtExit() {
}
// Exercises the pthread interceptors before __tsan_init has run — the
// runtime must tolerate being entered this early.
void TestMutexBeforeInit() {
// Mutexes must be usable before __tsan_init();
pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
__interceptor_pthread_mutex_lock(&mtx);
__interceptor_pthread_mutex_unlock(&mtx);
__interceptor_pthread_mutex_destroy(&mtx);
pthread_t thr;
__interceptor_pthread_create(&thr, 0, BeforeInitThread, 0);
__interceptor_pthread_join(thr, 0);
atexit(AtExit);
}
namespace __tsan {
// Runtime report hook: validates each produced report against the current
// thread's expectations.  Returning true tells the runtime the report was
// consumed; false lets it proceed with normal (fatal) reporting.
bool OnReport(const ReportDesc *rep, bool suppressed) {
if (expect_report) {
if (rep->typ != expect_report_type) {
printf("Expected report of type %d, got type %d\n",
(int)expect_report_type, (int)rep->typ);
EXPECT_TRUE(false) << "Wrong report type";
return false;
}
} else {
EXPECT_TRUE(false) << "Unexpected report";
return false;
}
expect_report_reported = true;
return true;
}
}  // namespace __tsan
// Hands out fresh, never-reused addresses from a monotonically advancing
// cursor (seeded with a real static's address).  The memory behind these
// addresses is never written through here — MemLocs only need distinct
// shadow cells.  Rounds the reservation up so consecutive MemLocs never
// share a kAlign-sized granule even with a nonzero offset.
static void* allocate_addr(int size, int offset_from_aligned = 0) {
static uintptr_t foo;
static atomic_uintptr_t uniq = {(uintptr_t)&foo};  // Some real address.
const int kAlign = 16;
CHECK(offset_from_aligned < kAlign);
size = (size + 2 * kAlign) & ~(kAlign - 1);
uintptr_t addr = atomic_fetch_add(&uniq, size, memory_order_relaxed);
return (void*)(addr + offset_from_aligned);
}
// Each MemLoc reserves a 16-byte granule, optionally misaligned.
MemLoc::MemLoc(int offset_from_aligned)
: loc_(allocate_addr(16, offset_from_aligned)) {
}
MemLoc::~MemLoc() {
}
Mutex::Mutex(Type type)
: alive_()
, type_(type) {
}
Mutex::~Mutex() {
CHECK(!alive_);
}
void Mutex::Init() {
CHECK(!alive_);
alive_ = true;
if (type_ == Normal)
CHECK_EQ(__interceptor_pthread_mutex_init((pthread_mutex_t*)mtx_, 0), 0);
#ifndef __APPLE__
else if (type_ == Spin)
CHECK_EQ(pthread_spin_init((pthread_spinlock_t*)mtx_, 0), 0);
#endif
else if (type_ == RW)
CHECK_EQ(__interceptor_pthread_rwlock_init((pthread_rwlock_t*)mtx_, 0), 0);
else
CHECK(0);
}
void Mutex::StaticInit() {
CHECK(!alive_);
CHECK(type_ == Normal);
alive_ = true;
pthread_mutex_t tmp = PTHREAD_MUTEX_INITIALIZER;
memcpy(mtx_, &tmp, sizeof(tmp));
}
// Destroys the underlying primitive. Unlike Init() there is no trailing
// else-CHECK, so an unsupported type silently does nothing here.
void Mutex::Destroy() {
  CHECK(alive_);
  alive_ = false;
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_destroy((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_destroy((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_destroy((pthread_rwlock_t*)mtx_), 0);
}
// Acquires exclusively: write-lock for RW mutexes, plain lock otherwise.
// Goes through the interceptors so TSan observes the synchronization.
void Mutex::Lock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_lock((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_lock((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_wrlock((pthread_rwlock_t*)mtx_), 0);
}
// Non-blocking exclusive acquire; returns true on success, false if the
// lock is busy (or if the mutex type has no branch on this platform).
bool Mutex::TryLock() {
  CHECK(alive_);
  if (type_ == Normal)
    return __interceptor_pthread_mutex_trylock((pthread_mutex_t*)mtx_) == 0;
#ifndef __APPLE__
  else if (type_ == Spin)
    return pthread_spin_trylock((pthread_spinlock_t*)mtx_) == 0;
#endif
  else if (type_ == RW)
    return __interceptor_pthread_rwlock_trywrlock((pthread_rwlock_t*)mtx_) == 0;
  return false;
}
// Releases an exclusive hold acquired by Lock()/TryLock().
void Mutex::Unlock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_unlock((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_unlock((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}
// Shared (reader) acquire; only valid for RW mutexes.
void Mutex::ReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(__interceptor_pthread_rwlock_rdlock((pthread_rwlock_t*)mtx_), 0);
}
// Non-blocking shared acquire; RW mutexes only. True on success.
bool Mutex::TryReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  const int rc = __interceptor_pthread_rwlock_tryrdlock((pthread_rwlock_t*)mtx_);
  return rc == 0;
}
// Releases a shared hold; RW mutexes only (pthread uses one unlock call
// for both reader and writer holds).
void Mutex::ReadUnlock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}
// One operation to be executed on a ScopedThread worker, together with
// its inputs and the expected outcome (Try* result, expected TSan report).
struct Event {
  enum Type {
    SHUTDOWN,
    READ,
    WRITE,
    VPTR_UPDATE,
    CALL,
    RETURN,
    MUTEX_CREATE,
    MUTEX_DESTROY,
    MUTEX_LOCK,
    MUTEX_TRYLOCK,
    MUTEX_UNLOCK,
    MUTEX_READLOCK,
    MUTEX_TRYREADLOCK,
    MUTEX_READUNLOCK,
    MEMCPY,
    MEMSET
  };
  Type type;
  void *ptr;   // primary operand: accessed address, Mutex*, or dst pointer
  uptr arg;    // operation-specific: access size, src pointer, fill value...
  uptr arg2;   // second extra operand (e.g. memcpy/memset length)
  bool res;    // out-param: result of MUTEX_TRYLOCK / MUTEX_TRYREADLOCK
  bool expect_report;      // executing this event must produce a report
  ReportType report_type;  // ...of exactly this type
  Event(Type type, const void *ptr = 0, uptr arg = 0, uptr arg2 = 0)
      : type(type)
      , ptr(const_cast<void*>(ptr))
      , arg(arg)
      , arg2(arg2)
      , res()
      , expect_report()
      , report_type() {
  }
  // Marks this event as having to trigger a report of the given type.
  void ExpectReport(ReportType type) {
    expect_report = true;
    report_type = type;
  }
};
// Worker-thread state. Events are handed over through the single atomic
// slot 'event': the sender stores an Event* and spins until the worker
// clears it back to 0, which doubles as the completion signal.
struct ScopedThread::Impl {
  pthread_t thread;
  bool main;      // true: execute events synchronously on the caller
  bool detached;  // worker was created/later made detached (no join)
  atomic_uintptr_t event; // Event*
  static void *ScopedThreadCallback(void *arg);
  void send(Event *ev);
  void HandleEvent(Event *ev);
};
// Executes one event on this thread and enforces its report expectation:
// arms the expect_report* globals (consumed by OnReport above), performs
// the operation, then verifies an expected report actually fired.
void ScopedThread::Impl::HandleEvent(Event *ev) {
  CHECK_EQ(expect_report, false);  // expectations must not nest
  expect_report = ev->expect_report;
  expect_report_reported = false;
  expect_report_type = ev->report_type;
  switch (ev->type) {
  case Event::READ:
  case Event::WRITE: {
    // Dispatch to the size-specialized TSan memory-access entry point.
    void (*tsan_mop)(void *addr) = 0;
    if (ev->type == Event::READ) {
      switch (ev->arg /*size*/) {
      case 1: tsan_mop = __tsan_read1; break;
      case 2: tsan_mop = __tsan_read2; break;
      case 4: tsan_mop = __tsan_read4; break;
      case 8: tsan_mop = __tsan_read8; break;
      case 16: tsan_mop = __tsan_read16; break;
      }
    } else {
      switch (ev->arg /*size*/) {
      case 1: tsan_mop = __tsan_write1; break;
      case 2: tsan_mop = __tsan_write2; break;
      case 4: tsan_mop = __tsan_write4; break;
      case 8: tsan_mop = __tsan_write8; break;
      case 16: tsan_mop = __tsan_write16; break;
      }
    }
    CHECK_NE(tsan_mop, 0);
    // Pick an errno value the mop itself would never set, per platform.
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__NetBSD__)
    const int ErrCode = ESOCKTNOSUPPORT;
#else
    const int ErrCode = ECHRNG;
#endif
    errno = ErrCode;
    tsan_mop(ev->ptr);
    CHECK_EQ(ErrCode, errno);  // In no case must errno be changed.
    break;
  }
  case Event::VPTR_UPDATE:
    __tsan_vptr_update((void**)ev->ptr, (void*)ev->arg);
    break;
  case Event::CALL:
    __tsan_func_entry((void*)((uptr)ev->ptr));
    break;
  case Event::RETURN:
    __tsan_func_exit();
    break;
  case Event::MUTEX_CREATE:
    static_cast<Mutex*>(ev->ptr)->Init();
    break;
  case Event::MUTEX_DESTROY:
    static_cast<Mutex*>(ev->ptr)->Destroy();
    break;
  case Event::MUTEX_LOCK:
    static_cast<Mutex*>(ev->ptr)->Lock();
    break;
  case Event::MUTEX_TRYLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryLock();
    break;
  case Event::MUTEX_UNLOCK:
    static_cast<Mutex*>(ev->ptr)->Unlock();
    break;
  case Event::MUTEX_READLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadLock();
    break;
  case Event::MUTEX_TRYREADLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryReadLock();
    break;
  case Event::MUTEX_READUNLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadUnlock();
    break;
  case Event::MEMCPY:
    __interceptor_memcpy(ev->ptr, (void*)ev->arg, ev->arg2);
    break;
  case Event::MEMSET:
    __interceptor_memset(ev->ptr, ev->arg, ev->arg2);
    break;
  default: CHECK(0);
  }
  if (expect_report && !expect_report_reported) {
    printf("Missed expected report of type %d\n", (int)ev->report_type);
    EXPECT_TRUE(false) << "Missed expected race";
  }
  expect_report = false;
}
// Worker loop: spin on the event slot, execute each posted event, then
// store 0 back to signal completion to the sender. SHUTDOWN ends the loop.
void *ScopedThread::Impl::ScopedThreadCallback(void *arg) {
  __tsan_func_entry(__builtin_return_address(0));
  Impl *impl = (Impl*)arg;
  for (;;) {
    Event* ev = (Event*)atomic_load(&impl->event, memory_order_acquire);
    if (ev == 0) {
      sched_yield();  // busy-wait politely until an event is posted
      continue;
    }
    if (ev->type == Event::SHUTDOWN) {
      atomic_store(&impl->event, 0, memory_order_release);
      break;
    }
    impl->HandleEvent(ev);
    atomic_store(&impl->event, 0, memory_order_release);
  }
  __tsan_func_exit();
  return 0;
}
// Posts an event and blocks until the worker has consumed it; for the
// main pseudo-thread the event runs synchronously on the caller instead.
void ScopedThread::Impl::send(Event *e) {
  if (main) {
    HandleEvent(e);
  } else {
    CHECK_EQ(atomic_load(&event, memory_order_relaxed), 0);
    atomic_store(&event, (uintptr_t)e, memory_order_release);
    // Completion is signaled by the worker clearing the slot back to 0.
    while (atomic_load(&event, memory_order_acquire) != 0)
      sched_yield();
  }
}
// Spawns the worker (unless main==true, in which case events execute on
// the calling thread and no pthread is created).
ScopedThread::ScopedThread(bool detached, bool main) {
  impl_ = new Impl;
  impl_->main = main;
  impl_->detached = detached;
  atomic_store(&impl_->event, 0, memory_order_relaxed);
  if (!main) {
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(
        &attr, detached ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE);
    // Small stack is sufficient for the event loop and keeps the test light.
    pthread_attr_setstacksize(&attr, 64*1024);
    __interceptor_pthread_create(&impl_->thread, &attr,
        ScopedThread::Impl::ScopedThreadCallback, impl_);
  }
}
// Stops the worker (SHUTDOWN event), joins it unless detached, and frees
// the per-thread state.
ScopedThread::~ScopedThread() {
  if (!impl_->main) {
    Event shutdown(Event::SHUTDOWN);
    impl_->send(&shutdown);
    if (!impl_->detached)
      __interceptor_pthread_join(impl_->thread, 0);
  }
  delete impl_;
}
// Detaches the worker thread; afterwards the destructor will not join it.
void ScopedThread::Detach() {
  CHECK(!impl_->main);
  CHECK(!impl_->detached);
  impl_->detached = true;
  __interceptor_pthread_detach(impl_->thread);
}
void ScopedThread::Access(void *addr, bool is_write,
int size, bool expect_race) {
Event event(is_write ? Event::WRITE : Event::READ, addr, size);
if (expect_race)
event.ExpectReport(ReportTypeRace);
impl_->send(&event);
}
void ScopedThread::VptrUpdate(const MemLoc &vptr,
const MemLoc &new_val,
bool expect_race) {
Event event(Event::VPTR_UPDATE, vptr.loc(), (uptr)new_val.loc());
if (expect_race)
event.ExpectReport(ReportTypeRace);
impl_->send(&event);
}
void ScopedThread::Call(void(*pc)()) {
Event event(Event::CALL, (void*)((uintptr_t)pc));
impl_->send(&event);
}
void ScopedThread::Return() {
Event event(Event::RETURN);
impl_->send(&event);
}
void ScopedThread::Create(const Mutex &m) {
Event event(Event::MUTEX_CREATE, &m);
impl_->send(&event);
}
void ScopedThread::Destroy(const Mutex &m) {
Event event(Event::MUTEX_DESTROY, &m);
impl_->send(&event);
}
void ScopedThread::Lock(const Mutex &m) {
Event event(Event::MUTEX_LOCK, &m);
impl_->send(&event);
}
bool ScopedThread::TryLock(const Mutex &m) {
Event event(Event::MUTEX_TRYLOCK, &m);
impl_->send(&event);
return event.res;
}
void ScopedThread::Unlock(const Mutex &m) {
Event event(Event::MUTEX_UNLOCK, &m);
impl_->send(&event);
}
void ScopedThread::ReadLock(const Mutex &m) {
Event event(Event::MUTEX_READLOCK, &m);
impl_->send(&event);
}
bool ScopedThread::TryReadLock(const Mutex &m) {
Event event(Event::MUTEX_TRYREADLOCK, &m);
impl_->send(&event);
return event.res;
}
void ScopedThread::ReadUnlock(const Mutex &m) {
Event event(Event::MUTEX_READUNLOCK, &m);
impl_->send(&event);
}
void ScopedThread::Memcpy(void *dst, const void *src, int size,
bool expect_race) {
Event event(Event::MEMCPY, dst, (uptr)src, size);
if (expect_race)
event.ExpectReport(ReportTypeRace);
impl_->send(&event);
}
void ScopedThread::Memset(void *dst, int val, int size,
bool expect_race) {
Event event(Event::MEMSET, dst, val, size);
if (expect_race)
event.ExpectReport(ReportTypeRace);
impl_->send(&event);
}

View File

@@ -0,0 +1,59 @@
//===-- tsan_thread.cc ----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_test_util.h"
#include "gtest/gtest.h"
TEST(ThreadSanitizer, ThreadSync) {
  // No race expected: t1's write is bracketed by thread creation and the
  // implicit join at the end of its scope, both happens-before edges.
  MainThread t0;
  MemLoc l;
  t0.Write1(l);
  {
    ScopedThread t1;
    t1.Write1(l);
  }
  t0.Write1(l);
}
TEST(ThreadSanitizer, ThreadDetach1) {
  // Thread is created already detached (ScopedThread(true)).
  ScopedThread t1(true);
  MemLoc l;
  t1.Write1(l);
}
TEST(ThreadSanitizer, ThreadDetach2) {
  // Thread is detached after it already performed work.
  ScopedThread t1;
  MemLoc l;
  t1.Write1(l);
  t1.Detach();
}
// Minimal thread body for the stress test below: sleep briefly and exit.
static void *thread_alot_func(void *arg) {
  (void)arg;
  // Local prototype avoids including <unistd.h> just for usleep.
  int usleep(unsigned);
  usleep(50);
  return 0;
}
TEST(DISABLED_SLOW_ThreadSanitizer, ThreadALot) {
  // Stress: churn through kThreads short-lived threads while keeping at
  // most kAlive alive, recycling the slot array round-robin.
  const int kThreads = 70000;
  const int kAlive = 1000;
  pthread_t threads[kAlive] = {};
  for (int i = 0; i < kThreads; i++) {
    if (threads[i % kAlive])  // slot occupied -> reap it first
      pthread_join(threads[i % kAlive], 0);
    pthread_create(&threads[i % kAlive], 0, thread_alot_func, 0);
  }
  for (int i = 0; i < kAlive; i++) {
    pthread_join(threads[i], 0);
  }
}

View File

@@ -0,0 +1,12 @@
# Sources of the TSan runtime unit-test binary. Every tsan_*_test.cc in
# this directory must be listed here or it is silently never built.
# Fix: tsan_dense_alloc_test.cc is part of this test suite but was
# missing from the list, so DenseSlabAlloc tests were never compiled.
set(TSAN_UNIT_TEST_SOURCES
  tsan_clock_test.cc
  tsan_dense_alloc_test.cc
  tsan_flags_test.cc
  tsan_mman_test.cc
  tsan_mutex_test.cc
  tsan_shadow_test.cc
  tsan_stack_test.cc
  tsan_sync_test.cc
  tsan_unit_test_main.cc)

add_tsan_unittest(TsanUnitTest
  SOURCES ${TSAN_UNIT_TEST_SOURCES})

View File

@@ -0,0 +1,494 @@
//===-- tsan_clock_test.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_clock.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
#include <sys/time.h>
#include <time.h>
namespace __tsan {
// Shared clock-block cache used by every test in this file; the tests
// return all blocks via SyncClock::Reset(&cache) before finishing.
ClockCache cache;
TEST(Clock, VectorBasic) {
  // ThreadClock grows on demand when a higher tid is set, and tick()
  // advances only the owner's component.
  ThreadClock clk(0);
  ASSERT_EQ(clk.size(), 1U);
  clk.tick();
  ASSERT_EQ(clk.size(), 1U);
  ASSERT_EQ(clk.get(0), 1U);
  clk.set(&cache, 3, clk.get(3) + 1);
  ASSERT_EQ(clk.size(), 4U);
  ASSERT_EQ(clk.get(0), 1U);
  ASSERT_EQ(clk.get(1), 0U);
  ASSERT_EQ(clk.get(2), 0U);
  ASSERT_EQ(clk.get(3), 1U);
  clk.set(&cache, 3, clk.get(3) + 1);
  ASSERT_EQ(clk.get(3), 2U);
}
TEST(Clock, ChunkedBasic) {
  // A SyncClock only materializes storage on release; acquire leaves it
  // untouched.
  ThreadClock vector(0);
  SyncClock chunked;
  ASSERT_EQ(vector.size(), 1U);
  ASSERT_EQ(chunked.size(), 0U);
  vector.acquire(&cache, &chunked);
  ASSERT_EQ(vector.size(), 1U);
  ASSERT_EQ(chunked.size(), 0U);
  vector.release(&cache, &chunked);
  ASSERT_EQ(vector.size(), 1U);
  ASSERT_EQ(chunked.size(), 1U);
  vector.acq_rel(&cache, &chunked);
  ASSERT_EQ(vector.size(), 1U);
  ASSERT_EQ(chunked.size(), 1U);
  chunked.Reset(&cache);
}
// Clock sizes around internal chunk boundaries (edge cases for the
// chunked SyncClock storage), used by the Iter and Growth2 tests.
static const uptr interesting_sizes[] = {0, 1, 2, 30, 61, 62, 63, 64, 65, 66,
    100, 124, 125, 126, 127, 128, 129, 130, 188, 189, 190, 191, 192, 193, 254,
    255};
TEST(Clock, Iter) {
  // SyncClock's range-for iterator must visit exactly size elements in
  // order, for every interesting (chunk-boundary) size.
  const uptr n = ARRAY_SIZE(interesting_sizes);
  for (uptr fi = 0; fi < n; fi++) {
    const uptr size = interesting_sizes[fi];
    SyncClock sync;
    ThreadClock vector(0);
    for (uptr i = 0; i < size; i++)
      vector.set(&cache, i, i + 1);
    if (size != 0)
      vector.release(&cache, &sync);
    uptr i = 0;
    for (ClockElem &ce : sync) {
      ASSERT_LT(i, size);
      ASSERT_EQ(sync.get_clean(i), ce.epoch);
      i++;
    }
    ASSERT_EQ(i, size);
    sync.Reset(&cache);
  }
}
TEST(Clock, AcquireRelease) {
  // An acquire after a release from a high tid must transfer that tid's
  // epoch and grow the acquiring clock accordingly.
  ThreadClock vector1(100);
  vector1.tick();
  SyncClock chunked;
  vector1.release(&cache, &chunked);
  ASSERT_EQ(chunked.size(), 101U);
  ThreadClock vector2(0);
  vector2.acquire(&cache, &chunked);
  ASSERT_EQ(vector2.size(), 101U);
  ASSERT_EQ(vector2.get(0), 0U);
  ASSERT_EQ(vector2.get(1), 0U);
  ASSERT_EQ(vector2.get(99), 0U);
  ASSERT_EQ(vector2.get(100), 1U);
  chunked.Reset(&cache);
}
TEST(Clock, RepeatedAcquire) {
  // Acquiring the same SyncClock twice must be safe (second acquire is a
  // no-op fast path).
  ThreadClock thr1(1);
  thr1.tick();
  ThreadClock thr2(2);
  thr2.tick();
  SyncClock sync;
  thr1.ReleaseStore(&cache, &sync);
  thr2.acquire(&cache, &sync);
  thr2.acquire(&cache, &sync);
  sync.Reset(&cache);
}
TEST(Clock, ManyThreads) {
  // Release 200 distinct threads into one SyncClock and verify that the
  // chunked clock grows and retains every thread's epoch.
  SyncClock chunked;
  for (unsigned i = 0; i < 200; i++) {
    ThreadClock vector(0);
    vector.tick();
    vector.set(&cache, i, i + 1);
    vector.release(&cache, &chunked);
    ASSERT_EQ(i + 1, chunked.size());
    vector.acquire(&cache, &chunked);
    ASSERT_EQ(i + 1, vector.size());
  }
  // Fix: removed leftover debug printf that emitted 200 lines of
  // "i=..." noise on every run of this test.
  for (unsigned i = 0; i < 200; i++)
    ASSERT_EQ(i + 1, chunked.get(i));
  ThreadClock vector(1);
  vector.acquire(&cache, &chunked);
  ASSERT_EQ(200U, vector.size());
  for (unsigned i = 0; i < 200; i++)
    ASSERT_EQ(i + 1, vector.get(i));
  chunked.Reset(&cache);
}
TEST(Clock, DifferentSizes) {
  // Mixing clocks of different sizes: the SyncClock must always end up
  // at the maximum size regardless of release/acquire order.
  {
    ThreadClock vector1(10);
    vector1.tick();
    ThreadClock vector2(20);
    vector2.tick();
    {
      // small release then large release: clock grows.
      SyncClock chunked;
      vector1.release(&cache, &chunked);
      ASSERT_EQ(chunked.size(), 11U);
      vector2.release(&cache, &chunked);
      ASSERT_EQ(chunked.size(), 21U);
      chunked.Reset(&cache);
    }
    {
      // large release then small release: size is retained.
      SyncClock chunked;
      vector2.release(&cache, &chunked);
      ASSERT_EQ(chunked.size(), 21U);
      vector1.release(&cache, &chunked);
      ASSERT_EQ(chunked.size(), 21U);
      chunked.Reset(&cache);
    }
    {
      // acquire by a larger clock keeps the larger size.
      SyncClock chunked;
      vector1.release(&cache, &chunked);
      vector2.acquire(&cache, &chunked);
      ASSERT_EQ(vector2.size(), 21U);
      chunked.Reset(&cache);
    }
    {
      // acquire by a smaller clock grows it.
      SyncClock chunked;
      vector2.release(&cache, &chunked);
      vector1.acquire(&cache, &chunked);
      ASSERT_EQ(vector1.size(), 21U);
      chunked.Reset(&cache);
    }
  }
}
TEST(Clock, Growth) {
  // Releasing must preserve sparse non-zero components while growing the
  // SyncClock across several size scenarios.
  {
    ThreadClock vector(10);
    vector.tick();
    vector.set(&cache, 5, 42);
    SyncClock sync;
    vector.release(&cache, &sync);
    ASSERT_EQ(sync.size(), 11U);
    ASSERT_EQ(sync.get(0), 0ULL);
    ASSERT_EQ(sync.get(1), 0ULL);
    ASSERT_EQ(sync.get(5), 42ULL);
    ASSERT_EQ(sync.get(9), 0ULL);
    ASSERT_EQ(sync.get(10), 1ULL);
    sync.Reset(&cache);
  }
  {
    // Two releases from differently-sized clocks.
    ThreadClock vector1(10);
    vector1.tick();
    ThreadClock vector2(20);
    vector2.tick();
    SyncClock sync;
    vector1.release(&cache, &sync);
    vector2.release(&cache, &sync);
    ASSERT_EQ(sync.size(), 21U);
    ASSERT_EQ(sync.get(0), 0ULL);
    ASSERT_EQ(sync.get(10), 1ULL);
    ASSERT_EQ(sync.get(19), 0ULL);
    ASSERT_EQ(sync.get(20), 1ULL);
    sync.Reset(&cache);
  }
  {
    // Large clock spanning multiple chunks with two sparse entries.
    ThreadClock vector(100);
    vector.tick();
    vector.set(&cache, 5, 42);
    vector.set(&cache, 90, 84);
    SyncClock sync;
    vector.release(&cache, &sync);
    ASSERT_EQ(sync.size(), 101U);
    ASSERT_EQ(sync.get(0), 0ULL);
    ASSERT_EQ(sync.get(1), 0ULL);
    ASSERT_EQ(sync.get(5), 42ULL);
    ASSERT_EQ(sync.get(60), 0ULL);
    ASSERT_EQ(sync.get(70), 0ULL);
    ASSERT_EQ(sync.get(90), 84ULL);
    ASSERT_EQ(sync.get(99), 0ULL);
    ASSERT_EQ(sync.get(100), 1ULL);
    sync.Reset(&cache);
  }
  {
    // Small release followed by a release that forces multi-chunk growth.
    ThreadClock vector1(10);
    vector1.tick();
    ThreadClock vector2(100);
    vector2.tick();
    SyncClock sync;
    vector1.release(&cache, &sync);
    vector2.release(&cache, &sync);
    ASSERT_EQ(sync.size(), 101U);
    ASSERT_EQ(sync.get(0), 0ULL);
    ASSERT_EQ(sync.get(10), 1ULL);
    ASSERT_EQ(sync.get(99), 0ULL);
    ASSERT_EQ(sync.get(100), 1ULL);
    sync.Reset(&cache);
  }
}
TEST(Clock, Growth2) {
  // Test clock growth for every pair of sizes:
  const uptr n = ARRAY_SIZE(interesting_sizes);
  for (uptr fi = 0; fi < n; fi++) {
    for (uptr ti = fi + 1; ti < n; ti++) {
      const uptr from = interesting_sizes[fi];
      const uptr to = interesting_sizes[ti];
      SyncClock sync;
      ThreadClock vector(0);
      // Fill to 'from', release, and verify the contents survived.
      for (uptr i = 0; i < from; i++)
        vector.set(&cache, i, i + 1);
      if (from != 0)
        vector.release(&cache, &sync);
      ASSERT_EQ(sync.size(), from);
      for (uptr i = 0; i < from; i++)
        ASSERT_EQ(sync.get(i), i + 1);
      // Grow to 'to' and re-release; all components must be preserved.
      for (uptr i = 0; i < to; i++)
        vector.set(&cache, i, i + 1);
      vector.release(&cache, &sync);
      ASSERT_EQ(sync.size(), to);
      for (uptr i = 0; i < to; i++)
        ASSERT_EQ(sync.get(i), i + 1);
      // One more sparse entry past the end: grows by 2, with a zero gap.
      vector.set(&cache, to + 1, to + 1);
      vector.release(&cache, &sync);
      ASSERT_EQ(sync.size(), to + 2);
      for (uptr i = 0; i < to; i++)
        ASSERT_EQ(sync.get(i), i + 1);
      ASSERT_EQ(sync.get(to), 0U);
      ASSERT_EQ(sync.get(to + 1), to + 1);
      sync.Reset(&cache);
    }
  }
}
// Dimensions of the fuzzer's model world below.
const uptr kThreads = 4;
const uptr kClocks = 4;
// SimpleSyncClock and SimpleThreadClock implement the same thing as
// SyncClock and ThreadClock, but in a very simple way.
struct SimpleSyncClock {
u64 clock[kThreads];
uptr size;
SimpleSyncClock() {
Reset();
}
void Reset() {
size = 0;
for (uptr i = 0; i < kThreads; i++)
clock[i] = 0;
}
bool verify(const SyncClock *other) const {
for (uptr i = 0; i < min(size, other->size()); i++) {
if (clock[i] != other->get(i))
return false;
}
for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
if (i < size && clock[i] != 0)
return false;
if (i < other->size() && other->get(i) != 0)
return false;
}
return true;
}
};
struct SimpleThreadClock {
u64 clock[kThreads];
uptr size;
unsigned tid;
explicit SimpleThreadClock(unsigned tid) {
this->tid = tid;
size = tid + 1;
for (uptr i = 0; i < kThreads; i++)
clock[i] = 0;
}
void tick() {
clock[tid]++;
}
void acquire(const SimpleSyncClock *src) {
if (size < src->size)
size = src->size;
for (uptr i = 0; i < kThreads; i++)
clock[i] = max(clock[i], src->clock[i]);
}
void release(SimpleSyncClock *dst) const {
if (dst->size < size)
dst->size = size;
for (uptr i = 0; i < kThreads; i++)
dst->clock[i] = max(dst->clock[i], clock[i]);
}
void acq_rel(SimpleSyncClock *dst) {
acquire(dst);
release(dst);
}
void ReleaseStore(SimpleSyncClock *dst) const {
if (dst->size < size)
dst->size = size;
for (uptr i = 0; i < kThreads; i++)
dst->clock[i] = clock[i];
}
bool verify(const ThreadClock *other) const {
for (uptr i = 0; i < min(size, other->size()); i++) {
if (clock[i] != other->get(i))
return false;
}
for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
if (i < size && clock[i] != 0)
return false;
if (i < other->size() && other->get(i) != 0)
return false;
}
return true;
}
};
// Runs 10000 random clock operations against both the real clocks and
// the Simple* reference models and cross-checks the states after each
// step. Returns false on the first divergence. NOTE: the sequence and
// count of rand() calls determines reproducibility from a seed -- do
// not reorder operations here.
static bool ClockFuzzer(bool printing) {
  // Create kThreads thread clocks.
  SimpleThreadClock *thr0[kThreads];
  ThreadClock *thr1[kThreads];
  unsigned reused[kThreads];  // per-thread reuse counters for tid recycling
  for (unsigned i = 0; i < kThreads; i++) {
    reused[i] = 0;
    thr0[i] = new SimpleThreadClock(i);
    thr1[i] = new ThreadClock(i, reused[i]);
  }
  // Create kClocks sync clocks.
  SimpleSyncClock *sync0[kClocks];
  SyncClock *sync1[kClocks];
  for (unsigned i = 0; i < kClocks; i++) {
    sync0[i] = new SimpleSyncClock();
    sync1[i] = new SyncClock();
  }
  // Do N random operations (acquire, release, etc) and compare results
  // for SimpleThread/SyncClock and real Thread/SyncClock.
  for (int i = 0; i < 10000; i++) {
    unsigned tid = rand() % kThreads;
    unsigned cid = rand() % kClocks;
    thr0[tid]->tick();
    thr1[tid]->tick();
    switch (rand() % 6) {
    case 0:
      if (printing)
        printf("acquire thr%d <- clk%d\n", tid, cid);
      thr0[tid]->acquire(sync0[cid]);
      thr1[tid]->acquire(&cache, sync1[cid]);
      break;
    case 1:
      if (printing)
        printf("release thr%d -> clk%d\n", tid, cid);
      thr0[tid]->release(sync0[cid]);
      thr1[tid]->release(&cache, sync1[cid]);
      break;
    case 2:
      if (printing)
        printf("acq_rel thr%d <> clk%d\n", tid, cid);
      thr0[tid]->acq_rel(sync0[cid]);
      thr1[tid]->acq_rel(&cache, sync1[cid]);
      break;
    case 3:
      if (printing)
        printf("rel_str thr%d >> clk%d\n", tid, cid);
      thr0[tid]->ReleaseStore(sync0[cid]);
      thr1[tid]->ReleaseStore(&cache, sync1[cid]);
      break;
    case 4:
      if (printing)
        printf("reset clk%d\n", cid);
      sync0[cid]->Reset();
      sync1[cid]->Reset(&cache);
      break;
    case 5:
      // Simulate tid reuse: recreate the thread clock with a bumped
      // reuse count and a continued epoch.
      if (printing)
        printf("reset thr%d\n", tid);
      u64 epoch = thr0[tid]->clock[tid] + 1;
      reused[tid]++;
      delete thr0[tid];
      thr0[tid] = new SimpleThreadClock(tid);
      thr0[tid]->clock[tid] = epoch;
      delete thr1[tid];
      thr1[tid] = new ThreadClock(tid, reused[tid]);
      thr1[tid]->set(epoch);
      break;
    }
    if (printing) {
      for (unsigned i = 0; i < kThreads; i++) {
        printf("thr%d: ", i);
        thr1[i]->DebugDump(printf);
        printf("\n");
      }
      for (unsigned i = 0; i < kClocks; i++) {
        printf("clk%d: ", i);
        sync1[i]->DebugDump(printf);
        printf("\n");
      }
      printf("\n");
    }
    if (!thr0[tid]->verify(thr1[tid]) || !sync0[cid]->verify(sync1[cid])) {
      // Divergence: dump the model state when in logging mode.
      if (!printing)
        return false;
      printf("differs with model:\n");
      for (unsigned i = 0; i < kThreads; i++) {
        printf("thr%d: clock=[", i);
        for (uptr j = 0; j < thr0[i]->size; j++)
          printf("%s%llu", j == 0 ? "" : ",", thr0[i]->clock[j]);
        printf("]\n");
      }
      for (unsigned i = 0; i < kClocks; i++) {
        printf("clk%d: clock=[", i);
        for (uptr j = 0; j < sync0[i]->size; j++)
          printf("%s%llu", j == 0 ? "" : ",", sync0[i]->clock[j]);
        printf("]\n");
      }
      return false;
    }
  }
  for (unsigned i = 0; i < kClocks; i++) {
    sync1[i]->Reset(&cache);
  }
  return true;
}
TEST(Clock, Fuzzer) {
  // Random seed per run; on failure the identical sequence is replayed
  // with logging enabled so the divergence is visible in the output.
  struct timeval tv;
  gettimeofday(&tv, NULL);
  int seed = tv.tv_sec + tv.tv_usec;
  printf("seed=%d\n", seed);
  srand(seed);
  if (!ClockFuzzer(false)) {
    // Redo the test with the same seed, but logging operations.
    srand(seed);
    ClockFuzzer(true);
    ASSERT_TRUE(false);
  }
}
} // namespace __tsan

View File

@@ -0,0 +1,55 @@
//===-- tsan_dense_alloc_test.cc ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_dense_alloc.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "gtest/gtest.h"
#include <stdlib.h>
#include <stdint.h>
#include <map>
namespace __tsan {
TEST(DenseSlabAlloc, Basic) {
  // Alloc/Map/Free round-trips through a thread cache, repeated three
  // times to also exercise block reuse after FlushCache.
  typedef DenseSlabAlloc<int, 128, 128> Alloc;
  typedef Alloc::Cache Cache;
  typedef Alloc::IndexT IndexT;
  const int N = 1000;
  Alloc alloc;
  Cache cache;
  alloc.InitCache(&cache);
  IndexT blocks[N];
  for (int ntry = 0; ntry < 3; ntry++) {
    for (int i = 0; i < N; i++) {
      IndexT idx = alloc.Alloc(&cache);
      blocks[i] = idx;
      EXPECT_NE(idx, 0U);  // index 0 is reserved as the null index
      int *v = alloc.Map(idx);
      *v = i;
    }
    for (int i = 0; i < N; i++) {
      IndexT idx = blocks[i];
      int *v = alloc.Map(idx);
      EXPECT_EQ(*v, i);  // contents survive until Free
      alloc.Free(&cache, idx);
    }
    alloc.FlushCache(&cache);
  }
}
} // namespace __tsan

View File

@@ -0,0 +1,174 @@
//===-- tsan_flags_test.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_flags.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
#include <string>
namespace __tsan {
TEST(Flags, Basic) {
  // At least should not crash.
  // Both a null env string and an empty one must parse cleanly.
  Flags f;
  InitializeFlags(&f, 0);
  InitializeFlags(&f, "");
}
TEST(Flags, DefaultValues) {
  // InitializeFlags must reset fields to defaults, overwriting whatever
  // value the struct held before.
  Flags f;
  f.enable_annotations = false;
  InitializeFlags(&f, "");
  EXPECT_EQ(true, f.enable_annotations);
}
// Option string flipping every flag away from its default (checked by
// VerifyOptions1).
static const char *options1 =
  " enable_annotations=0"
  " suppress_equal_stacks=0"
  " suppress_equal_addresses=0"
  " report_bugs=0"
  " report_thread_leaks=0"
  " report_destroy_locked=0"
  " report_mutex_bugs=0"
  " report_signal_unsafe=0"
  " report_atomic_races=0"
  " force_seq_cst_atomics=0"
  " print_benign=0"
  " halt_on_error=0"
  " atexit_sleep_ms=222"
  " profile_memory=qqq"
  " flush_memory_ms=444"
  " flush_symbolizer_ms=555"
  " memory_limit_mb=666"
  " stop_on_start=0"
  " running_on_valgrind=0"
  " history_size=5"
  " io_sync=1"
  " die_after_fork=true"
  "";
// Second full option set with different values (checked by
// VerifyOptions2); also exercises "true"/"false" boolean spellings.
static const char *options2 =
  " enable_annotations=true"
  " suppress_equal_stacks=true"
  " suppress_equal_addresses=true"
  " report_bugs=true"
  " report_thread_leaks=true"
  " report_destroy_locked=true"
  " report_mutex_bugs=true"
  " report_signal_unsafe=true"
  " report_atomic_races=true"
  " force_seq_cst_atomics=true"
  " print_benign=true"
  " halt_on_error=true"
  " atexit_sleep_ms=123"
  " profile_memory=bbbbb"
  " flush_memory_ms=234"
  " flush_symbolizer_ms=345"
  " memory_limit_mb=456"
  " stop_on_start=true"
  " running_on_valgrind=true"
  " history_size=6"
  " io_sync=2"
  " die_after_fork=false"
  "";
// Asserts that f holds exactly the values encoded in options1.
void VerifyOptions1(Flags *f) {
  EXPECT_EQ(f->enable_annotations, 0);
  EXPECT_EQ(f->suppress_equal_stacks, 0);
  EXPECT_EQ(f->suppress_equal_addresses, 0);
  EXPECT_EQ(f->report_bugs, 0);
  EXPECT_EQ(f->report_thread_leaks, 0);
  EXPECT_EQ(f->report_destroy_locked, 0);
  EXPECT_EQ(f->report_mutex_bugs, 0);
  EXPECT_EQ(f->report_signal_unsafe, 0);
  EXPECT_EQ(f->report_atomic_races, 0);
  EXPECT_EQ(f->force_seq_cst_atomics, 0);
  EXPECT_EQ(f->print_benign, 0);
  EXPECT_EQ(f->halt_on_error, 0);
  EXPECT_EQ(f->atexit_sleep_ms, 222);
  EXPECT_EQ(f->profile_memory, std::string("qqq"));
  EXPECT_EQ(f->flush_memory_ms, 444);
  EXPECT_EQ(f->flush_symbolizer_ms, 555);
  EXPECT_EQ(f->memory_limit_mb, 666);
  EXPECT_EQ(f->stop_on_start, 0);
  EXPECT_EQ(f->running_on_valgrind, 0);
  EXPECT_EQ(f->history_size, 5);
  EXPECT_EQ(f->io_sync, 1);
  EXPECT_EQ(f->die_after_fork, true);
}
// Asserts that f holds exactly the values encoded in options2.
void VerifyOptions2(Flags *f) {
  EXPECT_EQ(f->enable_annotations, true);
  EXPECT_EQ(f->suppress_equal_stacks, true);
  EXPECT_EQ(f->suppress_equal_addresses, true);
  EXPECT_EQ(f->report_bugs, true);
  EXPECT_EQ(f->report_thread_leaks, true);
  EXPECT_EQ(f->report_destroy_locked, true);
  EXPECT_EQ(f->report_mutex_bugs, true);
  EXPECT_EQ(f->report_signal_unsafe, true);
  EXPECT_EQ(f->report_atomic_races, true);
  EXPECT_EQ(f->force_seq_cst_atomics, true);
  EXPECT_EQ(f->print_benign, true);
  EXPECT_EQ(f->halt_on_error, true);
  EXPECT_EQ(f->atexit_sleep_ms, 123);
  EXPECT_EQ(f->profile_memory, std::string("bbbbb"));
  EXPECT_EQ(f->flush_memory_ms, 234);
  EXPECT_EQ(f->flush_symbolizer_ms, 345);
  EXPECT_EQ(f->memory_limit_mb, 456);
  EXPECT_EQ(f->stop_on_start, true);
  EXPECT_EQ(f->running_on_valgrind, true);
  EXPECT_EQ(f->history_size, 6);
  EXPECT_EQ(f->io_sync, 2);
  EXPECT_EQ(f->die_after_fork, false);
}
// Test-controlled override of the weak __tsan_default_options() hook,
// letting each test choose the "compiled-in defaults" string.
static const char *test_default_options;
extern "C" const char *__tsan_default_options() {
  return test_default_options;
}
TEST(Flags, ParseDefaultOptions) {
  // Options coming only from __tsan_default_options() must be honored.
  Flags f;
  test_default_options = options1;
  InitializeFlags(&f, "");
  VerifyOptions1(&f);
  test_default_options = options2;
  InitializeFlags(&f, "");
  VerifyOptions2(&f);
}
TEST(Flags, ParseEnvOptions) {
  // Options passed via the environment string must be honored.
  Flags f;
  InitializeFlags(&f, options1);
  VerifyOptions1(&f);
  InitializeFlags(&f, options2);
  VerifyOptions2(&f);
}
TEST(Flags, ParsePriority) {
  // Environment options must take precedence over compiled-in defaults.
  Flags f;
  test_default_options = options2;
  InitializeFlags(&f, options1);
  VerifyOptions1(&f);
  test_default_options = options1;
  InitializeFlags(&f, options2);
  VerifyOptions2(&f);
}
} // namespace __tsan

View File

@@ -0,0 +1,232 @@
//===-- tsan_mman_test.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include <limits>
#include <sanitizer/allocator_interface.h>
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
namespace __tsan {
TEST(Mman, Internal) {
  // internal_alloc/internal_free round-trip: two distinct writable blocks.
  char *p = (char*)internal_alloc(MBlockScopedBuf, 10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)internal_alloc(MBlockScopedBuf, 20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  // Touch every byte of both blocks to verify they are usable.
  for (int i = 0; i < 10; i++)
    p[i] = 42;
  for (int i = 0; i < 20; i++)
    p2[i] = 42;
  internal_free(p);
  internal_free(p2);
}
TEST(Mman, User) {
  // user_alloc round-trip plus exact usable-size reporting.
  ThreadState *thr = cur_thread();
  uptr pc = 0;
  char *p = (char*)user_alloc(thr, pc, 10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)user_alloc(thr, pc, 20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  EXPECT_EQ(10U, user_alloc_usable_size(p));
  EXPECT_EQ(20U, user_alloc_usable_size(p2));
  user_free(thr, pc, p);
  user_free(thr, pc, p2);
}
TEST(Mman, UserRealloc) {
  // Exercises all realloc corner cases: NULL source, zero size, grow and
  // shrink with content preservation.
  ThreadState *thr = cur_thread();
  uptr pc = 0;
  {
    void *p = user_realloc(thr, pc, 0, 0);
    // Realloc(NULL, N) is equivalent to malloc(N), thus must return
    // non-NULL pointer.
    EXPECT_NE(p, (void*)0);
    user_free(thr, pc, p);
  }
  {
    void *p = user_realloc(thr, pc, 0, 100);
    EXPECT_NE(p, (void*)0);
    memset(p, 0xde, 100);
    user_free(thr, pc, p);
  }
  {
    void *p = user_alloc(thr, pc, 100);
    EXPECT_NE(p, (void*)0);
    memset(p, 0xde, 100);
    // Realloc(P, 0) is equivalent to free(P) and returns NULL.
    void *p2 = user_realloc(thr, pc, p, 0);
    EXPECT_EQ(p2, (void*)0);
  }
  {
    // Growing must preserve the old contents.
    void *p = user_realloc(thr, pc, 0, 100);
    EXPECT_NE(p, (void*)0);
    memset(p, 0xde, 100);
    void *p2 = user_realloc(thr, pc, p, 10000);
    EXPECT_NE(p2, (void*)0);
    for (int i = 0; i < 100; i++)
      EXPECT_EQ(((char*)p2)[i], (char)0xde);
    memset(p2, 0xde, 10000);
    user_free(thr, pc, p2);
  }
  {
    // Shrinking must preserve the surviving prefix.
    void *p = user_realloc(thr, pc, 0, 10000);
    EXPECT_NE(p, (void*)0);
    memset(p, 0xde, 10000);
    void *p2 = user_realloc(thr, pc, p, 10);
    EXPECT_NE(p2, (void*)0);
    for (int i = 0; i < 10; i++)
      EXPECT_EQ(((char*)p2)[i], (char)0xde);
    user_free(thr, pc, p2);
  }
}
TEST(Mman, UsableSize) {
  // user_alloc_usable_size returns 0 for NULL and for pointers the
  // allocator does not own.
  ThreadState *thr = cur_thread();
  uptr pc = 0;
  char *p = (char*)user_alloc(thr, pc, 10);
  char *p2 = (char*)user_alloc(thr, pc, 20);
  EXPECT_EQ(0U, user_alloc_usable_size(NULL));
  EXPECT_EQ(10U, user_alloc_usable_size(p));
  EXPECT_EQ(20U, user_alloc_usable_size(p2));
  user_free(thr, pc, p);
  user_free(thr, pc, p2);
  EXPECT_EQ(0U, user_alloc_usable_size((void*)0x4123));
}
TEST(Mman, Stats) {
  // Allocator statistics must track a 10-byte allocation (rounded to a
  // 16-byte size class) and return to the baseline after free.
  ThreadState *thr = cur_thread();
  uptr alloc0 = __sanitizer_get_current_allocated_bytes();
  uptr heap0 = __sanitizer_get_heap_size();
  uptr free0 = __sanitizer_get_free_bytes();
  uptr unmapped0 = __sanitizer_get_unmapped_bytes();
  EXPECT_EQ(10U, __sanitizer_get_estimated_allocated_size(10));
  EXPECT_EQ(20U, __sanitizer_get_estimated_allocated_size(20));
  EXPECT_EQ(100U, __sanitizer_get_estimated_allocated_size(100));
  char *p = (char*)user_alloc(thr, 0, 10);
  EXPECT_TRUE(__sanitizer_get_ownership(p));
  EXPECT_EQ(10U, __sanitizer_get_allocated_size(p));
  EXPECT_EQ(alloc0 + 16, __sanitizer_get_current_allocated_bytes());
  EXPECT_GE(__sanitizer_get_heap_size(), heap0);
  EXPECT_EQ(free0, __sanitizer_get_free_bytes());
  EXPECT_EQ(unmapped0, __sanitizer_get_unmapped_bytes());
  user_free(thr, 0, p);
  EXPECT_EQ(alloc0, __sanitizer_get_current_allocated_bytes());
  EXPECT_GE(__sanitizer_get_heap_size(), heap0);
  EXPECT_EQ(free0, __sanitizer_get_free_bytes());
  EXPECT_EQ(unmapped0, __sanitizer_get_unmapped_bytes());
}
TEST(Mman, Valloc) {
  // valloc/pvalloc happy paths plus overflow inputs that must abort the
  // process (checked via EXPECT_DEATH).
  ThreadState *thr = cur_thread();
  uptr page_size = GetPageSizeCached();
  void *p = user_valloc(thr, 0, 100);
  EXPECT_NE(p, (void*)0);
  user_free(thr, 0, p);
  p = user_pvalloc(thr, 0, 100);
  EXPECT_NE(p, (void*)0);
  user_free(thr, 0, p);
  // pvalloc(0) rounds up to one full page.
  p = user_pvalloc(thr, 0, 0);
  EXPECT_NE(p, (void*)0);
  EXPECT_EQ(page_size, __sanitizer_get_allocated_size(p));
  user_free(thr, 0, p);
  EXPECT_DEATH(p = user_pvalloc(thr, 0, (uptr)-(page_size - 1)),
               "allocator is terminating the process instead of returning 0");
  EXPECT_DEATH(p = user_pvalloc(thr, 0, (uptr)-1),
               "allocator is terminating the process instead of returning 0");
}
#if !SANITIZER_DEBUG
// EXPECT_DEATH clones a thread with 4K stack,
// which is overflown by tsan memory accesses functions in debug mode.
TEST(Mman, CallocOverflow) {
  // calloc with nmemb*size overflowing size_t must abort, not wrap.
  ThreadState *thr = cur_thread();
  uptr pc = 0;
  size_t kArraySize = 4096;
  // volatile prevents the compiler from folding the overflowing product.
  volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max();
  volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
  volatile void *p = NULL;
  EXPECT_DEATH(p = user_calloc(thr, pc, kArraySize, kArraySize2),
               "allocator is terminating the process instead of returning 0");
  EXPECT_EQ(0L, p);
}
TEST(Mman, Memalign) {
  // memalign with a non-power-of-two alignment must abort.
  ThreadState *thr = cur_thread();
  void *p = user_memalign(thr, 0, 8, 100);
  EXPECT_NE(p, (void*)0);
  user_free(thr, 0, p);
  p = NULL;
  EXPECT_DEATH(p = user_memalign(thr, 0, 7, 100),
               "allocator is terminating the process instead of returning 0");
  EXPECT_EQ(0L, p);
}
TEST(Mman, PosixMemalign) {
  // posix_memalign rejects alignments that are not a power of two or not
  // a multiple of sizeof(void*); both invalid cases must abort.
  ThreadState *thr = cur_thread();
  void *p = NULL;
  int res = user_posix_memalign(thr, 0, &p, 8, 100);
  EXPECT_NE(p, (void*)0);
  EXPECT_EQ(res, 0);
  user_free(thr, 0, p);
  p = NULL;
  // Alignment is not a power of two, although is a multiple of sizeof(void*).
  EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 3 * sizeof(p), 100),
               "allocator is terminating the process instead of returning 0");
  EXPECT_EQ(0L, p);
  // Alignment is not a multiple of sizeof(void*), although is a power of 2.
  EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 2, 100),
               "allocator is terminating the process instead of returning 0");
  EXPECT_EQ(0L, p);
}
// aligned_alloc (C11) requires alignment to be a power of 2 and size to
// be a multiple of alignment; each violation must terminate the process.
TEST(Mman, AlignedAlloc) {
  ThreadState *thr = cur_thread();
  void *p = user_aligned_alloc(thr, 0, 8, 64);
  EXPECT_NE(p, (void*)0);
  user_free(thr, 0, p);
  p = NULL;
  // Alignment is not a power of 2.
  EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 7, 100),
               "allocator is terminating the process instead of returning 0");
  EXPECT_EQ(0L, p);
  // Size is not a multiple of alignment.
  EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 8, 100),
               "allocator is terminating the process instead of returning 0");
  EXPECT_EQ(0L, p);
}
#endif
} // namespace __tsan

View File

@@ -0,0 +1,126 @@
//===-- tsan_mutex_test.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_mutex.h"
#include "gtest/gtest.h"
namespace __tsan {
template<typename MutexType>
class TestData {
public:
explicit TestData(MutexType *mtx)
: mtx_(mtx) {
for (int i = 0; i < kSize; i++)
data_[i] = 0;
}
void Write() {
Lock l(mtx_);
T v0 = data_[0];
for (int i = 0; i < kSize; i++) {
CHECK_EQ(data_[i], v0);
data_[i]++;
}
}
void Read() {
ReadLock l(mtx_);
T v0 = data_[0];
for (int i = 0; i < kSize; i++) {
CHECK_EQ(data_[i], v0);
}
}
void Backoff() {
volatile T data[kSize] = {};
for (int i = 0; i < kSize; i++) {
data[i]++;
CHECK_EQ(data[i], 1);
}
}
private:
typedef GenericScopedLock<MutexType> Lock;
static const int kSize = 64;
typedef u64 T;
MutexType *mtx_;
char pad_[kCacheLineSize];
T data_[kSize];
};
// Stress-test parameters.
const int kThreads = 8;        // concurrent pthreads per test
const int kWriteRate = 1024;   // in mixed tests: one Write per this many iterations
#if SANITIZER_DEBUG
const int kIters = 16*1024;    // fewer iterations: debug builds are slow
#else
const int kIters = 64*1024;
#endif
template<typename MutexType>
static void *write_mutex_thread(void *param) {
TestData<MutexType> *data = (TestData<MutexType>*)param;
for (int i = 0; i < kIters; i++) {
data->Write();
data->Backoff();
}
return 0;
}
template<typename MutexType>
static void *read_mutex_thread(void *param) {
TestData<MutexType> *data = (TestData<MutexType>*)param;
for (int i = 0; i < kIters; i++) {
if ((i % kWriteRate) == 0)
data->Write();
else
data->Read();
data->Backoff();
}
return 0;
}
// All-writer stress: TestData::Write CHECKs that Mutex gives exclusion.
TEST(Mutex, Write) {
  Mutex mtx(MutexTypeAnnotations, StatMtxAnnotations);
  TestData<Mutex> shared(&mtx);
  pthread_t tids[kThreads];
  for (int t = 0; t < kThreads; t++)
    pthread_create(&tids[t], 0, write_mutex_thread<Mutex>, &shared);
  for (int t = 0; t < kThreads; t++)
    pthread_join(tids[t], 0);
}
// Mixed reader/writer stress: mostly ReadLock sections with periodic
// exclusive Writes (see read_mutex_thread).
TEST(Mutex, ReadWrite) {
  Mutex mtx(MutexTypeAnnotations, StatMtxAnnotations);
  TestData<Mutex> shared(&mtx);
  pthread_t tids[kThreads];
  for (int t = 0; t < kThreads; t++)
    pthread_create(&tids[t], 0, read_mutex_thread<Mutex>, &shared);
  for (int t = 0; t < kThreads; t++)
    pthread_join(tids[t], 0);
}
// Same all-writer stress as Mutex.Write, but exercising SpinMutex.
TEST(Mutex, SpinWrite) {
  SpinMutex mtx;
  TestData<SpinMutex> shared(&mtx);
  pthread_t tids[kThreads];
  for (int t = 0; t < kThreads; t++)
    pthread_create(&tids[t], 0, write_mutex_thread<SpinMutex>, &shared);
  for (int t = 0; t < kThreads; t++)
    pthread_join(tids[t], 0);
}
} // namespace __tsan

View File

@@ -0,0 +1,127 @@
//===-- tsan_mutexset_test.cc ---------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_mutexset.h"
#include "gtest/gtest.h"
namespace __tsan {
static void Expect(const MutexSet &mset, uptr i, u64 id, bool write, u64 epoch,
int count) {
MutexSet::Desc d = mset.Get(i);
EXPECT_EQ(id, d.id);
EXPECT_EQ(write, d.write);
EXPECT_EQ(epoch, d.epoch);
EXPECT_EQ(count, d.count);
}
// Add/Del of one and two mutexes; Size() tracks live entries and entries
// appear in insertion order.  Add's signature is Add(id, write, epoch).
TEST(MutexSet, Basic) {
  MutexSet mset;
  EXPECT_EQ(mset.Size(), (uptr)0);
  mset.Add(1, true, 2);
  EXPECT_EQ(mset.Size(), (uptr)1);
  Expect(mset, 0, 1, true, 2, 1);
  mset.Del(1, true);
  EXPECT_EQ(mset.Size(), (uptr)0);
  // Two distinct mutexes coexist in insertion order.
  mset.Add(3, true, 4);
  mset.Add(5, false, 6);
  EXPECT_EQ(mset.Size(), (uptr)2);
  Expect(mset, 0, 3, true, 4, 1);
  Expect(mset, 1, 5, false, 6, 1);
  mset.Del(3, true);
  EXPECT_EQ(mset.Size(), (uptr)1);
  mset.Del(5, false);
  EXPECT_EQ(mset.Size(), (uptr)0);
}
// Re-adding an existing mutex bumps its count instead of creating a
// second entry; Del decrements symmetrically and removes at count 0.
TEST(MutexSet, DoubleAdd) {
  MutexSet mset;
  mset.Add(1, true, 2);
  EXPECT_EQ(mset.Size(), (uptr)1);
  Expect(mset, 0, 1, true, 2, 1);
  mset.Add(1, true, 2);
  EXPECT_EQ(mset.Size(), (uptr)1);
  Expect(mset, 0, 1, true, 2, 2);
  mset.Del(1, true);
  EXPECT_EQ(mset.Size(), (uptr)1);
  Expect(mset, 0, 1, true, 2, 1);
  mset.Del(1, true);
  EXPECT_EQ(mset.Size(), (uptr)0);
}
// Deleting a mutex that is no longer in the set is a harmless no-op.
TEST(MutexSet, DoubleDel) {
  MutexSet mset;
  mset.Add(1, true, 2);
  EXPECT_EQ(mset.Size(), (uptr)1);
  mset.Del(1, true);
  EXPECT_EQ(mset.Size(), (uptr)0);
  mset.Del(1, true);
  EXPECT_EQ(mset.Size(), (uptr)0);
}
// Remove(id) drops the entry entirely, regardless of its count, and
// leaves other entries intact.
TEST(MutexSet, Remove) {
  MutexSet mset;
  mset.Add(1, true, 2);
  mset.Add(1, true, 2);
  mset.Add(3, true, 4);
  mset.Add(3, true, 4);
  EXPECT_EQ(mset.Size(), (uptr)2);
  mset.Remove(1);
  EXPECT_EQ(mset.Size(), (uptr)1);
  // Mutex 3 keeps its count of 2 and shifts into slot 0.
  Expect(mset, 0, 3, true, 4, 2);
}
// A set filled to kMaxSize distinct mutexes holds them all; re-adding
// each one bumps its count without growing the set.
TEST(MutexSet, Full) {
  MutexSet mset;
  for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
    mset.Add(i, true, i + 1);
  }
  EXPECT_EQ(mset.Size(), MutexSet::kMaxSize);
  for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
    Expect(mset, i, i, true, i + 1, 1);
  }
  // Second pass: same ids, so counts go 1 -> 2 and size stays fixed.
  for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
    mset.Add(i, true, i + 1);
  }
  EXPECT_EQ(mset.Size(), MutexSet::kMaxSize);
  for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
    Expect(mset, i, i, true, i + 1, 2);
  }
}
// Adding a new mutex to a full set evicts the entry in slot 0 (mutex 0
// disappears): the entry from the last slot moves into slot 0 and the
// new mutex takes the freed last slot, per the expectations below.
TEST(MutexSet, Overflow) {
  MutexSet mset;
  for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
    mset.Add(i, true, i + 1);
    mset.Add(i, true, i + 1);
  }
  mset.Add(100, true, 200);
  EXPECT_EQ(mset.Size(), MutexSet::kMaxSize);
  for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
    if (i == 0)
      // Former last entry (id kMaxSize-1, count 2) now sits in slot 0.
      Expect(mset, i, MutexSet::kMaxSize - 1,
             true, MutexSet::kMaxSize, 2);
    else if (i == MutexSet::kMaxSize - 1)
      // The newly added mutex occupies the last slot.
      Expect(mset, i, 100, true, 200, 1);
    else
      Expect(mset, i, i, true, i + 1, 2);
  }
}
} // namespace __tsan

View File

@@ -0,0 +1,78 @@
//===-- tsan_shadow_test.cc -----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
namespace __tsan {
// FastState/Shadow bit packing: the constructor stores tid and epoch,
// flag bits start cleared, and the default access parameters are
// addr0=0, size=1, write.
TEST(Shadow, FastState) {
  Shadow s(FastState(11, 22));
  EXPECT_EQ(s.tid(), (u64)11);
  EXPECT_EQ(s.epoch(), (u64)22);
  EXPECT_EQ(s.GetIgnoreBit(), false);
  EXPECT_EQ(s.GetFreedAndReset(), false);
  EXPECT_EQ(s.GetHistorySize(), 0);
  EXPECT_EQ(s.addr0(), (u64)0);
  EXPECT_EQ(s.size(), (u64)1);
  EXPECT_EQ(s.IsWrite(), true);
  // Epoch increments do not disturb the other fields.
  s.IncrementEpoch();
  EXPECT_EQ(s.epoch(), (u64)23);
  s.IncrementEpoch();
  EXPECT_EQ(s.epoch(), (u64)24);
  // The ignore bit can be set and cleared independently.
  s.SetIgnoreBit();
  EXPECT_EQ(s.GetIgnoreBit(), true);
  s.ClearIgnoreBit();
  EXPECT_EQ(s.GetIgnoreBit(), false);
  // All history-size values 0..7 round-trip through set/get.
  for (int i = 0; i < 8; i++) {
    s.SetHistorySize(i);
    EXPECT_EQ(s.GetHistorySize(), i);
  }
  s.SetHistorySize(2);
  s.ClearHistorySize();
  EXPECT_EQ(s.GetHistorySize(), 0);
}
// Global, stack, and heap addresses must all lie in the application
// range, and their shadow addresses must lie in the shadow range.
TEST(Shadow, Mapping) {
  static int global;
  int stack;
  // NOTE(review): heap is freed immediately; only the address value is
  // inspected below, the memory itself is never dereferenced.
  void *heap = malloc(0);
  free(heap);
  CHECK(IsAppMem((uptr)&global));
  CHECK(IsAppMem((uptr)&stack));
  CHECK(IsAppMem((uptr)heap));
  CHECK(IsShadowMem(MemToShadow((uptr)&global)));
  CHECK(IsShadowMem(MemToShadow((uptr)&stack)));
  CHECK(IsShadowMem(MemToShadow((uptr)heap)));
}
// "Celling" (sic; name kept for stability — presumably "Ceiling"):
// every byte inside one kShadowCell-byte cell maps to the same shadow
// address, and consecutive cells map to shadow addresses exactly
// kShadowSize*kShadowCnt bytes apart.
TEST(Shadow, Celling) {
  u64 aligned_data[4];
  char *data = (char*)aligned_data;
  // u64 alignment guarantees the buffer starts on a shadow boundary.
  CHECK_EQ((uptr)data % kShadowSize, 0);
  uptr s0 = MemToShadow((uptr)&data[0]);
  CHECK_EQ(s0 % kShadowSize, 0);
  for (unsigned i = 1; i < kShadowCell; i++)
    CHECK_EQ(s0, MemToShadow((uptr)&data[i]));
  for (unsigned i = kShadowCell; i < 2*kShadowCell; i++)
    CHECK_EQ(s0 + kShadowSize*kShadowCnt, MemToShadow((uptr)&data[i]));
  for (unsigned i = 2*kShadowCell; i < 3*kShadowCell; i++)
    CHECK_EQ(s0 + 2*kShadowSize*kShadowCnt, MemToShadow((uptr)&data[i]));
}
} // namespace __tsan

Some files were not shown because too many files have changed in this diff Show More