Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
This commit is contained in:
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions

View File

@@ -0,0 +1,12 @@
# Unit-test sources for the TSan runtime; each file exercises one subsystem.
set(TSAN_UNIT_TEST_SOURCES
tsan_clock_test.cc
tsan_flags_test.cc
tsan_mman_test.cc
tsan_mutex_test.cc
tsan_shadow_test.cc
tsan_stack_test.cc
tsan_sync_test.cc
tsan_unit_test_main.cc)
# Build all of the above into a single gtest binary.
add_tsan_unittest(TsanUnitTest
SOURCES ${TSAN_UNIT_TEST_SOURCES})

View File

@@ -0,0 +1,494 @@
//===-- tsan_clock_test.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_clock.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
#include <sys/time.h>
#include <time.h>
namespace __tsan {
ClockCache cache;
// Basic ThreadClock operations: tick() bumps own slot, set() grows the
// vector on demand and later set()s on the same slot do not grow it again.
TEST(Clock, VectorBasic) {
ThreadClock clk(0);
ASSERT_EQ(clk.size(), 1U);
clk.tick();
ASSERT_EQ(clk.size(), 1U);
ASSERT_EQ(clk.get(0), 1U);
// Setting slot 3 grows the clock to cover tids 0..3.
clk.set(&cache, 3, clk.get(3) + 1);
ASSERT_EQ(clk.size(), 4U);
ASSERT_EQ(clk.get(0), 1U);
ASSERT_EQ(clk.get(1), 0U);
ASSERT_EQ(clk.get(2), 0U);
ASSERT_EQ(clk.get(3), 1U);
clk.set(&cache, 3, clk.get(3) + 1);
ASSERT_EQ(clk.get(3), 2U);
}
// SyncClock starts empty; acquire() from an empty SyncClock is a no-op,
// release() materializes the thread's clock into it.
TEST(Clock, ChunkedBasic) {
ThreadClock vector(0);
SyncClock chunked;
ASSERT_EQ(vector.size(), 1U);
ASSERT_EQ(chunked.size(), 0U);
vector.acquire(&cache, &chunked);
ASSERT_EQ(vector.size(), 1U);
ASSERT_EQ(chunked.size(), 0U);
vector.release(&cache, &chunked);
ASSERT_EQ(vector.size(), 1U);
ASSERT_EQ(chunked.size(), 1U);
vector.acq_rel(&cache, &chunked);
ASSERT_EQ(vector.size(), 1U);
ASSERT_EQ(chunked.size(), 1U);
chunked.Reset(&cache);
}
// Clock sizes around the chunk boundaries of the chunked SyncClock storage;
// used to probe growth/iteration edge cases.
static const uptr interesting_sizes[] = {0, 1, 2, 30, 61, 62, 63, 64, 65, 66,
100, 124, 125, 126, 127, 128, 129, 130, 188, 189, 190, 191, 192, 193, 254,
255};
// Range-for iteration over a SyncClock visits exactly `size` elements, in
// slot order, for every interesting size.
TEST(Clock, Iter) {
const uptr n = ARRAY_SIZE(interesting_sizes);
for (uptr fi = 0; fi < n; fi++) {
const uptr size = interesting_sizes[fi];
SyncClock sync;
ThreadClock vector(0);
for (uptr i = 0; i < size; i++)
vector.set(&cache, i, i + 1);
if (size != 0)
vector.release(&cache, &sync);
uptr i = 0;
for (ClockElem &ce : sync) {
ASSERT_LT(i, size);
ASSERT_EQ(sync.get_clean(i), ce.epoch);
i++;
}
ASSERT_EQ(i, size);
sync.Reset(&cache);
}
}
// release() then acquire() propagates one thread's epoch to another thread's
// clock, growing the acquirer's vector to the releaser's size.
TEST(Clock, AcquireRelease) {
ThreadClock vector1(100);
vector1.tick();
SyncClock chunked;
vector1.release(&cache, &chunked);
ASSERT_EQ(chunked.size(), 101U);
ThreadClock vector2(0);
vector2.acquire(&cache, &chunked);
ASSERT_EQ(vector2.size(), 101U);
ASSERT_EQ(vector2.get(0), 0U);
ASSERT_EQ(vector2.get(1), 0U);
ASSERT_EQ(vector2.get(99), 0U);
ASSERT_EQ(vector2.get(100), 1U);
chunked.Reset(&cache);
}
// Acquiring the same SyncClock twice must not crash (exercises the
// fast-path/caching logic in acquire). At least should not crash.
TEST(Clock, RepeatedAcquire) {
ThreadClock thr1(1);
thr1.tick();
ThreadClock thr2(2);
thr2.tick();
SyncClock sync;
thr1.ReleaseStore(&cache, &sync);
thr2.acquire(&cache, &sync);
thr2.acquire(&cache, &sync);
sync.Reset(&cache);
}
// 200 distinct threads each release into one SyncClock; the clock grows by
// one slot per thread and accumulates every thread's epoch. A final fresh
// thread acquiring it must observe all 200 epochs.
TEST(Clock, ManyThreads) {
  SyncClock chunked;
  for (unsigned i = 0; i < 200; i++) {
    ThreadClock vector(0);
    vector.tick();
    vector.set(&cache, i, i + 1);
    vector.release(&cache, &chunked);
    ASSERT_EQ(i + 1, chunked.size());
    vector.acquire(&cache, &chunked);
    ASSERT_EQ(i + 1, vector.size());
  }

  for (unsigned i = 0; i < 200; i++) {
    // %u: i is unsigned (was %d, a format-specifier mismatch).
    printf("i=%u\n", i);
    ASSERT_EQ(i + 1, chunked.get(i));
  }

  ThreadClock vector(1);
  vector.acquire(&cache, &chunked);
  ASSERT_EQ(200U, vector.size());
  for (unsigned i = 0; i < 200; i++)
    ASSERT_EQ(i + 1, vector.get(i));

  chunked.Reset(&cache);
}
// Release/acquire between clocks of different sizes: the SyncClock always
// ends up sized to the larger participant, in either order.
TEST(Clock, DifferentSizes) {
{
ThreadClock vector1(10);
vector1.tick();
ThreadClock vector2(20);
vector2.tick();
// Small releases first, then large: clock grows on the second release.
{
SyncClock chunked;
vector1.release(&cache, &chunked);
ASSERT_EQ(chunked.size(), 11U);
vector2.release(&cache, &chunked);
ASSERT_EQ(chunked.size(), 21U);
chunked.Reset(&cache);
}
// Large releases first: second (smaller) release must not shrink it.
{
SyncClock chunked;
vector2.release(&cache, &chunked);
ASSERT_EQ(chunked.size(), 21U);
vector1.release(&cache, &chunked);
ASSERT_EQ(chunked.size(), 21U);
chunked.Reset(&cache);
}
// Acquire by the larger thread keeps its own (larger) size.
{
SyncClock chunked;
vector1.release(&cache, &chunked);
vector2.acquire(&cache, &chunked);
ASSERT_EQ(vector2.size(), 21U);
chunked.Reset(&cache);
}
// Acquire by the smaller thread grows it to the released size.
{
SyncClock chunked;
vector2.release(&cache, &chunked);
vector1.acquire(&cache, &chunked);
ASSERT_EQ(vector1.size(), 21U);
chunked.Reset(&cache);
}
}
}
// SyncClock growth preserves previously stored epochs and zero-fills the
// newly covered slots, across several size combinations.
TEST(Clock, Growth) {
{
ThreadClock vector(10);
vector.tick();
vector.set(&cache, 5, 42);
SyncClock sync;
vector.release(&cache, &sync);
ASSERT_EQ(sync.size(), 11U);
ASSERT_EQ(sync.get(0), 0ULL);
ASSERT_EQ(sync.get(1), 0ULL);
ASSERT_EQ(sync.get(5), 42ULL);
ASSERT_EQ(sync.get(9), 0ULL);
ASSERT_EQ(sync.get(10), 1ULL);
sync.Reset(&cache);
}
// Growing via a second, larger release keeps the first thread's epoch.
{
ThreadClock vector1(10);
vector1.tick();
ThreadClock vector2(20);
vector2.tick();
SyncClock sync;
vector1.release(&cache, &sync);
vector2.release(&cache, &sync);
ASSERT_EQ(sync.size(), 21U);
ASSERT_EQ(sync.get(0), 0ULL);
ASSERT_EQ(sync.get(10), 1ULL);
ASSERT_EQ(sync.get(19), 0ULL);
ASSERT_EQ(sync.get(20), 1ULL);
sync.Reset(&cache);
}
// Larger-than-one-chunk clock (101 slots) with sparse non-zero entries.
{
ThreadClock vector(100);
vector.tick();
vector.set(&cache, 5, 42);
vector.set(&cache, 90, 84);
SyncClock sync;
vector.release(&cache, &sync);
ASSERT_EQ(sync.size(), 101U);
ASSERT_EQ(sync.get(0), 0ULL);
ASSERT_EQ(sync.get(1), 0ULL);
ASSERT_EQ(sync.get(5), 42ULL);
ASSERT_EQ(sync.get(60), 0ULL);
ASSERT_EQ(sync.get(70), 0ULL);
ASSERT_EQ(sync.get(90), 84ULL);
ASSERT_EQ(sync.get(99), 0ULL);
ASSERT_EQ(sync.get(100), 1ULL);
sync.Reset(&cache);
}
// Growth across a chunk boundary (11 -> 101 slots).
{
ThreadClock vector1(10);
vector1.tick();
ThreadClock vector2(100);
vector2.tick();
SyncClock sync;
vector1.release(&cache, &sync);
vector2.release(&cache, &sync);
ASSERT_EQ(sync.size(), 101U);
ASSERT_EQ(sync.get(0), 0ULL);
ASSERT_EQ(sync.get(10), 1ULL);
ASSERT_EQ(sync.get(99), 0ULL);
ASSERT_EQ(sync.get(100), 1ULL);
sync.Reset(&cache);
}
}
TEST(Clock, Growth2) {
// Test clock growth for every pair of sizes:
const uptr n = ARRAY_SIZE(interesting_sizes);
for (uptr fi = 0; fi < n; fi++) {
for (uptr ti = fi + 1; ti < n; ti++) {
const uptr from = interesting_sizes[fi];
const uptr to = interesting_sizes[ti];
SyncClock sync;
ThreadClock vector(0);
// Populate and release at the smaller size.
for (uptr i = 0; i < from; i++)
vector.set(&cache, i, i + 1);
if (from != 0)
vector.release(&cache, &sync);
ASSERT_EQ(sync.size(), from);
for (uptr i = 0; i < from; i++)
ASSERT_EQ(sync.get(i), i + 1);
// Grow to the larger size and verify all epochs survive.
for (uptr i = 0; i < to; i++)
vector.set(&cache, i, i + 1);
vector.release(&cache, &sync);
ASSERT_EQ(sync.size(), to);
for (uptr i = 0; i < to; i++)
ASSERT_EQ(sync.get(i), i + 1);
// Grow by two more slots; the skipped slot `to` must read as zero.
vector.set(&cache, to + 1, to + 1);
vector.release(&cache, &sync);
ASSERT_EQ(sync.size(), to + 2);
for (uptr i = 0; i < to; i++)
ASSERT_EQ(sync.get(i), i + 1);
ASSERT_EQ(sync.get(to), 0U);
ASSERT_EQ(sync.get(to + 1), to + 1);
sync.Reset(&cache);
}
}
}
// Sizes of the model used by the fuzzer below.
const uptr kThreads = 4;
const uptr kClocks = 4;
// SimpleSyncClock and SimpleThreadClock implement the same thing as
// SyncClock and ThreadClock, but in a very simple way.
struct SimpleSyncClock {
u64 clock[kThreads];
uptr size;
SimpleSyncClock() {
Reset();
}
void Reset() {
size = 0;
for (uptr i = 0; i < kThreads; i++)
clock[i] = 0;
}
// Returns true iff this model clock agrees with the real SyncClock:
// equal epochs on the common prefix, zeros on the size mismatch tail.
bool verify(const SyncClock *other) const {
for (uptr i = 0; i < min(size, other->size()); i++) {
if (clock[i] != other->get(i))
return false;
}
for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
if (i < size && clock[i] != 0)
return false;
if (i < other->size() && other->get(i) != 0)
return false;
}
return true;
}
};
// Reference model of ThreadClock: a plain fixed-size vector clock with the
// textbook acquire/release semantics, used to cross-check the real one.
struct SimpleThreadClock {
u64 clock[kThreads];
uptr size;
unsigned tid;
explicit SimpleThreadClock(unsigned tid) {
this->tid = tid;
size = tid + 1;
for (uptr i = 0; i < kThreads; i++)
clock[i] = 0;
}
void tick() {
clock[tid]++;
}
// acquire: element-wise max with the source sync clock.
void acquire(const SimpleSyncClock *src) {
if (size < src->size)
size = src->size;
for (uptr i = 0; i < kThreads; i++)
clock[i] = max(clock[i], src->clock[i]);
}
// release: element-wise max into the destination sync clock.
void release(SimpleSyncClock *dst) const {
if (dst->size < size)
dst->size = size;
for (uptr i = 0; i < kThreads; i++)
dst->clock[i] = max(dst->clock[i], clock[i]);
}
void acq_rel(SimpleSyncClock *dst) {
acquire(dst);
release(dst);
}
// ReleaseStore: overwrite (not max) the destination with this clock.
void ReleaseStore(SimpleSyncClock *dst) const {
if (dst->size < size)
dst->size = size;
for (uptr i = 0; i < kThreads; i++)
dst->clock[i] = clock[i];
}
// Same agreement check as SimpleSyncClock::verify, against a ThreadClock.
bool verify(const ThreadClock *other) const {
for (uptr i = 0; i < min(size, other->size()); i++) {
if (clock[i] != other->get(i))
return false;
}
for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
if (i < size && clock[i] != 0)
return false;
if (i < other->size() && other->get(i) != 0)
return false;
}
return true;
}
};
// Differential fuzzer: runs the same random sequence of clock operations on
// the reference model (Simple*Clock) and the real Thread/SyncClock, and
// verifies after every step that the two agree. Returns false on divergence;
// when |printing| is set, logs each operation and the clock states.
// Note: all debug printfs use %u since tid/cid/i are unsigned (were %d).
static bool ClockFuzzer(bool printing) {
  // Create kThreads thread clocks.
  SimpleThreadClock *thr0[kThreads];
  ThreadClock *thr1[kThreads];
  unsigned reused[kThreads];
  for (unsigned i = 0; i < kThreads; i++) {
    reused[i] = 0;
    thr0[i] = new SimpleThreadClock(i);
    thr1[i] = new ThreadClock(i, reused[i]);
  }

  // Create kClocks sync clocks.
  SimpleSyncClock *sync0[kClocks];
  SyncClock *sync1[kClocks];
  for (unsigned i = 0; i < kClocks; i++) {
    sync0[i] = new SimpleSyncClock();
    sync1[i] = new SyncClock();
  }

  // Do N random operations (acquire, release, etc) and compare results
  // for SimpleThread/SyncClock and real Thread/SyncClock.
  for (int i = 0; i < 10000; i++) {
    unsigned tid = rand() % kThreads;
    unsigned cid = rand() % kClocks;
    thr0[tid]->tick();
    thr1[tid]->tick();

    switch (rand() % 6) {
    case 0:
      if (printing)
        printf("acquire thr%u <- clk%u\n", tid, cid);
      thr0[tid]->acquire(sync0[cid]);
      thr1[tid]->acquire(&cache, sync1[cid]);
      break;
    case 1:
      if (printing)
        printf("release thr%u -> clk%u\n", tid, cid);
      thr0[tid]->release(sync0[cid]);
      thr1[tid]->release(&cache, sync1[cid]);
      break;
    case 2:
      if (printing)
        printf("acq_rel thr%u <> clk%u\n", tid, cid);
      thr0[tid]->acq_rel(sync0[cid]);
      thr1[tid]->acq_rel(&cache, sync1[cid]);
      break;
    case 3:
      if (printing)
        printf("rel_str thr%u >> clk%u\n", tid, cid);
      thr0[tid]->ReleaseStore(sync0[cid]);
      thr1[tid]->ReleaseStore(&cache, sync1[cid]);
      break;
    case 4:
      if (printing)
        printf("reset clk%u\n", cid);
      sync0[cid]->Reset();
      sync1[cid]->Reset(&cache);
      break;
    case 5:
      // Simulate thread destruction + tid reuse: recreate both clocks for
      // tid with a bumped reuse count and carry the epoch forward.
      if (printing)
        printf("reset thr%u\n", tid);
      u64 epoch = thr0[tid]->clock[tid] + 1;
      reused[tid]++;
      delete thr0[tid];
      thr0[tid] = new SimpleThreadClock(tid);
      thr0[tid]->clock[tid] = epoch;
      delete thr1[tid];
      thr1[tid] = new ThreadClock(tid, reused[tid]);
      thr1[tid]->set(epoch);
      break;
    }

    if (printing) {
      for (unsigned i = 0; i < kThreads; i++) {
        printf("thr%u: ", i);
        thr1[i]->DebugDump(printf);
        printf("\n");
      }
      for (unsigned i = 0; i < kClocks; i++) {
        printf("clk%u: ", i);
        sync1[i]->DebugDump(printf);
        printf("\n");
      }
      printf("\n");
    }

    if (!thr0[tid]->verify(thr1[tid]) || !sync0[cid]->verify(sync1[cid])) {
      if (!printing)
        return false;
      printf("differs with model:\n");
      for (unsigned i = 0; i < kThreads; i++) {
        printf("thr%u: clock=[", i);
        for (uptr j = 0; j < thr0[i]->size; j++)
          printf("%s%llu", j == 0 ? "" : ",", thr0[i]->clock[j]);
        printf("]\n");
      }
      for (unsigned i = 0; i < kClocks; i++) {
        printf("clk%u: clock=[", i);
        for (uptr j = 0; j < sync0[i]->size; j++)
          printf("%s%llu", j == 0 ? "" : ",", sync0[i]->clock[j]);
        printf("]\n");
      }
      return false;
    }
  }

  // SyncClocks must be Reset through the cache before destruction/exit.
  for (unsigned i = 0; i < kClocks; i++) {
    sync1[i]->Reset(&cache);
  }
  return true;
}
// Runs the differential fuzzer with a time-derived seed; on failure, replays
// the identical sequence with logging enabled so the divergence is visible.
TEST(Clock, Fuzzer) {
struct timeval tv;
gettimeofday(&tv, NULL);
// Seed is printed so a failing run can be reproduced by hand.
int seed = tv.tv_sec + tv.tv_usec;
printf("seed=%d\n", seed);
srand(seed);
if (!ClockFuzzer(false)) {
// Redo the test with the same seed, but logging operations.
srand(seed);
ClockFuzzer(true);
ASSERT_TRUE(false);
}
}
} // namespace __tsan

View File

@@ -0,0 +1,55 @@
//===-- tsan_dense_alloc_test.cc ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_dense_alloc.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "gtest/gtest.h"
#include <stdlib.h>
#include <stdint.h>
#include <map>
namespace __tsan {
// Alloc/Map/Free round-trip through DenseSlabAlloc: every allocated index is
// non-zero, maps to stable storage, and survives a cache flush between rounds.
TEST(DenseSlabAlloc, Basic) {
typedef DenseSlabAlloc<int, 128, 128> Alloc;
typedef Alloc::Cache Cache;
typedef Alloc::IndexT IndexT;
const int N = 1000;
Alloc alloc;
Cache cache;
alloc.InitCache(&cache);
IndexT blocks[N];
// Three rounds to exercise reuse of freed blocks.
for (int ntry = 0; ntry < 3; ntry++) {
for (int i = 0; i < N; i++) {
IndexT idx = alloc.Alloc(&cache);
blocks[i] = idx;
EXPECT_NE(idx, 0U);
int *v = alloc.Map(idx);
*v = i;
}
for (int i = 0; i < N; i++) {
IndexT idx = blocks[i];
int *v = alloc.Map(idx);
EXPECT_EQ(*v, i);
alloc.Free(&cache, idx);
}
alloc.FlushCache(&cache);
}
}
} // namespace __tsan

View File

@@ -0,0 +1,174 @@
//===-- tsan_flags_test.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_flags.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
#include <string>
namespace __tsan {
TEST(Flags, Basic) {
// At least should not crash.
Flags f;
InitializeFlags(&f, 0);
InitializeFlags(&f, "");
}
// InitializeFlags with an empty string restores defaults, overwriting any
// prior value (enable_annotations defaults to true).
TEST(Flags, DefaultValues) {
Flags f;
f.enable_annotations = false;
InitializeFlags(&f, "");
EXPECT_EQ(true, f.enable_annotations);
}
// Two complete, mutually distinct flag strings. options1 uses 0/false-style
// values, options2 true-style values; VerifyOptions1/2 below check that a
// parse of each string produced exactly these values.
static const char *options1 =
" enable_annotations=0"
" suppress_equal_stacks=0"
" suppress_equal_addresses=0"
" report_bugs=0"
" report_thread_leaks=0"
" report_destroy_locked=0"
" report_mutex_bugs=0"
" report_signal_unsafe=0"
" report_atomic_races=0"
" force_seq_cst_atomics=0"
" print_benign=0"
" halt_on_error=0"
" atexit_sleep_ms=222"
" profile_memory=qqq"
" flush_memory_ms=444"
" flush_symbolizer_ms=555"
" memory_limit_mb=666"
" stop_on_start=0"
" running_on_valgrind=0"
" history_size=5"
" io_sync=1"
" die_after_fork=true"
"";
static const char *options2 =
" enable_annotations=true"
" suppress_equal_stacks=true"
" suppress_equal_addresses=true"
" report_bugs=true"
" report_thread_leaks=true"
" report_destroy_locked=true"
" report_mutex_bugs=true"
" report_signal_unsafe=true"
" report_atomic_races=true"
" force_seq_cst_atomics=true"
" print_benign=true"
" halt_on_error=true"
" atexit_sleep_ms=123"
" profile_memory=bbbbb"
" flush_memory_ms=234"
" flush_symbolizer_ms=345"
" memory_limit_mb=456"
" stop_on_start=true"
" running_on_valgrind=true"
" history_size=6"
" io_sync=2"
" die_after_fork=false"
"";
// Asserts that *f holds exactly the values encoded in the options1 string.
void VerifyOptions1(Flags *f) {
EXPECT_EQ(f->enable_annotations, 0);
EXPECT_EQ(f->suppress_equal_stacks, 0);
EXPECT_EQ(f->suppress_equal_addresses, 0);
EXPECT_EQ(f->report_bugs, 0);
EXPECT_EQ(f->report_thread_leaks, 0);
EXPECT_EQ(f->report_destroy_locked, 0);
EXPECT_EQ(f->report_mutex_bugs, 0);
EXPECT_EQ(f->report_signal_unsafe, 0);
EXPECT_EQ(f->report_atomic_races, 0);
EXPECT_EQ(f->force_seq_cst_atomics, 0);
EXPECT_EQ(f->print_benign, 0);
EXPECT_EQ(f->halt_on_error, 0);
EXPECT_EQ(f->atexit_sleep_ms, 222);
EXPECT_EQ(f->profile_memory, std::string("qqq"));
EXPECT_EQ(f->flush_memory_ms, 444);
EXPECT_EQ(f->flush_symbolizer_ms, 555);
EXPECT_EQ(f->memory_limit_mb, 666);
EXPECT_EQ(f->stop_on_start, 0);
EXPECT_EQ(f->running_on_valgrind, 0);
EXPECT_EQ(f->history_size, 5);
EXPECT_EQ(f->io_sync, 1);
EXPECT_EQ(f->die_after_fork, true);
}
// Asserts that *f holds exactly the values encoded in the options2 string.
void VerifyOptions2(Flags *f) {
EXPECT_EQ(f->enable_annotations, true);
EXPECT_EQ(f->suppress_equal_stacks, true);
EXPECT_EQ(f->suppress_equal_addresses, true);
EXPECT_EQ(f->report_bugs, true);
EXPECT_EQ(f->report_thread_leaks, true);
EXPECT_EQ(f->report_destroy_locked, true);
EXPECT_EQ(f->report_mutex_bugs, true);
EXPECT_EQ(f->report_signal_unsafe, true);
EXPECT_EQ(f->report_atomic_races, true);
EXPECT_EQ(f->force_seq_cst_atomics, true);
EXPECT_EQ(f->print_benign, true);
EXPECT_EQ(f->halt_on_error, true);
EXPECT_EQ(f->atexit_sleep_ms, 123);
EXPECT_EQ(f->profile_memory, std::string("bbbbb"));
EXPECT_EQ(f->flush_memory_ms, 234);
EXPECT_EQ(f->flush_symbolizer_ms, 345);
EXPECT_EQ(f->memory_limit_mb, 456);
EXPECT_EQ(f->stop_on_start, true);
EXPECT_EQ(f->running_on_valgrind, true);
EXPECT_EQ(f->history_size, 6);
EXPECT_EQ(f->io_sync, 2);
EXPECT_EQ(f->die_after_fork, false);
}
// Hook consumed by the runtime's flag parsing; tests point it at options1
// or options2 to exercise the __tsan_default_options() code path.
static const char *test_default_options;
extern "C" const char *__tsan_default_options() {
return test_default_options;
}
// Flags supplied via __tsan_default_options() alone are fully parsed.
TEST(Flags, ParseDefaultOptions) {
Flags f;
test_default_options = options1;
InitializeFlags(&f, "");
VerifyOptions1(&f);
test_default_options = options2;
InitializeFlags(&f, "");
VerifyOptions2(&f);
}
// Flags supplied via the env-style string argument alone are fully parsed.
TEST(Flags, ParseEnvOptions) {
Flags f;
InitializeFlags(&f, options1);
VerifyOptions1(&f);
InitializeFlags(&f, options2);
VerifyOptions2(&f);
}
// The explicit options string takes priority over __tsan_default_options()
// when both are set (env wins over defaults, in both directions).
TEST(Flags, ParsePriority) {
Flags f;
test_default_options = options2;
InitializeFlags(&f, options1);
VerifyOptions1(&f);
test_default_options = options1;
InitializeFlags(&f, options2);
VerifyOptions2(&f);
}
} // namespace __tsan

View File

@@ -0,0 +1,232 @@
//===-- tsan_mman_test.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include <limits>
#include <sanitizer/allocator_interface.h>
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
namespace __tsan {
// internal_alloc returns distinct, fully writable blocks that internal_free
// releases without error.
TEST(Mman, Internal) {
  char *p = (char*)internal_alloc(MBlockScopedBuf, 10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)internal_alloc(MBlockScopedBuf, 20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  // Touch every byte of both blocks to catch under-sized allocations.
  for (int i = 0; i < 10; i++) {
    p[i] = 42;
  }
  for (int i = 0; i < 20; i++) {
    p2[i] = 42;  // p2 is already char*; the original's (char*) cast was redundant
  }
  internal_free(p);
  internal_free(p2);
}
// user_alloc returns distinct blocks whose usable size matches the request.
TEST(Mman, User) {
ThreadState *thr = cur_thread();
uptr pc = 0;
char *p = (char*)user_alloc(thr, pc, 10);
EXPECT_NE(p, (char*)0);
char *p2 = (char*)user_alloc(thr, pc, 20);
EXPECT_NE(p2, (char*)0);
EXPECT_NE(p2, p);
EXPECT_EQ(10U, user_alloc_usable_size(p));
EXPECT_EQ(20U, user_alloc_usable_size(p2));
user_free(thr, pc, p);
user_free(thr, pc, p2);
}
// user_realloc semantics: NULL/0 cases plus content preservation on both
// grow and shrink.
TEST(Mman, UserRealloc) {
ThreadState *thr = cur_thread();
uptr pc = 0;
{
void *p = user_realloc(thr, pc, 0, 0);
// Realloc(NULL, N) is equivalent to malloc(N), thus must return
// non-NULL pointer.
EXPECT_NE(p, (void*)0);
user_free(thr, pc, p);
}
{
void *p = user_realloc(thr, pc, 0, 100);
EXPECT_NE(p, (void*)0);
memset(p, 0xde, 100);
user_free(thr, pc, p);
}
{
void *p = user_alloc(thr, pc, 100);
EXPECT_NE(p, (void*)0);
memset(p, 0xde, 100);
// Realloc(P, 0) is equivalent to free(P) and returns NULL.
void *p2 = user_realloc(thr, pc, p, 0);
EXPECT_EQ(p2, (void*)0);
}
// Growing realloc preserves the original 100 bytes of content.
{
void *p = user_realloc(thr, pc, 0, 100);
EXPECT_NE(p, (void*)0);
memset(p, 0xde, 100);
void *p2 = user_realloc(thr, pc, p, 10000);
EXPECT_NE(p2, (void*)0);
for (int i = 0; i < 100; i++)
EXPECT_EQ(((char*)p2)[i], (char)0xde);
memset(p2, 0xde, 10000);
user_free(thr, pc, p2);
}
// Shrinking realloc preserves the surviving prefix.
{
void *p = user_realloc(thr, pc, 0, 10000);
EXPECT_NE(p, (void*)0);
memset(p, 0xde, 10000);
void *p2 = user_realloc(thr, pc, p, 10);
EXPECT_NE(p2, (void*)0);
for (int i = 0; i < 10; i++)
EXPECT_EQ(((char*)p2)[i], (char)0xde);
user_free(thr, pc, p2);
}
}
// user_alloc_usable_size: exact size for live blocks, 0 for NULL and for
// pointers the allocator does not own.
TEST(Mman, UsableSize) {
ThreadState *thr = cur_thread();
uptr pc = 0;
char *p = (char*)user_alloc(thr, pc, 10);
char *p2 = (char*)user_alloc(thr, pc, 20);
EXPECT_EQ(0U, user_alloc_usable_size(NULL));
EXPECT_EQ(10U, user_alloc_usable_size(p));
EXPECT_EQ(20U, user_alloc_usable_size(p2));
user_free(thr, pc, p);
user_free(thr, pc, p2);
EXPECT_EQ(0U, user_alloc_usable_size((void*)0x4123));
}
// Sanitizer allocator statistics: a 10-byte allocation is accounted as a
// 16-byte chunk (size-class rounding) and freeing restores the baseline.
TEST(Mman, Stats) {
ThreadState *thr = cur_thread();
uptr alloc0 = __sanitizer_get_current_allocated_bytes();
uptr heap0 = __sanitizer_get_heap_size();
uptr free0 = __sanitizer_get_free_bytes();
uptr unmapped0 = __sanitizer_get_unmapped_bytes();
EXPECT_EQ(10U, __sanitizer_get_estimated_allocated_size(10));
EXPECT_EQ(20U, __sanitizer_get_estimated_allocated_size(20));
EXPECT_EQ(100U, __sanitizer_get_estimated_allocated_size(100));
char *p = (char*)user_alloc(thr, 0, 10);
EXPECT_TRUE(__sanitizer_get_ownership(p));
EXPECT_EQ(10U, __sanitizer_get_allocated_size(p));
EXPECT_EQ(alloc0 + 16, __sanitizer_get_current_allocated_bytes());
EXPECT_GE(__sanitizer_get_heap_size(), heap0);
EXPECT_EQ(free0, __sanitizer_get_free_bytes());
EXPECT_EQ(unmapped0, __sanitizer_get_unmapped_bytes());
user_free(thr, 0, p);
EXPECT_EQ(alloc0, __sanitizer_get_current_allocated_bytes());
EXPECT_GE(__sanitizer_get_heap_size(), heap0);
EXPECT_EQ(free0, __sanitizer_get_free_bytes());
EXPECT_EQ(unmapped0, __sanitizer_get_unmapped_bytes());
}
// user_valloc/user_pvalloc: pvalloc(0) rounds up to one page; overflowing
// page-rounding must abort rather than return a bogus pointer.
TEST(Mman, Valloc) {
ThreadState *thr = cur_thread();
uptr page_size = GetPageSizeCached();
void *p = user_valloc(thr, 0, 100);
EXPECT_NE(p, (void*)0);
user_free(thr, 0, p);
p = user_pvalloc(thr, 0, 100);
EXPECT_NE(p, (void*)0);
user_free(thr, 0, p);
p = user_pvalloc(thr, 0, 0);
EXPECT_NE(p, (void*)0);
EXPECT_EQ(page_size, __sanitizer_get_allocated_size(p));
user_free(thr, 0, p);
EXPECT_DEATH(p = user_pvalloc(thr, 0, (uptr)-(page_size - 1)),
"allocator is terminating the process instead of returning 0");
EXPECT_DEATH(p = user_pvalloc(thr, 0, (uptr)-1),
"allocator is terminating the process instead of returning 0");
}
#if !SANITIZER_DEBUG
// EXPECT_DEATH clones a thread with 4K stack,
// which is overflown by tsan memory accesses functions in debug mode.
// calloc(nmemb, size) whose product overflows size_t must die, not wrap.
// volatile defeats constant folding of the overflowing multiplication.
TEST(Mman, CallocOverflow) {
ThreadState *thr = cur_thread();
uptr pc = 0;
size_t kArraySize = 4096;
volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max();
volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
volatile void *p = NULL;
EXPECT_DEATH(p = user_calloc(thr, pc, kArraySize, kArraySize2),
"allocator is terminating the process instead of returning 0");
EXPECT_EQ(0L, p);
}
// user_memalign: power-of-two alignment succeeds; non-power-of-two dies.
TEST(Mman, Memalign) {
ThreadState *thr = cur_thread();
void *p = user_memalign(thr, 0, 8, 100);
EXPECT_NE(p, (void*)0);
user_free(thr, 0, p);
p = NULL;
EXPECT_DEATH(p = user_memalign(thr, 0, 7, 100),
"allocator is terminating the process instead of returning 0");
EXPECT_EQ(0L, p);
}
// user_posix_memalign: alignment must be both a power of two and a multiple
// of sizeof(void*); violating either constraint dies.
TEST(Mman, PosixMemalign) {
ThreadState *thr = cur_thread();
void *p = NULL;
int res = user_posix_memalign(thr, 0, &p, 8, 100);
EXPECT_NE(p, (void*)0);
EXPECT_EQ(res, 0);
user_free(thr, 0, p);
p = NULL;
// Alignment is not a power of two, although is a multiple of sizeof(void*).
EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 3 * sizeof(p), 100),
"allocator is terminating the process instead of returning 0");
EXPECT_EQ(0L, p);
// Alignment is not a multiple of sizeof(void*), although is a power of 2.
EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 2, 100),
"allocator is terminating the process instead of returning 0");
EXPECT_EQ(0L, p);
}
// user_aligned_alloc: C11 aligned_alloc rules — alignment must be a power
// of two and size a multiple of alignment; either violation dies.
TEST(Mman, AlignedAlloc) {
ThreadState *thr = cur_thread();
void *p = user_aligned_alloc(thr, 0, 8, 64);
EXPECT_NE(p, (void*)0);
user_free(thr, 0, p);
p = NULL;
// Alignment is not a power of 2.
EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 7, 100),
"allocator is terminating the process instead of returning 0");
EXPECT_EQ(0L, p);
// Size is not a multiple of alignment.
EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 8, 100),
"allocator is terminating the process instead of returning 0");
EXPECT_EQ(0L, p);
}
#endif
} // namespace __tsan

View File

@@ -0,0 +1,126 @@
//===-- tsan_mutex_test.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_mutex.h"
#include "gtest/gtest.h"
namespace __tsan {
// Shared state guarded by the mutex under test. Write() mutates all slots
// under an exclusive lock; Read() checks invariants under a read lock; any
// torn update shows up as a CHECK failure on the "all slots equal" invariant.
template<typename MutexType>
class TestData {
public:
explicit TestData(MutexType *mtx)
: mtx_(mtx) {
for (int i = 0; i < kSize; i++)
data_[i] = 0;
}
// Increment every slot under the write lock; all slots must be equal first.
void Write() {
Lock l(mtx_);
T v0 = data_[0];
for (int i = 0; i < kSize; i++) {
CHECK_EQ(data_[i], v0);
data_[i]++;
}
}
// Verify all slots equal under the read lock.
void Read() {
ReadLock l(mtx_);
T v0 = data_[0];
for (int i = 0; i < kSize; i++) {
CHECK_EQ(data_[i], v0);
}
}
// Lock-free busy work between lock acquisitions to vary interleavings.
void Backoff() {
volatile T data[kSize] = {};
for (int i = 0; i < kSize; i++) {
data[i]++;
CHECK_EQ(data[i], 1);
}
}
private:
typedef GenericScopedLock<MutexType> Lock;
static const int kSize = 64;
typedef u64 T;
MutexType *mtx_;
// pad_ separates mtx_ from data_ across cache lines.
char pad_[kCacheLineSize];
T data_[kSize];
};
const int kThreads = 8;
// A reader thread performs a write every kWriteRate-th iteration.
const int kWriteRate = 1024;
#if SANITIZER_DEBUG
const int kIters = 16*1024;
#else
const int kIters = 64*1024;
#endif
template<typename MutexType>
static void *write_mutex_thread(void *param) {
TestData<MutexType> *data = (TestData<MutexType>*)param;
for (int i = 0; i < kIters; i++) {
data->Write();
data->Backoff();
}
return 0;
}
template<typename MutexType>
static void *read_mutex_thread(void *param) {
TestData<MutexType> *data = (TestData<MutexType>*)param;
for (int i = 0; i < kIters; i++) {
if ((i % kWriteRate) == 0)
data->Write();
else
data->Read();
data->Backoff();
}
return 0;
}
// 8 writer threads contend on one Mutex; CHECKs inside TestData fail if
// mutual exclusion is violated.
TEST(Mutex, Write) {
Mutex mtx(MutexTypeAnnotations, StatMtxAnnotations);
TestData<Mutex> data(&mtx);
pthread_t threads[kThreads];
for (int i = 0; i < kThreads; i++)
pthread_create(&threads[i], 0, write_mutex_thread<Mutex>, &data);
for (int i = 0; i < kThreads; i++)
pthread_join(threads[i], 0);
}
// Mixed readers/writers on one Mutex (read-lock path included).
TEST(Mutex, ReadWrite) {
Mutex mtx(MutexTypeAnnotations, StatMtxAnnotations);
TestData<Mutex> data(&mtx);
pthread_t threads[kThreads];
for (int i = 0; i < kThreads; i++)
pthread_create(&threads[i], 0, read_mutex_thread<Mutex>, &data);
for (int i = 0; i < kThreads; i++)
pthread_join(threads[i], 0);
}
// Same writer contention test against SpinMutex.
TEST(Mutex, SpinWrite) {
SpinMutex mtx;
TestData<SpinMutex> data(&mtx);
pthread_t threads[kThreads];
for (int i = 0; i < kThreads; i++)
pthread_create(&threads[i], 0, write_mutex_thread<SpinMutex>, &data);
for (int i = 0; i < kThreads; i++)
pthread_join(threads[i], 0);
}
} // namespace __tsan

View File

@@ -0,0 +1,127 @@
//===-- tsan_mutexset_test.cc ---------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_mutexset.h"
#include "gtest/gtest.h"
namespace __tsan {
static void Expect(const MutexSet &mset, uptr i, u64 id, bool write, u64 epoch,
int count) {
MutexSet::Desc d = mset.Get(i);
EXPECT_EQ(id, d.id);
EXPECT_EQ(write, d.write);
EXPECT_EQ(epoch, d.epoch);
EXPECT_EQ(count, d.count);
}
// Add/Del of distinct mutexes grows and shrinks the set accordingly.
TEST(MutexSet, Basic) {
MutexSet mset;
EXPECT_EQ(mset.Size(), (uptr)0);
mset.Add(1, true, 2);
EXPECT_EQ(mset.Size(), (uptr)1);
Expect(mset, 0, 1, true, 2, 1);
mset.Del(1, true);
EXPECT_EQ(mset.Size(), (uptr)0);
mset.Add(3, true, 4);
mset.Add(5, false, 6);
EXPECT_EQ(mset.Size(), (uptr)2);
Expect(mset, 0, 3, true, 4, 1);
Expect(mset, 1, 5, false, 6, 1);
mset.Del(3, true);
EXPECT_EQ(mset.Size(), (uptr)1);
mset.Del(5, false);
EXPECT_EQ(mset.Size(), (uptr)0);
}
// Re-adding the same mutex bumps its recursion count instead of growing
// the set; Del decrements it symmetrically.
TEST(MutexSet, DoubleAdd) {
MutexSet mset;
mset.Add(1, true, 2);
EXPECT_EQ(mset.Size(), (uptr)1);
Expect(mset, 0, 1, true, 2, 1);
mset.Add(1, true, 2);
EXPECT_EQ(mset.Size(), (uptr)1);
Expect(mset, 0, 1, true, 2, 2);
mset.Del(1, true);
EXPECT_EQ(mset.Size(), (uptr)1);
Expect(mset, 0, 1, true, 2, 1);
mset.Del(1, true);
EXPECT_EQ(mset.Size(), (uptr)0);
}
// Deleting an already-absent mutex is a harmless no-op.
TEST(MutexSet, DoubleDel) {
MutexSet mset;
mset.Add(1, true, 2);
EXPECT_EQ(mset.Size(), (uptr)1);
mset.Del(1, true);
EXPECT_EQ(mset.Size(), (uptr)0);
mset.Del(1, true);
EXPECT_EQ(mset.Size(), (uptr)0);
}
// Remove drops a mutex regardless of its recursion count.
TEST(MutexSet, Remove) {
MutexSet mset;
mset.Add(1, true, 2);
mset.Add(1, true, 2);
mset.Add(3, true, 4);
mset.Add(3, true, 4);
EXPECT_EQ(mset.Size(), (uptr)2);
mset.Remove(1);
EXPECT_EQ(mset.Size(), (uptr)1);
Expect(mset, 0, 3, true, 4, 2);
}
// Fill the set to kMaxSize; re-adding each element bumps counts to 2
// without growing past the cap.
TEST(MutexSet, Full) {
MutexSet mset;
for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
mset.Add(i, true, i + 1);
}
EXPECT_EQ(mset.Size(), MutexSet::kMaxSize);
for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
Expect(mset, i, i, true, i + 1, 1);
}
for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
mset.Add(i, true, i + 1);
}
EXPECT_EQ(mset.Size(), MutexSet::kMaxSize);
for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
Expect(mset, i, i, true, i + 1, 2);
}
}
// Adding to a full set evicts the oldest entry: after the overflow Add,
// slot 0 holds what was last, the new mutex lands in the final slot, and
// the middle entries are unchanged.
TEST(MutexSet, Overflow) {
MutexSet mset;
for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
mset.Add(i, true, i + 1);
mset.Add(i, true, i + 1);
}
mset.Add(100, true, 200);
EXPECT_EQ(mset.Size(), MutexSet::kMaxSize);
for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
if (i == 0)
Expect(mset, i, MutexSet::kMaxSize - 1,
true, MutexSet::kMaxSize, 2);
else if (i == MutexSet::kMaxSize - 1)
Expect(mset, i, 100, true, 200, 1);
else
Expect(mset, i, i, true, i + 1, 2);
}
}
} // namespace __tsan

View File

@@ -0,0 +1,78 @@
//===-- tsan_shadow_test.cc -----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
namespace __tsan {
// FastState/Shadow bit packing: tid/epoch round-trip, epoch increments,
// and the ignore/history-size bit fields set and clear independently.
TEST(Shadow, FastState) {
Shadow s(FastState(11, 22));
EXPECT_EQ(s.tid(), (u64)11);
EXPECT_EQ(s.epoch(), (u64)22);
EXPECT_EQ(s.GetIgnoreBit(), false);
EXPECT_EQ(s.GetFreedAndReset(), false);
EXPECT_EQ(s.GetHistorySize(), 0);
EXPECT_EQ(s.addr0(), (u64)0);
EXPECT_EQ(s.size(), (u64)1);
EXPECT_EQ(s.IsWrite(), true);
s.IncrementEpoch();
EXPECT_EQ(s.epoch(), (u64)23);
s.IncrementEpoch();
EXPECT_EQ(s.epoch(), (u64)24);
s.SetIgnoreBit();
EXPECT_EQ(s.GetIgnoreBit(), true);
s.ClearIgnoreBit();
EXPECT_EQ(s.GetIgnoreBit(), false);
// History size is a 3-bit field: all values 0..7 must round-trip.
for (int i = 0; i < 8; i++) {
s.SetHistorySize(i);
EXPECT_EQ(s.GetHistorySize(), i);
}
s.SetHistorySize(2);
s.ClearHistorySize();
EXPECT_EQ(s.GetHistorySize(), 0);
}
// Globals, stack and heap addresses are all app memory, and MemToShadow
// maps each of them into the shadow range.
TEST(Shadow, Mapping) {
static int global;
int stack;
// malloc(0) still yields a heap pointer inside the app range.
void *heap = malloc(0);
free(heap);
CHECK(IsAppMem((uptr)&global));
CHECK(IsAppMem((uptr)&stack));
CHECK(IsAppMem((uptr)heap));
CHECK(IsShadowMem(MemToShadow((uptr)&global)));
CHECK(IsShadowMem(MemToShadow((uptr)&stack)));
CHECK(IsShadowMem(MemToShadow((uptr)heap)));
}
// All addresses within one shadow cell map to the same shadow address, and
// consecutive cells map to addresses kShadowSize*kShadowCnt apart.
// (Test name "Celling" presumably means cell-granularity mapping.)
TEST(Shadow, Celling) {
u64 aligned_data[4];
char *data = (char*)aligned_data;
CHECK_EQ((uptr)data % kShadowSize, 0);
uptr s0 = MemToShadow((uptr)&data[0]);
CHECK_EQ(s0 % kShadowSize, 0);
for (unsigned i = 1; i < kShadowCell; i++)
CHECK_EQ(s0, MemToShadow((uptr)&data[i]));
for (unsigned i = kShadowCell; i < 2*kShadowCell; i++)
CHECK_EQ(s0 + kShadowSize*kShadowCnt, MemToShadow((uptr)&data[i]));
for (unsigned i = 2*kShadowCell; i < 3*kShadowCell; i++)
CHECK_EQ(s0 + 2*kShadowSize*kShadowCnt, MemToShadow((uptr)&data[i]));
}
} // namespace __tsan

View File

@@ -0,0 +1,95 @@
//===-- tsan_stack_test.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
#include <string.h>
namespace __tsan {
template <typename StackTraceTy>
static void TestStackTrace(StackTraceTy *trace) {
ThreadState thr(0, 0, 0, 0, 0, 0, 0, 0, 0);
uptr stack[128];
thr.shadow_stack = &stack[0];
thr.shadow_stack_pos = &stack[0];
thr.shadow_stack_end = &stack[128];
ObtainCurrentStack(&thr, 0, trace);
EXPECT_EQ(0U, trace->size);
ObtainCurrentStack(&thr, 42, trace);
EXPECT_EQ(1U, trace->size);
EXPECT_EQ(42U, trace->trace[0]);
*thr.shadow_stack_pos++ = 100;
*thr.shadow_stack_pos++ = 101;
ObtainCurrentStack(&thr, 0, trace);
EXPECT_EQ(2U, trace->size);
EXPECT_EQ(100U, trace->trace[0]);
EXPECT_EQ(101U, trace->trace[1]);
ObtainCurrentStack(&thr, 42, trace);
EXPECT_EQ(3U, trace->size);
EXPECT_EQ(100U, trace->trace[0]);
EXPECT_EQ(101U, trace->trace[1]);
EXPECT_EQ(42U, trace->trace[2]);
}
template<typename StackTraceTy>
static void TestTrim(StackTraceTy *trace) {
ThreadState thr(0, 0, 0, 0, 0, 0, 0, 0, 0);
const uptr kShadowStackSize = 2 * kStackTraceMax;
uptr stack[kShadowStackSize];
thr.shadow_stack = &stack[0];
thr.shadow_stack_pos = &stack[0];
thr.shadow_stack_end = &stack[kShadowStackSize];
for (uptr i = 0; i < kShadowStackSize; ++i)
*thr.shadow_stack_pos++ = 100 + i;
ObtainCurrentStack(&thr, 0, trace);
EXPECT_EQ(kStackTraceMax, trace->size);
for (uptr i = 0; i < kStackTraceMax; i++) {
EXPECT_EQ(100 + kStackTraceMax + i, trace->trace[i]);
}
ObtainCurrentStack(&thr, 42, trace);
EXPECT_EQ(kStackTraceMax, trace->size);
for (uptr i = 0; i < kStackTraceMax - 1; i++) {
EXPECT_EQ(101 + kStackTraceMax + i, trace->trace[i]);
}
EXPECT_EQ(42U, trace->trace[kStackTraceMax - 1]);
}
// Basic stack capture into the variable-size trace container.
TEST(StackTrace, BasicVarSize) {
  VarSizeStackTrace trace;
  TestStackTrace(&trace);
}
// Basic stack capture into the fixed-capacity buffered trace container.
TEST(StackTrace, BasicBuffered) {
  BufferedStackTrace trace;
  TestStackTrace(&trace);
}
// Trimming of an over-long stack with the variable-size trace container.
TEST(StackTrace, TrimVarSize) {
  VarSizeStackTrace trace;
  TestTrim(&trace);
}
// Trimming of an over-long stack with the buffered trace container.
TEST(StackTrace, TrimBuffered) {
  BufferedStackTrace trace;
  TestTrim(&trace);
}
} // namespace __tsan

View File

@@ -0,0 +1,123 @@
//===-- tsan_sync_test.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
namespace __tsan {
// Registers a fake heap block with the metamap, verifies the recorded
// size and owner tid, then frees it and verifies the metadata is gone.
TEST(MetaMap, Basic) {
  ThreadState *thr = cur_thread();
  MetaMap *m = &ctx->metamap;
  u64 block[1] = {};  // fake malloc block
  const uptr addr = (uptr)&block[0];
  m->AllocBlock(thr, 0, addr, 1 * sizeof(u64));
  MBlock *meta = m->GetBlock(addr);
  EXPECT_NE(meta, (MBlock*)0);
  EXPECT_EQ(meta->siz, 1 * sizeof(u64));
  EXPECT_EQ(meta->tid, thr->tid);
  // FreeBlock reports the size of the block it released.
  const uptr freed = m->FreeBlock(thr->proc(), addr);
  EXPECT_EQ(freed, 1 * sizeof(u64));
  meta = m->GetBlock(addr);
  EXPECT_EQ(meta, (MBlock*)0);
}
// Two adjacent fake blocks are released by a single FreeRange call that
// spans both; afterwards neither block has metadata.
TEST(MetaMap, FreeRange) {
  ThreadState *thr = cur_thread();
  MetaMap *m = &ctx->metamap;
  u64 block[4] = {};  // fake malloc block
  m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64));
  m->AllocBlock(thr, 0, (uptr)&block[1], 3 * sizeof(u64));
  // Both allocations are visible with their recorded sizes.
  EXPECT_EQ(m->GetBlock((uptr)&block[0])->siz, 1 * sizeof(u64));
  EXPECT_EQ(m->GetBlock((uptr)&block[1])->siz, 3 * sizeof(u64));
  // One range free covering both blocks removes them together.
  m->FreeRange(thr->proc(), (uptr)&block[0], 4 * sizeof(u64));
  EXPECT_EQ(m->GetBlock((uptr)&block[0]), (MBlock*)0);
  EXPECT_EQ(m->GetBlock((uptr)&block[1]), (MBlock*)0);
}
// Creation and teardown of sync objects (SyncVar) attached to a heap block:
// GetIfExistsAndLock does not create, GetOrCreateAndLock does, and freeing
// the owning block removes the attached sync objects.
TEST(MetaMap, Sync) {
  ThreadState *thr = cur_thread();
  MetaMap *m = &ctx->metamap;
  u64 block[4] = {};  // fake malloc block
  m->AllocBlock(thr, 0, (uptr)&block[0], 4 * sizeof(u64));
  // No sync object exists for this address yet.
  SyncVar *s1 = m->GetIfExistsAndLock((uptr)&block[0], true);
  EXPECT_EQ(s1, (SyncVar*)0);
  // Create one; last arg true requests a write lock (paired with Unlock).
  s1 = m->GetOrCreateAndLock(thr, 0, (uptr)&block[0], true);
  EXPECT_NE(s1, (SyncVar*)0);
  EXPECT_EQ(s1->addr, (uptr)&block[0]);
  s1->mtx.Unlock();
  // A second sync object at another address, read-locked this time
  // (last arg false, paired with ReadUnlock).
  SyncVar *s2 = m->GetOrCreateAndLock(thr, 0, (uptr)&block[1], false);
  EXPECT_NE(s2, (SyncVar*)0);
  EXPECT_EQ(s2->addr, (uptr)&block[1]);
  s2->mtx.ReadUnlock();
  // Freeing the block also destroys both sync objects inside it.
  m->FreeBlock(thr->proc(), (uptr)&block[0]);
  s1 = m->GetIfExistsAndLock((uptr)&block[0], true);
  EXPECT_EQ(s1, (SyncVar*)0);
  s2 = m->GetIfExistsAndLock((uptr)&block[1], true);
  EXPECT_EQ(s2, (SyncVar*)0);
  m->OnProcIdle(thr->proc());
}
// MoveMemory (used on realloc-style moves) must transfer both block
// metadata and attached sync objects from the old range to the new one,
// preserving each block's size and each SyncVar's offset within the range.
TEST(MetaMap, MoveMemory) {
  ThreadState *thr = cur_thread();
  MetaMap *m = &ctx->metamap;
  u64 block1[4] = {};  // fake malloc block
  u64 block2[4] = {};  // fake malloc block
  // Two adjacent blocks (3 words + 1 word) inside the source range.
  m->AllocBlock(thr, 0, (uptr)&block1[0], 3 * sizeof(u64));
  m->AllocBlock(thr, 0, (uptr)&block1[3], 1 * sizeof(u64));
  // Two sync objects at offsets 0 and 1 of the source range.
  SyncVar *s1 = m->GetOrCreateAndLock(thr, 0, (uptr)&block1[0], true);
  s1->mtx.Unlock();
  SyncVar *s2 = m->GetOrCreateAndLock(thr, 0, (uptr)&block1[1], true);
  s2->mtx.Unlock();
  m->MoveMemory((uptr)&block1[0], (uptr)&block2[0], 4 * sizeof(u64));
  // The source range no longer has any block metadata...
  MBlock *mb1 = m->GetBlock((uptr)&block1[0]);
  EXPECT_EQ(mb1, (MBlock*)0);
  MBlock *mb2 = m->GetBlock((uptr)&block1[3]);
  EXPECT_EQ(mb2, (MBlock*)0);
  // ...it reappears at the same offsets in the destination, sizes intact.
  mb1 = m->GetBlock((uptr)&block2[0]);
  EXPECT_NE(mb1, (MBlock*)0);
  EXPECT_EQ(mb1->siz, 3 * sizeof(u64));
  mb2 = m->GetBlock((uptr)&block2[3]);
  EXPECT_NE(mb2, (MBlock*)0);
  EXPECT_EQ(mb2->siz, 1 * sizeof(u64));
  // Sync objects likewise moved: gone from the source...
  s1 = m->GetIfExistsAndLock((uptr)&block1[0], true);
  EXPECT_EQ(s1, (SyncVar*)0);
  s2 = m->GetIfExistsAndLock((uptr)&block1[1], true);
  EXPECT_EQ(s2, (SyncVar*)0);
  // ...and present at the corresponding destination addresses.
  s1 = m->GetIfExistsAndLock((uptr)&block2[0], true);
  EXPECT_NE(s1, (SyncVar*)0);
  EXPECT_EQ(s1->addr, (uptr)&block2[0]);
  s1->mtx.Unlock();
  s2 = m->GetIfExistsAndLock((uptr)&block2[1], true);
  EXPECT_NE(s2, (SyncVar*)0);
  EXPECT_EQ(s2->addr, (uptr)&block2[1]);
  s2->mtx.Unlock();
  // Clean up the moved blocks so later tests start from a clean metamap.
  m->FreeRange(thr->proc(), (uptr)&block2[0], 4 * sizeof(u64));
}
// SyncVar::Reset must leave the object in a state that still allows the
// owning block to be freed normally, with the full size reported.
TEST(MetaMap, ResetSync) {
  ThreadState *thr = cur_thread();
  MetaMap *m = &ctx->metamap;
  u64 block[1] = {};  // fake malloc block
  const uptr addr = (uptr)&block[0];
  m->AllocBlock(thr, 0, addr, 1 * sizeof(u64));
  SyncVar *sync = m->GetOrCreateAndLock(thr, 0, addr, true);
  sync->Reset(thr->proc());
  sync->mtx.Unlock();
  const uptr freed_size = m->FreeBlock(thr->proc(), addr);
  EXPECT_EQ(freed_size, 1 * sizeof(u64));
}
} // namespace __tsan

View File

@@ -0,0 +1,25 @@
//===-- tsan_unit_test_main.cc --------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "gtest/gtest.h"
namespace __sanitizer {
// Overrides the sanitizer-runtime hook of the same name; returning true
// presumably stops the runtime from re-exec'ing the test binary at startup
// (NOTE(review): inferred from the name — confirm against the runtime's
// declaration of ReexecDisabled).
bool ReexecDisabled() {
  return true;
}
}
// Test entry point: select gtest's "threadsafe" death-test style (the
// default "fast" style forks directly, which is unsafe in a multi-threaded
// process like a TSan-instrumented test), then run all registered tests.
int main(int argc, char **argv) {
  testing::GTEST_FLAG(death_test_style) = "threadsafe";
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}