Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
Author: Xamarin Public Jenkins (auto-signing)
Date:   2020-01-16 16:38:04 +00:00
Parent: d94e79959b
Commit: 468663ddbb

48518 changed files with 2789335 additions and 61176 deletions

@@ -0,0 +1,33 @@
; Test marking string functions as nobuiltin in efficiency sanitizer.
;
; RUN: opt < %s -esan -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
declare i8* @memchr(i8* %a, i32 %b, i64 %c)
declare i32 @memcmp(i8* %a, i8* %b, i64 %c)
declare i32 @strcmp(i8* %a, i8* %b)
declare i8* @strcpy(i8* %a, i8* %b)
declare i8* @stpcpy(i8* %a, i8* %b)
declare i64 @strlen(i8* %a)
declare i64 @strnlen(i8* %a, i64 %b)
define void @f1(i8* %a, i8* %b) nounwind uwtable {
tail call i8* @memchr(i8* %a, i32 1, i64 12)
tail call i32 @memcmp(i8* %a, i8* %b, i64 12)
tail call i32 @strcmp(i8* %a, i8* %b)
tail call i8* @strcpy(i8* %a, i8* %b)
tail call i8* @stpcpy(i8* %a, i8* %b)
tail call i64 @strlen(i8* %a)
tail call i64 @strnlen(i8* %a, i64 12)
ret void
}
; CHECK: call{{.*}}@memchr{{.*}} #[[ATTR:[0-9]+]]
; CHECK: call{{.*}}@memcmp{{.*}} #[[ATTR]]
; CHECK: call{{.*}}@strcmp{{.*}} #[[ATTR]]
; CHECK: call{{.*}}@strcpy{{.*}} #[[ATTR]]
; CHECK: call{{.*}}@stpcpy{{.*}} #[[ATTR]]
; CHECK: call{{.*}}@strlen{{.*}} #[[ATTR]]
; CHECK: call{{.*}}@strnlen{{.*}} #[[ATTR]]
; CHECK: attributes #[[ATTR]] = { nobuiltin }
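; The pass is expected to mark each recognized string routine call with a
; `nobuiltin` attribute so that later optimizations cannot fold or expand
; the calls as builtins; the runtime's interposed versions must observe
; every call. A sketch of the relevant output for the first call (the
; attribute group number is chosen by the IR printer):
;
;   tail call i8* @memchr(i8* %a, i32 1, i64 12) #0
;   ...
;   attributes #0 = { nobuiltin }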

@@ -0,0 +1,41 @@
; Test the complex GetElementPtr instruction handling in the EfficiencySanitizer
; cache fragmentation tool.
;
; RUN: opt < %s -esan -esan-cache-frag -S | FileCheck %s
; Code from http://llvm.org/docs/LangRef.html#getelementptr-instruction
; struct RT {
; char A;
; int B[10][20];
; char C;
; };
; struct ST {
; int X;
; double Y;
; struct RT Z;
; };
;
; int *foo(struct ST *s) {
; return &s[1].Z.B[5][13];
; }
%struct.RT = type { i8, [10 x [20 x i32]], i8 }
%struct.ST = type { i32, double, %struct.RT }
define i32* @foo(%struct.ST* %s) nounwind uwtable readnone optsize ssp {
entry:
%arrayidx = getelementptr inbounds %struct.ST, %struct.ST* %s, i64 1, i32 2, i32 1, i64 5, i64 13
ret i32* %arrayidx
}
; CHECK: %0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.ST$3$13$3$11", i32 0, i32 3)
; CHECK-NEXT: %1 = add i64 %0, 1
; CHECK-NEXT: store i64 %1, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.ST$3$13$3$11", i32 0, i32 3)
; CHECK-NEXT: %2 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.ST$3$13$3$11", i32 0, i32 2)
; CHECK-NEXT: %3 = add i64 %2, 1
; CHECK-NEXT: store i64 %3, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.ST$3$13$3$11", i32 0, i32 2)
; CHECK-NEXT: %4 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.RT$3$11$14$11", i32 0, i32 1)
; CHECK-NEXT: %5 = add i64 %4, 1
; CHECK-NEXT: store i64 %5, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.RT$3$11$14$11", i32 0, i32 1)
; CHECK-NEXT: %arrayidx = getelementptr inbounds %struct.ST, %struct.ST* %s, i64 1, i32 2, i32 1, i64 5, i64 13
; CHECK-NEXT: ret i32* %arrayidx
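; How the indices of &s[1].Z.B[5][13] map to the updates above: every struct
; gets a counter array with one i64 slot per field plus a trailing slot for
; array accesses. The `i64 1` index (s[1]) bumps the trailing slot (index 3)
; of @"struct.ST$3$13$3$11", the `i32 2` field index (.Z) bumps ST's slot 2,
; and the `i32 1` field index (.B) bumps slot 1 of @"struct.RT$3$11$14$11";
; the trailing `i64 5, i64 13` subscripts select within the plain array B
; and add no struct-field counter of their own.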

@@ -0,0 +1,133 @@
; Test basic EfficiencySanitizer struct field count instrumentation without
; auxiliary field information (-esan-aux-field-info=false)
;
; RUN: opt < %s -esan -esan-cache-frag -esan-aux-field-info=false -S | FileCheck %s
%struct.A = type { i32, i32 }
%union.U = type { double }
%struct.C = type { %struct.anon, %union.anon, [10 x i8] }
%struct.anon = type { i32, i32 }
%union.anon = type { double }
; CHECK: @0 = private unnamed_addr constant [8 x i8] c"<stdin>\00", align 1
; CHECK-NEXT: @1 = private unnamed_addr constant [17 x i8] c"struct.A$2$11$11\00", align 1
; CHECK-NEXT: @"struct.A$2$11$11" = weak global [3 x i64] zeroinitializer
; CHECK-NEXT: @2 = private unnamed_addr constant [12 x i8] c"union.U$1$3\00", align 1
; CHECK-NEXT: @"union.U$1$3" = weak global [2 x i64] zeroinitializer
; CHECK-NEXT: @3 = private unnamed_addr constant [20 x i8] c"struct.C$3$14$13$13\00", align 1
; CHECK-NEXT: @"struct.C$3$14$13$13" = weak global [4 x i64] zeroinitializer
; CHECK-NEXT: @4 = private unnamed_addr constant [20 x i8] c"struct.anon$2$11$11\00", align 1
; CHECK-NEXT: @"struct.anon$2$11$11" = weak global [3 x i64] zeroinitializer
; CHECK-NEXT: @5 = private unnamed_addr constant [15 x i8] c"union.anon$1$3\00", align 1
; CHECK-NEXT: @"union.anon$1$3" = weak global [2 x i64] zeroinitializer
; CHECK-NEXT: @6 = internal global [5 x { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }] [{ i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([17 x i8], [17 x i8]* @1, i32 0, i32 0), i32 8, i32 2, i32* null, i32* null, i8** null, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 2) }, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([12 x i8], [12 x i8]* @2, i32 0, i32 0), i32 8, i32 1, i32* null, i32* null, i8** null, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @"union.U$1$3", i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @"union.U$1$3", i32 0, i32 1) }, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([20 x i8], [20 x i8]* @3, i32 0, i32 0), i32 32, i32 3, i32* null, i32* null, i8** null, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3) }, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([20 x i8], [20 x i8]* @4, i32 0, i32 0), i32 8, i32 2, i32* null, i32* null, i8** null, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 2) }, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* } { i8* getelementptr inbounds ([15 x i8], [15 x i8]* @5, i32 0, i32 0), i32 8, i32 1, i32* null, i32* null, i8** null, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @"union.anon$1$3", i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @"union.anon$1$3", i32 0, i32 1) }]
; CHECK-NEXT: @7 = internal constant { i8*, i32, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }* } { i8* getelementptr inbounds ([8 x i8], [8 x i8]* @0, i32 0, i32 0), i32 5, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }* getelementptr inbounds ([5 x { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }], [5 x { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }]* @6, i32 0, i32 0) }
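; Reading the generated globals: each struct type gets a private name string
; and a weak counter array with one i64 slot per field plus one trailing
; slot for array accesses (e.g. [3 x i64] for the two-field %struct.A).
; @6 appears to be the per-struct descriptor table; each entry carries the
; name, the size in bytes (8 for %struct.A, 32 for %struct.C), the field
; count, three null auxiliary arrays (field offsets, sizes, and type names
; are dropped because -esan-aux-field-info=false), and begin/end pointers
; into the counter array. @7 bundles the module name (@0), the struct count
; (5), and the descriptor table, and is handed to @__esan_init below.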
define i32 @main() {
entry:
%a = alloca %struct.A, align 4
%u = alloca %union.U, align 8
%c = alloca [2 x %struct.C], align 16
%k = alloca %struct.A*, align 8
%x = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 0
%y = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 1
%f = bitcast %union.U* %u to float*
%d = bitcast %union.U* %u to double*
%arrayidx = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
%cs = getelementptr inbounds %struct.C, %struct.C* %arrayidx, i32 0, i32 0
%x1 = getelementptr inbounds %struct.anon, %struct.anon* %cs, i32 0, i32 0
%arrayidx2 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 1
%cs3 = getelementptr inbounds %struct.C, %struct.C* %arrayidx2, i32 0, i32 0
%y4 = getelementptr inbounds %struct.anon, %struct.anon* %cs3, i32 0, i32 1
%arrayidx5 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
%cu = getelementptr inbounds %struct.C, %struct.C* %arrayidx5, i32 0, i32 1
%f6 = bitcast %union.anon* %cu to float*
%arrayidx7 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 1
%cu8 = getelementptr inbounds %struct.C, %struct.C* %arrayidx7, i32 0, i32 1
%d9 = bitcast %union.anon* %cu8 to double*
%arrayidx10 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
%c11 = getelementptr inbounds %struct.C, %struct.C* %arrayidx10, i32 0, i32 2
%arrayidx12 = getelementptr inbounds [10 x i8], [10 x i8]* %c11, i64 0, i64 2
%k1 = load %struct.A*, %struct.A** %k, align 8
%arrayidx13 = getelementptr inbounds %struct.A, %struct.A* %k1, i64 0
ret i32 0
}
; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
; CHECK: @llvm.global_dtors = {{.*}}@esan.module_dtor
; CHECK: %a = alloca %struct.A, align 4
; CHECK-NEXT: %u = alloca %union.U, align 8
; CHECK-NEXT: %c = alloca [2 x %struct.C], align 16
; CHECK-NEXT: %k = alloca %struct.A*, align 8
; CHECK-NEXT: %0 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 0)
; CHECK-NEXT: %1 = add i64 %0, 1
; CHECK-NEXT: store i64 %1, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 0)
; CHECK-NEXT: %x = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 0
; CHECK-NEXT: %2 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 1)
; CHECK-NEXT: %3 = add i64 %2, 1
; CHECK-NEXT: store i64 %3, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.A$2$11$11", i32 0, i32 1)
; CHECK-NEXT: %y = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 1
; CHECK-NEXT: %f = bitcast %union.U* %u to float*
; CHECK-NEXT: %d = bitcast %union.U* %u to double*
; CHECK-NEXT: %4 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
; CHECK-NEXT: %5 = add i64 %4, 1
; CHECK-NEXT: store i64 %5, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
; CHECK-NEXT: %arrayidx = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
; CHECK-NEXT: %6 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0)
; CHECK-NEXT: %7 = add i64 %6, 1
; CHECK-NEXT: store i64 %7, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0)
; CHECK-NEXT: %cs = getelementptr inbounds %struct.C, %struct.C* %arrayidx, i32 0, i32 0
; CHECK-NEXT: %8 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 0)
; CHECK-NEXT: %9 = add i64 %8, 1
; CHECK-NEXT: store i64 %9, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 0)
; CHECK-NEXT: %x1 = getelementptr inbounds %struct.anon, %struct.anon* %cs, i32 0, i32 0
; CHECK-NEXT: %10 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
; CHECK-NEXT: %11 = add i64 %10, 1
; CHECK-NEXT: store i64 %11, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
; CHECK-NEXT: %arrayidx2 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 1
; CHECK-NEXT: %12 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0)
; CHECK-NEXT: %13 = add i64 %12, 1
; CHECK-NEXT: store i64 %13, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 0)
; CHECK-NEXT: %cs3 = getelementptr inbounds %struct.C, %struct.C* %arrayidx2, i32 0, i32 0
; CHECK-NEXT: %14 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 1)
; CHECK-NEXT: %15 = add i64 %14, 1
; CHECK-NEXT: store i64 %15, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @"struct.anon$2$11$11", i32 0, i32 1)
; CHECK-NEXT: %y4 = getelementptr inbounds %struct.anon, %struct.anon* %cs3, i32 0, i32 1
; CHECK-NEXT: %16 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
; CHECK-NEXT: %17 = add i64 %16, 1
; CHECK-NEXT: store i64 %17, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
; CHECK-NEXT: %arrayidx5 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
; CHECK-NEXT: %18 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 1)
; CHECK-NEXT: %19 = add i64 %18, 1
; CHECK-NEXT: store i64 %19, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 1)
; CHECK-NEXT: %cu = getelementptr inbounds %struct.C, %struct.C* %arrayidx5, i32 0, i32 1
; CHECK-NEXT: %f6 = bitcast %union.anon* %cu to float*
; CHECK-NEXT: %20 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
; CHECK-NEXT: %21 = add i64 %20, 1
; CHECK-NEXT: store i64 %21, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
; CHECK-NEXT: %arrayidx7 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 1
; CHECK-NEXT: %22 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 1)
; CHECK-NEXT: %23 = add i64 %22, 1
; CHECK-NEXT: store i64 %23, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 1)
; CHECK-NEXT: %cu8 = getelementptr inbounds %struct.C, %struct.C* %arrayidx7, i32 0, i32 1
; CHECK-NEXT: %d9 = bitcast %union.anon* %cu8 to double*
; CHECK-NEXT: %24 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
; CHECK-NEXT: %25 = add i64 %24, 1
; CHECK-NEXT: store i64 %25, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 3)
; CHECK-NEXT: %arrayidx10 = getelementptr inbounds [2 x %struct.C], [2 x %struct.C]* %c, i64 0, i64 0
; CHECK-NEXT: %26 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 2)
; CHECK-NEXT: %27 = add i64 %26, 1
; CHECK-NEXT: store i64 %27, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @"struct.C$3$14$13$13", i32 0, i32 2)
; CHECK-NEXT: %c11 = getelementptr inbounds %struct.C, %struct.C* %arrayidx10, i32 0, i32 2
; CHECK-NEXT: %arrayidx12 = getelementptr inbounds [10 x i8], [10 x i8]* %c11, i64 0, i64 2
; CHECK-NEXT: %k1 = load %struct.A*, %struct.A** %k, align 8
; CHECK-NEXT: %arrayidx13 = getelementptr inbounds %struct.A, %struct.A* %k1, i64 0
; CHECK-NEXT: ret i32 0
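; Note what is deliberately not instrumented above: the union accesses go
; through plain bitcasts (%f, %d, %f6, %d9) rather than field GEPs, so the
; union counter arrays never change, and the single-index GEP %arrayidx13
; (pointer arithmetic on %struct.A* with no field selection) likewise gets
; no counter update.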
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Top-level:
; CHECK: define internal void @esan.module_ctor()
; CHECK: call void @__esan_init(i32 1, i8* bitcast ({ i8*, i32, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }* }* @7 to i8*))
; CHECK: define internal void @esan.module_dtor()
; CHECK: call void @__esan_exit(i8* bitcast ({ i8*, i32, { i8*, i32, i32, i32*, i32*, i8**, i64*, i64* }* }* @7 to i8*))

@@ -0,0 +1,275 @@
; Test basic EfficiencySanitizer working set instrumentation.
;
; RUN: opt < %s -esan -esan-working-set -S | FileCheck %s
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Intra-cache-line
define i8 @aligned1(i8* %a) {
entry:
%tmp1 = load i8, i8* %a, align 1
ret i8 %tmp1
; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
; CHECK: %0 = ptrtoint i8* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i8, i8* %a, align 1
; CHECK-NEXT: ret i8 %tmp1
}
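; The inline fastpath above is the working-set shadow lookup: the address is
; masked to its low 44 bits (17592186044415 == 0xfffffffffff), offset by
; what appears to be the shadow base (1337006139375616 == 0x4c00000000000),
; and shifted right by 6 so each 64-byte cache line maps to one shadow byte.
; That byte is tested against 0x81 (-127 as an i8, i.e. bits 0 and 7); only
; when the two bits are not already set does the branch take the store path
; to mark the line, keeping the common already-marked case read-only. The
; same sequence repeats for every access below that is assumed intra-line.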
define i16 @aligned2(i16* %a) {
entry:
%tmp1 = load i16, i16* %a, align 2
ret i16 %tmp1
; CHECK: %0 = ptrtoint i16* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i16, i16* %a, align 2
; CHECK-NEXT: ret i16 %tmp1
}
define i32 @aligned4(i32* %a) {
entry:
%tmp1 = load i32, i32* %a, align 4
ret i32 %tmp1
; CHECK: %0 = ptrtoint i32* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i32, i32* %a, align 4
; CHECK-NEXT: ret i32 %tmp1
}
define i64 @aligned8(i64* %a) {
entry:
%tmp1 = load i64, i64* %a, align 8
ret i64 %tmp1
; CHECK: %0 = ptrtoint i64* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i64, i64* %a, align 8
; CHECK-NEXT: ret i64 %tmp1
}
define i128 @aligned16(i128* %a) {
entry:
%tmp1 = load i128, i128* %a, align 16
ret i128 %tmp1
; CHECK: %0 = ptrtoint i128* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i128, i128* %a, align 16
; CHECK-NEXT: ret i128 %tmp1
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Not guaranteed to be intra-cache-line, but our defaults are to
; assume they are:
define i16 @unaligned2(i16* %a) {
entry:
%tmp1 = load i16, i16* %a, align 1
ret i16 %tmp1
; CHECK: %0 = ptrtoint i16* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i16, i16* %a, align 1
; CHECK-NEXT: ret i16 %tmp1
}
define i32 @unaligned4(i32* %a) {
entry:
%tmp1 = load i32, i32* %a, align 2
ret i32 %tmp1
; CHECK: %0 = ptrtoint i32* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i32, i32* %a, align 2
; CHECK-NEXT: ret i32 %tmp1
}
define i64 @unaligned8(i64* %a) {
entry:
%tmp1 = load i64, i64* %a, align 4
ret i64 %tmp1
; CHECK: %0 = ptrtoint i64* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i64, i64* %a, align 4
; CHECK-NEXT: ret i64 %tmp1
}
define i128 @unaligned16(i128* %a) {
entry:
%tmp1 = load i128, i128* %a, align 8
ret i128 %tmp1
; CHECK: %0 = ptrtoint i128* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i128, i128* %a, align 8
; CHECK-NEXT: ret i128 %tmp1
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Ensure that esan converts intrinsics to calls:
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
define void @memCpyTest(i8* nocapture %x, i8* nocapture %y) {
entry:
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
ret void
; CHECK: define void @memCpyTest
; CHECK: call i8* @memcpy
; CHECK: ret void
}
define void @memMoveTest(i8* nocapture %x, i8* nocapture %y) {
entry:
tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
ret void
; CHECK: define void @memMoveTest
; CHECK: call i8* @memmove
; CHECK: ret void
}
define void @memSetTest(i8* nocapture %x) {
entry:
tail call void @llvm.memset.p0i8.i64(i8* %x, i8 77, i64 16, i32 4, i1 false)
ret void
; CHECK: define void @memSetTest
; CHECK: call i8* @memset
; CHECK: ret void
}
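; Rationale for the conversions checked above: the memory intrinsics have no
; call the esan runtime can intercept, so the pass rewrites them into plain
; libc calls that its interposers observe, roughly (a sketch):
;
;   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
;     ==>
;   call i8* @memcpy(i8* %x, i8* %y, i64 16)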
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Ensure that esan doesn't convert element atomic memory intrinsics to
; calls.
declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
define void @elementAtomic_memCpyTest(i8* nocapture %x, i8* nocapture %y) {
; CHECK-LABEL: elementAtomic_memCpyTest
; CHECK-NEXT: tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
; CHECK-NEXT: ret void
tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
ret void
}
define void @elementAtomic_memMoveTest(i8* nocapture %x, i8* nocapture %y) {
; CHECK-LABEL: elementAtomic_memMoveTest
; CHECK-NEXT: tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
; CHECK-NEXT: ret void
tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
ret void
}
define void @elementAtomic_memSetTest(i8* nocapture %x) {
; CHECK-LABEL: elementAtomic_memSetTest
; CHECK-NEXT: tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1)
; CHECK-NEXT: ret void
tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1)
ret void
}
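; The CHECK-NEXT lines above verify that the element-wise atomic intrinsics
; pass through untouched, presumably because rewriting them into ordinary
; libc calls would silently drop their per-element atomicity guarantee.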
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Top-level:
; CHECK: define internal void @esan.module_ctor()
; CHECK: call void @__esan_init(i32 2, i8* null)
; CHECK: define internal void @esan.module_dtor()
; CHECK: call void @__esan_exit(i8* null)

@@ -0,0 +1,291 @@
; Test basic EfficiencySanitizer slowpath instrumentation.
;
; RUN: opt < %s -esan -esan-working-set -esan-instrument-fastpath=false -S | FileCheck %s
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Aligned loads:
define i8 @loadAligned1(i8* %a) {
entry:
%tmp1 = load i8, i8* %a, align 1
ret i8 %tmp1
; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
; CHECK: call void @__esan_aligned_load1(i8* %a)
; CHECK-NEXT: %tmp1 = load i8, i8* %a, align 1
; CHECK-NEXT: ret i8 %tmp1
}
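; With the fastpath disabled, every access becomes an out-of-line callout
; __esan_{aligned,unaligned}_{load,store}{1,2,4,8,16}: the pointer is passed
; as i8* (after a bitcast for anything wider than i8) and the access size is
; encoded in the callee name rather than passed as an argument.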
define i16 @loadAligned2(i16* %a) {
entry:
%tmp1 = load i16, i16* %a, align 2
ret i16 %tmp1
; CHECK: %0 = bitcast i16* %a to i8*
; CHECK-NEXT: call void @__esan_aligned_load2(i8* %0)
; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 2
; CHECK-NEXT: ret i16 %tmp1
}
define i32 @loadAligned4(i32* %a) {
entry:
%tmp1 = load i32, i32* %a, align 4
ret i32 %tmp1
; CHECK: %0 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__esan_aligned_load4(i8* %0)
; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 4
; CHECK-NEXT: ret i32 %tmp1
}
define i64 @loadAligned8(i64* %a) {
entry:
%tmp1 = load i64, i64* %a, align 8
ret i64 %tmp1
; CHECK: %0 = bitcast i64* %a to i8*
; CHECK-NEXT: call void @__esan_aligned_load8(i8* %0)
; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 8
; CHECK-NEXT: ret i64 %tmp1
}
define i128 @loadAligned16(i128* %a) {
entry:
%tmp1 = load i128, i128* %a, align 16
ret i128 %tmp1
; CHECK: %0 = bitcast i128* %a to i8*
; CHECK-NEXT: call void @__esan_aligned_load16(i8* %0)
; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 16
; CHECK-NEXT: ret i128 %tmp1
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Aligned stores:
define void @storeAligned1(i8* %a) {
entry:
store i8 1, i8* %a, align 1
ret void
; CHECK: call void @__esan_aligned_store1(i8* %a)
; CHECK-NEXT: store i8 1, i8* %a, align 1
; CHECK-NEXT: ret void
}
define void @storeAligned2(i16* %a) {
entry:
store i16 1, i16* %a, align 2
ret void
; CHECK: %0 = bitcast i16* %a to i8*
; CHECK-NEXT: call void @__esan_aligned_store2(i8* %0)
; CHECK-NEXT: store i16 1, i16* %a, align 2
; CHECK-NEXT: ret void
}
define void @storeAligned4(i32* %a) {
entry:
store i32 1, i32* %a, align 4
ret void
; CHECK: %0 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__esan_aligned_store4(i8* %0)
; CHECK-NEXT: store i32 1, i32* %a, align 4
; CHECK-NEXT: ret void
}
define void @storeAligned8(i64* %a) {
entry:
store i64 1, i64* %a, align 8
ret void
; CHECK: %0 = bitcast i64* %a to i8*
; CHECK-NEXT: call void @__esan_aligned_store8(i8* %0)
; CHECK-NEXT: store i64 1, i64* %a, align 8
; CHECK-NEXT: ret void
}
define void @storeAligned16(i128* %a) {
entry:
store i128 1, i128* %a, align 16
ret void
; CHECK: %0 = bitcast i128* %a to i8*
; CHECK-NEXT: call void @__esan_aligned_store16(i8* %0)
; CHECK-NEXT: store i128 1, i128* %a, align 16
; CHECK-NEXT: ret void
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Unaligned loads:
define i16 @loadUnaligned2(i16* %a) {
entry:
%tmp1 = load i16, i16* %a, align 1
ret i16 %tmp1
; CHECK: %0 = bitcast i16* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_load2(i8* %0)
; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 1
; CHECK-NEXT: ret i16 %tmp1
}
define i32 @loadUnaligned4(i32* %a) {
entry:
%tmp1 = load i32, i32* %a, align 1
ret i32 %tmp1
; CHECK: %0 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_load4(i8* %0)
; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 1
; CHECK-NEXT: ret i32 %tmp1
}
define i64 @loadUnaligned8(i64* %a) {
entry:
%tmp1 = load i64, i64* %a, align 1
ret i64 %tmp1
; CHECK: %0 = bitcast i64* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_load8(i8* %0)
; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 1
; CHECK-NEXT: ret i64 %tmp1
}
define i128 @loadUnaligned16(i128* %a) {
entry:
%tmp1 = load i128, i128* %a, align 1
ret i128 %tmp1
; CHECK: %0 = bitcast i128* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_load16(i8* %0)
; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 1
; CHECK-NEXT: ret i128 %tmp1
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Unaligned stores:
define void @storeUnaligned2(i16* %a) {
entry:
store i16 1, i16* %a, align 1
ret void
; CHECK: %0 = bitcast i16* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_store2(i8* %0)
; CHECK-NEXT: store i16 1, i16* %a, align 1
; CHECK-NEXT: ret void
}
define void @storeUnaligned4(i32* %a) {
entry:
store i32 1, i32* %a, align 1
ret void
; CHECK: %0 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_store4(i8* %0)
; CHECK-NEXT: store i32 1, i32* %a, align 1
; CHECK-NEXT: ret void
}
define void @storeUnaligned8(i64* %a) {
entry:
store i64 1, i64* %a, align 1
ret void
; CHECK: %0 = bitcast i64* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_store8(i8* %0)
; CHECK-NEXT: store i64 1, i64* %a, align 1
; CHECK-NEXT: ret void
}
define void @storeUnaligned16(i128* %a) {
entry:
store i128 1, i128* %a, align 1
ret void
; CHECK: %0 = bitcast i128* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_store16(i8* %0)
; CHECK-NEXT: store i128 1, i128* %a, align 1
; CHECK-NEXT: ret void
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Unusual loads and stores:
define x86_fp80 @loadUnalignedFP(x86_fp80* %a) {
entry:
%tmp1 = load x86_fp80, x86_fp80* %a, align 1
ret x86_fp80 %tmp1
; CHECK: %0 = bitcast x86_fp80* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_loadN(i8* %0, i64 10)
; CHECK-NEXT: %tmp1 = load x86_fp80, x86_fp80* %a, align 1
; CHECK-NEXT: ret x86_fp80 %tmp1
}
define void @storeUnalignedFP(x86_fp80* %a) {
entry:
store x86_fp80 0xK00000000000000000000, x86_fp80* %a, align 1
ret void
; CHECK: %0 = bitcast x86_fp80* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_storeN(i8* %0, i64 10)
; CHECK-NEXT: store x86_fp80 0xK00000000000000000000, x86_fp80* %a, align 1
; CHECK-NEXT: ret void
}
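; x86_fp80 occupies 10 bytes, a size with no dedicated callout, so esan
; falls back to the generic __esan_unaligned_loadN/__esan_unaligned_storeN
; entry points, which take the access size as an explicit i64 argument.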
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Ensure that esan converts memory intrinsics to calls:
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
define void @memCpyTest(i8* nocapture %x, i8* nocapture %y) {
entry:
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
ret void
; CHECK: define void @memCpyTest
; CHECK: call i8* @memcpy
; CHECK: ret void
}
define void @memMoveTest(i8* nocapture %x, i8* nocapture %y) {
entry:
tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
ret void
; CHECK: define void @memMoveTest
; CHECK: call i8* @memmove
; CHECK: ret void
}
define void @memSetTest(i8* nocapture %x) {
entry:
tail call void @llvm.memset.p0i8.i64(i8* %x, i8 77, i64 16, i32 4, i1 false)
ret void
; CHECK: define void @memSetTest
; CHECK: call i8* @memset
; CHECK: ret void
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Ensure that esan doesn't convert element atomic memory intrinsics to
; calls.
declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
define void @elementAtomic_memCpyTest(i8* nocapture %x, i8* nocapture %y) {
; CHECK-LABEL: elementAtomic_memCpyTest
; CHECK-NEXT: tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
; CHECK-NEXT: ret void
tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
ret void
}
define void @elementAtomic_memMoveTest(i8* nocapture %x, i8* nocapture %y) {
; CHECK-LABEL: elementAtomic_memMoveTest
; CHECK-NEXT: tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
; CHECK-NEXT: ret void
tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
ret void
}
define void @elementAtomic_memSetTest(i8* nocapture %x) {
; CHECK-LABEL: elementAtomic_memSetTest
; CHECK-NEXT: tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1)
; CHECK-NEXT: ret void
tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1)
ret void
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Top-level:
; CHECK: define internal void @esan.module_ctor()
; CHECK: call void @__esan_init(i32 2, i8* null)
; CHECK: define internal void @esan.module_dtor()
; CHECK: call void @__esan_exit(i8* null)

@@ -0,0 +1,156 @@
; Test EfficiencySanitizer working set instrumentation without the default
; assumption that under-aligned accesses stay within one cache line.
;
; RUN: opt < %s -esan -esan-working-set -esan-assume-intra-cache-line=0 -S | FileCheck %s
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Intra-cache-line
define i8 @aligned1(i8* %a) {
entry:
%tmp1 = load i8, i8* %a, align 1
ret i8 %tmp1
; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
; CHECK: %0 = ptrtoint i8* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i8, i8* %a, align 1
; CHECK-NEXT: ret i8 %tmp1
}
define i16 @aligned2(i16* %a) {
entry:
%tmp1 = load i16, i16* %a, align 2
ret i16 %tmp1
; CHECK: %0 = ptrtoint i16* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i16, i16* %a, align 2
; CHECK-NEXT: ret i16 %tmp1
}
define i32 @aligned4(i32* %a) {
entry:
%tmp1 = load i32, i32* %a, align 4
ret i32 %tmp1
; CHECK: %0 = ptrtoint i32* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i32, i32* %a, align 4
; CHECK-NEXT: ret i32 %tmp1
}
define i64 @aligned8(i64* %a) {
entry:
%tmp1 = load i64, i64* %a, align 8
ret i64 %tmp1
; CHECK: %0 = ptrtoint i64* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i64, i64* %a, align 8
; CHECK-NEXT: ret i64 %tmp1
}
define i128 @aligned16(i128* %a) {
entry:
%tmp1 = load i128, i128* %a, align 16
ret i128 %tmp1
; CHECK: %0 = ptrtoint i128* %a to i64
; CHECK-NEXT: %1 = and i64 %0, 17592186044415
; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
; CHECK-NEXT: %3 = lshr i64 %2, 6
; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT: %5 = load i8, i8* %4
; CHECK-NEXT: %6 = and i8 %5, -127
; CHECK-NEXT: %7 = icmp ne i8 %6, -127
; CHECK-NEXT: br i1 %7, label %8, label %11
; CHECK: %9 = or i8 %5, -127
; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT: store i8 %9, i8* %10
; CHECK-NEXT: br label %11
; CHECK: %tmp1 = load i128, i128* %a, align 16
; CHECK-NEXT: ret i128 %tmp1
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Not guaranteed to be intra-cache-line
define i16 @unaligned2(i16* %a) {
entry:
%tmp1 = load i16, i16* %a, align 1
ret i16 %tmp1
; CHECK: %0 = bitcast i16* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_load2(i8* %0)
; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 1
; CHECK-NEXT: ret i16 %tmp1
}
define i32 @unaligned4(i32* %a) {
entry:
%tmp1 = load i32, i32* %a, align 2
ret i32 %tmp1
; CHECK: %0 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_load4(i8* %0)
; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 2
; CHECK-NEXT: ret i32 %tmp1
}
define i64 @unaligned8(i64* %a) {
entry:
%tmp1 = load i64, i64* %a, align 4
ret i64 %tmp1
; CHECK: %0 = bitcast i64* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_load8(i8* %0)
; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 4
; CHECK-NEXT: ret i64 %tmp1
}
define i128 @unaligned16(i128* %a) {
entry:
%tmp1 = load i128, i128* %a, align 8
ret i128 %tmp1
; CHECK: %0 = bitcast i128* %a to i8*
; CHECK-NEXT: call void @__esan_unaligned_load16(i8* %0)
; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 8
; CHECK-NEXT: ret i128 %tmp1
}
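; In contrast to the default-mode test above, -esan-assume-intra-cache-line=0
; keeps the inline shadow fastpath only for naturally aligned accesses that
; provably stay within one cache line; the under-aligned loads, which may
; straddle a line, are routed to the __esan_unaligned_load{2,4,8,16}
; callouts instead.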