Imported Upstream version 5.18.0.167

Former-commit-id: 289509151e0fee68a1b591a20c9f109c3c789d3a
Committed by Xamarin Public Jenkins (auto-signing), 2018-10-20 08:25:10 +00:00
parent e19d552987
commit b084638f15
28489 changed files with 184 additions and 3866856 deletions

@@ -1,76 +0,0 @@
; RUN: opt < %s -msan -S | FileCheck %s
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-unknown-linux-gnu"
%struct.__va_list = type { i8*, i8*, i8*, i32, i32 }
define i32 @foo(i32 %guard, ...) {
%vl = alloca %struct.__va_list, align 8
%1 = bitcast %struct.__va_list* %vl to i8*
call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
call void @llvm.va_start(i8* %1)
call void @llvm.va_end(i8* %1)
call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
ret i32 0
}
; First, check that the variadic shadow values are saved on the stack with the
; correct size (192 is the total size of the general-purpose registers, 64, plus
; the total size of the floating-point/SIMD registers, 128).
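; (On AArch64, the 8 general-purpose argument registers x0-x7 at 8 bytes each give
; 64 bytes of shadow, and the 8 SIMD/FP argument registers q0-q7 at 16 bytes each
; give 128 bytes.)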
; CHECK-LABEL: @foo
; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
; CHECK: [[B:%.*]] = add i64 192, [[A]]
; CHECK: alloca {{.*}} [[B]]
; We expect three memcpy operations: one for the general-purpose registers, one
; for the floating-point/SIMD ones, and one for the remaining arguments.
; Propagate the GR shadow values to va_list::__gp_top, adjust the offset into
; __msan_va_arg_tls based on va_list::__gp_off, and finally issue the memcpy.
; CHECK: [[GRP:%.*]] = getelementptr inbounds i8, i8* {{%.*}}, i64 {{%.*}}
; CHECK: [[GRSIZE:%.*]] = sub i64 64, {{%.*}}
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{%.*}}, i8* [[GRP]], i64 [[GRSIZE]], i32 8, i1 false)
; Propagate the VR shadow values to va_list::__vr_top, adjust the offset into
; __msan_va_arg_tls based on va_list::__vr_off, and finally issue the memcpy.
; CHECK: [[VRP:%.*]] = getelementptr inbounds i8, i8* {{%.*}}, i64 {{%.*}}
; CHECK: [[VRSIZE:%.*]] = sub i64 128, {{%.*}}
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{%.*}}, i8* [[VRP]], i64 [[VRSIZE]], i32 8, i1 false)
; Copy the remaining shadow values to the va_list::__stack position (it is at a
; constant offset of 192 from __msan_va_arg_tls).
; CHECK: [[STACK:%.*]] = getelementptr inbounds i8, i8* {{%.*}}, i32 192
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{%.*}}, i8* [[STACK]], i64 {{%.*}}, i32 16, i1 false)
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.va_start(i8*) #2
declare void @llvm.va_end(i8*) #2
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i32 2, double 3.000000e+00,
double 4.000000e+00, i32 5, i32 6,
double 7.000000e+00, i32 8, i32 9, i32 10, i32 11)
ret i32 %1
}
; Save the incoming shadow values from the arguments in the __msan_va_arg_tls
; array. General-purpose register arguments are saved at offsets 0 to 64,
; floating-point/SIMD arguments at offsets 64 to 192, and the remaining
; arguments from offset 192 on.
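; As a worked example (offsets reconstructed from the CHECK lines below; the fixed
; %guard takes x0 and gets no va_arg slot), the variadic arguments of the call in
; @bar map to: i32 1 -> 8, i32 2 -> 16, i32 5 -> 24, i32 6 -> 32, i32 8 -> 40,
; i32 9 -> 48, i32 10 -> 56 (GP area); double 3.0 -> 64, double 4.0 -> 80,
; double 7.0 -> 96 (FP/SIMD area); i32 11 -> 192 (stack area, overflow size 8).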
; CHECK-LABEL: @bar
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 8
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 16
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 64
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 80
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 24
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 32
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 96
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 40
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 48
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 56
; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 192
; CHECK: store {{.*}} 8, {{.*}} @__msan_va_arg_overflow_size_tls

@@ -1,55 +0,0 @@
; RUN: opt < %s -msan -S | FileCheck %s
target datalayout = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
target triple = "mips64--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca i8*, align 8
%1 = bitcast i8** %vl to i8*
call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
call void @llvm.va_start(i8* %1)
call void @llvm.va_end(i8* %1)
call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
ret i32 0
}
; First, check allocation of the save area.
; CHECK-LABEL: @foo
; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
; CHECK: [[B:%.*]] = add i64 0, [[A]]
; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
; CHECK: [[STACK:%.*]] = bitcast {{.*}} @__msan_va_arg_tls to i8*
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[C]], i8* [[STACK]], i64 [[B]], i32 8, i1 false)
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.va_start(i8*) #2
declare void @llvm.va_end(i8*) #2
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
; Save the incoming shadow values from the arguments in the __msan_va_arg_tls
; array. The first (i32) argument is stored at offset 4, since 32-bit values are
; right-justified in their 8-byte slot on big-endian MIPS64.
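; A worked example matching the CHECK lines below: i32 1 -> offset 4
; (right-justified in the first slot), i64 2 -> offset 8, double 3.0 -> offset 16;
; total va_arg overflow size 24.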
; CHECK-LABEL: @bar
; CHECK: store i32 0, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 4) to i32*), align 8
; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 16) to i64*), align 8
; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
; Check multiple fixed arguments.
declare i32 @foo2(i32 %g1, i32 %g2, ...)
define i32 @bar2() {
%1 = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
; CHECK-LABEL: @bar2
; CHECK: store i64 0, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_va_arg_tls, i32 0, i32 0), align 8
; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls

@@ -1,54 +0,0 @@
; RUN: opt < %s -msan -S | FileCheck %s
target datalayout = "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
target triple = "mips64el--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca i8*, align 8
%1 = bitcast i8** %vl to i8*
call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
call void @llvm.va_start(i8* %1)
call void @llvm.va_end(i8* %1)
call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
ret i32 0
}
; First, check allocation of the save area.
; CHECK-LABEL: @foo
; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
; CHECK: [[B:%.*]] = add i64 0, [[A]]
; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
; CHECK: [[STACK:%.*]] = bitcast {{.*}} @__msan_va_arg_tls to i8*
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[C]], i8* [[STACK]], i64 [[B]], i32 8, i1 false)
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.va_start(i8*) #2
declare void @llvm.va_end(i8*) #2
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
; array.
; CHECK-LABEL: @bar
; CHECK: store i32 0, i32* bitcast ([100 x i64]* @__msan_va_arg_tls to i32*), align 8
; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 16) to i64*), align 8
; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
; Check multiple fixed arguments.
declare i32 @foo2(i32 %g1, i32 %g2, ...)
define i32 @bar2() {
%1 = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
; CHECK-LABEL: @bar2
; CHECK: store i64 0, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_va_arg_tls, i32 0, i32 0), align 8
; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls

@@ -1,113 +0,0 @@
; RUN: opt < %s -msan -S | FileCheck %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca i8*, align 8
%1 = bitcast i8** %vl to i8*
call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
call void @llvm.va_start(i8* %1)
call void @llvm.va_end(i8* %1)
call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
ret i32 0
}
; First, check allocation of the save area.
; CHECK-LABEL: @foo
; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
; CHECK: [[B:%.*]] = add i64 0, [[A]]
; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
; CHECK: [[STACK:%.*]] = bitcast {{.*}} @__msan_va_arg_tls to i8*
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[C]], i8* [[STACK]], i64 [[B]], i32 8, i1 false)
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.va_start(i8*) #2
declare void @llvm.va_end(i8*) #2
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
; Save the incoming shadow values from the arguments in the __msan_va_arg_tls
; array. The first (i32) argument is stored at offset 4, since 32-bit values are
; right-justified in their 8-byte doubleword on big-endian PPC64.
; CHECK-LABEL: @bar
; CHECK: store i32 0, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 4) to i32*), align 8
; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 16) to i64*), align 8
; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
; Check vector argument.
define i32 @bar2() {
%1 = call i32 (i32, ...) @foo(i32 0, <2 x i64> <i64 1, i64 2>)
ret i32 %1
}
; The vector is at offset 16 of the parameter save area, but __msan_va_arg_tls
; corresponds to offset 8 onward of the parameter save area, so the vector's
; offset from __msan_va_arg_tls is actually misaligned.
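; A worked example: the fixed i32 occupies the first doubleword (offsets 0-8 of
; the parameter save area), the <2 x i64> is padded up to 16-byte alignment and
; lands at offset 16, i.e. __msan_va_arg_tls offset 16 - 8 = 8; the overflow size
; is 8 (padding doubleword) + 16 (vector) = 24.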
; CHECK-LABEL: @bar2
; CHECK: store <2 x i64> zeroinitializer, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to <2 x i64>*), align 8
; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
; Check QPX vector argument.
define i32 @bar3() "target-features"="+qpx" {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i32 2, <4 x double> <double 1.0, double 2.0, double 3.0, double 4.0>)
ret i32 %1
}
; This one is even stranger: the parameter save area starts at offset 48 from the
; (32-byte aligned) stack pointer, and the vector parameter is 96 bytes from the
; stack pointer, so its offset from the parameter save area is misaligned.
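; Reconstructing the offsets checked below: i32 1 -> __msan_va_arg_tls offset 0
; (stored right-justified at 4), i32 2 -> offset 8 (stored at 12), and the 32-byte
; <4 x double> sits 96 bytes above the stack pointer, i.e. 96 - 48 = 48 into the
; parameter save area, i.e. __msan_va_arg_tls offset 40; overflow size 40 + 32 = 72.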
; CHECK-LABEL: @bar3
; CHECK: store i32 0, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 4) to i32*), align 8
; CHECK: store i32 0, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 12) to i32*), align 8
; CHECK: store <4 x i64> zeroinitializer, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 40) to <4 x i64>*), align 8
; CHECK: store {{.*}} 72, {{.*}} @__msan_va_arg_overflow_size_tls
; Check i64 array.
define i32 @bar4() {
%1 = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2])
ret i32 %1
}
; CHECK-LABEL: @bar4
; CHECK: store [2 x i64] zeroinitializer, [2 x i64]* bitcast ([100 x i64]* @__msan_va_arg_tls to [2 x i64]*), align 8
; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
; Check i128 array.
define i32 @bar5() {
%1 = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2])
ret i32 %1
}
; CHECK-LABEL: @bar5
; CHECK: store [2 x i128] zeroinitializer, [2 x i128]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to [2 x i128]*), align 8
; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls
; Check 8-aligned byval.
define i32 @bar6([2 x i64]* %arg) {
%1 = call i32 (i32, ...) @foo(i32 0, [2 x i64]* byval align 8 %arg)
ret i32 %1
}
; CHECK-LABEL: @bar6
; CHECK: [[SHADOW:%[0-9]+]] = bitcast [2 x i64]* bitcast ([100 x i64]* @__msan_va_arg_tls to [2 x i64]*) to i8*
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[SHADOW]], i8* {{.*}}, i64 16, i32 8, i1 false)
; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
; Check 16-aligned byval.
define i32 @bar7([4 x i64]* %arg) {
%1 = call i32 (i32, ...) @foo(i32 0, [4 x i64]* byval align 16 %arg)
ret i32 %1
}
; CHECK-LABEL: @bar7
; CHECK: [[SHADOW:%[0-9]+]] = bitcast [4 x i64]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to [4 x i64]*)
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[SHADOW]], i8* {{.*}}, i64 32, i32 8, i1 false)
; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls

@@ -1,97 +0,0 @@
; RUN: opt < %s -msan -S | FileCheck %s
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca i8*, align 8
%1 = bitcast i8** %vl to i8*
call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
call void @llvm.va_start(i8* %1)
call void @llvm.va_end(i8* %1)
call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
ret i32 0
}
; First, check allocation of the save area.
; CHECK-LABEL: @foo
; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
; CHECK: [[B:%.*]] = add i64 0, [[A]]
; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
; CHECK: [[STACK:%.*]] = bitcast {{.*}} @__msan_va_arg_tls to i8*
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[C]], i8* [[STACK]], i64 [[B]], i32 8, i1 false)
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.va_start(i8*) #2
declare void @llvm.va_end(i8*) #2
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
; array.
; CHECK-LABEL: @bar
; CHECK: store i32 0, i32* bitcast ([100 x i64]* @__msan_va_arg_tls to i32*), align 8
; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 16) to i64*), align 8
; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
; Check vector argument.
define i32 @bar2() {
%1 = call i32 (i32, ...) @foo(i32 0, <2 x i64> <i64 1, i64 2>)
ret i32 %1
}
; The vector is at offset 16 of the parameter save area, but __msan_va_arg_tls
; corresponds to offset 8 onward of the parameter save area, so the vector's
; offset from __msan_va_arg_tls is actually misaligned.
; CHECK-LABEL: @bar2
; CHECK: store <2 x i64> zeroinitializer, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to <2 x i64>*), align 8
; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
; Check i64 array.
define i32 @bar4() {
%1 = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2])
ret i32 %1
}
; CHECK-LABEL: @bar4
; CHECK: store [2 x i64] zeroinitializer, [2 x i64]* bitcast ([100 x i64]* @__msan_va_arg_tls to [2 x i64]*), align 8
; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
; Check i128 array.
define i32 @bar5() {
%1 = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2])
ret i32 %1
}
; CHECK-LABEL: @bar5
; CHECK: store [2 x i128] zeroinitializer, [2 x i128]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to [2 x i128]*), align 8
; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls
; Check 8-aligned byval.
define i32 @bar6([2 x i64]* %arg) {
%1 = call i32 (i32, ...) @foo(i32 0, [2 x i64]* byval align 8 %arg)
ret i32 %1
}
; CHECK-LABEL: @bar6
; CHECK: [[SHADOW:%[0-9]+]] = bitcast [2 x i64]* bitcast ([100 x i64]* @__msan_va_arg_tls to [2 x i64]*) to i8*
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[SHADOW]], i8* {{.*}}, i64 16, i32 8, i1 false)
; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
; Check 16-aligned byval.
define i32 @bar7([4 x i64]* %arg) {
%1 = call i32 (i32, ...) @foo(i32 0, [4 x i64]* byval align 16 %arg)
ret i32 %1
}
; CHECK-LABEL: @bar7
; CHECK: [[SHADOW:%[0-9]+]] = bitcast [4 x i64]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to [4 x i64]*)
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[SHADOW]], i8* {{.*}}, i64 32, i32 8, i1 false)
; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls

@@ -1,15 +0,0 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S
; Test that code using va_start can be compiled on i386.
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
target triple = "i386-unknown-linux-gnu"
define void @VaStart(i8* %s, ...) {
entry:
%vl = alloca i8*, align 4
%vl1 = bitcast i8** %vl to i8*
call void @llvm.va_start(i8* %vl1)
ret void
}
declare void @llvm.va_start(i8*)

@@ -1,59 +0,0 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s --check-prefixes=CHECK,INLINE
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-poison-stack-with-call=1 -S | FileCheck %s --check-prefixes=CHECK,CALL
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s --check-prefixes=CHECK,ORIGIN
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck %s --check-prefixes=CHECK,ORIGIN
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
define void @static() sanitize_memory {
entry:
%x = alloca i32, align 4
ret void
}
; CHECK-LABEL: define void @static(
; INLINE: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 -1, i64 4, i32 4, i1 false)
; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
; ORIGIN: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 4,
; CHECK: ret void
define void @dynamic() sanitize_memory {
entry:
br label %l
l:
%x = alloca i32, align 4
ret void
}
; CHECK-LABEL: define void @dynamic(
; INLINE: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 -1, i64 4, i32 4, i1 false)
; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
; ORIGIN: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 4,
; CHECK: ret void
define void @array() sanitize_memory {
entry:
%x = alloca i32, i64 5, align 4
ret void
}
; CHECK-LABEL: define void @array(
; INLINE: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 -1, i64 20, i32 4, i1 false)
; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 20)
; ORIGIN: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 20,
; CHECK: ret void
define void @array_non_const(i64 %cnt) sanitize_memory {
entry:
%x = alloca i32, i64 %cnt, align 4
ret void
}
; CHECK-LABEL: define void @array_non_const(
; CHECK: %[[A:.*]] = mul i64 4, %cnt
; INLINE: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 -1, i64 %[[A]], i32 4, i1 false)
; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 %[[A]])
; ORIGIN: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 %[[A]],
; CHECK: ret void

@@ -1,89 +0,0 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
define [2 x i32] @InsertValue(i32 %x, i32 %y) sanitize_memory {
entry:
%a = insertvalue [2 x i32] undef, i32 %x, 0
%b = insertvalue [2 x i32] %a, i32 %y, 1
ret [2 x i32] %b
}
; CHECK-LABEL: @InsertValue(
; CHECK-DAG: [[Sy:%.*]] = load i32, i32* {{.*}}@__msan_param_tls to i64), i64 8) to i32*)
; CHECK-DAG: [[Sx:%.*]] = load i32, i32* {{.*}}@__msan_param_tls to i32*)
; CHECK: [[A:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[Sx]], 0
; CHECK: [[B:%.*]] = insertvalue [2 x i32] [[A]], i32 [[Sy]], 1
; CHECK: store [2 x i32] [[B]], [2 x i32]* {{.*}}@__msan_retval_tls
; CHECK: ret [2 x i32]
define [2 x double] @InsertValueDouble(double %x, double %y) sanitize_memory {
entry:
%a = insertvalue [2 x double] undef, double %x, 0
%b = insertvalue [2 x double] %a, double %y, 1
ret [2 x double] %b
}
; CHECK-LABEL: @InsertValueDouble(
; CHECK-DAG: [[Sy:%.*]] = load i64, i64* {{.*}}@__msan_param_tls to i64), i64 8) to i64*)
; CHECK-DAG: [[Sx:%.*]] = load i64, i64* getelementptr {{.*}}@__msan_param_tls, i32 0, i32 0
; CHECK: [[A:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[Sx]], 0
; CHECK: [[B:%.*]] = insertvalue [2 x i64] [[A]], i64 [[Sy]], 1
; CHECK: store [2 x i64] [[B]], [2 x i64]* {{.*}}@__msan_retval_tls
; CHECK: ret [2 x double]
define i32 @ExtractValue([2 x i32] %a) sanitize_memory {
entry:
%x = extractvalue [2 x i32] %a, 1
ret i32 %x
}
; CHECK-LABEL: @ExtractValue(
; CHECK: [[Sa:%.*]] = load [2 x i32], [2 x i32]* {{.*}}@__msan_param_tls to [2 x i32]*)
; CHECK: [[Sx:%.*]] = extractvalue [2 x i32] [[Sa]], 1
; CHECK: store i32 [[Sx]], i32* {{.*}}@__msan_retval_tls
; CHECK: ret i32
; Regression test for PR20493.
%MyStruct = type { i32, i32, [3 x i32] }
define i32 @ArrayInStruct(%MyStruct %s) sanitize_memory {
%x = extractvalue %MyStruct %s, 2, 1
ret i32 %x
}
; CHECK-LABEL: @ArrayInStruct(
; CHECK: [[Ss:%.*]] = load { i32, i32, [3 x i32] }, { i32, i32, [3 x i32] }* {{.*}}@__msan_param_tls to { i32, i32, [3 x i32] }*)
; CHECK: [[Sx:%.*]] = extractvalue { i32, i32, [3 x i32] } [[Ss]], 2, 1
; CHECK: store i32 [[Sx]], i32* {{.*}}@__msan_retval_tls
; CHECK: ret i32
define i32 @ArrayOfStructs([3 x { i32, i32 }] %a) sanitize_memory {
%x = extractvalue [3 x { i32, i32 }] %a, 2, 1
ret i32 %x
}
; CHECK-LABEL: @ArrayOfStructs(
; CHECK: [[Ss:%.*]] = load [3 x { i32, i32 }], [3 x { i32, i32 }]* {{.*}}@__msan_param_tls to [3 x { i32, i32 }]*)
; CHECK: [[Sx:%.*]] = extractvalue [3 x { i32, i32 }] [[Ss]], 2, 1
; CHECK: store i32 [[Sx]], i32* {{.*}}@__msan_retval_tls
; CHECK: ret i32
define <8 x i16> @ArrayOfVectors([3 x <8 x i16>] %a) sanitize_memory {
%x = extractvalue [3 x <8 x i16>] %a, 1
ret <8 x i16> %x
}
; CHECK-LABEL: @ArrayOfVectors(
; CHECK: [[Ss:%.*]] = load [3 x <8 x i16>], [3 x <8 x i16>]* {{.*}}@__msan_param_tls to [3 x <8 x i16>]*)
; CHECK: [[Sx:%.*]] = extractvalue [3 x <8 x i16>] [[Ss]], 1
; CHECK: store <8 x i16> [[Sx]], <8 x i16>* {{.*}}@__msan_retval_tls
; CHECK: ret <8 x i16>

@@ -1,193 +0,0 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; atomicrmw xchg: store clean shadow, return clean shadow
define i32 @AtomicRmwXchg(i32* %p, i32 %x) sanitize_memory {
entry:
%0 = atomicrmw xchg i32* %p, i32 %x seq_cst
ret i32 %0
}
; CHECK-LABEL: @AtomicRmwXchg
; CHECK: store i32 0,
; CHECK: atomicrmw xchg {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomicrmw max: exactly the same as above
define i32 @AtomicRmwMax(i32* %p, i32 %x) sanitize_memory {
entry:
%0 = atomicrmw max i32* %p, i32 %x seq_cst
ret i32 %0
}
; CHECK-LABEL: @AtomicRmwMax
; CHECK: store i32 0,
; CHECK: atomicrmw max {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; cmpxchg: the same as above, but also check %a shadow
define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
%pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst
%0 = extractvalue { i32, i1 } %pair, 0
ret i32 %0
}
; CHECK-LABEL: @Cmpxchg
; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} seq_cst seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; relaxed cmpxchg: bump up to "release monotonic"
define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
%pair = cmpxchg i32* %p, i32 %a, i32 %b monotonic monotonic
%0 = extractvalue { i32, i1 } %pair, 0
ret i32 %0
}
; CHECK-LABEL: @CmpxchgMonotonic
; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} release monotonic
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load: preserve alignment, load shadow value after app value
define i32 @AtomicLoad(i32* %p) sanitize_memory {
entry:
%0 = load atomic i32, i32* %p seq_cst, align 16
ret i32 %0
}
; CHECK-LABEL: @AtomicLoad
; CHECK: load atomic i32, i32* {{.*}} seq_cst, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load: preserve alignment, load shadow value after app value
define i32 @AtomicLoadAcquire(i32* %p) sanitize_memory {
entry:
%0 = load atomic i32, i32* %p acquire, align 16
ret i32 %0
}
; CHECK-LABEL: @AtomicLoadAcquire
; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load monotonic: bump up to load acquire
define i32 @AtomicLoadMonotonic(i32* %p) sanitize_memory {
entry:
%0 = load atomic i32, i32* %p monotonic, align 16
ret i32 %0
}
; CHECK-LABEL: @AtomicLoadMonotonic
; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load unordered: bump up to load acquire
define i32 @AtomicLoadUnordered(i32* %p) sanitize_memory {
entry:
%0 = load atomic i32, i32* %p unordered, align 16
ret i32 %0
}
; CHECK-LABEL: @AtomicLoadUnordered
; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic store: preserve alignment, store clean shadow value before app value
define void @AtomicStore(i32* %p, i32 %x) sanitize_memory {
entry:
store atomic i32 %x, i32* %p seq_cst, align 16
ret void
}
; CHECK-LABEL: @AtomicStore
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16
; CHECK: ret void
; atomic store: preserve alignment, store clean shadow value before app value
define void @AtomicStoreRelease(i32* %p, i32 %x) sanitize_memory {
entry:
store atomic i32 %x, i32* %p release, align 16
ret void
}
; CHECK-LABEL: @AtomicStoreRelease
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
; atomic store monotonic: bumped up to store release
define void @AtomicStoreMonotonic(i32* %p, i32 %x) sanitize_memory {
entry:
store atomic i32 %x, i32* %p monotonic, align 16
ret void
}
; CHECK-LABEL: @AtomicStoreMonotonic
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
; atomic store unordered: bumped up to store release
define void @AtomicStoreUnordered(i32* %p, i32 %x) sanitize_memory {
entry:
store atomic i32 %x, i32* %p unordered, align 16
ret void
}
; CHECK-LABEL: @AtomicStoreUnordered
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void

@@ -1,20 +0,0 @@
; Test that copy alignment for byval arguments is limited by param-tls slot alignment.
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
%struct.S = type { i64, i64, i64, [8 x i8] }
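; %struct.S is 32 bytes and the call below passes it byval with align 16, but the
; corresponding __msan_param_tls slot is only 8-byte aligned, so the shadow memcpy
; in the CHECK line is emitted with alignment 8.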
; CHECK: [[A:%.*]] = bitcast i64* {{.*}} add {{.*}} ptrtoint {{.*}} @__msan_param_tls {{.*}} i64 8)
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[A]], i8* {{.*}}, i64 32, i32 8, i1 false)
define void @Caller() sanitize_memory {
entry:
%agg.tmp = alloca %struct.S, align 16
call void @Callee(i32 1, %struct.S* byval align 16 %agg.tmp)
ret void
}
declare void @Callee(i32, %struct.S* byval align 16)

@@ -1,53 +0,0 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-check-constant-shadow=1 -msan-track-origins=1 -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Test that returning a literal undef from main() triggers an MSan warning.
; main() is special: MSan inserts a check for its return value.
define i32 @main() nounwind uwtable sanitize_memory {
entry:
ret i32 undef
}
; CHECK-LABEL: @main
; CHECK: call void @__msan_warning_noreturn
; CHECK: ret i32 undef
; This function stores a known-initialized value.
; Expect 2 stores: one for the shadow (0) and one for the value (42), but no origin store.
define void @StoreConstant(i32* nocapture %p) nounwind uwtable sanitize_memory {
entry:
store i32 42, i32* %p, align 4
ret void
}
; CHECK-LABEL: @StoreConstant
; CHECK-NOT: store i32
; CHECK: store i32 0,
; CHECK-NOT: store i32
; CHECK: store i32 42,
; CHECK-NOT: store i32
; CHECK: ret void
; This function stores a known-uninitialized value.
; Expect 3 stores: shadow, value, and origin.
; Expect no icmp(s): everything here is unconditional.
define void @StoreUndef(i32* nocapture %p) nounwind uwtable sanitize_memory {
entry:
store i32 undef, i32* %p, align 4
ret void
}
; CHECK-LABEL: @StoreUndef
; CHECK-NOT: icmp
; CHECK: store i32
; CHECK-NOT: icmp
; CHECK: store i32
; CHECK-NOT: icmp
; CHECK: store i32
; CHECK-NOT: icmp
; CHECK: ret void

@@ -1,50 +0,0 @@
; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Test byval argument shadow alignment
define <2 x i64> @ByValArgumentShadowLargeAlignment(<2 x i64>* byval %p) sanitize_memory {
entry:
%x = load <2 x i64>, <2 x i64>* %p
ret <2 x i64> %x
}
; CHECK-LABEL: @ByValArgumentShadowLargeAlignment
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 16, i32 8, i1 false)
; CHECK: ret <2 x i64>
define i16 @ByValArgumentShadowSmallAlignment(i16* byval %p) sanitize_memory {
entry:
%x = load i16, i16* %p
ret i16 %x
}
; CHECK-LABEL: @ByValArgumentShadowSmallAlignment
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 2, i32 2, i1 false)
; CHECK: ret i16
; Check instrumentation of stores. The check must precede the shadow store.
define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
store i32 %x, i32* %p, align 4
ret void
}
; CHECK-LABEL: @Store
; CHECK: load {{.*}} @__msan_param_tls
; CHECK: icmp
; CHECK: br i1
; CHECK: <label>
; CHECK: call void @__msan_warning_noreturn
; CHECK: <label>
; CHECK: store
; CHECK: store i32 %x
; CHECK: ret void

@@ -1,53 +0,0 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s --check-prefix=ADDR
; REQUIRES: x86-registered-target
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
declare void @llvm.x86.sse.stmxcsr(i8*)
declare void @llvm.x86.sse.ldmxcsr(i8*)
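; stmxcsr writes 4 bytes of application memory, so MSan stores a clean 4-byte
; shadow for the destination; ldmxcsr reads 4 bytes, so MSan loads and checks the
; 4-byte shadow of the source. With -msan-check-access-address=1, the shadow of the
; pointer argument itself is checked as well (the ADDR lines below).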
define void @getcsr(i32 *%p) sanitize_memory {
entry:
%0 = bitcast i32* %p to i8*
call void @llvm.x86.sse.stmxcsr(i8* %0)
ret void
}
; CHECK-LABEL: @getcsr(
; CHECK: store i32 0, i32*
; CHECK: call void @llvm.x86.sse.stmxcsr(
; CHECK: ret void
; ADDR-LABEL: @getcsr(
; ADDR: %[[A:.*]] = load i64, i64* getelementptr inbounds {{.*}} @__msan_param_tls, i32 0, i32 0), align 8
; ADDR: %[[B:.*]] = icmp ne i64 %[[A]], 0
; ADDR: br i1 %[[B]], label {{.*}}, label
; ADDR: call void @__msan_warning_noreturn()
; ADDR: call void @llvm.x86.sse.stmxcsr(
; ADDR: ret void
; Function Attrs: nounwind uwtable
define void @setcsr(i32 *%p) sanitize_memory {
entry:
%0 = bitcast i32* %p to i8*
call void @llvm.x86.sse.ldmxcsr(i8* %0)
ret void
}
; CHECK-LABEL: @setcsr(
; CHECK: %[[A:.*]] = load i32, i32* %{{.*}}, align 1
; CHECK: %[[B:.*]] = icmp ne i32 %[[A]], 0
; CHECK: br i1 %[[B]], label {{.*}}, label
; CHECK: call void @__msan_warning_noreturn()
; CHECK: call void @llvm.x86.sse.ldmxcsr(
; CHECK: ret void
; ADDR-LABEL: @setcsr(
; ADDR: %[[A:.*]] = load i64, i64* getelementptr inbounds {{.*}} @__msan_param_tls, i32 0, i32 0), align 8
; ADDR: %[[B:.*]] = icmp ne i64 %[[A]], 0
; ADDR: br i1 %[[B]], label {{.*}}, label
; ADDR: call void @__msan_warning_noreturn()
; ADDR: call void @llvm.x86.sse.ldmxcsr(
; ADDR: ret void

@@ -1,17 +0,0 @@
; MSan converts 2-element global_ctors to 3-element when adding the new entry.
; RUN: opt < %s -msan -msan-with-comdat -S | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; CHECK: $msan.module_ctor = comdat any
; CHECK: @llvm.global_ctors = appending global [2 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @f, i8* null }, { i32, void ()*, i8* } { i32 0, void ()* @msan.module_ctor, i8* bitcast (void ()* @msan.module_ctor to i8*) }]
@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @f }]
define internal void @f() {
entry:
ret void
}
; CHECK: define internal void @msan.module_ctor() comdat {

@@ -1,53 +0,0 @@
; Test -msan-instrumentation-with-call-threshold
; Test that in with-calls mode there are no calls to __msan_chain_origin - they
; are done from __msan_maybe_store_origin_*.
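; With the threshold set to 0, every shadow check and origin store is emitted as a
; runtime call (__msan_maybe_warning_N / __msan_maybe_store_origin_N) instead of
; inline instrumentation, which is what the CHECK lines below rely on.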
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-instrumentation-with-call-threshold=0 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-instrumentation-with-call-threshold=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-instrumentation-with-call-threshold=0 -msan-track-origins=2 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
%0 = load i32, i32* %a, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
tail call void (...) @foo() nounwind
br label %if.end
if.end: ; preds = %entry, %if.then
ret void
}
declare void @foo(...)
; CHECK-LABEL: @LoadAndCmp
; CHECK: = load
; CHECK: = load
; CHECK: = zext i1 {{.*}} to i8
; CHECK: call void @__msan_maybe_warning_1(
; CHECK-NOT: unreachable
; CHECK: ret void
define void @Store(i64* nocapture %p, i64 %x) nounwind uwtable sanitize_memory {
entry:
store i64 %x, i64* %p, align 4
ret void
}
; CHECK-LABEL: @Store
; CHECK: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store
; CHECK-ORIGINS-NOT: __msan_chain_origin
; CHECK-ORIGINS: bitcast i64* {{.*}} to i8*
; CHECK-ORIGINS-NOT: __msan_chain_origin
; CHECK-ORIGINS: call void @__msan_maybe_store_origin_8(
; CHECK-ORIGINS-NOT: __msan_chain_origin
; CHECK: store i64
; CHECK: ret void

@@ -1,33 +0,0 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Test that the result origin is directly propagated from the argument,
; and is not affected by all the literal undef operands.
; https://github.com/google/sanitizers/issues/559
define <4 x i32> @Shuffle(<4 x i32> %x) nounwind uwtable sanitize_memory {
entry:
%y = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
ret <4 x i32> %y
}
; CHECK-LABEL: @Shuffle(
; CHECK: [[A:%.*]] = load i32, i32* {{.*}}@__msan_param_origin_tls,
; CHECK: store i32 [[A]], i32* @__msan_retval_origin_tls
; CHECK: ret <4 x i32>
; Regression test for origin propagation in "select i1, float, float".
; https://github.com/google/sanitizers/issues/581
define float @SelectFloat(i1 %b, float %x, float %y) nounwind uwtable sanitize_memory {
entry:
%z = select i1 %b, float %x, float %y
ret float %z
}
; CHECK-LABEL: @SelectFloat(
; CHECK-NOT: select {{.*}} i32 0, i32 0
; CHECK: ret float

File diff suppressed because it is too large.

@@ -1,68 +0,0 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
; REQUIRES: x86-registered-target
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Store intrinsic.
define void @StoreIntrinsic(i8* %p, <4 x float> %x) nounwind uwtable sanitize_memory {
call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
ret void
}
declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
; CHECK-LABEL: @StoreIntrinsic
; CHECK-NOT: br
; CHECK-NOT: = or
; CHECK: store <4 x i32> {{.*}} align 1
; CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
; CHECK: ret void
; Load intrinsic.
define <16 x i8> @LoadIntrinsic(i8* %p) nounwind uwtable sanitize_memory {
%call = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p)
ret <16 x i8> %call
}
declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind
; CHECK-LABEL: @LoadIntrinsic
; CHECK: load <16 x i8>, <16 x i8>* {{.*}} align 1
; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32, i32* {{.*}}
; CHECK-NOT: br
; CHECK-NOT: = or
; CHECK: call <16 x i8> @llvm.x86.sse3.ldu.dq
; CHECK: store <16 x i8> {{.*}} @__msan_retval_tls
; CHECK-ORIGINS: store i32 {{.*}}[[ORIGIN]], i32* @__msan_retval_origin_tls
; CHECK: ret <16 x i8>
; Simple NoMem intrinsic.
; Check that the shadows are OR'ed and the origin is chosen with a select,
; and that no shadow checks are emitted.
define <8 x i16> @Paddsw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable sanitize_memory {
%call = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
ret <8 x i16> %call
}
declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b) nounwind
; CHECK-LABEL: @Paddsw128
; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
; CHECK-NEXT: = or <8 x i16>
; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
; CHECK-ORIGINS-NEXT: = select i1 {{.*}}, i32 {{.*}}, i32
; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.padds.w
; CHECK-NEXT: store <8 x i16> {{.*}} @__msan_retval_tls
; CHECK-ORIGINS: store i32 {{.*}} @__msan_retval_origin_tls
; CHECK-NEXT: ret <8 x i16>

@@ -1,117 +0,0 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Check instrumentation of mul when one of the operands is a constant.
define i64 @MulConst(i64 %x) sanitize_memory {
entry:
%y = mul i64 %x, 42949672960000
ret i64 %y
}
; 42949672960000 = 2**32 * 10000
; 36 trailing zero bits
; 68719476736 = 2**36
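; When one multiplication operand is a compile-time constant, MSan multiplies the
; other operand's shadow by the largest power of two dividing the constant
; (2**<number of trailing zero bits>): the low bits of the product are zero no
; matter what, so only the higher shadow bits propagate. Hence the shadow
; multipliers in the CHECK lines below: 2**36 here, 16 for -16 and -48, and 1 for
; the odd constant 12345.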
; CHECK-LABEL: @MulConst(
; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
; CHECK: [[B:%.*]] = mul i64 [[A]], 68719476736
; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
define i64 @MulZero(i64 %x) sanitize_memory {
entry:
%y = mul i64 %x, 0
ret i64 %y
}
; CHECK-LABEL: @MulZero(
; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
; CHECK: [[B:%.*]] = mul i64 [[A]], 0{{$}}
; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
define i64 @MulNeg(i64 %x) sanitize_memory {
entry:
%y = mul i64 %x, -16
ret i64 %y
}
; CHECK-LABEL: @MulNeg(
; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
; CHECK: [[B:%.*]] = mul i64 [[A]], 16
; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
define i64 @MulNeg2(i64 %x) sanitize_memory {
entry:
%y = mul i64 %x, -48
ret i64 %y
}
; CHECK-LABEL: @MulNeg2(
; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
; CHECK: [[B:%.*]] = mul i64 [[A]], 16
; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
define i64 @MulOdd(i64 %x) sanitize_memory {
entry:
%y = mul i64 %x, 12345
ret i64 %y
}
; CHECK-LABEL: @MulOdd(
; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
; CHECK: [[B:%.*]] = mul i64 [[A]], 1
; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
define i64 @MulLarge(i64 %x) sanitize_memory {
entry:
%y = mul i64 %x, -9223372036854775808
ret i64 %y
}
; -9223372036854775808 = 0x8000000000000000
; CHECK-LABEL: @MulLarge(
; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
; CHECK: [[B:%.*]] = mul i64 [[A]], -9223372036854775808
; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
define <4 x i32> @MulVectorConst(<4 x i32> %x) sanitize_memory {
entry:
%y = mul <4 x i32> %x, <i32 3072, i32 0, i32 -16, i32 -48>
ret <4 x i32> %y
}
; CHECK-LABEL: @MulVectorConst(
; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
; CHECK: [[B:%.*]] = mul <4 x i32> [[A]], <i32 1024, i32 0, i32 16, i32 16>
; CHECK: store <4 x i32> [[B]], <4 x i32>* {{.*}} @__msan_retval_tls
; The constant in multiplication does not have to be a literal integer constant.
@X = linkonce_odr global i8* null
define i64 @MulNonIntegerConst(i64 %a) sanitize_memory {
%mul = mul i64 %a, ptrtoint (i8** @X to i64)
ret i64 %mul
}
; CHECK-LABEL: @MulNonIntegerConst(
; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
; CHECK: [[B:%.*]] = mul i64 [[A]], 1
; CHECK: store i64 [[B]], {{.*}}@__msan_retval_tls
define <2 x i64> @MulNonIntegerVectorConst(<2 x i64> %a) sanitize_memory {
%mul = mul <2 x i64> %a, <i64 3072, i64 ptrtoint (i8** @X to i64)>
ret <2 x i64> %mul
}
; CHECK-LABEL: @MulNonIntegerVectorConst(
; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
; CHECK: [[B:%.*]] = mul <2 x i64> [[A]], <i64 1024, i64 1>
; CHECK: store <2 x i64> [[B]], {{.*}}@__msan_retval_tls

@@ -1,48 +0,0 @@
; Verify that calls with !nosanitize are not instrumented by MSan.
; RUN: opt < %s -msan -S | FileCheck %s
; RUN: opt < %s -msan -msan-track-origins=1 -S | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
declare void @bar(i32 %x)
define void @foo() {
call void @bar(i32 7), !nosanitize !{}
ret void
}
; CHECK-LABEL: define void @foo
; CHECK-NOT: store {{.*}} @__msan_param_tls
; CHECK: call void @bar
; CHECK: ret void
@__sancov_gen_ = private global [1 x i8] zeroinitializer, section "__sancov_cntrs", align 1
define void @sancov() sanitize_memory {
entry:
%0 = load i8, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), !nosanitize !{}
%1 = add i8 %0, 1
store i8 %1, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), !nosanitize !{}
ret void
}
; CHECK-LABEL: define void @sancov
; CHECK-NOT: xor
; CHECK-NOT: 87960930222080
; CHECK: ret void
define void @load_store() sanitize_memory {
entry:
%x = alloca i32, align 4, !nosanitize !{}
store i32 4, i32* %x, align 4, !nosanitize !{}
%0 = load i32, i32* %x, align 4, !nosanitize !{}
%add = add nsw i32 %0, %0
store i32 %add, i32* %x, align 4, !nosanitize !{}
ret void
}
; CHECK-LABEL: define void @load_store
; CHECK-NOT: xor
; CHECK-NOT: 87960930222080
; CHECK: ret void

Some files were not shown because too many files have changed in this diff.