Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
commit 468663ddbb
parent d94e79959b
Author: Xamarin Public Jenkins (auto-signing)
Date:   2020-01-16 16:38:04 +00:00
48518 changed files with 2789335 additions and 61176 deletions

@@ -0,0 +1,51 @@
; RUN: llc < %s | FileCheck -check-prefix=ENABLED %s
; RUN: llc -disable-nvptx-load-store-vectorizer < %s | FileCheck -check-prefix=DISABLED %s
target triple = "nvptx64-nvidia-cuda"
; Check that the load-store vectorizer is enabled by default for nvptx, and
; that it's disabled by the appropriate flag.
; ENABLED: ld.v2.{{.}}32
; DISABLED: ld.{{.}}32
; DISABLED: ld.{{.}}32
define i32 @f(i32* %p) {
%p.1 = getelementptr i32, i32* %p, i32 1
%v0 = load i32, i32* %p, align 8
%v1 = load i32, i32* %p.1, align 4
%sum = add i32 %v0, %v1
ret i32 %sum
}
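; For intuition, a sketch (not part of the test; hypothetical function name)
; of roughly what the enabled vectorizer does to @f: the two adjacent i32
; loads are merged into one <2 x i32> load, which lowers to ld.v2.u32.
define i32 @f_vectorized_sketch(i32* %p) {
  %vp = bitcast i32* %p to <2 x i32>*
  %v = load <2 x i32>, <2 x i32>* %vp, align 8  ; single ld.v2.u32
  %v0 = extractelement <2 x i32> %v, i32 0
  %v1 = extractelement <2 x i32> %v, i32 1
  %sum = add i32 %v0, %v1
  ret i32 %sum
}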
define half @fh(half* %p) {
%p.1 = getelementptr half, half* %p, i32 1
%p.2 = getelementptr half, half* %p, i32 2
%p.3 = getelementptr half, half* %p, i32 3
%p.4 = getelementptr half, half* %p, i32 4
%v0 = load half, half* %p, align 64
%v1 = load half, half* %p.1, align 4
%v2 = load half, half* %p.2, align 4
%v3 = load half, half* %p.3, align 4
%v4 = load half, half* %p.4, align 4
%sum1 = fadd half %v0, %v1
%sum2 = fadd half %v2, %v3
%sum3 = fadd half %sum1, %sum2
%sum = fadd half %sum3, %v4
ret half %sum
}
define float @ff(float* %p) {
%p.1 = getelementptr float, float* %p, i32 1
%p.2 = getelementptr float, float* %p, i32 2
%p.3 = getelementptr float, float* %p, i32 3
%p.4 = getelementptr float, float* %p, i32 4
%v0 = load float, float* %p, align 64
%v1 = load float, float* %p.1, align 4
%v2 = load float, float* %p.2, align 4
%v3 = load float, float* %p.3, align 4
%v4 = load float, float* %p.4, align 4
%sum1 = fadd float %v0, %v1
%sum2 = fadd float %v2, %v3
%sum3 = fadd float %sum1, %sum2
%sum = fadd float %sum3, %v4
ret float %sum
}

@@ -0,0 +1,23 @@
; RUN: llc < %s | FileCheck %s
target triple = "nvptx64-nvidia-cuda"
declare void @foo()
; Load a value, then call a function. Branch, and use the loaded value only on
; one side of the branch. The load shouldn't be sunk beneath the call, because
; the call may modify memory.
define i32 @f(i32 %x, i32* %ptr, i1 %cond) {
Start:
; CHECK: ld.u32
%ptr_val = load i32, i32* %ptr
; CHECK: call.uni
call void @foo()
br i1 %cond, label %L1, label %L2
L1:
%ptr_val2 = add i32 %ptr_val, 100
br label %L2
L2:
%v4 = phi i32 [ %x, %Start ], [ %ptr_val2, %L1 ]
%v5 = add i32 %v4, 1000
ret i32 %v5
}
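; To see why the sink would be unsafe, suppose @foo (only declared above)
; were defined like this hypothetical callee:
@g_sketch = global i32 7
define void @foo_clobbers() {
  store i32 0, i32* @g_sketch   ; stands in for any store @foo may perform
  ret void
}
; If %ptr pointed at @g_sketch, a load of %ptr sunk below the call would
; read 0 rather than the value that was live before the call.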

@@ -0,0 +1,23 @@
; RUN: llc < %s | FileCheck %s
target triple = "nvptx64-nvidia-cuda"
declare void @llvm.nvvm.barrier0()
; Load a value, then syncthreads. Branch, and use the loaded value only on one
; side of the branch. The load shouldn't be sunk beneath the call, because
; syncthreads is modeled as maystore.
define i32 @f(i32 %x, i32* %ptr, i1 %cond) {
Start:
; CHECK: ld.u32
%ptr_val = load i32, i32* %ptr
; CHECK: bar.sync
call void @llvm.nvvm.barrier0()
br i1 %cond, label %L1, label %L2
L1:
%ptr_val2 = add i32 %ptr_val, 100
br label %L2
L2:
%v4 = phi i32 [ %x, %Start ], [ %ptr_val2, %L1 ]
%v5 = add i32 %v4, 1000
ret i32 %v5
}

@@ -0,0 +1,45 @@
; RUN: llc -O2 -tail-dup-size=100 -enable-tail-merge=0 < %s | FileCheck %s
target triple = "nvptx64-nvidia-cuda"
declare void @foo()
declare void @llvm.nvvm.barrier0()
; syncthreads shouldn't be duplicated.
; CHECK: .func call_syncthreads
; CHECK: bar.sync
; CHECK-NOT: bar.sync
define void @call_syncthreads(i32* %a, i32* %b, i1 %cond, i1 %cond2) nounwind {
br i1 %cond, label %L1, label %L2
br i1 %cond2, label %Ret, label %L1
Ret:
ret void
L1:
store i32 0, i32* %a
br label %L42
L2:
store i32 1, i32* %a
br label %L42
L42:
call void @llvm.nvvm.barrier0()
br label %Ret
}
; Check that this CFG really does trigger tail duplication when the callee is
; an ordinary call: unlike call_syncthreads, call_foo should end up with two
; copies of the call.
; CHECK: .func call_foo
; CHECK: call
; CHECK: call
define void @call_foo(i32* %a, i32* %b, i1 %cond, i1 %cond2) nounwind {
br i1 %cond, label %L1, label %L2
br i1 %cond2, label %Ret, label %L1
Ret:
ret void
L1:
store i32 0, i32* %a
br label %L42
L2:
store i32 1, i32* %a
br label %L42
L42:
call void @foo()
br label %Ret
}
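; The asymmetry comes from llvm.nvvm.barrier0 being modeled as noduplicate,
; which the tail duplicator honors. With explicit attributes (as the
; access-non-generic test elsewhere in this diff spells out), the
; declaration is equivalent to:
declare void @llvm.nvvm.barrier0() #0
attributes #0 = { noduplicate nounwind }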

@@ -0,0 +1,187 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix PTX
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix PTX
; RUN: opt -mtriple=nvptx-- < %s -S -infer-address-spaces | FileCheck %s --check-prefix IR
; RUN: opt -mtriple=nvptx64-- < %s -S -infer-address-spaces | FileCheck %s --check-prefix IR
@array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
@scalar = internal addrspace(3) global float 0.000000e+00, align 4
@generic_scalar = internal global float 0.000000e+00, align 4
define float @ld_from_shared() {
%1 = addrspacecast float* @generic_scalar to float addrspace(3)*
%2 = load float, float addrspace(3)* %1
ret float %2
}
; Verifies that address space inference (-infer-address-spaces) correctly
; rewrites generic address space usage to non-generic address space usage
; for the patterns we claim to handle:
; 1. load cast
; 2. store cast
; 3. load gep cast
; 4. store gep cast
; gep and cast can be an instruction or a constant expression. This function
; tries all possible combinations.
define void @ld_st_shared_f32(i32 %i, float %v) {
; IR-LABEL: @ld_st_shared_f32
; IR-NOT: addrspacecast
; PTX-LABEL: ld_st_shared_f32(
; load cast
%1 = load float, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
call void @use(float %1)
; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
; store cast
store float %v, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
; use syncthreads to disable optimizations across components
call void @llvm.nvvm.barrier0()
; PTX: bar.sync 0;
; cast; load
%2 = addrspacecast float addrspace(3)* @scalar to float*
%3 = load float, float* %2, align 4
call void @use(float %3)
; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
; cast; store
store float %v, float* %2, align 4
; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
call void @llvm.nvvm.barrier0()
; PTX: bar.sync 0;
; load gep cast
%4 = load float, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
call void @use(float %4)
; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
; store gep cast
store float %v, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
call void @llvm.nvvm.barrier0()
; PTX: bar.sync 0;
; gep cast; load
%5 = getelementptr inbounds [10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5
%6 = load float, float* %5, align 4
call void @use(float %6)
; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
; gep cast; store
store float %v, float* %5, align 4
; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
call void @llvm.nvvm.barrier0()
; PTX: bar.sync 0;
; cast; gep; load
%7 = addrspacecast [10 x float] addrspace(3)* @array to [10 x float]*
%8 = getelementptr inbounds [10 x float], [10 x float]* %7, i32 0, i32 %i
%9 = load float, float* %8, align 4
call void @use(float %9)
; PTX: ld.shared.f32 %f{{[0-9]+}}, [%{{(r|rl|rd)[0-9]+}}];
; cast; gep; store
store float %v, float* %8, align 4
; PTX: st.shared.f32 [%{{(r|rl|rd)[0-9]+}}], %f{{[0-9]+}};
call void @llvm.nvvm.barrier0()
; PTX: bar.sync 0;
ret void
}
; When hoisting an addrspacecast between different pointer types, replace the
; addrspacecast with a bitcast.
define i32 @ld_int_from_float() {
; IR-LABEL: @ld_int_from_float
; IR: load i32, i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*)
; PTX-LABEL: ld_int_from_float(
; PTX: ld.shared.u{{(32|64)}}
%1 = load i32, i32* addrspacecast(float addrspace(3)* @scalar to i32*), align 4
ret i32 %1
}
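; Spelled out, the post-optimization form the IR check above expects looks
; like this sketch (hypothetical function name; same @scalar global):
define i32 @ld_int_from_float_after_opt() {
  ; the cast to generic is gone; only a bitcast within addrspace(3)
  ; remains, so the load stays a direct shared-memory access
  %1 = load i32, i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*), align 4
  ret i32 %1
}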
define i32 @ld_int_from_global_float(float addrspace(1)* %input, i32 %i, i32 %j) {
; IR-LABEL: @ld_int_from_global_float(
; PTX-LABEL: ld_int_from_global_float(
%1 = addrspacecast float addrspace(1)* %input to float*
%2 = getelementptr float, float* %1, i32 %i
; IR-NEXT: getelementptr float, float addrspace(1)* %input, i32 %i
%3 = getelementptr float, float* %2, i32 %j
; IR-NEXT: getelementptr float, float addrspace(1)* {{%[^,]+}}, i32 %j
%4 = bitcast float* %3 to i32*
; IR-NEXT: bitcast float addrspace(1)* {{%[^ ]+}} to i32 addrspace(1)*
%5 = load i32, i32* %4
; IR-NEXT: load i32, i32 addrspace(1)* {{%.+}}
; PTX: ld.global
ret i32 %5
}
define void @nested_const_expr() {
; PTX-LABEL: nested_const_expr(
; store 1 to bitcast(gep(addrspacecast(array), 0, 1))
store i32 1, i32* bitcast (float* getelementptr ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i64 0, i64 1) to i32*), align 4
; PTX: mov.u32 %r1, 1;
; PTX-NEXT: st.shared.u32 [array+4], %r1;
ret void
}
define void @rauw(float addrspace(1)* %input) {
%generic_input = addrspacecast float addrspace(1)* %input to float*
%addr = getelementptr float, float* %generic_input, i64 10
%v = load float, float* %addr
store float %v, float* %addr
ret void
; IR-LABEL: @rauw(
; IR-NEXT: %addr = getelementptr float, float addrspace(1)* %input, i64 10
; IR-NEXT: %v = load float, float addrspace(1)* %addr
; IR-NEXT: store float %v, float addrspace(1)* %addr
; IR-NEXT: ret void
}
define void @loop() {
; IR-LABEL: @loop(
entry:
%p = addrspacecast [10 x float] addrspace(3)* @array to float*
%end = getelementptr float, float* %p, i64 10
br label %loop
loop:
%i = phi float* [ %p, %entry ], [ %i2, %loop ]
; IR: phi float addrspace(3)* [ %p, %entry ], [ %i2, %loop ]
%v = load float, float* %i
; IR: %v = load float, float addrspace(3)* %i
call void @use(float %v)
%i2 = getelementptr float, float* %i, i64 1
; IR: %i2 = getelementptr float, float addrspace(3)* %i, i64 1
%exit_cond = icmp eq float* %i2, %end
br i1 %exit_cond, label %exit, label %loop
exit:
ret void
}
@generic_end = external global float*
define void @loop_with_generic_bound() {
; IR-LABEL: @loop_with_generic_bound(
entry:
%p = addrspacecast [10 x float] addrspace(3)* @array to float*
%end = load float*, float** @generic_end
br label %loop
loop:
%i = phi float* [ %p, %entry ], [ %i2, %loop ]
; IR: phi float addrspace(3)* [ %p, %entry ], [ %i2, %loop ]
%v = load float, float* %i
; IR: %v = load float, float addrspace(3)* %i
call void @use(float %v)
%i2 = getelementptr float, float* %i, i64 1
; IR: %i2 = getelementptr float, float addrspace(3)* %i, i64 1
%exit_cond = icmp eq float* %i2, %end
; IR: addrspacecast float addrspace(3)* %i2 to float*
; IR: icmp eq float* %{{[0-9]+}}, %end
br i1 %exit_cond, label %exit, label %loop
exit:
ret void
}
declare void @llvm.nvvm.barrier0() #3
declare void @use(float)
attributes #3 = { noduplicate nounwind }

@@ -0,0 +1,19 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
define void @foo(i64 %a, i64 %add, i128* %retptr) {
; CHECK: add.s64
; CHECK: setp.lt.u64
; CHECK: setp.lt.u64
; CHECK: selp.u64
; CHECK: selp.b64
; CHECK: add.s64
%t1 = sext i64 %a to i128
%add2 = zext i64 %add to i128
%val = add i128 %t1, %add2
store i128 %val, i128* %retptr
ret void
}
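; The CHECK lines above describe the usual add-with-carry expansion of a
; 128-bit add (the selp lines materialize the carry and the high word of
; the sign extension). A sketch of the core pattern, assuming it mirrors
; the lowering; names are hypothetical:
define void @add128_sketch(i64 %alo, i64 %ahi, i64 %blo, i64 %bhi, i128* %retptr) {
  %lo = add i64 %alo, %blo      ; add.s64
  %c = icmp ult i64 %lo, %alo   ; setp.lt.u64: carry out of the low half
  %carry = zext i1 %c to i64    ; selp
  %hi0 = add i64 %ahi, %bhi
  %hi = add i64 %hi0, %carry    ; add.s64
  %lo.w = zext i64 %lo to i128
  %hi.w = zext i64 %hi to i128
  %hi.sh = shl i128 %hi.w, 64
  %val = or i128 %hi.sh, %lo.w
  store i128 %val, i128* %retptr
  ret void
}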

@@ -0,0 +1,13 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
; CHECK: .visible .global .align 4 .u32 g = 42;
; CHECK: .visible .global .align 4 .u32 g2 = generic(g);
; CHECK: .visible .global .align 4 .u32 g3 = g;
; CHECK: .visible .global .align 8 .u32 g4[2] = {0, generic(g)};
; CHECK: .visible .global .align 8 .u32 g5[2] = {0, generic(g)+8};
@g = addrspace(1) global i32 42
@g2 = addrspace(1) global i32* addrspacecast (i32 addrspace(1)* @g to i32*)
@g3 = addrspace(1) global i32 addrspace(1)* @g
@g4 = constant {i32*, i32*} {i32* null, i32* addrspacecast (i32 addrspace(1)* @g to i32*)}
@g5 = constant {i32*, i32*} {i32* null, i32* addrspacecast (i32 addrspace(1)* getelementptr (i32, i32 addrspace(1)* @g, i32 2) to i32*)}

@@ -0,0 +1,98 @@
; RUN: llc -O0 < %s -march=nvptx -mcpu=sm_20 | FileCheck %s -check-prefix=PTX32
; RUN: llc -O0 < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s -check-prefix=PTX64
define i32 @conv1(i32 addrspace(1)* %ptr) {
; PTX32: conv1
; PTX32: cvta.global.u32
; PTX32: ld.u32
; PTX64: conv1
; PTX64: cvta.global.u64
; PTX64: ld.u32
%genptr = addrspacecast i32 addrspace(1)* %ptr to i32*
%val = load i32, i32* %genptr
ret i32 %val
}
define i32 @conv2(i32 addrspace(3)* %ptr) {
; PTX32: conv2
; PTX32: cvta.shared.u32
; PTX32: ld.u32
; PTX64: conv2
; PTX64: cvta.shared.u64
; PTX64: ld.u32
%genptr = addrspacecast i32 addrspace(3)* %ptr to i32*
%val = load i32, i32* %genptr
ret i32 %val
}
define i32 @conv3(i32 addrspace(4)* %ptr) {
; PTX32: conv3
; PTX32: cvta.const.u32
; PTX32: ld.u32
; PTX64: conv3
; PTX64: cvta.const.u64
; PTX64: ld.u32
%genptr = addrspacecast i32 addrspace(4)* %ptr to i32*
%val = load i32, i32* %genptr
ret i32 %val
}
define i32 @conv4(i32 addrspace(5)* %ptr) {
; PTX32: conv4
; PTX32: cvta.local.u32
; PTX32: ld.u32
; PTX64: conv4
; PTX64: cvta.local.u64
; PTX64: ld.u32
%genptr = addrspacecast i32 addrspace(5)* %ptr to i32*
%val = load i32, i32* %genptr
ret i32 %val
}
define i32 @conv5(i32* %ptr) {
; PTX32: conv5
; PTX32: cvta.to.global.u32
; PTX32: ld.global.u32
; PTX64: conv5
; PTX64: cvta.to.global.u64
; PTX64: ld.global.u32
%specptr = addrspacecast i32* %ptr to i32 addrspace(1)*
%val = load i32, i32 addrspace(1)* %specptr
ret i32 %val
}
define i32 @conv6(i32* %ptr) {
; PTX32: conv6
; PTX32: cvta.to.shared.u32
; PTX32: ld.shared.u32
; PTX64: conv6
; PTX64: cvta.to.shared.u64
; PTX64: ld.shared.u32
%specptr = addrspacecast i32* %ptr to i32 addrspace(3)*
%val = load i32, i32 addrspace(3)* %specptr
ret i32 %val
}
define i32 @conv7(i32* %ptr) {
; PTX32: conv7
; PTX32: cvta.to.const.u32
; PTX32: ld.const.u32
; PTX64: conv7
; PTX64: cvta.to.const.u64
; PTX64: ld.const.u32
%specptr = addrspacecast i32* %ptr to i32 addrspace(4)*
%val = load i32, i32 addrspace(4)* %specptr
ret i32 %val
}
define i32 @conv8(i32* %ptr) {
; PTX32: conv8
; PTX32: cvta.to.local.u32
; PTX32: ld.local.u32
; PTX64: conv8
; PTX64: cvta.to.local.u64
; PTX64: ld.local.u32
%specptr = addrspacecast i32* %ptr to i32 addrspace(5)*
%val = load i32, i32 addrspace(5)* %specptr
ret i32 %val
}

@@ -0,0 +1,20 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
; Make sure aggregate param types get emitted properly: %struct.float4 is
; 4 floats x 4 bytes = 16, and [5 x i32] is 5 x 4 bytes = 20.
%struct.float4 = type { float, float, float, float }
; CHECK: .visible .func bar
; CHECK: .param .align 4 .b8 bar_param_0[16]
define void @bar(%struct.float4 %f) {
entry:
ret void
}
; CHECK: .visible .func foo
; CHECK: .param .align 4 .b8 foo_param_0[20]
define void @foo([5 x i32] %f) {
entry:
ret void
}

@@ -0,0 +1,62 @@
; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s
declare <2 x float> @barv(<2 x float> %input)
declare <3 x float> @barv3(<3 x float> %input)
declare [2 x float] @bara([2 x float] %input)
declare {float, float} @bars({float, float} %input)
define void @test_v2f32(<2 x float> %input, <2 x float>* %output) {
; CHECK-LABEL: @test_v2f32
%call = tail call <2 x float> @barv(<2 x float> %input)
; CHECK: .param .align 8 .b8 retval0[8];
; CHECK: ld.param.v2.f32 {[[E0:%f[0-9]+]], [[E1:%f[0-9]+]]}, [retval0+0];
store <2 x float> %call, <2 x float>* %output, align 8
; CHECK: st.v2.f32 [{{%rd[0-9]+}}], {[[E0]], [[E1]]}
ret void
}
define void @test_v3f32(<3 x float> %input, <3 x float>* %output) {
; CHECK-LABEL: @test_v3f32
;
%call = tail call <3 x float> @barv3(<3 x float> %input)
; CHECK: .param .align 16 .b8 retval0[16];
; CHECK-DAG: ld.param.v2.f32 {[[E0:%f[0-9]+]], [[E1:%f[0-9]+]]}, [retval0+0];
; CHECK-DAG: ld.param.f32 [[E2:%f[0-9]+]], [retval0+8];
; Make sure we don't load more values than we need to.
; CHECK-NOT: ld.param.f32 [[E3:%f[0-9]+]], [retval0+12];
store <3 x float> %call, <3 x float>* %output, align 8
; CHECK-DAG: st.f32 [{{%rd[0-9]}}+8],
; -- This is suboptimal. We should do st.v2.f32 instead
; of combining 2xf32 into i64.
; CHECK-DAG: st.u64 [{{%rd[0-9]}}],
; CHECK: ret;
ret void
}
define void @test_a2f32([2 x float] %input, [2 x float]* %output) {
; CHECK-LABEL: @test_a2f32
%call = tail call [2 x float] @bara([2 x float] %input)
; CHECK: .param .align 4 .b8 retval0[8];
; CHECK-DAG: ld.param.f32 [[ELEMA1:%f[0-9]+]], [retval0+0];
; CHECK-DAG: ld.param.f32 [[ELEMA2:%f[0-9]+]], [retval0+4];
store [2 x float] %call, [2 x float]* %output, align 4
; CHECK: }
; CHECK-DAG: st.f32 [{{%rd[0-9]+}}], [[ELEMA1]]
; CHECK-DAG: st.f32 [{{%rd[0-9]+}}+4], [[ELEMA2]]
ret void
; CHECK: ret
}
define void @test_s2f32({float, float} %input, {float, float}* %output) {
; CHECK-LABEL: @test_s2f32
%call = tail call {float, float} @bars({float, float} %input)
; CHECK: .param .align 4 .b8 retval0[8];
; CHECK-DAG: ld.param.f32 [[ELEMS1:%f[0-9]+]], [retval0+0];
; CHECK-DAG: ld.param.f32 [[ELEMS2:%f[0-9]+]], [retval0+4];
store {float, float} %call, {float, float}* %output, align 4
; CHECK: }
; CHECK-DAG: st.f32 [{{%rd[0-9]+}}], [[ELEMS1]]
; CHECK-DAG: st.f32 [{{%rd[0-9]+}}+4], [[ELEMS2]]
ret void
; CHECK: ret
}

@@ -0,0 +1,7 @@
; RUN: not llc < %s -march=nvptx -mcpu=sm_20 2>&1 | FileCheck %s
; Check that llc dies gracefully when given an alias.
define i32 @a() { ret i32 0 }
; CHECK: ERROR: Module has aliases
@b = internal alias i32 (), i32 ()* @a

@@ -0,0 +1,52 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
@texture = internal addrspace(1) global i64 0, align 8
; CHECK: .global .texref texture
@surface = internal addrspace(1) global i64 0, align 8
; CHECK: .global .surfref surface
; CHECK: .entry kernel_func_maxntid
define void @kernel_func_maxntid(float* %a) {
; CHECK: .maxntid 10, 20, 30
; CHECK: ret
ret void
}
; CHECK: .entry kernel_func_reqntid
define void @kernel_func_reqntid(float* %a) {
; CHECK: .reqntid 11, 22, 33
; CHECK: ret
ret void
}
; CHECK: .entry kernel_func_minctasm
define void @kernel_func_minctasm(float* %a) {
; CHECK: .minnctapersm 42
; CHECK: ret
ret void
}
; CHECK-LABEL: .entry kernel_func_maxnreg
define void @kernel_func_maxnreg() {
; CHECK: .maxnreg 1234
; CHECK: ret
ret void
}
!nvvm.annotations = !{!1, !2, !3, !4, !5, !6, !7, !8, !9, !10}
!1 = !{void (float*)* @kernel_func_maxntid, !"kernel", i32 1}
!2 = !{void (float*)* @kernel_func_maxntid, !"maxntidx", i32 10, !"maxntidy", i32 20, !"maxntidz", i32 30}
!3 = !{void (float*)* @kernel_func_reqntid, !"kernel", i32 1}
!4 = !{void (float*)* @kernel_func_reqntid, !"reqntidx", i32 11, !"reqntidy", i32 22, !"reqntidz", i32 33}
!5 = !{void (float*)* @kernel_func_minctasm, !"kernel", i32 1}
!6 = !{void (float*)* @kernel_func_minctasm, !"minctasm", i32 42}
!7 = !{void ()* @kernel_func_maxnreg, !"kernel", i32 1}
!8 = !{void ()* @kernel_func_maxnreg, !"maxnreg", i32 1234}
!9 = !{i64 addrspace(1)* @texture, !"texture", i32 1}
!10 = !{i64 addrspace(1)* @surface, !"surface", i32 1}

@@ -0,0 +1,13 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
; CHECK: .visible .func (.param .align 16 .b8 func_retval0[16]) foo0(
; CHECK: .param .align 4 .b8 foo0_param_0[8]
define <4 x float> @foo0({float, float} %arg0) {
ret <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>
}
; CHECK: .visible .func (.param .align 8 .b8 func_retval0[8]) foo1(
; CHECK: .param .align 8 .b8 foo1_param_0[16]
define <2 x float> @foo1({float, float, i64} %arg0) {
ret <2 x float> <float 1.0, float 1.0>
}

@@ -0,0 +1,72 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 -fp-contract=fast | FileCheck %s
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -fp-contract=fast | FileCheck %s
;; These tests should run for all targets
;;===-- Basic instruction selection tests ---------------------------------===;;
;;; f64
define double @fadd_f64(double %a, double %b) {
; CHECK: add.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
; CHECK: ret
%ret = fadd double %a, %b
ret double %ret
}
define double @fsub_f64(double %a, double %b) {
; CHECK: sub.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
; CHECK: ret
%ret = fsub double %a, %b
ret double %ret
}
define double @fmul_f64(double %a, double %b) {
; CHECK: mul.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
; CHECK: ret
%ret = fmul double %a, %b
ret double %ret
}
define double @fdiv_f64(double %a, double %b) {
; CHECK: div.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
; CHECK: ret
%ret = fdiv double %a, %b
ret double %ret
}
;; PTX does not have a floating-point rem instruction
;;; f32
define float @fadd_f32(float %a, float %b) {
; CHECK: add.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}
; CHECK: ret
%ret = fadd float %a, %b
ret float %ret
}
define float @fsub_f32(float %a, float %b) {
; CHECK: sub.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}
; CHECK: ret
%ret = fsub float %a, %b
ret float %ret
}
define float @fmul_f32(float %a, float %b) {
; CHECK: mul.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}
; CHECK: ret
%ret = fmul float %a, %b
ret float %ret
}
define float @fdiv_f32(float %a, float %b) {
; CHECK: div.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}
; CHECK: ret
%ret = fdiv float %a, %b
ret float %ret
}
;; PTX does not have a floating-point rem instruction

@@ -0,0 +1,317 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
;; These tests should run for all targets
;;===-- Basic instruction selection tests ---------------------------------===;;
;;; i64
define i64 @add_i64(i64 %a, i64 %b) {
; CHECK: add.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = add i64 %a, %b
ret i64 %ret
}
define i64 @sub_i64(i64 %a, i64 %b) {
; CHECK: sub.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = sub i64 %a, %b
ret i64 %ret
}
define i64 @mul_i64(i64 %a, i64 %b) {
; CHECK: mul.lo.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = mul i64 %a, %b
ret i64 %ret
}
define i64 @umul_lohi_i64(i64 %a) {
; CHECK-LABEL: umul_lohi_i64(
entry:
%0 = zext i64 %a to i128
%1 = mul i128 %0, 288
; CHECK: mul.lo.{{u|s}}64
; CHECK: mul.hi.{{u|s}}64
%2 = lshr i128 %1, 1
%3 = trunc i128 %2 to i64
ret i64 %3
}
define i64 @smul_lohi_i64(i64 %a) {
; CHECK-LABEL: smul_lohi_i64(
entry:
%0 = sext i64 %a to i128
%1 = mul i128 %0, 288
; CHECK: mul.lo.{{u|s}}64
; CHECK: mul.hi.{{u|s}}64
%2 = ashr i128 %1, 1
%3 = trunc i128 %2 to i64
ret i64 %3
}
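; For reference, mul.lo and mul.hi return the low and high 64 bits of the
; full 64x64->128 product. A semantic sketch (illustration only; this
; hypothetical function would itself be lowered to those instructions):
define i64 @mul_hi_u64_sketch(i64 %a, i64 %b) {
  %a.w = zext i64 %a to i128
  %b.w = zext i64 %b to i128
  %p = mul i128 %a.w, %b.w       ; full 128-bit product
  %hi.w = lshr i128 %p, 64
  %hi = trunc i128 %hi.w to i64  ; the bits mul.hi.u64 computes
  ret i64 %hi
}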
define i64 @sdiv_i64(i64 %a, i64 %b) {
; CHECK: div.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = sdiv i64 %a, %b
ret i64 %ret
}
define i64 @udiv_i64(i64 %a, i64 %b) {
; CHECK: div.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = udiv i64 %a, %b
ret i64 %ret
}
define i64 @srem_i64(i64 %a, i64 %b) {
; CHECK: rem.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = srem i64 %a, %b
ret i64 %ret
}
define i64 @urem_i64(i64 %a, i64 %b) {
; CHECK: rem.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = urem i64 %a, %b
ret i64 %ret
}
define i64 @and_i64(i64 %a, i64 %b) {
; CHECK: and.b64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = and i64 %a, %b
ret i64 %ret
}
define i64 @or_i64(i64 %a, i64 %b) {
; CHECK: or.b64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = or i64 %a, %b
ret i64 %ret
}
define i64 @xor_i64(i64 %a, i64 %b) {
; CHECK: xor.b64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = xor i64 %a, %b
ret i64 %ret
}
define i64 @shl_i64(i64 %a, i64 %b) {
; PTX requires 32-bit shift amount
; CHECK: shl.b64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = shl i64 %a, %b
ret i64 %ret
}
define i64 @ashr_i64(i64 %a, i64 %b) {
; PTX requires 32-bit shift amount
; CHECK: shr.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = ashr i64 %a, %b
ret i64 %ret
}
define i64 @lshr_i64(i64 %a, i64 %b) {
; PTX requires 32-bit shift amount
; CHECK: shr.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = lshr i64 %a, %b
ret i64 %ret
}
;;; i32
define i32 @add_i32(i32 %a, i32 %b) {
; CHECK: add.s32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = add i32 %a, %b
ret i32 %ret
}
define i32 @sub_i32(i32 %a, i32 %b) {
; CHECK: sub.s32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = sub i32 %a, %b
ret i32 %ret
}
define i32 @mul_i32(i32 %a, i32 %b) {
; CHECK: mul.lo.s32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = mul i32 %a, %b
ret i32 %ret
}
define i32 @sdiv_i32(i32 %a, i32 %b) {
; CHECK: div.s32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = sdiv i32 %a, %b
ret i32 %ret
}
define i32 @udiv_i32(i32 %a, i32 %b) {
; CHECK: div.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = udiv i32 %a, %b
ret i32 %ret
}
define i32 @srem_i32(i32 %a, i32 %b) {
; CHECK: rem.s32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = srem i32 %a, %b
ret i32 %ret
}
define i32 @urem_i32(i32 %a, i32 %b) {
; CHECK: rem.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = urem i32 %a, %b
ret i32 %ret
}
define i32 @and_i32(i32 %a, i32 %b) {
; CHECK: and.b32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = and i32 %a, %b
ret i32 %ret
}
define i32 @or_i32(i32 %a, i32 %b) {
; CHECK: or.b32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = or i32 %a, %b
ret i32 %ret
}
define i32 @xor_i32(i32 %a, i32 %b) {
; CHECK: xor.b32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = xor i32 %a, %b
ret i32 %ret
}
define i32 @shl_i32(i32 %a, i32 %b) {
; CHECK: shl.b32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = shl i32 %a, %b
ret i32 %ret
}
define i32 @ashr_i32(i32 %a, i32 %b) {
; CHECK: shr.s32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = ashr i32 %a, %b
ret i32 %ret
}
define i32 @lshr_i32(i32 %a, i32 %b) {
; CHECK: shr.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = lshr i32 %a, %b
ret i32 %ret
}
;;; i16
define i16 @add_i16(i16 %a, i16 %b) {
; CHECK: add.s16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}
; CHECK: ret
%ret = add i16 %a, %b
ret i16 %ret
}
define i16 @sub_i16(i16 %a, i16 %b) {
; CHECK: sub.s16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}
; CHECK: ret
%ret = sub i16 %a, %b
ret i16 %ret
}
define i16 @mul_i16(i16 %a, i16 %b) {
; CHECK: mul.lo.s16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}
; CHECK: ret
%ret = mul i16 %a, %b
ret i16 %ret
}
define i16 @sdiv_i16(i16 %a, i16 %b) {
; CHECK: div.s16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}
; CHECK: ret
%ret = sdiv i16 %a, %b
ret i16 %ret
}
define i16 @udiv_i16(i16 %a, i16 %b) {
; CHECK: div.u16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}
; CHECK: ret
%ret = udiv i16 %a, %b
ret i16 %ret
}
define i16 @srem_i16(i16 %a, i16 %b) {
; CHECK: rem.s16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}
; CHECK: ret
%ret = srem i16 %a, %b
ret i16 %ret
}
define i16 @urem_i16(i16 %a, i16 %b) {
; CHECK: rem.u16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}
; CHECK: ret
%ret = urem i16 %a, %b
ret i16 %ret
}
define i16 @and_i16(i16 %a, i16 %b) {
; CHECK: and.b16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}
; CHECK: ret
%ret = and i16 %a, %b
ret i16 %ret
}
define i16 @or_i16(i16 %a, i16 %b) {
; CHECK: or.b16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}
; CHECK: ret
%ret = or i16 %a, %b
ret i16 %ret
}
define i16 @xor_i16(i16 %a, i16 %b) {
; CHECK: xor.b16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}
; CHECK: ret
%ret = xor i16 %a, %b
ret i16 %ret
}
define i16 @shl_i16(i16 %a, i16 %b) {
; PTX requires 32-bit shift amount
; CHECK: shl.b16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = shl i16 %a, %b
ret i16 %ret
}
define i16 @ashr_i16(i16 %a, i16 %b) {
; PTX requires 32-bit shift amount
; CHECK: shr.s16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = ashr i16 %a, %b
ret i16 %ret
}
define i16 @lshr_i16(i16 %a, i16 %b) {
; PTX requires 32-bit shift amount
; CHECK: shr.u16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = lshr i16 %a, %b
ret i16 %ret
}

@@ -0,0 +1,19 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_60 | FileCheck %s
; RUN: llc < %s -march=nvptx64 -mcpu=sm_60 | FileCheck %s
; CHECK-LABEL: .func test(
define void @test(double* %dp0, double addrspace(1)* %dp1, double addrspace(3)* %dp3, double %d) {
; CHECK: atom.add.f64
%r1 = call double @llvm.nvvm.atomic.load.add.f64.p0f64(double* %dp0, double %d)
; CHECK: atom.global.add.f64
%r2 = call double @llvm.nvvm.atomic.load.add.f64.p1f64(double addrspace(1)* %dp1, double %d)
; CHECK: atom.shared.add.f64
%ret = call double @llvm.nvvm.atomic.load.add.f64.p3f64(double addrspace(3)* %dp3, double %d)
ret void
}
declare double @llvm.nvvm.atomic.load.add.f64.p0f64(double* nocapture, double) #1
declare double @llvm.nvvm.atomic.load.add.f64.p1f64(double addrspace(1)* nocapture, double) #1
declare double @llvm.nvvm.atomic.load.add.f64.p3f64(double addrspace(3)* nocapture, double) #1
attributes #1 = { argmemonly nounwind }

@@ -0,0 +1,187 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_60 | FileCheck %s -check-prefixes=CHECK,CHECK32
; RUN: llc < %s -march=nvptx64 -mcpu=sm_60 | FileCheck %s
; CHECK-LABEL: .func test_atomics_scope(
define void @test_atomics_scope(float* %fp, float %f,
double* %dfp, double %df,
i32* %ip, i32 %i,
i32* %uip, i32 %ui,
i64* %llp, i64 %ll) #0 {
entry:
; CHECK: atom.cta.add.s32
%tmp36 = tail call i32 @llvm.nvvm.atomic.add.gen.i.cta.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.cta.add.u64
%tmp38 = tail call i64 @llvm.nvvm.atomic.add.gen.i.cta.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.sys.add.s32
%tmp39 = tail call i32 @llvm.nvvm.atomic.add.gen.i.sys.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.sys.add.u64
%tmp41 = tail call i64 @llvm.nvvm.atomic.add.gen.i.sys.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.cta.add.f32
%tmp42 = tail call float @llvm.nvvm.atomic.add.gen.f.cta.f32.p0f32(float* %fp, float %f)
; CHECK: atom.cta.add.f64
%tmp43 = tail call double @llvm.nvvm.atomic.add.gen.f.cta.f64.p0f64(double* %dfp, double %df)
; CHECK: atom.sys.add.f32
%tmp44 = tail call float @llvm.nvvm.atomic.add.gen.f.sys.f32.p0f32(float* %fp, float %f)
; CHECK: atom.sys.add.f64
%tmp45 = tail call double @llvm.nvvm.atomic.add.gen.f.sys.f64.p0f64(double* %dfp, double %df)
; CHECK: atom.cta.exch.b32
%tmp46 = tail call i32 @llvm.nvvm.atomic.exch.gen.i.cta.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.cta.exch.b64
%tmp48 = tail call i64 @llvm.nvvm.atomic.exch.gen.i.cta.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.sys.exch.b32
%tmp49 = tail call i32 @llvm.nvvm.atomic.exch.gen.i.sys.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.sys.exch.b64
%tmp51 = tail call i64 @llvm.nvvm.atomic.exch.gen.i.sys.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.cta.max.s32
%tmp52 = tail call i32 @llvm.nvvm.atomic.max.gen.i.cta.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.cta.max.s64
%tmp56 = tail call i64 @llvm.nvvm.atomic.max.gen.i.cta.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.sys.max.s32
%tmp58 = tail call i32 @llvm.nvvm.atomic.max.gen.i.sys.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.sys.max.s64
%tmp62 = tail call i64 @llvm.nvvm.atomic.max.gen.i.sys.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.cta.min.s32
%tmp64 = tail call i32 @llvm.nvvm.atomic.min.gen.i.cta.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.cta.min.s64
%tmp68 = tail call i64 @llvm.nvvm.atomic.min.gen.i.cta.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.sys.min.s32
%tmp70 = tail call i32 @llvm.nvvm.atomic.min.gen.i.sys.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.sys.min.s64
%tmp74 = tail call i64 @llvm.nvvm.atomic.min.gen.i.sys.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.cta.inc.u32
%tmp76 = tail call i32 @llvm.nvvm.atomic.inc.gen.i.cta.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.sys.inc.u32
%tmp77 = tail call i32 @llvm.nvvm.atomic.inc.gen.i.sys.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.cta.dec.u32
%tmp78 = tail call i32 @llvm.nvvm.atomic.dec.gen.i.cta.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.sys.dec.u32
%tmp79 = tail call i32 @llvm.nvvm.atomic.dec.gen.i.sys.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.cta.and.b32
%tmp80 = tail call i32 @llvm.nvvm.atomic.and.gen.i.cta.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.cta.and.b64
%tmp82 = tail call i64 @llvm.nvvm.atomic.and.gen.i.cta.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.sys.and.b32
%tmp83 = tail call i32 @llvm.nvvm.atomic.and.gen.i.sys.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.sys.and.b64
%tmp85 = tail call i64 @llvm.nvvm.atomic.and.gen.i.sys.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.cta.or.b32
%tmp86 = tail call i32 @llvm.nvvm.atomic.or.gen.i.cta.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.cta.or.b64
%tmp88 = tail call i64 @llvm.nvvm.atomic.or.gen.i.cta.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.sys.or.b32
%tmp89 = tail call i32 @llvm.nvvm.atomic.or.gen.i.sys.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.sys.or.b64
%tmp91 = tail call i64 @llvm.nvvm.atomic.or.gen.i.sys.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.cta.xor.b32
%tmp92 = tail call i32 @llvm.nvvm.atomic.xor.gen.i.cta.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.cta.xor.b64
%tmp94 = tail call i64 @llvm.nvvm.atomic.xor.gen.i.cta.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.sys.xor.b32
%tmp95 = tail call i32 @llvm.nvvm.atomic.xor.gen.i.sys.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.sys.xor.b64
%tmp97 = tail call i64 @llvm.nvvm.atomic.xor.gen.i.sys.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.cta.cas.b32
%tmp98 = tail call i32 @llvm.nvvm.atomic.cas.gen.i.cta.i32.p0i32(i32* %ip, i32 %i, i32 %i)
; CHECK: atom.cta.cas.b64
%tmp100 = tail call i64 @llvm.nvvm.atomic.cas.gen.i.cta.i64.p0i64(i64* %llp, i64 %ll, i64 %ll)
; CHECK: atom.sys.cas.b32
%tmp101 = tail call i32 @llvm.nvvm.atomic.cas.gen.i.sys.i32.p0i32(i32* %ip, i32 %i, i32 %i)
; CHECK: atom.sys.cas.b64
%tmp103 = tail call i64 @llvm.nvvm.atomic.cas.gen.i.sys.i64.p0i64(i64* %llp, i64 %ll, i64 %ll)
; CHECK: ret
ret void
}
; Make sure we use constants as operands to our scoped atomic calls, where appropriate.
; CHECK-LABEL: .func test_atomics_scope_imm(
define void @test_atomics_scope_imm(float* %fp, float %f,
double* %dfp, double %df,
i32* %ip, i32 %i,
i32* %uip, i32 %ui,
i64* %llp, i64 %ll) #0 {
; CHECK: atom.cta.add.s32{{.*}} %r{{[0-9]+}};
%tmp1r = tail call i32 @llvm.nvvm.atomic.add.gen.i.cta.i32.p0i32(i32* %ip, i32 %i)
; CHECK: atom.cta.add.s32{{.*}}, 1;
%tmp1i = tail call i32 @llvm.nvvm.atomic.add.gen.i.cta.i32.p0i32(i32* %ip, i32 1)
; CHECK: atom.cta.add.u64{{.*}}, %rd{{[0-9]+}};
%tmp2r = tail call i64 @llvm.nvvm.atomic.add.gen.i.cta.i64.p0i64(i64* %llp, i64 %ll)
; CHECK: atom.cta.add.u64{{.*}}, 2;
%tmp2i = tail call i64 @llvm.nvvm.atomic.add.gen.i.cta.i64.p0i64(i64* %llp, i64 2)
; CHECK: atom.cta.add.f32{{.*}}, %f{{[0-9]+}};
%tmp3r = tail call float @llvm.nvvm.atomic.add.gen.f.cta.f32.p0f32(float* %fp, float %f)
; CHECK: atom.cta.add.f32{{.*}}, 0f40400000;
%tmp3i = tail call float @llvm.nvvm.atomic.add.gen.f.cta.f32.p0f32(float* %fp, float 3.0)
; CHECK: atom.cta.add.f64{{.*}}, %fd{{[0-9]+}};
%tmp4r = tail call double @llvm.nvvm.atomic.add.gen.f.cta.f64.p0f64(double* %dfp, double %df)
; CHECK: atom.cta.add.f64{{.*}}, 0d4010000000000000;
%tmp4i = tail call double @llvm.nvvm.atomic.add.gen.f.cta.f64.p0f64(double* %dfp, double 4.0)
; CAS is implemented separately and has more arguments
; CHECK: atom.cta.cas.b32{{.*}}], %r{{[0-9]+}}, %r{{[0-9]+}};
%tmp5rr = tail call i32 @llvm.nvvm.atomic.cas.gen.i.cta.i32.p0i32(i32* %ip, i32 %i, i32 %i)
; For some reason in 64-bit mode we end up passing 51 via a register.
; CHECK32: atom.cta.cas.b32{{.*}}], %r{{[0-9]+}}, 51;
%tmp5ri = tail call i32 @llvm.nvvm.atomic.cas.gen.i.cta.i32.p0i32(i32* %ip, i32 %i, i32 51)
; CHECK: atom.cta.cas.b32{{.*}}], 52, %r{{[0-9]+}};
%tmp5ir = tail call i32 @llvm.nvvm.atomic.cas.gen.i.cta.i32.p0i32(i32* %ip, i32 52, i32 %i)
; CHECK: atom.cta.cas.b32{{.*}}], 53, 54;
%tmp5ii = tail call i32 @llvm.nvvm.atomic.cas.gen.i.cta.i32.p0i32(i32* %ip, i32 53, i32 54)
; CHECK: ret
ret void
}
declare i32 @llvm.nvvm.atomic.add.gen.i.cta.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.add.gen.i.cta.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.add.gen.i.sys.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.add.gen.i.sys.i64.p0i64(i64* nocapture, i64) #1
declare float @llvm.nvvm.atomic.add.gen.f.cta.f32.p0f32(float* nocapture, float) #1
declare double @llvm.nvvm.atomic.add.gen.f.cta.f64.p0f64(double* nocapture, double) #1
declare float @llvm.nvvm.atomic.add.gen.f.sys.f32.p0f32(float* nocapture, float) #1
declare double @llvm.nvvm.atomic.add.gen.f.sys.f64.p0f64(double* nocapture, double) #1
declare i32 @llvm.nvvm.atomic.exch.gen.i.cta.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.exch.gen.i.cta.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.exch.gen.i.sys.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.exch.gen.i.sys.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.max.gen.i.cta.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.max.gen.i.cta.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.max.gen.i.sys.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.max.gen.i.sys.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.min.gen.i.cta.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.min.gen.i.cta.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.min.gen.i.sys.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.min.gen.i.sys.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.inc.gen.i.cta.i32.p0i32(i32* nocapture, i32) #1
declare i32 @llvm.nvvm.atomic.inc.gen.i.sys.i32.p0i32(i32* nocapture, i32) #1
declare i32 @llvm.nvvm.atomic.dec.gen.i.cta.i32.p0i32(i32* nocapture, i32) #1
declare i32 @llvm.nvvm.atomic.dec.gen.i.sys.i32.p0i32(i32* nocapture, i32) #1
declare i32 @llvm.nvvm.atomic.and.gen.i.cta.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.and.gen.i.cta.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.and.gen.i.sys.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.and.gen.i.sys.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.or.gen.i.cta.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.or.gen.i.cta.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.or.gen.i.sys.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.or.gen.i.sys.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.xor.gen.i.cta.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.xor.gen.i.cta.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.xor.gen.i.sys.i32.p0i32(i32* nocapture, i32) #1
declare i64 @llvm.nvvm.atomic.xor.gen.i.sys.i64.p0i64(i64* nocapture, i64) #1
declare i32 @llvm.nvvm.atomic.cas.gen.i.cta.i32.p0i32(i32* nocapture, i32, i32) #1
declare i64 @llvm.nvvm.atomic.cas.gen.i.cta.i64.p0i64(i64* nocapture, i64, i64) #1
declare i32 @llvm.nvvm.atomic.cas.gen.i.sys.i32.p0i32(i32* nocapture, i32, i32) #1
declare i64 @llvm.nvvm.atomic.cas.gen.i.sys.i64.p0i64(i64* nocapture, i64, i64) #1
attributes #1 = { argmemonly nounwind }

@@ -0,0 +1,182 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
; CHECK-LABEL: atom0
define i32 @atom0(i32* %addr, i32 %val) {
; CHECK: atom.add.u32
%ret = atomicrmw add i32* %addr, i32 %val seq_cst
ret i32 %ret
}
; CHECK-LABEL: atom1
define i64 @atom1(i64* %addr, i64 %val) {
; CHECK: atom.add.u64
%ret = atomicrmw add i64* %addr, i64 %val seq_cst
ret i64 %ret
}
; CHECK-LABEL: atom2
define i32 @atom2(i32* %subr, i32 %val) {
; CHECK: neg.s32
; CHECK: atom.add.u32
%ret = atomicrmw sub i32* %subr, i32 %val seq_cst
ret i32 %ret
}
; CHECK-LABEL: atom3
define i64 @atom3(i64* %subr, i64 %val) {
; CHECK: neg.s64
; CHECK: atom.add.u64
%ret = atomicrmw sub i64* %subr, i64 %val seq_cst
ret i64 %ret
}
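; atom2 and atom3 reflect that PTX has no atom.sub: the backend negates the
; operand and emits atom.add instead. Equivalently (a sketch; hypothetical
; function name):
define i32 @atom_sub_via_add(i32* %addr, i32 %val) {
  %neg = sub i32 0, %val                             ; neg.s32
  %ret = atomicrmw add i32* %addr, i32 %neg seq_cst  ; atom.add.u32
  ret i32 %ret
}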
; CHECK-LABEL: atom4
define i32 @atom4(i32* %subr, i32 %val) {
; CHECK: atom.and.b32
%ret = atomicrmw and i32* %subr, i32 %val seq_cst
ret i32 %ret
}
; CHECK-LABEL: atom5
define i64 @atom5(i64* %subr, i64 %val) {
; CHECK: atom.and.b64
%ret = atomicrmw and i64* %subr, i64 %val seq_cst
ret i64 %ret
}
;; NAND not yet supported
;define i32 @atom6(i32* %subr, i32 %val) {
; %ret = atomicrmw nand i32* %subr, i32 %val seq_cst
; ret i32 %ret
;}
;define i64 @atom7(i64* %subr, i64 %val) {
; %ret = atomicrmw nand i64* %subr, i64 %val seq_cst
; ret i64 %ret
;}
; CHECK-LABEL: atom8
define i32 @atom8(i32* %subr, i32 %val) {
; CHECK: atom.or.b32
%ret = atomicrmw or i32* %subr, i32 %val seq_cst
ret i32 %ret
}
; CHECK-LABEL: atom9
define i64 @atom9(i64* %subr, i64 %val) {
; CHECK: atom.or.b64
%ret = atomicrmw or i64* %subr, i64 %val seq_cst
ret i64 %ret
}
; CHECK-LABEL: atom10
define i32 @atom10(i32* %subr, i32 %val) {
; CHECK: atom.xor.b32
%ret = atomicrmw xor i32* %subr, i32 %val seq_cst
ret i32 %ret
}
; CHECK-LABEL: atom11
define i64 @atom11(i64* %subr, i64 %val) {
; CHECK: atom.xor.b64
%ret = atomicrmw xor i64* %subr, i64 %val seq_cst
ret i64 %ret
}
; CHECK-LABEL: atom12
define i32 @atom12(i32* %subr, i32 %val) {
; CHECK: atom.max.s32
%ret = atomicrmw max i32* %subr, i32 %val seq_cst
ret i32 %ret
}
; CHECK-LABEL: atom13
define i64 @atom13(i64* %subr, i64 %val) {
; CHECK: atom.max.s64
%ret = atomicrmw max i64* %subr, i64 %val seq_cst
ret i64 %ret
}
; CHECK-LABEL: atom14
define i32 @atom14(i32* %subr, i32 %val) {
; CHECK: atom.min.s32
%ret = atomicrmw min i32* %subr, i32 %val seq_cst
ret i32 %ret
}
; CHECK-LABEL: atom15
define i64 @atom15(i64* %subr, i64 %val) {
; CHECK: atom.min.s64
%ret = atomicrmw min i64* %subr, i64 %val seq_cst
ret i64 %ret
}
; CHECK-LABEL: atom16
define i32 @atom16(i32* %subr, i32 %val) {
; CHECK: atom.max.u32
%ret = atomicrmw umax i32* %subr, i32 %val seq_cst
ret i32 %ret
}
; CHECK-LABEL: atom17
define i64 @atom17(i64* %subr, i64 %val) {
; CHECK: atom.max.u64
%ret = atomicrmw umax i64* %subr, i64 %val seq_cst
ret i64 %ret
}
; CHECK-LABEL: atom18
define i32 @atom18(i32* %subr, i32 %val) {
; CHECK: atom.min.u32
%ret = atomicrmw umin i32* %subr, i32 %val seq_cst
ret i32 %ret
}
; CHECK-LABEL: atom19
define i64 @atom19(i64* %subr, i64 %val) {
; CHECK: atom.min.u64
%ret = atomicrmw umin i64* %subr, i64 %val seq_cst
ret i64 %ret
}
declare float @llvm.nvvm.atomic.load.add.f32.p0f32(float* %addr, float %val)
; CHECK-LABEL: atomic_add_f32_generic
define float @atomic_add_f32_generic(float* %addr, float %val) {
; CHECK: atom.add.f32
%ret = call float @llvm.nvvm.atomic.load.add.f32.p0f32(float* %addr, float %val)
ret float %ret
}
declare float @llvm.nvvm.atomic.load.add.f32.p1f32(float addrspace(1)* %addr, float %val)
; CHECK-LABEL: atomic_add_f32_addrspace1
define float @atomic_add_f32_addrspace1(float addrspace(1)* %addr, float %val) {
; CHECK: atom.global.add.f32
%ret = call float @llvm.nvvm.atomic.load.add.f32.p1f32(float addrspace(1)* %addr, float %val)
ret float %ret
}
declare float @llvm.nvvm.atomic.load.add.f32.p3f32(float addrspace(3)* %addr, float %val)
; CHECK-LABEL: atomic_add_f32_addrspace3
define float @atomic_add_f32_addrspace3(float addrspace(3)* %addr, float %val) {
; CHECK: atom.shared.add.f32
%ret = call float @llvm.nvvm.atomic.load.add.f32.p3f32(float addrspace(3)* %addr, float %val)
ret float %ret
}
; CHECK-LABEL: atomic_cmpxchg_i32
define i32 @atomic_cmpxchg_i32(i32* %addr, i32 %cmp, i32 %new) {
; CHECK: atom.cas.b32
%pairold = cmpxchg i32* %addr, i32 %cmp, i32 %new seq_cst seq_cst
ret i32 %new
}
; CHECK-LABEL: atomic_cmpxchg_i64
define i64 @atomic_cmpxchg_i64(i64* %addr, i64 %cmp, i64 %new) {
; CHECK: atom.cas.b64
%pairold = cmpxchg i64* %addr, i64 %cmp, i64 %new seq_cst seq_cst
ret i64 %new
}

@@ -0,0 +1,32 @@
; RUN: llc < %s -march=nvptx64 -mcpu=sm_30 -mattr=+ptx60 | FileCheck %s
declare void @llvm.nvvm.bar.warp.sync(i32)
declare void @llvm.nvvm.barrier.sync(i32)
declare void @llvm.nvvm.barrier.sync.cnt(i32, i32)
; CHECK-LABEL: .func{{.*}}barrier.sync
define void @barrier.sync(i32 %id, i32 %cnt) {
; CHECK: ld.param.u32 [[ID:%r[0-9]+]], [barrier.sync_param_0];
; CHECK: ld.param.u32 [[CNT:%r[0-9]+]], [barrier.sync_param_1];
; CHECK: barrier.sync [[ID]], [[CNT]];
call void @llvm.nvvm.barrier.sync.cnt(i32 %id, i32 %cnt)
; CHECK: barrier.sync [[ID]], 2;
call void @llvm.nvvm.barrier.sync.cnt(i32 %id, i32 2)
; CHECK: barrier.sync 3, [[CNT]];
call void @llvm.nvvm.barrier.sync.cnt(i32 3, i32 %cnt)
; CHECK: barrier.sync 4, 5;
call void @llvm.nvvm.barrier.sync.cnt(i32 4, i32 5)
; CHECK: barrier.sync [[ID]];
call void @llvm.nvvm.barrier.sync(i32 %id)
; CHECK: barrier.sync 1;
call void @llvm.nvvm.barrier.sync(i32 1)
; CHECK: bar.warp.sync [[ID]];
call void @llvm.nvvm.bar.warp.sync(i32 %id)
; CHECK: bar.warp.sync 6;
call void @llvm.nvvm.bar.warp.sync(i32 6)
ret void
}

@@ -0,0 +1,32 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
; CHECK: bfe0
define i32 @bfe0(i32 %a) {
; CHECK: bfe.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, 4, 4
; CHECK-NOT: shr
; CHECK-NOT: and
%val0 = ashr i32 %a, 4
%val1 = and i32 %val0, 15
ret i32 %val1
}
; CHECK: bfe1
define i32 @bfe1(i32 %a) {
; CHECK: bfe.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, 3, 3
; CHECK-NOT: shr
; CHECK-NOT: and
%val0 = ashr i32 %a, 3
%val1 = and i32 %val0, 7
ret i32 %val1
}
; CHECK: bfe2
define i32 @bfe2(i32 %a) {
; CHECK: bfe.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, 5, 3
; CHECK-NOT: shr
; CHECK-NOT: and
%val0 = ashr i32 %a, 5
%val1 = and i32 %val0, 7
ret i32 %val1
}
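; For reference, bfe.u32 %d, %a, pos, len extracts len bits of %a starting
; at bit pos, i.e. (a >> pos) & ((1 << len) - 1); the ashr+and pairs above
; are equivalent because the mask discards the sign-fill bits. A semantic
; sketch (illustration only, assuming 0 < len < 32):
define i32 @bfe_u32_sketch(i32 %a, i32 %pos, i32 %len) {
  %shifted = lshr i32 %a, %pos
  %ones = shl i32 1, %len
  %mask = sub i32 %ones, 1   ; (1 << len) - 1
  %r = and i32 %shifted, %mask
  ret i32 %r
}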

Some files were not shown because too many files have changed in this diff.