Imported Upstream version 5.18.0.167
Former-commit-id: 289509151e0fee68a1b591a20c9f109c3c789d3a
This commit is contained in:
parent e19d552987, commit b084638f15
@ -1,185 +0,0 @@
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s

; Trivial optimization of generic addressing

; CHECK-LABEL: @load_global_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
; CHECK-NEXT: %tmp1 = load float, float addrspace(1)* %tmp0
; CHECK-NEXT: ret float %tmp1
define float @load_global_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
  %tmp1 = load float, float addrspace(1)* %tmp0
  ret float %tmp1
}

; CHECK-LABEL: @load_constant_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(2)*
; CHECK-NEXT: %tmp1 = load float, float addrspace(2)* %tmp0
; CHECK-NEXT: ret float %tmp1
define float @load_constant_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(2)*
  %tmp1 = load float, float addrspace(2)* %tmp0
  ret float %tmp1
}

; CHECK-LABEL: @load_group_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
; CHECK-NEXT: %tmp1 = load float, float addrspace(3)* %tmp0
; CHECK-NEXT: ret float %tmp1
define float @load_group_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
  %tmp1 = load float, float addrspace(3)* %tmp0
  ret float %tmp1
}

; CHECK-LABEL: @load_private_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
; CHECK-NEXT: %tmp1 = load float, float* %tmp0
; CHECK-NEXT: ret float %tmp1
define float @load_private_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
  %tmp1 = load float, float* %tmp0
  ret float %tmp1
}

; CHECK-LABEL: @store_global_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
; CHECK-NEXT: store float 0.000000e+00, float addrspace(1)* %tmp0
define amdgpu_kernel void @store_global_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
  store float 0.0, float addrspace(1)* %tmp0
  ret void
}

; CHECK-LABEL: @store_group_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
; CHECK-NEXT: store float 0.000000e+00, float addrspace(3)* %tmp0
define amdgpu_kernel void @store_group_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
  store float 0.0, float addrspace(3)* %tmp0
  ret void
}

; CHECK-LABEL: @store_private_from_flat(
; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
; CHECK-NEXT: store float 0.000000e+00, float* %tmp0
define amdgpu_kernel void @store_private_from_flat(float addrspace(4)* %generic_scalar) #0 {
  %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
  store float 0.0, float* %tmp0
  ret void
}

; Optimized to global load/store.
; CHECK-LABEL: @load_store_global(
; CHECK-NEXT: %val = load i32, i32 addrspace(1)* %input, align 4
; CHECK-NEXT: store i32 %val, i32 addrspace(1)* %output, align 4
; CHECK-NEXT: ret void
define amdgpu_kernel void @load_store_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
  %tmp0 = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
  %tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
  %val = load i32, i32 addrspace(4)* %tmp0, align 4
  store i32 %val, i32 addrspace(4)* %tmp1, align 4
  ret void
}

; Optimized to group load/store.
; CHECK-LABEL: @load_store_group(
; CHECK-NEXT: %val = load i32, i32 addrspace(3)* %input, align 4
; CHECK-NEXT: store i32 %val, i32 addrspace(3)* %output, align 4
; CHECK-NEXT: ret void
define amdgpu_kernel void @load_store_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
  %tmp0 = addrspacecast i32 addrspace(3)* %input to i32 addrspace(4)*
  %tmp1 = addrspacecast i32 addrspace(3)* %output to i32 addrspace(4)*
  %val = load i32, i32 addrspace(4)* %tmp0, align 4
  store i32 %val, i32 addrspace(4)* %tmp1, align 4
  ret void
}

; Optimized to private load/store.
; CHECK-LABEL: @load_store_private(
; CHECK-NEXT: %val = load i32, i32* %input, align 4
; CHECK-NEXT: store i32 %val, i32* %output, align 4
; CHECK-NEXT: ret void
define amdgpu_kernel void @load_store_private(i32* nocapture %input, i32* nocapture %output) #0 {
  %tmp0 = addrspacecast i32* %input to i32 addrspace(4)*
  %tmp1 = addrspacecast i32* %output to i32 addrspace(4)*
  %val = load i32, i32 addrspace(4)* %tmp0, align 4
  store i32 %val, i32 addrspace(4)* %tmp1, align 4
  ret void
}

; No optimization. Flat load/store.
; CHECK-LABEL: @load_store_flat(
; CHECK-NEXT: %val = load i32, i32 addrspace(4)* %input, align 4
; CHECK-NEXT: store i32 %val, i32 addrspace(4)* %output, align 4
; CHECK-NEXT: ret void
define amdgpu_kernel void @load_store_flat(i32 addrspace(4)* nocapture %input, i32 addrspace(4)* nocapture %output) #0 {
  %val = load i32, i32 addrspace(4)* %input, align 4
  store i32 %val, i32 addrspace(4)* %output, align 4
  ret void
}

; CHECK-LABEL: @store_addrspacecast_ptr_value(
; CHECK: %cast = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
; CHECK-NEXT: store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %output, align 4
define amdgpu_kernel void @store_addrspacecast_ptr_value(i32 addrspace(1)* nocapture %input, i32 addrspace(4)* addrspace(1)* nocapture %output) #0 {
  %cast = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
  store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %output, align 4
  ret void
}

; CHECK-LABEL: @atomicrmw_add_global_to_flat(
; CHECK-NEXT: %ret = atomicrmw add i32 addrspace(1)* %global.ptr, i32 %y seq_cst
define i32 @atomicrmw_add_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %y) #0 {
  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
  %ret = atomicrmw add i32 addrspace(4)* %cast, i32 %y seq_cst
  ret i32 %ret
}

; CHECK-LABEL: @atomicrmw_add_group_to_flat(
; CHECK-NEXT: %ret = atomicrmw add i32 addrspace(3)* %group.ptr, i32 %y seq_cst
define i32 @atomicrmw_add_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %y) #0 {
  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
  %ret = atomicrmw add i32 addrspace(4)* %cast, i32 %y seq_cst
  ret i32 %ret
}

; CHECK-LABEL: @cmpxchg_global_to_flat(
; CHECK: %ret = cmpxchg i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val seq_cst monotonic
define { i32, i1 } @cmpxchg_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val) #0 {
  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
  %ret = cmpxchg i32 addrspace(4)* %cast, i32 %cmp, i32 %val seq_cst monotonic
  ret { i32, i1 } %ret
}

; CHECK-LABEL: @cmpxchg_group_to_flat(
; CHECK: %ret = cmpxchg i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val seq_cst monotonic
define { i32, i1 } @cmpxchg_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val) #0 {
  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
  %ret = cmpxchg i32 addrspace(4)* %cast, i32 %cmp, i32 %val seq_cst monotonic
  ret { i32, i1 } %ret
}

; Not pointer operand
; CHECK-LABEL: @cmpxchg_group_to_flat_wrong_operand(
; CHECK: %cast.cmp = addrspacecast i32 addrspace(3)* %cmp.ptr to i32 addrspace(4)*
; CHECK: %ret = cmpxchg i32 addrspace(4)* addrspace(3)* %cas.ptr, i32 addrspace(4)* %cast.cmp, i32 addrspace(4)* %val seq_cst monotonic
define { i32 addrspace(4)*, i1 } @cmpxchg_group_to_flat_wrong_operand(i32 addrspace(4)* addrspace(3)* %cas.ptr, i32 addrspace(3)* %cmp.ptr, i32 addrspace(4)* %val) #0 {
  %cast.cmp = addrspacecast i32 addrspace(3)* %cmp.ptr to i32 addrspace(4)*
  %ret = cmpxchg i32 addrspace(4)* addrspace(3)* %cas.ptr, i32 addrspace(4)* %cast.cmp, i32 addrspace(4)* %val seq_cst monotonic
  ret { i32 addrspace(4)*, i1 } %ret
}

; Null pointer in local addr space
; CHECK-LABEL: @local_nullptr
; CHECK: icmp ne i8 addrspace(3)* %a, addrspacecast (i8* null to i8 addrspace(3)*)
; CHECK-NOT: i8 addrspace(3)* null
define void @local_nullptr(i32 addrspace(1)* nocapture %results, i8 addrspace(3)* %a) {
entry:
  %tobool = icmp ne i8 addrspace(3)* %a, addrspacecast (i8* null to i8 addrspace(3)*)
  %conv = zext i1 %tobool to i32
  store i32 %conv, i32 addrspace(1)* %results, align 4
  ret void
}

attributes #0 = { nounwind }
@ -1,160 +0,0 @@
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s

; CHECK-LABEL: @icmp_flat_cmp_self(
; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, %group.ptr.0
define i1 @icmp_flat_cmp_self(i32 addrspace(3)* %group.ptr.0) #0 {
  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* %cast0, %cast0
  ret i1 %cmp
}

; CHECK-LABEL: @icmp_flat_flat_from_group(
; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, %group.ptr.1
define i1 @icmp_flat_flat_from_group(i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* %cast0, %cast1
  ret i1 %cmp
}

; CHECK-LABEL: @icmp_mismatch_flat_from_group_private(
; CHECK: %1 = addrspacecast i32* %private.ptr.0 to i32 addrspace(4)*
; CHECK: %2 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
; CHECK: %cmp = icmp eq i32 addrspace(4)* %1, %2
define i1 @icmp_mismatch_flat_from_group_private(i32* %private.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
  %cast0 = addrspacecast i32* %private.ptr.0 to i32 addrspace(4)*
  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* %cast0, %cast1
  ret i1 %cmp
}

; CHECK-LABEL: @icmp_flat_group_flat(
; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
; CHECK: %cmp = icmp eq i32 addrspace(4)* %1, %flat.ptr.1
define i1 @icmp_flat_group_flat(i32 addrspace(3)* %group.ptr.0, i32 addrspace(4)* %flat.ptr.1) #0 {
  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* %cast0, %flat.ptr.1
  ret i1 %cmp
}

; CHECK-LABEL: @icmp_flat_flat_group(
; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
; CHECK: %cmp = icmp eq i32 addrspace(4)* %flat.ptr.0, %1
define i1 @icmp_flat_flat_group(i32 addrspace(4)* %flat.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* %flat.ptr.0, %cast1
  ret i1 %cmp
}

; Keeping as cmp addrspace(3)* is better
; CHECK-LABEL: @icmp_flat_to_group_cmp(
; CHECK: %cast0 = addrspacecast i32 addrspace(4)* %flat.ptr.0 to i32 addrspace(3)*
; CHECK: %cast1 = addrspacecast i32 addrspace(4)* %flat.ptr.1 to i32 addrspace(3)*
; CHECK: %cmp = icmp eq i32 addrspace(3)* %cast0, %cast1
define i1 @icmp_flat_to_group_cmp(i32 addrspace(4)* %flat.ptr.0, i32 addrspace(4)* %flat.ptr.1) #0 {
  %cast0 = addrspacecast i32 addrspace(4)* %flat.ptr.0 to i32 addrspace(3)*
  %cast1 = addrspacecast i32 addrspace(4)* %flat.ptr.1 to i32 addrspace(3)*
  %cmp = icmp eq i32 addrspace(3)* %cast0, %cast1
  ret i1 %cmp
}

; FIXME: Should be able to ask target about how to constant fold the
; constant cast if this is OK to change if 0 is a valid pointer.

; CHECK-LABEL: @icmp_group_flat_cmp_null(
; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
define i1 @icmp_group_flat_cmp_null(i32 addrspace(3)* %group.ptr.0) #0 {
  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* %cast0, null
  ret i1 %cmp
}

; CHECK-LABEL: @icmp_group_flat_cmp_constant_inttoptr(
; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, addrspacecast (i32 addrspace(4)* inttoptr (i64 400 to i32 addrspace(4)*) to i32 addrspace(3)*)
define i1 @icmp_group_flat_cmp_constant_inttoptr(i32 addrspace(3)* %group.ptr.0) #0 {
  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* %cast0, inttoptr (i64 400 to i32 addrspace(4)*)
  ret i1 %cmp
}

; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_null(
; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
; CHECK: %cmp = icmp eq i32 addrspace(4)* %1, addrspacecast (i32* null to i32 addrspace(4)*)
define i1 @icmp_mismatch_flat_group_private_cmp_null(i32 addrspace(3)* %group.ptr.0) #0 {
  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* %cast0, addrspacecast (i32* null to i32 addrspace(4)*)
  ret i1 %cmp
}

; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_undef(
; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, undef
define i1 @icmp_mismatch_flat_group_private_cmp_undef(i32 addrspace(3)* %group.ptr.0) #0 {
  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* %cast0, addrspacecast (i32* undef to i32 addrspace(4)*)
  ret i1 %cmp
}

@lds0 = internal addrspace(3) global i32 0, align 4
@global0 = internal addrspace(1) global i32 0, align 4

; CHECK-LABEL: @icmp_mismatch_flat_group_global_cmp_gv(
; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
; CHECK: %cmp = icmp eq i32 addrspace(4)* %1, addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)
define i1 @icmp_mismatch_flat_group_global_cmp_gv(i32 addrspace(3)* %group.ptr.0) #0 {
  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* %cast0, addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)
  ret i1 %cmp
}

; CHECK-LABEL: @icmp_mismatch_group_global_cmp_gv_gv(
; CHECK: %cmp = icmp eq i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)
define i1 @icmp_mismatch_group_global_cmp_gv_gv(i32 addrspace(3)* %group.ptr.0) #0 {
  %cmp = icmp eq i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)
  ret i1 %cmp
}

; CHECK-LABEL: @icmp_group_flat_cmp_undef(
; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, undef
define i1 @icmp_group_flat_cmp_undef(i32 addrspace(3)* %group.ptr.0) #0 {
  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* %cast0, undef
  ret i1 %cmp
}

; Test non-canonical orders
; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_null_swap(
; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
; CHECK: %cmp = icmp eq i32 addrspace(4)* addrspacecast (i32* null to i32 addrspace(4)*), %1
define i1 @icmp_mismatch_flat_group_private_cmp_null_swap(i32 addrspace(3)* %group.ptr.0) #0 {
  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* addrspacecast (i32* null to i32 addrspace(4)*), %cast0
  ret i1 %cmp
}

; CHECK-LABEL: @icmp_group_flat_cmp_undef_swap(
; CHECK: %cmp = icmp eq i32 addrspace(3)* undef, %group.ptr.0
define i1 @icmp_group_flat_cmp_undef_swap(i32 addrspace(3)* %group.ptr.0) #0 {
  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* undef, %cast0
  ret i1 %cmp
}

; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_undef_swap(
; CHECK: %cmp = icmp eq i32 addrspace(3)* undef, %group.ptr.0
define i1 @icmp_mismatch_flat_group_private_cmp_undef_swap(i32 addrspace(3)* %group.ptr.0) #0 {
  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
  %cmp = icmp eq i32 addrspace(4)* addrspacecast (i32* undef to i32 addrspace(4)*), %cast0
  ret i1 %cmp
}

; TODO: Should be handled
; CHECK-LABEL: @icmp_flat_flat_from_group_vector(
; CHECK: %cmp = icmp eq <2 x i32 addrspace(4)*> %cast0, %cast1
define <2 x i1> @icmp_flat_flat_from_group_vector(<2 x i32 addrspace(3)*> %group.ptr.0, <2 x i32 addrspace(3)*> %group.ptr.1) #0 {
  %cast0 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.0 to <2 x i32 addrspace(4)*>
  %cast1 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.1 to <2 x i32 addrspace(4)*>
  %cmp = icmp eq <2 x i32 addrspace(4)*> %cast0, %cast1
  ret <2 x i1> %cmp
}

attributes #0 = { nounwind }
@ -1,175 +0,0 @@
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
; Ports of most of test/CodeGen/NVPTX/access-non-generic.ll

@scalar = internal addrspace(3) global float 0.0, align 4
@array = internal addrspace(3) global [10 x float] zeroinitializer, align 4

; CHECK-LABEL: @load_store_lds_f32(
; CHECK: %tmp = load float, float addrspace(3)* @scalar, align 4
; CHECK: call void @use(float %tmp)
; CHECK: store float %v, float addrspace(3)* @scalar, align 4
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK: %tmp2 = load float, float addrspace(3)* @scalar, align 4
; CHECK: call void @use(float %tmp2)
; CHECK: store float %v, float addrspace(3)* @scalar, align 4
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK: %tmp3 = load float, float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5), align 4
; CHECK: call void @use(float %tmp3)
; CHECK: store float %v, float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5), align 4
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK: %tmp4 = getelementptr inbounds [10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5
; CHECK: %tmp5 = load float, float addrspace(3)* %tmp4, align 4
; CHECK: call void @use(float %tmp5)
; CHECK: store float %v, float addrspace(3)* %tmp4, align 4
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK: %tmp7 = getelementptr inbounds [10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 %i
; CHECK: %tmp8 = load float, float addrspace(3)* %tmp7, align 4
; CHECK: call void @use(float %tmp8)
; CHECK: store float %v, float addrspace(3)* %tmp7, align 4
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK: ret void
define amdgpu_kernel void @load_store_lds_f32(i32 %i, float %v) #0 {
bb:
  %tmp = load float, float addrspace(4)* addrspacecast (float addrspace(3)* @scalar to float addrspace(4)*), align 4
  call void @use(float %tmp)
  store float %v, float addrspace(4)* addrspacecast (float addrspace(3)* @scalar to float addrspace(4)*), align 4
  call void @llvm.amdgcn.s.barrier()
  %tmp1 = addrspacecast float addrspace(3)* @scalar to float addrspace(4)*
  %tmp2 = load float, float addrspace(4)* %tmp1, align 4
  call void @use(float %tmp2)
  store float %v, float addrspace(4)* %tmp1, align 4
  call void @llvm.amdgcn.s.barrier()
  %tmp3 = load float, float addrspace(4)* getelementptr inbounds ([10 x float], [10 x float] addrspace(4)* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float] addrspace(4)*), i32 0, i32 5), align 4
  call void @use(float %tmp3)
  store float %v, float addrspace(4)* getelementptr inbounds ([10 x float], [10 x float] addrspace(4)* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float] addrspace(4)*), i32 0, i32 5), align 4
  call void @llvm.amdgcn.s.barrier()
  %tmp4 = getelementptr inbounds [10 x float], [10 x float] addrspace(4)* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float] addrspace(4)*), i32 0, i32 5
  %tmp5 = load float, float addrspace(4)* %tmp4, align 4
  call void @use(float %tmp5)
  store float %v, float addrspace(4)* %tmp4, align 4
  call void @llvm.amdgcn.s.barrier()
  %tmp6 = addrspacecast [10 x float] addrspace(3)* @array to [10 x float] addrspace(4)*
  %tmp7 = getelementptr inbounds [10 x float], [10 x float] addrspace(4)* %tmp6, i32 0, i32 %i
  %tmp8 = load float, float addrspace(4)* %tmp7, align 4
  call void @use(float %tmp8)
  store float %v, float addrspace(4)* %tmp7, align 4
  call void @llvm.amdgcn.s.barrier()
  ret void
}

; CHECK-LABEL: @constexpr_load_int_from_float_lds(
; CHECK: %tmp = load i32, i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*), align 4
define i32 @constexpr_load_int_from_float_lds() #0 {
bb:
  %tmp = load i32, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*) to i32 addrspace(4)*), align 4
  ret i32 %tmp
}

; CHECK-LABEL: @load_int_from_global_float(
; CHECK: %tmp1 = getelementptr float, float addrspace(1)* %input, i32 %i
; CHECK: %tmp2 = getelementptr float, float addrspace(1)* %tmp1, i32 %j
; CHECK: %tmp3 = bitcast float addrspace(1)* %tmp2 to i32 addrspace(1)*
; CHECK: %tmp4 = load i32, i32 addrspace(1)* %tmp3
; CHECK: ret i32 %tmp4
define i32 @load_int_from_global_float(float addrspace(1)* %input, i32 %i, i32 %j) #0 {
bb:
  %tmp = addrspacecast float addrspace(1)* %input to float addrspace(4)*
  %tmp1 = getelementptr float, float addrspace(4)* %tmp, i32 %i
  %tmp2 = getelementptr float, float addrspace(4)* %tmp1, i32 %j
  %tmp3 = bitcast float addrspace(4)* %tmp2 to i32 addrspace(4)*
  %tmp4 = load i32, i32 addrspace(4)* %tmp3
  ret i32 %tmp4
}

; CHECK-LABEL: @nested_const_expr(
; CHECK: store i32 1, i32 addrspace(3)* bitcast (float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i64 0, i64 1) to i32 addrspace(3)*), align 4
define amdgpu_kernel void @nested_const_expr() #0 {
  store i32 1, i32 addrspace(4)* bitcast (float addrspace(4)* getelementptr ([10 x float], [10 x float] addrspace(4)* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float] addrspace(4)*), i64 0, i64 1) to i32 addrspace(4)*), align 4
  ret void
}

; CHECK-LABEL: @rauw(
; CHECK: %addr = getelementptr float, float addrspace(1)* %input, i64 10
; CHECK-NEXT: %v = load float, float addrspace(1)* %addr
; CHECK-NEXT: store float %v, float addrspace(1)* %addr
; CHECK-NEXT: ret void
define amdgpu_kernel void @rauw(float addrspace(1)* %input) #0 {
bb:
  %generic_input = addrspacecast float addrspace(1)* %input to float addrspace(4)*
  %addr = getelementptr float, float addrspace(4)* %generic_input, i64 10
  %v = load float, float addrspace(4)* %addr
  store float %v, float addrspace(4)* %addr
  ret void
}

; FIXME: Should be able to eliminate the cast inside the loop
; CHECK-LABEL: @loop(

; CHECK: %p = bitcast [10 x float] addrspace(3)* @array to float addrspace(3)*
; CHECK: %end = getelementptr float, float addrspace(3)* %p, i64 10
; CHECK: br label %loop

; CHECK: loop: ; preds = %loop, %entry
; CHECK: %i = phi float addrspace(3)* [ %p, %entry ], [ %i2, %loop ]
; CHECK: %v = load float, float addrspace(3)* %i
; CHECK: call void @use(float %v)
; CHECK: %i2 = getelementptr float, float addrspace(3)* %i, i64 1
; CHECK: %exit_cond = icmp eq float addrspace(3)* %i2, %end

; CHECK: br i1 %exit_cond, label %exit, label %loop
define amdgpu_kernel void @loop() #0 {
entry:
  %p = addrspacecast [10 x float] addrspace(3)* @array to float addrspace(4)*
  %end = getelementptr float, float addrspace(4)* %p, i64 10
  br label %loop

loop: ; preds = %loop, %entry
  %i = phi float addrspace(4)* [ %p, %entry ], [ %i2, %loop ]
  %v = load float, float addrspace(4)* %i
  call void @use(float %v)
  %i2 = getelementptr float, float addrspace(4)* %i, i64 1
  %exit_cond = icmp eq float addrspace(4)* %i2, %end
  br i1 %exit_cond, label %exit, label %loop

exit: ; preds = %loop
  ret void
}

@generic_end = external addrspace(1) global float addrspace(4)*

; CHECK-LABEL: @loop_with_generic_bound(
; CHECK: %p = bitcast [10 x float] addrspace(3)* @array to float addrspace(3)*
; CHECK: %end = load float addrspace(4)*, float addrspace(4)* addrspace(1)* @generic_end
; CHECK: br label %loop

; CHECK: loop:
; CHECK: %i = phi float addrspace(3)* [ %p, %entry ], [ %i2, %loop ]
; CHECK: %v = load float, float addrspace(3)* %i
; CHECK: call void @use(float %v)
; CHECK: %i2 = getelementptr float, float addrspace(3)* %i, i64 1
; CHECK: %0 = addrspacecast float addrspace(3)* %i2 to float addrspace(4)*
; CHECK: %exit_cond = icmp eq float addrspace(4)* %0, %end
; CHECK: br i1 %exit_cond, label %exit, label %loop
define amdgpu_kernel void @loop_with_generic_bound() #0 {
entry:
  %p = addrspacecast [10 x float] addrspace(3)* @array to float addrspace(4)*
  %end = load float addrspace(4)*, float addrspace(4)* addrspace(1)* @generic_end
  br label %loop

loop: ; preds = %loop, %entry
  %i = phi float addrspace(4)* [ %p, %entry ], [ %i2, %loop ]
  %v = load float, float addrspace(4)* %i
  call void @use(float %v)
  %i2 = getelementptr float, float addrspace(4)* %i, i64 1
  %exit_cond = icmp eq float addrspace(4)* %i2, %end
  br i1 %exit_cond, label %exit, label %loop

exit: ; preds = %loop
  ret void
}

declare void @llvm.amdgcn.s.barrier() #1
declare void @use(float) #0

attributes #0 = { nounwind }
attributes #1 = { convergent nounwind }
@ -1,56 +0,0 @@
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s

; Test that pure addrspacecast instructions not directly connected to
; a memory operation are inferred.

; CHECK-LABEL: @addrspacecast_gep_addrspacecast(
; CHECK: %gep0 = getelementptr i32, i32 addrspace(3)* %ptr, i64 9
; CHECK-NEXT: store i32 8, i32 addrspace(3)* %gep0, align 8
; CHECK-NEXT: ret void
define void @addrspacecast_gep_addrspacecast(i32 addrspace(3)* %ptr) {
  %asc0 = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
  %gep0 = getelementptr i32, i32 addrspace(4)* %asc0, i64 9
  %asc1 = addrspacecast i32 addrspace(4)* %gep0 to i32 addrspace(3)*
  store i32 8, i32 addrspace(3)* %asc1, align 8
  ret void
}

; CHECK-LABEL: @addrspacecast_different_pointee_type(
; CHECK: [[GEP:%.*]] = getelementptr i32, i32 addrspace(3)* %ptr, i64 9
; CHECK: [[CAST:%.*]] = bitcast i32 addrspace(3)* [[GEP]] to i8 addrspace(3)*
; CHECK-NEXT: store i8 8, i8 addrspace(3)* [[CAST]], align 8
; CHECK-NEXT: ret void
define void @addrspacecast_different_pointee_type(i32 addrspace(3)* %ptr) {
  %asc0 = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
  %gep0 = getelementptr i32, i32 addrspace(4)* %asc0, i64 9
  %asc1 = addrspacecast i32 addrspace(4)* %gep0 to i8 addrspace(3)*
  store i8 8, i8 addrspace(3)* %asc1, align 8
  ret void
}

; CHECK-LABEL: @addrspacecast_to_memory(
; CHECK: %gep0 = getelementptr i32, i32 addrspace(3)* %ptr, i64 9
; CHECK-NEXT: store volatile i32 addrspace(3)* %gep0, i32 addrspace(3)* addrspace(1)* undef
; CHECK-NEXT: ret void
define void @addrspacecast_to_memory(i32 addrspace(3)* %ptr) {
  %asc0 = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
  %gep0 = getelementptr i32, i32 addrspace(4)* %asc0, i64 9
  %asc1 = addrspacecast i32 addrspace(4)* %gep0 to i32 addrspace(3)*
  store volatile i32 addrspace(3)* %asc1, i32 addrspace(3)* addrspace(1)* undef
  ret void
}

; CHECK-LABEL: @multiuse_addrspacecast_gep_addrspacecast(
; CHECK: %1 = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
; CHECK-NEXT: store volatile i32 addrspace(4)* %1, i32 addrspace(4)* addrspace(1)* undef
; CHECK-NEXT: %gep0 = getelementptr i32, i32 addrspace(3)* %ptr, i64 9
; CHECK-NEXT: store i32 8, i32 addrspace(3)* %gep0, align 8
; CHECK-NEXT: ret void
define void @multiuse_addrspacecast_gep_addrspacecast(i32 addrspace(3)* %ptr) {
  %asc0 = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
  store volatile i32 addrspace(4)* %asc0, i32 addrspace(4)* addrspace(1)* undef
  %gep0 = getelementptr i32, i32 addrspace(4)* %asc0, i64 9
  %asc1 = addrspacecast i32 addrspace(4)* %gep0 to i32 addrspace(3)*
  store i32 8, i32 addrspace(3)* %asc1, align 8
  ret void
}
@ -1,73 +0,0 @@
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s

; Test that pure GetElementPtr instructions not directly connected to
; a memory operation are inferred.

@lds = internal unnamed_addr addrspace(3) global [648 x double] undef, align 8

; CHECK-LABEL: @simplified_constexpr_gep_addrspacecast(
; CHECK: %gep0 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx0
; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %gep0, align 8
define void @simplified_constexpr_gep_addrspacecast(i64 %idx0, i64 %idx1) {
  %gep0 = getelementptr inbounds double, double addrspace(4)* addrspacecast (double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384) to double addrspace(4)*), i64 %idx0
  %asc = addrspacecast double addrspace(4)* %gep0 to double addrspace(3)*
  store double 1.000000e+00, double addrspace(3)* %asc, align 8
  ret void
}

; CHECK-LABEL: @constexpr_gep_addrspacecast(
; CHECK-NEXT: %gep0 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx0
; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %gep0, align 8
define void @constexpr_gep_addrspacecast(i64 %idx0, i64 %idx1) {
  %gep0 = getelementptr inbounds double, double addrspace(4)* getelementptr ([648 x double], [648 x double] addrspace(4)* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double] addrspace(4)*), i64 0, i64 384), i64 %idx0
  %asc = addrspacecast double addrspace(4)* %gep0 to double addrspace(3)*
  store double 1.0, double addrspace(3)* %asc, align 8
  ret void
}

; CHECK-LABEL: @constexpr_gep_gep_addrspacecast(
; CHECK: %gep0 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx0
; CHECK-NEXT: %gep1 = getelementptr inbounds double, double addrspace(3)* %gep0, i64 %idx1
; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %gep1, align 8
define void @constexpr_gep_gep_addrspacecast(i64 %idx0, i64 %idx1) {
  %gep0 = getelementptr inbounds double, double addrspace(4)* getelementptr ([648 x double], [648 x double] addrspace(4)* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double] addrspace(4)*), i64 0, i64 384), i64 %idx0
  %gep1 = getelementptr inbounds double, double addrspace(4)* %gep0, i64 %idx1
  %asc = addrspacecast double addrspace(4)* %gep1 to double addrspace(3)*
  store double 1.0, double addrspace(3)* %asc, align 8
  ret void
}

; Don't crash
; CHECK-LABEL: @vector_gep(
; CHECK: %cast = addrspacecast <4 x [1024 x i32] addrspace(3)*> %array to <4 x [1024 x i32] addrspace(4)*>
define amdgpu_kernel void @vector_gep(<4 x [1024 x i32] addrspace(3)*> %array) nounwind {
  %cast = addrspacecast <4 x [1024 x i32] addrspace(3)*> %array to <4 x [1024 x i32] addrspace(4)*>
  %p = getelementptr [1024 x i32], <4 x [1024 x i32] addrspace(4)*> %cast, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
  %p0 = extractelement <4 x i32 addrspace(4)*> %p, i32 0
  %p1 = extractelement <4 x i32 addrspace(4)*> %p, i32 1
  %p2 = extractelement <4 x i32 addrspace(4)*> %p, i32 2
  %p3 = extractelement <4 x i32 addrspace(4)*> %p, i32 3
  store i32 99, i32 addrspace(4)* %p0
  store i32 99, i32 addrspace(4)* %p1
  store i32 99, i32 addrspace(4)* %p2
  store i32 99, i32 addrspace(4)* %p3
  ret void
}

; CHECK-LABEL: @repeated_constexpr_gep_addrspacecast(
; CHECK-NEXT: %gep0 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx0
; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %gep0, align 8
; CHECK-NEXT: %gep1 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx1
; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %gep1, align 8
; CHECK-NEXT: ret void
define void @repeated_constexpr_gep_addrspacecast(i64 %idx0, i64 %idx1) {
  %gep0 = getelementptr inbounds double, double addrspace(4)* getelementptr ([648 x double], [648 x double] addrspace(4)* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double] addrspace(4)*), i64 0, i64 384), i64 %idx0
  %asc0 = addrspacecast double addrspace(4)* %gep0 to double addrspace(3)*
  store double 1.0, double addrspace(3)* %asc0, align 8

  %gep1 = getelementptr inbounds double, double addrspace(4)* getelementptr ([648 x double], [648 x double] addrspace(4)* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double] addrspace(4)*), i64 0, i64 384), i64 %idx1
  %asc1 = addrspacecast double addrspace(4)* %gep1 to double addrspace(3)*
  store double 1.0, double addrspace(3)* %asc1, align 8

  ret void
}
@ -1,146 +0,0 @@
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s

; CHECK-LABEL: @objectsize_group_to_flat_i32(
; CHECK: %val = call i32 @llvm.objectsize.i32.p3i8(i8 addrspace(3)* %group.ptr, i1 true, i1 false)
define i32 @objectsize_group_to_flat_i32(i8 addrspace(3)* %group.ptr) #0 {
  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
  %val = call i32 @llvm.objectsize.i32.p4i8(i8 addrspace(4)* %cast, i1 true, i1 false)
  ret i32 %val
}

; CHECK-LABEL: @objectsize_global_to_flat_i64(
; CHECK: %val = call i64 @llvm.objectsize.i64.p3i8(i8 addrspace(3)* %global.ptr, i1 true, i1 false)
define i64 @objectsize_global_to_flat_i64(i8 addrspace(3)* %global.ptr) #0 {
  %cast = addrspacecast i8 addrspace(3)* %global.ptr to i8 addrspace(4)*
  %val = call i64 @llvm.objectsize.i64.p4i8(i8 addrspace(4)* %cast, i1 true, i1 false)
  ret i64 %val
}

; CHECK-LABEL: @atomicinc_global_to_flat_i32(
; CHECK: call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %global.ptr, i32 %y, i32 0, i32 0, i1 false)
define i32 @atomicinc_global_to_flat_i32(i32 addrspace(1)* %global.ptr, i32 %y) #0 {
  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
  %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %cast, i32 %y, i32 0, i32 0, i1 false)
  ret i32 %ret
}

; CHECK-LABEL: @atomicinc_group_to_flat_i32(
; CHECK: %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %group.ptr, i32 %y, i32 0, i32 0, i1 false)
define i32 @atomicinc_group_to_flat_i32(i32 addrspace(3)* %group.ptr, i32 %y) #0 {
  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
  %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %cast, i32 %y, i32 0, i32 0, i1 false)
  ret i32 %ret
}

; CHECK-LABEL: @atomicinc_global_to_flat_i64(
; CHECK: call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %global.ptr, i64 %y, i32 0, i32 0, i1 false)
define i64 @atomicinc_global_to_flat_i64(i64 addrspace(1)* %global.ptr, i64 %y) #0 {
  %cast = addrspacecast i64 addrspace(1)* %global.ptr to i64 addrspace(4)*
  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 false)
  ret i64 %ret
}

; CHECK-LABEL: @atomicinc_group_to_flat_i64(
; CHECK: call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %group.ptr, i64 %y, i32 0, i32 0, i1 false)
define i64 @atomicinc_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y) #0 {
  %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 false)
  ret i64 %ret
}

; CHECK-LABEL: @atomicdec_global_to_flat_i32(
; CHECK: call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %global.ptr, i32 %val, i32 0, i32 0, i1 false)
define i32 @atomicdec_global_to_flat_i32(i32 addrspace(1)* %global.ptr, i32 %val) #0 {
  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %cast, i32 %val, i32 0, i32 0, i1 false)
  ret i32 %ret
}

; CHECK-LABEL: @atomicdec_group_to_flat_i32(
; CHECK: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %group.ptr, i32 %val, i32 0, i32 0, i1 false)
define i32 @atomicdec_group_to_flat_i32(i32 addrspace(3)* %group.ptr, i32 %val) #0 {
  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %cast, i32 %val, i32 0, i32 0, i1 false)
  ret i32 %ret
}

; CHECK-LABEL: @atomicdec_global_to_flat_i64(
; CHECK: call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %global.ptr, i64 %y, i32 0, i32 0, i1 false)
define i64 @atomicdec_global_to_flat_i64(i64 addrspace(1)* %global.ptr, i64 %y) #0 {
  %cast = addrspacecast i64 addrspace(1)* %global.ptr to i64 addrspace(4)*
  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 false)
  ret i64 %ret
}

; CHECK-LABEL: @atomicdec_group_to_flat_i64(
; CHECK: call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %group.ptr, i64 %y, i32 0, i32 0, i1 false)
define i64 @atomicdec_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y) #0 {
  %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 false)
  ret i64 %ret
}

; CHECK-LABEL: @volatile_atomicinc_group_to_flat_i64(
; CHECK-NEXT: %1 = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %1, i64 %y, i32 0, i32 0, i1 true)
define i64 @volatile_atomicinc_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y) #0 {
  %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 true)
  ret i64 %ret
}

; CHECK-LABEL: @volatile_atomicdec_global_to_flat_i32(
; CHECK-NEXT: %1 = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
; CHECK-NEXT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %1, i32 %val, i32 0, i32 0, i1 true)
define i32 @volatile_atomicdec_global_to_flat_i32(i32 addrspace(1)* %global.ptr, i32 %val) #0 {
  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %cast, i32 %val, i32 0, i32 0, i1 true)
  ret i32 %ret
}

; CHECK-LABEL: @volatile_atomicdec_group_to_flat_i32(
; CHECK-NEXT: %1 = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
; CHECK-NEXT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %1, i32 %val, i32 0, i32 0, i1 true)
define i32 @volatile_atomicdec_group_to_flat_i32(i32 addrspace(3)* %group.ptr, i32 %val) #0 {
  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %cast, i32 %val, i32 0, i32 0, i1 true)
  ret i32 %ret
}

; CHECK-LABEL: @volatile_atomicdec_global_to_flat_i64(
; CHECK-NEXT: %1 = addrspacecast i64 addrspace(1)* %global.ptr to i64 addrspace(4)*
; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %1, i64 %y, i32 0, i32 0, i1 true)
define i64 @volatile_atomicdec_global_to_flat_i64(i64 addrspace(1)* %global.ptr, i64 %y) #0 {
  %cast = addrspacecast i64 addrspace(1)* %global.ptr to i64 addrspace(4)*
  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 true)
  ret i64 %ret
}

; CHECK-LABEL: @volatile_atomicdec_group_to_flat_i64(
; CHECK-NEXT: %1 = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %1, i64 %y, i32 0, i32 0, i1 true)
define i64 @volatile_atomicdec_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y) #0 {
  %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 true)
  ret i64 %ret
}

; CHECK-LABEL: @invalid_variable_volatile_atomicinc_group_to_flat_i64(
; CHECK-NEXT: %1 = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %1, i64 %y, i32 0, i32 0, i1 %volatile.var)
define i64 @invalid_variable_volatile_atomicinc_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y, i1 %volatile.var) #0 {
  %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 %volatile.var)
  ret i64 %ret
}

declare i32 @llvm.objectsize.i32.p4i8(i8 addrspace(4)*, i1, i1) #1
declare i64 @llvm.objectsize.i64.p4i8(i8 addrspace(4)*, i1, i1) #1
declare i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* nocapture, i32, i32, i32, i1) #2
declare i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* nocapture, i64, i32, i32, i1) #2
declare i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* nocapture, i32, i32, i32, i1) #2
declare i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* nocapture, i64, i32, i32, i1) #2

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind argmemonly }
@ -1,3 +0,0 @@
if not 'AMDGPU' in config.root.targets:
    config.unsupported = True
@ -1,134 +0,0 @@
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s

; CHECK-LABEL: @memset_group_to_flat(
; CHECK: call void @llvm.memset.p3i8.i64(i8 addrspace(3)* %group.ptr, i8 4, i64 32, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memset_group_to_flat(i8 addrspace(3)* %group.ptr, i32 %y) #0 {
  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
  call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 32, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memset_global_to_flat(
; CHECK: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %global.ptr, i8 4, i64 32, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memset_global_to_flat(i8 addrspace(1)* %global.ptr, i32 %y) #0 {
  %cast = addrspacecast i8 addrspace(1)* %global.ptr to i8 addrspace(4)*
  call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 32, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memset_group_to_flat_no_md(
; CHECK: call void @llvm.memset.p3i8.i64(i8 addrspace(3)* %group.ptr, i8 4, i64 %size, i32 4, i1 false){{$}}
define amdgpu_kernel void @memset_group_to_flat_no_md(i8 addrspace(3)* %group.ptr, i64 %size) #0 {
  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
  call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 %size, i32 4, i1 false)
  ret void
}

; CHECK-LABEL: @memset_global_to_flat_no_md(
; CHECK: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %global.ptr, i8 4, i64 %size, i32 4, i1 false){{$}}
define amdgpu_kernel void @memset_global_to_flat_no_md(i8 addrspace(1)* %global.ptr, i64 %size) #0 {
  %cast = addrspacecast i8 addrspace(1)* %global.ptr to i8 addrspace(4)*
  call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 %size, i32 4, i1 false)
  ret void
}

; CHECK-LABEL: @memcpy_flat_to_flat_replace_src_with_group(
; CHECK: call void @llvm.memcpy.p4i8.p3i8.i64(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
  call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %dest, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memcpy_flat_to_flat_replace_dest_with_group(
; CHECK: call void @llvm.memcpy.p3i8.p4i8.i64(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(4)* %src.ptr, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_with_group(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(4)* %src.ptr, i64 %size) #0 {
  %cast.dest = addrspacecast i8 addrspace(3)* %dest.group.ptr to i8 addrspace(4)*
  call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %cast.dest, i8 addrspace(4)* %src.ptr, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memcpy_flat_to_flat_replace_dest_src_with_group(
; CHECK: call void @llvm.memcpy.p3i8.p3i8.i64(i8 addrspace(3)* %src.group.ptr, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_src_with_group(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
  %cast.dest = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
  call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %cast.dest, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memcpy_flat_to_flat_replace_dest_group_src_global(
; CHECK: call void @llvm.memcpy.p3i8.p1i8.i64(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(1)* %src.global.ptr, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_group_src_global(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(1)* %src.global.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(1)* %src.global.ptr to i8 addrspace(4)*
  %cast.dest = addrspacecast i8 addrspace(3)* %dest.group.ptr to i8 addrspace(4)*
  call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %cast.dest, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memcpy_group_to_flat_replace_dest_global(
; CHECK: call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %dest.global.ptr, i8 addrspace(3)* %src.group.ptr, i32 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memcpy_group_to_flat_replace_dest_global(i8 addrspace(1)* %dest.global.ptr, i8 addrspace(3)* %src.group.ptr, i32 %size) #0 {
  %cast.dest = addrspacecast i8 addrspace(1)* %dest.global.ptr to i8 addrspace(4)*
  call void @llvm.memcpy.p4i8.p3i8.i32(i8 addrspace(4)* %cast.dest, i8 addrspace(3)* %src.group.ptr, i32 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memcpy_flat_to_flat_replace_src_with_group_tbaa_struct(
; CHECK: call void @llvm.memcpy.p4i8.p3i8.i64(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false), !tbaa.struct !7
define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group_tbaa_struct(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
  call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %dest, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false), !tbaa.struct !7
  ret void
}

; CHECK-LABEL: @memcpy_flat_to_flat_replace_src_with_group_no_md(
; CHECK: call void @llvm.memcpy.p4i8.p3i8.i64(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false){{$}}
define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group_no_md(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
  call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %dest, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false)
  ret void
}

; CHECK-LABEL: @multiple_memcpy_flat_to_flat_replace_src_with_group_no_md(
; CHECK: call void @llvm.memcpy.p4i8.p3i8.i64(i8 addrspace(4)* %dest0, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false){{$}}
; CHECK: call void @llvm.memcpy.p4i8.p3i8.i64(i8 addrspace(4)* %dest1, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false){{$}}
define amdgpu_kernel void @multiple_memcpy_flat_to_flat_replace_src_with_group_no_md(i8 addrspace(4)* %dest0, i8 addrspace(4)* %dest1, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
  call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %dest0, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false)
  call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %dest1, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false)
  ret void
}

; Check for iterator problems if the pointer has 2 uses in the same call
; CHECK-LABEL: @memcpy_group_flat_to_flat_self(
; CHECK: call void @llvm.memcpy.p3i8.p3i8.i64(i8 addrspace(3)* %group.ptr, i8 addrspace(3)* %group.ptr, i64 32, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memcpy_group_flat_to_flat_self(i8 addrspace(3)* %group.ptr) #0 {
  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
  call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %cast, i8 addrspace(4)* %cast, i64 32, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memmove_flat_to_flat_replace_src_with_group(
; CHECK: call void @llvm.memmove.p4i8.p3i8.i64(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memmove_flat_to_flat_replace_src_with_group(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
  call void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* %dest, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

declare void @llvm.memset.p4i8.i64(i8 addrspace(4)* nocapture writeonly, i8, i64, i32, i1) #1
declare void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* nocapture writeonly, i8 addrspace(4)* nocapture readonly, i64, i32, i1) #1
declare void @llvm.memcpy.p4i8.p3i8.i32(i8 addrspace(4)* nocapture writeonly, i8 addrspace(3)* nocapture readonly, i32, i32, i1) #1
declare void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* nocapture writeonly, i8 addrspace(4)* nocapture readonly, i64, i32, i1) #1

attributes #0 = { nounwind }
attributes #1 = { argmemonly nounwind }

!0 = !{!1, !1, i64 0}
!1 = !{!"A", !2}
!2 = !{!"tbaa root"}
!3 = !{!"B", !2}
!4 = !{!5}
!5 = distinct !{!5, !6, !"some scope"}
!6 = distinct !{!6, !"some domain"}
!7 = !{i64 0, i64 8, null}
@ -1,143 +0,0 @@
|
||||
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
|
||||
|
||||
; Regression tests from old HSAIL addrspacecast optimization pass
|
||||
|
||||
@data = internal addrspace(1) global [100 x double] [double 0.00, double 1.000000e-01, double 2.000000e-01, double 3.000000e-01, double 4.000000e-01, double 5.000000e-01, double 6.000000e-01, double 7.000000e-01, double 8.000000e-01, double 9.000000e-01, double 1.00, double 1.10, double 1.20, double 1.30, double 1.40, double 1.50, double 1.60, double 1.70, double 1.80, double 1.90, double 2.00, double 2.10, double 2.20, double 2.30, double 2.40, double 2.50, double 2.60, double 2.70, double 2.80, double 2.90, double 3.00, double 3.10, double 3.20, double 3.30, double 3.40, double 3.50, double 3.60, double 3.70, double 3.80, double 3.90, double 4.00, double 4.10, double 4.20, double 4.30, double 4.40, double 4.50, double 4.60, double 4.70, double 4.80, double 4.90, double 5.00, double 5.10, double 5.20, double 5.30, double 5.40, double 5.50, double 5.60, double 5.70, double 5.80, double 5.90, double 6.00, double 6.10, double 6.20, double 6.30, double 6.40, double 6.50, double 6.60, double 6.70, double 6.80, double 6.90, double 7.00, double 7.10, double 7.20, double 7.30, double 7.40, double 7.50, double 7.60, double 7.70, double 7.80, double 7.90, double 8.00, double 8.10, double 8.20, double 8.30, double 8.40, double 8.50, double 8.60, double 8.70, double 8.80, double 8.90, double 9.00, double 9.10, double 9.20, double 9.30, double 9.40, double 9.50, double 9.60, double 9.70, double 9.80, double 9.90], align 8
|
||||
|
||||
|
||||
; Should generate flat load
|
||||
|
||||
; CHECK-LABEL: @generic_address_bitcast_const(
|
||||
; CHECK: %vecload1 = load <2 x double>, <2 x double> addrspace(1)* bitcast (double addrspace(1)* getelementptr inbounds ([100 x double], [100 x double] addrspace(1)* @data, i64 0, i64 4) to <2 x double> addrspace(1)*), align 8
|
||||
define amdgpu_kernel void @generic_address_bitcast_const(i64 %arg0, i32 addrspace(1)* nocapture %results) #0 {
|
||||
entry:
|
||||
%tmp1 = call i32 @llvm.amdgcn.workitem.id.x()
|
||||
%tmp2 = zext i32 %tmp1 to i64
|
||||
%tmp3 = add i64 %tmp2, %arg0
|
||||
%vecload1 = load <2 x double>, <2 x double> addrspace(4)* bitcast (double addrspace(4)* getelementptr ([100 x double], [100 x double] addrspace(4)* addrspacecast ([100 x double] addrspace(1)* @data to [100 x double] addrspace(4)*), i64 0, i64 4) to <2 x double> addrspace(4)*), align 8
|
||||
%cmp = fcmp ord <2 x double> %vecload1, zeroinitializer
|
||||
%sext = sext <2 x i1> %cmp to <2 x i64>
|
||||
%tmp4 = extractelement <2 x i64> %sext, i64 0
|
||||
%tmp5 = extractelement <2 x i64> %sext, i64 1
|
||||
%tmp6 = and i64 %tmp4, %tmp5
|
||||
%tmp7 = lshr i64 %tmp6, 63
|
||||
%tmp8 = trunc i64 %tmp7 to i32
|
||||
%idxprom = and i64 %tmp3, 4294967295
|
||||
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %results, i64 %idxprom
|
||||
store i32 %tmp8, i32 addrspace(1)* %arrayidx, align 4
|
||||
ret void
|
||||
}
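; Note: the load above goes through a constant-expression addrspacecast of
; @data; per the CHECK line, the cast is expected to fold away so the vector
; load addresses the global directly in addrspace(1).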
@generic_address_bug9749.val = internal addrspace(1) global float 0.0, align 4

declare i32 @_Z9get_fencePU3AS4v(i8 addrspace(4)*)
%opencl.pipe_t = type opaque

; This is a compile time assert bug, but we still want to check that the
; optimization is performed to generate ld_global.
; CHECK-LABEL: @generic_address_pipe_bug9673(
; CHECK: %tmp1 = bitcast %opencl.pipe_t addrspace(3)* %in_pipe to i32 addrspace(3)*
; CHECK: %add.ptr = getelementptr inbounds i32, i32 addrspace(3)* %tmp1, i32 2
; CHECK: %tmp2 = load i32, i32 addrspace(3)* %add.ptr, align 4
define amdgpu_kernel void @generic_address_pipe_bug9673(%opencl.pipe_t addrspace(3)* nocapture %in_pipe, i32 addrspace(1)* nocapture %dst) #0 {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = bitcast %opencl.pipe_t addrspace(3)* %in_pipe to i32 addrspace(3)*
%add.ptr = getelementptr inbounds i32, i32 addrspace(3)* %tmp1, i32 2
%tmp2 = load i32, i32 addrspace(3)* %add.ptr, align 4
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %dst, i32 %tmp
store i32 %tmp2, i32 addrspace(1)* %arrayidx, align 4
ret void
}

; Should generate flat load
; CHECK-LABEL: @generic_address_bug9749(
; CHECK: br i1
; CHECK: load float, float addrspace(4)*
; CHECK: br label
define amdgpu_kernel void @generic_address_bug9749(i32 addrspace(1)* nocapture %results) #0 {
entry:
%ptr = alloca float addrspace(4)*, align 8
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = zext i32 %tmp to i64
store float 0x3FB99999A0000000, float addrspace(1)* @generic_address_bug9749.val, align 4
store volatile float addrspace(4)* addrspacecast (float addrspace(1)* @generic_address_bug9749.val to float addrspace(4)*), float addrspace(4)** %ptr, align 8
%tmp2 = load volatile float addrspace(4)*, float addrspace(4)** %ptr, align 8
%tmp3 = load float, float addrspace(1)* @generic_address_bug9749.val, align 4
%tmp4 = bitcast float addrspace(4)* %tmp2 to i8 addrspace(4)*
%call.i = call i32 @_Z9get_fencePU3AS4v(i8 addrspace(4)* %tmp4) #1
%switch.i.i = icmp ult i32 %call.i, 4
br i1 %switch.i.i, label %if.end.i, label %helperFunction.exit

if.end.i:                                         ; preds = %entry
%tmp5 = load float, float addrspace(4)* %tmp2, align 4
%not.cmp.i = fcmp oeq float %tmp5, %tmp3
%phitmp = zext i1 %not.cmp.i to i32
br label %helperFunction.exit

helperFunction.exit:                              ; preds = %if.end.i, %entry
%retval.0.i = phi i32 [ 0, %entry ], [ %phitmp, %if.end.i ]
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %results, i64 %tmp1
store i32 %retval.0.i, i32 addrspace(1)* %arrayidx, align 4
ret void
}
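; Note: %tmp2 is reloaded from the %ptr alloca, so its address space is not
; visible to the pass through memory; per the CHECK lines, the float load in
; %if.end.i is expected to stay flat (addrspace(4)).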
; CHECK-LABEL: @generic_address_opt_phi_bug9776_simple_phi_kernel(
; CHECK: phi i32 addrspace(3)*
; CHECK: store i32 %i.03, i32 addrspace(3)* %
define amdgpu_kernel void @generic_address_opt_phi_bug9776_simple_phi_kernel(i32 addrspace(3)* nocapture %in, i32 %numElems) #0 {
entry:
%cmp1 = icmp eq i32 %numElems, 0
br i1 %cmp1, label %for.end, label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
%tmp = addrspacecast i32 addrspace(3)* %in to i32 addrspace(4)*
br label %for.body

for.body:                                         ; preds = %for.body, %for.body.lr.ph
%i.03 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%ptr.02 = phi i32 addrspace(4)* [ %tmp, %for.body.lr.ph ], [ %add.ptr, %for.body ]
store i32 %i.03, i32 addrspace(4)* %ptr.02, align 4
%add.ptr = getelementptr inbounds i32, i32 addrspace(4)* %ptr.02, i64 4
%inc = add nuw i32 %i.03, 1
%exitcond = icmp eq i32 %inc, %numElems
br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
ret void
}
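; Note: per the CHECK lines, the inference is expected to propagate through
; the loop-carried phi, so the store and the pointer increment operate on
; addrspace(3) directly.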
; CHECK-LABEL: @generic_address_bug9899(
; CHECK: %vecload = load <2 x i32>, <2 x i32> addrspace(3)*
; CHECK: store <2 x i32> %tmp16, <2 x i32> addrspace(3)*
define amdgpu_kernel void @generic_address_bug9899(i64 %arg0, i32 addrspace(3)* nocapture %sourceA, i32 addrspace(3)* nocapture %destValues) #0 {
entry:
%tmp1 = call i32 @llvm.amdgcn.workitem.id.x()
%tmp2 = zext i32 %tmp1 to i64
%tmp3 = add i64 %tmp2, %arg0
%sext = shl i64 %tmp3, 32
%tmp4 = addrspacecast i32 addrspace(3)* %destValues to i32 addrspace(4)*
%tmp5 = addrspacecast i32 addrspace(3)* %sourceA to i32 addrspace(4)*
%tmp6 = ashr exact i64 %sext, 31
%tmp7 = getelementptr inbounds i32, i32 addrspace(4)* %tmp5, i64 %tmp6
%arrayidx_v4 = bitcast i32 addrspace(4)* %tmp7 to <2 x i32> addrspace(4)*
%vecload = load <2 x i32>, <2 x i32> addrspace(4)* %arrayidx_v4, align 4
%tmp8 = extractelement <2 x i32> %vecload, i32 0
%tmp9 = extractelement <2 x i32> %vecload, i32 1
%tmp10 = icmp eq i32 %tmp8, 0
%tmp11 = select i1 %tmp10, i32 32, i32 %tmp8
%tmp12 = icmp eq i32 %tmp9, 0
%tmp13 = select i1 %tmp12, i32 32, i32 %tmp9
%tmp14 = getelementptr inbounds i32, i32 addrspace(4)* %tmp4, i64 %tmp6
%tmp15 = insertelement <2 x i32> undef, i32 %tmp11, i32 0
%tmp16 = insertelement <2 x i32> %tmp15, i32 %tmp13, i32 1
%arrayidx_v41 = bitcast i32 addrspace(4)* %tmp14 to <2 x i32> addrspace(4)*
store <2 x i32> %tmp16, <2 x i32> addrspace(4)* %arrayidx_v41, align 4
ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #2

attributes #0 = { nounwind }
attributes #1 = { nounwind readonly }
attributes #2 = { nounwind readnone }
@ -1,264 +0,0 @@
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s

; Instcombine pulls the addrspacecast out of the select, make sure
; this doesn't do something insane on non-canonical IR.

; CHECK-LABEL: @return_select_group_flat(
; CHECK-NEXT: %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
; CHECK-NEXT: %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
; CHECK-NEXT: %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* %cast1
; CHECK-NEXT: ret i32 addrspace(4)* %select
define i32 addrspace(4)* @return_select_group_flat(i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* %cast1
ret i32 addrspace(4)* %select
}
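; Note: the select result escapes through the flat return value, so per the
; CHECK-NEXT lines the function is expected to be left unchanged.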
; CHECK-LABEL: @store_select_group_flat(
; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1
; CHECK: store i32 -1, i32 addrspace(3)* %select
define amdgpu_kernel void @store_select_group_flat(i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* %cast1
store i32 -1, i32 addrspace(4)* %select
ret void
}

; Make sure metadata is preserved
; CHECK-LABEL: @load_select_group_flat_md(
; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1, !prof !0
; CHECK: %load = load i32, i32 addrspace(3)* %select
define i32 @load_select_group_flat_md(i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* %cast1, !prof !0
%load = load i32, i32 addrspace(4)* %select
ret i32 %load
}

; CHECK-LABEL: @store_select_mismatch_group_private_flat(
; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
; CHECK: %2 = addrspacecast i32* %private.ptr.1 to i32 addrspace(4)*
; CHECK: %select = select i1 %c, i32 addrspace(4)* %1, i32 addrspace(4)* %2
; CHECK: store i32 -1, i32 addrspace(4)* %select
define amdgpu_kernel void @store_select_mismatch_group_private_flat(i1 %c, i32 addrspace(3)* %group.ptr.0, i32* %private.ptr.1) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%cast1 = addrspacecast i32* %private.ptr.1 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* %cast1
store i32 -1, i32 addrspace(4)* %select
ret void
}
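; Note: the two select operands originate in different address spaces, so
; the select must stay flat; per the CHECK lines, the casts are expected to
; be re-materialized as the unnamed values %1 and %2.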
@lds0 = internal addrspace(3) global i32 123, align 4
@lds1 = internal addrspace(3) global i32 456, align 4

; CHECK-LABEL: @constexpr_select_group_flat(
; CHECK: %tmp = load i32, i32 addrspace(3)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(3)* @lds0, i32 addrspace(3)* @lds1)
define i32 @constexpr_select_group_flat() #0 {
bb:
%tmp = load i32, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds1 to i32 addrspace(4)*))
ret i32 %tmp
}

; CHECK-LABEL: @constexpr_select_group_global_flat_mismatch(
; CHECK: %tmp = load i32, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*))
define i32 @constexpr_select_group_global_flat_mismatch() #0 {
bb:
%tmp = load i32, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*))
ret i32 %tmp
}

; CHECK-LABEL: @store_select_group_flat_null(
; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
; CHECK: store i32 -1, i32 addrspace(3)* %select
define amdgpu_kernel void @store_select_group_flat_null(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* null
store i32 -1, i32 addrspace(4)* %select
ret void
}
; CHECK-LABEL: @store_select_group_flat_null_swap(
; CHECK: %select = select i1 %c, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*), i32 addrspace(3)* %group.ptr.0
; CHECK: store i32 -1, i32 addrspace(3)* %select
define amdgpu_kernel void @store_select_group_flat_null_swap(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* null, i32 addrspace(4)* %cast0
store i32 -1, i32 addrspace(4)* %select
ret void
}

; CHECK-LABEL: @store_select_group_flat_undef(
; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* undef
; CHECK: store i32 -1, i32 addrspace(3)* %select
define amdgpu_kernel void @store_select_group_flat_undef(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* undef
store i32 -1, i32 addrspace(4)* %select
ret void
}

; CHECK-LABEL: @store_select_group_flat_undef_swap(
; CHECK: %select = select i1 %c, i32 addrspace(3)* undef, i32 addrspace(3)* %group.ptr.0
; CHECK: store i32 -1, i32 addrspace(3)* %select
define amdgpu_kernel void @store_select_group_flat_undef_swap(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* undef, i32 addrspace(4)* %cast0
store i32 -1, i32 addrspace(4)* %select
ret void
}

; CHECK-LABEL: @store_select_gep_group_flat_null(
; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
; CHECK: %gep = getelementptr i32, i32 addrspace(3)* %select, i64 16
; CHECK: store i32 -1, i32 addrspace(3)* %gep
define amdgpu_kernel void @store_select_gep_group_flat_null(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* null
%gep = getelementptr i32, i32 addrspace(4)* %select, i64 16
store i32 -1, i32 addrspace(4)* %gep
ret void
}
@global0 = internal addrspace(1) global i32 123, align 4

; CHECK-LABEL: @store_select_group_flat_constexpr(
; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* @lds1
; CHECK: store i32 7, i32 addrspace(3)* %select
define amdgpu_kernel void @store_select_group_flat_constexpr(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds1 to i32 addrspace(4)*)
store i32 7, i32 addrspace(4)* %select
ret void
}

; CHECK-LABEL: @store_select_group_flat_inttoptr_flat(
; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* inttoptr (i64 12345 to i32 addrspace(4)*) to i32 addrspace(3)*)
; CHECK: store i32 7, i32 addrspace(3)* %select
define amdgpu_kernel void @store_select_group_flat_inttoptr_flat(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* inttoptr (i64 12345 to i32 addrspace(4)*)
store i32 7, i32 addrspace(4)* %select
ret void
}

; CHECK-LABEL: @store_select_group_flat_inttoptr_group(
; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* inttoptr (i32 400 to i32 addrspace(3)*)
; CHECK-NEXT: store i32 7, i32 addrspace(3)* %select
define amdgpu_kernel void @store_select_group_flat_inttoptr_group(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* inttoptr (i32 400 to i32 addrspace(3)*) to i32 addrspace(4)*)
store i32 7, i32 addrspace(4)* %select
ret void
}

; CHECK-LABEL: @store_select_group_global_mismatch_flat_constexpr(
; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
; CHECK: %select = select i1 %c, i32 addrspace(4)* %1, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)
; CHECK: store i32 7, i32 addrspace(4)* %select
define amdgpu_kernel void @store_select_group_global_mismatch_flat_constexpr(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)
store i32 7, i32 addrspace(4)* %select
ret void
}
; CHECK-LABEL: @store_select_group_global_mismatch_flat_constexpr_swap(
; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
; CHECK: %select = select i1 %c, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*), i32 addrspace(4)* %1
; CHECK: store i32 7, i32 addrspace(4)* %select
define amdgpu_kernel void @store_select_group_global_mismatch_flat_constexpr_swap(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*), i32 addrspace(4)* %cast0
store i32 7, i32 addrspace(4)* %select
ret void
}

; CHECK-LABEL: @store_select_group_global_mismatch_null_null(
; CHECK: %select = select i1 %c, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)
; CHECK: store i32 7, i32 addrspace(4)* %select
define amdgpu_kernel void @store_select_group_global_mismatch_null_null(i1 %c) #0 {
%select = select i1 %c, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)
store i32 7, i32 addrspace(4)* %select
ret void
}

; CHECK-LABEL: @store_select_group_global_mismatch_null_null_constexpr(
; CHECK: store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
define amdgpu_kernel void @store_select_group_global_mismatch_null_null_constexpr() #0 {
store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
ret void
}

; CHECK-LABEL: @store_select_group_global_mismatch_gv_null_constexpr(
; CHECK: store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
define amdgpu_kernel void @store_select_group_global_mismatch_gv_null_constexpr() #0 {
store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
ret void
}

; CHECK-LABEL: @store_select_group_global_mismatch_null_gv_constexpr(
; CHECK: store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)), align 4
define amdgpu_kernel void @store_select_group_global_mismatch_null_gv_constexpr() #0 {
store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)), align 4
ret void
}
; CHECK-LABEL: @store_select_group_global_mismatch_inttoptr_null_constexpr(
; CHECK: store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* inttoptr (i64 123 to i32 addrspace(3)*) to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
define amdgpu_kernel void @store_select_group_global_mismatch_inttoptr_null_constexpr() #0 {
store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* inttoptr (i64 123 to i32 addrspace(3)*) to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
ret void
}

; CHECK-LABEL: @store_select_group_global_mismatch_inttoptr_flat_null_constexpr(
; CHECK: store i32 7, i32 addrspace(1)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(1)* addrspacecast (i32 addrspace(4)* inttoptr (i64 123 to i32 addrspace(4)*) to i32 addrspace(1)*), i32 addrspace(1)* null), align 4
define amdgpu_kernel void @store_select_group_global_mismatch_inttoptr_flat_null_constexpr() #0 {
store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* inttoptr (i64 123 to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
ret void
}

; CHECK-LABEL: @store_select_group_global_mismatch_undef_undef_constexpr(
; CHECK: store i32 7, i32 addrspace(3)* null
define amdgpu_kernel void @store_select_group_global_mismatch_undef_undef_constexpr() #0 {
store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* undef to i32 addrspace(4)*)), align 4
ret void
}

@lds2 = external addrspace(3) global [1024 x i32], align 4

; CHECK-LABEL: @store_select_group_constexpr_ptrtoint(
; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
; CHECK: %select = select i1 %c, i32 addrspace(4)* %1, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* inttoptr (i32 add (i32 ptrtoint ([1024 x i32] addrspace(3)* @lds2 to i32), i32 124) to i32 addrspace(1)*) to i32 addrspace(4)*)
; CHECK: store i32 7, i32 addrspace(4)* %select
define amdgpu_kernel void @store_select_group_constexpr_ptrtoint(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
%cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
%select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* inttoptr (i32 add (i32 ptrtoint ([1024 x i32] addrspace(3)* @lds2 to i32), i32 124) to i32 addrspace(1)*) to i32 addrspace(4)*)
store i32 7, i32 addrspace(4)* %select
ret void
}
; CHECK-LABEL: @store_select_group_flat_vector(
; CHECK: %cast0 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.0 to <2 x i32 addrspace(4)*>
; CHECK: %cast1 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.1 to <2 x i32 addrspace(4)*>
; CHECK: %select = select i1 %c, <2 x i32 addrspace(4)*> %cast0, <2 x i32 addrspace(4)*> %cast1
; CHECK: %extract0 = extractelement <2 x i32 addrspace(4)*> %select, i32 0
; CHECK: %extract1 = extractelement <2 x i32 addrspace(4)*> %select, i32 1
; CHECK: store i32 -1, i32 addrspace(4)* %extract0
; CHECK: store i32 -2, i32 addrspace(4)* %extract1
define amdgpu_kernel void @store_select_group_flat_vector(i1 %c, <2 x i32 addrspace(3)*> %group.ptr.0, <2 x i32 addrspace(3)*> %group.ptr.1) #0 {
%cast0 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.0 to <2 x i32 addrspace(4)*>
%cast1 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.1 to <2 x i32 addrspace(4)*>
%select = select i1 %c, <2 x i32 addrspace(4)*> %cast0, <2 x i32 addrspace(4)*> %cast1
%extract0 = extractelement <2 x i32 addrspace(4)*> %select, i32 0
%extract1 = extractelement <2 x i32 addrspace(4)*> %select, i32 1
store i32 -1, i32 addrspace(4)* %extract0
store i32 -2, i32 addrspace(4)* %extract1
ret void
}
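; Note: per the CHECK lines, the vector-of-pointers select is expected to be
; left in addrspace(4); the casts, select, and extractelements are unchanged.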
attributes #0 = { nounwind }

!0 = !{!"branch_weights", i32 2, i32 10}
@ -1,140 +0,0 @@
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s

; Check that volatile users of addrspacecast are not replaced.

; CHECK-LABEL: @volatile_load_flat_from_global(
; CHECK: load volatile i32, i32 addrspace(4)*
; CHECK: store i32 %val, i32 addrspace(1)*
define amdgpu_kernel void @volatile_load_flat_from_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
%tmp0 = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
%val = load volatile i32, i32 addrspace(4)* %tmp0, align 4
store i32 %val, i32 addrspace(4)* %tmp1, align 4
ret void
}
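; Note: per the CHECK lines, the volatile load keeps its flat pointer while
; the non-volatile store through the other cast is still rewritten to
; addrspace(1).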
; CHECK-LABEL: @volatile_load_flat_from_constant(
; CHECK: load volatile i32, i32 addrspace(4)*
; CHECK: store i32 %val, i32 addrspace(1)*
define amdgpu_kernel void @volatile_load_flat_from_constant(i32 addrspace(2)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
%tmp0 = addrspacecast i32 addrspace(2)* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
%val = load volatile i32, i32 addrspace(4)* %tmp0, align 4
store i32 %val, i32 addrspace(4)* %tmp1, align 4
ret void
}

; CHECK-LABEL: @volatile_load_flat_from_group(
; CHECK: load volatile i32, i32 addrspace(4)*
; CHECK: store i32 %val, i32 addrspace(3)*
define amdgpu_kernel void @volatile_load_flat_from_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
%tmp0 = addrspacecast i32 addrspace(3)* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32 addrspace(3)* %output to i32 addrspace(4)*
%val = load volatile i32, i32 addrspace(4)* %tmp0, align 4
store i32 %val, i32 addrspace(4)* %tmp1, align 4
ret void
}
; CHECK-LABEL: @volatile_load_flat_from_private(
; CHECK: load volatile i32, i32 addrspace(4)*
; CHECK: store i32 %val, i32*
define amdgpu_kernel void @volatile_load_flat_from_private(i32* nocapture %input, i32* nocapture %output) #0 {
%tmp0 = addrspacecast i32* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32* %output to i32 addrspace(4)*
%val = load volatile i32, i32 addrspace(4)* %tmp0, align 4
store i32 %val, i32 addrspace(4)* %tmp1, align 4
ret void
}

; CHECK-LABEL: @volatile_store_flat_to_global(
; CHECK: load i32, i32 addrspace(1)*
; CHECK: store volatile i32 %val, i32 addrspace(4)*
define amdgpu_kernel void @volatile_store_flat_to_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
%tmp0 = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
%val = load i32, i32 addrspace(4)* %tmp0, align 4
store volatile i32 %val, i32 addrspace(4)* %tmp1, align 4
ret void
}

; CHECK-LABEL: @volatile_store_flat_to_group(
; CHECK: load i32, i32 addrspace(3)*
; CHECK: store volatile i32 %val, i32 addrspace(4)*
define amdgpu_kernel void @volatile_store_flat_to_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
%tmp0 = addrspacecast i32 addrspace(3)* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32 addrspace(3)* %output to i32 addrspace(4)*
%val = load i32, i32 addrspace(4)* %tmp0, align 4
store volatile i32 %val, i32 addrspace(4)* %tmp1, align 4
ret void
}
; CHECK-LABEL: @volatile_store_flat_to_private(
; CHECK: load i32, i32*
; CHECK: store volatile i32 %val, i32 addrspace(4)*
define amdgpu_kernel void @volatile_store_flat_to_private(i32* nocapture %input, i32* nocapture %output) #0 {
%tmp0 = addrspacecast i32* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32* %output to i32 addrspace(4)*
%val = load i32, i32 addrspace(4)* %tmp0, align 4
store volatile i32 %val, i32 addrspace(4)* %tmp1, align 4
ret void
}

; CHECK-LABEL: @volatile_atomicrmw_add_group_to_flat(
; CHECK: addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
; CHECK: atomicrmw volatile add i32 addrspace(4)*
define i32 @volatile_atomicrmw_add_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %y) #0 {
%cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
%ret = atomicrmw volatile add i32 addrspace(4)* %cast, i32 %y seq_cst
ret i32 %ret
}

; CHECK-LABEL: @volatile_atomicrmw_add_global_to_flat(
; CHECK: addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
; CHECK: %ret = atomicrmw volatile add i32 addrspace(4)*
define i32 @volatile_atomicrmw_add_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %y) #0 {
%cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
%ret = atomicrmw volatile add i32 addrspace(4)* %cast, i32 %y seq_cst
ret i32 %ret
}

; CHECK-LABEL: @volatile_cmpxchg_global_to_flat(
; CHECK: addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
; CHECK: cmpxchg volatile i32 addrspace(4)*
define { i32, i1 } @volatile_cmpxchg_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val) #0 {
%cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
%ret = cmpxchg volatile i32 addrspace(4)* %cast, i32 %cmp, i32 %val seq_cst monotonic
ret { i32, i1 } %ret
}
; CHECK-LABEL: @volatile_cmpxchg_group_to_flat(
; CHECK: addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
; CHECK: cmpxchg volatile i32 addrspace(4)*
define { i32, i1 } @volatile_cmpxchg_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val) #0 {
%cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
%ret = cmpxchg volatile i32 addrspace(4)* %cast, i32 %cmp, i32 %val seq_cst monotonic
ret { i32, i1 } %ret
}

; FIXME: Shouldn't be losing names
; CHECK-LABEL: @volatile_memset_group_to_flat(
; CHECK: addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
; CHECK: call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %1, i8 4, i64 32, i32 4, i1 true)
define amdgpu_kernel void @volatile_memset_group_to_flat(i8 addrspace(3)* %group.ptr, i32 %y) #0 {
%cast = addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 32, i32 4, i1 true)
ret void
}
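; Note: per the FIXME above, the cast survives but is re-materialized as the
; unnamed value %1, losing its name; the volatile memset itself is expected
; to keep its flat (addrspace(4)) operand.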
; CHECK-LABEL: @volatile_memset_global_to_flat(
; CHECK: addrspacecast i8 addrspace(1)* %global.ptr to i8 addrspace(4)*
; CHECK: call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %1, i8 4, i64 32, i32 4, i1 true)
define amdgpu_kernel void @volatile_memset_global_to_flat(i8 addrspace(1)* %global.ptr, i32 %y) #0 {
%cast = addrspacecast i8 addrspace(1)* %global.ptr to i8 addrspace(4)*
call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 32, i32 4, i1 true)
ret void
}

declare void @llvm.memset.p4i8.i64(i8 addrspace(4)* nocapture writeonly, i8, i64, i32, i1) #1

attributes #0 = { nounwind }
attributes #1 = { argmemonly nounwind }