You've already forked linux-packaging-mono
Imported Upstream version 5.18.0.205
Former-commit-id: 7f59f7e792705db773f1caecdaa823092f4e2927
This commit is contained in:
parent
5cd5df71cc
commit
8e12397d70
3
external/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/lit.local.cfg
vendored
Normal file
3
external/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/lit.local.cfg
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
if not 'AMDGPU' in config.root.targets:
|
||||
config.unsupported = True
|
||||
|
@ -0,0 +1,139 @@
|
||||
; RUN: opt -mtriple=amdgcn-- -S -separate-const-offset-from-gep -reassociate-geps-verify-no-dead-code -gvn < %s | FileCheck -check-prefix=IR %s
|
||||
|
||||
target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
|
||||
|
||||
@array = internal addrspace(2) constant [4096 x [32 x float]] zeroinitializer, align 4
|
||||
|
||||
; IR-LABEL: @sum_of_array(
|
||||
; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [4096 x [32 x float]], [4096 x [32 x float]] addrspace(2)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
; IR: getelementptr inbounds float, float addrspace(2)* [[BASE_PTR]], i64 1
|
||||
; IR: getelementptr inbounds float, float addrspace(2)* [[BASE_PTR]], i64 32
|
||||
; IR: getelementptr inbounds float, float addrspace(2)* [[BASE_PTR]], i64 33
|
||||
define amdgpu_kernel void @sum_of_array(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
|
||||
%tmp = sext i32 %y to i64
|
||||
%tmp1 = sext i32 %x to i64
|
||||
%tmp2 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(2)* @array, i64 0, i64 %tmp1, i64 %tmp
|
||||
%tmp4 = load float, float addrspace(2)* %tmp2, align 4
|
||||
%tmp5 = fadd float %tmp4, 0.000000e+00
|
||||
%tmp6 = add i32 %y, 1
|
||||
%tmp7 = sext i32 %tmp6 to i64
|
||||
%tmp8 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(2)* @array, i64 0, i64 %tmp1, i64 %tmp7
|
||||
%tmp10 = load float, float addrspace(2)* %tmp8, align 4
|
||||
%tmp11 = fadd float %tmp5, %tmp10
|
||||
%tmp12 = add i32 %x, 1
|
||||
%tmp13 = sext i32 %tmp12 to i64
|
||||
%tmp14 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(2)* @array, i64 0, i64 %tmp13, i64 %tmp
|
||||
%tmp16 = load float, float addrspace(2)* %tmp14, align 4
|
||||
%tmp17 = fadd float %tmp11, %tmp16
|
||||
%tmp18 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(2)* @array, i64 0, i64 %tmp13, i64 %tmp7
|
||||
%tmp20 = load float, float addrspace(2)* %tmp18, align 4
|
||||
%tmp21 = fadd float %tmp17, %tmp20
|
||||
store float %tmp21, float addrspace(1)* %output, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
@array2 = internal addrspace(2) constant [4096 x [4 x float]] zeroinitializer, align 4
|
||||
|
||||
; Some of the indices go over the maximum mubuf offset, so don't split them.
|
||||
|
||||
; IR-LABEL: @sum_of_array_over_max_mubuf_offset(
|
||||
; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
; IR: getelementptr inbounds float, float addrspace(2)* [[BASE_PTR]], i64 255
|
||||
; IR: add i32 %x, 256
|
||||
; IR: getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
; IR: getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
define amdgpu_kernel void @sum_of_array_over_max_mubuf_offset(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
|
||||
%tmp = sext i32 %y to i64
|
||||
%tmp1 = sext i32 %x to i64
|
||||
%tmp2 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %tmp1, i64 %tmp
|
||||
%tmp4 = load float, float addrspace(2)* %tmp2, align 4
|
||||
%tmp5 = fadd float %tmp4, 0.000000e+00
|
||||
%tmp6 = add i32 %y, 255
|
||||
%tmp7 = sext i32 %tmp6 to i64
|
||||
%tmp8 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %tmp1, i64 %tmp7
|
||||
%tmp10 = load float, float addrspace(2)* %tmp8, align 4
|
||||
%tmp11 = fadd float %tmp5, %tmp10
|
||||
%tmp12 = add i32 %x, 256
|
||||
%tmp13 = sext i32 %tmp12 to i64
|
||||
%tmp14 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %tmp13, i64 %tmp
|
||||
%tmp16 = load float, float addrspace(2)* %tmp14, align 4
|
||||
%tmp17 = fadd float %tmp11, %tmp16
|
||||
%tmp18 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %tmp13, i64 %tmp7
|
||||
%tmp20 = load float, float addrspace(2)* %tmp18, align 4
|
||||
%tmp21 = fadd float %tmp17, %tmp20
|
||||
store float %tmp21, float addrspace(1)* %output, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
|
||||
@lds_array = internal addrspace(3) global [4096 x [4 x float]] undef, align 4
|
||||
|
||||
; DS instructions have a larger immediate offset, so make sure these are OK.
|
||||
; IR-LABEL: @sum_of_lds_array_over_max_mubuf_offset(
|
||||
; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %{{[a-zA-Z0-9]+}}, i32 %{{[a-zA-Z0-9]+}}
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i32 255
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i32 16128
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i32 16383
|
||||
define amdgpu_kernel void @sum_of_lds_array_over_max_mubuf_offset(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
|
||||
%tmp2 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %x, i32 %y
|
||||
%tmp4 = load float, float addrspace(3)* %tmp2, align 4
|
||||
%tmp5 = fadd float %tmp4, 0.000000e+00
|
||||
%tmp6 = add i32 %y, 255
|
||||
%tmp8 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %x, i32 %tmp6
|
||||
%tmp10 = load float, float addrspace(3)* %tmp8, align 4
|
||||
%tmp11 = fadd float %tmp5, %tmp10
|
||||
%tmp12 = add i32 %x, 4032
|
||||
%tmp14 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %tmp12, i32 %y
|
||||
%tmp16 = load float, float addrspace(3)* %tmp14, align 4
|
||||
%tmp17 = fadd float %tmp11, %tmp16
|
||||
%tmp18 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %tmp12, i32 %tmp6
|
||||
%tmp20 = load float, float addrspace(3)* %tmp18, align 4
|
||||
%tmp21 = fadd float %tmp17, %tmp20
|
||||
store float %tmp21, float addrspace(1)* %output, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
; IR-LABEL: @keep_metadata(
|
||||
; IR: getelementptr {{.*}} !amdgpu.uniform
|
||||
; IR: getelementptr {{.*}} !amdgpu.uniform
|
||||
; IR: getelementptr {{.*}} !amdgpu.uniform
|
||||
define amdgpu_ps <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @keep_metadata([0 x <4 x i32>] addrspace(2)* inreg noalias dereferenceable(18446744073709551615), [0 x <8 x i32>] addrspace(2)* inreg noalias dereferenceable(18446744073709551615), [0 x <4 x i32>] addrspace(2)* inreg noalias dereferenceable(18446744073709551615), [0 x <8 x i32>] addrspace(2)* inreg noalias dereferenceable(18446744073709551615), float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, i32, i32, float, i32) #5 {
|
||||
main_body:
|
||||
%22 = call nsz float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %5) #8
|
||||
%23 = bitcast float %22 to i32
|
||||
%24 = shl i32 %23, 1
|
||||
%25 = getelementptr [0 x <8 x i32>], [0 x <8 x i32>] addrspace(2)* %1, i32 0, i32 %24, !amdgpu.uniform !0
|
||||
%26 = load <8 x i32>, <8 x i32> addrspace(2)* %25, align 32, !invariant.load !0
|
||||
%27 = shl i32 %23, 2
|
||||
%28 = or i32 %27, 3
|
||||
%29 = bitcast [0 x <8 x i32>] addrspace(2)* %1 to [0 x <4 x i32>] addrspace(2)*
|
||||
%30 = getelementptr [0 x <4 x i32>], [0 x <4 x i32>] addrspace(2)* %29, i32 0, i32 %28, !amdgpu.uniform !0
|
||||
%31 = load <4 x i32>, <4 x i32> addrspace(2)* %30, align 16, !invariant.load !0
|
||||
%32 = call nsz <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> zeroinitializer, <8 x i32> %26, <4 x i32> %31, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #8
|
||||
%33 = extractelement <4 x float> %32, i32 0
|
||||
%34 = extractelement <4 x float> %32, i32 1
|
||||
%35 = extractelement <4 x float> %32, i32 2
|
||||
%36 = extractelement <4 x float> %32, i32 3
|
||||
%37 = bitcast float %4 to i32
|
||||
%38 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> undef, i32 %37, 4
|
||||
%39 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %38, float %33, 5
|
||||
%40 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %39, float %34, 6
|
||||
%41 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %40, float %35, 7
|
||||
%42 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %41, float %36, 8
|
||||
%43 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %42, float %20, 19
|
||||
ret <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %43
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone speculatable
|
||||
declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #6
|
||||
|
||||
; Function Attrs: nounwind readonly
|
||||
declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #7
|
||||
|
||||
|
||||
!0 = !{}
|
||||
|
||||
attributes #5 = { "InitialPSInputAddr"="45175" }
|
||||
attributes #6 = { nounwind readnone speculatable }
|
||||
attributes #7 = { nounwind readonly }
|
||||
attributes #8 = { nounwind readnone }
|
3
external/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
vendored
Normal file
3
external/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
if not 'NVPTX' in config.root.targets:
|
||||
config.unsupported = True
|
||||
|
236
external/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
vendored
Normal file
236
external/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
vendored
Normal file
@ -0,0 +1,236 @@
|
||||
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
|
||||
; RUN: opt < %s -S -separate-const-offset-from-gep -reassociate-geps-verify-no-dead-code -gvn | FileCheck %s --check-prefix=IR
|
||||
|
||||
; Verifies the SeparateConstOffsetFromGEP pass.
|
||||
; The following code computes
|
||||
; *output = array[x][y] + array[x][y+1] + array[x+1][y] + array[x+1][y+1]
|
||||
;
|
||||
; We expect SeparateConstOffsetFromGEP to transform it to
|
||||
;
|
||||
; float *base = &a[x][y];
|
||||
; *output = base[0] + base[1] + base[32] + base[33];
|
||||
;
|
||||
; so the backend can emit PTX that uses fewer virtual registers.
|
||||
|
||||
target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
|
||||
target triple = "nvptx64-unknown-unknown"
|
||||
|
||||
@array = internal addrspace(3) constant [32 x [32 x float]] zeroinitializer, align 4
|
||||
|
||||
define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
|
||||
.preheader:
|
||||
%0 = sext i32 %y to i64
|
||||
%1 = sext i32 %x to i64
|
||||
%2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
|
||||
%3 = addrspacecast float addrspace(3)* %2 to float*
|
||||
%4 = load float, float* %3, align 4
|
||||
%5 = fadd float %4, 0.000000e+00
|
||||
%6 = add i32 %y, 1
|
||||
%7 = sext i32 %6 to i64
|
||||
%8 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
|
||||
%9 = addrspacecast float addrspace(3)* %8 to float*
|
||||
%10 = load float, float* %9, align 4
|
||||
%11 = fadd float %5, %10
|
||||
%12 = add i32 %x, 1
|
||||
%13 = sext i32 %12 to i64
|
||||
%14 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
|
||||
%15 = addrspacecast float addrspace(3)* %14 to float*
|
||||
%16 = load float, float* %15, align 4
|
||||
%17 = fadd float %11, %16
|
||||
%18 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
|
||||
%19 = addrspacecast float addrspace(3)* %18 to float*
|
||||
%20 = load float, float* %19, align 4
|
||||
%21 = fadd float %17, %20
|
||||
store float %21, float* %output, align 4
|
||||
ret void
|
||||
}
|
||||
; PTX-LABEL: sum_of_array(
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
|
||||
|
||||
; IR-LABEL: @sum_of_array(
|
||||
; TODO: GVN is unable to preserve the "inbounds" keyword on the first GEP. Need
|
||||
; some infrastructure changes to enable such optimizations.
|
||||
; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 1
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 32
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 33
|
||||
|
||||
; @sum_of_array2 is very similar to @sum_of_array. The only difference is in
|
||||
; the order of "sext" and "add" when computing the array indices. @sum_of_array
|
||||
; computes add before sext, e.g., array[sext(x + 1)][sext(y + 1)], while
|
||||
; @sum_of_array2 computes sext before add,
|
||||
; e.g., array[sext(x) + 1][sext(y) + 1]. SeparateConstOffsetFromGEP should be
|
||||
; able to extract constant offsets from both forms.
|
||||
define void @sum_of_array2(i32 %x, i32 %y, float* nocapture %output) {
|
||||
.preheader:
|
||||
%0 = sext i32 %y to i64
|
||||
%1 = sext i32 %x to i64
|
||||
%2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
|
||||
%3 = addrspacecast float addrspace(3)* %2 to float*
|
||||
%4 = load float, float* %3, align 4
|
||||
%5 = fadd float %4, 0.000000e+00
|
||||
%6 = add i64 %0, 1
|
||||
%7 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
|
||||
%8 = addrspacecast float addrspace(3)* %7 to float*
|
||||
%9 = load float, float* %8, align 4
|
||||
%10 = fadd float %5, %9
|
||||
%11 = add i64 %1, 1
|
||||
%12 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
|
||||
%13 = addrspacecast float addrspace(3)* %12 to float*
|
||||
%14 = load float, float* %13, align 4
|
||||
%15 = fadd float %10, %14
|
||||
%16 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
|
||||
%17 = addrspacecast float addrspace(3)* %16 to float*
|
||||
%18 = load float, float* %17, align 4
|
||||
%19 = fadd float %15, %18
|
||||
store float %19, float* %output, align 4
|
||||
ret void
|
||||
}
|
||||
; PTX-LABEL: sum_of_array2(
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
|
||||
|
||||
; IR-LABEL: @sum_of_array2(
|
||||
; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 1
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 32
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 33
|
||||
|
||||
|
||||
; This function loads
|
||||
; array[zext(x)][zext(y)]
|
||||
; array[zext(x)][zext(y +nuw 1)]
|
||||
; array[zext(x +nuw 1)][zext(y)]
|
||||
; array[zext(x +nuw 1)][zext(y +nuw 1)].
|
||||
;
|
||||
; This function is similar to @sum_of_array, but it
|
||||
; 1) extends array indices using zext instead of sext;
|
||||
; 2) annotates the addition with "nuw"; otherwise, zext(x + 1) => zext(x) + 1
|
||||
; may be invalid.
|
||||
define void @sum_of_array3(i32 %x, i32 %y, float* nocapture %output) {
|
||||
.preheader:
|
||||
%0 = zext i32 %y to i64
|
||||
%1 = zext i32 %x to i64
|
||||
%2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
|
||||
%3 = addrspacecast float addrspace(3)* %2 to float*
|
||||
%4 = load float, float* %3, align 4
|
||||
%5 = fadd float %4, 0.000000e+00
|
||||
%6 = add nuw i32 %y, 1
|
||||
%7 = zext i32 %6 to i64
|
||||
%8 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
|
||||
%9 = addrspacecast float addrspace(3)* %8 to float*
|
||||
%10 = load float, float* %9, align 4
|
||||
%11 = fadd float %5, %10
|
||||
%12 = add nuw i32 %x, 1
|
||||
%13 = zext i32 %12 to i64
|
||||
%14 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
|
||||
%15 = addrspacecast float addrspace(3)* %14 to float*
|
||||
%16 = load float, float* %15, align 4
|
||||
%17 = fadd float %11, %16
|
||||
%18 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
|
||||
%19 = addrspacecast float addrspace(3)* %18 to float*
|
||||
%20 = load float, float* %19, align 4
|
||||
%21 = fadd float %17, %20
|
||||
store float %21, float* %output, align 4
|
||||
ret void
|
||||
}
|
||||
; PTX-LABEL: sum_of_array3(
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
|
||||
|
||||
; IR-LABEL: @sum_of_array3(
|
||||
; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 1
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 32
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 33
|
||||
|
||||
|
||||
; This function loads
|
||||
; array[zext(x)][zext(y)]
|
||||
; array[zext(x)][zext(y)]
|
||||
; array[zext(x) + 1][zext(y) + 1]
|
||||
; array[zext(x) + 1][zext(y) + 1].
|
||||
;
|
||||
; We expect the generated code to reuse the computation of
|
||||
; &array[zext(x)][zext(y)]. See the expected IR and PTX for details.
|
||||
define void @sum_of_array4(i32 %x, i32 %y, float* nocapture %output) {
|
||||
.preheader:
|
||||
%0 = zext i32 %y to i64
|
||||
%1 = zext i32 %x to i64
|
||||
%2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
|
||||
%3 = addrspacecast float addrspace(3)* %2 to float*
|
||||
%4 = load float, float* %3, align 4
|
||||
%5 = fadd float %4, 0.000000e+00
|
||||
%6 = add i64 %0, 1
|
||||
%7 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
|
||||
%8 = addrspacecast float addrspace(3)* %7 to float*
|
||||
%9 = load float, float* %8, align 4
|
||||
%10 = fadd float %5, %9
|
||||
%11 = add i64 %1, 1
|
||||
%12 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
|
||||
%13 = addrspacecast float addrspace(3)* %12 to float*
|
||||
%14 = load float, float* %13, align 4
|
||||
%15 = fadd float %10, %14
|
||||
%16 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
|
||||
%17 = addrspacecast float addrspace(3)* %16 to float*
|
||||
%18 = load float, float* %17, align 4
|
||||
%19 = fadd float %15, %18
|
||||
store float %19, float* %output, align 4
|
||||
ret void
|
||||
}
|
||||
; PTX-LABEL: sum_of_array4(
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
|
||||
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
|
||||
|
||||
; IR-LABEL: @sum_of_array4(
|
||||
; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 1
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 32
|
||||
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 33
|
||||
|
||||
|
||||
; The source code is:
|
||||
; p0 = &input[sext(x + y)];
|
||||
; p1 = &input[sext(x + (y + 5))];
|
||||
;
|
||||
; Without reuniting extensions, SeparateConstOffsetFromGEP would emit
|
||||
; p0 = &input[sext(x + y)];
|
||||
; t1 = &input[sext(x) + sext(y)];
|
||||
; p1 = &t1[5];
|
||||
;
|
||||
; With reuniting extensions, it merges p0 and t1 and thus emits
|
||||
; p0 = &input[sext(x + y)];
|
||||
; p1 = &p0[5];
|
||||
define void @reunion(i32 %x, i32 %y, float* %input) {
|
||||
; IR-LABEL: @reunion(
|
||||
; PTX-LABEL: reunion(
|
||||
entry:
|
||||
%xy = add nsw i32 %x, %y
|
||||
%0 = sext i32 %xy to i64
|
||||
%p0 = getelementptr inbounds float, float* %input, i64 %0
|
||||
%v0 = load float, float* %p0, align 4
|
||||
; PTX: ld.f32 %f{{[0-9]+}}, {{\[}}[[p0:%rd[0-9]+]]{{\]}}
|
||||
call void @use(float %v0)
|
||||
|
||||
%y5 = add nsw i32 %y, 5
|
||||
%xy5 = add nsw i32 %x, %y5
|
||||
%1 = sext i32 %xy5 to i64
|
||||
%p1 = getelementptr inbounds float, float* %input, i64 %1
|
||||
; IR: getelementptr inbounds float, float* %p0, i64 5
|
||||
%v1 = load float, float* %p1, align 4
|
||||
; PTX: ld.f32 %f{{[0-9]+}}, {{\[}}[[p0]]+20{{\]}}
|
||||
call void @use(float %v1)
|
||||
|
||||
ret void
|
||||
}
|
||||
|
||||
declare void @use(float)
|
279
external/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
vendored
Normal file
279
external/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
vendored
Normal file
@ -0,0 +1,279 @@
|
||||
; RUN: opt < %s -separate-const-offset-from-gep -reassociate-geps-verify-no-dead-code -S | FileCheck %s
|
||||
|
||||
; Several unit tests for -separate-const-offset-from-gep. The transformation
|
||||
; heavily relies on TargetTransformInfo, so we put these tests under
|
||||
; target-specific folders.
|
||||
|
||||
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
|
||||
; target triple is necessary; otherwise TargetTransformInfo rejects any
|
||||
; addressing mode.
|
||||
target triple = "nvptx64-unknown-unknown"
|
||||
|
||||
%struct.S = type { float, double }
|
||||
|
||||
@struct_array = global [1024 x %struct.S] zeroinitializer, align 16
|
||||
@float_2d_array = global [32 x [32 x float]] zeroinitializer, align 4
|
||||
|
||||
; We should not extract any struct field indices, because fields in a struct
|
||||
; may have different types.
|
||||
define double* @struct(i32 %i) {
|
||||
entry:
|
||||
%add = add nsw i32 %i, 5
|
||||
%idxprom = sext i32 %add to i64
|
||||
%p = getelementptr inbounds [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
|
||||
ret double* %p
|
||||
}
|
||||
; CHECK-LABEL: @struct(
|
||||
; CHECK: getelementptr [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1
|
||||
|
||||
; We should be able to trace into sext(a + b) if a + b is non-negative
|
||||
; (e.g., used as an index of an inbounds GEP) and one of a and b is
|
||||
; non-negative.
|
||||
define float* @sext_add(i32 %i, i32 %j) {
|
||||
entry:
|
||||
%0 = add i32 %i, 1
|
||||
%1 = sext i32 %0 to i64 ; inbound sext(i + 1) = sext(i) + 1
|
||||
%2 = add i32 %j, -2
|
||||
; However, inbound sext(j + -2) != sext(j) + -2, e.g., j = INT_MIN
|
||||
%3 = sext i32 %2 to i64
|
||||
%p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
|
||||
ret float* %p
|
||||
}
|
||||
; CHECK-LABEL: @sext_add(
|
||||
; CHECK-NOT: = add
|
||||
; CHECK: add i32 %j, -2
|
||||
; CHECK: sext
|
||||
; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
; CHECK: getelementptr inbounds float, float* %{{[a-zA-Z0-9]+}}, i64 32
|
||||
|
||||
; We should be able to trace into sext/zext if it can be distributed to both
|
||||
; operands, e.g., sext (add nsw a, b) == add nsw (sext a), (sext b)
|
||||
;
|
||||
; This test verifies we can transform
|
||||
; gep base, a + sext(b +nsw 1), c + zext(d +nuw 1)
|
||||
; to
|
||||
; gep base, a + sext(b), c + zext(d); gep ..., 1 * 32 + 1
|
||||
define float* @ext_add_no_overflow(i64 %a, i32 %b, i64 %c, i32 %d) {
|
||||
%b1 = add nsw i32 %b, 1
|
||||
%b2 = sext i32 %b1 to i64
|
||||
%i = add i64 %a, %b2 ; i = a + sext(b +nsw 1)
|
||||
%d1 = add nuw i32 %d, 1
|
||||
%d2 = zext i32 %d1 to i64
|
||||
%j = add i64 %c, %d2 ; j = c + zext(d +nuw 1)
|
||||
%p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
|
||||
ret float* %p
|
||||
}
|
||||
; CHECK-LABEL: @ext_add_no_overflow(
|
||||
; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
; CHECK: getelementptr inbounds float, float* [[BASE_PTR]], i64 33
|
||||
|
||||
; Verifies we handle nested sext/zext correctly.
|
||||
define void @sext_zext(i32 %a, i32 %b, float** %out1, float** %out2) {
|
||||
entry:
|
||||
%0 = add nsw nuw i32 %a, 1
|
||||
%1 = sext i32 %0 to i48
|
||||
%2 = zext i48 %1 to i64 ; zext(sext(a +nsw nuw 1)) = zext(sext(a)) + 1
|
||||
%3 = add nsw i32 %b, 2
|
||||
%4 = sext i32 %3 to i48
|
||||
%5 = zext i48 %4 to i64 ; zext(sext(b +nsw 2)) != zext(sext(b)) + 2
|
||||
%p1 = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %2, i64 %5
|
||||
store float* %p1, float** %out1
|
||||
%6 = add nuw i32 %a, 3
|
||||
%7 = zext i32 %6 to i48
|
||||
%8 = sext i48 %7 to i64 ; sext(zext(a +nuw 3)) = zext(a +nuw 3) = zext(a) + 3
|
||||
%9 = add nsw i32 %b, 4
|
||||
%10 = zext i32 %9 to i48
|
||||
%11 = sext i48 %10 to i64 ; sext(zext(b +nsw 4)) != zext(b) + 4
|
||||
%p2 = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %8, i64 %11
|
||||
store float* %p2, float** %out2
|
||||
ret void
|
||||
}
|
||||
; CHECK-LABEL: @sext_zext(
|
||||
; CHECK: [[BASE_PTR_1:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
; CHECK: getelementptr float, float* [[BASE_PTR_1]], i64 32
|
||||
; CHECK: [[BASE_PTR_2:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
; CHECK: getelementptr float, float* [[BASE_PTR_2]], i64 96
|
||||
|
||||
; Similar to @ext_add_no_overflow, we should be able to trace into s/zext if
|
||||
; its operand is an OR and the two operands of the OR have no common bits.
|
||||
define float* @sext_or(i64 %a, i32 %b) {
|
||||
entry:
|
||||
%b1 = shl i32 %b, 2
|
||||
%b2 = or i32 %b1, 1 ; (b << 2) and 1 have no common bits
|
||||
%b3 = or i32 %b1, 4 ; (b << 2) and 4 may have common bits
|
||||
%b2.ext = zext i32 %b2 to i64
|
||||
%b3.ext = sext i32 %b3 to i64
|
||||
%i = add i64 %a, %b2.ext
|
||||
%j = add i64 %a, %b3.ext
|
||||
%p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
|
||||
ret float* %p
|
||||
}
|
||||
; CHECK-LABEL: @sext_or(
|
||||
; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
|
||||
; CHECK: getelementptr inbounds float, float* [[BASE_PTR]], i64 32
|
||||
|
||||
; The subexpression (b + 5) is used in both "i = a + (b + 5)" and "*out = b +
|
||||
; 5". When extracting the constant offset 5, make sure "*out = b + 5" isn't
|
||||
; affected.
|
||||
define float* @expr(i64 %a, i64 %b, i64* %out) {
|
||||
entry:
|
||||
%b5 = add i64 %b, 5
|
||||
%i = add i64 %b5, %a
|
||||
%p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 0
|
||||
store i64 %b5, i64* %out
|
||||
ret float* %p
|
||||
}
|
||||
; CHECK-LABEL: @expr(
|
||||
; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 0
|
||||
; CHECK: getelementptr inbounds float, float* [[BASE_PTR]], i64 160
|
||||
; CHECK: store i64 %b5, i64* %out
|
||||
|
||||
; d + sext(a +nsw (b +nsw (c +nsw 8))) => (d + sext(a) + sext(b) + sext(c)) + 8
|
||||
define float* @sext_expr(i32 %a, i32 %b, i32 %c, i64 %d) {
|
||||
entry:
|
||||
%0 = add nsw i32 %c, 8
|
||||
%1 = add nsw i32 %b, %0
|
||||
%2 = add nsw i32 %a, %1
|
||||
%3 = sext i32 %2 to i64
|
||||
%i = add i64 %d, %3
|
||||
%p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
|
||||
ret float* %p
|
||||
}
|
||||
; CHECK-LABEL: @sext_expr(
|
||||
; CHECK: sext i32
|
||||
; CHECK: sext i32
|
||||
; CHECK: sext i32
|
||||
; CHECK: getelementptr inbounds float, float* %{{[a-zA-Z0-9]+}}, i64 8
|
||||
|
||||
; Verifies we handle "sub" correctly.
|
||||
define float* @sub(i64 %i, i64 %j) {
|
||||
%i2 = sub i64 %i, 5 ; i - 5
|
||||
%j2 = sub i64 5, %j ; 5 - i
|
||||
%p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
|
||||
ret float* %p
|
||||
}
|
||||
; CHECK-LABEL: @sub(
|
||||
; CHECK: %[[j2:[a-zA-Z0-9]+]] = sub i64 0, %j
|
||||
; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
|
||||
; CHECK: getelementptr inbounds float, float* [[BASE_PTR]], i64 -155
|
||||
|
||||
%struct.Packed = type <{ [3 x i32], [8 x i64] }> ; <> means packed

; Verifies we can emit correct uglygep if the address is not naturally aligned.
define i64* @packed_struct(i32 %i, i32 %j) {
entry:
  %s = alloca [1024 x %struct.Packed], align 16
  %add = add nsw i32 %j, 3
  %idxprom = sext i32 %add to i64
  %add1 = add nsw i32 %i, 1
  %idxprom2 = sext i32 %add1 to i64
  %arrayidx3 = getelementptr inbounds [1024 x %struct.Packed], [1024 x %struct.Packed]* %s, i64 0, i64 %idxprom2, i32 1, i64 %idxprom
  ret i64* %arrayidx3
}
; CHECK-LABEL: @packed_struct(
; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [1024 x %struct.Packed], [1024 x %struct.Packed]* %s, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1, i64 %{{[a-zA-Z0-9]+}}
; CHECK: [[CASTED_PTR:%[a-zA-Z0-9]+]] = bitcast i64* [[BASE_PTR]] to i8*
; CHECK: %uglygep = getelementptr inbounds i8, i8* [[CASTED_PTR]], i64 100
; CHECK: bitcast i8* %uglygep to i64*
|
||||
|
||||
; We shouldn't be able to extract the 8 from "zext(a +nuw (b + 8))",
; because "zext(b + 8) != zext(b) + 8"
define float* @zext_expr(i32 %a, i32 %b) {
entry:
  %0 = add i32 %b, 8
  %1 = add nuw i32 %a, %0
  %i = zext i32 %1 to i64
  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
  ret float* %p
}
; CHECK-LABEL: zext_expr(
; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
|
||||
|
||||
; Per http://llvm.org/docs/LangRef.html#id181, the indices of an off-bound gep
; should be considered sign-extended to the pointer size. Therefore,
; gep base, (add i32 a, b) != gep (gep base, i32 a), i32 b
; because
; sext(a + b) != sext(a) + sext(b)
;
; This test verifies we do not illegitimately extract the 8 from
; gep base, (i32 a + 8)
define float* @i32_add(i32 %a) {
entry:
  %i = add i32 %a, 8
  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i32 %i
  ret float* %p
}
; CHECK-LABEL: @i32_add(
; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
; CHECK-NOT: getelementptr
|
||||
|
||||
; Verifies that we compute the correct constant offset when the index is
; sign-extended and then zero-extended. The old version of our code failed to
; handle this case because it simply computed the constant offset as the
; sign-extended value of the constant part of the GEP index.
define float* @apint(i1 %a) {
entry:
  %0 = add nsw nuw i1 %a, 1
  %1 = sext i1 %0 to i4
  %2 = zext i4 %1 to i64 ; zext (sext i1 1 to i4) to i64 = 15
  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %2
  ret float* %p
}
; CHECK-LABEL: @apint(
; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
; CHECK: getelementptr float, float* [[BASE_PTR]], i64 15
|
||||
|
||||
; Do not trace into binary operators other than ADD, SUB, and OR.
define float* @and(i64 %a) {
entry:
  %0 = shl i64 %a, 2
  %1 = and i64 %0, 1
  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %1
  ret float* %p
}
; CHECK-LABEL: @and(
; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array
; CHECK-NOT: getelementptr
|
||||
|
||||
; The code that rebuilds an OR expression used to be buggy, and failed on this
; test.
define float* @shl_add_or(i64 %a, float* %ptr) {
; CHECK-LABEL: @shl_add_or(
entry:
  %shl = shl i64 %a, 2
  %add = add i64 %shl, 12
  %or = or i64 %add, 1
; CHECK: [[OR:%or[0-9]*]] = add i64 %shl, 1
; ((a << 2) + 12) and 1 have no common bits. Therefore,
; SeparateConstOffsetFromGEP is able to extract the 12.
; TODO(jingyue): We could reassociate the expression to combine 12 and 1.
  %p = getelementptr float, float* %ptr, i64 %or
; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr float, float* %ptr, i64 [[OR]]
; CHECK: getelementptr float, float* [[PTR]], i64 12
  ret float* %p
; CHECK-NEXT: ret
}
|
||||
|
||||
; The source code used to be buggy in checking
; (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0)
; where AccumulativeByteOffset is signed but ElementTypeSizeOfGEP is unsigned.
; The compiler would promote AccumulativeByteOffset to unsigned, causing
; unexpected results. For example, while -64 % (int64_t)24 != 0,
; -64 % (uint64_t)24 == 0.
%struct3 = type { i64, i32 }
%struct2 = type { %struct3, i32 }
%struct1 = type { i64, %struct2 }
%struct0 = type { i32, i32, i64*, [100 x %struct1] }

define %struct2* @sign_mod_unsign(%struct0* %ptr, i64 %idx) {
; CHECK-LABEL: @sign_mod_unsign(
entry:
  %arrayidx = add nsw i64 %idx, -2
; CHECK-NOT: add
  %ptr2 = getelementptr inbounds %struct0, %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0, %struct0* %ptr, i64 0, i32 3, i64 %idx, i32 1
; CHECK: [[PTR1:%[a-zA-Z0-9]+]] = bitcast %struct2* [[PTR]] to i8*
; CHECK: getelementptr inbounds i8, i8* [[PTR1]], i64 -64
; CHECK: bitcast
  ret %struct2* %ptr2
; CHECK-NEXT: ret
}