Imported Upstream version 5.18.0.167
Former-commit-id: 289509151e0fee68a1b591a20c9f109c3c789d3a
This commit is contained in:
parent e19d552987
commit b084638f15
@@ -1,70 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=aarch64--linux-gnu < %s | FileCheck %s

; Verify the cost of bswap instructions.

declare i16 @llvm.bswap.i16(i16)
declare i32 @llvm.bswap.i32(i32)
declare i64 @llvm.bswap.i64(i64)

declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>)
declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>)

declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)

define i16 @bswap_i16(i16 %a) {
; CHECK: 'Cost Model Analysis' for function 'bswap_i16':
; CHECK: Found an estimated cost of 1 for instruction: %bswap
  %bswap = tail call i16 @llvm.bswap.i16(i16 %a)
  ret i16 %bswap
}

define i32 @bswap_i32(i32 %a) {
; CHECK: 'Cost Model Analysis' for function 'bswap_i32':
; CHECK: Found an estimated cost of 1 for instruction: %bswap
  %bswap = tail call i32 @llvm.bswap.i32(i32 %a)
  ret i32 %bswap
}

define i64 @bswap_i64(i64 %a) {
; CHECK: 'Cost Model Analysis' for function 'bswap_i64':
; CHECK: Found an estimated cost of 1 for instruction: %bswap
  %bswap = tail call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %bswap
}

define <2 x i32> @bswap_v2i32(<2 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'bswap_v2i32':
; CHECK: Found an estimated cost of 8 for instruction: %bswap
  %bswap = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
  ret <2 x i32> %bswap
}

define <4 x i16> @bswap_v4i16(<4 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'bswap_v4i16':
; CHECK: Found an estimated cost of 22 for instruction: %bswap
  %bswap = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> %a)
  ret <4 x i16> %bswap
}

define <2 x i64> @bswap_v2i64(<2 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'bswap_v2i64':
; CHECK: Found an estimated cost of 8 for instruction: %bswap
  %bswap = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %a)
  ret <2 x i64> %bswap
}

define <4 x i32> @bswap_v4i32(<4 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'bswap_v4i32':
; CHECK: Found an estimated cost of 22 for instruction: %bswap
  %bswap = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %a)
  ret <4 x i32> %bswap
}

define <8 x i16> @bswap_v8i16(<8 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'bswap_v8i16':
; CHECK: Found an estimated cost of 50 for instruction: %bswap
  %bswap = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %a)
  ret <8 x i16> %bswap
}
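For reference, lit executes each "; RUN:" line as a shell command with %s substituted by the test file's path, and FileCheck then matches the "; CHECK:" patterns in order against opt's output. A minimal test of the same shape is sketched below; the function name is hypothetical, and it assumes an LLVM build of this vintage, where the legacy -cost-model -analyze pass still exists:

; RUN: opt -cost-model -analyze -mtriple=aarch64--linux-gnu < %s | FileCheck %s

declare i32 @llvm.bswap.i32(i32)

; A single scalar bswap is costed at 1 on AArch64, per the deleted test above.
define i32 @one_bswap(i32 %a) {
; CHECK: 'Cost Model Analysis' for function 'one_bswap':
; CHECK: Found an estimated cost of 1 for instruction: %r
  %r = tail call i32 @llvm.bswap.i32(i32 %a)
  ret i32 %r
}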
File diff suppressed because it is too large
external/llvm/test/Analysis/CostModel/AArch64/gep.ll (vendored, 292 deleted lines)
@@ -1,292 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=aarch64--linux-gnu < %s | FileCheck %s

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnu"

define i8 @test1(i8* %p) {
; CHECK-LABEL: test1
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
  %a = getelementptr inbounds i8, i8* %p, i32 0
  %v = load i8, i8* %a
  ret i8 %v
}

define i16 @test2(i16* %p) {
; CHECK-LABEL: test2
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16, i16*
  %a = getelementptr inbounds i16, i16* %p, i32 0
  %v = load i16, i16* %a
  ret i16 %v
}

define i32 @test3(i32* %p) {
; CHECK-LABEL: test3
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32, i32*
  %a = getelementptr inbounds i32, i32* %p, i32 0
  %v = load i32, i32* %a
  ret i32 %v
}

define i64 @test4(i64* %p) {
; CHECK-LABEL: test4
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i64, i64*
  %a = getelementptr inbounds i64, i64* %p, i32 0
  %v = load i64, i64* %a
  ret i64 %v
}

define i8 @test5(i8* %p) {
; CHECK-LABEL: test5
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
  %a = getelementptr inbounds i8, i8* %p, i32 1024
  %v = load i8, i8* %a
  ret i8 %v
}

define i16 @test6(i16* %p) {
; CHECK-LABEL: test6
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16, i16*
  %a = getelementptr inbounds i16, i16* %p, i32 1024
  %v = load i16, i16* %a
  ret i16 %v
}

define i32 @test7(i32* %p) {
; CHECK-LABEL: test7
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32, i32*
  %a = getelementptr inbounds i32, i32* %p, i32 1024
  %v = load i32, i32* %a
  ret i32 %v
}

define i64 @test8(i64* %p) {
; CHECK-LABEL: test8
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i64, i64*
  %a = getelementptr inbounds i64, i64* %p, i32 1024
  %v = load i64, i64* %a
  ret i64 %v
}

define i8 @test9(i8* %p) {
; CHECK-LABEL: test9
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i8, i8*
  %a = getelementptr inbounds i8, i8* %p, i32 4096
  %v = load i8, i8* %a
  ret i8 %v
}

define i16 @test10(i16* %p) {
; CHECK-LABEL: test10
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i16, i16*
  %a = getelementptr inbounds i16, i16* %p, i32 4096
  %v = load i16, i16* %a
  ret i16 %v
}

define i32 @test11(i32* %p) {
; CHECK-LABEL: test11
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i32, i32*
  %a = getelementptr inbounds i32, i32* %p, i32 4096
  %v = load i32, i32* %a
  ret i32 %v
}

define i64 @test12(i64* %p) {
; CHECK-LABEL: test12
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i64, i64*
  %a = getelementptr inbounds i64, i64* %p, i32 4096
  %v = load i64, i64* %a
  ret i64 %v
}

define i8 @test13(i8* %p) {
; CHECK-LABEL: test13
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
  %a = getelementptr inbounds i8, i8* %p, i32 -64
  %v = load i8, i8* %a
  ret i8 %v
}

define i16 @test14(i16* %p) {
; CHECK-LABEL: test14
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16, i16*
  %a = getelementptr inbounds i16, i16* %p, i32 -64
  %v = load i16, i16* %a
  ret i16 %v
}

define i32 @test15(i32* %p) {
; CHECK-LABEL: test15
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32, i32*
  %a = getelementptr inbounds i32, i32* %p, i32 -64
  %v = load i32, i32* %a
  ret i32 %v
}

define i64 @test16(i64* %p) {
; CHECK-LABEL: test16
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i64, i64*
  %a = getelementptr inbounds i64, i64* %p, i32 -64
  %v = load i64, i64* %a
  ret i64 %v
}

define i8 @test17(i8* %p) {
; CHECK-LABEL: test17
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i8, i8*
  %a = getelementptr inbounds i8, i8* %p, i32 -1024
  %v = load i8, i8* %a
  ret i8 %v
}

define i16 @test18(i16* %p) {
; CHECK-LABEL: test18
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i16, i16*
  %a = getelementptr inbounds i16, i16* %p, i32 -1024
  %v = load i16, i16* %a
  ret i16 %v
}

define i32 @test19(i32* %p) {
; CHECK-LABEL: test19
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i32, i32*
  %a = getelementptr inbounds i32, i32* %p, i32 -1024
  %v = load i32, i32* %a
  ret i32 %v
}

define i64 @test20(i64* %p) {
; CHECK-LABEL: test20
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i64, i64*
  %a = getelementptr inbounds i64, i64* %p, i32 -1024
  %v = load i64, i64* %a
  ret i64 %v
}

define i8 @test21(i8* %p, i32 %i) {
; CHECK-LABEL: test21
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
  %a = getelementptr inbounds i8, i8* %p, i32 %i
  %v = load i8, i8* %a
  ret i8 %v
}

define i16 @test22(i16* %p, i32 %i) {
; CHECK-LABEL: test22
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16, i16*
  %a = getelementptr inbounds i16, i16* %p, i32 %i
  %v = load i16, i16* %a
  ret i16 %v
}

define i32 @test23(i32* %p, i32 %i) {
; CHECK-LABEL: test23
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32, i32*
  %a = getelementptr inbounds i32, i32* %p, i32 %i
  %v = load i32, i32* %a
  ret i32 %v
}

define i64 @test24(i64* %p, i32 %i) {
; CHECK-LABEL: test24
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i64, i64*
  %a = getelementptr inbounds i64, i64* %p, i32 %i
  %v = load i64, i64* %a
  ret i64 %v
}

define i8 @test25(i8* %p) {
; CHECK-LABEL: test25
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
  %a = getelementptr inbounds i8, i8* %p, i32 -128
  %v = load i8, i8* %a
  ret i8 %v
}

define i16 @test26(i16* %p) {
; CHECK-LABEL: test26
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16, i16*
  %a = getelementptr inbounds i16, i16* %p, i32 -128
  %v = load i16, i16* %a
  ret i16 %v
}

define i32 @test27(i32* %p) {
; CHECK-LABEL: test27
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i32, i32*
  %a = getelementptr inbounds i32, i32* %p, i32 -128
  %v = load i32, i32* %a
  ret i32 %v
}

define i64 @test28(i64* %p) {
; CHECK-LABEL: test28
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i64, i64*
  %a = getelementptr inbounds i64, i64* %p, i32 -128
  %v = load i64, i64* %a
  ret i64 %v
}

define i8 @test29(i8* %p) {
; CHECK-LABEL: test29
; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
  %a = getelementptr inbounds i8, i8* %p, i32 -256
  %v = load i8, i8* %a
  ret i8 %v
}

define i16 @test30(i16* %p) {
; CHECK-LABEL: test30
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i16, i16*
  %a = getelementptr inbounds i16, i16* %p, i32 -256
  %v = load i16, i16* %a
  ret i16 %v
}

define i32 @test31(i32* %p) {
; CHECK-LABEL: test31
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i32, i32*
  %a = getelementptr inbounds i32, i32* %p, i32 -256
  %v = load i32, i32* %a
  ret i32 %v
}

define i64 @test32(i64* %p) {
; CHECK-LABEL: test32
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i64, i64*
  %a = getelementptr inbounds i64, i64* %p, i32 -256
  %v = load i64, i64* %a
  ret i64 %v
}

define i8 @test33(i8* %p) {
; CHECK-LABEL: test33
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i8, i8*
  %a = getelementptr inbounds i8, i8* %p, i32 -512
  %v = load i8, i8* %a
  ret i8 %v
}

define i16 @test34(i16* %p) {
; CHECK-LABEL: test34
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i16, i16*
  %a = getelementptr inbounds i16, i16* %p, i32 -512
  %v = load i16, i16* %a
  ret i16 %v
}

define i32 @test35(i32* %p) {
; CHECK-LABEL: test35
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i32, i32*
  %a = getelementptr inbounds i32, i32* %p, i32 -512
  %v = load i32, i32* %a
  ret i32 %v
}

define i64 @test36(i64* %p) {
; CHECK-LABEL: test36
; CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i64, i64*
  %a = getelementptr inbounds i64, i64* %p, i32 -512
  %v = load i64, i64* %a
  ret i64 %v
}
@@ -1,26 +0,0 @@
; RUN: opt < %s -cost-model -analyze -mcpu=kryo | FileCheck %s

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnu"

; CHECK-LABEL: vectorInstrCost
define void @vectorInstrCost() {

  ; Vector extracts - extracting the first element should have a zero cost;
  ; all other elements should have a cost of two.
  ;
  ; CHECK: cost of 0 {{.*}} extractelement <2 x i64> undef, i32 0
  ; CHECK: cost of 2 {{.*}} extractelement <2 x i64> undef, i32 1
  %t1 = extractelement <2 x i64> undef, i32 0
  %t2 = extractelement <2 x i64> undef, i32 1

  ; Vector inserts - inserting the first element should have a zero cost; all
  ; other elements should have a cost of two.
  ;
  ; CHECK: cost of 0 {{.*}} insertelement <2 x i64> undef, i64 undef, i32 0
  ; CHECK: cost of 2 {{.*}} insertelement <2 x i64> undef, i64 undef, i32 1
  %t3 = insertelement <2 x i64> undef, i64 undef, i32 0
  %t4 = insertelement <2 x i64> undef, i64 undef, i32 1

  ret void
}
@@ -1,2 +0,0 @@
if not 'AArch64' in config.root.targets:
    config.unsupported = True
@@ -1,38 +0,0 @@
; RUN: opt < %s -cost-model -analyze -mtriple=arm64-apple-ios -mcpu=cyclone | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"

; CHECK-LABEL: select
define void @select() {
  ; Scalar values
  ; CHECK: cost of 1 {{.*}} select
  %v1 = select i1 undef, i8 undef, i8 undef
  ; CHECK: cost of 1 {{.*}} select
  %v2 = select i1 undef, i16 undef, i16 undef
  ; CHECK: cost of 1 {{.*}} select
  %v3 = select i1 undef, i32 undef, i32 undef
  ; CHECK: cost of 1 {{.*}} select
  %v4 = select i1 undef, i64 undef, i64 undef
  ; CHECK: cost of 1 {{.*}} select
  %v5 = select i1 undef, float undef, float undef
  ; CHECK: cost of 1 {{.*}} select
  %v6 = select i1 undef, double undef, double undef

  ; CHECK: cost of 16 {{.*}} select
  %v13b = select <16 x i1> undef, <16 x i16> undef, <16 x i16> undef

  ; CHECK: cost of 8 {{.*}} select
  %v15b = select <8 x i1> undef, <8 x i32> undef, <8 x i32> undef
  ; CHECK: cost of 16 {{.*}} select
  %v15c = select <16 x i1> undef, <16 x i32> undef, <16 x i32> undef

  ; Vector values - check for vectors of i64s that have a high cost because
  ; they end up scalarized.
  ; CHECK: cost of 80 {{.*}} select
  %v16a = select <4 x i1> undef, <4 x i64> undef, <4 x i64> undef
  ; CHECK: cost of 160 {{.*}} select
  %v16b = select <8 x i1> undef, <8 x i64> undef, <8 x i64> undef
  ; CHECK: cost of 320 {{.*}} select
  %v16c = select <16 x i1> undef, <16 x i64> undef, <16 x i64> undef

  ret void
}
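A side note on the numbers in that last hunk: the i64-vector select costs scale linearly with lane count (80 for 4 lanes, 160 for 8, 320 for 16, i.e. a constant 20 per lane), which is consistent with the file's own comment that these selects end up scalarized rather than lowered as single vector operations.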
@@ -1,70 +0,0 @@
; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown | FileCheck %s
; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE

target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
; CHECK-LABEL: getMemoryOpCost
; SLOW_MISALIGNED_128_STORE-LABEL: getMemoryOpCost
define void @getMemoryOpCost() {
  ; If FeatureSlowMisaligned128Store is set, we penalize 128-bit stores.
  ; The unlegalized 256-bit stores are further penalized when legalized down
  ; to 128-bit stores.

  ; CHECK: cost of 2 for {{.*}} store <4 x i64>
  ; SLOW_MISALIGNED_128_STORE: cost of 24 for {{.*}} store <4 x i64>
  store <4 x i64> undef, <4 x i64> * undef
  ; CHECK-NEXT: cost of 2 for {{.*}} store <8 x i32>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <8 x i32>
  store <8 x i32> undef, <8 x i32> * undef
  ; CHECK-NEXT: cost of 2 for {{.*}} store <16 x i16>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <16 x i16>
  store <16 x i16> undef, <16 x i16> * undef
  ; CHECK-NEXT: cost of 2 for {{.*}} store <32 x i8>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <32 x i8>
  store <32 x i8> undef, <32 x i8> * undef

  ; CHECK-NEXT: cost of 2 for {{.*}} store <4 x double>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <4 x double>
  store <4 x double> undef, <4 x double> * undef
  ; CHECK-NEXT: cost of 2 for {{.*}} store <8 x float>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <8 x float>
  store <8 x float> undef, <8 x float> * undef
  ; CHECK-NEXT: cost of 2 for {{.*}} store <16 x half>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <16 x half>
  store <16 x half> undef, <16 x half> * undef

  ; CHECK-NEXT: cost of 1 for {{.*}} store <2 x i64>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <2 x i64>
  store <2 x i64> undef, <2 x i64> * undef
  ; CHECK-NEXT: cost of 1 for {{.*}} store <4 x i32>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <4 x i32>
  store <4 x i32> undef, <4 x i32> * undef
  ; CHECK-NEXT: cost of 1 for {{.*}} store <8 x i16>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <8 x i16>
  store <8 x i16> undef, <8 x i16> * undef
  ; CHECK-NEXT: cost of 1 for {{.*}} store <16 x i8>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <16 x i8>
  store <16 x i8> undef, <16 x i8> * undef

  ; CHECK-NEXT: cost of 1 for {{.*}} store <2 x double>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <2 x double>
  store <2 x double> undef, <2 x double> * undef
  ; CHECK-NEXT: cost of 1 for {{.*}} store <4 x float>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <4 x float>
  store <4 x float> undef, <4 x float> * undef
  ; CHECK-NEXT: cost of 1 for {{.*}} store <8 x half>
  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <8 x half>
  store <8 x half> undef, <8 x half> * undef

  ; We scalarize the loads/stores because there is no vector register name for
  ; these types (they get extended to v.4h/v.2s).
  ; CHECK: cost of 16 {{.*}} store
  store <2 x i8> undef, <2 x i8> * undef
  ; CHECK: cost of 64 {{.*}} store
  store <4 x i8> undef, <4 x i8> * undef
  ; CHECK: cost of 16 {{.*}} load
  load <2 x i8> , <2 x i8> * undef
  ; CHECK: cost of 64 {{.*}} load
  load <4 x i8> , <4 x i8> * undef

  ret void
}
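The two cost columns in this hunk are internally consistent: with slow-misaligned-128store each 128-bit store is charged 12 instead of 1, and each 256-bit store is charged 24 instead of 2, matching the test's comment that the 256-bit types legalize down to two 128-bit stores (2 x 12 = 24).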
@@ -1,138 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck %s

; CHECK: 'add_i32'
; CHECK: estimated cost of 1 for {{.*}} add i32
define amdgpu_kernel void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
  %vec = load i32, i32 addrspace(1)* %vaddr
  %add = add i32 %vec, %b
  store i32 %add, i32 addrspace(1)* %out
  ret void
}

; CHECK: 'add_v2i32'
; CHECK: estimated cost of 2 for {{.*}} add <2 x i32>
define amdgpu_kernel void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
  %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
  %add = add <2 x i32> %vec, %b
  store <2 x i32> %add, <2 x i32> addrspace(1)* %out
  ret void
}

; CHECK: 'add_v3i32'
; CHECK: estimated cost of 3 for {{.*}} add <3 x i32>
define amdgpu_kernel void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
  %add = add <3 x i32> %vec, %b
  store <3 x i32> %add, <3 x i32> addrspace(1)* %out
  ret void
}

; CHECK: 'add_v4i32'
; CHECK: estimated cost of 4 for {{.*}} add <4 x i32>
define amdgpu_kernel void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
  %add = add <4 x i32> %vec, %b
  store <4 x i32> %add, <4 x i32> addrspace(1)* %out
  ret void
}

; CHECK: 'add_i64'
; CHECK: estimated cost of 2 for {{.*}} add i64
define amdgpu_kernel void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
  %vec = load i64, i64 addrspace(1)* %vaddr
  %add = add i64 %vec, %b
  store i64 %add, i64 addrspace(1)* %out
  ret void
}

; CHECK: 'add_v2i64'
; CHECK: estimated cost of 4 for {{.*}} add <2 x i64>
define amdgpu_kernel void @add_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
  %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
  %add = add <2 x i64> %vec, %b
  store <2 x i64> %add, <2 x i64> addrspace(1)* %out
  ret void
}

; CHECK: 'add_v3i64'
; CHECK: estimated cost of 6 for {{.*}} add <3 x i64>
define amdgpu_kernel void @add_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
  %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
  %add = add <3 x i64> %vec, %b
  store <3 x i64> %add, <3 x i64> addrspace(1)* %out
  ret void
}

; CHECK: 'add_v4i64'
; CHECK: estimated cost of 8 for {{.*}} add <4 x i64>
define amdgpu_kernel void @add_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
  %vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
  %add = add <4 x i64> %vec, %b
  store <4 x i64> %add, <4 x i64> addrspace(1)* %out
  ret void
}

; CHECK: 'add_v16i64'
; CHECK: estimated cost of 32 for {{.*}} add <16 x i64>
define amdgpu_kernel void @add_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %vaddr, <16 x i64> %b) #0 {
  %vec = load <16 x i64>, <16 x i64> addrspace(1)* %vaddr
  %add = add <16 x i64> %vec, %b
  store <16 x i64> %add, <16 x i64> addrspace(1)* %out
  ret void
}

; CHECK: 'add_i16'
; CHECK: estimated cost of 1 for {{.*}} add i16
define amdgpu_kernel void @add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
  %vec = load i16, i16 addrspace(1)* %vaddr
  %add = add i16 %vec, %b
  store i16 %add, i16 addrspace(1)* %out
  ret void
}

; CHECK: 'add_v2i16'
; CHECK: estimated cost of 2 for {{.*}} add <2 x i16>
define amdgpu_kernel void @add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
  %add = add <2 x i16> %vec, %b
  store <2 x i16> %add, <2 x i16> addrspace(1)* %out
  ret void
}

; CHECK: 'sub_i32'
; CHECK: estimated cost of 1 for {{.*}} sub i32
define amdgpu_kernel void @sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
  %vec = load i32, i32 addrspace(1)* %vaddr
  %sub = sub i32 %vec, %b
  store i32 %sub, i32 addrspace(1)* %out
  ret void
}

; CHECK: 'sub_i64'
; CHECK: estimated cost of 2 for {{.*}} sub i64
define amdgpu_kernel void @sub_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
  %vec = load i64, i64 addrspace(1)* %vaddr
  %sub = sub i64 %vec, %b
  store i64 %sub, i64 addrspace(1)* %out
  ret void
}
; CHECK: 'sub_i16'
; CHECK: estimated cost of 1 for {{.*}} sub i16
define amdgpu_kernel void @sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
  %vec = load i16, i16 addrspace(1)* %vaddr
  %sub = sub i16 %vec, %b
  store i16 %sub, i16 addrspace(1)* %out
  ret void
}

; CHECK: 'sub_v2i16'
; CHECK: estimated cost of 2 for {{.*}} sub <2 x i16>
define amdgpu_kernel void @sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
  %sub = sub <2 x i16> %vec, %b
  store <2 x i16> %sub, <2 x i16> addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind }
@@ -1,45 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri < %s | FileCheck %s

; CHECK: 'addrspacecast_global_to_flat'
; CHECK: estimated cost of 0 for {{.*}} addrspacecast i8 addrspace(1)* %ptr to i8 addrspace(4)*
define i8 addrspace(4)* @addrspacecast_global_to_flat(i8 addrspace(1)* %ptr) #0 {
  %cast = addrspacecast i8 addrspace(1)* %ptr to i8 addrspace(4)*
  ret i8 addrspace(4)* %cast
}

; CHECK: 'addrspacecast_global_to_flat_v2'
; CHECK: estimated cost of 0 for {{.*}} addrspacecast <2 x i8 addrspace(1)*> %ptr to <2 x i8 addrspace(4)*>
define <2 x i8 addrspace(4)*> @addrspacecast_global_to_flat_v2(<2 x i8 addrspace(1)*> %ptr) #0 {
  %cast = addrspacecast <2 x i8 addrspace(1)*> %ptr to <2 x i8 addrspace(4)*>
  ret <2 x i8 addrspace(4)*> %cast
}

; CHECK: 'addrspacecast_global_to_flat_v32'
; CHECK: estimated cost of 0 for {{.*}} addrspacecast <32 x i8 addrspace(1)*> %ptr to <32 x i8 addrspace(4)*>
define <32 x i8 addrspace(4)*> @addrspacecast_global_to_flat_v32(<32 x i8 addrspace(1)*> %ptr) #0 {
  %cast = addrspacecast <32 x i8 addrspace(1)*> %ptr to <32 x i8 addrspace(4)*>
  ret <32 x i8 addrspace(4)*> %cast
}

; CHECK: 'addrspacecast_local_to_flat'
; CHECK: estimated cost of 1 for {{.*}} addrspacecast i8 addrspace(3)* %ptr to i8 addrspace(4)*
define i8 addrspace(4)* @addrspacecast_local_to_flat(i8 addrspace(3)* %ptr) #0 {
  %cast = addrspacecast i8 addrspace(3)* %ptr to i8 addrspace(4)*
  ret i8 addrspace(4)* %cast
}

; CHECK: 'addrspacecast_local_to_flat_v2'
; CHECK: estimated cost of 2 for {{.*}} addrspacecast <2 x i8 addrspace(3)*> %ptr to <2 x i8 addrspace(4)*>
define <2 x i8 addrspace(4)*> @addrspacecast_local_to_flat_v2(<2 x i8 addrspace(3)*> %ptr) #0 {
  %cast = addrspacecast <2 x i8 addrspace(3)*> %ptr to <2 x i8 addrspace(4)*>
  ret <2 x i8 addrspace(4)*> %cast
}

; CHECK: 'addrspacecast_local_to_flat_v32'
; CHECK: estimated cost of 32 for {{.*}} addrspacecast <32 x i8 addrspace(3)*> %ptr to <32 x i8 addrspace(4)*>
define <32 x i8 addrspace(4)*> @addrspacecast_local_to_flat_v32(<32 x i8 addrspace(3)*> %ptr) #0 {
  %cast = addrspacecast <32 x i8 addrspace(3)*> %ptr to <32 x i8 addrspace(4)*>
  ret <32 x i8 addrspace(4)*> %cast
}

attributes #0 = { nounwind readnone }
@@ -1,59 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s

; CHECK: 'or_i32'
; CHECK: estimated cost of 1 for {{.*}} or i32
define amdgpu_kernel void @or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
  %vec = load i32, i32 addrspace(1)* %vaddr
  %or = or i32 %vec, %b
  store i32 %or, i32 addrspace(1)* %out
  ret void
}

; CHECK: 'or_i64'
; CHECK: estimated cost of 2 for {{.*}} or i64
define amdgpu_kernel void @or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
  %vec = load i64, i64 addrspace(1)* %vaddr
  %or = or i64 %vec, %b
  store i64 %or, i64 addrspace(1)* %out
  ret void
}

; CHECK: 'xor_i32'
; CHECK: estimated cost of 1 for {{.*}} xor i32
define amdgpu_kernel void @xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
  %vec = load i32, i32 addrspace(1)* %vaddr
  %or = xor i32 %vec, %b
  store i32 %or, i32 addrspace(1)* %out
  ret void
}

; CHECK: 'xor_i64'
; CHECK: estimated cost of 2 for {{.*}} xor i64
define amdgpu_kernel void @xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
  %vec = load i64, i64 addrspace(1)* %vaddr
  %or = xor i64 %vec, %b
  store i64 %or, i64 addrspace(1)* %out
  ret void
}


; CHECK: 'and_i32'
; CHECK: estimated cost of 1 for {{.*}} and i32
define amdgpu_kernel void @and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
  %vec = load i32, i32 addrspace(1)* %vaddr
  %or = and i32 %vec, %b
  store i32 %or, i32 addrspace(1)* %out
  ret void
}

; CHECK: 'and_i64'
; CHECK: estimated cost of 2 for {{.*}} and i64
define amdgpu_kernel void @and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
  %vec = load i64, i64 addrspace(1)* %vaddr
  %or = and i64 %vec, %b
  store i64 %or, i64 addrspace(1)* %out
  ret void
}


attributes #0 = { nounwind }
@@ -1,45 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s

; CHECK: 'test_br_cost'
; CHECK: estimated cost of 10 for instruction: br i1
; CHECK: estimated cost of 10 for instruction: br label
; CHECK: estimated cost of 10 for instruction: ret void
define amdgpu_kernel void @test_br_cost(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
bb0:
  br i1 undef, label %bb1, label %bb2

bb1:
  %vec = load i32, i32 addrspace(1)* %vaddr
  %add = add i32 %vec, %b
  store i32 %add, i32 addrspace(1)* %out
  br label %bb2

bb2:
  ret void

}

; CHECK: 'test_switch_cost'
; CHECK: Unknown cost for instruction: switch
define amdgpu_kernel void @test_switch_cost(i32 %a) #0 {
entry:
  switch i32 %a, label %default [
    i32 0, label %case0
    i32 1, label %case1
  ]

case0:
  store volatile i32 undef, i32 addrspace(1)* undef
  ret void

case1:
  store volatile i32 undef, i32 addrspace(1)* undef
  ret void

default:
  store volatile i32 undef, i32 addrspace(1)* undef
  ret void

end:
  ret void
}
@@ -1,132 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa %s | FileCheck -check-prefixes=GCN,CI %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=fiji %s | FileCheck -check-prefixes=GCN,VI %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 %s | FileCheck -check-prefixes=GCN,GFX9 %s

; GCN: 'extractelement_v2i32'
; GCN: estimated cost of 0 for {{.*}} extractelement <2 x i32>
define amdgpu_kernel void @extractelement_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) {
  %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
  %elt = extractelement <2 x i32> %vec, i32 1
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}

; GCN: 'extractelement_v2f32'
; GCN: estimated cost of 0 for {{.*}} extractelement <2 x float>
define amdgpu_kernel void @extractelement_v2f32(float addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) {
  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
  %elt = extractelement <2 x float> %vec, i32 1
  store float %elt, float addrspace(1)* %out
  ret void
}

; GCN: 'extractelement_v3i32'
; GCN: estimated cost of 0 for {{.*}} extractelement <3 x i32>
define amdgpu_kernel void @extractelement_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr) {
  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
  %elt = extractelement <3 x i32> %vec, i32 1
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}

; GCN: 'extractelement_v4i32'
; GCN: estimated cost of 0 for {{.*}} extractelement <4 x i32>
define amdgpu_kernel void @extractelement_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr) {
  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
  %elt = extractelement <4 x i32> %vec, i32 1
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}

; GCN: 'extractelement_v8i32'
; GCN: estimated cost of 0 for {{.*}} extractelement <8 x i32>
define amdgpu_kernel void @extractelement_v8i32(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr) {
  %vec = load <8 x i32>, <8 x i32> addrspace(1)* %vaddr
  %elt = extractelement <8 x i32> %vec, i32 1
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}

; FIXME: Should be non-0
; GCN: 'extractelement_v8i32_dynindex'
; GCN: estimated cost of 2 for {{.*}} extractelement <8 x i32>
define amdgpu_kernel void @extractelement_v8i32_dynindex(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr, i32 %idx) {
  %vec = load <8 x i32>, <8 x i32> addrspace(1)* %vaddr
  %elt = extractelement <8 x i32> %vec, i32 %idx
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}

; GCN: 'extractelement_v2i64'
; GCN: estimated cost of 0 for {{.*}} extractelement <2 x i64>
define amdgpu_kernel void @extractelement_v2i64(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) {
  %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
  %elt = extractelement <2 x i64> %vec, i64 1
  store i64 %elt, i64 addrspace(1)* %out
  ret void
}

; GCN: 'extractelement_v3i64'
; GCN: estimated cost of 0 for {{.*}} extractelement <3 x i64>
define amdgpu_kernel void @extractelement_v3i64(i64 addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr) {
  %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
  %elt = extractelement <3 x i64> %vec, i64 1
  store i64 %elt, i64 addrspace(1)* %out
  ret void
}

; GCN: 'extractelement_v4i64'
; GCN: estimated cost of 0 for {{.*}} extractelement <4 x i64>
define amdgpu_kernel void @extractelement_v4i64(i64 addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr) {
  %vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
  %elt = extractelement <4 x i64> %vec, i64 1
  store i64 %elt, i64 addrspace(1)* %out
  ret void
}

; GCN: 'extractelement_v8i64'
; GCN: estimated cost of 0 for {{.*}} extractelement <8 x i64>
define amdgpu_kernel void @extractelement_v8i64(i64 addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr) {
  %vec = load <8 x i64>, <8 x i64> addrspace(1)* %vaddr
  %elt = extractelement <8 x i64> %vec, i64 1
  store i64 %elt, i64 addrspace(1)* %out
  ret void
}

; GCN: 'extractelement_v4i8'
; GCN: estimated cost of 1 for {{.*}} extractelement <4 x i8>
define amdgpu_kernel void @extractelement_v4i8(i8 addrspace(1)* %out, <4 x i8> addrspace(1)* %vaddr) {
  %vec = load <4 x i8>, <4 x i8> addrspace(1)* %vaddr
  %elt = extractelement <4 x i8> %vec, i8 1
  store i8 %elt, i8 addrspace(1)* %out
  ret void
}

; GCN: 'extractelement_0_v2i16':
; CI: estimated cost of 1 for {{.*}} extractelement <2 x i16> %vec, i16 0
; VI: estimated cost of 0 for {{.*}} extractelement <2 x i16>
; GFX9: estimated cost of 0 for {{.*}} extractelement <2 x i16>
define amdgpu_kernel void @extractelement_0_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
  %elt = extractelement <2 x i16> %vec, i16 0
  store i16 %elt, i16 addrspace(1)* %out
  ret void
}

; GCN: 'extractelement_1_v2i16':
; GCN: estimated cost of 1 for {{.*}} extractelement <2 x i16>
define amdgpu_kernel void @extractelement_1_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
  %elt = extractelement <2 x i16> %vec, i16 1
  store i16 %elt, i16 addrspace(1)* %out
  ret void
}

; GCN: 'extractelement_var_v2i16'
; GCN: estimated cost of 1 for {{.*}} extractelement <2 x i16>
define amdgpu_kernel void @extractelement_var_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, i32 %idx) {
  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
  %elt = extractelement <2 x i16> %vec, i32 %idx
  store i16 %elt, i16 addrspace(1)* %out
  ret void
}
@@ -1,97 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s

; CHECK: 'fabs_f32'
; CHECK: estimated cost of 0 for {{.*}} call float @llvm.fabs.f32
define amdgpu_kernel void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
  %vec = load float, float addrspace(1)* %vaddr
  %fabs = call float @llvm.fabs.f32(float %vec) #1
  store float %fabs, float addrspace(1)* %out
  ret void
}

; CHECK: 'fabs_v2f32'
; CHECK: estimated cost of 0 for {{.*}} call <2 x float> @llvm.fabs.v2f32
define amdgpu_kernel void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 {
  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
  %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %vec) #1
  store <2 x float> %fabs, <2 x float> addrspace(1)* %out
  ret void
}

; CHECK: 'fabs_v3f32'
; CHECK: estimated cost of 0 for {{.*}} call <3 x float> @llvm.fabs.v3f32
define amdgpu_kernel void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr) #0 {
  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
  %fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %vec) #1
  store <3 x float> %fabs, <3 x float> addrspace(1)* %out
  ret void
}

; CHECK: 'fabs_f64'
; CHECK: estimated cost of 0 for {{.*}} call double @llvm.fabs.f64
define amdgpu_kernel void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 {
  %vec = load double, double addrspace(1)* %vaddr
  %fabs = call double @llvm.fabs.f64(double %vec) #1
  store double %fabs, double addrspace(1)* %out
  ret void
}

; CHECK: 'fabs_v2f64'
; CHECK: estimated cost of 0 for {{.*}} call <2 x double> @llvm.fabs.v2f64
define amdgpu_kernel void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr) #0 {
  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
  %fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %vec) #1
  store <2 x double> %fabs, <2 x double> addrspace(1)* %out
  ret void
}

; CHECK: 'fabs_v3f64'
; CHECK: estimated cost of 0 for {{.*}} call <3 x double> @llvm.fabs.v3f64
define amdgpu_kernel void @fabs_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr) #0 {
  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
  %fabs = call <3 x double> @llvm.fabs.v3f64(<3 x double> %vec) #1
  store <3 x double> %fabs, <3 x double> addrspace(1)* %out
  ret void
}

; CHECK: 'fabs_f16'
; CHECK: estimated cost of 0 for {{.*}} call half @llvm.fabs.f16
define amdgpu_kernel void @fabs_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 {
  %vec = load half, half addrspace(1)* %vaddr
  %fabs = call half @llvm.fabs.f16(half %vec) #1
  store half %fabs, half addrspace(1)* %out
  ret void
}

; CHECK: 'fabs_v2f16'
; CHECK: estimated cost of 0 for {{.*}} call <2 x half> @llvm.fabs.v2f16
define amdgpu_kernel void @fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr) #0 {
  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
  %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %vec) #1
  store <2 x half> %fabs, <2 x half> addrspace(1)* %out
  ret void
}

; CHECK: 'fabs_v3f16'
; CHECK: estimated cost of 0 for {{.*}} call <3 x half> @llvm.fabs.v3f16
define amdgpu_kernel void @fabs_v3f16(<3 x half> addrspace(1)* %out, <3 x half> addrspace(1)* %vaddr) #0 {
  %vec = load <3 x half>, <3 x half> addrspace(1)* %vaddr
  %fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %vec) #1
  store <3 x half> %fabs, <3 x half> addrspace(1)* %out
  ret void
}

declare float @llvm.fabs.f32(float) #1
declare <2 x float> @llvm.fabs.v2f32(<2 x float>) #1
declare <3 x float> @llvm.fabs.v3f32(<3 x float>) #1

declare double @llvm.fabs.f64(double) #1
declare <2 x double> @llvm.fabs.v2f64(<2 x double>) #1
declare <3 x double> @llvm.fabs.v3f64(<3 x double>) #1

declare half @llvm.fabs.f16(half) #1
declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #1
declare <3 x half> @llvm.fabs.v3f16(<3 x half>) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
@@ -1,88 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FASTF64 -check-prefix=ALL %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=SLOWF64 -check-prefix=ALL %s

; ALL: 'fadd_f32'
; ALL: estimated cost of 1 for {{.*}} fadd float
define amdgpu_kernel void @fadd_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
  %vec = load float, float addrspace(1)* %vaddr
  %add = fadd float %vec, %b
  store float %add, float addrspace(1)* %out
  ret void
}

; ALL: 'fadd_v2f32'
; ALL: estimated cost of 2 for {{.*}} fadd <2 x float>
define amdgpu_kernel void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
  %add = fadd <2 x float> %vec, %b
  store <2 x float> %add, <2 x float> addrspace(1)* %out
  ret void
}

; ALL: 'fadd_v3f32'
; ALL: estimated cost of 3 for {{.*}} fadd <3 x float>
define amdgpu_kernel void @fadd_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
  %add = fadd <3 x float> %vec, %b
  store <3 x float> %add, <3 x float> addrspace(1)* %out
  ret void
}

; ALL: 'fadd_f64'
; FASTF64: estimated cost of 2 for {{.*}} fadd double
; SLOWF64: estimated cost of 3 for {{.*}} fadd double
define amdgpu_kernel void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
  %vec = load double, double addrspace(1)* %vaddr
  %add = fadd double %vec, %b
  store double %add, double addrspace(1)* %out
  ret void
}

; ALL: 'fadd_v2f64'
; FASTF64: estimated cost of 4 for {{.*}} fadd <2 x double>
; SLOWF64: estimated cost of 6 for {{.*}} fadd <2 x double>
define amdgpu_kernel void @fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
  %add = fadd <2 x double> %vec, %b
  store <2 x double> %add, <2 x double> addrspace(1)* %out
  ret void
}

; ALL: 'fadd_v3f64'
; FASTF64: estimated cost of 6 for {{.*}} fadd <3 x double>
; SLOWF64: estimated cost of 9 for {{.*}} fadd <3 x double>
define amdgpu_kernel void @fadd_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
  %add = fadd <3 x double> %vec, %b
  store <3 x double> %add, <3 x double> addrspace(1)* %out
  ret void
}

; ALL 'fadd_f16'
; ALL estimated cost of 1 for {{.*}} fadd half
define amdgpu_kernel void @fadd_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
  %vec = load half, half addrspace(1)* %vaddr
  %add = fadd half %vec, %b
  store half %add, half addrspace(1)* %out
  ret void
}

; ALL 'fadd_v2f16'
; ALL estimated cost of 2 for {{.*}} fadd <2 x half>
define amdgpu_kernel void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
  %add = fadd <2 x half> %vec, %b
  store <2 x half> %add, <2 x half> addrspace(1)* %out
  ret void
}

; ALL 'fadd_v4f16'
; ALL estimated cost of 4 for {{.*}} fadd <4 x half>
define amdgpu_kernel void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
  %add = fadd <4 x half> %vec, %b
  store <4 x half> %add, <4 x half> addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind }
external/llvm/test/Analysis/CostModel/AMDGPU/fdiv.ll (vendored, 163 deleted lines)
@@ -1,163 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=hawaii -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=ALL,CIFASTF64,NOFP32DENORM,NOFP16,NOFP16-NOFP32DENORM %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=ALL,CISLOWF64,NOFP32DENORM,NOFP16,NOFP16-NOFP32DENORM %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=tahiti -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=ALL,SIFASTF64,NOFP32DENORM,NOFP16,NOFP16-NOFP32DENORM %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=verde -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=ALL,SISLOWF64,NOFP32DENORM,NOFP16,NOFP16-NOFP32DENORM %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=hawaii -mattr=+fp32-denormals < %s | FileCheck -check-prefixes=ALL,FP32DENORMS,SLOWFP32DENORMS,NOFP16,NOFP16-FP32DENORM %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+fp32-denormals < %s | FileCheck -check-prefixes=ALL,FP32DENORMS,FASTFP32DENORMS,FP16 %s

; ALL: 'fdiv_f32'
; NOFP32DENORM: estimated cost of 12 for {{.*}} fdiv float
; FP32DENORMS: estimated cost of 10 for {{.*}} fdiv float
define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
  %vec = load float, float addrspace(1)* %vaddr
  %add = fdiv float %vec, %b
  store float %add, float addrspace(1)* %out
  ret void
}

; ALL: 'fdiv_v2f32'
; NOFP32DENORM: estimated cost of 24 for {{.*}} fdiv <2 x float>
; FP32DENORMS: estimated cost of 20 for {{.*}} fdiv <2 x float>
define amdgpu_kernel void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
  %add = fdiv <2 x float> %vec, %b
  store <2 x float> %add, <2 x float> addrspace(1)* %out
  ret void
}

; ALL: 'fdiv_v3f32'
; NOFP32DENORM: estimated cost of 36 for {{.*}} fdiv <3 x float>
; FP32DENORMS: estimated cost of 30 for {{.*}} fdiv <3 x float>
define amdgpu_kernel void @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
  %add = fdiv <3 x float> %vec, %b
  store <3 x float> %add, <3 x float> addrspace(1)* %out
  ret void
}

; ALL: 'fdiv_f64'
; CIFASTF64: estimated cost of 29 for {{.*}} fdiv double
; CISLOWF64: estimated cost of 33 for {{.*}} fdiv double
; SIFASTF64: estimated cost of 32 for {{.*}} fdiv double
; SISLOWF64: estimated cost of 36 for {{.*}} fdiv double
define amdgpu_kernel void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
  %vec = load double, double addrspace(1)* %vaddr
  %add = fdiv double %vec, %b
  store double %add, double addrspace(1)* %out
  ret void
}

; ALL: 'fdiv_v2f64'
; CIFASTF64: estimated cost of 58 for {{.*}} fdiv <2 x double>
; CISLOWF64: estimated cost of 66 for {{.*}} fdiv <2 x double>
; SIFASTF64: estimated cost of 64 for {{.*}} fdiv <2 x double>
; SISLOWF64: estimated cost of 72 for {{.*}} fdiv <2 x double>
define amdgpu_kernel void @fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
  %add = fdiv <2 x double> %vec, %b
  store <2 x double> %add, <2 x double> addrspace(1)* %out
  ret void
}

; ALL: 'fdiv_v3f64'
; CIFASTF64: estimated cost of 87 for {{.*}} fdiv <3 x double>
; CISLOWF64: estimated cost of 99 for {{.*}} fdiv <3 x double>
; SIFASTF64: estimated cost of 96 for {{.*}} fdiv <3 x double>
; SISLOWF64: estimated cost of 108 for {{.*}} fdiv <3 x double>
define amdgpu_kernel void @fdiv_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
  %add = fdiv <3 x double> %vec, %b
  store <3 x double> %add, <3 x double> addrspace(1)* %out
  ret void
}

; ALL: 'fdiv_f16'
; NOFP16-NOFP32DENORM: estimated cost of 12 for {{.*}} fdiv half
; NOFP16-FP32DENORM: estimated cost of 10 for {{.*}} fdiv half
; FP16: estimated cost of 10 for {{.*}} fdiv half
define amdgpu_kernel void @fdiv_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
  %vec = load half, half addrspace(1)* %vaddr
  %add = fdiv half %vec, %b
  store half %add, half addrspace(1)* %out
  ret void
}

; ALL: 'fdiv_v2f16'
; NOFP16-NOFP32DENORM: estimated cost of 24 for {{.*}} fdiv <2 x half>
; NOFP16-FP32DENORM: estimated cost of 20 for {{.*}} fdiv <2 x half>
; FP16: estimated cost of 20 for {{.*}} fdiv <2 x half>
define amdgpu_kernel void @fdiv_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
  %add = fdiv <2 x half> %vec, %b
  store <2 x half> %add, <2 x half> addrspace(1)* %out
  ret void
}

; ALL: 'fdiv_v4f16'
; NOFP16-NOFP32DENORM: estimated cost of 48 for {{.*}} fdiv <4 x half>
; NOFP16-FP32DENORM: estimated cost of 40 for {{.*}} fdiv <4 x half>
; FP16: estimated cost of 40 for {{.*}} fdiv <4 x half>
define amdgpu_kernel void @fdiv_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
  %add = fdiv <4 x half> %vec, %b
  store <4 x half> %add, <4 x half> addrspace(1)* %out
  ret void
}

; ALL: 'rcp_f32'
; NOFP32DENORM: estimated cost of 3 for {{.*}} fdiv float
; SLOWFP32DENORMS: estimated cost of 10 for {{.*}} fdiv float
; FASTFP32DENORMS: estimated cost of 10 for {{.*}} fdiv float
define amdgpu_kernel void @rcp_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
  %vec = load float, float addrspace(1)* %vaddr
  %add = fdiv float 1.0, %vec
  store float %add, float addrspace(1)* %out
  ret void
}

; ALL: 'rcp_f16'
; NOFP16-NOFP32DENORM: estimated cost of 3 for {{.*}} fdiv half
; NOFP16-FP32DENORM: estimated cost of 10 for {{.*}} fdiv half
; FP16: estimated cost of 3 for {{.*}} fdiv half
define amdgpu_kernel void @rcp_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 {
  %vec = load half, half addrspace(1)* %vaddr
  %add = fdiv half 1.0, %vec
  store half %add, half addrspace(1)* %out
  ret void
}

; ALL: 'rcp_f64'
; CIFASTF64: estimated cost of 29 for {{.*}} fdiv double
; CISLOWF64: estimated cost of 33 for {{.*}} fdiv double
; SIFASTF64: estimated cost of 32 for {{.*}} fdiv double
; SISLOWF64: estimated cost of 36 for {{.*}} fdiv double
define amdgpu_kernel void @rcp_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 {
  %vec = load double, double addrspace(1)* %vaddr
  %add = fdiv double 1.0, %vec
  store double %add, double addrspace(1)* %out
  ret void
}

; ALL: 'rcp_v2f32'
; NOFP32DENORM: estimated cost of 6 for {{.*}} fdiv <2 x float>
; SLOWFP32DENORMS: estimated cost of 20 for {{.*}} fdiv <2 x float>
; FASTFP32DENORMS: estimated cost of 20 for {{.*}} fdiv <2 x float>
define amdgpu_kernel void @rcp_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 {
  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
  %add = fdiv <2 x float> <float 1.0, float 1.0>, %vec
  store <2 x float> %add, <2 x float> addrspace(1)* %out
  ret void
}

; ALL: 'rcp_v2f16'
; NOFP16-NOFP32DENORM: estimated cost of 6 for {{.*}} fdiv <2 x half>
; NOFP16-FP32DENORM: estimated cost of 20 for {{.*}} fdiv <2 x half>
; FP16: estimated cost of 6 for {{.*}} fdiv <2 x half>
define amdgpu_kernel void @rcp_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr) #0 {
  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
  %add = fdiv <2 x half> <half 1.0, half 1.0>, %vec
  store <2 x half> %add, <2 x half> addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind }
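The rcp_* functions in that file cover the special case of a division whose numerator is the constant 1.0: when float denormals are not enabled (and for f16 on FP16-capable targets), the estimated cost drops from 10 or 12 to 3 per element, presumably because the division can then lower to the hardware reciprocal instruction that the rcp_ naming alludes to.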
@ -1,88 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FASTF64 -check-prefix=ALL %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=SLOWF64 -check-prefix=ALL %s

; ALL: 'fmul_f32'
; ALL: estimated cost of 1 for {{.*}} fmul float
define amdgpu_kernel void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
%vec = load float, float addrspace(1)* %vaddr
%add = fmul float %vec, %b
store float %add, float addrspace(1)* %out
ret void
}
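; Full-rate f32 ops get the baseline cost of 1, and the vector costs below
; presumably scale linearly with element count, since GCN executes vector IR
; element-wise.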

; ALL: 'fmul_v2f32'
; ALL: estimated cost of 2 for {{.*}} fmul <2 x float>
define amdgpu_kernel void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
%vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
%add = fmul <2 x float> %vec, %b
store <2 x float> %add, <2 x float> addrspace(1)* %out
ret void
}

; ALL: 'fmul_v3f32'
; ALL: estimated cost of 3 for {{.*}} fmul <3 x float>
define amdgpu_kernel void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
%vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
%add = fmul <3 x float> %vec, %b
store <3 x float> %add, <3 x float> addrspace(1)* %out
ret void
}

; ALL: 'fmul_f64'
; FASTF64: estimated cost of 2 for {{.*}} fmul double
; SLOWF64: estimated cost of 3 for {{.*}} fmul double
define amdgpu_kernel void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
%vec = load double, double addrspace(1)* %vaddr
%add = fmul double %vec, %b
store double %add, double addrspace(1)* %out
ret void
}
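; The FASTF64/SLOWF64 split mirrors the +/-half-rate-64-ops RUN lines above:
; double-precision multiplies cost 2 on half-rate parts and 3 otherwise.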

; ALL: 'fmul_v2f64'
; FASTF64: estimated cost of 4 for {{.*}} fmul <2 x double>
; SLOWF64: estimated cost of 6 for {{.*}} fmul <2 x double>
define amdgpu_kernel void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
%vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
%add = fmul <2 x double> %vec, %b
store <2 x double> %add, <2 x double> addrspace(1)* %out
ret void
}

; ALL: 'fmul_v3f64'
; FASTF64: estimated cost of 6 for {{.*}} fmul <3 x double>
; SLOWF64: estimated cost of 9 for {{.*}} fmul <3 x double>
define amdgpu_kernel void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
%vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
%add = fmul <3 x double> %vec, %b
store <3 x double> %add, <3 x double> addrspace(1)* %out
ret void
}

; ALL: 'fmul_f16'
; ALL: estimated cost of 1 for {{.*}} fmul half
define amdgpu_kernel void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
%vec = load half, half addrspace(1)* %vaddr
%add = fmul half %vec, %b
store half %add, half addrspace(1)* %out
ret void
}

; ALL: 'fmul_v2f16'
; ALL: estimated cost of 2 for {{.*}} fmul <2 x half>
define amdgpu_kernel void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
%vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
%add = fmul <2 x half> %vec, %b
store <2 x half> %add, <2 x half> addrspace(1)* %out
ret void
}

; ALL: 'fmul_v4f16'
; ALL: estimated cost of 4 for {{.*}} fmul <4 x half>
define amdgpu_kernel void @fmul_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
%vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
%add = fmul <4 x half> %vec, %b
store <4 x half> %add, <4 x half> addrspace(1)* %out
ret void
}

attributes #0 = { nounwind }
@ -1,86 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FASTF64 -check-prefix=ALL %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=SLOWF64 -check-prefix=ALL %s

; ALL: 'fsub_f32'
; ALL: estimated cost of 1 for {{.*}} fsub float
define amdgpu_kernel void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
%vec = load float, float addrspace(1)* %vaddr
%add = fsub float %vec, %b
store float %add, float addrspace(1)* %out
ret void
}

; ALL: 'fsub_v2f32'
; ALL: estimated cost of 2 for {{.*}} fsub <2 x float>
define amdgpu_kernel void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
%vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
%add = fsub <2 x float> %vec, %b
store <2 x float> %add, <2 x float> addrspace(1)* %out
ret void
}

; ALL: 'fsub_v3f32'
; ALL: estimated cost of 3 for {{.*}} fsub <3 x float>
define amdgpu_kernel void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
%vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
%add = fsub <3 x float> %vec, %b
store <3 x float> %add, <3 x float> addrspace(1)* %out
ret void
}

; ALL: 'fsub_f64'
; FASTF64: estimated cost of 2 for {{.*}} fsub double
; SLOWF64: estimated cost of 3 for {{.*}} fsub double
define amdgpu_kernel void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
%vec = load double, double addrspace(1)* %vaddr
%add = fsub double %vec, %b
store double %add, double addrspace(1)* %out
ret void
}

; ALL: 'fsub_v2f64'
; FASTF64: estimated cost of 4 for {{.*}} fsub <2 x double>
; SLOWF64: estimated cost of 6 for {{.*}} fsub <2 x double>
define amdgpu_kernel void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
%vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
%add = fsub <2 x double> %vec, %b
store <2 x double> %add, <2 x double> addrspace(1)* %out
ret void
}

; ALL: 'fsub_v3f64'
; FASTF64: estimated cost of 6 for {{.*}} fsub <3 x double>
; SLOWF64: estimated cost of 9 for {{.*}} fsub <3 x double>
define amdgpu_kernel void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
%vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
%add = fsub <3 x double> %vec, %b
store <3 x double> %add, <3 x double> addrspace(1)* %out
ret void
}

; ALL: 'fsub_f16'
; ALL: estimated cost of 1 for {{.*}} fsub half
define amdgpu_kernel void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
%vec = load half, half addrspace(1)* %vaddr
%add = fsub half %vec, %b
store half %add, half addrspace(1)* %out
ret void
}

; ALL: 'fsub_v2f16'
; ALL: estimated cost of 2 for {{.*}} fsub <2 x half>
define amdgpu_kernel void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
%vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
%add = fsub <2 x half> %vec, %b
store <2 x half> %add, <2 x half> addrspace(1)* %out
ret void
}

; ALL: 'fsub_v4f16'
; ALL: estimated cost of 4 for {{.*}} fsub <4 x half>
define amdgpu_kernel void @fsub_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
%vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
%add = fsub <4 x half> %vec, %b
store <4 x half> %add, <4 x half> addrspace(1)* %out
ret void
}
@ -1,50 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa %s | FileCheck -check-prefixes=GCN,CI %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=fiji %s | FileCheck -check-prefixes=GCN,VI %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 %s | FileCheck -check-prefixes=GCN,GFX9 %s

; GCN-LABEL: 'insertelement_v2i32'
; GCN: estimated cost of 0 for {{.*}} insertelement <2 x i32>
define amdgpu_kernel void @insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) {
%vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
%insert = insertelement <2 x i32> %vec, i32 123, i32 1
store <2 x i32> %insert, <2 x i32> addrspace(1)* %out
ret void
}
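; Inserting at a constant index into a vector of 32-bit (or wider) elements is
; modeled as free: the write can presumably be folded into register
; assignment, with no packing instruction required.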

; GCN-LABEL: 'insertelement_v2i64'
; GCN: estimated cost of 0 for {{.*}} insertelement <2 x i64>
define amdgpu_kernel void @insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) {
%vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
%insert = insertelement <2 x i64> %vec, i64 123, i64 1
store <2 x i64> %insert, <2 x i64> addrspace(1)* %out
ret void
}

; GCN-LABEL: 'insertelement_0_v2i16'
; CI: estimated cost of 1 for {{.*}} insertelement <2 x i16>
; VI: estimated cost of 0 for {{.*}} insertelement <2 x i16>
; GFX9: estimated cost of 0 for {{.*}} insertelement <2 x i16>
define amdgpu_kernel void @insertelement_0_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
%vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
%insert = insertelement <2 x i16> %vec, i16 123, i16 0
store <2 x i16> %insert, <2 x i16> addrspace(1)* %out
ret void
}

; GCN-LABEL: 'insertelement_1_v2i16'
; GCN: estimated cost of 1 for {{.*}} insertelement <2 x i16>
define amdgpu_kernel void @insertelement_1_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
%vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
%insert = insertelement <2 x i16> %vec, i16 123, i16 1
store <2 x i16> %insert, <2 x i16> addrspace(1)* %out
ret void
}
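; Sub-dword inserts live inside a 32-bit register, so they generally need a
; packing instruction (cost 1). Lane 0 of a v2i16 is the exception on VI and
; GFX9, presumably because subtargets with native 16-bit instructions can
; write the low half of the register directly.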

; GCN-LABEL: 'insertelement_1_v2i8'
; GCN: estimated cost of 1 for {{.*}} insertelement <2 x i8>
define amdgpu_kernel void @insertelement_1_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %vaddr) {
%vec = load <2 x i8>, <2 x i8> addrspace(1)* %vaddr
%insert = insertelement <2 x i8> %vec, i8 123, i8 1
store <2 x i8> %insert, <2 x i8> addrspace(1)* %out
ret void
}
@ -1,2 +0,0 @@
if not 'AMDGPU' in config.root.targets:
    config.unsupported = True
@ -1,85 +0,0 @@
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s

; CHECK: 'mul_i32'
; CHECK: estimated cost of 3 for {{.*}} mul i32
define amdgpu_kernel void @mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
%vec = load i32, i32 addrspace(1)* %vaddr
%mul = mul i32 %vec, %b
store i32 %mul, i32 addrspace(1)* %out
ret void
}
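; 32-bit integer multiply maps to v_mul_lo_u32, a quarter-rate instruction,
; which presumably is why the model charges 3 rather than the baseline 1;
; the vector costs below scale linearly with element count.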

; CHECK: 'mul_v2i32'
; CHECK: estimated cost of 6 for {{.*}} mul <2 x i32>
define amdgpu_kernel void @mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
%vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
%mul = mul <2 x i32> %vec, %b
store <2 x i32> %mul, <2 x i32> addrspace(1)* %out
ret void
}

; CHECK: 'mul_v3i32'
; CHECK: estimated cost of 9 for {{.*}} mul <3 x i32>
define amdgpu_kernel void @mul_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
%vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
%mul = mul <3 x i32> %vec, %b
store <3 x i32> %mul, <3 x i32> addrspace(1)* %out
ret void
}

; CHECK: 'mul_v4i32'
; CHECK: estimated cost of 12 for {{.*}} mul <4 x i32>
define amdgpu_kernel void @mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
%vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
%mul = mul <4 x i32> %vec, %b
store <4 x i32> %mul, <4 x i32> addrspace(1)* %out
ret void
}

; CHECK: 'mul_i64'
; CHECK: estimated cost of 16 for {{.*}} mul i64
define amdgpu_kernel void @mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
%vec = load i64, i64 addrspace(1)* %vaddr
%mul = mul i64 %vec, %b
store i64 %mul, i64 addrspace(1)* %out
ret void
}
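; There is no 64-bit multiply instruction on these targets; an i64 mul is
; decomposed into a series of 32-bit multiplies and adds, and the model prices
; that expansion at 16, again scaling linearly for the vector cases below.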

; CHECK: 'mul_v2i64'
; CHECK: estimated cost of 32 for {{.*}} mul <2 x i64>
define amdgpu_kernel void @mul_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
%vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
%mul = mul <2 x i64> %vec, %b
store <2 x i64> %mul, <2 x i64> addrspace(1)* %out
ret void
}

; CHECK: 'mul_v3i64'
; CHECK: estimated cost of 48 for {{.*}} mul <3 x i64>
define amdgpu_kernel void @mul_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
%vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
%mul = mul <3 x i64> %vec, %b
store <3 x i64> %mul, <3 x i64> addrspace(1)* %out
ret void
}

; CHECK: 'mul_v4i64'
; CHECK: estimated cost of 64 for {{.*}} mul <4 x i64>
define amdgpu_kernel void @mul_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
%vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
%mul = mul <4 x i64> %vec, %b
store <4 x i64> %mul, <4 x i64> addrspace(1)* %out
ret void
}

; CHECK: 'mul_v8i64'
; CHECK: estimated cost of 128 for {{.*}} mul <8 x i64>
define amdgpu_kernel void @mul_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr, <8 x i64> %b) #0 {
%vec = load <8 x i64>, <8 x i64> addrspace(1)* %vaddr
%mul = mul <8 x i64> %vec, %b
store <8 x i64> %mul, <8 x i64> addrspace(1)* %out
ret void
}

attributes #0 = { nounwind }