Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
This commit is contained in:
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions

@@ -0,0 +1,197 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.1.1 ALU32/ALU
; CHECK-CALL-NOT: call
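; Each test below wraps a single intrinsic in a small function and checks that
; it lowers to the expected instruction; the CHECK-CALL prefix additionally
; verifies that no call instruction is emitted even at -O0.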
; Add
declare i32 @llvm.hexagon.A2.addi(i32, i32)
define i32 @A2_addi(i32 %a) {
%z = call i32 @llvm.hexagon.A2.addi(i32 %a, i32 0)
ret i32 %z
}
; CHECK: = add({{.*}},#0)
declare i32 @llvm.hexagon.A2.add(i32, i32)
define i32 @A2_add(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.add(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = add({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.addsat(i32, i32)
define i32 @A2_addsat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addsat(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = add({{.*}},{{.*}}):sat
; Logical operations
declare i32 @llvm.hexagon.A2.and(i32, i32)
define i32 @A2_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.and(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = and({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.or(i32, i32)
define i32 @A2_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.or(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = or({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.xor(i32, i32)
define i32 @A2_xor(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.xor(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = xor({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.andn(i32, i32)
define i32 @A4_andn(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.andn(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = and({{.*}},~{{.*}})
declare i32 @llvm.hexagon.A4.orn(i32, i32)
define i32 @A4_orn(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.orn(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = or({{.*}},~{{.*}})
; Subtract
declare i32 @llvm.hexagon.A2.sub(i32, i32)
define i32 @A2_sub(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.sub(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = sub({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.subsat(i32, i32)
define i32 @A2_subsat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subsat(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = sub({{.*}},{{.*}}):sat
; Sign extend
declare i32 @llvm.hexagon.A2.sxtb(i32)
define i32 @A2_sxtb(i32 %a) {
%z = call i32 @llvm.hexagon.A2.sxtb(i32 %a)
ret i32 %z
}
; CHECK: = sxtb({{.*}})
declare i32 @llvm.hexagon.A2.sxth(i32)
define i32 @A2_sxth(i32 %a) {
%z = call i32 @llvm.hexagon.A2.sxth(i32 %a)
ret i32 %z
}
; CHECK: = sxth({{.*}})
; Transfer immediate
declare i32 @llvm.hexagon.A2.tfril(i32, i32)
define i32 @A2_tfril(i32 %a) {
%z = call i32 @llvm.hexagon.A2.tfril(i32 %a, i32 0)
ret i32 %z
}
; CHECK: = #0
declare i32 @llvm.hexagon.A2.tfrih(i32, i32)
define i32 @A2_tfrih(i32 %a) {
%z = call i32 @llvm.hexagon.A2.tfrih(i32 %a, i32 0)
ret i32 %z
}
; CHECK: = #0
declare i32 @llvm.hexagon.A2.tfrsi(i32)
define i32 @A2_tfrsi() {
%z = call i32 @llvm.hexagon.A2.tfrsi(i32 0)
ret i32 %z
}
; CHECK: = #0
; Transfer register
declare i32 @llvm.hexagon.A2.tfr(i32)
define i32 @A2_tfr(i32 %a) {
%z = call i32 @llvm.hexagon.A2.tfr(i32 %a)
ret i32 %z
}
; CHECK: =
; Vector add halfwords
declare i32 @llvm.hexagon.A2.svaddh(i32, i32)
define i32 @A2_svaddh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svaddh(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = vaddh({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.svaddhs(i32, i32)
define i32 @A2_svaddhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svaddhs(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = vaddh({{.*}},{{.*}}):sat
declare i32 @llvm.hexagon.A2.svadduhs(i32, i32)
define i32 @A2_svadduhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svadduhs(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = vadduh({{.*}},{{.*}}):sat
; Vector average halfwords
declare i32 @llvm.hexagon.A2.svavgh(i32, i32)
define i32 @A2_svavgh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svavgh(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = vavgh({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.svavghs(i32, i32)
define i32 @A2_svavghs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svavghs(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = vavgh({{.*}},{{.*}}):rnd
declare i32 @llvm.hexagon.A2.svnavgh(i32, i32)
define i32 @A2_svnavgh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svnavgh(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = vnavgh({{.*}},{{.*}})
; Vector subtract halfwords
declare i32 @llvm.hexagon.A2.svsubh(i32, i32)
define i32 @A2_svsubh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svsubh(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = vsubh({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.svsubhs(i32, i32)
define i32 @A2_svsubhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svsubhs(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = vsubh({{.*}},{{.*}}):sat
declare i32 @llvm.hexagon.A2.svsubuhs(i32, i32)
define i32 @A2_svsubuhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svsubuhs(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = vsubuh({{.*}},{{.*}}):sat
; Zero extend
declare i32 @llvm.hexagon.A2.zxth(i32)
define i32 @A2_zxth(i32 %a) {
%z = call i32 @llvm.hexagon.A2.zxth(i32 %a)
ret i32 %z
}
; CHECK: = zxth({{.*}})

@@ -0,0 +1,107 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.1.2 ALU32/PERM
; CHECK-CALL-NOT: call
; Combine words into doubleword
declare i64 @llvm.hexagon.A4.combineri(i32, i32)
define i64 @A4_combineri(i32 %a) {
%z = call i64 @llvm.hexagon.A4.combineri(i32 %a, i32 0)
ret i64 %z
}
; CHECK: = combine({{.*}},#0)
declare i64 @llvm.hexagon.A4.combineir(i32, i32)
define i64 @A4_combineir(i32 %a) {
%z = call i64 @llvm.hexagon.A4.combineir(i32 0, i32 %a)
ret i64 %z
}
; CHECK: = combine(#0,{{.*}})
declare i64 @llvm.hexagon.A2.combineii(i32, i32)
define i64 @A2_combineii() {
%z = call i64 @llvm.hexagon.A2.combineii(i32 0, i32 0)
ret i64 %z
}
; CHECK: = combine(#0,#0)
declare i32 @llvm.hexagon.A2.combine.hh(i32, i32)
define i32 @A2_combine_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.hh(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = combine({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.combine.hl(i32, i32)
define i32 @A2_combine_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.hl(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = combine({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.combine.lh(i32, i32)
define i32 @A2_combine_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.lh(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = combine({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.combine.ll(i32, i32)
define i32 @A2_combine_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.ll(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = combine({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.combinew(i32, i32)
define i64 @A2_combinew(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.A2.combinew(i32 %a, i32 %b)
ret i64 %z
}
; CHECK: = combine({{.*}},{{.*}})
; Mux
declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32)
define i32 @C2_muxri(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.muxri(i32 %a, i32 0, i32 %b)
ret i32 %z
}
; CHECK: = mux({{.*}},#0,{{.*}})
declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32)
define i32 @C2_muxir(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.muxir(i32 %a, i32 %b, i32 0)
ret i32 %z
}
; CHECK: = mux({{.*}},{{.*}},#0)
declare i32 @llvm.hexagon.C2.mux(i32, i32, i32)
define i32 @C2_mux(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C2.mux(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
; CHECK: = mux({{.*}},{{.*}},{{.*}})
; Shift word by 16
declare i32 @llvm.hexagon.A2.aslh(i32)
define i32 @A2_aslh(i32 %a) {
%z = call i32 @llvm.hexagon.A2.aslh(i32 %a)
ret i32 %z
}
; CHECK: = aslh({{.*}})
declare i32 @llvm.hexagon.A2.asrh(i32)
define i32 @A2_asrh(i32 %a) {
%z = call i32 @llvm.hexagon.A2.asrh(i32 %a)
ret i32 %z
}
; CHECK: = asrh({{.*}})
; Pack high and low halfwords
declare i64 @llvm.hexagon.S2.packhl(i32, i32)
define i64 @S2_packhl(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.packhl(i32 %a, i32 %b)
ret i64 %z
}
; CHECK: = packhl({{.*}},{{.*}})

@@ -0,0 +1,68 @@
; RUN: sed -e "s/ORDER/unordered/" %s | llc -march=hexagon | FileCheck %s
; RUN: sed -e "s/ORDER/monotonic/" %s | llc -march=hexagon | FileCheck %s
; RUN: sed -e "s/ORDER/release/" %s | llc -march=hexagon | FileCheck %s
; RUN: sed -e "s/ORDER/seq_cst/" %s | llc -march=hexagon | FileCheck %s
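; sed substitutes each ordering above for the ORDER placeholder, so the same
; atomic stores are checked under every supported store ordering.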
%struct.Obj = type { [100 x i32] }
@i8Src = global i8 0, align 1
@i8Dest = global i8 0, align 1
@i16Src = global i16 0, align 2
@i16Dest = global i16 0, align 2
@i32Src = global i32 0, align 4
@i32Dest = global i32 0, align 4
@i64Src = global i64 0, align 8
@i64Dest = global i64 0, align 8
@ptrSrc = global %struct.Obj* null, align 4
@ptrDest = global %struct.Obj* null, align 4
define void @store_i8() #0 {
entry:
%i8Tmp = load i8, i8* @i8Src, align 1
store atomic i8 %i8Tmp, i8* @i8Dest ORDER, align 1
ret void
}
; CHECK-LABEL: store_i8:
; CHECK: [[TMP_REG:r[0-9]+]] = memub(gp+#i8Src)
; CHECK: memb(gp+#i8Dest) = [[TMP_REG]]
define void @store_i16() #0 {
entry:
%i16Tmp = load i16, i16* @i16Src, align 2
store atomic i16 %i16Tmp, i16* @i16Dest ORDER, align 2
ret void
}
; CHECK-LABEL: store_i16:
; CHECK: [[TMP_REG:r[0-9]+]] = memuh(gp+#i16Src)
; CHECK: memh(gp+#i16Dest) = [[TMP_REG]]
define void @store_i32() #0 {
entry:
%i32Tmp = load i32, i32* @i32Src, align 4
store atomic i32 %i32Tmp, i32* @i32Dest ORDER, align 4
ret void
}
; CHECK-LABEL: store_i32:
; CHECK: [[TMP_REG:r[0-9]+]] = memw(gp+#i32Src)
; CHECK: memw(gp+#i32Dest) = [[TMP_REG]]
define void @store_i64() #0 {
entry:
%i64Tmp = load i64, i64* @i64Src, align 8
store atomic i64 %i64Tmp, i64* @i64Dest ORDER, align 8
ret void
}
; CHECK-LABEL: store_i64:
; CHECK: [[TMP_REG:r[0-9]+:[0-9]+]] = memd(gp+#i64Src)
; CHECK: memd(gp+#i64Dest) = [[TMP_REG]]
define void @store_ptr() #0 {
entry:
%ptrTmp = load i32, i32* bitcast (%struct.Obj** @ptrSrc to i32*), align 4
store atomic i32 %ptrTmp, i32* bitcast (%struct.Obj** @ptrDest to i32*) ORDER, align 4
ret void
}
; CHECK-LABEL: store_ptr:
; CHECK: [[TMP_REG:r[0-9]+]] = memw(gp+#ptrSrc)
; CHECK: memw(gp+#ptrDest) = [[TMP_REG]]

@@ -0,0 +1,41 @@
; RUN: llc -mattr=+hvxv60,hvx-length128b -march=hexagon -O2 < %s | FileCheck %s
; CHECK-LABEL: V6_vmaskedstoreq_128B
; CHECK: if (q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
; CHECK-LABEL: V6_vmaskedstorenq_128B
; CHECK: if (!q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
; CHECK-LABEL: V6_vmaskedstorentq_128B
; CHECK: if (q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
; CHECK-LABEL: V6_vmaskedstorentnq_128B
; CHECK: if (!q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
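; Each wrapper bitcasts a <32 x i32> HVX vector to the <1024 x i1> predicate
; expected by the intrinsic and checks the resulting predicated vmem store;
; the nq variants negate the predicate and the nt variants store non-temporally.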
declare void @llvm.hexagon.V6.vmaskedstoreq.128B(<1024 x i1>, i8*, <32 x i32>)
define void @V6_vmaskedstoreq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
%1 = bitcast <32 x i32> %a to <1024 x i1>
call void @llvm.hexagon.V6.vmaskedstoreq.128B(<1024 x i1> %1, i8* %b, <32 x i32> %c)
ret void
}
declare void @llvm.hexagon.V6.vmaskedstorenq.128B(<1024 x i1>, i8*, <32 x i32>)
define void @V6_vmaskedstorenq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
%1 = bitcast <32 x i32> %a to <1024 x i1>
call void @llvm.hexagon.V6.vmaskedstorenq.128B(<1024 x i1> %1, i8* %b, <32 x i32> %c)
ret void
}
declare void @llvm.hexagon.V6.vmaskedstorentq.128B(<1024 x i1>, i8*, <32 x i32>)
define void @V6_vmaskedstorentq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
%1 = bitcast <32 x i32> %a to <1024 x i1>
call void @llvm.hexagon.V6.vmaskedstorentq.128B(<1024 x i1> %1, i8* %b, <32 x i32> %c)
ret void
}
declare void @llvm.hexagon.V6.vmaskedstorentnq.128B(<1024 x i1>, i8*, <32 x i32>)
define void @V6_vmaskedstorentnq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
%1 = bitcast <32 x i32> %a to <1024 x i1>
call void @llvm.hexagon.V6.vmaskedstorentnq.128B(<1024 x i1> %1, i8* %b, <32 x i32> %c)
ret void
}

@@ -0,0 +1,41 @@
; RUN: llc -mattr=+hvxv60,hvx-length64b -march=hexagon -O2 < %s | FileCheck %s
; CHECK-LABEL: V6_vmaskedstoreq
; CHECK: if (q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
; CHECK-LABEL: V6_vmaskedstorenq
; CHECK: if (!q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
; CHECK-LABEL: V6_vmaskedstorentq
; CHECK: if (q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
; CHECK-LABEL: V6_vmaskedstorentnq
; CHECK: if (!q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
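; Same checks as the 128-byte variant, using 64-byte vectors (<16 x i32>) and
; <512 x i1> predicates.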
declare void @llvm.hexagon.V6.vmaskedstoreq(<512 x i1>, i8*, <16 x i32>)
define void @V6_vmaskedstoreq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
%1 = bitcast <16 x i32> %a to <512 x i1>
call void @llvm.hexagon.V6.vmaskedstoreq(<512 x i1> %1, i8* %b, <16 x i32> %c)
ret void
}
declare void @llvm.hexagon.V6.vmaskedstorenq(<512 x i1>, i8*, <16 x i32>)
define void @V6_vmaskedstorenq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
%1 = bitcast <16 x i32> %a to <512 x i1>
call void @llvm.hexagon.V6.vmaskedstorenq(<512 x i1> %1, i8* %b, <16 x i32> %c)
ret void
}
declare void @llvm.hexagon.V6.vmaskedstorentq(<512 x i1>, i8*, <16 x i32>)
define void @V6_vmaskedstorentq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
%1 = bitcast <16 x i32> %a to <512 x i1>
call void @llvm.hexagon.V6.vmaskedstorentq(<512 x i1> %1, i8* %b, <16 x i32> %c)
ret void
}
declare void @llvm.hexagon.V6.vmaskedstorentnq(<512 x i1>, i8*, <16 x i32>)
define void @V6_vmaskedstorentnq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
%1 = bitcast <16 x i32> %a to <512 x i1>
call void @llvm.hexagon.V6.vmaskedstorentnq(<512 x i1> %1, i8* %b, <16 x i32> %c)
ret void
}

@@ -0,0 +1,135 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.2 CR
; CHECK-CALL-NOT: call
; Corner detection acceleration
declare i32 @llvm.hexagon.C4.fastcorner9(i32, i32)
define i32 @C4_fastcorner9(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C4.fastcorner9(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = fastcorner9({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.fastcorner9.not(i32, i32)
define i32 @C4_fastcorner9_not(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C4.fastcorner9.not(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = !fastcorner9({{.*}},{{.*}})
; Logical reductions on predicates
declare i32 @llvm.hexagon.C2.any8(i32)
define i32 @C2_any8(i32 %a) {
%z = call i32 @llvm.hexagon.C2.any8(i32 %a)
ret i32 %z
}
; CHECK: = any8({{.*}})
declare i32 @llvm.hexagon.C2.all8(i32)
define i32 @C2_all8(i32 %a) {
%z = call i32 @llvm.hexagon.C2.all8(i32 %a)
ret i32 %z
}
; CHECK: = all8({{.*}})
; Logical operations on predicates
declare i32 @llvm.hexagon.C2.and(i32, i32)
define i32 @C2_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.and(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = and({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.and.and(i32, i32, i32)
define i32 @C4_and_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.and.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
; CHECK: = and({{.*}},and({{.*}},{{.*}}))
declare i32 @llvm.hexagon.C2.or(i32, i32)
define i32 @C2_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.or(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = or({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.and.or(i32, i32, i32)
define i32 @C4_and_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.and.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
; CHECK: = and({{.*}},or({{.*}},{{.*}}))
declare i32 @llvm.hexagon.C2.xor(i32, i32)
define i32 @C2_xor(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.xor(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = xor({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.or.and(i32, i32, i32)
define i32 @C4_or_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.or.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
; CHECK: = or({{.*}},and({{.*}},{{.*}}))
declare i32 @llvm.hexagon.C2.andn(i32, i32)
define i32 @C2_andn(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.andn(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = and({{.*}},!{{.*}})
declare i32 @llvm.hexagon.C4.or.or(i32, i32, i32)
define i32 @C4_or_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.or.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
; CHECK: = or({{.*}},or({{.*}},{{.*}}))
declare i32 @llvm.hexagon.C4.and.andn(i32, i32, i32)
define i32 @C4_and_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.and.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
; CHECK: = and({{.*}},and({{.*}},!{{.*}}))
declare i32 @llvm.hexagon.C4.and.orn(i32, i32, i32)
define i32 @C4_and_orn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.and.orn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
; CHECK: = and({{.*}},or({{.*}},!{{.*}}))
declare i32 @llvm.hexagon.C2.not(i32)
define i32 @C2_not(i32 %a) {
%z = call i32 @llvm.hexagon.C2.not(i32 %a)
ret i32 %z
}
; CHECK: = not({{.*}})
declare i32 @llvm.hexagon.C4.or.andn(i32, i32, i32)
define i32 @C4_or_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.or.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
; CHECK: = or({{.*}},and({{.*}},!{{.*}}))
declare i32 @llvm.hexagon.C2.orn(i32, i32)
define i32 @C2_orn(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.orn(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = or({{.*}},!{{.*}})
declare i32 @llvm.hexagon.C4.or.orn(i32, i32, i32)
define i32 @C4_or_orn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.or.orn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
; CHECK: = or({{.*}},or({{.*}},!{{.*}}))

@@ -0,0 +1,12 @@
; RUN: llc -march=hexagon < %s
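; No FileCheck invocation: this test only verifies that a weak cmpxchg whose
; result is unused selects and compiles without crashing.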
target triple = "hexagon-unknown--elf"
; Function Attrs: norecurse nounwind
define void @_Z4lockv() #0 {
entry:
%__shared_owners = alloca i32, align 4
%0 = cmpxchg weak i32* %__shared_owners, i32 0, i32 1 seq_cst seq_cst
ret void
}
attributes #0 = { nounwind }

@@ -0,0 +1,71 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
target triple = "hexagon"
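; Each function exercises one cache-maintenance intrinsic and checks that it
; selects to the matching dcfetch/dccleana/dccleaninva/dcinva/dczeroa/l2fetch
; instruction.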
; CHECK-LABEL: dc00:
; CHECK: dcfetch
define void @dc00(i8* nocapture readonly %p) local_unnamed_addr #0 {
tail call void @llvm.hexagon.prefetch(i8* %p)
ret void
}
; CHECK-LABEL: dc01:
; CHECK: dccleana
define void @dc01(i8* nocapture readonly %p) local_unnamed_addr #0 {
entry:
tail call void @llvm.hexagon.Y2.dccleana(i8* %p)
ret void
}
; CHECK-LABEL: dc02:
; CHECK: dccleaninva
define void @dc02(i8* nocapture readonly %p) local_unnamed_addr #0 {
entry:
tail call void @llvm.hexagon.Y2.dccleaninva(i8* %p)
ret void
}
; CHECK-LABEL: dc03:
; CHECK: dcinva
define void @dc03(i8* nocapture readonly %p) local_unnamed_addr #0 {
entry:
tail call void @llvm.hexagon.Y2.dcinva(i8* %p)
ret void
}
; CHECK-LABEL: dc04:
; CHECK: dczeroa
define void @dc04(i8* nocapture %p) local_unnamed_addr #0 {
entry:
tail call void @llvm.hexagon.Y2.dczeroa(i8* %p)
ret void
}
; CHECK-LABEL: dc05:
; CHECK: l2fetch(r{{[0-9]+}},r{{[0-9]+}})
define void @dc05(i8* nocapture readonly %p, i32 %q) local_unnamed_addr #0 {
entry:
tail call void @llvm.hexagon.Y4.l2fetch(i8* %p, i32 %q)
ret void
}
; CHECK-LABEL: dc06:
; CHECK: l2fetch(r{{[0-9]+}},r{{[0-9]+}}:{{[0-9]+}})
define void @dc06(i8* nocapture readonly %p, i64 %q) local_unnamed_addr #0 {
entry:
tail call void @llvm.hexagon.Y5.l2fetch(i8* %p, i64 %q)
ret void
}
declare void @llvm.hexagon.prefetch(i8* nocapture) #1
declare void @llvm.hexagon.Y2.dccleana(i8* nocapture readonly) #2
declare void @llvm.hexagon.Y2.dccleaninva(i8* nocapture readonly) #2
declare void @llvm.hexagon.Y2.dcinva(i8* nocapture readonly) #2
declare void @llvm.hexagon.Y2.dczeroa(i8* nocapture) #3
declare void @llvm.hexagon.Y4.l2fetch(i8* nocapture readonly, i32) #2
declare void @llvm.hexagon.Y5.l2fetch(i8* nocapture readonly, i64) #2
attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="-hvx,-long-calls" }
attributes #1 = { inaccessiblemem_or_argmemonly nounwind }
attributes #2 = { nounwind }
attributes #3 = { argmemonly nounwind writeonly }

@@ -0,0 +1,60 @@
; RUN: llc -mv65 -mattr=+hvxv65,hvx-length128b -march=hexagon -O2 < %s | FileCheck %s
; CHECK-LABEL: V6_vgathermw_128B
; CHECK: vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
; CHECK-LABEL: V6_vgathermh_128B
; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
; CHECK-LABEL: V6_vgathermhw_128B
; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h
; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
; CHECK-LABEL: V6_vgathermwq_128B
; CHECK: if (q{{[0-3]+}}) vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
; CHECK-LABEL: V6_vgathermhq_128B
; CHECK: if (q{{[0-3]+}}) vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
; CHECK-LABEL: V6_vgathermhwq_128B
; CHECK: if (q{{[0-3]+}}) vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h
; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
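; Gathers write their result through vtmp; each test checks both the vgather
; itself and the store of vtmp.new to the destination vmem.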
declare void @llvm.hexagon.V6.vgathermw.128B(i8*, i32, i32, <32 x i32>)
define void @V6_vgathermw_128B(i8* %a, i32 %b, i32 %c, <32 x i32> %d) {
call void @llvm.hexagon.V6.vgathermw.128B(i8* %a, i32 %b, i32 %c, <32 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vgathermh.128B(i8*, i32, i32, <32 x i32>)
define void @V6_vgathermh_128B(i8* %a, i32 %b, i32 %c, <32 x i32> %d) {
call void @llvm.hexagon.V6.vgathermh.128B(i8* %a, i32 %b, i32 %c, <32 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vgathermhw.128B(i8*, i32, i32, <64 x i32>)
define void @V6_vgathermhw_128B(i8* %a, i32 %b, i32 %c, <64 x i32> %d) {
call void @llvm.hexagon.V6.vgathermhw.128B(i8* %a, i32 %b, i32 %c, <64 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vgathermwq.128B(i8*, <1024 x i1>, i32, i32, <32 x i32>)
define void @V6_vgathermwq_128B(i8* %a, <32 x i32> %b, i32 %c, i32 %d, <32 x i32> %e) {
%1 = bitcast <32 x i32> %b to <1024 x i1>
call void @llvm.hexagon.V6.vgathermwq.128B(i8* %a, <1024 x i1> %1, i32 %c, i32 %d, <32 x i32> %e)
ret void
}
declare void @llvm.hexagon.V6.vgathermhq.128B(i8*, <1024 x i1>, i32, i32, <32 x i32>)
define void @V6_vgathermhq_128B(i8* %a, <32 x i32> %b, i32 %c, i32 %d, <32 x i32> %e) {
%1 = bitcast <32 x i32> %b to <1024 x i1>
call void @llvm.hexagon.V6.vgathermhq.128B(i8* %a, <1024 x i1> %1, i32 %c, i32 %d, <32 x i32> %e)
ret void
}
declare void @llvm.hexagon.V6.vgathermhwq.128B(i8*, <1024 x i1>, i32, i32, <64 x i32>)
define void @V6_vgathermhwq_128B(i8* %a, <32 x i32> %b, i32 %c, i32 %d, <64 x i32> %e) {
%1 = bitcast <32 x i32> %b to <1024 x i1>
call void @llvm.hexagon.V6.vgathermhwq.128B(i8* %a, <1024 x i1> %1, i32 %c, i32 %d, <64 x i32> %e)
ret void
}

@@ -0,0 +1,59 @@
; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O2 < %s | FileCheck %s
; CHECK-LABEL: V6_vgathermw
; CHECK: vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
; CHECK-LABEL: V6_vgathermh
; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
; CHECK-LABEL: V6_vgathermhw
; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h
; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
; CHECK-LABEL: V6_vgathermwq
; CHECK: if (q{{[0-3]+}}) vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
; CHECK-LABEL: V6_vgathermhq
; CHECK: if (q{{[0-3]+}}) vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
; CHECK-LABEL: V6_vgathermhwq
; CHECK: if (q{{[0-3]+}}) vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h
; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
declare void @llvm.hexagon.V6.vgathermw(i8*, i32, i32, <16 x i32>)
define void @V6_vgathermw(i8* %a, i32 %b, i32 %c, <16 x i32> %d) {
call void @llvm.hexagon.V6.vgathermw(i8* %a, i32 %b, i32 %c, <16 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vgathermh(i8*, i32, i32, <16 x i32>)
define void @V6_vgathermh(i8* %a, i32 %b, i32 %c, <16 x i32> %d) {
call void @llvm.hexagon.V6.vgathermh(i8* %a, i32 %b, i32 %c, <16 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vgathermhw(i8*, i32, i32, <32 x i32>)
define void @V6_vgathermhw(i8* %a, i32 %b, i32 %c, <32 x i32> %d) {
call void @llvm.hexagon.V6.vgathermhw(i8* %a, i32 %b, i32 %c, <32 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vgathermwq(i8*, <512 x i1>, i32, i32, <16 x i32>)
define void @V6_vgathermwq(i8* %a, <16 x i32> %b, i32 %c, i32 %d, <16 x i32> %e) {
%1 = bitcast <16 x i32> %b to <512 x i1>
call void @llvm.hexagon.V6.vgathermwq(i8* %a, <512 x i1> %1, i32 %c, i32 %d, <16 x i32> %e)
ret void
}
declare void @llvm.hexagon.V6.vgathermhq(i8*, <512 x i1>, i32, i32, <16 x i32>)
define void @V6_vgathermhq(i8* %a, <16 x i32> %b, i32 %c, i32 %d, <16 x i32> %e) {
%1 = bitcast <16 x i32> %b to <512 x i1>
call void @llvm.hexagon.V6.vgathermhq(i8* %a, <512 x i1> %1, i32 %c, i32 %d, <16 x i32> %e)
ret void
}
declare void @llvm.hexagon.V6.vgathermhwq(i8*, <512 x i1>, i32, i32, <32 x i32>)
define void @V6_vgathermhwq(i8* %a, <16 x i32> %b, i32 %c, i32 %d, <32 x i32> %e) {
%1 = bitcast <16 x i32> %b to <512 x i1>
call void @llvm.hexagon.V6.vgathermhwq(i8* %a, <512 x i1> %1, i32 %c, i32 %d, <32 x i32> %e)
ret void
}

@@ -0,0 +1,78 @@
; RUN: llc -mv65 -mattr=+hvxv65,hvx-length128b -march=hexagon -O2 < %s | FileCheck %s
; CHECK-LABEL: V6_vscattermw_128B
; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.w).w = v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermh_128B
; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h = v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermw_add_128B
; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.w).w += v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermh_add_128B
; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h += v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermwq_128B
; CHECK: if (q{{[0-3]}}) vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.w).w = v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermhq_128B
; CHECK: if (q{{[0-3]}}) vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h = v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermhw_128B
; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h = v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermhw_add_128B
; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h += v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermhwq_128B
; CHECK: if (q{{[0-3]}}) vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h = v{{[0-9]+}}
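; The add variants accumulate into memory (+=), and the q variants are
; predicated on a vector predicate obtained by bitcasting a <32 x i32> vector
; to <1024 x i1>.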
declare void @llvm.hexagon.V6.vscattermw.128B(i32, i32, <32 x i32>, <32 x i32>)
define void @V6_vscattermw_128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d) {
call void @llvm.hexagon.V6.vscattermw.128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vscattermh.128B(i32, i32, <32 x i32>, <32 x i32>)
define void @V6_vscattermh_128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d) {
call void @llvm.hexagon.V6.vscattermh.128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vscattermw.add.128B(i32, i32, <32 x i32>, <32 x i32>)
define void @V6_vscattermw_add_128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d) {
call void @llvm.hexagon.V6.vscattermw.add.128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vscattermh.add.128B(i32, i32, <32 x i32>, <32 x i32>)
define void @V6_vscattermh_add_128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d) {
call void @llvm.hexagon.V6.vscattermh.add.128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vscattermwq.128B(<1024 x i1>, i32, i32, <32 x i32>, <32 x i32>)
define void @V6_vscattermwq_128B(<32 x i32> %a, i32 %b, i32 %c, <32 x i32> %d, <32 x i32> %e) {
%1 = bitcast <32 x i32> %a to <1024 x i1>
call void @llvm.hexagon.V6.vscattermwq.128B(<1024 x i1> %1, i32 %b, i32 %c, <32 x i32> %d, <32 x i32> %e)
ret void
}
declare void @llvm.hexagon.V6.vscattermhq.128B(<1024 x i1>, i32, i32, <32 x i32>, <32 x i32>)
define void @V6_vscattermhq_128B(<32 x i32> %a, i32 %b, i32 %c, <32 x i32> %d, <32 x i32> %e) {
%1 = bitcast <32 x i32> %a to <1024 x i1>
call void @llvm.hexagon.V6.vscattermhq.128B(<1024 x i1> %1, i32 %b, i32 %c, <32 x i32> %d, <32 x i32> %e)
ret void
}
declare void @llvm.hexagon.V6.vscattermhw.128B(i32, i32, <64 x i32>, <32 x i32>)
define void @V6_vscattermhw_128B(i32 %a, i32 %b, <64 x i32> %c, <32 x i32> %d) {
call void @llvm.hexagon.V6.vscattermhw.128B(i32 %a, i32 %b, <64 x i32> %c, <32 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vscattermhw.add.128B(i32, i32, <64 x i32>, <32 x i32>)
define void @V6_vscattermhw_add_128B(i32 %a, i32 %b, <64 x i32> %c, <32 x i32> %d) {
call void @llvm.hexagon.V6.vscattermhw.add.128B(i32 %a, i32 %b, <64 x i32> %c, <32 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vscattermhwq.128B(<1024 x i1>, i32, i32, <64 x i32>, <32 x i32>)
define void @V6_vscattermhwq_128B(<32 x i32> %a, i32 %b, i32 %c, <64 x i32> %d, <32 x i32> %e) {
%1 = bitcast <32 x i32> %a to <1024 x i1>
call void @llvm.hexagon.V6.vscattermhwq.128B(<1024 x i1> %1, i32 %b, i32 %c, <64 x i32> %d, <32 x i32> %e)
ret void
}

@@ -0,0 +1,32 @@
; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O2 < %s | FileCheck %s
; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O2 -disable-packetizer < %s | FileCheck %s
; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O0 < %s | FileCheck %s
; CHECK: vtmp.h = vgather(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h
; CHECK-NEXT: vmem(r{{[0-9]+}}+#0) = vtmp.new
; CHECK-NEXT: }
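; The CHECK-NEXT lines require the vtmp.new store to immediately follow the
; vgather within the same packet at every optimization level tested.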
declare i32 @add_translation_extended(i32, i8*, i64, i32, i32, i32, i32, i32, i32) local_unnamed_addr
; Function Attrs: nounwind
define i32 @main() local_unnamed_addr {
entry:
%hvx_vector = alloca <16 x i32>, align 64
%0 = bitcast <16 x i32>* %hvx_vector to i8*
%call.i = tail call i32 @add_translation_extended(i32 1, i8* inttoptr (i32 -668991488 to i8*), i64 3625975808, i32 16, i32 15, i32 0, i32 0, i32 0, i32 3)
%1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
%2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2)
tail call void @llvm.hexagon.V6.vscattermh.add(i32 -668991488, i32 1023, <16 x i32> %1, <16 x i32> %2)
call void @llvm.hexagon.V6.vgathermh(i8* %0, i32 -668991488, i32 1023, <16 x i32> %1)
ret i32 0
}
; Function Attrs: nounwind writeonly
declare void @llvm.hexagon.V6.vscattermh.add(i32, i32, <16 x i32>, <16 x i32>)
; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32)
; Function Attrs: argmemonly nounwind
declare void @llvm.hexagon.V6.vgathermh(i8*, i32, i32, <16 x i32>)

@@ -0,0 +1,78 @@
; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O2 < %s | FileCheck %s
; CHECK-LABEL: V6_vscattermw
; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.w).w = v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermh
; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h = v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermw_add
; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.w).w += v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermh_add
; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h += v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermwq
; CHECK: if (q{{[0-3]}}) vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.w).w = v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermhq
; CHECK: if (q{{[0-3]}}) vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h = v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermhw
; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h = v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermhw_add
; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h += v{{[0-9]+}}
; CHECK-LABEL: V6_vscattermhwq
; CHECK: if (q{{[0-3]}}) vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h = v{{[0-9]+}}
declare void @llvm.hexagon.V6.vscattermw(i32, i32, <16 x i32>, <16 x i32>)
define void @V6_vscattermw(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d) {
call void @llvm.hexagon.V6.vscattermw(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vscattermh(i32, i32, <16 x i32>, <16 x i32>)
define void @V6_vscattermh(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d) {
call void @llvm.hexagon.V6.vscattermh(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vscattermw.add(i32, i32, <16 x i32>, <16 x i32>)
define void @V6_vscattermw_add(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d) {
call void @llvm.hexagon.V6.vscattermw.add(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vscattermh.add(i32, i32, <16 x i32>, <16 x i32>)
define void @V6_vscattermh_add(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d) {
call void @llvm.hexagon.V6.vscattermh.add(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vscattermwq(<512 x i1>, i32, i32, <16 x i32>, <16 x i32>)
define void @V6_vscattermwq(<16 x i32> %a, i32 %b, i32 %c, <16 x i32> %d, <16 x i32> %e) {
%1 = bitcast <16 x i32> %a to <512 x i1>
call void @llvm.hexagon.V6.vscattermwq(<512 x i1> %1, i32 %b, i32 %c, <16 x i32> %d, <16 x i32> %e)
ret void
}
declare void @llvm.hexagon.V6.vscattermhq(<512 x i1>, i32, i32, <16 x i32>, <16 x i32>)
define void @V6_vscattermhq(<16 x i32> %a, i32 %b, i32 %c, <16 x i32> %d, <16 x i32> %e) {
%1 = bitcast <16 x i32> %a to <512 x i1>
call void @llvm.hexagon.V6.vscattermhq(<512 x i1> %1, i32 %b, i32 %c, <16 x i32> %d, <16 x i32> %e)
ret void
}
declare void @llvm.hexagon.V6.vscattermhw(i32, i32, <32 x i32>, <16 x i32>)
define void @V6_vscattermhw(i32 %a, i32 %b, <32 x i32> %c, <16 x i32> %d) {
call void @llvm.hexagon.V6.vscattermhw(i32 %a, i32 %b, <32 x i32> %c, <16 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vscattermhw.add(i32, i32, <32 x i32>, <16 x i32>)
define void @V6_vscattermhw_add(i32 %a, i32 %b, <32 x i32> %c, <16 x i32> %d) {
call void @llvm.hexagon.V6.vscattermhw.add(i32 %a, i32 %b, <32 x i32> %c, <16 x i32> %d)
ret void
}
declare void @llvm.hexagon.V6.vscattermhwq(<512 x i1>, i32, i32, <32 x i32>, <16 x i32>)
define void @V6_vscattermhwq(<16 x i32> %a, i32 %b, i32 %c, <32 x i32> %d, <16 x i32> %e) {
%1 = bitcast <16 x i32> %a to <512 x i1>
call void @llvm.hexagon.V6.vscattermhwq(<512 x i1> %1, i32 %b, i32 %c, <32 x i32> %d, <16 x i32> %e)
ret void
}

@@ -0,0 +1,156 @@
; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O0 < %s | FileCheck %s
; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; CHECK-CALL-NOT: call
declare i32 @llvm.hexagon.A6.vcmpbeq.notany(i64, i64)
define i32 @A6_vcmpbeq_notany(i64 %a, i64 %b) {
%c = call i32 @llvm.hexagon.A6.vcmpbeq.notany(i64 %a, i64 %b)
ret i32 %c
}
; CHECK: = !any8(vcmpb.eq(r1:0,r3:2))
declare <16 x i32> @llvm.hexagon.V6.vabsb(<16 x i32>)
define <16 x i32> @V6_vabsb(<16 x i32> %a) {
%b = call <16 x i32> @llvm.hexagon.V6.vabsb(<16 x i32> %a)
ret <16 x i32> %b
}
; CHECK: = vabs(v0.b)
declare <16 x i32> @llvm.hexagon.V6.vabsb.sat(<16 x i32>)
define <16 x i32> @V6_vabsb_sat(<16 x i32> %a) {
%b = call <16 x i32> @llvm.hexagon.V6.vabsb.sat(<16 x i32> %a)
ret <16 x i32> %b
}
; CHECK: = vabs(v0.b):sat
declare <16 x i32> @llvm.hexagon.V6.vaslh.acc(<16 x i32>, <16 x i32>, i32)
define <16 x i32> @V6_vaslh_acc(<16 x i32> %a, <16 x i32> %b, i32 %c) {
%d = call <16 x i32> @llvm.hexagon.V6.vaslh.acc(<16 x i32> %a, <16 x i32> %b, i32 %c)
ret <16 x i32> %d
}
; CHECK: += vasl(v1.h,r0)
declare <16 x i32> @llvm.hexagon.V6.vasrh.acc(<16 x i32>, <16 x i32>, i32)
define <16 x i32> @V6_vasrh_acc(<16 x i32> %a, <16 x i32> %b, i32 %c) {
%d = call <16 x i32> @llvm.hexagon.V6.vasrh.acc(<16 x i32> %a, <16 x i32> %b, i32 %c)
ret <16 x i32> %d
}
; CHECK: += vasr(v1.h,r0)
declare <16 x i32> @llvm.hexagon.V6.vasruwuhsat(<16 x i32>, <16 x i32>, i32)
define <16 x i32> @V6_vasruwuhsat(<16 x i32> %a, <16 x i32> %b, i32 %c) {
%d = call <16 x i32> @llvm.hexagon.V6.vasruwuhsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
ret <16 x i32> %d
}
; CHECK: = vasr(v0.uw,v1.uw,r0):sat
declare <16 x i32> @llvm.hexagon.V6.vasruhubsat(<16 x i32>, <16 x i32>, i32)
define <16 x i32> @V6_vasruhubsat(<16 x i32> %a, <16 x i32> %b, i32 %c) {
%d = call <16 x i32> @llvm.hexagon.V6.vasruhubsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
ret <16 x i32> %d
}
; CHECK: = vasr(v0.uh,v1.uh,r0):sat
declare <16 x i32> @llvm.hexagon.V6.vasruhubrndsat(<16 x i32>, <16 x i32>, i32)
define <16 x i32> @V6_vasruhubrndsat(<16 x i32> %a, <16 x i32> %b, i32 %c) {
%d = call <16 x i32> @llvm.hexagon.V6.vasruhubrndsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
ret <16 x i32> %d
}
; CHECK: = vasr(v0.uh,v1.uh,r0):rnd:sat
declare <16 x i32> @llvm.hexagon.V6.vavguw(<16 x i32>, <16 x i32>)
define <16 x i32> @V6_vavguw(<16 x i32> %a, <16 x i32> %b) {
%c = call <16 x i32> @llvm.hexagon.V6.vavguw(<16 x i32> %a, <16 x i32> %b)
ret <16 x i32> %c
}
; CHECK: = vavg(v0.uw,v1.uw)
declare <16 x i32> @llvm.hexagon.V6.vavguwrnd(<16 x i32>, <16 x i32>)
define <16 x i32> @V6_vavguwrnd(<16 x i32> %a, <16 x i32> %b) {
%c = call <16 x i32> @llvm.hexagon.V6.vavguwrnd(<16 x i32> %a, <16 x i32> %b)
ret <16 x i32> %c
}
; CHECK: = vavg(v0.uw,v1.uw):rnd
declare <16 x i32> @llvm.hexagon.V6.vavgb(<16 x i32>, <16 x i32>)
define <16 x i32> @V6_vavgb(<16 x i32> %a, <16 x i32> %b) {
%c = call <16 x i32> @llvm.hexagon.V6.vavgb(<16 x i32> %a, <16 x i32> %b)
ret <16 x i32> %c
}
; CHECK: = vavg(v0.b,v1.b)
declare <16 x i32> @llvm.hexagon.V6.vavgbrnd(<16 x i32>, <16 x i32>)
define <16 x i32> @V6_vavgbrnd(<16 x i32> %a, <16 x i32> %b) {
%c = call <16 x i32> @llvm.hexagon.V6.vavgbrnd(<16 x i32> %a, <16 x i32> %b)
ret <16 x i32> %c
}
; CHECK: = vavg(v0.b,v1.b):rnd
declare <16 x i32> @llvm.hexagon.V6.vnavgb(<16 x i32>, <16 x i32>)
define <16 x i32> @V6_vnavgb(<16 x i32> %a, <16 x i32> %b) {
%c = call <16 x i32> @llvm.hexagon.V6.vnavgb(<16 x i32> %a, <16 x i32> %b)
ret <16 x i32> %c
}
; CHECK: = vnavg(v0.b,v1.b)
declare <32 x i32> @llvm.hexagon.V6.vmpabuu(<32 x i32>, i32)
define <32 x i32> @V6_vmpabuu(<32 x i32> %a, i32 %b) {
%c = call <32 x i32> @llvm.hexagon.V6.vmpabuu(<32 x i32> %a, i32 %b)
ret <32 x i32> %c
}
; CHECK: = vmpa(v1:0.ub,r0.ub)
declare <32 x i32> @llvm.hexagon.V6.vmpabuu.acc(<32 x i32>, <32 x i32>, i32)
define <32 x i32> @V6_vmpabuu_acc(<32 x i32> %a, <32 x i32> %b, i32 %c) {
%d = call <32 x i32> @llvm.hexagon.V6.vmpabuu.acc(<32 x i32> %a, <32 x i32> %b, i32 %c)
ret <32 x i32> %d
}
; CHECK: += vmpa(v3:2.ub,r0.ub)
declare <16 x i32> @llvm.hexagon.V6.vmpauhuhsat(<16 x i32>, <16 x i32>, i64)
define <16 x i32> @V6_vmpauhuhsat(<16 x i32> %a, <16 x i32> %b, i64 %c) {
%d = call <16 x i32> @llvm.hexagon.V6.vmpauhuhsat(<16 x i32> %a, <16 x i32> %b, i64 %c)
ret <16 x i32> %d
}
; CHECK: = vmpa(v0.h,v1.uh,r1:0.uh):sat
declare <16 x i32> @llvm.hexagon.V6.vmpsuhuhsat(<16 x i32>, <16 x i32>, i64)
define <16 x i32> @V6_vmpsuhuhsat(<16 x i32> %a, <16 x i32> %b, i64 %c) {
%d = call <16 x i32> @llvm.hexagon.V6.vmpsuhuhsat(<16 x i32> %a, <16 x i32> %b, i64 %c)
ret <16 x i32> %d
}
; CHECK: = vmps(v0.h,v1.uh,r1:0.uh):sat
declare <32 x i32> @llvm.hexagon.V6.vmpyh.acc(<32 x i32>, <16 x i32>, i32)
define <32 x i32> @V6_vmpyh_acc(<32 x i32> %a, <16 x i32> %b, i32 %c) {
%d = call <32 x i32> @llvm.hexagon.V6.vmpyh.acc(<32 x i32> %a, <16 x i32> %b, i32 %c)
ret <32 x i32> %d
}
; CHECK: += vmpy(v2.h,r0.h)
declare <16 x i32> @llvm.hexagon.V6.vmpyuhe(<16 x i32>, i32)
define <16 x i32> @V6_vmpyuhe(<16 x i32> %a, i32 %b) {
%c = call <16 x i32> @llvm.hexagon.V6.vmpyuhe(<16 x i32> %a, i32 %b)
ret <16 x i32> %c
}
; CHECK: = vmpye(v0.uh,r0.uh)
;declare <16 x i32> @llvm.hexagon.V6.vprefixqb(<512 x i1>)
;define <16 x i32> @V6_vprefixqb(<512 x i1> %a) {
; %b = call <16 x i32> @llvm.hexagon.V6.vprefixqb(<512 x i1> %a)
; ret <16 x i32> %b
;}
;declare <16 x i32> @llvm.hexagon.V6.vprefixqh(<512 x i1>)
;define <16 x i32> @V6_vprefixqh(<512 x i1> %a) {
; %b = call <16 x i32> @llvm.hexagon.V6.vprefixqh(<512 x i1> %a)
; ret <16 x i32> %b
;}
;declare <16 x i32> @llvm.hexagon.V6.vprefixqw(<512 x i1>)
;define <16 x i32> @V6_vprefixqw(<512 x i1> %a) {
; %b = call <16 x i32> @llvm.hexagon.V6.vprefixqw(<512 x i1> %a)
; ret <16 x i32> %b
;}

File diff suppressed because it is too large

@@ -0,0 +1,332 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.2 XTYPE/BIT
; CHECK-CALL-NOT: call
; Count leading
declare i32 @llvm.hexagon.S2.clbp(i64)
define i32 @S2_clbp(i64 %a) {
%z = call i32 @llvm.hexagon.S2.clbp(i64 %a)
ret i32 %z
}
; CHECK: = clb({{.*}})
declare i32 @llvm.hexagon.S2.cl0p(i64)
define i32 @S2_cl0p(i64 %a) {
%z = call i32 @llvm.hexagon.S2.cl0p(i64 %a)
ret i32 %z
}
; CHECK: = cl0({{.*}})
declare i32 @llvm.hexagon.S2.cl1p(i64)
define i32 @S2_cl1p(i64 %a) {
%z = call i32 @llvm.hexagon.S2.cl1p(i64 %a)
ret i32 %z
}
; CHECK: = cl1({{.*}})
declare i32 @llvm.hexagon.S4.clbpnorm(i64)
define i32 @S4_clbpnorm(i64 %a) {
%z = call i32 @llvm.hexagon.S4.clbpnorm(i64 %a)
ret i32 %z
}
; CHECK: = normamt({{.*}})
declare i32 @llvm.hexagon.S4.clbpaddi(i64, i32)
define i32 @S4_clbpaddi(i64 %a) {
%z = call i32 @llvm.hexagon.S4.clbpaddi(i64 %a, i32 0)
ret i32 %z
}
; CHECK: = add(clb({{.*}}),#0)
declare i32 @llvm.hexagon.S4.clbaddi(i32, i32)
define i32 @S4_clbaddi(i32 %a) {
%z = call i32 @llvm.hexagon.S4.clbaddi(i32 %a, i32 0)
ret i32 %z
}
; CHECK: = add(clb({{.*}}),#0)
declare i32 @llvm.hexagon.S2.cl0(i32)
define i32 @S2_cl0(i32 %a) {
%z = call i32 @llvm.hexagon.S2.cl0(i32 %a)
ret i32 %z
}
; CHECK: = cl0({{.*}})
declare i32 @llvm.hexagon.S2.cl1(i32)
define i32 @S2_cl1(i32 %a) {
%z = call i32 @llvm.hexagon.S2.cl1(i32 %a)
ret i32 %z
}
; CHECK: = cl1({{.*}})
declare i32 @llvm.hexagon.S2.clbnorm(i32)
define i32 @S4_clbnorm(i32 %a) {
%z = call i32 @llvm.hexagon.S2.clbnorm(i32 %a)
ret i32 %z
}
; CHECK: = normamt({{.*}})
; Count population
declare i32 @llvm.hexagon.S5.popcountp(i64)
define i32 @S5_popcountp(i64 %a) {
%z = call i32 @llvm.hexagon.S5.popcountp(i64 %a)
ret i32 %z
}
; CHECK: = popcount({{.*}})
; Count trailing
declare i32 @llvm.hexagon.S2.ct0p(i64)
define i32 @S2_ct0p(i64 %a) {
%z = call i32 @llvm.hexagon.S2.ct0p(i64 %a)
ret i32 %z
}
; CHECK: = ct0({{.*}})
declare i32 @llvm.hexagon.S2.ct1p(i64)
define i32 @S2_ct1p(i64 %a) {
%z = call i32 @llvm.hexagon.S2.ct1p(i64 %a)
ret i32 %z
}
; CHECK: = ct1({{.*}})
declare i32 @llvm.hexagon.S2.ct0(i32)
define i32 @S2_ct0(i32 %a) {
%z = call i32 @llvm.hexagon.S2.ct0(i32 %a)
ret i32 %z
}
; CHECK: = ct0({{.*}})
declare i32 @llvm.hexagon.S2.ct1(i32)
define i32 @S2_ct1(i32 %a) {
%z = call i32 @llvm.hexagon.S2.ct1(i32 %a)
ret i32 %z
}
; CHECK: = ct1({{.*}})
; Extract bitfield
declare i64 @llvm.hexagon.S2.extractup(i64, i32, i32)
define i64 @S2_extractup(i64 %a) {
%z = call i64 @llvm.hexagon.S2.extractup(i64 %a, i32 0, i32 0)
ret i64 %z
}
; CHECK: = extractu({{.*}},#0,#0)
declare i64 @llvm.hexagon.S4.extractp(i64, i32, i32)
define i64 @S2_extractp(i64 %a) {
%z = call i64 @llvm.hexagon.S4.extractp(i64 %a, i32 0, i32 0)
ret i64 %z
}
; CHECK: = extract({{.*}},#0,#0)
declare i32 @llvm.hexagon.S2.extractu(i32, i32, i32)
define i32 @S2_extractu(i32 %a) {
%z = call i32 @llvm.hexagon.S2.extractu(i32 %a, i32 0, i32 0)
ret i32 %z
}
; CHECK: = extractu({{.*}},#0,#0)
declare i32 @llvm.hexagon.S4.extract(i32, i32, i32)
define i32 @S2_extract(i32 %a) {
%z = call i32 @llvm.hexagon.S4.extract(i32 %a, i32 0, i32 0)
ret i32 %z
}
; CHECK: = extract({{.*}},#0,#0)
declare i64 @llvm.hexagon.S2.extractup.rp(i64, i64)
define i64 @S2_extractup_rp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.extractup.rp(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = extractu({{.*}},{{.*}})
declare i64 @llvm.hexagon.S4.extractp.rp(i64, i64)
define i64 @S4_extractp_rp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.extractp.rp(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = extract({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.extractu.rp(i32, i64)
define i32 @S2_extractu_rp(i32 %a, i64 %b) {
%z = call i32 @llvm.hexagon.S2.extractu.rp(i32 %a, i64 %b)
ret i32 %z
}
; CHECK: = extractu({{.*}},{{.*}})
declare i32 @llvm.hexagon.S4.extract.rp(i32, i64)
define i32 @S4_extract_rp(i32 %a, i64 %b) {
%z = call i32 @llvm.hexagon.S4.extract.rp(i32 %a, i64 %b)
ret i32 %z
}
; CHECK: = extract({{.*}},{{.*}})
; Insert bitfield
declare i64 @llvm.hexagon.S2.insertp(i64, i64, i32, i32)
define i64 @S2_insertp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.insertp(i64 %a, i64 %b, i32 0, i32 0)
ret i64 %z
}
; CHECK: = insert({{.*}},#0,#0)
declare i32 @llvm.hexagon.S2.insert(i32, i32, i32, i32)
define i32 @S2_insert(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.insert(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
; CHECK: = insert({{.*}},#0,#0)
declare i32 @llvm.hexagon.S2.insert.rp(i32, i32, i64)
define i32 @S2_insert_rp(i32 %a, i32 %b, i64 %c) {
%z = call i32 @llvm.hexagon.S2.insert.rp(i32 %a, i32 %b, i64 %c)
ret i32 %z
}
; CHECK: = insert({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.insertp.rp(i64, i64, i64)
define i64 @S2_insertp_rp(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.S2.insertp.rp(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
; CHECK: = insert({{.*}},r5:4)
; Interleave/deinterleave
declare i64 @llvm.hexagon.S2.deinterleave(i64)
define i64 @S2_deinterleave(i64 %a) {
%z = call i64 @llvm.hexagon.S2.deinterleave(i64 %a)
ret i64 %z
}
; CHECK: = deinterleave({{.*}})
declare i64 @llvm.hexagon.S2.interleave(i64)
define i64 @S2_interleave(i64 %a) {
%z = call i64 @llvm.hexagon.S2.interleave(i64 %a)
ret i64 %z
}
; CHECK: = interleave({{.*}})
; Linear feedback-shift operation
declare i64 @llvm.hexagon.S2.lfsp(i64, i64)
define i64 @S2_lfsp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lfsp(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = lfs({{.*}},{{.*}})
; Masked parity
declare i32 @llvm.hexagon.S2.parityp(i64, i64)
define i32 @S2_parityp(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.S2.parityp(i64 %a, i64 %b)
ret i32 %z
}
; CHECK: = parity({{.*}},{{.*}})
declare i32 @llvm.hexagon.S4.parity(i32, i32)
define i32 @S4_parity(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.parity(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = parity({{.*}},{{.*}})
; Bit reverse
declare i64 @llvm.hexagon.S2.brevp(i64)
define i64 @S2_brevp(i64 %a) {
%z = call i64 @llvm.hexagon.S2.brevp(i64 %a)
ret i64 %z
}
; CHECK: = brev({{.*}})
declare i32 @llvm.hexagon.S2.brev(i32)
define i32 @S2_brev(i32 %a) {
%z = call i32 @llvm.hexagon.S2.brev(i32 %a)
ret i32 %z
}
; CHECK: = brev({{.*}})
; Set/clear/toggle bit
declare i32 @llvm.hexagon.S2.setbit.i(i32, i32)
define i32 @S2_setbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0)
ret i32 %z
}
; CHECK: = setbit({{.*}},#0)
declare i32 @llvm.hexagon.S2.clrbit.i(i32, i32)
define i32 @S2_clrbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.clrbit.i(i32 %a, i32 0)
ret i32 %z
}
; CHECK: = clrbit({{.*}},#0)
declare i32 @llvm.hexagon.S2.togglebit.i(i32, i32)
define i32 @S2_togglebit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.togglebit.i(i32 %a, i32 0)
ret i32 %z
}
; CHECK: = togglebit({{.*}},#0)
declare i32 @llvm.hexagon.S2.setbit.r(i32, i32)
define i32 @S2_setbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.setbit.r(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = setbit({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.clrbit.r(i32, i32)
define i32 @S2_clrbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.clrbit.r(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = clrbit({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.togglebit.r(i32, i32)
define i32 @S2_togglebit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.togglebit.r(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = togglebit({{.*}},{{.*}})
; Split bitfield
declare i64 @llvm.hexagon.A4.bitspliti(i32, i32)
define i64 @A4_bitspliti(i32 %a) {
%z = call i64 @llvm.hexagon.A4.bitspliti(i32 %a, i32 0)
ret i64 %z
}
; CHECK: = bitsplit({{.*}},#0)
declare i64 @llvm.hexagon.A4.bitsplit(i32, i32)
define i64 @A4_bitsplit(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.A4.bitsplit(i32 %a, i32 %b)
ret i64 %z
}
; CHECK: = bitsplit({{.*}},{{.*}})
; Table index
declare i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxb_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
; CHECK: = tableidxb({{.*}},#0,#0)
declare i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxh_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
; CHECK: = tableidxh({{.*}},#0,#-1)
declare i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxw_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
; CHECK: = tableidxw({{.*}},#0,#-2)
declare i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxd_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
; CHECK: = tableidxd({{.*}},#0,#-3)

@@ -0,0 +1,352 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.3 XTYPE/COMPLEX
; CHECK-CALL-NOT: call
; Complex add/sub halfwords
declare i64 @llvm.hexagon.S4.vxaddsubh(i64, i64)
define i64 @S4_vxaddsubh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxaddsubh(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vxaddsubh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.S4.vxsubaddh(i64, i64)
define i64 @S4_vxsubaddh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxsubaddh(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vxsubaddh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64)
define i64 @S4_vxaddsubhr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vxaddsubh({{.*}},{{.*}}):rnd:>>1:sat
declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64)
define i64 @S4_vxsubaddhr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vxsubaddh({{.*}},{{.*}}):rnd:>>1:sat
; Complex add/sub words
declare i64 @llvm.hexagon.S4.vxaddsubw(i64, i64)
define i64 @S4_vxaddsubw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxaddsubw(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vxaddsubw({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.S4.vxsubaddw(i64, i64)
define i64 @S4_vxsubaddw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxsubaddw(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vxsubaddw({{.*}},{{.*}}):sat
; Complex multiply
declare i64 @llvm.hexagon.M2.cmpys.s0(i32, i32)
define i64 @M2_cmpys_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpys.s0(i32 %a, i32 %b)
ret i64 %z
}
; CHECK: = cmpy({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.cmpys.s1(i32, i32)
define i64 @M2_cmpys_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpys.s1(i32 %a, i32 %b)
ret i64 %z
}
; CHECK: = cmpy({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.cmpysc.s0(i32, i32)
define i64 @M2_cmpysc_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpysc.s0(i32 %a, i32 %b)
ret i64 %z
}
; CHECK: = cmpy({{.*}},{{.*}}*):sat
declare i64 @llvm.hexagon.M2.cmpysc.s1(i32, i32)
define i64 @M2_cmpysc_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpysc.s1(i32 %a, i32 %b)
ret i64 %z
}
; CHECK: = cmpy({{.*}},{{.*}}*):<<1:sat
declare i64 @llvm.hexagon.M2.cmacs.s0(i64, i32, i32)
define i64 @M2_cmacs_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacs.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
; CHECK: += cmpy({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.cmacs.s1(i64, i32, i32)
define i64 @M2_cmacs_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacs.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
; CHECK: += cmpy({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.cnacs.s0(i64, i32, i32)
define i64 @M2_cnacs_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacs.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
; CHECK: -= cmpy({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.cnacs.s1(i64, i32, i32)
define i64 @M2_cnacs_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacs.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
; CHECK: -= cmpy({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.cmacsc.s0(i64, i32, i32)
define i64 @M2_cmacsc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacsc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
; CHECK: += cmpy({{.*}},{{.*}}*):sat
declare i64 @llvm.hexagon.M2.cmacsc.s1(i64, i32, i32)
define i64 @M2_cmacsc_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacsc.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
; CHECK: += cmpy({{.*}},{{.*}}*):<<1:sat
declare i64 @llvm.hexagon.M2.cnacsc.s0(i64, i32, i32)
define i64 @M2_cnacsc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacsc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
; CHECK: -= cmpy({{.*}},{{.*}}*):sat
declare i64 @llvm.hexagon.M2.cnacsc.s1(i64, i32, i32)
define i64 @M2_cnacsc_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacsc.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
; CHECK: -= cmpy({{.*}},{{.*}}*):<<1:sat
; Complex multiply real or imaginary
declare i64 @llvm.hexagon.M2.cmpyi.s0(i32, i32)
define i64 @M2_cmpyi_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpyi.s0(i32 %a, i32 %b)
ret i64 %z
}
; CHECK: = cmpyi({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.cmpyr.s0(i32, i32)
define i64 @M2_cmpyr_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpyr.s0(i32 %a, i32 %b)
ret i64 %z
}
; CHECK: = cmpyr({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.cmaci.s0(i64, i32, i32)
define i64 @M2_cmaci_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmaci.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
; CHECK: += cmpyi({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.cmacr.s0(i64, i32, i32)
define i64 @M2_cmacr_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacr.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
; CHECK: += cmpyr({{.*}},{{.*}})
; Complex multiply with round and pack
declare i32 @llvm.hexagon.M2.cmpyrs.s0(i32, i32)
define i32 @M2_cmpyrs_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrs.s0(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = cmpy({{.*}},{{.*}}):rnd:sat
declare i32 @llvm.hexagon.M2.cmpyrs.s1(i32, i32)
define i32 @M2_cmpyrs_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrs.s1(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = cmpy({{.*}},{{.*}}):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.cmpyrsc.s0(i32, i32)
define i32 @M2_cmpyrsc_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrsc.s0(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = cmpy({{.*}},{{.*}}*):rnd:sat
declare i32 @llvm.hexagon.M2.cmpyrsc.s1(i32, i32)
define i32 @M2_cmpyrsc_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrsc.s1(i32 %a, i32 %b)
ret i32 %z
}
; CHECK: = cmpy({{.*}},{{.*}}*):<<1:rnd:sat
; Complex multiply 32x16
declare i32 @llvm.hexagon.M4.cmpyi.wh(i64, i32)
define i32 @M4_cmpyi_wh(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 %a, i32 %b)
ret i32 %z
}
; CHECK: = cmpyiwh({{.*}},{{.*}}):<<1:rnd:sat
declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32)
define i32 @M4_cmpyi_whc(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %a, i32 %b)
ret i32 %z
}
; CHECK: = cmpyiwh({{.*}},{{.*}}*):<<1:rnd:sat
declare i32 @llvm.hexagon.M4.cmpyr.wh(i64, i32)
define i32 @M4_cmpyr_wh(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyr.wh(i64 %a, i32 %b)
ret i32 %z
}
; CHECK: = cmpyrwh({{.*}},{{.*}}):<<1:rnd:sat
declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32)
define i32 @M4_cmpyr_whc(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %a, i32 %b)
ret i32 %z
}
; CHECK: = cmpyrwh({{.*}},{{.*}}*):<<1:rnd:sat
; Vector complex multiply real or imaginary
declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64, i64)
define i64 @M2_vcmpy_s0_sat_r(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vcmpyr({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64, i64)
define i64 @M2_vcmpy_s1_sat_r(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vcmpyr({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64, i64)
define i64 @M2_vcmpy_s0_sat_i(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vcmpyi({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64, i64)
define i64 @M2_vcmpy_s1_sat_i(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vcmpyi({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64, i64, i64)
define i64 @M2_vcmac_s0_sat_r(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
; CHECK: += vcmpyr({{.*}},r5:4):sat
declare i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64, i64, i64)
define i64 @M2_vcmac_s0_sat_i(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
; CHECK: += vcmpyi({{.*}},r5:4):sat
; Vector complex conjugate
declare i64 @llvm.hexagon.A2.vconj(i64)
define i64 @A2_vconj(i64 %a) {
%z = call i64 @llvm.hexagon.A2.vconj(i64 %a)
ret i64 %z
}
; CHECK: = vconj({{.*}}):sat
; Vector complex rotate
declare i64 @llvm.hexagon.S2.vcrotate(i64, i32)
define i64 @S2_vcrotate(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.vcrotate(i64 %a, i32 %b)
ret i64 %z
}
; CHECK: = vcrotate({{.*}},{{.*}})
; Vector reduce complex multiply real or imaginary
declare i64 @llvm.hexagon.M2.vrcmpyi.s0(i64, i64)
define i64 @M2_vrcmpyi_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyi.s0(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vrcmpyi({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.vrcmpyr.s0(i64, i64)
define i64 @M2_vrcmpyr_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vrcmpyr({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64, i64)
define i64 @M2_vrcmpyi_s0c(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vrcmpyi({{.*}},{{.*}}*)
declare i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64, i64)
define i64 @M2_vrcmpyr_s0c(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vrcmpyr({{.*}},{{.*}}*)
declare i64 @llvm.hexagon.M2.vrcmaci.s0(i64, i64, i64)
define i64 @M2_vrcmaci_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmaci.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
; CHECK: += vrcmpyi({{.*}},r5:4)
declare i64 @llvm.hexagon.M2.vrcmacr.s0(i64, i64, i64)
define i64 @M2_vrcmacr_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmacr.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
; CHECK: += vrcmpyr({{.*}},r5:4)
declare i64 @llvm.hexagon.M2.vrcmaci.s0c(i64, i64, i64)
define i64 @M2_vrcmaci_s0c(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmaci.s0c(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
; CHECK: += vrcmpyi({{.*}},r5:4*)
declare i64 @llvm.hexagon.M2.vrcmacr.s0c(i64, i64, i64)
define i64 @M2_vrcmacr_s0c(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmacr.s0c(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
; CHECK: += vrcmpyr({{.*}},r5:4*)
; Vector reduce complex rotate
declare i64 @llvm.hexagon.S4.vrcrotate(i64, i32, i32)
define i64 @S4_vrcrotate(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S4.vrcrotate(i64 %a, i32 %b, i32 0)
ret i64 %z
}
; CHECK: = vrcrotate({{.*}},{{.*}},#0)
declare i64 @llvm.hexagon.S4.vrcrotate.acc(i64, i64, i32, i32)
define i64 @S4_vrcrotate_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S4.vrcrotate.acc(i64 %a, i64 %b, i32 %c, i32 0)
ret i64 %z
}
; CHECK: += vrcrotate({{.*}},{{.*}},#0)


@@ -0,0 +1,392 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s
; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | \
; RUN: FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.4 XTYPE/FP
; CHECK-CALL-NOT: call
; Floating point addition
declare float @llvm.hexagon.F2.sfadd(float, float)
define float @F2_sfadd(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfadd(float %a, float %b)
ret float %z
}
; CHECK: = sfadd({{.*}},{{.*}})
; Classify floating-point value
declare i32 @llvm.hexagon.F2.sfclass(float, i32)
define i32 @F2_sfclass(float %a) {
%z = call i32 @llvm.hexagon.F2.sfclass(float %a, i32 0)
ret i32 %z
}
; CHECK: = sfclass({{.*}},#0)
declare i32 @llvm.hexagon.F2.dfclass(double, i32)
define i32 @F2_dfclass(double %a) {
%z = call i32 @llvm.hexagon.F2.dfclass(double %a, i32 0)
ret i32 %z
}
; CHECK: = dfclass({{.*}},#0)
; Compare floating-point value
declare i32 @llvm.hexagon.F2.sfcmpge(float, float)
define i32 @F2_sfcmpge(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpge(float %a, float %b)
ret i32 %z
}
; CHECK: = sfcmp.ge({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.sfcmpuo(float, float)
define i32 @F2_sfcmpuo(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpuo(float %a, float %b)
ret i32 %z
}
; CHECK: = sfcmp.uo({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.sfcmpeq(float, float)
define i32 @F2_sfcmpeq(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpeq(float %a, float %b)
ret i32 %z
}
; CHECK: = sfcmp.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.sfcmpgt(float, float)
define i32 @F2_sfcmpgt(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpgt(float %a, float %b)
ret i32 %z
}
; CHECK: = sfcmp.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.dfcmpge(double, double)
define i32 @F2_dfcmpge(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpge(double %a, double %b)
ret i32 %z
}
; CHECK: = dfcmp.ge({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.dfcmpuo(double, double)
define i32 @F2_dfcmpuo(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpuo(double %a, double %b)
ret i32 %z
}
; CHECK: = dfcmp.uo({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.dfcmpeq(double, double)
define i32 @F2_dfcmpeq(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpeq(double %a, double %b)
ret i32 %z
}
; CHECK: = dfcmp.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.dfcmpgt(double, double)
define i32 @F2_dfcmpgt(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpgt(double %a, double %b)
ret i32 %z
}
; CHECK: = dfcmp.gt({{.*}},{{.*}})
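; A usage sketch (illustration only, no CHECK lines): the compare intrinsics
; return the predicate in an i32, so an ordered maximum can be built with a
; select. This assumes a true predicate comes back as a nonzero value.
define float @sf_max_via_cmp(float %a, float %b) {
  %p = call i32 @llvm.hexagon.F2.sfcmpgt(float %a, float %b)
  %t = icmp ne i32 %p, 0
  %z = select i1 %t, float %a, float %b
  ret float %z
}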
; Convert floating-point value to other format
declare double @llvm.hexagon.F2.conv.sf2df(float)
define double @F2_conv_sf2df(float %a) {
%z = call double @llvm.hexagon.F2.conv.sf2df(float %a)
ret double %z
}
; CHECK: = convert_sf2df({{.*}})
declare float @llvm.hexagon.F2.conv.df2sf(double)
define float @F2_conv_df2sf(double %a) {
%z = call float @llvm.hexagon.F2.conv.df2sf(double %a)
ret float %z
}
; CHECK: = convert_df2sf({{.*}})
; Convert integer to floating-point value
declare double @llvm.hexagon.F2.conv.ud2df(i64)
define double @F2_conv_ud2df(i64 %a) {
%z = call double @llvm.hexagon.F2.conv.ud2df(i64 %a)
ret double %z
}
; CHECK: = convert_ud2df({{.*}})
declare double @llvm.hexagon.F2.conv.d2df(i64)
define double @F2_conv_d2df(i64 %a) {
%z = call double @llvm.hexagon.F2.conv.d2df(i64 %a)
ret double %z
}
; CHECK: = convert_d2df({{.*}})
declare double @llvm.hexagon.F2.conv.uw2df(i32)
define double @F2_conv_uw2df(i32 %a) {
%z = call double @llvm.hexagon.F2.conv.uw2df(i32 %a)
ret double %z
}
; CHECK: = convert_uw2df({{.*}})
declare double @llvm.hexagon.F2.conv.w2df(i32)
define double @F2_conv_w2df(i32 %a) {
%z = call double @llvm.hexagon.F2.conv.w2df(i32 %a)
ret double %z
}
; CHECK: = convert_w2df({{.*}})
declare float @llvm.hexagon.F2.conv.ud2sf(i64)
define float @F2_conv_ud2sf(i64 %a) {
%z = call float @llvm.hexagon.F2.conv.ud2sf(i64 %a)
ret float %z
}
; CHECK: = convert_ud2sf({{.*}})
declare float @llvm.hexagon.F2.conv.d2sf(i64)
define float @F2_conv_d2sf(i64 %a) {
%z = call float @llvm.hexagon.F2.conv.d2sf(i64 %a)
ret float %z
}
; CHECK: = convert_d2sf({{.*}})
declare float @llvm.hexagon.F2.conv.uw2sf(i32)
define float @F2_conv_uw2sf(i32 %a) {
%z = call float @llvm.hexagon.F2.conv.uw2sf(i32 %a)
ret float %z
}
; CHECK: = convert_uw2sf({{.*}})
declare float @llvm.hexagon.F2.conv.w2sf(i32)
define float @F2_conv_w2sf(i32 %a) {
%z = call float @llvm.hexagon.F2.conv.w2sf(i32 %a)
ret float %z
}
; CHECK: = convert_w2sf({{.*}})
; Convert floating-point value to integer
declare i64 @llvm.hexagon.F2.conv.df2d(double)
define i64 @F2_conv_df2d(double %a) {
%z = call i64 @llvm.hexagon.F2.conv.df2d(double %a)
ret i64 %z
}
; CHECK: = convert_df2d({{.*}})
declare i64 @llvm.hexagon.F2.conv.df2ud(double)
define i64 @F2_conv_df2ud(double %a) {
%z = call i64 @llvm.hexagon.F2.conv.df2ud(double %a)
ret i64 %z
}
; CHECK: {{.*}} = convert_df2ud({{.*}})
declare i64 @llvm.hexagon.F2.conv.df2d.chop(double)
define i64 @F2_conv_df2d_chop(double %a) {
%z = call i64 @llvm.hexagon.F2.conv.df2d.chop(double %a)
ret i64 %z
}
; CHECK: = convert_df2d({{.*}}):chop
declare i64 @llvm.hexagon.F2.conv.df2ud.chop(double)
define i64 @F2_conv_df2ud_chop(double %a) {
%z = call i64 @llvm.hexagon.F2.conv.df2ud.chop(double %a)
ret i64 %z
}
; CHECK: = convert_df2ud({{.*}}):chop
declare i64 @llvm.hexagon.F2.conv.sf2ud(float)
define i64 @F2_conv_sf2ud(float %a) {
%z = call i64 @llvm.hexagon.F2.conv.sf2ud(float %a)
ret i64 %z
}
; CHECK: = convert_sf2ud({{.*}})
declare i64 @llvm.hexagon.F2.conv.sf2d(float)
define i64 @F2_conv_sf2d(float %a) {
%z = call i64 @llvm.hexagon.F2.conv.sf2d(float %a)
ret i64 %z
}
; CHECK: = convert_sf2d({{.*}})
declare i64 @llvm.hexagon.F2.conv.sf2d.chop(float)
define i64 @F2_conv_sf2d_chop(float %a) {
%z = call i64 @llvm.hexagon.F2.conv.sf2d.chop(float %a)
ret i64 %z
}
; CHECK: = convert_sf2d({{.*}}):chop
declare i64 @llvm.hexagon.F2.conv.sf2ud.chop(float)
define i64 @F2_conv_sf2ud_chop(float %a) {
%z = call i64 @llvm.hexagon.F2.conv.sf2ud.chop(float %a)
ret i64 %z
}
; CHECK: = convert_sf2ud({{.*}}):chop
declare i32 @llvm.hexagon.F2.conv.df2uw(double)
define i32 @F2_conv_df2uw(double %a) {
%z = call i32 @llvm.hexagon.F2.conv.df2uw(double %a)
ret i32 %z
}
; CHECK: = convert_df2uw({{.*}})
declare i32 @llvm.hexagon.F2.conv.df2w(double)
define i32 @F2_conv_df2w(double %a) {
%z = call i32 @llvm.hexagon.F2.conv.df2w(double %a)
ret i32 %z
}
; CHECK: = convert_df2w({{.*}})
declare i32 @llvm.hexagon.F2.conv.df2w.chop(double)
define i32 @F2_conv_df2w_chop(double %a) {
%z = call i32 @llvm.hexagon.F2.conv.df2w.chop(double %a)
ret i32 %z
}
; CHECK: = convert_df2w({{.*}}):chop
declare i32 @llvm.hexagon.F2.conv.df2uw.chop(double)
define i32 @F2_conv_df2uw_chop(double %a) {
%z = call i32 @llvm.hexagon.F2.conv.df2uw.chop(double %a)
ret i32 %z
}
; CHECK: = convert_df2uw({{.*}}):chop
declare i32 @llvm.hexagon.F2.conv.sf2uw(float)
define i32 @F2_conv_sf2uw(float %a) {
%z = call i32 @llvm.hexagon.F2.conv.sf2uw(float %a)
ret i32 %z
}
; CHECK: = convert_sf2uw({{.*}})
declare i32 @llvm.hexagon.F2.conv.sf2uw.chop(float)
define i32 @F2_conv_sf2uw_chop(float %a) {
%z = call i32 @llvm.hexagon.F2.conv.sf2uw.chop(float %a)
ret i32 %z
}
; CHECK: = convert_sf2uw({{.*}}):chop
declare i32 @llvm.hexagon.F2.conv.sf2w(float)
define i32 @F2_conv_sf2w(float %a) {
%z = call i32 @llvm.hexagon.F2.conv.sf2w(float %a)
ret i32 %z
}
; CHECK: = convert_sf2w({{.*}})
declare i32 @llvm.hexagon.F2.conv.sf2w.chop(float)
define i32 @F2_conv_sf2w_chop(float %a) {
%z = call i32 @llvm.hexagon.F2.conv.sf2w.chop(float %a)
ret i32 %z
}
; CHECK: = convert_sf2w({{.*}}):chop
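; A usage sketch (illustration only, no CHECK lines): a truncating round trip
; through the :chop conversion, roughly (float)(int)a for values that fit in a
; signed 32-bit integer.
define float @sf_trunc_roundtrip(float %a) {
  %i = call i32 @llvm.hexagon.F2.conv.sf2w.chop(float %a)
  %z = call float @llvm.hexagon.F2.conv.w2sf(i32 %i)
  ret float %z
}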
; Floating point extreme value assistance
declare float @llvm.hexagon.F2.sffixupr(float)
define float @F2_sffixupr(float %a) {
%z = call float @llvm.hexagon.F2.sffixupr(float %a)
ret float %z
}
; CHECK: = sffixupr({{.*}})
declare float @llvm.hexagon.F2.sffixupn(float, float)
define float @F2_sffixupn(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sffixupn(float %a, float %b)
ret float %z
}
; CHECK: = sffixupn({{.*}},{{.*}})
declare float @llvm.hexagon.F2.sffixupd(float, float)
define float @F2_sffixupd(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sffixupd(float %a, float %b)
ret float %z
}
; CHECK: = sffixupd({{.*}},{{.*}})
; Floating point fused multiply-add
declare float @llvm.hexagon.F2.sffma(float, float, float)
define float @F2_sffma(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffma(float %a, float %b, float %c)
ret float %z
}
; CHECK: += sfmpy({{.*}},{{.*}})
declare float @llvm.hexagon.F2.sffms(float, float, float)
define float @F2_sffms(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffms(float %a, float %b, float %c)
ret float %z
}
; CHECK: -= sfmpy({{.*}},{{.*}})
; Floating point fused multiply-add with scaling
declare float @llvm.hexagon.F2.sffma.sc(float, float, float, i32)
define float @F2_sffma_sc(float %a, float %b, float %c, i32 %d) {
%z = call float @llvm.hexagon.F2.sffma.sc(float %a, float %b, float %c, i32 %d)
ret float %z
}
; CHECK: += sfmpy({{.*}},{{.*}},{{.*}}):scale
; Floating point fused multiply-add for library routines
declare float @llvm.hexagon.F2.sffma.lib(float, float, float)
define float @F2_sffma_lib(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffma.lib(float %a, float %b, float %c)
ret float %z
}
; CHECK: += sfmpy({{.*}},{{.*}}):lib
declare float @llvm.hexagon.F2.sffms.lib(float, float, float)
define float @F2_sffms_lib(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffms.lib(float %a, float %b, float %c)
ret float %z
}
; CHECK: -= sfmpy({{.*}},{{.*}}):lib
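; A usage sketch (illustration only, no CHECK lines): chaining the fused
; multiply-add to evaluate c0 + x*(c1 + x*c2) Horner-style. This assumes the
; first operand is the accumulator, matching the += pattern checked above.
define float @sffma_horner(float %x, float %c0, float %c1, float %c2) {
  %inner = call float @llvm.hexagon.F2.sffma(float %c1, float %x, float %c2)
  %z = call float @llvm.hexagon.F2.sffma(float %c0, float %x, float %inner)
  ret float %z
}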
; Create floating-point constant
declare float @llvm.hexagon.F2.sfimm.p(i32)
define float @F2_sfimm_p() {
%z = call float @llvm.hexagon.F2.sfimm.p(i32 0)
ret float %z
}
; CHECK: = sfmake(#0):pos
declare float @llvm.hexagon.F2.sfimm.n(i32)
define float @F2_sfimm_n() {
%z = call float @llvm.hexagon.F2.sfimm.n(i32 0)
ret float %z
}
; CHECK: = sfmake(#0):neg
declare double @llvm.hexagon.F2.dfimm.p(i32)
define double @F2_dfimm_p() {
%z = call double @llvm.hexagon.F2.dfimm.p(i32 0)
ret double %z
}
; CHECK: = dfmake(#0):pos
declare double @llvm.hexagon.F2.dfimm.n(i32)
define double @F2_dfimm_n() {
%z = call double @llvm.hexagon.F2.dfimm.n(i32 0)
ret double %z
}
; CHECK: = dfmake(#0):neg
; Floating point maximum
declare float @llvm.hexagon.F2.sfmax(float, float)
define float @F2_sfmax(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmax(float %a, float %b)
ret float %z
}
; CHECK: = sfmax({{.*}},{{.*}})
; Floating point minimum
declare float @llvm.hexagon.F2.sfmin(float, float)
define float @F2_sfmin(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmin(float %a, float %b)
ret float %z
}
; CHECK: = sfmin({{.*}},{{.*}})
; Floating point multiply
declare float @llvm.hexagon.F2.sfmpy(float, float)
define float @F2_sfmpy(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmpy(float %a, float %b)
ret float %z
}
; CHECK: = sfmpy({{.*}},{{.*}})
; Floating point subtraction
declare float @llvm.hexagon.F2.sfsub(float, float)
define float @F2_sfsub(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfsub(float %a, float %b)
ret float %z
}
; CHECK: = sfsub({{.*}},{{.*}})

File diff suppressed because it is too large


@@ -0,0 +1,255 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.6 XTYPE/PERM
; CHECK-CALL-NOT: call
; Saturate
declare i32 @llvm.hexagon.A2.sat(i64)
define i32 @A2_sat(i64 %a) {
%z = call i32 @llvm.hexagon.A2.sat(i64 %a)
ret i32 %z
}
; CHECK: = sat({{.*}})
declare i32 @llvm.hexagon.A2.sath(i32)
define i32 @A2_sath(i32 %a) {
%z = call i32 @llvm.hexagon.A2.sath(i32 %a)
ret i32 %z
}
; CHECK: = sath({{.*}})
declare i32 @llvm.hexagon.A2.satuh(i32)
define i32 @A2_satuh(i32 %a) {
%z = call i32 @llvm.hexagon.A2.satuh(i32 %a)
ret i32 %z
}
; CHECK: = satuh({{.*}})
declare i32 @llvm.hexagon.A2.satub(i32)
define i32 @A2_satub(i32 %a) {
%z = call i32 @llvm.hexagon.A2.satub(i32 %a)
ret i32 %z
}
; CHECK: = satub({{.*}})
declare i32 @llvm.hexagon.A2.satb(i32)
define i32 @A2_satb(i32 %a) {
%z = call i32 @llvm.hexagon.A2.satb(i32 %a)
ret i32 %z
}
; CHECK: = satb({{.*}})
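; A reference sketch (illustration only, no CHECK lines): satb clamps a 32-bit
; value to the signed byte range, which this plain-IR clamp to [-128, 127]
; reproduces.
define i32 @satb_reference(i32 %a) {
  %lt = icmp slt i32 %a, -128
  %lo = select i1 %lt, i32 -128, i32 %a
  %gt = icmp sgt i32 %lo, 127
  %z = select i1 %gt, i32 127, i32 %lo
  ret i32 %z
}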
; Swizzle bytes
declare i32 @llvm.hexagon.A2.swiz(i32)
define i32 @A2_swiz(i32 %a) {
%z = call i32 @llvm.hexagon.A2.swiz(i32 %a)
ret i32 %z
}
; CHECK: = swiz({{.*}})
; Vector round and pack
declare i32 @llvm.hexagon.S2.vrndpackwh(i64)
define i32 @S2_vrndpackwh(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vrndpackwh(i64 %a)
ret i32 %z
}
; CHECK: = vrndwh({{.*}})
declare i32 @llvm.hexagon.S2.vrndpackwhs(i64)
define i32 @S2_vrndpackwhs(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vrndpackwhs(i64 %a)
ret i32 %z
}
; CHECK: = vrndwh({{.*}}):sat
; Vector saturate and pack
declare i32 @llvm.hexagon.S2.vsathub(i64)
define i32 @S2_vsathub(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vsathub(i64 %a)
ret i32 %z
}
; CHECK: = vsathub({{.*}})
declare i32 @llvm.hexagon.S2.vsatwh(i64)
define i32 @S2_vsatwh(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vsatwh(i64 %a)
ret i32 %z
}
; CHECK: = vsatwh({{.*}})
declare i32 @llvm.hexagon.S2.vsatwuh(i64)
define i32 @S2_vsatwuh(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vsatwuh(i64 %a)
ret i32 %z
}
; CHECK: = vsatwuh({{.*}})
declare i32 @llvm.hexagon.S2.vsathb(i64)
define i32 @S2_vsathb(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vsathb(i64 %a)
ret i32 %z
}
; CHECK: = vsathb({{.*}})
declare i32 @llvm.hexagon.S2.svsathb(i32)
define i32 @S2_svsathb(i32 %a) {
%z = call i32 @llvm.hexagon.S2.svsathb(i32 %a)
ret i32 %z
}
; CHECK: = vsathb({{.*}})
declare i32 @llvm.hexagon.S2.svsathub(i32)
define i32 @S2_svsathub(i32 %a) {
%z = call i32 @llvm.hexagon.S2.svsathub(i32 %a)
ret i32 %z
}
; CHECK: = vsathub({{.*}})
; Vector saturate without pack
declare i64 @llvm.hexagon.S2.vsathub.nopack(i64)
define i64 @S2_vsathub_nopack(i64 %a) {
%z = call i64 @llvm.hexagon.S2.vsathub.nopack(i64 %a)
ret i64 %z
}
; CHECK: = vsathub({{.*}})
declare i64 @llvm.hexagon.S2.vsatwuh.nopack(i64)
define i64 @S2_vsatwuh_nopack(i64 %a) {
%z = call i64 @llvm.hexagon.S2.vsatwuh.nopack(i64 %a)
ret i64 %z
}
; CHECK: = vsatwuh({{.*}})
declare i64 @llvm.hexagon.S2.vsatwh.nopack(i64)
define i64 @S2_vsatwh_nopack(i64 %a) {
%z = call i64 @llvm.hexagon.S2.vsatwh.nopack(i64 %a)
ret i64 %z
}
; CHECK: = vsatwh({{.*}})
declare i64 @llvm.hexagon.S2.vsathb.nopack(i64)
define i64 @S2_vsathb_nopack(i64 %a) {
%z = call i64 @llvm.hexagon.S2.vsathb.nopack(i64 %a)
ret i64 %z
}
; CHECK: = vsathb({{.*}})
; Vector shuffle
declare i64 @llvm.hexagon.S2.shuffeb(i64, i64)
define i64 @S2_shuffeb(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffeb(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = shuffeb({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.shuffob(i64, i64)
define i64 @S2_shuffob(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffob(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = shuffob({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.shuffeh(i64, i64)
define i64 @S2_shuffeh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffeh(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = shuffeh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.shuffoh(i64, i64)
define i64 @S2_shuffoh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffoh(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = shuffoh({{.*}},{{.*}})
; Vector splat bytes
declare i32 @llvm.hexagon.S2.vsplatrb(i32)
define i32 @S2_vsplatrb(i32 %a) {
%z = call i32 @llvm.hexagon.S2.vsplatrb(i32 %a)
ret i32 %z
}
; CHECK: = vsplatb({{.*}})
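; A usage sketch (illustration only, no CHECK lines): replicating one byte
; across all eight bytes of an i64 by combining two copies of the 32-bit splat.
define i64 @splat_byte_64(i32 %a) {
  %w = call i32 @llvm.hexagon.S2.vsplatrb(i32 %a)
  %lo = zext i32 %w to i64
  %hi = shl i64 %lo, 32
  %z = or i64 %hi, %lo
  ret i64 %z
}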
; Vector splat halfwords
declare i64 @llvm.hexagon.S2.vsplatrh(i32)
define i64 @S2_vsplatrh(i32 %a) {
%z = call i64 @llvm.hexagon.S2.vsplatrh(i32 %a)
ret i64 %z
}
; CHECK: = vsplath({{.*}})
; Vector splice
declare i64 @llvm.hexagon.S2.vspliceib(i64, i64, i32)
define i64 @S2_vspliceib(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.vspliceib(i64 %a, i64 %b, i32 0)
ret i64 %z
}
; CHECK: = vspliceb({{.*}},{{.*}},#0)
declare i64 @llvm.hexagon.S2.vsplicerb(i64, i64, i32)
define i64 @S2_vsplicerb(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.vsplicerb(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
; CHECK: = vspliceb({{.*}},{{.*}},{{.*}})
; Vector sign extend
declare i64 @llvm.hexagon.S2.vsxtbh(i32)
define i64 @S2_vsxtbh(i32 %a) {
%z = call i64 @llvm.hexagon.S2.vsxtbh(i32 %a)
ret i64 %z
}
; CHECK: = vsxtbh({{.*}})
declare i64 @llvm.hexagon.S2.vsxthw(i32)
define i64 @S2_vsxthw(i32 %a) {
%z = call i64 @llvm.hexagon.S2.vsxthw(i32 %a)
ret i64 %z
}
; CHECK: = vsxthw({{.*}})
; Vector truncate
declare i32 @llvm.hexagon.S2.vtrunohb(i64)
define i32 @S2_vtrunohb(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vtrunohb(i64 %a)
ret i32 %z
}
; CHECK: = vtrunohb({{.*}})
declare i32 @llvm.hexagon.S2.vtrunehb(i64)
define i32 @S2_vtrunehb(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vtrunehb(i64 %a)
ret i32 %z
}
; CHECK: = vtrunehb({{.*}})
declare i64 @llvm.hexagon.S2.vtrunowh(i64, i64)
define i64 @S2_vtrunowh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.vtrunowh(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vtrunowh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.vtrunewh(i64, i64)
define i64 @S2_vtrunewh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.vtrunewh(i64 %a, i64 %b)
ret i64 %z
}
; CHECK: = vtrunewh({{.*}},{{.*}})
; Vector zero extend
declare i64 @llvm.hexagon.S2.vzxtbh(i32)
define i64 @S2_vzxtbh(i32 %a) {
%z = call i64 @llvm.hexagon.S2.vzxtbh(i32 %a)
ret i64 %z
}
; CHECK: = vzxtbh({{.*}})
declare i64 @llvm.hexagon.S2.vzxthw(i32)
define i64 @S2_vzxthw(i32 %a) {
%z = call i64 @llvm.hexagon.S2.vzxthw(i32 %a)
ret i64 %z
}
; CHECK: = vzxthw({{.*}})
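; A usage sketch (illustration only, no CHECK lines): widening the two low
; bytes of %a to 32-bit words by chaining the byte-to-halfword and
; halfword-to-word zero extensions.
define i64 @zxt_bytes_to_words(i32 %a) {
  %h = call i64 @llvm.hexagon.S2.vzxtbh(i32 %a)
  %lo = trunc i64 %h to i32
  %z = call i64 @llvm.hexagon.S2.vzxthw(i32 %lo)
  ret i64 %z
}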

Some files were not shown because too many files have changed in this diff