Skip to content

Commit 49683ee

Browse files
authored
[RISCV] Add missing intrinsics vrgatherei16/vslideup/vslidedown support for Zvfbfmin (#146309)
LLVM IR intrinsics for vrgatherei16/vslideup/vslidedown have been supported in #146312.
1 parent dd96465 commit 49683ee

File tree

14 files changed

+2481
-3
lines changed

14 files changed

+2481
-3
lines changed

clang/include/clang/Basic/riscv_vector.td

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2313,7 +2313,7 @@ defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilxfdy",
23132313
[["vv", "v", "vvUv"]]>;
23142314
defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilxfdy",
23152315
[["vx", "v", "vvz"]]>;
2316-
defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csilxfd",
2316+
defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csilxfdy",
23172317
[["vv", "v", "vv(Log2EEW:4)Uv"]]>;
23182318
// unsigned type
23192319
defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csil",

clang/include/clang/Basic/riscv_vector_common.td

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -591,7 +591,7 @@ class RVVMaskOp0Builtin<string prototype> : RVVOp0Builtin<"m", prototype, "c"> {
591591
let UnMaskedPolicyScheme = HasPolicyOperand,
592592
HasMaskedOffOperand = false in {
593593
multiclass RVVSlideUpBuiltinSet {
594-
defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
594+
defm "" : RVVOutBuiltinSet<NAME, "csilxfdy",
595595
[["vx","v", "vvvz"]]>;
596596
defm "" : RVVOutBuiltinSet<NAME, "csil",
597597
[["vx","Uv", "UvUvUvz"]]>;
@@ -613,7 +613,7 @@ let UnMaskedPolicyScheme = HasPassthruOperand,
613613
IntrinsicTypes = {ResultType, Ops.back()->getType()};
614614
}] in {
615615
multiclass RVVSlideDownBuiltinSet {
616-
defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
616+
defm "" : RVVOutBuiltinSet<NAME, "csilxfdy",
617617
[["vx","v", "vvz"]]>;
618618
defm "" : RVVOutBuiltinSet<NAME, "csil",
619619
[["vx","Uv", "UvUvz"]]>;
Lines changed: 140 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,140 @@
1+
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
2+
// REQUIRES: riscv-registered-target
3+
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
4+
// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
5+
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
6+
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
7+
8+
#include <riscv_vector.h>
9+
10+
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgatherei16_vv_bf16mf4(
11+
// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
12+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
13+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
14+
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
15+
//
16+
vbfloat16mf4_t test_vrgatherei16_vv_bf16mf4(vbfloat16mf4_t vs2,
17+
vuint16mf4_t vs1, size_t vl) {
18+
return __riscv_vrgatherei16_vv_bf16mf4(vs2, vs1, vl);
19+
}
20+
21+
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgatherei16_vv_bf16mf2(
22+
// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
23+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
24+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
25+
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
26+
//
27+
vbfloat16mf2_t test_vrgatherei16_vv_bf16mf2(vbfloat16mf2_t vs2,
28+
vuint16mf2_t vs1, size_t vl) {
29+
return __riscv_vrgatherei16_vv_bf16mf2(vs2, vs1, vl);
30+
}
31+
32+
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgatherei16_vv_bf16m1(
33+
// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
34+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
35+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
36+
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
37+
//
38+
vbfloat16m1_t test_vrgatherei16_vv_bf16m1(vbfloat16m1_t vs2, vuint16m1_t vs1,
39+
size_t vl) {
40+
return __riscv_vrgatherei16_vv_bf16m1(vs2, vs1, vl);
41+
}
42+
43+
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgatherei16_vv_bf16m2(
44+
// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
45+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
46+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
47+
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
48+
//
49+
vbfloat16m2_t test_vrgatherei16_vv_bf16m2(vbfloat16m2_t vs2, vuint16m2_t vs1,
50+
size_t vl) {
51+
return __riscv_vrgatherei16_vv_bf16m2(vs2, vs1, vl);
52+
}
53+
54+
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgatherei16_vv_bf16m4(
55+
// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
56+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
57+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
58+
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
59+
//
60+
vbfloat16m4_t test_vrgatherei16_vv_bf16m4(vbfloat16m4_t vs2, vuint16m4_t vs1,
61+
size_t vl) {
62+
return __riscv_vrgatherei16_vv_bf16m4(vs2, vs1, vl);
63+
}
64+
65+
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgatherei16_vv_bf16m8(
66+
// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
67+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
68+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
69+
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
70+
//
71+
vbfloat16m8_t test_vrgatherei16_vv_bf16m8(vbfloat16m8_t vs2, vuint16m8_t vs1,
72+
size_t vl) {
73+
return __riscv_vrgatherei16_vv_bf16m8(vs2, vs1, vl);
74+
}
75+
76+
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgatherei16_vv_bf16mf4_m(
77+
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
78+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
79+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
80+
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
81+
//
82+
vbfloat16mf4_t test_vrgatherei16_vv_bf16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
83+
vuint16mf4_t vs1, size_t vl) {
84+
return __riscv_vrgatherei16_vv_bf16mf4_m(vm, vs2, vs1, vl);
85+
}
86+
87+
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgatherei16_vv_bf16mf2_m(
88+
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
89+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
90+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
91+
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
92+
//
93+
vbfloat16mf2_t test_vrgatherei16_vv_bf16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
94+
vuint16mf2_t vs1, size_t vl) {
95+
return __riscv_vrgatherei16_vv_bf16mf2_m(vm, vs2, vs1, vl);
96+
}
97+
98+
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgatherei16_vv_bf16m1_m(
99+
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
100+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
101+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
102+
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
103+
//
104+
vbfloat16m1_t test_vrgatherei16_vv_bf16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
105+
vuint16m1_t vs1, size_t vl) {
106+
return __riscv_vrgatherei16_vv_bf16m1_m(vm, vs2, vs1, vl);
107+
}
108+
109+
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgatherei16_vv_bf16m2_m(
110+
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
111+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
112+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
113+
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
114+
//
115+
vbfloat16m2_t test_vrgatherei16_vv_bf16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
116+
vuint16m2_t vs1, size_t vl) {
117+
return __riscv_vrgatherei16_vv_bf16m2_m(vm, vs2, vs1, vl);
118+
}
119+
120+
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgatherei16_vv_bf16m4_m(
121+
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
122+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
123+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
124+
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
125+
//
126+
vbfloat16m4_t test_vrgatherei16_vv_bf16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
127+
vuint16m4_t vs1, size_t vl) {
128+
return __riscv_vrgatherei16_vv_bf16m4_m(vm, vs2, vs1, vl);
129+
}
130+
131+
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgatherei16_vv_bf16m8_m(
132+
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
133+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
134+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
135+
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
136+
//
137+
vbfloat16m8_t test_vrgatherei16_vv_bf16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
138+
vuint16m8_t vs1, size_t vl) {
139+
return __riscv_vrgatherei16_vv_bf16m8_m(vm, vs2, vs1, vl);
140+
}
Lines changed: 140 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,140 @@
1+
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
2+
// REQUIRES: riscv-registered-target
3+
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
4+
// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
5+
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
6+
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
7+
8+
#include <riscv_vector.h>
9+
10+
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vslidedown_vx_bf16mf4(
11+
// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
12+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
13+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vslidedown.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[RS1]], i64 [[VL]], i64 3)
14+
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
15+
//
16+
vbfloat16mf4_t test_vslidedown_vx_bf16mf4(vbfloat16mf4_t vs2, size_t rs1,
17+
size_t vl) {
18+
return __riscv_vslidedown_vx_bf16mf4(vs2, rs1, vl);
19+
}
20+
21+
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vslidedown_vx_bf16mf2(
22+
// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
23+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
24+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vslidedown.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[RS1]], i64 [[VL]], i64 3)
25+
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
26+
//
27+
vbfloat16mf2_t test_vslidedown_vx_bf16mf2(vbfloat16mf2_t vs2, size_t rs1,
28+
size_t vl) {
29+
return __riscv_vslidedown_vx_bf16mf2(vs2, rs1, vl);
30+
}
31+
32+
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vslidedown_vx_bf16m1(
33+
// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
34+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
35+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vslidedown.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[RS1]], i64 [[VL]], i64 3)
36+
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
37+
//
38+
vbfloat16m1_t test_vslidedown_vx_bf16m1(vbfloat16m1_t vs2, size_t rs1,
39+
size_t vl) {
40+
return __riscv_vslidedown_vx_bf16m1(vs2, rs1, vl);
41+
}
42+
43+
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vslidedown_vx_bf16m2(
44+
// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
45+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
46+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vslidedown.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[RS1]], i64 [[VL]], i64 3)
47+
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
48+
//
49+
vbfloat16m2_t test_vslidedown_vx_bf16m2(vbfloat16m2_t vs2, size_t rs1,
50+
size_t vl) {
51+
return __riscv_vslidedown_vx_bf16m2(vs2, rs1, vl);
52+
}
53+
54+
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vslidedown_vx_bf16m4(
55+
// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
56+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
57+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vslidedown.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[RS1]], i64 [[VL]], i64 3)
58+
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
59+
//
60+
vbfloat16m4_t test_vslidedown_vx_bf16m4(vbfloat16m4_t vs2, size_t rs1,
61+
size_t vl) {
62+
return __riscv_vslidedown_vx_bf16m4(vs2, rs1, vl);
63+
}
64+
65+
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vslidedown_vx_bf16m8(
66+
// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
67+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
68+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vslidedown.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[RS1]], i64 [[VL]], i64 3)
69+
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
70+
//
71+
vbfloat16m8_t test_vslidedown_vx_bf16m8(vbfloat16m8_t vs2, size_t rs1,
72+
size_t vl) {
73+
return __riscv_vslidedown_vx_bf16m8(vs2, rs1, vl);
74+
}
75+
76+
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vslidedown_vx_bf16mf4_m(
77+
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
78+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
79+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vslidedown.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
80+
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
81+
//
82+
vbfloat16mf4_t test_vslidedown_vx_bf16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
83+
size_t rs1, size_t vl) {
84+
return __riscv_vslidedown_vx_bf16mf4_m(vm, vs2, rs1, vl);
85+
}
86+
87+
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vslidedown_vx_bf16mf2_m(
88+
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
89+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
90+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vslidedown.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
91+
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
92+
//
93+
vbfloat16mf2_t test_vslidedown_vx_bf16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
94+
size_t rs1, size_t vl) {
95+
return __riscv_vslidedown_vx_bf16mf2_m(vm, vs2, rs1, vl);
96+
}
97+
98+
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vslidedown_vx_bf16m1_m(
99+
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
100+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
101+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vslidedown.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
102+
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
103+
//
104+
vbfloat16m1_t test_vslidedown_vx_bf16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
105+
size_t rs1, size_t vl) {
106+
return __riscv_vslidedown_vx_bf16m1_m(vm, vs2, rs1, vl);
107+
}
108+
109+
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vslidedown_vx_bf16m2_m(
110+
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
111+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
112+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vslidedown.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
113+
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
114+
//
115+
vbfloat16m2_t test_vslidedown_vx_bf16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
116+
size_t rs1, size_t vl) {
117+
return __riscv_vslidedown_vx_bf16m2_m(vm, vs2, rs1, vl);
118+
}
119+
120+
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vslidedown_vx_bf16m4_m(
121+
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
122+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
123+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vslidedown.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
124+
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
125+
//
126+
vbfloat16m4_t test_vslidedown_vx_bf16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
127+
size_t rs1, size_t vl) {
128+
return __riscv_vslidedown_vx_bf16m4_m(vm, vs2, rs1, vl);
129+
}
130+
131+
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vslidedown_vx_bf16m8_m(
132+
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
133+
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
134+
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vslidedown.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[RS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
135+
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
136+
//
137+
vbfloat16m8_t test_vslidedown_vx_bf16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
138+
size_t rs1, size_t vl) {
139+
return __riscv_vslidedown_vx_bf16m8_m(vm, vs2, rs1, vl);
140+
}

0 commit comments

Comments
 (0)