Commit 9980148

Lian Wang authored and committed

[RISCV][SelectionDAG] Support VP_ADD/VP_MUL/VP_SUB mask operations

Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D124144

1 parent 059f39d commit 9980148

7 files changed, 440 insertions(+), 0 deletions(-)


llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

Lines changed: 11 additions & 0 deletions
@@ -8818,6 +8818,17 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
            "LHS/RHS of comparison should match types!");
     break;
+  case ISD::VP_ADD:
+  case ISD::VP_SUB:
+    // If it is a VP_ADD/VP_SUB mask operation, turn it into VP_XOR.
+    if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
+      Opcode = ISD::VP_XOR;
+    break;
+  case ISD::VP_MUL:
+    // If it is a VP_MUL mask operation, turn it into VP_AND.
+    if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
+      Opcode = ISD::VP_AND;
+    break;
   }

   // Memoize nodes.
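
For context (not part of the commit): the rewrite is valid because i1 arithmetic is arithmetic modulo 2, so addition and subtraction both reduce to exclusive-or and multiplication reduces to logical and. Below is a minimal standalone C++ check of these identities over all i1 values; it is purely illustrative and none of it comes from the LLVM sources.

#include <cassert>

int main() {
  for (unsigned a = 0; a < 2; ++a) {
    for (unsigned b = 0; b < 2; ++b) {
      // Addition and subtraction modulo 2 both equal XOR, which is why
      // VP_ADD and VP_SUB on i1 vectors can be emitted as VP_XOR.
      assert(((a + b) & 1u) == (a ^ b));
      assert(((a - b) & 1u) == (a ^ b));
      // Multiplication of i1 values equals AND, matching the VP_MUL -> VP_AND rewrite.
      assert((a * b) == (a & b));
    }
  }
  return 0;
}

Because VP_ADD and VP_SUB map to the same mask operation, the vsub tests below check for the same vmxor.mm output as the vadd tests.
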
Lines changed: 77 additions & 0 deletions
@@ -0,0 +1,77 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK

declare <2 x i1> @llvm.vp.add.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32)

define <2 x i1> @vadd_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <2 x i1> @llvm.vp.add.v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i1> %v
}

declare <4 x i1> @llvm.vp.add.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32)

define <4 x i1> @vadd_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <4 x i1> @llvm.vp.add.v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i1> %v
}

declare <8 x i1> @llvm.vp.add.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32)

define <8 x i1> @vadd_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <8 x i1> @llvm.vp.add.v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}

declare <16 x i1> @llvm.vp.add.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32)

define <16 x i1> @vadd_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <16 x i1> @llvm.vp.add.v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i1> %v
}

declare <32 x i1> @llvm.vp.add.v32i1(<32 x i1>, <32 x i1>, <32 x i1>, i32)

define <32 x i1> @vadd_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <32 x i1> @llvm.vp.add.v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 %evl)
  ret <32 x i1> %v
}

declare <64 x i1> @llvm.vp.add.v64i1(<64 x i1>, <64 x i1>, <64 x i1>, i32)

define <64 x i1> @vadd_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <64 x i1> @llvm.vp.add.v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 %evl)
  ret <64 x i1> %v
}
Lines changed: 77 additions & 0 deletions
@@ -0,0 +1,77 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK

declare <2 x i1> @llvm.vp.mul.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32)

define <2 x i1> @vmul_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <2 x i1> @llvm.vp.mul.v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i1> %v
}

declare <4 x i1> @llvm.vp.mul.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32)

define <4 x i1> @vmul_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <4 x i1> @llvm.vp.mul.v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i1> %v
}

declare <8 x i1> @llvm.vp.mul.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32)

define <8 x i1> @vmul_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <8 x i1> @llvm.vp.mul.v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}

declare <16 x i1> @llvm.vp.mul.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32)

define <16 x i1> @vmul_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <16 x i1> @llvm.vp.mul.v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i1> %v
}

declare <32 x i1> @llvm.vp.mul.v32i1(<32 x i1>, <32 x i1>, <32 x i1>, i32)

define <32 x i1> @vmul_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <32 x i1> @llvm.vp.mul.v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 %evl)
  ret <32 x i1> %v
}

declare <64 x i1> @llvm.vp.mul.v64i1(<64 x i1>, <64 x i1>, <64 x i1>, i32)

define <64 x i1> @vmul_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <64 x i1> @llvm.vp.mul.v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 %evl)
  ret <64 x i1> %v
}
Lines changed: 77 additions & 0 deletions
@@ -0,0 +1,77 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK

declare <2 x i1> @llvm.vp.sub.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32)

define <2 x i1> @vsub_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <2 x i1> @llvm.vp.sub.v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i1> %v
}

declare <4 x i1> @llvm.vp.sub.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32)

define <4 x i1> @vsub_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <4 x i1> @llvm.vp.sub.v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i1> %v
}

declare <8 x i1> @llvm.vp.sub.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32)

define <8 x i1> @vsub_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <8 x i1> @llvm.vp.sub.v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i1> %v
}

declare <16 x i1> @llvm.vp.sub.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32)

define <16 x i1> @vsub_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <16 x i1> @llvm.vp.sub.v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i1> %v
}

declare <32 x i1> @llvm.vp.sub.v32i1(<32 x i1>, <32 x i1>, <32 x i1>, i32)

define <32 x i1> @vsub_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <32 x i1> @llvm.vp.sub.v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 %evl)
  ret <32 x i1> %v
}

declare <64 x i1> @llvm.vp.sub.v64i1(<64 x i1>, <64 x i1>, <64 x i1>, i32)

define <64 x i1> @vsub_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <64 x i1> @llvm.vp.sub.v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 %evl)
  ret <64 x i1> %v
}
Lines changed: 66 additions & 0 deletions
@@ -0,0 +1,66 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK

declare <vscale x 2 x i1> @llvm.vp.add.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i1> @vadd_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.add.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i1> %v
}

declare <vscale x 4 x i1> @llvm.vp.add.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i1> @vadd_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i1> @llvm.vp.add.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i1> %v
}

declare <vscale x 8 x i1> @llvm.vp.add.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i1> @vadd_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i1> @llvm.vp.add.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i1> %v
}

declare <vscale x 16 x i1> @llvm.vp.add.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i1> @vadd_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i1> @llvm.vp.add.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i1> %v
}

declare <vscale x 32 x i1> @llvm.vp.add.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i1> @vadd_vv_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i1> @llvm.vp.add.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i1> %v
}
Lines changed: 66 additions & 0 deletions
@@ -0,0 +1,66 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK

declare <vscale x 2 x i1> @llvm.vp.mul.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i1> @vmul_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.vp.mul.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i1> %v
}

declare <vscale x 4 x i1> @llvm.vp.mul.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i1> @vmul_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i1> @llvm.vp.mul.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i1> %v
}

declare <vscale x 8 x i1> @llvm.vp.mul.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i1> @vmul_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i1> @llvm.vp.mul.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i1> %v
}

declare <vscale x 16 x i1> @llvm.vp.mul.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i1> @vmul_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i1> @llvm.vp.mul.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i1> %v
}

declare <vscale x 32 x i1> @llvm.vp.mul.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i1> @vmul_vv_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i1> @llvm.vp.mul.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i1> %v
}
