[RISCV][IR] Implement verifier check for llvm.experimental.vp.splice immediate. (#147458)
This applies the same check as llvm.vector.splice: the immediate must be in the range [-VL, VL-1], where VL is the known minimum vector length. If a vscale_range attribute is available, its lower bound is used to raise the known minimum vector length for this check. This ensures the immediate is in range for every value of vscale that satisfies the vscale_range.
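As a concrete illustration (hypothetical IR, not taken from this patch; the function name and the vscale_range upper bound are assumed): with a vscale_range lower bound of 2, a <vscale x 2 x i1> operand has a known minimum vector length of 2 x 2 = 4 elements, so the verifier accepts splice immediates in [-4, 3] and rejects anything outside that range.

; Sketch only: @splice_ok and vscale_range(2,1024) are illustrative.
declare <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32, <vscale x 2 x i1>, i32, i32)

define <vscale x 2 x i1> @splice_ok(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) vscale_range(2,1024) {
  ; i32 3 is the largest immediate allowed here; i32 4 (or i32 -5) would be
  ; rejected by the IR verifier under the new check.
  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
  ret <vscale x 2 x i1> %v
}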
llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll (43 additions, 41 deletions)
@@ -10,7 +10,7 @@ declare <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i
 declare <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, i32, <vscale x 32 x i1>, i32, i32)
 declare <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, i32, <vscale x 64 x i1>, i32, i32)

-define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv1i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -22,19 +22,19 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -1
 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v9, 5
+; CHECK-NEXT: vslidedown.vi v9, v9, 1
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT: vslideup.vx v9, v8, a0
 ; CHECK-NEXT: vmsne.vi v0, v9, 0
 ; CHECK-NEXT: ret

-  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 1, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x i1> %v
 }

-define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
@@ -46,19 +46,19 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetivli zero, 5, e8, mf8, ta, ma
+; CHECK-NEXT: addi a0, a0, -2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vslidedown.vx v9, v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 5
+; CHECK-NEXT: vslideup.vi v9, v8, 2
 ; CHECK-NEXT: vmsne.vi v0, v9, 0
 ; CHECK-NEXT: ret

-  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 -2, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x i1> %v
 }

-define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv1i1_masked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -70,20 +70,20 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <v
 ; CHECK-NEXT: vmv.v.i v11, 0
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -1
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vslidedown.vi v10, v10, 1, v0.t
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
 ; CHECK-NEXT: ret
-  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
+  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 1, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 1 x i1> %v
 }

-define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv2i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -95,19 +95,19 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v9, 5
+; CHECK-NEXT: vslidedown.vi v9, v9, 3
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vslideup.vx v9, v8, a0
 ; CHECK-NEXT: vmsne.vi v0, v9, 0
 ; CHECK-NEXT: ret

-  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i1> %v
 }

-define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
@@ -119,19 +119,19 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetivli zero, 5, e8, mf4, ta, ma
+; CHECK-NEXT: addi a0, a0, -4
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vslidedown.vx v9, v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 5
+; CHECK-NEXT: vslideup.vi v9, v8, 4
 ; CHECK-NEXT: vmsne.vi v0, v9, 0
 ; CHECK-NEXT: ret

-  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 -4, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i1> %v
 }

-define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv2i1_masked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -143,20 +143,20 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <v
 ; CHECK-NEXT: vmv.v.i v11, 0
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vslidedown.vi v10, v10, 3, v0.t
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
 ; CHECK-NEXT: ret
-  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
+  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 3, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 2 x i1> %v
 }

-define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv4i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -180,7 +180,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x
   ret <vscale x 4 x i1> %v
 }

-define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
@@ -204,7 +204,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1
   ret <vscale x 4 x i1> %v
 }

-define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv4i1_masked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -229,7 +229,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <v
   ret <vscale x 4 x i1> %v
 }

-define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv8i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -253,7 +253,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x
   ret <vscale x 8 x i1> %v
 }

-define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
@@ -277,7 +277,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1
   ret <vscale x 8 x i1> %v
 }

-define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv8i1_masked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -302,7 +302,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <v
   ret <vscale x 8 x i1> %v
 }

-define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -326,7 +326,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscal
   ret <vscale x 16 x i1> %v
 }

-define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
@@ -350,7 +350,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x
   ret <vscale x 16 x i1> %v
 }

-define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv16i1_masked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -376,7 +376,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va,
   ret <vscale x 16 x i1> %v
 }

-define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv32i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -400,7 +400,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscal
   ret <vscale x 32 x i1> %v
 }

-define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
@@ -424,7 +424,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x
   ret <vscale x 32 x i1> %v
 }

-define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv32i1_masked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -450,7 +450,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va,
   ret <vscale x 32 x i1> %v
 }

-define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv64i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -474,7 +474,7 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscal
   ret <vscale x 64 x i1> %v
 }

-define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
@@ -498,7 +498,7 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x
   ret <vscale x 64 x i1> %v
 }

-define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
 ; CHECK-LABEL: test_vp_splice_nxv64i1_masked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -523,3 +523,5 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va,
   %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> %mask, i32 %evla, i32 %evlb)
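Note: the final hunk is truncated in this view, so the two lines it adds at the end of the file are not shown. They presumably define the #0 attribute group that every test function now references. A vscale_range lower bound of 2 is consistent with the updated immediates above (for nxv1i1, the known minimum VL becomes 1 x 2 = 2, matching the new offsets 1 and -2), so a plausible definition, with the exact bounds assumed rather than taken from this diff, would be:

attributes #0 = { vscale_range(2,1024) }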