Skip to content

Commit e2aa878

Browse files
authored
[RISCV] Fix out-of-bounds offsets in fixed vector splice tests. NFC (#147247)
Per the LangRef, the immediate offset needs to be smaller than the first EVL, and the first EVL needs to be <= the vector length.
1 parent ded1426 commit e2aa878

File tree

2 files changed

+60
-60
lines changed

2 files changed

+60
-60
lines changed

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll

Lines changed: 40 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -7,28 +7,28 @@
77
define <2 x i64> @test_vp_splice_v2i64(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
88
; CHECK-LABEL: test_vp_splice_v2i64:
99
; CHECK: # %bb.0:
10-
; CHECK-NEXT: addi a0, a0, -5
10+
; CHECK-NEXT: addi a0, a0, -1
1111
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
12-
; CHECK-NEXT: vslidedown.vi v8, v8, 5
12+
; CHECK-NEXT: vslidedown.vi v8, v8, 1
1313
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1414
; CHECK-NEXT: vslideup.vx v8, v9, a0
1515
; CHECK-NEXT: ret
1616

17-
%v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
17+
%v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
1818
ret <2 x i64> %v
1919
}
2020

2121
define <2 x i64> @test_vp_splice_v2i64_negative_offset(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
2222
; CHECK-LABEL: test_vp_splice_v2i64_negative_offset:
2323
; CHECK: # %bb.0:
24-
; CHECK-NEXT: addi a0, a0, -5
25-
; CHECK-NEXT: vsetivli zero, 5, e64, m1, ta, ma
24+
; CHECK-NEXT: addi a0, a0, -1
25+
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
2626
; CHECK-NEXT: vslidedown.vx v8, v8, a0
2727
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
28-
; CHECK-NEXT: vslideup.vi v8, v9, 5
28+
; CHECK-NEXT: vslideup.vi v8, v9, 1
2929
; CHECK-NEXT: ret
3030

31-
%v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
31+
%v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 -1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
3232
ret <2 x i64> %v
3333
}
3434

@@ -46,54 +46,54 @@ define <2 x i64> @test_vp_splice_v2i64_zero_offset(<2 x i64> %va, <2 x i64> %vb,
4646
define <2 x i64> @test_vp_splice_v2i64_masked(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
4747
; CHECK-LABEL: test_vp_splice_v2i64_masked:
4848
; CHECK: # %bb.0:
49-
; CHECK-NEXT: addi a0, a0, -5
49+
; CHECK-NEXT: addi a0, a0, -1
5050
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
51-
; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
51+
; CHECK-NEXT: vslidedown.vi v8, v8, 1, v0.t
5252
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
5353
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
5454
; CHECK-NEXT: ret
55-
%v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
55+
%v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 1, <2 x i1> %mask, i32 %evla, i32 %evlb)
5656
ret <2 x i64> %v
5757
}
5858

5959
define <4 x i32> @test_vp_splice_v4i32(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
6060
; CHECK-LABEL: test_vp_splice_v4i32:
6161
; CHECK: # %bb.0:
62-
; CHECK-NEXT: addi a0, a0, -5
62+
; CHECK-NEXT: addi a0, a0, -3
6363
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
64-
; CHECK-NEXT: vslidedown.vi v8, v8, 5
64+
; CHECK-NEXT: vslidedown.vi v8, v8, 3
6565
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
6666
; CHECK-NEXT: vslideup.vx v8, v9, a0
6767
; CHECK-NEXT: ret
6868

69-
%v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
69+
%v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
7070
ret <4 x i32> %v
7171
}
7272

7373
define <4 x i32> @test_vp_splice_v4i32_negative_offset(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
7474
; CHECK-LABEL: test_vp_splice_v4i32_negative_offset:
7575
; CHECK: # %bb.0:
76-
; CHECK-NEXT: addi a0, a0, -5
77-
; CHECK-NEXT: vsetivli zero, 5, e32, m1, ta, ma
76+
; CHECK-NEXT: addi a0, a0, -3
77+
; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
7878
; CHECK-NEXT: vslidedown.vx v8, v8, a0
7979
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
80-
; CHECK-NEXT: vslideup.vi v8, v9, 5
80+
; CHECK-NEXT: vslideup.vi v8, v9, 3
8181
; CHECK-NEXT: ret
8282

83-
%v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
83+
%v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 -3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
8484
ret <4 x i32> %v
8585
}
8686

8787
define <4 x i32> @test_vp_splice_v4i32_masked(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
8888
; CHECK-LABEL: test_vp_splice_v4i32_masked:
8989
; CHECK: # %bb.0:
90-
; CHECK-NEXT: addi a0, a0, -5
90+
; CHECK-NEXT: addi a0, a0, -3
9191
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
92-
; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
92+
; CHECK-NEXT: vslidedown.vi v8, v8, 3, v0.t
9393
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
9494
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
9595
; CHECK-NEXT: ret
96-
%v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
96+
%v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 3, <4 x i1> %mask, i32 %evla, i32 %evlb)
9797
ret <4 x i32> %v
9898
}
9999

@@ -182,82 +182,82 @@ define <16 x i8> @test_vp_splice_v16i8_masked(<16 x i8> %va, <16 x i8> %vb, <16
182182
define <2 x double> @test_vp_splice_v2f64(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
183183
; CHECK-LABEL: test_vp_splice_v2f64:
184184
; CHECK: # %bb.0:
185-
; CHECK-NEXT: addi a0, a0, -5
185+
; CHECK-NEXT: addi a0, a0, -1
186186
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
187-
; CHECK-NEXT: vslidedown.vi v8, v8, 5
187+
; CHECK-NEXT: vslidedown.vi v8, v8, 1
188188
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
189189
; CHECK-NEXT: vslideup.vx v8, v9, a0
190190
; CHECK-NEXT: ret
191191

192-
%v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
192+
%v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
193193
ret <2 x double> %v
194194
}
195195

196196
define <2 x double> @test_vp_splice_v2f64_negative_offset(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
197197
; CHECK-LABEL: test_vp_splice_v2f64_negative_offset:
198198
; CHECK: # %bb.0:
199-
; CHECK-NEXT: addi a0, a0, -5
200-
; CHECK-NEXT: vsetivli zero, 5, e64, m1, ta, ma
199+
; CHECK-NEXT: addi a0, a0, -1
200+
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
201201
; CHECK-NEXT: vslidedown.vx v8, v8, a0
202202
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
203-
; CHECK-NEXT: vslideup.vi v8, v9, 5
203+
; CHECK-NEXT: vslideup.vi v8, v9, 1
204204
; CHECK-NEXT: ret
205205

206-
%v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
206+
%v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 -1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
207207
ret <2 x double> %v
208208
}
209209

210210
define <2 x double> @test_vp_splice_v2f64_masked(<2 x double> %va, <2 x double> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
211211
; CHECK-LABEL: test_vp_splice_v2f64_masked:
212212
; CHECK: # %bb.0:
213-
; CHECK-NEXT: addi a0, a0, -5
213+
; CHECK-NEXT: addi a0, a0, -1
214214
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
215-
; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
215+
; CHECK-NEXT: vslidedown.vi v8, v8, 1, v0.t
216216
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
217217
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
218218
; CHECK-NEXT: ret
219-
%v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
219+
%v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 1, <2 x i1> %mask, i32 %evla, i32 %evlb)
220220
ret <2 x double> %v
221221
}
222222

223223
define <4 x float> @test_vp_splice_v4f32(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
224224
; CHECK-LABEL: test_vp_splice_v4f32:
225225
; CHECK: # %bb.0:
226-
; CHECK-NEXT: addi a0, a0, -5
226+
; CHECK-NEXT: addi a0, a0, -3
227227
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
228-
; CHECK-NEXT: vslidedown.vi v8, v8, 5
228+
; CHECK-NEXT: vslidedown.vi v8, v8, 3
229229
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
230230
; CHECK-NEXT: vslideup.vx v8, v9, a0
231231
; CHECK-NEXT: ret
232232

233-
%v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
233+
%v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
234234
ret <4 x float> %v
235235
}
236236

237237
define <4 x float> @test_vp_splice_v4f32_negative_offset(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
238238
; CHECK-LABEL: test_vp_splice_v4f32_negative_offset:
239239
; CHECK: # %bb.0:
240-
; CHECK-NEXT: addi a0, a0, -5
241-
; CHECK-NEXT: vsetivli zero, 5, e32, m1, ta, ma
240+
; CHECK-NEXT: addi a0, a0, -3
241+
; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
242242
; CHECK-NEXT: vslidedown.vx v8, v8, a0
243243
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
244-
; CHECK-NEXT: vslideup.vi v8, v9, 5
244+
; CHECK-NEXT: vslideup.vi v8, v9, 3
245245
; CHECK-NEXT: ret
246246

247-
%v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
247+
%v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 -3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
248248
ret <4 x float> %v
249249
}
250250

251251
define <4 x float> @test_vp_splice_v4f32_masked(<4 x float> %va, <4 x float> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
252252
; CHECK-LABEL: test_vp_splice_v4f32_masked:
253253
; CHECK: # %bb.0:
254-
; CHECK-NEXT: addi a0, a0, -5
254+
; CHECK-NEXT: addi a0, a0, -3
255255
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
256-
; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
256+
; CHECK-NEXT: vslidedown.vi v8, v8, 3, v0.t
257257
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
258258
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
259259
; CHECK-NEXT: ret
260-
%v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
260+
%v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 3, <4 x i1> %mask, i32 %evla, i32 %evlb)
261261
ret <4 x float> %v
262262
}
263263

llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -19,15 +19,15 @@ define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %ev
1919
; CHECK-NEXT: vmv.v.i v10, 0
2020
; CHECK-NEXT: vmv1r.v v0, v9
2121
; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
22-
; CHECK-NEXT: addi a0, a0, -5
22+
; CHECK-NEXT: addi a0, a0, -1
2323
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
24-
; CHECK-NEXT: vslidedown.vi v9, v9, 5
24+
; CHECK-NEXT: vslidedown.vi v9, v9, 1
2525
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
2626
; CHECK-NEXT: vslideup.vx v9, v8, a0
2727
; CHECK-NEXT: vmsne.vi v0, v9, 0
2828
; CHECK-NEXT: ret
2929

30-
%v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
30+
%v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
3131
ret <2 x i1> %v
3232
}
3333

@@ -43,15 +43,15 @@ define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb,
4343
; CHECK-NEXT: vmv.v.i v10, 0
4444
; CHECK-NEXT: vmv1r.v v0, v9
4545
; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
46-
; CHECK-NEXT: addi a0, a0, -5
47-
; CHECK-NEXT: vsetivli zero, 5, e8, mf8, ta, ma
46+
; CHECK-NEXT: addi a0, a0, -1
47+
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
4848
; CHECK-NEXT: vslidedown.vx v9, v9, a0
4949
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
50-
; CHECK-NEXT: vslideup.vi v9, v8, 5
50+
; CHECK-NEXT: vslideup.vi v9, v8, 1
5151
; CHECK-NEXT: vmsne.vi v0, v9, 0
5252
; CHECK-NEXT: ret
5353

54-
%v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
54+
%v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 -1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
5555
ret <2 x i1> %v
5656
}
5757

@@ -67,16 +67,16 @@ define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1>
6767
; CHECK-NEXT: vmv.v.i v11, 0
6868
; CHECK-NEXT: vmv1r.v v0, v10
6969
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
70-
; CHECK-NEXT: addi a0, a0, -5
70+
; CHECK-NEXT: addi a0, a0, -1
7171
; CHECK-NEXT: vmv1r.v v0, v9
7272
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
73-
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
73+
; CHECK-NEXT: vslidedown.vi v10, v10, 1, v0.t
7474
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
7575
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
7676
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
7777
; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
7878
; CHECK-NEXT: ret
79-
%v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
79+
%v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 1, <2 x i1> %mask, i32 %evla, i32 %evlb)
8080
ret <2 x i1> %v
8181
}
8282

@@ -92,15 +92,15 @@ define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %ev
9292
; CHECK-NEXT: vmv.v.i v10, 0
9393
; CHECK-NEXT: vmv1r.v v0, v9
9494
; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
95-
; CHECK-NEXT: addi a0, a0, -5
95+
; CHECK-NEXT: addi a0, a0, -3
9696
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
97-
; CHECK-NEXT: vslidedown.vi v9, v9, 5
97+
; CHECK-NEXT: vslidedown.vi v9, v9, 3
9898
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9999
; CHECK-NEXT: vslideup.vx v9, v8, a0
100100
; CHECK-NEXT: vmsne.vi v0, v9, 0
101101
; CHECK-NEXT: ret
102102

103-
%v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
103+
%v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
104104
ret <4 x i1> %v
105105
}
106106

@@ -116,15 +116,15 @@ define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb,
116116
; CHECK-NEXT: vmv.v.i v10, 0
117117
; CHECK-NEXT: vmv1r.v v0, v9
118118
; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
119-
; CHECK-NEXT: addi a0, a0, -5
120-
; CHECK-NEXT: vsetivli zero, 5, e8, mf4, ta, ma
119+
; CHECK-NEXT: addi a0, a0, -3
120+
; CHECK-NEXT: vsetivli zero, 3, e8, mf4, ta, ma
121121
; CHECK-NEXT: vslidedown.vx v9, v9, a0
122122
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
123-
; CHECK-NEXT: vslideup.vi v9, v8, 5
123+
; CHECK-NEXT: vslideup.vi v9, v8, 3
124124
; CHECK-NEXT: vmsne.vi v0, v9, 0
125125
; CHECK-NEXT: ret
126126

127-
%v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
127+
%v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 -3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
128128
ret <4 x i1> %v
129129
}
130130

@@ -140,16 +140,16 @@ define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1>
140140
; CHECK-NEXT: vmv.v.i v11, 0
141141
; CHECK-NEXT: vmv1r.v v0, v10
142142
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
143-
; CHECK-NEXT: addi a0, a0, -5
143+
; CHECK-NEXT: addi a0, a0, -3
144144
; CHECK-NEXT: vmv1r.v v0, v9
145145
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
146-
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
146+
; CHECK-NEXT: vslidedown.vi v10, v10, 3, v0.t
147147
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
148148
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
149149
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
150150
; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
151151
; CHECK-NEXT: ret
152-
%v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
152+
%v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 3, <4 x i1> %mask, i32 %evla, i32 %evlb)
153153
ret <4 x i1> %v
154154
}
155155

0 commit comments

Comments (0)