define <2 x i64> @test_vp_splice_v2i64(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i64:
; CHECK: # %bb.0:
- ; CHECK-NEXT: addi a0, a0, -5
+ ; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
- ; CHECK-NEXT: vslidedown.vi v8, v8, 5
+ ; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret

- %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <2 x i64> %v
}

define <2 x i64> @test_vp_splice_v2i64_negative_offset(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i64_negative_offset:
; CHECK: # %bb.0:
- ; CHECK-NEXT: addi a0, a0, -5
- ; CHECK-NEXT: vsetivli zero, 5, e64, m1, ta, ma
+ ; CHECK-NEXT: addi a0, a0, -1
+ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
- ; CHECK-NEXT: vslideup.vi v8, v9, 5
+ ; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret

- %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 -1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <2 x i64> %v
}
@@ -46,54 +46,54 @@ define <2 x i64> @test_vp_splice_v2i64_zero_offset(<2 x i64> %va, <2 x i64> %vb,
define <2 x i64> @test_vp_splice_v2i64_masked(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i64_masked:
; CHECK: # %bb.0:
- ; CHECK-NEXT: addi a0, a0, -5
+ ; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
- ; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+ ; CHECK-NEXT: vslidedown.vi v8, v8, 1, v0.t
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 1, <2 x i1> %mask, i32 %evla, i32 %evlb)
ret <2 x i64> %v
}

define <4 x i32> @test_vp_splice_v4i32(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4i32:
; CHECK: # %bb.0:
- ; CHECK-NEXT: addi a0, a0, -5
+ ; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
- ; CHECK-NEXT: vslidedown.vi v8, v8, 5
+ ; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret

- %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <4 x i32> %v
}

define <4 x i32> @test_vp_splice_v4i32_negative_offset(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4i32_negative_offset:
; CHECK: # %bb.0:
- ; CHECK-NEXT: addi a0, a0, -5
- ; CHECK-NEXT: vsetivli zero, 5, e32, m1, ta, ma
+ ; CHECK-NEXT: addi a0, a0, -3
+ ; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
- ; CHECK-NEXT: vslideup.vi v8, v9, 5
+ ; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret

- %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 -3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <4 x i32> %v
}

define <4 x i32> @test_vp_splice_v4i32_masked(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4i32_masked:
; CHECK: # %bb.0:
- ; CHECK-NEXT: addi a0, a0, -5
+ ; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
- ; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+ ; CHECK-NEXT: vslidedown.vi v8, v8, 3, v0.t
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
- %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 3, <4 x i1> %mask, i32 %evla, i32 %evlb)
ret <4 x i32> %v
}
@@ -182,82 +182,82 @@ define <16 x i8> @test_vp_splice_v16i8_masked(<16 x i8> %va, <16 x i8> %vb, <16
define <2 x double> @test_vp_splice_v2f64(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2f64:
; CHECK: # %bb.0:
- ; CHECK-NEXT: addi a0, a0, -5
+ ; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
- ; CHECK-NEXT: vslidedown.vi v8, v8, 5
+ ; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret

- %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <2 x double> %v
}

define <2 x double> @test_vp_splice_v2f64_negative_offset(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2f64_negative_offset:
; CHECK: # %bb.0:
- ; CHECK-NEXT: addi a0, a0, -5
- ; CHECK-NEXT: vsetivli zero, 5, e64, m1, ta, ma
+ ; CHECK-NEXT: addi a0, a0, -1
+ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
- ; CHECK-NEXT: vslideup.vi v8, v9, 5
+ ; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret

- %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 -1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <2 x double> %v
}

define <2 x double> @test_vp_splice_v2f64_masked(<2 x double> %va, <2 x double> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2f64_masked:
; CHECK: # %bb.0:
- ; CHECK-NEXT: addi a0, a0, -5
+ ; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
- ; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+ ; CHECK-NEXT: vslidedown.vi v8, v8, 1, v0.t
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 1, <2 x i1> %mask, i32 %evla, i32 %evlb)
ret <2 x double> %v
}

define <4 x float> @test_vp_splice_v4f32(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4f32:
; CHECK: # %bb.0:
- ; CHECK-NEXT: addi a0, a0, -5
+ ; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
- ; CHECK-NEXT: vslidedown.vi v8, v8, 5
+ ; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret

- %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <4 x float> %v
}

define <4 x float> @test_vp_splice_v4f32_negative_offset(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4f32_negative_offset:
; CHECK: # %bb.0:
- ; CHECK-NEXT: addi a0, a0, -5
- ; CHECK-NEXT: vsetivli zero, 5, e32, m1, ta, ma
+ ; CHECK-NEXT: addi a0, a0, -3
+ ; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
- ; CHECK-NEXT: vslideup.vi v8, v9, 5
+ ; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret

- %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 -3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <4 x float> %v
}

define <4 x float> @test_vp_splice_v4f32_masked(<4 x float> %va, <4 x float> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4f32_masked:
; CHECK: # %bb.0:
- ; CHECK-NEXT: addi a0, a0, -5
+ ; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
- ; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+ ; CHECK-NEXT: vslidedown.vi v8, v8, 3, v0.t
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
- %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 3, <4 x i1> %mask, i32 %evla, i32 %evlb)
ret <4 x float> %v
}