
Commit 6fdc77e

[RISCV] Don't reduce vslidedown's VL in rotations
Even though we only need to write to the bottom NumElts - Rotation elements for the vslidedown.vi, we can save an extra vsetivli toggle if we just keep the wide VL. (I may be missing something here: is there a reason why we want to explicitly keep the vslidedown narrow?)

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D151390
1 parent c4a60c9 commit 6fdc77e
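For illustration, here is the effect on emitted code, taken from the reverse_v2i8 test updated below (fixed-vector-shuffle-reverse.ll). Before this change, the slidedown ran under its own reduced VL and required a second vsetivli; afterwards both slides share the wide VL and one vsetivli suffices:

Before:
    vsetivli zero, 1, e8, mf8, ta, ma
    vslidedown.vi v9, v8, 1
    vsetivli zero, 2, e8, mf8, ta, ma
    vslideup.vi v9, v8, 1

After:
    vsetivli zero, 2, e8, mf8, ta, ma
    vslidedown.vi v9, v8, 1
    vslideup.vi v9, v8, 1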

5 files changed: 32 additions, 67 deletions


llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 3 additions & 9 deletions
@@ -3945,16 +3945,10 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
 
     SDValue Res = DAG.getUNDEF(ContainerVT);
     if (HiV) {
-      // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
-      // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
-      // causes multiple vsetvlis in some test cases such as lowering
-      // reduce.mul
-      SDValue DownVL = VL;
-      if (LoV)
-        DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
+      // Even though we could use a smaller VL, don't to avoid a vsetivli
+      // toggle.
       Res = getVSlidedown(DAG, Subtarget, DL, ContainerVT, Res, HiV,
-                          DAG.getConstant(Rotation, DL, XLenVT), TrueMask,
-                          DownVL);
+                          DAG.getConstant(Rotation, DL, XLenVT), TrueMask, VL);
     }
     if (LoV)
       Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, Res, LoV,

llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll

Lines changed: 7 additions & 14 deletions
@@ -115,9 +115,8 @@ define <1 x i8> @reverse_v1i8(<1 x i8> %a) {
 define <2 x i8> @reverse_v2i8(<2 x i8> %a) {
 ; CHECK-LABEL: reverse_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -205,9 +204,8 @@ define <1 x i16> @reverse_v1i16(<1 x i16> %a) {
 define <2 x i16> @reverse_v2i16(<2 x i16> %a) {
 ; CHECK-LABEL: reverse_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -280,9 +278,8 @@ define <1 x i32> @reverse_v1i32(<1 x i32> %a) {
 define <2 x i32> @reverse_v2i32(<2 x i32> %a) {
 ; CHECK-LABEL: reverse_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -340,9 +337,8 @@ define <1 x i64> @reverse_v1i64(<1 x i64> %a) {
 define <2 x i64> @reverse_v2i64(<2 x i64> %a) {
 ; CHECK-LABEL: reverse_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
@@ -484,9 +480,8 @@ define <1 x half> @reverse_v1f16(<1 x half> %a) {
 define <2 x half> @reverse_v2f16(<2 x half> %a) {
 ; CHECK-LABEL: reverse_v2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -559,9 +554,8 @@ define <1 x float> @reverse_v1f32(<1 x float> %a) {
 define <2 x float> @reverse_v2f32(<2 x float> %a) {
 ; CHECK-LABEL: reverse_v2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -619,9 +613,8 @@ define <1 x double> @reverse_v1f64(<1 x double> %a) {
 define <2 x double> @reverse_v2f64(<2 x double> %a) {
 ; CHECK-LABEL: reverse_v2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll

Lines changed: 4 additions & 8 deletions
@@ -304,9 +304,8 @@ define <8 x float> @slideup_v8f32(<8 x float> %x) {
 define <8 x float> @splice_unary(<8 x float> %x) {
 ; CHECK-LABEL: splice_unary:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 7, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vslideup.vi v10, v8, 7
 ; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
@@ -317,9 +316,8 @@ define <8 x float> @splice_unary(<8 x float> %x) {
 define <8 x double> @splice_unary2(<8 x double> %x) {
 ; CHECK-LABEL: splice_unary2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v8, 6
 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vslidedown.vi v12, v8, 6
 ; CHECK-NEXT: vslideup.vi v12, v8, 2
 ; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
@@ -330,9 +328,8 @@ define <8 x double> @splice_unary2(<8 x double> %x) {
 define <8 x float> @splice_binary(<8 x float> %x, <8 x float> %y) {
 ; CHECK-LABEL: splice_binary:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 2
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 2
 ; CHECK-NEXT: vslideup.vi v8, v10, 6
 ; CHECK-NEXT: ret
 %s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 9>
@@ -342,9 +339,8 @@ define <8 x float> @splice_binary(<8 x float> %x, <8 x float> %y) {
 define <8 x double> @splice_binary2(<8 x double> %x, <8 x double> %y) {
 ; CHECK-LABEL: splice_binary2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 3, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v12, 5
 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vslidedown.vi v12, v12, 5
 ; CHECK-NEXT: vslideup.vi v12, v8, 3
 ; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll

Lines changed: 4 additions & 8 deletions
@@ -583,9 +583,8 @@ define <8 x i32> @slideup_v8i32(<8 x i32> %x) {
 define <8 x i16> @splice_unary(<8 x i16> %x) {
 ; CHECK-LABEL: splice_unary:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 2
 ; CHECK-NEXT: vslideup.vi v9, v8, 6
 ; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
@@ -596,9 +595,8 @@ define <8 x i16> @splice_unary(<8 x i16> %x) {
 define <8 x i32> @splice_unary2(<8 x i32> %x) {
 ; CHECK-LABEL: splice_unary2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 5
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 5
 ; CHECK-NEXT: vslideup.vi v10, v8, 3
 ; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
@@ -609,9 +607,8 @@ define <8 x i32> @splice_unary2(<8 x i32> %x) {
 define <8 x i16> @splice_binary(<8 x i16> %x, <8 x i16> %y) {
 ; CHECK-LABEL: splice_binary:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 2
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 2
 ; CHECK-NEXT: vslideup.vi v8, v9, 6
 ; CHECK-NEXT: ret
 %s = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 9>
@@ -621,9 +618,8 @@ define <8 x i16> @splice_binary(<8 x i16> %x, <8 x i16> %y) {
 define <8 x i32> @splice_binary2(<8 x i32> %x, <8 x i32> %y) {
 ; CHECK-LABEL: splice_binary2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
 ; CHECK-NEXT: vslideup.vi v8, v10, 3
 ; CHECK-NEXT: ret
 %s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12>

llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll

Lines changed: 14 additions & 28 deletions
@@ -5,9 +5,8 @@
 define <2 x i8> @v2i8(<2 x i8> %a) {
 ; CHECK-LABEL: v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -18,9 +17,8 @@ define <2 x i8> @v2i8(<2 x i8> %a) {
 define <4 x i8> @v2i8_2(<2 x i8> %a, <2 x i8> %b) {
 ; CHECK-LABEL: v2i8_2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vslideup.vi v10, v8, 1
 ; CHECK-NEXT: vslidedown.vi v8, v9, 1
 ; CHECK-NEXT: vslideup.vi v8, v9, 1
@@ -153,9 +151,8 @@ define <32 x i8> @v16i8_2(<16 x i8> %a, <16 x i8> %b) {
 define <2 x i16> @v2i16(<2 x i16> %a) {
 ; CHECK-LABEL: v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -166,9 +163,8 @@ define <2 x i16> @v2i16(<2 x i16> %a) {
 define <4 x i16> @v2i16_2(<2 x i16> %a, <2 x i16> %b) {
 ; CHECK-LABEL: v2i16_2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vslideup.vi v10, v8, 1
 ; CHECK-NEXT: vslidedown.vi v8, v9, 1
 ; CHECK-NEXT: vslideup.vi v8, v9, 1
@@ -300,9 +296,8 @@ define <32 x i16> @v16i16_2(<16 x i16> %a, <16 x i16> %b) {
 define <2 x i32> @v2i32(<2 x i32> %a) {
 ; CHECK-LABEL: v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -313,9 +308,8 @@ define <2 x i32> @v2i32(<2 x i32> %a) {
 define <4 x i32> @v2i32_2(<2 x i32> %a, < 2 x i32> %b) {
 ; CHECK-LABEL: v2i32_2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vslideup.vi v10, v8, 1
 ; CHECK-NEXT: vslidedown.vi v8, v9, 1
 ; CHECK-NEXT: vslideup.vi v8, v9, 1
@@ -444,9 +438,8 @@ define <32 x i32> @v16i32_2(<16 x i32> %a, <16 x i32> %b) {
 define <2 x i64> @v2i64(<2 x i64> %a) {
 ; CHECK-LABEL: v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
@@ -457,9 +450,8 @@ define <2 x i64> @v2i64(<2 x i64> %a) {
 define <4 x i64> @v2i64_2(<2 x i64> %a, < 2 x i64> %b) {
 ; CHECK-LABEL: v2i64_2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vslideup.vi v10, v8, 1
 ; CHECK-NEXT: vslidedown.vi v8, v9, 1
 ; CHECK-NEXT: vslideup.vi v8, v9, 1
@@ -531,9 +523,8 @@ define <8 x i64> @v4i64_2(<4 x i64> %a, <4 x i64> %b) {
 define <2 x half> @v2f16(<2 x half> %a) {
 ; CHECK-LABEL: v2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -544,9 +535,8 @@ define <2 x half> @v2f16(<2 x half> %a) {
 define <4 x half> @v2f16_2(<2 x half> %a, <2 x half> %b) {
 ; CHECK-LABEL: v2f16_2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vslideup.vi v10, v8, 1
 ; CHECK-NEXT: vslidedown.vi v8, v9, 1
 ; CHECK-NEXT: vslideup.vi v8, v9, 1
@@ -652,9 +642,8 @@ define <32 x half> @v16f16_2(<16 x half> %a) {
 define <2 x float> @v2f32(<2 x float> %a) {
 ; CHECK-LABEL: v2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -665,9 +654,8 @@ define <2 x float> @v2f32(<2 x float> %a) {
 define <4 x float> @v2f32_2(<2 x float> %a, <2 x float> %b) {
 ; CHECK-LABEL: v2f32_2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vslideup.vi v10, v8, 1
 ; CHECK-NEXT: vslidedown.vi v8, v9, 1
 ; CHECK-NEXT: vslideup.vi v8, v9, 1
@@ -743,9 +731,8 @@ define <16 x float> @v8f32_2(<8 x float> %a, <8 x float> %b) {
 define <2 x double> @v2f64(<2 x double> %a) {
 ; CHECK-LABEL: v2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
@@ -756,9 +743,8 @@ define <2 x double> @v2f64(<2 x double> %a) {
 define <4 x double> @v2f64_2(<2 x double> %a, < 2 x double> %b) {
 ; CHECK-LABEL: v2f64_2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
 ; CHECK-NEXT: vslideup.vi v10, v8, 1
 ; CHECK-NEXT: vslidedown.vi v8, v9, 1
 ; CHECK-NEXT: vslideup.vi v8, v9, 1
