Commit 9478822
[RISCV] Decompose single source shuffles (without exact VLEN) (llvm#126951)
(This is a re-apply of what was 8374d42. The bug there was fairly major - despite the comments and review description, the code was using each register in the source register group, not only the first register. This was completely wrong.)

This is a continuation of the work started in llvm#125735 to lower selected VLA shuffles in linear m1 components instead of generating O(LMUL^2) or O(LMUL*Log2(LMUL)) high-LMUL shuffles. This patch focuses on shuffles where all the elements being used across the entire destination register group come from a single register in the source register group. Such cases come up fairly frequently via e.g. the spread(N) and repeat(N) idioms.

One subtlety in this patch is the handling of the index vector for vrgatherei16.vv. Because the index and source registers can have different EEWs, the index vector for the Nth chunk of the destination is not guaranteed to be register aligned. In fact, it is common for e.g. an EEW=64 shuffle to have EEW=16 indices, which works out to four index chunks per source register. Given this, we have to pay a cost for extracting these chunks into the low position before performing each shuffle.

I'd initially expressed this as a naive extract-subvector for each data-parallel piece. However, at high LMUL this quickly caused register pressure problems, since we could at worst need 4x the temporary registers for the index. Instead, this patch uses a repeating slidedown chained from previous iterations. This increases the critical path by at worst 3 slides (SEW=64 is the worst case), but reduces register pressure to at worst 2x - and only if the original index vector is reused elsewhere. I view this as arguably a bit of a workaround (since our scheduling should have done better with the plain extract variant), but a probably necessary one.
1 parent 53c618c commit 9478822
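To make the decomposition concrete, here is a minimal scalar sketch of the lowering strategy (illustrative C++ only - the function name, the explicit VLMAX parameter, and the use of -1 for undef lanes are conveniences of this sketch, not LLVM code):

#include <cassert>
#include <vector>

// Model: the destination register group is built as N independent
// VLMAX-sized (m1) gathers, each reading only the first source register,
// instead of one O(LMUL^2) gather across the whole register group.
std::vector<int> lowerLowSourceShuffle(const std::vector<int> &Src,
                                       const std::vector<int> &Mask,
                                       size_t VLMAX) {
  std::vector<int> Dst(Mask.size(), 0);
  size_t N = Mask.size() / VLMAX;   // m1 chunks in the destination group
  for (size_t I = 0; I != N; ++I) { // one m1 vrgather per chunk
    for (size_t E = 0; E != VLMAX; ++E) {
      int Idx = Mask[I * VLMAX + E];
      assert(Idx < (int)VLMAX && "mask must only touch the low register");
      if (Idx >= 0) // -1 models an undef lane
        Dst[I * VLMAX + E] = Src[Idx];
    }
  }
  return Dst;
}
// Example - spread(2) of <8 x i32> with VLMAX=4: the mask
// {0,-1,1,-1,2,-1,3,-1} only references Src[0..3], so both destination
// chunks are gathers from the same (first) source register.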

File tree: 5 files changed, +203 -79 lines

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 41 additions & 0 deletions
@@ -5354,6 +5354,12 @@ static bool isLocalRepeatingShuffle(ArrayRef<int> Mask, int Span) {
   return true;
 }
 
+/// Is this mask only using elements from the first span of the input?
+static bool isLowSourceShuffle(ArrayRef<int> Mask, int Span) {
+  return all_of(Mask,
+                [&](const auto &Idx) { return Idx == -1 || Idx < Span; });
+}
+
 /// Try to widen element type to get a new mask value for a better permutation
 /// sequence. This doesn't try to inspect the widened mask for profitability;
 /// we speculate the widened form is equal or better. This has the effect of
@@ -5769,6 +5775,41 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
        Gather = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, Gather,
                             SubVec, SubIdx);
      }
+    } else if (NumElts > MinVLMAX && isLowSourceShuffle(Mask, MinVLMAX)) {
+      // If we have a shuffle which only uses the first register in our
+      // source register group, we can do a linear number of m1 vrgathers
+      // reusing the same source register (but with different indices)
+      // TODO: This can be generalized for m2 or m4, or for any shuffle
+      // for which we can do a vslidedown followed by this expansion.
+      const MVT M1VT = getLMUL1VT(ContainerVT);
+      EVT SubIndexVT = M1VT.changeVectorElementType(IndexVT.getScalarType());
+      auto [InnerTrueMask, InnerVL] =
+          getDefaultScalableVLOps(M1VT, DL, DAG, Subtarget);
+      int N = ContainerVT.getVectorMinNumElements() /
+              M1VT.getVectorMinNumElements();
+      assert(isPowerOf2_32(N) && N <= 8);
+      Gather = DAG.getUNDEF(ContainerVT);
+      SDValue SlideAmt =
+          DAG.getElementCount(DL, XLenVT, M1VT.getVectorElementCount());
+      SDValue SubV1 =
+          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, M1VT, V1,
+                      DAG.getVectorIdxConstant(0, DL));
+      for (int i = 0; i < N; i++) {
+        if (i != 0)
+          LHSIndices = getVSlidedown(DAG, Subtarget, DL, IndexContainerVT,
+                                     DAG.getUNDEF(IndexContainerVT), LHSIndices,
+                                     SlideAmt, TrueMask, VL);
+        SDValue SubIndex =
+            DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubIndexVT, LHSIndices,
+                        DAG.getVectorIdxConstant(0, DL));
+        SDValue SubVec =
+            DAG.getNode(GatherVVOpc, DL, M1VT, SubV1, SubIndex,
+                        DAG.getUNDEF(M1VT), InnerTrueMask, InnerVL);
+        SDValue SubIdx =
+            DAG.getVectorIdxConstant(M1VT.getVectorMinNumElements() * i, DL);
+        Gather = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, Gather,
+                             SubVec, SubIdx);
+      }
    } else {
      Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
                           DAG.getUNDEF(ContainerVT), TrueMask, VL);
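
Since the index vector's EEW can be smaller than the data SEW, the per-chunk index extraction in the loop above deserves a sketch of its own. A minimal scalar model of the chained slidedown (illustrative C++ with assumed names; getVSlidedown and the EXTRACT_SUBVECTOR at index 0 are modeled, not called):

#include <vector>

// Indices models the whole index register group (LHSIndices above). On
// every iteration but the first, slide the group down by one m1 chunk of
// data elements (the getVSlidedown call), then read the low ChunkLen
// elements (the EXTRACT_SUBVECTOR at index 0).
std::vector<int> nextIndexChunk(std::vector<int> &Indices, size_t ChunkLen,
                                bool First) {
  if (!First)
    Indices.erase(Indices.begin(), Indices.begin() + ChunkLen);
  return std::vector<int>(Indices.begin(), Indices.begin() + ChunkLen);
}
// A caller mirrors the codegen loop:
//   for (int I = 0; I < N; ++I) {
//     std::vector<int> Sub = nextIndexChunk(IdxGroup, VLMAX, I == 0);
//     ... one m1 vrgather of the first source register with Sub ...
//   }
// With SEW=64 data and EEW=16 indices, one index register holds four
// chunks' worth of indices, so up to three chained slides occur before
// the next index register boundary - the worst case cited in the commit
// message.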

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll

Lines changed: 12 additions & 7 deletions
@@ -38,15 +38,20 @@ define <4 x float> @interleave_v2f32(<2 x float> %x, <2 x float> %y) {
 define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
 ; V128-LABEL: interleave_v2f64:
 ; V128: # %bb.0:
+; V128-NEXT: csrr a0, vlenb
 ; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; V128-NEXT: vmv1r.v v12, v9
-; V128-NEXT: vid.v v9
+; V128-NEXT: vid.v v10
 ; V128-NEXT: vmv.v.i v0, 10
-; V128-NEXT: vsrl.vi v14, v9, 1
-; V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; V128-NEXT: vrgatherei16.vv v10, v8, v14
-; V128-NEXT: vrgatherei16.vv v10, v12, v14, v0.t
-; V128-NEXT: vmv.v.v v8, v10
+; V128-NEXT: srli a0, a0, 3
+; V128-NEXT: vsrl.vi v10, v10, 1
+; V128-NEXT: vslidedown.vx v11, v10, a0
+; V128-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; V128-NEXT: vrgatherei16.vv v13, v9, v11
+; V128-NEXT: vrgatherei16.vv v12, v9, v10
+; V128-NEXT: vrgatherei16.vv v15, v8, v11
+; V128-NEXT: vrgatherei16.vv v14, v8, v10
+; V128-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; V128-NEXT: vmerge.vvm v8, v14, v12, v0
 ; V128-NEXT: ret
 ;
 ; RV32-V512-LABEL: interleave_v2f64:

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll

Lines changed: 12 additions & 7 deletions
@@ -51,15 +51,20 @@ define <4 x i32> @interleave_v2i32(<2 x i32> %x, <2 x i32> %y) {
 define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; V128-LABEL: interleave_v2i64:
 ; V128: # %bb.0:
+; V128-NEXT: csrr a0, vlenb
 ; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; V128-NEXT: vmv1r.v v12, v9
-; V128-NEXT: vid.v v9
+; V128-NEXT: vid.v v10
 ; V128-NEXT: vmv.v.i v0, 10
-; V128-NEXT: vsrl.vi v14, v9, 1
-; V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; V128-NEXT: vrgatherei16.vv v10, v8, v14
-; V128-NEXT: vrgatherei16.vv v10, v12, v14, v0.t
-; V128-NEXT: vmv.v.v v8, v10
+; V128-NEXT: srli a0, a0, 3
+; V128-NEXT: vsrl.vi v10, v10, 1
+; V128-NEXT: vslidedown.vx v11, v10, a0
+; V128-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; V128-NEXT: vrgatherei16.vv v13, v9, v11
+; V128-NEXT: vrgatherei16.vv v12, v9, v10
+; V128-NEXT: vrgatherei16.vv v15, v8, v11
+; V128-NEXT: vrgatherei16.vv v14, v8, v10
+; V128-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; V128-NEXT: vmerge.vvm v8, v14, v12, v0
 ; V128-NEXT: ret
 ;
 ; RV32-V512-LABEL: interleave_v2i64:
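
Both interleave diffs above share one shape: the same shifted index vector (vid >> 1) gathers from each source in m1 pieces, and a final vmerge on mask 0b1010 interleaves the two results. A scalar model of that sequence (an illustration under assumed names, not the compiler's code):

#include <array>
#include <cstdint>

// out = interleave(x, y) for two 2-element sources: both sources are
// gathered with index i/2 (so each is a single-source, low-register
// shuffle for the new m1 path), then a merge on mask 0b1010 picks the
// odd lanes from the y-gather - matching the vmerge.vvm above.
std::array<int64_t, 4> interleave2(const std::array<int64_t, 2> &X,
                                   const std::array<int64_t, 2> &Y) {
  std::array<int64_t, 4> Out{};
  for (int I = 0; I < 4; ++I) {
    int64_t FromX = X[I / 2]; // vrgatherei16.vv from x, indices {0,0,1,1}
    int64_t FromY = Y[I / 2]; // vrgatherei16.vv from y, same indices
    Out[I] = (I & 1) ? FromY : FromX; // vmerge on mask 0b1010
  }
  return Out;
}
// interleave2({x0, x1}, {y0, y1}) == {x0, y0, x1, y1}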

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll

Lines changed: 65 additions & 33 deletions
@@ -817,13 +817,17 @@ define <8 x i32> @shuffle_spread2_singlesrc_e32_index1(<8 x i32> %v) {
 define <8 x i32> @shuffle_spread2_singlesrc_e32_index2(<8 x i32> %v) {
 ; CHECK-LABEL: shuffle_spread2_singlesrc_e32_index2:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vid.v v10
-; CHECK-NEXT: vsrl.vi v10, v10, 1
-; CHECK-NEXT: vadd.vi v12, v10, -1
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
-; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vsrl.vi v9, v9, 1
+; CHECK-NEXT: vadd.vi v9, v9, -1
+; CHECK-NEXT: vslidedown.vx v10, v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v11, v8, v10
+; CHECK-NEXT: vrgatherei16.vv v10, v8, v9
+; CHECK-NEXT: vmv2r.v v8, v10
 ; CHECK-NEXT: ret
 %out = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 undef, i32 undef, i32 0, i32 undef, i32 1, i32 undef, i32 2, i32 undef>
 ret <8 x i32> %out
@@ -833,12 +837,16 @@ define <8 x i32> @shuffle_spread3_singlesrc_e32(<8 x i32> %v) {
 ; CHECK-LABEL: shuffle_spread3_singlesrc_e32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: li a0, 1
-; CHECK-NEXT: vslide1down.vx v12, v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
-; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: vslide1down.vx v9, v9, a0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 3
+; CHECK-NEXT: vslidedown.vx v10, v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v11, v8, v10
+; CHECK-NEXT: vrgatherei16.vv v10, v8, v9
+; CHECK-NEXT: vmv2r.v v8, v10
 ; CHECK-NEXT: ret
 %out = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 0, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 2, i32 undef>
 ret <8 x i32> %out
@@ -848,12 +856,16 @@ define <8 x i32> @shuffle_spread3_singlesrc_e32(<8 x i32> %v) {
 define <8 x i32> @shuffle_spread4_singlesrc_e32(<8 x i32> %v) {
 ; CHECK-LABEL: shuffle_spread4_singlesrc_e32:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vid.v v10
-; CHECK-NEXT: vsrl.vi v12, v10, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
-; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vsrl.vi v9, v9, 2
+; CHECK-NEXT: vslidedown.vx v10, v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v11, v8, v10
+; CHECK-NEXT: vrgatherei16.vv v10, v8, v9
+; CHECK-NEXT: vmv2r.v v8, v10
 ; CHECK-NEXT: ret
 %out = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 undef>
 ret <8 x i32> %out
@@ -977,15 +989,19 @@ define <8 x i32> @shuffle_repeat3_singlesrc_e32(<8 x i32> %v) {
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.v.i v0, 7
-; CHECK-NEXT: vmv.v.i v11, 1
+; CHECK-NEXT: vmv.v.i v10, 1
 ; CHECK-NEXT: li a0, 192
-; CHECK-NEXT: vmv.s.x v10, a0
-; CHECK-NEXT: vmerge.vim v11, v11, 0, v0
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: vmerge.vim v12, v11, 2, v0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
-; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: vmerge.vim v10, v10, 0, v0
+; CHECK-NEXT: vmv.v.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 2, v0
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vslidedown.vx v10, v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v11, v8, v10
+; CHECK-NEXT: vrgatherei16.vv v10, v8, v9
+; CHECK-NEXT: vmv2r.v v8, v10
 ; CHECK-NEXT: ret
 %out = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 2, i32 2>
 ret <8 x i32> %out
@@ -994,12 +1010,16 @@ define <8 x i32> @shuffle_repeat3_singlesrc_e32(<8 x i32> %v) {
 define <8 x i32> @shuffle_repeat4_singlesrc_e32(<8 x i32> %v) {
 ; CHECK-LABEL: shuffle_repeat4_singlesrc_e32:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vid.v v10
-; CHECK-NEXT: vsrl.vi v12, v10, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
-; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vsrl.vi v9, v9, 2
+; CHECK-NEXT: vslidedown.vx v10, v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v11, v8, v10
+; CHECK-NEXT: vrgatherei16.vv v10, v8, v9
+; CHECK-NEXT: vmv2r.v v8, v10
 ; CHECK-NEXT: ret
 %out = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1>
 ret <8 x i32> %out
@@ -1291,12 +1311,24 @@ define void @shuffle_i128_splat(ptr %p) nounwind {
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: lui a2, 16
+; CHECK-NEXT: srli a1, a1, 3
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v12, a1
+; CHECK-NEXT: vmv.v.x v9, a2
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v10, v9, a1
+; CHECK-NEXT: vslidedown.vx v11, v10, a1
+; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v13, v8, v10
+; CHECK-NEXT: vrgatherei16.vv v12, v8, v9
+; CHECK-NEXT: vrgatherei16.vv v14, v8, v11
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v9, v11, a1
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v15, v8, v9
 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v16, v8, v12
-; CHECK-NEXT: vse64.v v16, (a0)
+; CHECK-NEXT: vse64.v v12, (a0)
 ; CHECK-NEXT: ret
 %a = load <4 x i128>, ptr %p
 %res = shufflevector <4 x i128> %a, <4 x i128> poison, <4 x i32> <i32 0, i32 0, i32 0, i32 0>

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-changes-length.ll

Lines changed: 73 additions & 32 deletions
@@ -237,10 +237,15 @@ define <8 x i32> @v8i32_v4i32(<4 x i32>) {
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
 ; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle16.v v12, (a0)
-; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
-; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vslidedown.vx v10, v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v11, v8, v10
+; CHECK-NEXT: vrgatherei16.vv v10, v8, v9
+; CHECK-NEXT: vmv2r.v v8, v10
 ; CHECK-NEXT: ret
 %2 = shufflevector <4 x i32> %0, <4 x i32> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 1, i32 2, i32 0, i32 3>
 ret <8 x i32> %2
@@ -249,30 +254,40 @@ define <8 x i32> @v8i32_v4i32(<4 x i32>) {
 define <16 x i32> @v16i32_v4i32(<4 x i32>) {
 ; CHECK-LABEL: v16i32_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, 2
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v9, 3
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: lui a0, 2
+; CHECK-NEXT: vmv.v.i v10, 3
 ; CHECK-NEXT: addi a1, a0, 265
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT: vmv.s.x v0, a1
 ; CHECK-NEXT: lui a1, 4
 ; CHECK-NEXT: addi a1, a1, 548
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v9, v9, 2, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vmv.s.x v8, a1
+; CHECK-NEXT: csrr a1, vlenb
 ; CHECK-NEXT: addi a0, a0, -1856
+; CHECK-NEXT: srli a1, a1, 2
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v9, v9, 0, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmerge.vim v10, v10, 2, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 0, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vsext.vf2 v16, v9
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v12, v8, v16
-; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vslidedown.vx v14, v10, a1
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v9, v12, v14
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vslidedown.vx v14, v14, a1
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v8, v12, v10
+; CHECK-NEXT: vrgatherei16.vv v10, v12, v14
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vslidedown.vx v14, v14, a1
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v11, v12, v14
 ; CHECK-NEXT: ret
 %2 = shufflevector <4 x i32> %0, <4 x i32> poison, <16 x i32> <i32 2, i32 3, i32 0, i32 2, i32 3, i32 0, i32 1, i32 1, i32 2, i32 0, i32 3, i32 1, i32 1, i32 2, i32 0, i32 3>
 ret <16 x i32> %2
@@ -290,22 +305,48 @@ define <32 x i32> @v32i32_v4i32(<4 x i32>) {
 ; CHECK-NEXT: addi a1, a1, 548
 ; CHECK-NEXT: vmv.s.x v9, a1
 ; CHECK-NEXT: lui a1, 100550
+; CHECK-NEXT: addi a1, a1, 64
+; CHECK-NEXT: vmv.s.x v10, a1
+; CHECK-NEXT: csrr a1, vlenb
 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v10, 3
-; CHECK-NEXT: addi a0, a1, 64
-; CHECK-NEXT: vmerge.vim v18, v10, 2, v0
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: vmv.v.i v12, 3
+; CHECK-NEXT: srli a1, a1, 2
+; CHECK-NEXT: vmerge.vim v12, v12, 2, v0
 ; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v18, v18, 0, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vmerge.vim v16, v18, 1, v0
+; CHECK-NEXT: vmerge.vim v12, v12, 0, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v10, v12, 1, v0
 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vsext.vf2 v24, v16
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v16, v8, v24
-; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: vsext.vf2 v12, v10
+; CHECK-NEXT: vslidedown.vx v20, v12, a1
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v17, v8, v20
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vslidedown.vx v20, v20, a1
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v16, v8, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vslidedown.vx v12, v20, a1
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v18, v8, v20
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vslidedown.vx v24, v12, a1
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v19, v8, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vslidedown.vx v12, v24, a1
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v20, v8, v24
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vslidedown.vx v24, v12, a1
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v21, v8, v12
+; CHECK-NEXT: vrgatherei16.vv v22, v8, v24
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vslidedown.vx v12, v24, a1
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v23, v8, v12
+; CHECK-NEXT: vmv8r.v v8, v16
 ; CHECK-NEXT: ret
 %2 = shufflevector <4 x i32> %0, <4 x i32> poison, <32 x i32> <i32 2, i32 3, i32 0, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 2, i32 3, i32 0, i32 1, i32 1, i32 2, i32 0, i32 3, i32 1, i32 1, i32 2, i32 0, i32 3, i32 1, i32 2, i32 0, i32 3, i32 1, i32 1, i32 2, i32 0, i32 3>
 ret <32 x i32> %2
