@@ -3498,14 +3498,6 @@ getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL,
   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VT, Ops);
 }
 
-static MVT getLMUL1VT(MVT VT) {
-  assert(VT.getVectorElementType().getSizeInBits() <= RISCV::RVVBitsPerBlock &&
-         "Unexpected vector MVT");
-  return MVT::getScalableVectorVT(
-      VT.getVectorElementType(),
-      RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
-}
-
 struct VIDSequence {
   int64_t StepNumerator;
   unsigned StepDenominator;
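The call sites updated below refer to the deleted helper as RISCVTargetLowering::getM1VT. The header change is not shown in this diff, so the following is only a sketch of the presumed new declaration, assuming the helper moves into the RISCVTargetLowering class in RISCVISelLowering.h with its body carried over verbatim:

// Sketch: presumed relocation of the helper (placement in the class is an
// assumption; the body is the static function deleted above, unchanged).
class RISCVTargetLowering : public TargetLowering {
public:
  // Return the scalable vector type that fills exactly one vector register
  // (LMUL = 1) for the element type of VT.
  static MVT getM1VT(MVT VT) {
    assert(VT.getVectorElementType().getSizeInBits() <=
               RISCV::RVVBitsPerBlock &&
           "Unexpected vector MVT");
    return MVT::getScalableVectorVT(
        VT.getVectorElementType(),
        RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
  }
  // ... rest of the class unchanged ...
};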
@@ -4316,7 +4308,7 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
     EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
     MVT OneRegVT = MVT::getVectorVT(ElemVT, ElemsPerVReg);
     MVT M1VT = getContainerForFixedLengthVector(DAG, OneRegVT, Subtarget);
-    assert(M1VT == getLMUL1VT(M1VT));
+    assert(M1VT == RISCVTargetLowering::getM1VT(M1VT));
 
     // The following semantically builds up a fixed length concat_vector
     // of the component build_vectors. We eagerly lower to scalable and
@@ -4356,7 +4348,7 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
   unsigned NumDefElts = NumElts - NumUndefElts;
   if (NumDefElts >= 8 && NumDefElts > NumElts / 2 &&
-      ContainerVT.bitsLE(getLMUL1VT(ContainerVT))) {
+      ContainerVT.bitsLE(RISCVTargetLowering::getM1VT(ContainerVT))) {
     SmallVector<SDValue> SubVecAOps, SubVecBOps;
     SmallVector<SDValue> MaskVals;
     SDValue UndefElem = DAG.getUNDEF(Op->getOperand(0)->getValueType(0));
@@ -5114,7 +5106,8 @@ static SDValue lowerVZIP(unsigned Opc, SDValue Op0, SDValue Op1,
 
   MVT InnerVT = ContainerVT;
   auto [Mask, VL] = getDefaultVLOps(IntVT, InnerVT, DL, DAG, Subtarget);
-  if (Op1.isUndef() && ContainerVT.bitsGT(getLMUL1VT(ContainerVT)) &&
+  if (Op1.isUndef() &&
+      ContainerVT.bitsGT(RISCVTargetLowering::getM1VT(ContainerVT)) &&
       (RISCVISD::RI_VUNZIP2A_VL == Opc || RISCVISD::RI_VUNZIP2B_VL == Opc)) {
     InnerVT = ContainerVT.getHalfNumVectorElementsVT();
     VL = DAG.getConstant(VT.getVectorNumElements() / 2, DL,
@@ -5382,7 +5375,7 @@ static SDValue lowerShuffleViaVRegSplitting(ShuffleVectorSDNode *SVN,
   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
   MVT OneRegVT = MVT::getVectorVT(ElemVT, ElemsPerVReg);
   MVT M1VT = getContainerForFixedLengthVector(DAG, OneRegVT, Subtarget);
-  assert(M1VT == getLMUL1VT(M1VT));
+  assert(M1VT == RISCVTargetLowering::getM1VT(M1VT));
   unsigned NumOpElts = M1VT.getVectorMinNumElements();
   unsigned NumElts = ContainerVT.getVectorMinNumElements();
   unsigned NumOfSrcRegs = NumElts / NumOpElts;
@@ -6152,7 +6145,7 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
     return convertFromScalableVector(VT, Gather, DAG, Subtarget);
   }
 
-  const MVT M1VT = getLMUL1VT(ContainerVT);
+  const MVT M1VT = RISCVTargetLowering::getM1VT(ContainerVT);
   EVT SubIndexVT = M1VT.changeVectorElementType(IndexVT.getScalarType());
   auto [InnerTrueMask, InnerVL] =
       getDefaultScalableVLOps(M1VT, DL, DAG, Subtarget);
@@ -7801,7 +7794,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     // This reduces the length of the chain of vslideups and allows us to
     // perform the vslideups at a smaller LMUL, limited to MF2.
     if (Op.getNumOperands() > 2 &&
-        ContainerVT.bitsGE(getLMUL1VT(ContainerVT))) {
+        ContainerVT.bitsGE(RISCVTargetLowering::getM1VT(ContainerVT))) {
       MVT HalfVT = VT.getHalfNumVectorElementsVT();
       assert(isPowerOf2_32(Op.getNumOperands()));
       size_t HalfNumOps = Op.getNumOperands() / 2;
@@ -9821,11 +9814,12 @@ getSmallestVTForIndex(MVT VecVT, unsigned MaxIdx, SDLoc DL, SelectionDAG &DAG,
   const unsigned MinVLMAX = VectorBitsMin / EltSize;
   MVT SmallerVT;
   if (MaxIdx < MinVLMAX)
-    SmallerVT = getLMUL1VT(VecVT);
+    SmallerVT = RISCVTargetLowering::getM1VT(VecVT);
   else if (MaxIdx < MinVLMAX * 2)
-    SmallerVT = getLMUL1VT(VecVT).getDoubleNumVectorElementsVT();
+    SmallerVT =
+        RISCVTargetLowering::getM1VT(VecVT).getDoubleNumVectorElementsVT();
   else if (MaxIdx < MinVLMAX * 4)
-    SmallerVT = getLMUL1VT(VecVT)
+    SmallerVT = RISCVTargetLowering::getM1VT(VecVT)
                     .getDoubleNumVectorElementsVT()
                     .getDoubleNumVectorElementsVT();
   if (!SmallerVT.isValid() || !VecVT.bitsGT(SmallerVT))
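To make the ladder in this hunk concrete: getM1VT returns the LMUL = 1 type, and each getDoubleNumVectorElementsVT() call doubles it to LMUL = 2 and then LMUL = 4. A worked sketch, assuming the standard RISCV::RVVBitsPerBlock of 64 and an i32 source type chosen purely for illustration:

// Illustrative only: the candidate types getSmallestVTForIndex steps through
// for a hypothetical nxv16i32 (LMUL = 8) input, given a 64-bit RVV block.
MVT VecVT = MVT::nxv16i32;                    // LMUL = 8
MVT M1 = RISCVTargetLowering::getM1VT(VecVT); // nxv2i32 (64/32 = 2 elts), LMUL = 1
MVT M2 = M1.getDoubleNumVectorElementsVT();   // nxv4i32, LMUL = 2
MVT M4 = M2.getDoubleNumVectorElementsVT();   // nxv8i32, LMUL = 4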
@@ -9898,9 +9892,8 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
   // If we're compiling for an exact VLEN value, we can always perform
   // the insert in m1 as we can determine the register corresponding to
   // the index in the register group.
-  const MVT M1VT = getLMUL1VT(ContainerVT);
-  if (auto VLEN = Subtarget.getRealVLen();
-      VLEN && ContainerVT.bitsGT(M1VT)) {
+  const MVT M1VT = RISCVTargetLowering::getM1VT(ContainerVT);
+  if (auto VLEN = Subtarget.getRealVLen(); VLEN && ContainerVT.bitsGT(M1VT)) {
     EVT ElemVT = VecVT.getVectorElementType();
     unsigned ElemsPerVReg = *VLEN / ElemVT.getFixedSizeInBits();
     unsigned RemIdx = OrigIdx % ElemsPerVReg;
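The exact-VLEN path above is plain modular arithmetic on the element index. A hypothetical example (all numbers assumed for illustration; only RemIdx appears in the hunk, and the register number shown last is its natural counterpart, not code from this diff):

// Hypothetical values: exact VLEN = 128 with i32 elements.
unsigned VLEN = 128;
unsigned ElemsPerVReg = VLEN / 32;         // 4 elements per vector register
unsigned OrigIdx = 9;                      // index into the register group
unsigned RemIdx = OrigIdx % ElemsPerVReg;  // 1: slot within the m1 register
unsigned VRegNum = OrigIdx / ElemsPerVReg; // 2: which register holds the slot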
@@ -10127,7 +10120,7 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
   const auto VLen = Subtarget.getRealVLen();
   if (auto *IdxC = dyn_cast<ConstantSDNode>(Idx);
       IdxC && VLen && VecVT.getSizeInBits().getKnownMinValue() > *VLen) {
-    MVT M1VT = getLMUL1VT(ContainerVT);
+    MVT M1VT = RISCVTargetLowering::getM1VT(ContainerVT);
     unsigned OrigIdx = IdxC->getZExtValue();
     EVT ElemVT = VecVT.getVectorElementType();
     unsigned ElemsPerVReg = *VLen / ElemVT.getFixedSizeInBits();
@@ -10175,7 +10168,8 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
   // TODO: We don't have the same code for insert_vector_elt because we
   // have BUILD_VECTOR and handle the degenerate case there. Should we
   // consider adding an inverse BUILD_VECTOR node?
-  MVT LMUL2VT = getLMUL1VT(ContainerVT).getDoubleNumVectorElementsVT();
+  MVT LMUL2VT =
+      RISCVTargetLowering::getM1VT(ContainerVT).getDoubleNumVectorElementsVT();
   if (ContainerVT.bitsGT(LMUL2VT) && VecVT.isFixedLengthVector())
     return SDValue();
 
@@ -11107,7 +11101,7 @@ static SDValue lowerReductionSeq(unsigned RVVOpcode, MVT ResVT,
                                  SDValue VL, const SDLoc &DL, SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {
   const MVT VecVT = Vec.getSimpleValueType();
-  const MVT M1VT = getLMUL1VT(VecVT);
+  const MVT M1VT = RISCVTargetLowering::getM1VT(VecVT);
   const MVT XLenVT = Subtarget.getXLenVT();
   const bool NonZeroAVL = isNonZeroAVL(VL);
 
@@ -11485,8 +11479,8 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
       assert(VLen);
       AlignedIdx /= *VLen / RISCV::RVVBitsPerBlock;
     }
-    if (ContainerVecVT.bitsGT(getLMUL1VT(ContainerVecVT))) {
-      InterSubVT = getLMUL1VT(ContainerVecVT);
+    if (ContainerVecVT.bitsGT(RISCVTargetLowering::getM1VT(ContainerVecVT))) {
+      InterSubVT = RISCVTargetLowering::getM1VT(ContainerVecVT);
       // Extract a subvector equal to the nearest full vector register type. This
       // should resolve to a EXTRACT_SUBREG instruction.
       AlignedExtract = DAG.getExtractSubvector(DL, InterSubVT, Vec, AlignedIdx);
@@ -11677,7 +11671,7 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
   // If the vector type is an LMUL-group type, extract a subvector equal to the
   // nearest full vector register type.
   MVT InterSubVT = VecVT;
-  if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
+  if (VecVT.bitsGT(RISCVTargetLowering::getM1VT(VecVT))) {
     // If VecVT has an LMUL > 1, then SubVecVT should have a smaller LMUL, and
     // we should have successfully decomposed the extract into a subregister.
     // We use an extract_subvector that will resolve to a subreg extract.
@@ -11688,7 +11682,7 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
       assert(VLen);
       Idx /= *VLen / RISCV::RVVBitsPerBlock;
     }
-    InterSubVT = getLMUL1VT(VecVT);
+    InterSubVT = RISCVTargetLowering::getM1VT(VecVT);
     Vec = DAG.getExtractSubvector(DL, InterSubVT, Vec, Idx);
   }
 
@@ -11805,7 +11799,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_DEINTERLEAVE(SDValue Op,
     // For fractional LMUL, check if we can use a higher LMUL
     // instruction to avoid a vslidedown.
     if (SDValue Src = foldConcatVector(V1, V2);
-        Src && getLMUL1VT(VT).bitsGT(VT)) {
+        Src && RISCVTargetLowering::getM1VT(VT).bitsGT(VT)) {
      EVT NewVT = VT.getDoubleNumVectorElementsVT();
      Src = DAG.getExtractSubvector(DL, NewVT, Src, 0);
      // Freeze the source so we can increase its use count.
@@ -12187,7 +12181,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
   //   vrgather.vv v14, v9, v16
   //   vrgather.vv v13, v10, v16
   //   vrgather.vv v12, v11, v16
-  if (ContainerVT.bitsGT(getLMUL1VT(ContainerVT)) &&
+  if (ContainerVT.bitsGT(RISCVTargetLowering::getM1VT(ContainerVT)) &&
       ContainerVT.getVectorElementCount().isKnownMultipleOf(2)) {
     auto [Lo, Hi] = DAG.SplitVector(Vec, DL);
     Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, Lo.getSimpleValueType(), Lo);
@@ -12252,7 +12246,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
   // At LMUL > 1, do the index computation in 16 bits to reduce register
   // pressure.
   if (IntVT.getScalarType().bitsGT(MVT::i16) &&
-      IntVT.bitsGT(getLMUL1VT(IntVT))) {
+      IntVT.bitsGT(RISCVTargetLowering::getM1VT(IntVT))) {
     assert(isUInt<16>(MaxVLMAX - 1)); // Largest VLMAX is 65536 @ zvl65536b
     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
     IntVT = IntVT.changeVectorElementType(MVT::i16);
@@ -12339,7 +12333,7 @@ RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
   const auto [MinVLMAX, MaxVLMAX] =
       RISCVTargetLowering::computeVLMAXBounds(ContainerVT, Subtarget);
   if (MinVLMAX == MaxVLMAX && MinVLMAX == VT.getVectorNumElements() &&
-      getLMUL1VT(ContainerVT).bitsLE(ContainerVT)) {
+      RISCVTargetLowering::getM1VT(ContainerVT).bitsLE(ContainerVT)) {
     MachineMemOperand *MMO = Load->getMemOperand();
     SDValue NewLoad =
         DAG.getLoad(ContainerVT, DL, Load->getChain(), Load->getBasePtr(),
@@ -12400,7 +12394,7 @@ RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
   const auto [MinVLMAX, MaxVLMAX] =
       RISCVTargetLowering::computeVLMAXBounds(ContainerVT, Subtarget);
   if (MinVLMAX == MaxVLMAX && MinVLMAX == VT.getVectorNumElements() &&
-      getLMUL1VT(ContainerVT).bitsLE(ContainerVT)) {
+      RISCVTargetLowering::getM1VT(ContainerVT).bitsLE(ContainerVT)) {
     MachineMemOperand *MMO = Store->getMemOperand();
     return DAG.getStore(Store->getChain(), DL, NewValue, Store->getBasePtr(),
                         MMO->getPointerInfo(), MMO->getBaseAlign(),
@@ -20368,7 +20362,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
       return Scalar.getOperand(0);
 
     // Use M1 or smaller to avoid over constraining register allocation
-    const MVT M1VT = getLMUL1VT(VT);
+    const MVT M1VT = RISCVTargetLowering::getM1VT(VT);
     if (M1VT.bitsLT(VT)) {
       SDValue M1Passthru = DAG.getExtractSubvector(DL, M1VT, Passthru, 0);
      SDValue Result =
@@ -20382,15 +20376,15 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     // no purpose.
     if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
         Const && !Const->isZero() && isInt<5>(Const->getSExtValue()) &&
-        VT.bitsLE(getLMUL1VT(VT)) && Passthru.isUndef())
+        VT.bitsLE(RISCVTargetLowering::getM1VT(VT)) && Passthru.isUndef())
       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
 
     break;
   }
   case RISCVISD::VMV_X_S: {
     SDValue Vec = N->getOperand(0);
     MVT VecVT = N->getOperand(0).getSimpleValueType();
-    const MVT M1VT = getLMUL1VT(VecVT);
+    const MVT M1VT = RISCVTargetLowering::getM1VT(VecVT);
     if (M1VT.bitsLT(VecVT)) {
       Vec = DAG.getExtractSubvector(DL, M1VT, Vec, 0);
       return DAG.getNode(RISCVISD::VMV_X_S, DL, N->getSimpleValueType(0), Vec);