Skip to content

Commit 3dd61c1

Browse files
authored
[LV] Fix MVE regression from #132190 (#141736)
Register pressure was only considered if the vector bandwidth was being maximised (chosen either by the target or user options), but #132190 inadvertently caused high-pressure VFs to be pruned even when max bandwidth wasn't enabled. This PR restores the previous behaviour: register usage is computed and used to prune candidate VFs only when max bandwidth is in effect.
1 parent f12dd8f commit 3dd61c1

File tree

6 files changed

+338
-182
lines changed

6 files changed

+338
-182
lines changed

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 41 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -953,6 +953,14 @@ class LoopVectorizationCostModel {
953953
return expectedCost(UserVF).isValid();
954954
}
955955

956+
/// \return True if maximizing vector bandwidth is enabled by the target or
957+
/// user options, for the given register kind.
958+
bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind);
959+
960+
/// \return True if maximizing vector bandwidth is enabled by the target or
961+
/// user options, for the given vector factor.
962+
bool useMaxBandwidth(ElementCount VF);
963+
956964
/// \return The size (in bits) of the smallest and widest types in the code
957965
/// that needs to be vectorized. We ignore values that remain scalar such as
958966
/// 64 bit loop indices.
@@ -3921,6 +3929,20 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
39213929
return FixedScalableVFPair::getNone();
39223930
}
39233931

3932+
/// Convenience overload: decide whether max-bandwidth vectorization applies to
/// \p VF by mapping the VF onto the corresponding register kind (scalable VFs
/// use scalable-vector registers, fixed VFs use fixed-width ones) and
/// delegating to the register-kind overload.
bool LoopVectorizationCostModel::useMaxBandwidth(ElementCount VF) {
  TargetTransformInfo::RegisterKind RegKind =
      VF.isScalable() ? TargetTransformInfo::RGK_ScalableVector
                      : TargetTransformInfo::RGK_FixedWidthVector;
  return useMaxBandwidth(RegKind);
}
3937+
3938+
/// Decide whether vector bandwidth should be maximized for \p RegKind.
/// An explicit -vectorizer-maximize-bandwidth setting on the command line
/// always wins; otherwise defer to the target hook, or to the presence of
/// vector call variants when wider VFs are requested for them.
bool LoopVectorizationCostModel::useMaxBandwidth(
    TargetTransformInfo::RegisterKind RegKind) {
  // The user explicitly enabled max bandwidth.
  if (MaximizeBandwidth)
    return true;
  // The user explicitly disabled it; do not consult the target.
  if (MaximizeBandwidth.getNumOccurrences() != 0)
    return false;
  // No user preference: ask the target, and also widen when vector call
  // variants are present and wider VFs are allowed for them.
  return TTI.shouldMaximizeVectorBandwidth(RegKind) ||
         (UseWiderVFIfCallVariantsPresent && Legal->hasVectorCallVariants());
}
3945+
39243946
ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
39253947
unsigned MaxTripCount, unsigned SmallestType, unsigned WidestType,
39263948
ElementCount MaxSafeVF, bool FoldTailByMasking) {
@@ -3986,10 +4008,7 @@ ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
39864008
ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
39874009
: TargetTransformInfo::RGK_FixedWidthVector;
39884010
ElementCount MaxVF = MaxVectorElementCount;
3989-
if (MaximizeBandwidth ||
3990-
(MaximizeBandwidth.getNumOccurrences() == 0 &&
3991-
(TTI.shouldMaximizeVectorBandwidth(RegKind) ||
3992-
(UseWiderVFIfCallVariantsPresent && Legal->hasVectorCallVariants())))) {
4011+
if (useMaxBandwidth(RegKind)) {
39934012
auto MaxVectorElementCountMaxBW = ElementCount::get(
39944013
llvm::bit_floor(WidestRegister.getKnownMinValue() / SmallestType),
39954014
ComputeScalableMaxVF);
@@ -4344,15 +4363,21 @@ VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
43444363
for (auto &P : VPlans) {
43454364
ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
43464365
P->vectorFactors().end());
4347-
auto RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
4348-
for (auto [VF, RU] : zip_equal(VFs, RUs)) {
4366+
4367+
SmallVector<VPRegisterUsage, 8> RUs;
4368+
if (CM.useMaxBandwidth(TargetTransformInfo::RGK_ScalableVector) ||
4369+
CM.useMaxBandwidth(TargetTransformInfo::RGK_FixedWidthVector))
4370+
RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
4371+
4372+
for (unsigned I = 0; I < VFs.size(); I++) {
4373+
ElementCount VF = VFs[I];
43494374
// The cost for scalar VF=1 is already calculated, so ignore it.
43504375
if (VF.isScalar())
43514376
continue;
43524377

43534378
/// Don't consider the VF if it exceeds the number of registers for the
43544379
/// target.
4355-
if (RU.exceedsMaxNumRegs(TTI))
4380+
if (CM.useMaxBandwidth(VF) && RUs[I].exceedsMaxNumRegs(TTI))
43564381
continue;
43574382

43584383
InstructionCost C = CM.expectedCost(VF);
@@ -7106,8 +7131,14 @@ VectorizationFactor LoopVectorizationPlanner::computeBestVF() {
71067131
for (auto &P : VPlans) {
71077132
ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
71087133
P->vectorFactors().end());
7109-
auto RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
7110-
for (auto [VF, RU] : zip_equal(VFs, RUs)) {
7134+
7135+
SmallVector<VPRegisterUsage, 8> RUs;
7136+
if (CM.useMaxBandwidth(TargetTransformInfo::RGK_ScalableVector) ||
7137+
CM.useMaxBandwidth(TargetTransformInfo::RGK_FixedWidthVector))
7138+
RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
7139+
7140+
for (unsigned I = 0; I < VFs.size(); I++) {
7141+
ElementCount VF = VFs[I];
71117142
if (VF.isScalar())
71127143
continue;
71137144
if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
@@ -7129,7 +7160,7 @@ VectorizationFactor LoopVectorizationPlanner::computeBestVF() {
71297160
InstructionCost Cost = cost(*P, VF);
71307161
VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
71317162

7132-
if (RU.exceedsMaxNumRegs(TTI)) {
7163+
if (CM.useMaxBandwidth(VF) && RUs[I].exceedsMaxNumRegs(TTI)) {
71337164
LLVM_DEBUG(dbgs() << "LV(REG): Not considering vector loop of width "
71347165
<< VF << " because it uses too many registers\n");
71357166
continue;
Lines changed: 136 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,136 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter-out-after "^scalar.ph:" --version 5
; RUN: opt -mattr=+mve -passes=loop-vectorize < %s -S -o - | FileCheck %s

target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv8.1m.main-unknown-none-eabihf"

; Even though it has high register pressure, this example should still vectorise since the mul+add chains become VMLAs.
; The loop reads interleaved 3-byte elements and combines them with fixed-point
; multiply-accumulate chains; the coefficients (e.g. 19595/38470/7471 ~
; 0.299/0.587/0.114 * 2^16) suggest an RGB->YCbCr-style colour conversion --
; NOTE(review): assumed from the constants, confirm against the originating code.

define void @fn(i32 noundef %n, ptr %in, ptr %out) #0 {
; CHECK-LABEL: define void @fn(
; CHECK-SAME: i32 noundef [[N:%.*]], ptr [[IN:%.*]], ptr [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[CMP46_NOT:%.*]] = icmp eq i32 [[N]], 0
; CHECK-NEXT: br i1 [[CMP46_NOT]], [[EXIT:label %.*]], label %[[FOR_BODY_PREHEADER:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = mul i32 [[N]], 3
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[OUT]], i32 [[TMP0]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[IN]], i32 [[TMP0]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[OUT]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[IN]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], 3
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[IN]], %[[VECTOR_PH]] ], [ [[PTR_IND:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI2:%.*]] = phi ptr [ [[OUT]], %[[VECTOR_PH]] ], [ [[PTR_IND3:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32> <i32 0, i32 3, i32 6, i32 9>
; CHECK-NEXT: [[VECTOR_GEP4:%.*]] = getelementptr i8, ptr [[POINTER_PHI2]], <4 x i32> <i32 0, i32 3, i32 6, i32 9>
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, <4 x ptr> [[VECTOR_GEP]], i32 1
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> [[VECTOR_GEP]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i8> poison), !alias.scope [[META0:![0-9]+]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, <4 x ptr> [[VECTOR_GEP]], i32 2
; CHECK-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> [[TMP1]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i8> poison), !alias.scope [[META0]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> [[TMP2]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i8> poison), !alias.scope [[META0]]
; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[WIDE_MASKED_GATHER]] to <4 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw nsw <4 x i32> [[TMP3]], splat (i32 19595)
; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[WIDE_MASKED_GATHER5]] to <4 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw nsw <4 x i32> [[TMP5]], splat (i32 38470)
; CHECK-NEXT: [[TMP7:%.*]] = zext <4 x i8> [[WIDE_MASKED_GATHER6]] to <4 x i32>
; CHECK-NEXT: [[TMP8:%.*]] = mul nuw nsw <4 x i32> [[TMP7]], splat (i32 7471)
; CHECK-NEXT: [[TMP9:%.*]] = add nuw nsw <4 x i32> [[TMP4]], splat (i32 32768)
; CHECK-NEXT: [[TMP10:%.*]] = add nuw nsw <4 x i32> [[TMP9]], [[TMP6]]
; CHECK-NEXT: [[TMP11:%.*]] = add nuw nsw <4 x i32> [[TMP10]], [[TMP8]]
; CHECK-NEXT: [[TMP12:%.*]] = lshr <4 x i32> [[TMP11]], splat (i32 16)
; CHECK-NEXT: [[TMP13:%.*]] = trunc <4 x i32> [[TMP12]] to <4 x i8>
; CHECK-NEXT: [[TMP14:%.*]] = mul nuw nsw <4 x i32> [[TMP3]], splat (i32 32767)
; CHECK-NEXT: [[TMP15:%.*]] = mul nuw <4 x i32> [[TMP5]], splat (i32 16762097)
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw <4 x i32> [[TMP7]], splat (i32 16759568)
; CHECK-NEXT: [[TMP17:%.*]] = add nuw nsw <4 x i32> [[TMP14]], splat (i32 32768)
; CHECK-NEXT: [[TMP18:%.*]] = add nuw <4 x i32> [[TMP17]], [[TMP15]]
; CHECK-NEXT: [[TMP19:%.*]] = add <4 x i32> [[TMP18]], [[TMP16]]
; CHECK-NEXT: [[TMP20:%.*]] = lshr <4 x i32> [[TMP19]], splat (i32 16)
; CHECK-NEXT: [[TMP21:%.*]] = trunc <4 x i32> [[TMP20]] to <4 x i8>
; CHECK-NEXT: [[TMP22:%.*]] = mul nuw nsw <4 x i32> [[TMP3]], splat (i32 13282)
; CHECK-NEXT: [[TMP23:%.*]] = mul nuw <4 x i32> [[TMP5]], splat (i32 16744449)
; CHECK-NEXT: [[TMP24:%.*]] = mul nuw nsw <4 x i32> [[TMP7]], splat (i32 19485)
; CHECK-NEXT: [[TMP25:%.*]] = add nuw nsw <4 x i32> [[TMP22]], splat (i32 32768)
; CHECK-NEXT: [[TMP26:%.*]] = add nuw <4 x i32> [[TMP25]], [[TMP23]]
; CHECK-NEXT: [[TMP27:%.*]] = add nuw <4 x i32> [[TMP26]], [[TMP24]]
; CHECK-NEXT: [[TMP28:%.*]] = lshr <4 x i32> [[TMP27]], splat (i32 16)
; CHECK-NEXT: [[TMP29:%.*]] = trunc <4 x i32> [[TMP28]] to <4 x i8>
; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw i8, <4 x ptr> [[VECTOR_GEP4]], i32 1
; CHECK-NEXT: call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> [[TMP13]], <4 x ptr> [[VECTOR_GEP4]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw i8, <4 x ptr> [[VECTOR_GEP4]], i32 2
; CHECK-NEXT: call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> [[TMP21]], <4 x ptr> [[TMP30]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]]), !alias.scope [[META3]], !noalias [[META0]]
; CHECK-NEXT: call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> [[TMP29]], <4 x ptr> [[TMP31]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]]), !alias.scope [[META3]], !noalias [[META0]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i32 12
; CHECK-NEXT: [[PTR_IND3]] = getelementptr i8, ptr [[POINTER_PHI2]], i32 12
; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br [[EXIT_LOOPEXIT:label %.*]]
; CHECK: [[SCALAR_PH]]:
;
entry:
  ; Early exit for an empty trip count.
  %cmp46.not = icmp eq i32 %n, 0
  br i1 %cmp46.not, label %exit, label %for.body

for.body: ; preds = %for.body.preheader, %for.body
  ; Two strided (stride-3) pointer IVs: one over the input triples, one over
  ; the output triples; %iv counts iterations up to %n.
  %ptr.iv.1 = phi ptr [ %in, %entry ], [ %ptr.iv.1.next, %for.body ]
  %ptr.iv.2 = phi ptr [ %out, %entry ], [ %ptr.iv.2.next, %for.body ]
  %iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
  ; Load the three input bytes of this element.
  %incdec.ptr = getelementptr inbounds nuw i8, ptr %ptr.iv.1, i32 1
  %0 = load i8, ptr %ptr.iv.1, align 1
  %incdec.ptr1 = getelementptr inbounds nuw i8, ptr %ptr.iv.1, i32 2
  %1 = load i8, ptr %incdec.ptr, align 1
  %ptr.iv.1.next = getelementptr inbounds nuw i8, ptr %ptr.iv.1, i32 3
  %2 = load i8, ptr %incdec.ptr1, align 1
  ; First output channel: Q16 fixed-point mul+add chain, rounded (+32768)
  ; and shifted back down by 16.
  %conv = zext i8 %0 to i32
  %mul = mul nuw nsw i32 %conv, 19595
  %conv3 = zext i8 %1 to i32
  %mul4 = mul nuw nsw i32 %conv3, 38470
  %conv5 = zext i8 %2 to i32
  %mul6 = mul nuw nsw i32 %conv5, 7471
  %add = add nuw nsw i32 %mul, 32768
  %add7 = add nuw nsw i32 %add, %mul4
  %add8 = add nuw nsw i32 %add7, %mul6
  %shr = lshr i32 %add8, 16
  %conv9 = trunc nuw i32 %shr to i8
  ; Second output channel, same pattern with different coefficients.
  %mul11 = mul nuw nsw i32 %conv, 32767
  %mul13 = mul nuw i32 %conv3, 16762097
  %mul16 = mul nuw i32 %conv5, 16759568
  %add14 = add nuw nsw i32 %mul11, 32768
  %add17 = add nuw i32 %add14, %mul13
  %add18 = add i32 %add17, %mul16
  %shr19 = lshr i32 %add18, 16
  %conv20 = trunc i32 %shr19 to i8
  ; Third output channel.
  %mul22 = mul nuw nsw i32 %conv, 13282
  %mul24 = mul nuw i32 %conv3, 16744449
  %mul27 = mul nuw nsw i32 %conv5, 19485
  %add25 = add nuw nsw i32 %mul22, 32768
  %add28 = add nuw i32 %add25, %mul24
  %add29 = add nuw i32 %add28, %mul27
  %shr30 = lshr i32 %add29, 16
  %conv31 = trunc i32 %shr30 to i8
  ; Store the three result bytes and advance the output pointer by 3.
  %incdec.ptr32 = getelementptr inbounds nuw i8, ptr %ptr.iv.2, i32 1
  store i8 %conv9, ptr %ptr.iv.2, align 1
  %incdec.ptr33 = getelementptr inbounds nuw i8, ptr %ptr.iv.2, i32 2
  store i8 %conv20, ptr %incdec.ptr32, align 1
  %ptr.iv.2.next = getelementptr inbounds nuw i8, ptr %ptr.iv.2, i32 3
  store i8 %conv31, ptr %incdec.ptr33, align 1
  %iv.next = add nuw i32 %iv, 1
  %exitcond.not = icmp eq i32 %iv.next, %n
  br i1 %exitcond.not, label %exit, label %for.body

exit: ; preds = %for.cond.cleanup.loopexit, %entry
  ret void
}

0 commit comments

Comments
 (0)