Skip to content

Commit 2e13ec5

Browse files
committed
[VPlan] Bail out on non-intrinsic calls in VPlanNativePath.
Update initial VPlan-construction in VPlanNativePath in line with the inner loop path, in that it bails out when encountering constructs it cannot handle, like non-intrinsic calls. Fixes #131071.
1 parent fecd937 commit 2e13ec5

File tree

7 files changed

+153
-52
lines changed

7 files changed

+153
-52
lines changed

llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -494,8 +494,10 @@ class LoopVectorizationPlanner {
494494
private:
495495
/// Build a VPlan according to the information gathered by Legal. \return a
496496
/// VPlan for vectorization factors \p Range.Start and up to \p Range.End
497-
/// exclusive, possibly decreasing \p Range.End.
498-
VPlanPtr buildVPlan(VFRange &Range);
497+
/// exclusive, possibly decreasing \p Range.End. If no VPlan can be built for
498+
/// the input range, set the largest included VF to the maximum VF for which
499+
/// no plan could be built.
500+
VPlanPtr tryToBuildVPlan(VFRange &Range);
499501

500502
/// Build a VPlan using VPRecipes according to the information gather by
501503
/// Legal. This method is only used for the legacy inner loop vectorizer.

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -7121,6 +7121,9 @@ LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
71217121
<< "VF " << VF << " to build VPlans.\n");
71227122
buildVPlans(VF, VF);
71237123

7124+
if (VPlans.empty())
7125+
return VectorizationFactor::Disabled();
7126+
71247127
// For VPlan build stress testing, we bail out after VPlan construction.
71257128
if (VPlanBuildStressTest)
71267129
return VectorizationFactor::Disabled();
@@ -9620,7 +9623,7 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
96209623
return Plan;
96219624
}
96229625

9623-
VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9626+
VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
96249627
// Outer loop handling: They may require CFG and instruction level
96259628
// transformations before even evaluating whether vectorization is profitable.
96269629
// Since we cannot modify the incoming IR, we need to build VPlan upfront in
@@ -9640,10 +9643,13 @@ VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
96409643
for (ElementCount VF : Range)
96419644
Plan->addVF(VF);
96429645

9643-
VPlanTransforms::VPInstructionsToVPRecipes(
9644-
Plan,
9645-
[this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9646-
*PSE.getSE(), *TLI);
9646+
if (!VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
9647+
Plan,
9648+
[this](PHINode *P) {
9649+
return Legal->getIntOrFpInductionDescriptor(P);
9650+
},
9651+
*PSE.getSE(), *TLI))
9652+
return nullptr;
96479653

96489654
// Tail folding is not supported for outer loops, so the induction increment
96499655
// is guaranteed to not wrap.

llvm/lib/Transforms/Vectorize/VPlan.cpp

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1532,12 +1532,13 @@ void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
15321532
auto MaxVFTimes2 = MaxVF * 2;
15331533
for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
15341534
VFRange SubRange = {VF, MaxVFTimes2};
1535-
auto Plan = buildVPlan(SubRange);
1536-
VPlanTransforms::optimize(*Plan);
1537-
// Update the name of the latch of the top-level vector loop region
1538-
// after optimizations which includes block folding.
1539-
Plan->getVectorLoopRegion()->getExiting()->setName("vector.latch");
1540-
VPlans.push_back(std::move(Plan));
1535+
if (auto Plan = tryToBuildVPlan(SubRange)) {
1536+
VPlanTransforms::optimize(*Plan);
1537+
// Update the name of the latch of the top-level vector loop region
1538+
// after optimizations which includes block folding.
1539+
Plan->getVectorLoopRegion()->getExiting()->setName("vector.latch");
1540+
VPlans.push_back(std::move(Plan));
1541+
}
15411542
VF = SubRange.End;
15421543
}
15431544
}

llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@
3232

3333
using namespace llvm;
3434

35-
void VPlanTransforms::VPInstructionsToVPRecipes(
35+
bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
3636
VPlanPtr &Plan,
3737
function_ref<const InductionDescriptor *(PHINode *)>
3838
GetIntOrFpInductionDescriptor,
@@ -83,6 +83,9 @@ void VPlanTransforms::VPInstructionsToVPRecipes(
8383
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
8484
NewRecipe = new VPWidenGEPRecipe(GEP, Ingredient.operands());
8585
} else if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
86+
Intrinsic::ID VectorID = getVectorIntrinsicIDForCall(CI, &TLI);
87+
if (VectorID == Intrinsic::not_intrinsic)
88+
return false;
8689
NewRecipe = new VPWidenIntrinsicRecipe(
8790
*CI, getVectorIntrinsicIDForCall(CI, &TLI),
8891
{Ingredient.op_begin(), Ingredient.op_end() - 1}, CI->getType(),
@@ -106,6 +109,7 @@ void VPlanTransforms::VPInstructionsToVPRecipes(
106109
Ingredient.eraseFromParent();
107110
}
108111
}
112+
return true;
109113
}
110114

111115
static bool sinkScalarOperands(VPlan &Plan) {

llvm/lib/Transforms/Vectorize/VPlanTransforms.h

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -68,12 +68,13 @@ struct VPlanTransforms {
6868
bool RequiresScalarEpilogueCheck, bool TailFolded, Loop *TheLoop);
6969

7070
/// Replaces the VPInstructions in \p Plan with corresponding
71-
/// widen recipes.
72-
static void
73-
VPInstructionsToVPRecipes(VPlanPtr &Plan,
74-
function_ref<const InductionDescriptor *(PHINode *)>
75-
GetIntOrFpInductionDescriptor,
76-
ScalarEvolution &SE, const TargetLibraryInfo &TLI);
71+
/// widen recipes. Returns false if any VPInstructions could not be converted
72+
/// to a wide recipe if needed.
73+
static bool tryToConvertVPInstructionsToVPRecipes(
74+
VPlanPtr &Plan,
75+
function_ref<const InductionDescriptor *(PHINode *)>
76+
GetIntOrFpInductionDescriptor,
77+
ScalarEvolution &SE, const TargetLibraryInfo &TLI);
7778

7879
/// Try to have all users of fixed-order recurrences appear after the recipe
7980
/// defining their previous value, by either sinking users or hoisting recipes
Lines changed: 117 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -1,40 +1,72 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
12
; RUN: opt -passes=loop-vectorize -force-vector-width=4 -enable-vplan-native-path -S %s | FileCheck %s
23

34
; Test that VPlan native path is able to widen call instructions like
45
; llvm.sqrt.* intrinsic calls.
56

67
declare double @llvm.sqrt.f64(double %0)
78
define void @widen_call_instruction(ptr noalias nocapture readonly %a.in, ptr noalias nocapture readonly %b.in, ptr noalias nocapture %c.out) {
8-
; CHECK-LABEL: @widen_call_instruction(
9-
10-
; CHECK: vector.body:
11-
; CHECK-NEXT: %[[FOR1_INDEX:.*]] = phi i64 [ 0, %[[LABEL_PR:.*]] ], [ %{{.*}}, %[[LABEL_FOR1_LATCH:.*]] ]
12-
; CHECK: %[[VEC_INDEX:.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[LABEL_PR]] ], [ %{{.*}}, %[[LABEL_FOR1_LATCH]] ]
13-
; CHECK-NEXT: %[[A_PTR:.*]] = getelementptr inbounds double, ptr %a.in, <4 x i64> %[[VEC_INDEX]]
14-
; CHECK-NEXT: %[[MASKED_GATHER1:.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %[[A_PTR]], i32 8, <4 x i1> splat (i1 true), <4 x double> poison)
15-
; CHECK-NEXT: %[[B_PTR:.*]] = getelementptr inbounds double, ptr %b.in, <4 x i64> %[[VEC_INDEX]]
16-
; CHECK-NEXT: %[[MASKED_GATHER2:.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %[[B_PTR]], i32 8, <4 x i1> splat (i1 true), <4 x double> poison)
17-
; CHECK-NEXT: %[[B_SQRT:.*]] = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %[[MASKED_GATHER2]])
18-
; CHECK-NEXT: br label %[[FOR2_HEADER:.*]]
19-
20-
; CHECK: [[FOR2_HEADER]]:
21-
; CHECK-NEXT: %[[FOR2_INDEX:.*]] = phi <4 x i32> [ zeroinitializer, %vector.body ], [ %[[FOR2_INDEX_NEXT:.*]], %[[FOR2_HEADER]] ]
22-
; CHECK-NEXT: %[[REDUCTION:.*]] = phi <4 x double> [ %[[MASKED_GATHER1]], %vector.body ], [ %[[REDUCTION_NEXT:.*]], %[[FOR2_HEADER]] ]
23-
; CHECK-NEXT: %[[REDUCTION_NEXT]] = fadd <4 x double> %[[B_SQRT]], %[[REDUCTION]]
24-
; CHECK-NEXT: %[[FOR2_INDEX_NEXT]] = add nuw nsw <4 x i32> %[[FOR2_INDEX]], splat (i32 1)
25-
; CHECK-NEXT: %[[VEC_PTR:.*]] = icmp eq <4 x i32> %[[FOR2_INDEX_NEXT]], splat (i32 10000)
26-
; CHECK-NEXT: %[[EXIT_COND:.*]] = extractelement <4 x i1> %[[VEC_PTR]], i32 0
27-
; CHECK-NEXT: br i1 %[[EXIT_COND]], label %[[FOR1_LATCH:.*]], label %{{.*}}
28-
29-
; CHECK: [[FOR1_LATCH]]:
30-
; CHECK-NEXT: %[[REDUCTION:.*]] = phi <4 x double> [ %[[REDUCTION_NEXT]], %[[FOR2_HEADER]] ]
31-
; CHECK-NEXT: %[[C_PTR:.*]] = getelementptr inbounds double, ptr %c.out, <4 x i64> %[[VEC_INDEX]]
32-
; CHECK-NEXT: call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> %[[REDUCTION]], <4 x ptr> %[[C_PTR]], i32 8, <4 x i1> splat (i1 true))
33-
; CHECK-NEXT: %[[FOR1_INDEX_NEXT:.*]] = add nuw i64 %[[FOR1_INDEX]], 4
34-
; CHECK-NEXT: %{{.*}} = add <4 x i64> %[[VEC_INDEX]], splat (i64 4)
35-
; CHECK-NEXT: %[[EXIT_COND:.*]] = icmp eq i64 %[[FOR1_INDEX_NEXT]], 1000
36-
; CHECK-NEXT: br i1 %[[EXIT_COND]], label %{{.*}}, label %vector.body
37-
9+
; CHECK-LABEL: define void @widen_call_instruction(
10+
; CHECK-SAME: ptr noalias readonly captures(none) [[A_IN:%.*]], ptr noalias readonly captures(none) [[B_IN:%.*]], ptr noalias captures(none) [[C_OUT:%.*]]) {
11+
; CHECK-NEXT: [[ENTRY:.*]]:
12+
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
13+
; CHECK: [[VECTOR_PH]]:
14+
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
15+
; CHECK: [[VECTOR_BODY]]:
16+
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_LATCH:.*]] ]
17+
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_LATCH]] ]
18+
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[A_IN]], <4 x i64> [[VEC_IND]]
19+
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> [[TMP0]], i32 8, <4 x i1> splat (i1 true), <4 x double> poison)
20+
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds double, ptr [[B_IN]], <4 x i64> [[VEC_IND]]
21+
; CHECK-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> [[TMP1]], i32 8, <4 x i1> splat (i1 true), <4 x double> poison)
22+
; CHECK-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.sqrt.v4f64(<4 x double> [[WIDE_MASKED_GATHER1]])
23+
; CHECK-NEXT: br label %[[FOR2_HEADER2:.*]]
24+
; CHECK: [[FOR2_HEADER2]]:
25+
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_BODY]] ], [ [[TMP4:%.*]], %[[FOR2_HEADER2]] ]
26+
; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x double> [ [[WIDE_MASKED_GATHER]], %[[VECTOR_BODY]] ], [ [[TMP3:%.*]], %[[FOR2_HEADER2]] ]
27+
; CHECK-NEXT: [[TMP3]] = fadd <4 x double> [[TMP2]], [[VEC_PHI3]]
28+
; CHECK-NEXT: [[TMP4]] = add nuw nsw <4 x i32> [[VEC_PHI]], splat (i32 1)
29+
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i32> [[TMP4]], splat (i32 10000)
30+
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP5]], i32 0
31+
; CHECK-NEXT: br i1 [[TMP6]], label %[[VECTOR_LATCH]], label %[[FOR2_HEADER2]]
32+
; CHECK: [[VECTOR_LATCH]]:
33+
; CHECK-NEXT: [[VEC_PHI4:%.*]] = phi <4 x double> [ [[TMP3]], %[[FOR2_HEADER2]] ]
34+
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[C_OUT]], <4 x i64> [[VEC_IND]]
35+
; CHECK-NEXT: call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> [[VEC_PHI4]], <4 x ptr> [[TMP7]], i32 8, <4 x i1> splat (i1 true))
36+
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
37+
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
38+
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
39+
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
40+
; CHECK: [[MIDDLE_BLOCK]]:
41+
; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
42+
; CHECK: [[SCALAR_PH]]:
43+
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
44+
; CHECK-NEXT: br label %[[FOR1_HEADER:.*]]
45+
; CHECK: [[FOR1_HEADER]]:
46+
; CHECK-NEXT: [[INDVAR1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVAR11:%.*]], %[[FOR1_LATCH:.*]] ]
47+
; CHECK-NEXT: [[A_PTR:%.*]] = getelementptr inbounds double, ptr [[A_IN]], i64 [[INDVAR1]]
48+
; CHECK-NEXT: [[A:%.*]] = load double, ptr [[A_PTR]], align 8
49+
; CHECK-NEXT: [[B_PTR:%.*]] = getelementptr inbounds double, ptr [[B_IN]], i64 [[INDVAR1]]
50+
; CHECK-NEXT: [[B:%.*]] = load double, ptr [[B_PTR]], align 8
51+
; CHECK-NEXT: [[B_SQRT:%.*]] = call double @llvm.sqrt.f64(double [[B]])
52+
; CHECK-NEXT: br label %[[FOR2_HEADER:.*]]
53+
; CHECK: [[FOR2_HEADER]]:
54+
; CHECK-NEXT: [[INDVAR2:%.*]] = phi i32 [ 0, %[[FOR1_HEADER]] ], [ [[INDVAR21:%.*]], %[[FOR2_HEADER]] ]
55+
; CHECK-NEXT: [[A_REDUCTION:%.*]] = phi double [ [[A]], %[[FOR1_HEADER]] ], [ [[A_REDUCTION1:%.*]], %[[FOR2_HEADER]] ]
56+
; CHECK-NEXT: [[A_REDUCTION1]] = fadd double [[B_SQRT]], [[A_REDUCTION]]
57+
; CHECK-NEXT: [[INDVAR21]] = add nuw nsw i32 [[INDVAR2]], 1
58+
; CHECK-NEXT: [[FOR2_COND:%.*]] = icmp eq i32 [[INDVAR21]], 10000
59+
; CHECK-NEXT: br i1 [[FOR2_COND]], label %[[FOR1_LATCH]], label %[[FOR2_HEADER]]
60+
; CHECK: [[FOR1_LATCH]]:
61+
; CHECK-NEXT: [[A_REDUCTION1_LCSSA:%.*]] = phi double [ [[A_REDUCTION1]], %[[FOR2_HEADER]] ]
62+
; CHECK-NEXT: [[C_PTR:%.*]] = getelementptr inbounds double, ptr [[C_OUT]], i64 [[INDVAR1]]
63+
; CHECK-NEXT: store double [[A_REDUCTION1_LCSSA]], ptr [[C_PTR]], align 8
64+
; CHECK-NEXT: [[INDVAR11]] = add nuw nsw i64 [[INDVAR1]], 1
65+
; CHECK-NEXT: [[FOR1_COND:%.*]] = icmp eq i64 [[INDVAR11]], 1000
66+
; CHECK-NEXT: br i1 [[FOR1_COND]], label %[[EXIT]], label %[[FOR1_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
67+
; CHECK: [[EXIT]]:
68+
; CHECK-NEXT: ret void
69+
;
3870
entry:
3971
br label %for1.header
4072

@@ -66,5 +98,60 @@ exit:
6698
ret void
6799
}
68100

101+
; Check we do not try to widen non-intrinsic calls,
102+
; https://github.com/llvm/llvm-project/issues/131071.
103+
define void @call_to_non_intrinsic() {
104+
; CHECK-LABEL: define void @call_to_non_intrinsic() {
105+
; CHECK-NEXT: [[ENTRY:.*]]:
106+
; CHECK-NEXT: br label %[[OUTER_HEADER:.*]]
107+
; CHECK: [[OUTER_HEADER]]:
108+
; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[OUTER_IV_NEXT:%.*]], %[[OUTER_LATCH:.*]] ]
109+
; CHECK-NEXT: br label %[[INNER_HEADER:.*]]
110+
; CHECK: [[INNER_HEADER]]:
111+
; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, %[[OUTER_HEADER]] ], [ [[INNER_IV_NEXT:%.*]], %[[INNER_HEADER]] ]
112+
; CHECK-NEXT: call void @use()
113+
; CHECK-NEXT: [[INNER_IV_NEXT]] = add i64 [[INNER_IV]], 1
114+
; CHECK-NEXT: [[INNER_EC:%.*]] = icmp eq i64 [[INNER_IV_NEXT]], 100
115+
; CHECK-NEXT: br i1 [[INNER_EC]], label %[[OUTER_LATCH]], label %[[INNER_HEADER]]
116+
; CHECK: [[OUTER_LATCH]]:
117+
; CHECK-NEXT: [[OUTER_IV_NEXT]] = add i64 [[OUTER_IV]], 1
118+
; CHECK-NEXT: [[OUTER_EC:%.*]] = icmp eq i64 [[OUTER_IV_NEXT]], 100
119+
; CHECK-NEXT: br i1 [[OUTER_EC]], label %[[EXIT:.*]], label %[[OUTER_HEADER]], !llvm.loop [[LOOP4:![0-9]+]]
120+
; CHECK: [[EXIT]]:
121+
; CHECK-NEXT: ret void
122+
;
123+
entry:
124+
br label %outer.header
125+
126+
outer.header:
127+
%outer.iv = phi i64 [ 0, %entry ], [ %outer.iv.next, %outer.latch ]
128+
br label %inner.header
129+
130+
inner.header:
131+
%inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner.header ]
132+
call void @use()
133+
%inner.iv.next = add i64 %inner.iv, 1
134+
%inner.ec = icmp eq i64 %inner.iv.next, 100
135+
br i1 %inner.ec, label %outer.latch, label %inner.header
136+
137+
outer.latch:
138+
%outer.iv.next = add i64 %outer.iv, 1
139+
%outer.ec = icmp eq i64 %outer.iv.next, 100
140+
br i1 %outer.ec, label %exit, label %outer.header, !llvm.loop !0
141+
142+
exit:
143+
ret void
144+
}
145+
146+
declare void @use()
147+
69148
!0 = distinct !{!0, !1}
70149
!1 = !{!"llvm.loop.vectorize.enable", i1 true}
150+
;.
151+
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
152+
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
153+
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
154+
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
155+
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META5:![0-9]+]]}
156+
; CHECK: [[META5]] = !{!"llvm.loop.vectorize.enable", i1 true}
157+
;.

llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -173,7 +173,7 @@ compound=true
173173
#endif
174174
TargetLibraryInfoImpl TLII(M.getTargetTriple());
175175
TargetLibraryInfo TLI(TLII);
176-
VPlanTransforms::VPInstructionsToVPRecipes(
176+
VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
177177
Plan, [](PHINode *P) { return nullptr; }, *SE, TLI);
178178
}
179179

@@ -203,7 +203,7 @@ TEST_F(VPlanHCFGTest, testVPInstructionToVPRecipesInner) {
203203

204204
TargetLibraryInfoImpl TLII(M.getTargetTriple());
205205
TargetLibraryInfo TLI(TLII);
206-
VPlanTransforms::VPInstructionsToVPRecipes(
206+
VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
207207
Plan, [](PHINode *P) { return nullptr; }, *SE, TLI);
208208

209209
VPBlockBase *Entry = Plan->getEntry()->getEntryBasicBlock();

0 commit comments

Comments
 (0)