77 | 77 | #include "llvm/Analysis/ScalarEvolution.h"
78 | 78 | #include "llvm/Analysis/ScalarEvolutionExpressions.h"
79 | 79 | #include "llvm/Analysis/ScalarEvolutionNormalization.h"
| 80 | +#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"
80 | 81 | #include "llvm/Analysis/TargetLibraryInfo.h"
81 | 82 | #include "llvm/Analysis/TargetTransformInfo.h"
82 | 83 | #include "llvm/Analysis/ValueTracking.h"
83 | 84 | #include "llvm/BinaryFormat/Dwarf.h"
84 | | -#include "llvm/Config/llvm-config.h"
85 | 85 | #include "llvm/IR/BasicBlock.h"
86 | 86 | #include "llvm/IR/Constant.h"
87 | 87 | #include "llvm/IR/Constants.h"

128 | 128 | #include <utility>
129 | 129 |
130 | 130 | using namespace llvm;
| 131 | +using namespace SCEVPatternMatch;
131 | 132 |
132 | 133 | #define DEBUG_TYPE "loop-reduce"
133 | 134 |
@@ -556,16 +557,17 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
556 | 557 |   }
557 | 558 |
558 | 559 |   // Look at addrec operands.
559 | | -  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
560 | | -    if (!AR->getStart()->isZero() && AR->isAffine()) {
561 | | -      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
562 | | -      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
563 | | -                                      AR->getStepRecurrence(SE),
564 | | -                                      // FIXME: AR->getNoWrapFlags()
565 | | -                                      AR->getLoop(), SCEV::FlagAnyWrap),
566 | | -                     L, Good, Bad, SE);
567 | | -      return;
568 | | -    }
| 560 | +  const SCEV *Start, *Step;
| 561 | +  if (match(S, m_scev_AffineAddRec(m_SCEV(Start), m_SCEV(Step))) &&
| 562 | +      !Start->isZero()) {
| 563 | +    DoInitialMatch(Start, L, Good, Bad, SE);
| 564 | +    DoInitialMatch(SE.getAddRecExpr(SE.getConstant(S->getType(), 0), Step,
| 565 | +                                    // FIXME: AR->getNoWrapFlags()
| 566 | +                                    cast<SCEVAddRecExpr>(S)->getLoop(),
| 567 | +                                    SCEV::FlagAnyWrap),
| 568 | +                   L, Good, Bad, SE);
| 569 | +    return;
| 570 | +  }
569 | 571 |
570 | 572 |   // Handle a multiplication by -1 (negation) if it didn't fold.
571 | 573 |   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
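
The remaining hunks follow the idiom introduced here: a dyn_cast<SCEVAddRecExpr> plus explicit isAffine()/operand checks is collapsed into a single match() against an m_scev_AffineAddRec pattern that binds the start and step. A minimal sketch of the two shapes, using only matchers visible in this patch; the helper names are hypothetical and not part of the change:

  #include "llvm/Analysis/ScalarEvolution.h"
  #include "llvm/Analysis/ScalarEvolutionExpressions.h"
  #include "llvm/Analysis/ScalarEvolutionPatternMatch.h"

  using namespace llvm;
  using namespace SCEVPatternMatch;

  // Old shape: dyn_cast, then check affinity and the start value by hand.
  static bool hasNonZeroStartOld(const SCEV *S) {
    if (const auto *AR = dyn_cast<SCEVAddRecExpr>(S))
      return AR->isAffine() && !AR->getStart()->isZero();
    return false;
  }

  // New shape: one match() call binds Start and Step; m_scev_AffineAddRec
  // only matches affine (two-operand) recurrences, so isAffine() goes away.
  static bool hasNonZeroStartNew(const SCEV *S) {
    const SCEV *Start, *Step;
    return match(S, m_scev_AffineAddRec(m_SCEV(Start), m_SCEV(Step))) &&
           !Start->isZero();
  }
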
@@ -1411,22 +1413,16 @@ void Cost::RateRegister(const Formula &F, const SCEV *Reg,
1411 | 1413 |     unsigned LoopCost = 1;
1412 | 1414 |     if (TTI->isIndexedLoadLegal(TTI->MIM_PostInc, AR->getType()) ||
1413 | 1415 |         TTI->isIndexedStoreLegal(TTI->MIM_PostInc, AR->getType())) {
1414 | | -
1415 | | -      // If the step size matches the base offset, we could use pre-indexed
1416 | | -      // addressing.
1417 | | -      if (AMK == TTI::AMK_PreIndexed && F.BaseOffset.isFixed()) {
1418 | | -        if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)))
1419 | | -          if (Step->getAPInt() == F.BaseOffset.getFixedValue())
1420 | | -            LoopCost = 0;
1421 | | -      } else if (AMK == TTI::AMK_PostIndexed) {
1422 | | -        const SCEV *LoopStep = AR->getStepRecurrence(*SE);
1423 | | -        if (isa<SCEVConstant>(LoopStep)) {
1424 | | -          const SCEV *LoopStart = AR->getStart();
1425 | | -          if (!isa<SCEVConstant>(LoopStart) &&
1426 | | -              SE->isLoopInvariant(LoopStart, L))
1427 | | -            LoopCost = 0;
1428 | | -        }
1429 | | -      }
| 1416 | +      const SCEV *Start;
| 1417 | +      const SCEVConstant *Step;
| 1418 | +      if (match(AR, m_scev_AffineAddRec(m_SCEV(Start), m_SCEVConstant(Step))))
| 1419 | +        // If the step size matches the base offset, we could use pre-indexed
| 1420 | +        // addressing.
| 1421 | +        if ((AMK == TTI::AMK_PreIndexed && F.BaseOffset.isFixed() &&
| 1422 | +             Step->getAPInt() == F.BaseOffset.getFixedValue()) ||
| 1423 | +            (AMK == TTI::AMK_PostIndexed && !isa<SCEVConstant>(Start) &&
| 1424 | +             SE->isLoopInvariant(Start, L)))
| 1425 | +          LoopCost = 0;
1430 | 1426 |     }
1431 | 1427 |     C.AddRecCost += LoopCost;
1432 | 1428 |
@@ -2519,13 +2515,11 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
2519 | 2515 |   // Check the relevant induction variable for conformance to
2520 | 2516 |   // the pattern.
2521 | 2517 |   const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
2522 | | -  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2523 | | -  if (!AR || !AR->isAffine() ||
2524 | | -      AR->getStart() != One ||
2525 | | -      AR->getStepRecurrence(SE) != One)
| 2518 | +  if (!match(IV,
| 2519 | +             m_scev_AffineAddRec(m_scev_SpecificInt(1), m_scev_SpecificInt(1))))
2526 | 2520 |     return Cond;
2527 | 2521 |
2528 | | -  assert(AR->getLoop() == L &&
| 2522 | +  assert(cast<SCEVAddRecExpr>(IV)->getLoop() == L &&
2529 | 2523 |          "Loop condition operand is an addrec in a different loop!");
2530 | 2524 |
2531 | 2525 |   // Check the right operand of the select, and remember it, as it will
@@ -3320,7 +3314,7 @@ void LSRInstance::CollectChains() {
3320 | 3314 | void LSRInstance::FinalizeChain(IVChain &Chain) {
3321 | 3315 |   assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
3322 | 3316 |   LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");
3323 | | -
| 3317 | +
3324 | 3318 |   for (const IVInc &Inc : Chain) {
3325 | 3319 |     LLVM_DEBUG(dbgs() << " Inc: " << *Inc.UserInst << "\n");
3326 | 3320 |     auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand);
@@ -3823,26 +3817,27 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
3823 | 3817 |         Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
3824 | 3818 |     }
3825 | 3819 |     return nullptr;
3826 | | -  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
| 3820 | +  }
| 3821 | +  const SCEV *Start, *Step;
| 3822 | +  if (match(S, m_scev_AffineAddRec(m_SCEV(Start), m_SCEV(Step)))) {
3827 | 3823 |     // Split a non-zero base out of an addrec.
3828 | | -    if (AR->getStart()->isZero() || !AR->isAffine())
| 3824 | +    if (Start->isZero())
3829 | 3825 |       return S;
3830 | 3826 |
3831 | | -    const SCEV *Remainder = CollectSubexprs(AR->getStart(),
3832 | | -                                            C, Ops, L, SE, Depth+1);
| 3827 | +    const SCEV *Remainder = CollectSubexprs(Start, C, Ops, L, SE, Depth + 1);
3833 | 3828 |     // Split the non-zero AddRec unless it is part of a nested recurrence that
3834 | 3829 |     // does not pertain to this loop.
3835 | | -    if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) {
| 3830 | +    if (Remainder && (cast<SCEVAddRecExpr>(S)->getLoop() == L ||
| 3831 | +                      !isa<SCEVAddRecExpr>(Remainder))) {
3836 | 3832 |       Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
3837 | 3833 |       Remainder = nullptr;
3838 | 3834 |     }
3839 | | -    if (Remainder != AR->getStart()) {
| 3835 | +    if (Remainder != Start) {
3840 | 3836 |       if (!Remainder)
3841 | | -        Remainder = SE.getConstant(AR->getType(), 0);
3842 | | -      return SE.getAddRecExpr(Remainder,
3843 | | -                              AR->getStepRecurrence(SE),
3844 | | -                              AR->getLoop(),
3845 | | -                              //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
| 3837 | +        Remainder = SE.getConstant(S->getType(), 0);
| 3838 | +      return SE.getAddRecExpr(Remainder, Step,
| 3839 | +                              cast<SCEVAddRecExpr>(S)->getLoop(),
| 3840 | +                              // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
3846 | 3841 |                               SCEV::FlagAnyWrap);
3847 | 3842 |     }
3848 | 3843 |   } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
@@ -3870,17 +3865,13 @@ static bool mayUsePostIncMode(const TargetTransformInfo &TTI,
3870 | 3865 |   if (LU.Kind != LSRUse::Address ||
3871 | 3866 |       !LU.AccessTy.getType()->isIntOrIntVectorTy())
3872 | 3867 |     return false;
3873 | | -  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
3874 | | -  if (!AR)
3875 | | -    return false;
3876 | | -  const SCEV *LoopStep = AR->getStepRecurrence(SE);
3877 | | -  if (!isa<SCEVConstant>(LoopStep))
| 3868 | +  const SCEV *Start;
| 3869 | +  if (!match(S, m_scev_AffineAddRec(m_SCEV(Start), m_SCEVConstant())))
3878 | 3870 |     return false;
3879 | 3871 |   // Check if a post-indexed load/store can be used.
3880 | | -  if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, AR->getType()) ||
3881 | | -      TTI.isIndexedStoreLegal(TTI.MIM_PostInc, AR->getType())) {
3882 | | -    const SCEV *LoopStart = AR->getStart();
3883 | | -    if (!isa<SCEVConstant>(LoopStart) && SE.isLoopInvariant(LoopStart, L))
| 3872 | +  if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, S->getType()) ||
| 3873 | +      TTI.isIndexedStoreLegal(TTI.MIM_PostInc, S->getType())) {
| 3874 | +    if (!isa<SCEVConstant>(Start) && SE.isLoopInvariant(Start, L))
3884 | 3875 |       return true;
3885 | 3876 |   }
3886 | 3877 |   return false;
@@ -4139,18 +4130,15 @@ void LSRInstance::GenerateConstantOffsetsImpl(
4139 | 4130 |   // base pointer for each iteration of the loop, resulting in no extra add/sub
4140 | 4131 |   // instructions for pointer updating.
4141 | 4132 |   if (AMK == TTI::AMK_PreIndexed && LU.Kind == LSRUse::Address) {
4142 | | -    if (auto *GAR = dyn_cast<SCEVAddRecExpr>(G)) {
4143 | | -      if (auto *StepRec =
4144 | | -          dyn_cast<SCEVConstant>(GAR->getStepRecurrence(SE))) {
4145 | | -        const APInt &StepInt = StepRec->getAPInt();
4146 | | -        int64_t Step = StepInt.isNegative() ?
4147 | | -          StepInt.getSExtValue() : StepInt.getZExtValue();
4148 | | -
4149 | | -        for (Immediate Offset : Worklist) {
4150 | | -          if (Offset.isFixed()) {
4151 | | -            Offset = Immediate::getFixed(Offset.getFixedValue() - Step);
4152 | | -            GenerateOffset(G, Offset);
4153 | | -          }
| 4133 | +    const APInt *StepInt;
| 4134 | +    if (match(G, m_scev_AffineAddRec(m_SCEV(), m_scev_APInt(StepInt)))) {
| 4135 | +      int64_t Step = StepInt->isNegative() ? StepInt->getSExtValue()
| 4136 | +                                           : StepInt->getZExtValue();
| 4137 | +
| 4138 | +      for (Immediate Offset : Worklist) {
| 4139 | +        if (Offset.isFixed()) {
| 4140 | +          Offset = Immediate::getFixed(Offset.getFixedValue() - Step);
| 4141 | +          GenerateOffset(G, Offset);
4154 | 4142 |         }
4155 | 4143 |       }
4156 | 4144 |     }
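
The GenerateConstantOffsetsImpl hunk above binds the constant step directly as an APInt via m_scev_APInt instead of going through a SCEVConstant node. A small sketch of that extraction idiom, assuming the same headers and using-directives as the earlier sketch; GetConstantStep is a hypothetical helper, not part of this patch:

  // Sketch: pull a constant step out of an affine add recurrence as a
  // signed 64-bit value, mirroring the m_scev_APInt use in the hunk above.
  static bool GetConstantStep(const SCEV *S, int64_t &Out) {
    const APInt *StepInt;
    if (!match(S, m_scev_AffineAddRec(m_SCEV(), m_scev_APInt(StepInt))))
      return false;
    // Same sign handling as the patch: negative steps are sign-extended,
    // non-negative steps are zero-extended.
    Out = StepInt->isNegative() ? StepInt->getSExtValue()
                                : StepInt->getZExtValue();
    return true;
  }
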
@@ -6621,7 +6609,7 @@ struct SCEVDbgValueBuilder {
6621 | 6609 |     if (Op.getOp() != dwarf::DW_OP_LLVM_arg) {
6622 | 6610 |       Op.appendToVector(DestExpr);
6623 | 6611 |       continue;
6624 | | -    }
| 6612 | +    }
6625 | 6613 |
6626 | 6614 |     DestExpr.push_back(dwarf::DW_OP_LLVM_arg);
6627 | 6615 |     // `DW_OP_LLVM_arg n` represents the nth LocationOp in this SCEV,