
Commit ba46ae7

[InstCombine] Merge foldAndOfICmps() and foldOrOfICmps() (NFCI)
Folds are supposed to always be added in conjugated pairs for and and or. Merge the two functions so that folds where this is currently not the case become more obvious.
1 parent: e07a7fd
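To make the shape of the refactor easier to see, here is a minimal illustrative sketch (Cmp and foldAndOrOfCmps below are hypothetical stand-ins, not the actual InstCombine API): the two near-duplicate entry points collapse into a single function taking an IsAnd flag, so a fold that lacks its conjugated counterpart has to announce itself with an explicit guard.

// Illustrative sketch only; not the InstCombine code itself.
#include <optional>

struct Cmp {
  unsigned Code; // stand-in for getICmpCode(Predicate)
};

std::optional<unsigned> foldAndOrOfCmps(const Cmp &L, const Cmp &R,
                                        bool IsAnd) {
  // A properly conjugated fold: the flag only flips the combining
  // operation (bitwise AND of the predicate codes vs. bitwise OR).
  if (unsigned Code = IsAnd ? (L.Code & R.Code) : (L.Code | R.Code))
    return Code;

  // A fold that exists for only one of the two operators now needs a
  // visible guard, which is exactly what the merge is meant to expose.
  if (IsAnd) {
    // ... and-only fold, e.g. the signed truncation check ...
  }
  return std::nullopt;
}

Callers select the variant at the call site, as the diff below does: visitAnd passes IsAnd = true and visitOr passes IsAnd = false.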

2 files changed (+83, -167 lines)


llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp

Lines changed: 81 additions & 165 deletions
@@ -1220,132 +1220,6 @@ static Value *foldAndOrOfICmpsUsingRanges(
   return Builder.CreateICmp(NewPred, NewV, ConstantInt::get(Ty, NewC));
 }
 
-/// Fold (icmp)&(icmp) if possible.
-Value *InstCombinerImpl::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
-                                        BinaryOperator &And) {
-  const SimplifyQuery Q = SQ.getWithInstruction(&And);
-
-  // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
-  // if K1 and K2 are a one-bit mask.
-  if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, &And,
-                                               /* IsAnd */ true))
-    return V;
-
-  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
-
-  // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
-  if (predicatesFoldable(PredL, PredR)) {
-    if (LHS->getOperand(0) == RHS->getOperand(1) &&
-        LHS->getOperand(1) == RHS->getOperand(0))
-      LHS->swapOperands();
-    if (LHS->getOperand(0) == RHS->getOperand(0) &&
-        LHS->getOperand(1) == RHS->getOperand(1)) {
-      Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
-      unsigned Code =
-          getICmpCode(LHS->getPredicate()) & getICmpCode(RHS->getPredicate());
-      bool IsSigned = LHS->isSigned() || RHS->isSigned();
-      return getNewICmpValue(Code, IsSigned, Op0, Op1, Builder);
-    }
-  }
-
-  // handle (roughly): (icmp eq (A & B), C) & (icmp eq (A & D), E)
-  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, true, Builder))
-    return V;
-
-  if (Value *V = foldAndOrOfICmpsWithConstEq(LHS, RHS, And, Builder, Q))
-    return V;
-  if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, And, Builder, Q))
-    return V;
-
-  if (Value *V = foldIsPowerOf2OrZero(LHS, RHS, /*IsAnd=*/true, Builder))
-    return V;
-  if (Value *V = foldIsPowerOf2OrZero(RHS, LHS, /*IsAnd=*/true, Builder))
-    return V;
-
-  // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
-  if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/false))
-    return V;
-
-  // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
-  if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/false))
-    return V;
-
-  if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, true, Builder))
-    return V;
-
-  if (Value *V = foldSignedTruncationCheck(LHS, RHS, And, Builder))
-    return V;
-
-  if (Value *V = foldIsPowerOf2(LHS, RHS, true /* JoinedByAnd */, Builder))
-    return V;
-
-  if (Value *X =
-          foldUnsignedUnderflowCheck(LHS, RHS, /*IsAnd=*/true, Q, Builder))
-    return X;
-  if (Value *X =
-          foldUnsignedUnderflowCheck(RHS, LHS, /*IsAnd=*/true, Q, Builder))
-    return X;
-
-  if (Value *X = foldEqOfParts(LHS, RHS, /*IsAnd=*/true))
-    return X;
-
-  // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
-  Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
-
-  // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
-  // TODO: Remove this when foldLogOpOfMaskedICmps can handle undefs.
-  if (PredL == ICmpInst::ICMP_EQ && match(LHS->getOperand(1), m_ZeroInt()) &&
-      PredR == ICmpInst::ICMP_EQ && match(RHS->getOperand(1), m_ZeroInt()) &&
-      LHS0->getType() == RHS0->getType()) {
-    Value *NewOr = Builder.CreateOr(LHS0, RHS0);
-    return Builder.CreateICmp(PredL, NewOr,
-                              Constant::getNullValue(NewOr->getType()));
-  }
-
-  const APInt *LHSC, *RHSC;
-  if (!match(LHS->getOperand(1), m_APInt(LHSC)) ||
-      !match(RHS->getOperand(1), m_APInt(RHSC)))
-    return nullptr;
-
-  // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
-  // where CMAX is the all ones value for the truncated type,
-  // iff the lower bits of C2 and CA are zero.
-  if (PredL == ICmpInst::ICMP_EQ && PredL == PredR && LHS->hasOneUse() &&
-      RHS->hasOneUse()) {
-    Value *V;
-    const APInt *AndC, *SmallC = nullptr, *BigC = nullptr;
-
-    // (trunc x) == C1 & (and x, CA) == C2
-    // (and x, CA) == C2 & (trunc x) == C1
-    if (match(RHS0, m_Trunc(m_Value(V))) &&
-        match(LHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
-      SmallC = RHSC;
-      BigC = LHSC;
-    } else if (match(LHS0, m_Trunc(m_Value(V))) &&
-               match(RHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
-      SmallC = LHSC;
-      BigC = RHSC;
-    }
-
-    if (SmallC && BigC) {
-      unsigned BigBitSize = BigC->getBitWidth();
-      unsigned SmallBitSize = SmallC->getBitWidth();
-
-      // Check that the low bits are zero.
-      APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
-      if ((Low & *AndC).isZero() && (Low & *BigC).isZero()) {
-        Value *NewAnd = Builder.CreateAnd(V, Low | *AndC);
-        APInt N = SmallC->zext(BigBitSize) | *BigC;
-        Value *NewVal = ConstantInt::get(NewAnd->getType(), N);
-        return Builder.CreateICmp(PredL, NewAnd, NewVal);
-      }
-    }
-  }
-
-  return foldAndOrOfICmpsUsingRanges(PredL, LHS0, *LHSC, PredR, RHS0, *RHSC,
-                                     Builder, /* IsAnd */ true);
-}
-
 Value *InstCombinerImpl::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
                                           bool IsAnd, bool IsLogicalSelect) {
   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
@@ -1596,9 +1470,8 @@ Instruction *InstCombinerImpl::foldCastedBitwiseLogic(BinaryOperator &I) {
   ICmpInst *ICmp0 = dyn_cast<ICmpInst>(Cast0Src);
   ICmpInst *ICmp1 = dyn_cast<ICmpInst>(Cast1Src);
   if (ICmp0 && ICmp1) {
-    Value *Res = LogicOpc == Instruction::And ? foldAndOfICmps(ICmp0, ICmp1, I)
-                                              : foldOrOfICmps(ICmp0, ICmp1, I);
-    if (Res)
+    if (Value *Res =
+            foldAndOrOfICmps(ICmp0, ICmp1, I, LogicOpc == Instruction::And))
       return CastInst::Create(CastOpcode, Res, DestTy);
     return nullptr;
   }
@@ -2134,25 +2007,25 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
     ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
     ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
     if (LHS && RHS)
-      if (Value *Res = foldAndOfICmps(LHS, RHS, I))
+      if (Value *Res = foldAndOrOfICmps(LHS, RHS, I, /* IsAnd */ true))
         return replaceInstUsesWith(I, Res);
 
     // TODO: Make this recursive; it's a little tricky because an arbitrary
     // number of 'and' instructions might have to be created.
     if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
       if (auto *Cmp = dyn_cast<ICmpInst>(X))
-        if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
+        if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ true))
           return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
-        if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
+        if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ true))
          return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
     }
     if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
       if (auto *Cmp = dyn_cast<ICmpInst>(X))
-        if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
+        if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ true))
          return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
-        if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
+        if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ true))
          return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
     }
   }
@@ -2508,15 +2381,15 @@ Value *InstCombinerImpl::matchSelectFromAndOr(Value *A, Value *C, Value *B,
   return nullptr;
 }
 
-/// Fold (icmp)|(icmp) if possible.
-Value *InstCombinerImpl::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
-                                       BinaryOperator &Or) {
-  const SimplifyQuery Q = SQ.getWithInstruction(&Or);
+/// Fold (icmp)&(icmp) or (icmp)|(icmp) if possible.
+Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
+                                          BinaryOperator &BO, bool IsAnd) {
+  const SimplifyQuery Q = SQ.getWithInstruction(&BO);
 
   // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
+  // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
   // if K1 and K2 are a one-bit mask.
-  if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, &Or,
-                                               /* IsAnd */ false))
+  if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, &BO, IsAnd))
     return V;
 
   ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
@@ -2536,7 +2409,7 @@ Value *InstCombinerImpl::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
   // 3) C1 ^ C2 is one-bit mask.
   // 4) LowRange1 ^ LowRange2 and HighRange1 ^ HighRange2 are one-bit mask.
   // This implies all values in the two ranges differ by exactly one bit.
-  if ((PredL == ICmpInst::ICMP_ULT || PredL == ICmpInst::ICMP_ULE) &&
+  if (!IsAnd && (PredL == ICmpInst::ICMP_ULT || PredL == ICmpInst::ICMP_ULE) &&
       PredL == PredR && LHSC && RHSC && LHS->hasOneUse() && RHS->hasOneUse() &&
       LHSC->getBitWidth() == RHSC->getBitWidth() && *LHSC == *RHSC) {
 
@@ -2579,24 +2452,27 @@ Value *InstCombinerImpl::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
   }
 
   // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
+  // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
   if (predicatesFoldable(PredL, PredR)) {
     if (LHS0 == RHS1 && LHS1 == RHS0) {
       PredL = ICmpInst::getSwappedPredicate(PredL);
       std::swap(LHS0, LHS1);
     }
     if (LHS0 == RHS0 && LHS1 == RHS1) {
-      unsigned Code = getICmpCode(PredL) | getICmpCode(PredR);
+      unsigned Code = IsAnd ? getICmpCode(PredL) & getICmpCode(PredR)
+                            : getICmpCode(PredL) | getICmpCode(PredR);
       bool IsSigned = LHS->isSigned() || RHS->isSigned();
       return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder);
     }
   }
 
   // handle (roughly):
   // (icmp ne (A & B), C) | (icmp ne (A & D), E)
-  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, false, Builder))
+  // (icmp eq (A & B), C) & (icmp eq (A & D), E)
+  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, IsAnd, Builder))
    return V;
 
-  if (LHS->hasOneUse() || RHS->hasOneUse()) {
+  if (!IsAnd && (LHS->hasOneUse() || RHS->hasOneUse())) {
     // (icmp eq B, 0) | (icmp ult A, B) -> (icmp ule A, B-1)
     // (icmp eq B, 0) | (icmp ugt B, A) -> (icmp ule A, B-1)
     Value *A = nullptr, *B = nullptr;
@@ -2622,44 +2498,49 @@ Value *InstCombinerImpl::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
           Builder.CreateAdd(B, Constant::getAllOnesValue(B->getType())), A);
   }
 
-  if (Value *V = foldAndOrOfICmpsWithConstEq(LHS, RHS, Or, Builder, Q))
+  if (Value *V = foldAndOrOfICmpsWithConstEq(LHS, RHS, BO, Builder, Q))
     return V;
-  if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, Or, Builder, Q))
+  if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, BO, Builder, Q))
     return V;
 
-  if (Value *V = foldIsPowerOf2OrZero(LHS, RHS, /*IsAnd=*/false, Builder))
+  if (Value *V = foldIsPowerOf2OrZero(LHS, RHS, IsAnd, Builder))
    return V;
-  if (Value *V = foldIsPowerOf2OrZero(RHS, LHS, /*IsAnd=*/false, Builder))
+  if (Value *V = foldIsPowerOf2OrZero(RHS, LHS, IsAnd, Builder))
    return V;
 
   // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
-  if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/true))
+  // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
+  if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/!IsAnd))
     return V;
 
   // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
-  if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/true))
+  // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
+  if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/!IsAnd))
     return V;
 
-  if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, false, Builder))
+  if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, IsAnd, Builder))
     return V;
 
-  if (Value *V = foldIsPowerOf2(LHS, RHS, false /* JoinedByAnd */, Builder))
+  if (IsAnd)
+    if (Value *V = foldSignedTruncationCheck(LHS, RHS, BO, Builder))
+      return V;
+
+  if (Value *V = foldIsPowerOf2(LHS, RHS, IsAnd, Builder))
     return V;
 
-  if (Value *X =
-          foldUnsignedUnderflowCheck(LHS, RHS, /*IsAnd=*/false, Q, Builder))
+  if (Value *X = foldUnsignedUnderflowCheck(LHS, RHS, IsAnd, Q, Builder))
    return X;
-  if (Value *X =
-          foldUnsignedUnderflowCheck(RHS, LHS, /*IsAnd=*/false, Q, Builder))
+  if (Value *X = foldUnsignedUnderflowCheck(RHS, LHS, IsAnd, Q, Builder))
    return X;
 
-  if (Value *X = foldEqOfParts(LHS, RHS, /*IsAnd=*/false))
+  if (Value *X = foldEqOfParts(LHS, RHS, IsAnd))
    return X;
 
   // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
+  // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
   // TODO: Remove this when foldLogOpOfMaskedICmps can handle undefs.
-  if (PredL == ICmpInst::ICMP_NE && match(LHS1, m_ZeroInt()) &&
-      PredR == ICmpInst::ICMP_NE && match(RHS1, m_ZeroInt()) &&
+  if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
+      PredL == PredR && match(LHS1, m_ZeroInt()) && match(RHS1, m_ZeroInt()) &&
      LHS0->getType() == RHS0->getType()) {
    Value *NewOr = Builder.CreateOr(LHS0, RHS0);
    return Builder.CreateICmp(PredL, NewOr,
@@ -2670,8 +2551,43 @@ Value *InstCombinerImpl::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
   if (!LHSC || !RHSC)
     return nullptr;
 
+  // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
+  // where CMAX is the all ones value for the truncated type,
+  // iff the lower bits of C2 and CA are zero.
+  if (IsAnd && PredL == ICmpInst::ICMP_EQ && PredL == PredR &&
+      LHS->hasOneUse() && RHS->hasOneUse()) {
+    Value *V;
+    const APInt *AndC, *SmallC = nullptr, *BigC = nullptr;
+
+    // (trunc x) == C1 & (and x, CA) == C2
+    // (and x, CA) == C2 & (trunc x) == C1
+    if (match(RHS0, m_Trunc(m_Value(V))) &&
+        match(LHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
+      SmallC = RHSC;
+      BigC = LHSC;
+    } else if (match(LHS0, m_Trunc(m_Value(V))) &&
+               match(RHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
+      SmallC = LHSC;
+      BigC = RHSC;
+    }
+
+    if (SmallC && BigC) {
+      unsigned BigBitSize = BigC->getBitWidth();
+      unsigned SmallBitSize = SmallC->getBitWidth();
+
+      // Check that the low bits are zero.
+      APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
+      if ((Low & *AndC).isZero() && (Low & *BigC).isZero()) {
+        Value *NewAnd = Builder.CreateAnd(V, Low | *AndC);
+        APInt N = SmallC->zext(BigBitSize) | *BigC;
+        Value *NewVal = ConstantInt::get(NewAnd->getType(), N);
+        return Builder.CreateICmp(PredL, NewAnd, NewVal);
+      }
+    }
+  }
+
   return foldAndOrOfICmpsUsingRanges(PredL, LHS0, *LHSC, PredR, RHS0, *RHSC,
-                                     Builder, /* IsAnd */ false);
+                                     Builder, IsAnd);
 }
 
 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
@@ -2905,26 +2821,26 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
     ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
     ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
     if (LHS && RHS)
-      if (Value *Res = foldOrOfICmps(LHS, RHS, I))
+      if (Value *Res = foldAndOrOfICmps(LHS, RHS, I, /* IsAnd */ false))
        return replaceInstUsesWith(I, Res);
 
     // TODO: Make this recursive; it's a little tricky because an arbitrary
     // number of 'or' instructions might have to be created.
     Value *X, *Y;
     if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
       if (auto *Cmp = dyn_cast<ICmpInst>(X))
-        if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
+        if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ false))
          return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
-        if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
+        if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ false))
          return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
     }
     if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
       if (auto *Cmp = dyn_cast<ICmpInst>(X))
-        if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
+        if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ false))
          return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
-        if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
+        if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ false))
          return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
     }
   }

llvm/lib/Transforms/InstCombine/InstCombineInternal.h

Lines changed: 2 additions & 2 deletions
@@ -344,8 +344,8 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
                                             const CastInst *CI2);
   Value *simplifyIntToPtrRoundTripCast(Value *Val);
 
-  Value *foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &And);
-  Value *foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Or);
+  Value *foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &BO,
+                          bool IsAnd);
   Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Xor);
 
   Value *foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd);
