Skip to content

Commit e5ac661

Browse files
committed
[InstCombine] Fold umul.overflow(x, c1) | (x*c1 > c2) to x > c2/c1
This pattern checks whether the product of a variable and a constant would be mathematically (i.e., as integer numbers instead of bit vectors) greater than a given constant bound. The pattern appears to occur when compiling several Rust projects (it seems to originate from the `smallvec` crate, but I have not verified this further). Unless `c1` is `0` (which should only occur in non-splat vectors, because it would otherwise have been folded away already), we can transform this pattern into `x > c2/c1`, with all operations working on unsigned integers. If `c1` is `0`, we can use `x > -1` instead, which is always false. Alive proofs: https://alive2.llvm.org/ce/z/HFVvsJ. Closes #142674.
1 parent e0591db commit e5ac661

File tree

2 files changed

+42
-30
lines changed

2 files changed

+42
-30
lines changed

llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3659,6 +3659,37 @@ static std::optional<DecomposedBitMaskMul> matchBitmaskMul(Value *V) {
36593659
return std::nullopt;
36603660
}
36613661

3662+
/// Fold Res, Overflow = (umul.with.overflow x c1); (or Overflow (ugt Res c2))
3663+
/// --> (ugt x (c2/c1)). This code checks whether a multiplication of two
3664+
/// unsigned numbers (one is a constant) is mathematically greater than a
3665+
/// second constant.
3666+
static Value *foldOrUnsignedUMulOverflowICmp(BinaryOperator &I,
3667+
InstCombiner::BuilderTy &Builder,
3668+
const DataLayout &DL) {
3669+
const WithOverflowInst *WO;
3670+
const Value *WOV;
3671+
Constant *C1, *C2;
3672+
if (match(&I, m_c_Or(m_OneUse(m_ExtractValue<1>(
3673+
m_CombineAnd(m_WithOverflowInst(WO), m_Value(WOV)))),
3674+
m_OneUse(m_SpecificCmp(
3675+
ICmpInst::ICMP_UGT,
3676+
m_OneUse(m_ExtractValue<0>(m_Deferred(WOV))),
3677+
m_ImmConstant(C2))))) &&
3678+
WO->getIntrinsicID() == Intrinsic::umul_with_overflow &&
3679+
match(WO->getRHS(), m_ImmConstant(C1)) && WO->hasNUses(2)) {
3680+
Type *Ty = C1->getType();
3681+
// If C1 is 0 (which may occur in non-splat vectors), we use -1 as the new
3682+
// constant instead.
3683+
Constant *NewC = ConstantFoldSelectInstruction(
3684+
ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, C1,
3685+
ConstantInt::getNullValue(Ty), DL),
3686+
ConstantInt::getAllOnesValue(Ty),
3687+
ConstantFoldBinaryOpOperands(Instruction::UDiv, C2, C1, DL));
3688+
return Builder.CreateICmp(ICmpInst::ICMP_UGT, WO->getLHS(), NewC);
3689+
}
3690+
return nullptr;
3691+
}
3692+
36623693
// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
36633694
// here. We should standardize that construct where it is needed or choose some
36643695
// other way to ensure that commutated variants of patterns are not missed.
@@ -4109,6 +4140,11 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
41094140
}
41104141
}
41114142

4143+
// Try to fold the pattern "Overflow | icmp pred Res, C2" into a single
4144+
// comparison instruction for umul.with.overflow.
4145+
if (Value *R = foldOrUnsignedUMulOverflowICmp(I, Builder, DL))
4146+
return replaceInstUsesWith(I, R);
4147+
41124148
// (~x) | y --> ~(x & (~y)) iff that gets rid of inversions
41134149
if (sinkNotIntoOtherHandOfLogicalOp(I))
41144150
return &I;

llvm/test/Transforms/InstCombine/icmp_or_umul_overflow.ll

Lines changed: 6 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,7 @@ declare void @use.i64i1({i64, i1} %x)
88
define i1 @umul_greater_than_or_overflow_const(i64 %in) {
99
; CHECK-LABEL: define i1 @umul_greater_than_or_overflow_const(
1010
; CHECK-SAME: i64 [[IN:%.*]]) {
11-
; CHECK-NEXT: [[TMP2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[IN]], i64 168)
12-
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
13-
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
14-
; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[TMP3]], -16
15-
; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP4]], [[TMP5]]
11+
; CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[IN]], 109802048057794950
1612
; CHECK-NEXT: ret i1 [[TMP6]]
1713
;
1814
%mwo = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %in, i64 168)
@@ -26,11 +22,7 @@ define i1 @umul_greater_than_or_overflow_const(i64 %in) {
2622
define i1 @umul_greater_than_or_overflow_const_i8(i8 %in) {
2723
; CHECK-LABEL: define i1 @umul_greater_than_or_overflow_const_i8(
2824
; CHECK-SAME: i8 [[IN:%.*]]) {
29-
; CHECK-NEXT: [[TMP2:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[IN]], i8 24)
30-
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i8, i1 } [[TMP2]], 0
31-
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i8, i1 } [[TMP2]], 1
32-
; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt i8 [[TMP3]], -16
33-
; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP4]], [[TMP5]]
25+
; CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i8 [[IN]], 10
3426
; CHECK-NEXT: ret i1 [[TMP6]]
3527
;
3628
%mwo = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %in, i8 24)
@@ -44,11 +36,7 @@ define i1 @umul_greater_than_or_overflow_const_i8(i8 %in) {
4436
define i1 @umul_greater_than_or_overflow_const_commuted(i64 %in) {
4537
; CHECK-LABEL: define i1 @umul_greater_than_or_overflow_const_commuted(
4638
; CHECK-SAME: i64 [[IN:%.*]]) {
47-
; CHECK-NEXT: [[TMP2:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[IN]], i64 48)
48-
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
49-
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
50-
; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[TMP3]], 9223372036854775800
51-
; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP5]], [[TMP4]]
39+
; CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[IN]], 192153584101141162
5240
; CHECK-NEXT: ret i1 [[TMP6]]
5341
;
5442
%mwo = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %in, i64 48)
@@ -62,11 +50,7 @@ define i1 @umul_greater_than_or_overflow_const_commuted(i64 %in) {
6250
define i1 @umul_greater_than_or_overflow_const_disjoint(i64 %in) {
6351
; CHECK-LABEL: define i1 @umul_greater_than_or_overflow_const_disjoint(
6452
; CHECK-SAME: i64 [[IN:%.*]]) {
65-
; CHECK-NEXT: [[TMP2:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[IN]], i64 40)
66-
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
67-
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
68-
; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[TMP3]], 9223372036854775800
69-
; CHECK-NEXT: [[TMP6:%.*]] = or disjoint i1 [[TMP4]], [[TMP5]]
53+
; CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[IN]], 230584300921369395
7054
; CHECK-NEXT: ret i1 [[TMP6]]
7155
;
7256
%mwo = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %in, i64 40)
@@ -80,11 +64,7 @@ define i1 @umul_greater_than_or_overflow_const_disjoint(i64 %in) {
8064
define <2 x i1> @umul_greater_than_or_overflow_const_vector_splat(<2 x i64> %in) {
8165
; CHECK-LABEL: define <2 x i1> @umul_greater_than_or_overflow_const_vector_splat(
8266
; CHECK-SAME: <2 x i64> [[IN:%.*]]) {
83-
; CHECK-NEXT: [[TMP2:%.*]] = tail call { <2 x i64>, <2 x i1> } @llvm.umul.with.overflow.v2i64(<2 x i64> [[IN]], <2 x i64> splat (i64 1424))
84-
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP2]], 0
85-
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP2]], 1
86-
; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt <2 x i64> [[TMP3]], splat (i64 9223372036854775800)
87-
; CHECK-NEXT: [[TMP6:%.*]] = or <2 x i1> [[TMP4]], [[TMP5]]
67+
; CHECK-NEXT: [[TMP6:%.*]] = icmp ugt <2 x i64> [[IN]], splat (i64 6477087104532848)
8868
; CHECK-NEXT: ret <2 x i1> [[TMP6]]
8969
;
9070
%mwo = tail call { <2 x i64>, <2 x i1> } @llvm.umul.with.overflow.v2i64(<2 x i64> %in, <2 x i64> <i64 1424, i64 1424>)
@@ -98,11 +78,7 @@ define <2 x i1> @umul_greater_than_or_overflow_const_vector_splat(<2 x i64> %in)
9878
define <4 x i1> @umul_greater_than_or_overflow_const_vector_non_splat(<4 x i64> %in) {
9979
; CHECK-LABEL: define <4 x i1> @umul_greater_than_or_overflow_const_vector_non_splat(
10080
; CHECK-SAME: <4 x i64> [[IN:%.*]]) {
101-
; CHECK-NEXT: [[MWO:%.*]] = tail call { <4 x i64>, <4 x i1> } @llvm.umul.with.overflow.v4i64(<4 x i64> [[IN]], <4 x i64> <i64 24, i64 1424, i64 0, i64 -1>)
102-
; CHECK-NEXT: [[MUL:%.*]] = extractvalue { <4 x i64>, <4 x i1> } [[MWO]], 0
103-
; CHECK-NEXT: [[OVF:%.*]] = extractvalue { <4 x i64>, <4 x i1> } [[MWO]], 1
104-
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt <4 x i64> [[MUL]], <i64 9223372036854775000, i64 9223372036854775800, i64 -16, i64 -16>
105-
; CHECK-NEXT: [[RET:%.*]] = or <4 x i1> [[OVF]], [[CMP]]
81+
; CHECK-NEXT: [[RET:%.*]] = icmp ugt <4 x i64> [[IN]], <i64 384307168202282291, i64 6477087104532848, i64 -1, i64 0>
10682
; CHECK-NEXT: ret <4 x i1> [[RET]]
10783
;
10884
%mwo = tail call { <4 x i64>, <4 x i1> } @llvm.umul.with.overflow.v2i64(<4 x i64> %in, <4 x i64> <i64 24, i64 1424, i64 0, i64 -1>)

0 commit comments

Comments
 (0)