@@ -1,40 +1,72 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt -passes=loop-vectorize -force-vector-width=4 -enable-vplan-native-path -S %s | FileCheck %s
 
 ; Test that the VPlan native path is able to widen call instructions like
 ; llvm.sqrt.* intrinsic calls.
 
 declare double @llvm.sqrt.f64(double %0)
 define void @widen_call_instruction(ptr noalias nocapture readonly %a.in, ptr noalias nocapture readonly %b.in, ptr noalias nocapture %c.out) {
-; CHECK-LABEL: @widen_call_instruction(
-
-; CHECK: vector.body:
-; CHECK-NEXT: %[[FOR1_INDEX:.*]] = phi i64 [ 0, %[[LABEL_PR:.*]] ], [ %{{.*}}, %[[LABEL_FOR1_LATCH:.*]] ]
-; CHECK: %[[VEC_INDEX:.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[LABEL_PR]] ], [ %{{.*}}, %[[LABEL_FOR1_LATCH]] ]
-; CHECK-NEXT: %[[A_PTR:.*]] = getelementptr inbounds double, ptr %a.in, <4 x i64> %[[VEC_INDEX]]
-; CHECK-NEXT: %[[MASKED_GATHER1:.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %[[A_PTR]], i32 8, <4 x i1> splat (i1 true), <4 x double> poison)
-; CHECK-NEXT: %[[B_PTR:.*]] = getelementptr inbounds double, ptr %b.in, <4 x i64> %[[VEC_INDEX]]
-; CHECK-NEXT: %[[MASKED_GATHER2:.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %[[B_PTR]], i32 8, <4 x i1> splat (i1 true), <4 x double> poison)
-; CHECK-NEXT: %[[B_SQRT:.*]] = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %[[MASKED_GATHER2]])
-; CHECK-NEXT: br label %[[FOR2_HEADER:.*]]
-
-; CHECK: [[FOR2_HEADER]]:
-; CHECK-NEXT: %[[FOR2_INDEX:.*]] = phi <4 x i32> [ zeroinitializer, %vector.body ], [ %[[FOR2_INDEX_NEXT:.*]], %[[FOR2_HEADER]] ]
-; CHECK-NEXT: %[[REDUCTION:.*]] = phi <4 x double> [ %[[MASKED_GATHER1]], %vector.body ], [ %[[REDUCTION_NEXT:.*]], %[[FOR2_HEADER]] ]
-; CHECK-NEXT: %[[REDUCTION_NEXT]] = fadd <4 x double> %[[B_SQRT]], %[[REDUCTION]]
-; CHECK-NEXT: %[[FOR2_INDEX_NEXT]] = add nuw nsw <4 x i32> %[[FOR2_INDEX]], splat (i32 1)
-; CHECK-NEXT: %[[VEC_PTR:.*]] = icmp eq <4 x i32> %[[FOR2_INDEX_NEXT]], splat (i32 10000)
-; CHECK-NEXT: %[[EXIT_COND:.*]] = extractelement <4 x i1> %[[VEC_PTR]], i32 0
-; CHECK-NEXT: br i1 %[[EXIT_COND]], label %[[FOR1_LATCH:.*]], label %{{.*}}
-
-; CHECK: [[FOR1_LATCH]]:
-; CHECK-NEXT: %[[REDUCTION:.*]] = phi <4 x double> [ %[[REDUCTION_NEXT]], %[[FOR2_HEADER]] ]
-; CHECK-NEXT: %[[C_PTR:.*]] = getelementptr inbounds double, ptr %c.out, <4 x i64> %[[VEC_INDEX]]
-; CHECK-NEXT: call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> %[[REDUCTION]], <4 x ptr> %[[C_PTR]], i32 8, <4 x i1> splat (i1 true))
-; CHECK-NEXT: %[[FOR1_INDEX_NEXT:.*]] = add nuw i64 %[[FOR1_INDEX]], 4
-; CHECK-NEXT: %{{.*}} = add <4 x i64> %[[VEC_INDEX]], splat (i64 4)
-; CHECK-NEXT: %[[EXIT_COND:.*]] = icmp eq i64 %[[FOR1_INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 %[[EXIT_COND]], label %{{.*}}, label %vector.body
-
+; CHECK-LABEL: define void @widen_call_instruction(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A_IN:%.*]], ptr noalias readonly captures(none) [[B_IN:%.*]], ptr noalias captures(none) [[C_OUT:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_LATCH:.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_LATCH]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[A_IN]], <4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> [[TMP0]], i32 8, <4 x i1> splat (i1 true), <4 x double> poison)
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds double, ptr [[B_IN]], <4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> [[TMP1]], i32 8, <4 x i1> splat (i1 true), <4 x double> poison)
+; CHECK-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.sqrt.v4f64(<4 x double> [[WIDE_MASKED_GATHER1]])
+; CHECK-NEXT: br label %[[FOR2_HEADER2:.*]]
+; CHECK: [[FOR2_HEADER2]]:
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_BODY]] ], [ [[TMP4:%.*]], %[[FOR2_HEADER2]] ]
+; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x double> [ [[WIDE_MASKED_GATHER]], %[[VECTOR_BODY]] ], [ [[TMP3:%.*]], %[[FOR2_HEADER2]] ]
+; CHECK-NEXT: [[TMP3]] = fadd <4 x double> [[TMP2]], [[VEC_PHI3]]
+; CHECK-NEXT: [[TMP4]] = add nuw nsw <4 x i32> [[VEC_PHI]], splat (i32 1)
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i32> [[TMP4]], splat (i32 10000)
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP5]], i32 0
+; CHECK-NEXT: br i1 [[TMP6]], label %[[VECTOR_LATCH]], label %[[FOR2_HEADER2]]
+; CHECK: [[VECTOR_LATCH]]:
+; CHECK-NEXT: [[VEC_PHI4:%.*]] = phi <4 x double> [ [[TMP3]], %[[FOR2_HEADER2]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[C_OUT]], <4 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> [[VEC_PHI4]], <4 x ptr> [[TMP7]], i32 8, <4 x i1> splat (i1 true))
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR1_HEADER:.*]]
+; CHECK: [[FOR1_HEADER]]:
+; CHECK-NEXT: [[INDVAR1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVAR11:%.*]], %[[FOR1_LATCH:.*]] ]
+; CHECK-NEXT: [[A_PTR:%.*]] = getelementptr inbounds double, ptr [[A_IN]], i64 [[INDVAR1]]
+; CHECK-NEXT: [[A:%.*]] = load double, ptr [[A_PTR]], align 8
+; CHECK-NEXT: [[B_PTR:%.*]] = getelementptr inbounds double, ptr [[B_IN]], i64 [[INDVAR1]]
+; CHECK-NEXT: [[B:%.*]] = load double, ptr [[B_PTR]], align 8
+; CHECK-NEXT: [[B_SQRT:%.*]] = call double @llvm.sqrt.f64(double [[B]])
+; CHECK-NEXT: br label %[[FOR2_HEADER:.*]]
+; CHECK: [[FOR2_HEADER]]:
+; CHECK-NEXT: [[INDVAR2:%.*]] = phi i32 [ 0, %[[FOR1_HEADER]] ], [ [[INDVAR21:%.*]], %[[FOR2_HEADER]] ]
+; CHECK-NEXT: [[A_REDUCTION:%.*]] = phi double [ [[A]], %[[FOR1_HEADER]] ], [ [[A_REDUCTION1:%.*]], %[[FOR2_HEADER]] ]
+; CHECK-NEXT: [[A_REDUCTION1]] = fadd double [[B_SQRT]], [[A_REDUCTION]]
+; CHECK-NEXT: [[INDVAR21]] = add nuw nsw i32 [[INDVAR2]], 1
+; CHECK-NEXT: [[FOR2_COND:%.*]] = icmp eq i32 [[INDVAR21]], 10000
+; CHECK-NEXT: br i1 [[FOR2_COND]], label %[[FOR1_LATCH]], label %[[FOR2_HEADER]]
+; CHECK: [[FOR1_LATCH]]:
+; CHECK-NEXT: [[A_REDUCTION1_LCSSA:%.*]] = phi double [ [[A_REDUCTION1]], %[[FOR2_HEADER]] ]
+; CHECK-NEXT: [[C_PTR:%.*]] = getelementptr inbounds double, ptr [[C_OUT]], i64 [[INDVAR1]]
+; CHECK-NEXT: store double [[A_REDUCTION1_LCSSA]], ptr [[C_PTR]], align 8
+; CHECK-NEXT: [[INDVAR11]] = add nuw nsw i64 [[INDVAR1]], 1
+; CHECK-NEXT: [[FOR1_COND:%.*]] = icmp eq i64 [[INDVAR11]], 1000
+; CHECK-NEXT: br i1 [[FOR1_COND]], label %[[EXIT]], label %[[FOR1_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
 entry:
   br label %for1.header
 
@@ -66,5 +98,60 @@
   ret void
 }
 
+; Check we do not try to widen non-intrinsic calls,
+; https://github.com/llvm/llvm-project/issues/131071.
+define void @call_to_non_intrinsic() {
+; CHECK-LABEL: define void @call_to_non_intrinsic() {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[OUTER_HEADER:.*]]
+; CHECK: [[OUTER_HEADER]]:
+; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[OUTER_IV_NEXT:%.*]], %[[OUTER_LATCH:.*]] ]
+; CHECK-NEXT: br label %[[INNER_HEADER:.*]]
+; CHECK: [[INNER_HEADER]]:
+; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, %[[OUTER_HEADER]] ], [ [[INNER_IV_NEXT:%.*]], %[[INNER_HEADER]] ]
+; CHECK-NEXT: call void @use()
+; CHECK-NEXT: [[INNER_IV_NEXT]] = add i64 [[INNER_IV]], 1
+; CHECK-NEXT: [[INNER_EC:%.*]] = icmp eq i64 [[INNER_IV_NEXT]], 100
+; CHECK-NEXT: br i1 [[INNER_EC]], label %[[OUTER_LATCH]], label %[[INNER_HEADER]]
+; CHECK: [[OUTER_LATCH]]:
+; CHECK-NEXT: [[OUTER_IV_NEXT]] = add i64 [[OUTER_IV]], 1
+; CHECK-NEXT: [[OUTER_EC:%.*]] = icmp eq i64 [[OUTER_IV_NEXT]], 100
+; CHECK-NEXT: br i1 [[OUTER_EC]], label %[[EXIT:.*]], label %[[OUTER_HEADER]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+  br label %outer.header
+
+outer.header:
+  %outer.iv = phi i64 [ 0, %entry ], [ %outer.iv.next, %outer.latch ]
+  br label %inner.header
+
+inner.header:
+  %inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner.header ]
+  call void @use()
+  %inner.iv.next = add i64 %inner.iv, 1
+  %inner.ec = icmp eq i64 %inner.iv.next, 100
+  br i1 %inner.ec, label %outer.latch, label %inner.header
+
+outer.latch:
+  %outer.iv.next = add i64 %outer.iv, 1
+  %outer.ec = icmp eq i64 %outer.iv.next, 100
+  br i1 %outer.ec, label %exit, label %outer.header, !llvm.loop !0
+
+exit:
+  ret void
+}
+
+declare void @use()
+
 !0 = distinct !{!0, !1}
 !1 = !{!"llvm.loop.vectorize.enable", i1 true}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META5:![0-9]+]]}
+; CHECK: [[META5]] = !{!"llvm.loop.vectorize.enable", i1 true}
+;.
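
Note on regenerating the checks: the NOTE line records that these assertions were autogenerated by utils/update_test_checks.py with UTC_ARGS --version 5, so they can be refreshed mechanically after future changes rather than edited by hand. A minimal sketch of the invocation, assuming a local build tree at build/ and using a placeholder for this test's path, which is not shown in the diff:

    # Hypothetical paths; the script reads the RUN line and the UTC_ARGS
    # annotation (here: --version 5) from the test file itself.
    llvm/utils/update_test_checks.py --opt-binary build/bin/opt \
        llvm/test/Transforms/LoopVectorize/<this-test>.ll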