; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -p loop-vectorize -force-vector-width=16 -force-vector-interleave=2 -mattr=+dotprod -S %s | FileCheck --check-prefix=IC2 %s
; RUN: opt -p loop-vectorize -force-vector-width=16 -force-vector-interleave=4 -mattr=+dotprod -S %s | FileCheck --check-prefix=IC4 %s

target triple = "arm64-apple-macosx"

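; Sum-of-squares reduction over i8 elements with a non-constant start value
; (%rdx.start), vectorized with VF 16 at interleave counts 2 and 4. The start
; value is inserted into lane 0 of the <4 x i32> accumulator feeding the
; interleaved llvm.experimental.vector.partial.reduce.add chains, which are
; combined in the middle block before the final vector.reduce.add.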
define i32 @partial_reduce_with_non_constant_start_value(ptr %src, i32 %rdx.start, i64 %n) {
; IC2-LABEL: define i32 @partial_reduce_with_non_constant_start_value(
; IC2-SAME: ptr [[SRC:%.*]], i32 [[RDX_START:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; IC2-NEXT: [[ENTRY:.*]]:
; IC2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
; IC2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IC2: [[VECTOR_PH]]:
; IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 32
; IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; IC2-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[RDX_START]], i32 0
; IC2-NEXT: br label %[[VECTOR_BODY:.*]]
; IC2: [[VECTOR_BODY]]:
; IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IC2-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP0]], %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ]
; IC2-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ [[TMP0]], %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], %[[VECTOR_BODY]] ]
; IC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]]
; IC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
; IC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16
; IC2-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; IC2-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; IC2-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; IC2-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; IC2-NEXT: [[TMP6:%.*]] = mul nuw nsw <16 x i32> [[TMP4]], [[TMP4]]
; IC2-NEXT: [[TMP7:%.*]] = mul nuw nsw <16 x i32> [[TMP5]], [[TMP5]]
; IC2-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
; IC2-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP7]])
; IC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; IC2-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; IC2-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IC2: [[MIDDLE_BLOCK]]:
; IC2-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]]
; IC2-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
; IC2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; IC2-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; IC2: [[SCALAR_PH]]:
; IC2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; IC2-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ [[RDX_START]], %[[ENTRY]] ]
; IC2-NEXT: br label %[[LOOP:.*]]
; IC2: [[LOOP]]:
; IC2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IC2-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ]
; IC2-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[IV]]
; IC2-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC]], align 1
; IC2-NEXT: [[CONV:%.*]] = zext i8 [[L]] to i32
; IC2-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[CONV]], [[CONV]]
; IC2-NEXT: [[RDX_NEXT]] = add nsw i32 [[MUL]], [[RDX]]
; IC2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
; IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IC2-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; IC2: [[EXIT]]:
; IC2-NEXT: [[RDX_NEXT_LCSSA:%.*]] = phi i32 [ [[RDX_NEXT]], %[[LOOP]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
; IC2-NEXT: ret i32 [[RDX_NEXT_LCSSA]]
;
; IC4-LABEL: define i32 @partial_reduce_with_non_constant_start_value(
; IC4-SAME: ptr [[SRC:%.*]], i32 [[RDX_START:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; IC4-NEXT: [[ENTRY:.*]]:
; IC4-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 64
; IC4-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IC4: [[VECTOR_PH]]:
; IC4-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 64
; IC4-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; IC4-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[RDX_START]], i32 0
; IC4-NEXT: br label %[[VECTOR_BODY:.*]]
; IC4: [[VECTOR_BODY]]:
; IC4-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IC4-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP0]], %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ]
; IC4-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ [[TMP0]], %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], %[[VECTOR_BODY]] ]
; IC4-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ [[TMP0]], %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE8:%.*]], %[[VECTOR_BODY]] ]
; IC4-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ [[TMP0]], %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE9:%.*]], %[[VECTOR_BODY]] ]
; IC4-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]]
; IC4-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
; IC4-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16
; IC4-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 32
; IC4-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 48
; IC4-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; IC4-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; IC4-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
; IC4-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
; IC4-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; IC4-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
; IC4-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; IC4-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
; IC4-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i32> [[TMP6]], [[TMP6]]
; IC4-NEXT: [[TMP11:%.*]] = mul nuw nsw <16 x i32> [[TMP7]], [[TMP7]]
; IC4-NEXT: [[TMP12:%.*]] = mul nuw nsw <16 x i32> [[TMP8]], [[TMP8]]
; IC4-NEXT: [[TMP13:%.*]] = mul nuw nsw <16 x i32> [[TMP9]], [[TMP9]]
; IC4-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
; IC4-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]])
; IC4-NEXT: [[PARTIAL_REDUCE8]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP12]])
; IC4-NEXT: [[PARTIAL_REDUCE9]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP13]])
; IC4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
; IC4-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; IC4-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IC4: [[MIDDLE_BLOCK]]:
; IC4-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE7]], [[PARTIAL_REDUCE]]
; IC4-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[PARTIAL_REDUCE8]], [[BIN_RDX]]
; IC4-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[PARTIAL_REDUCE9]], [[BIN_RDX10]]
; IC4-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]])
; IC4-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; IC4-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; IC4: [[SCALAR_PH]]:
; IC4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; IC4-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP15]], %[[MIDDLE_BLOCK]] ], [ [[RDX_START]], %[[ENTRY]] ]
; IC4-NEXT: br label %[[LOOP:.*]]
; IC4: [[LOOP]]:
; IC4-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IC4-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ]
; IC4-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[IV]]
; IC4-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC]], align 1
; IC4-NEXT: [[CONV:%.*]] = zext i8 [[L]] to i32
; IC4-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[CONV]], [[CONV]]
; IC4-NEXT: [[RDX_NEXT]] = add nsw i32 [[MUL]], [[RDX]]
; IC4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
; IC4-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IC4-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; IC4: [[EXIT]]:
; IC4-NEXT: [[RDX_NEXT_LCSSA:%.*]] = phi i32 [ [[RDX_NEXT]], %[[LOOP]] ], [ [[TMP15]], %[[MIDDLE_BLOCK]] ]
; IC4-NEXT: ret i32 [[RDX_NEXT_LCSSA]]
;
entry:
  br label %loop

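; Each iteration widens a byte of %src to i32, squares it, and accumulates the
; result into %rdx, which starts at %rdx.start.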
loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %rdx = phi i32 [ %rdx.start, %entry ], [ %rdx.next, %loop ]
  %gep.src = getelementptr inbounds i8, ptr %src, i64 %iv
  %l = load i8, ptr %gep.src, align 1
  %conv = zext i8 %l to i32
  %mul = mul nuw nsw i32 %conv, %conv
  %rdx.next = add nsw i32 %mul, %rdx
  %iv.next = add nsw i64 %iv, 1
  %ec = icmp eq i64 %iv.next, %n
  br i1 %ec, label %exit, label %loop

exit:
  ret i32 %rdx.next
}
;.
; IC2: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; IC2: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; IC2: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; IC2: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
;.
; IC4: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; IC4: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; IC4: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; IC4: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
;.