; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 4
; RUN: opt -S < %s -p loop-vectorize -enable-early-exit-vectorization -force-vector-width=4 | FileCheck %s

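; Both pointers are assumed to be 4-byte aligned and dereferenceable for 1024
; bytes, which covers every byte read by the 1024-iteration early-exit search
; loop below; the autogenerated checks currently show the loop kept in scalar
; form.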
define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_size(ptr noalias %p1, ptr noalias %p2) nofree nosync {
; CHECK-LABEL: define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_size(
; CHECK-SAME: ptr noalias [[P1:%.*]], ptr noalias [[P2:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P1]], i64 4), "dereferenceable"(ptr [[P1]], i64 1024) ]
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i64 4), "dereferenceable"(ptr [[P2]], i64 1024) ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
; CHECK-NEXT:    [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
; CHECK-NEXT:    [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
; CHECK-NEXT:    br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END:%.*]]
; CHECK:       loop.inc:
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK:       loop.end:
; CHECK-NEXT:    [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ -1, [[LOOP_INC]] ]
; CHECK-NEXT:    ret i64 [[RETVAL]]
;
entry:
  call void @llvm.assume(i1 true) [ "align"(ptr %p1, i64 4), "dereferenceable"(ptr %p1, i64 1024) ]
  call void @llvm.assume(i1 true) [ "align"(ptr %p2, i64 4), "dereferenceable"(ptr %p2, i64 1024) ]
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 1024
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ -1, %loop.inc ]
  ret i64 %retval
}

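; Same search loop, but with a trip count of 1025 the 1024-byte dereferenceable
; assumption is one byte short of covering the final load at index 1024.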
define i64 @early_exit_alignment_and_deref_via_assumption_with_constant_size_too_small(ptr noalias %p1, ptr noalias %p2) nofree nosync {
; CHECK-LABEL: define i64 @early_exit_alignment_and_deref_via_assumption_with_constant_size_too_small(
; CHECK-SAME: ptr noalias [[P1:%.*]], ptr noalias [[P2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P1]], i64 4), "dereferenceable"(ptr [[P1]], i64 1024) ]
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i64 4), "dereferenceable"(ptr [[P2]], i64 1024) ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
; CHECK-NEXT:    [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
; CHECK-NEXT:    [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
; CHECK-NEXT:    br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END:%.*]]
; CHECK:       loop.inc:
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 1025
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK:       loop.end:
; CHECK-NEXT:    [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ -1, [[LOOP_INC]] ]
; CHECK-NEXT:    ret i64 [[RETVAL]]
;
entry:
  call void @llvm.assume(i1 true) [ "align"(ptr %p1, i64 4), "dereferenceable"(ptr %p1, i64 1024) ]
  call void @llvm.assume(i1 true) [ "align"(ptr %p2, i64 4), "dereferenceable"(ptr %p2, i64 1024) ]
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 1025
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ -1, %loop.inc ]
  ret i64 %retval
}