|
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 < %s | FileCheck %s

; Reduces eight <4 x i32> vectors to scalars, packs the eight sums into one
; <8 x i32>, then per lane averages the low and high 16-bit halves
; (((x & 0xffff) + (x >> 16)) >> 1) and sums the lanes. Exercises lowering of
; the and/lshr/add/lshr halving-average idiom on a vector built from addv
; results: expected codegen keeps the sums in vector lanes (mov v.s[i]),
; splits halves with xtn/shrn, and selects uhadd for the average.
define i32 @lower_lshr(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %e, <4 x i32> %f, <4 x i32> %g, <4 x i32> %h) {
; CHECK-LABEL: lower_lshr:
; CHECK: // %bb.0:
; CHECK-NEXT: addv s0, v0.4s
; CHECK-NEXT: addv s1, v1.4s
; CHECK-NEXT: addv s4, v4.4s
; CHECK-NEXT: addv s5, v5.4s
; CHECK-NEXT: addv s2, v2.4s
; CHECK-NEXT: addv s6, v6.4s
; CHECK-NEXT: mov v0.s[1], v1.s[0]
; CHECK-NEXT: addv s1, v3.4s
; CHECK-NEXT: addv s3, v7.4s
; CHECK-NEXT: mov v4.s[1], v5.s[0]
; CHECK-NEXT: mov v0.s[2], v2.s[0]
; CHECK-NEXT: mov v4.s[2], v6.s[0]
; CHECK-NEXT: mov v0.s[3], v1.s[0]
; CHECK-NEXT: mov v4.s[3], v3.s[0]
; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: shrn v0.4h, v0.4s, #16
; CHECK-NEXT: xtn v1.4h, v4.4s
; CHECK-NEXT: shrn v3.4h, v4.4s, #16
; CHECK-NEXT: uhadd v0.4h, v2.4h, v0.4h
; CHECK-NEXT: uhadd v1.4h, v1.4h, v3.4h
; CHECK-NEXT: uaddl v0.4s, v0.4h, v1.4h
; CHECK-NEXT: addv s0, v0.4s
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
  ; Eight independent horizontal adds, one per input vector.
  %l87 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
  %l174 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %b)
  %l257 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %c)
  %l340 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %d)
  %l427 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %e)
  %l514 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %f)
  %l597 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %g)
  %l680 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %h)
  ; Re-vectorize the eight scalar sums into a single <8 x i32>.
  %l681 = insertelement <8 x i32> poison, i32 %l87, i32 0
  %l682 = insertelement <8 x i32> %l681, i32 %l174, i32 1
  %l683 = insertelement <8 x i32> %l682, i32 %l257, i32 2
  %l684 = insertelement <8 x i32> %l683, i32 %l340, i32 3
  %l685 = insertelement <8 x i32> %l684, i32 %l427, i32 4
  %l686 = insertelement <8 x i32> %l685, i32 %l514, i32 5
  %l687 = insertelement <8 x i32> %l686, i32 %l597, i32 6
  %l688 = insertelement <8 x i32> %l687, i32 %l680, i32 7
  ; avg = ((x & 0xffff) + (x >> 16)) >> 1 — the halving-average pattern
  ; that should fold to uhadd on the truncated halves.
  %l689 = and <8 x i32> %l688, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
  %l690 = lshr <8 x i32> %l688, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %l691 = add nuw nsw <8 x i32> %l689, %l690
  %l692 = lshr <8 x i32> %l691, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %l693 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %l692)
  ret i32 %l693
}
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
| 56 | + |
; Builds a <16 x i16> from GPR arguments, then xors trunc(%v) with
; trunc(%v + %v) so both truncates stay live; checks the v16i8 truncs
; lower to uzp1 of the two 8h halves rather than per-lane extracts.
;
; NOTE(review): the insertelement chain appears broken from index 8 on —
; %i1..%p1 chain off %f1/%g1 instead of the preceding value, so the final
; %p1 only carries lanes 0-6 (and lane 15 = %p); args %h..%o are dead.
; The autogenerated CHECK lines match this IR as-is (only v0.h[1..6] moves
; plus a ld1r splat of the stack arg), so confirm intent before "fixing"
; the chain, and regenerate with update_llc_test_checks.py if changed.
define <16 x i8> @lower_trunc_16xi8(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i16 %g, i16 %h, i16 %i, i16 %j, i16 %k, i16 %l, i16 %m, i16 %n, i16 %o, i16 %p) {
; CHECK-LABEL: lower_trunc_16xi8:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov s0, w0
; CHECK-NEXT: add x8, sp, #56
; CHECK-NEXT: ld1r { v1.8h }, [x8]
; CHECK-NEXT: mov v0.h[1], w1
; CHECK-NEXT: add v3.8h, v1.8h, v1.8h
; CHECK-NEXT: mov v0.h[2], w2
; CHECK-NEXT: mov v0.h[3], w3
; CHECK-NEXT: mov v0.h[4], w4
; CHECK-NEXT: mov v0.h[5], w5
; CHECK-NEXT: mov v0.h[6], w6
; CHECK-NEXT: add v2.8h, v0.8h, v0.8h
; CHECK-NEXT: uzp1 v0.16b, v0.16b, v1.16b
; CHECK-NEXT: uzp1 v1.16b, v2.16b, v3.16b
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
  %a1 = insertelement <16 x i16> poison, i16 %a, i16 0
  %b1 = insertelement <16 x i16> %a1, i16 %b, i16 1
  %c1 = insertelement <16 x i16> %b1, i16 %c, i16 2
  %d1 = insertelement <16 x i16> %c1, i16 %d, i16 3
  %e1 = insertelement <16 x i16> %d1, i16 %e, i16 4
  %f1 = insertelement <16 x i16> %e1, i16 %f, i16 5
  %g1 = insertelement <16 x i16> %f1, i16 %g, i16 6
  %h1 = insertelement <16 x i16> %g1, i16 %h, i16 7
  ; From here the chain re-uses %f1/%g1 — see NOTE(review) above.
  %i1 = insertelement <16 x i16> %f1, i16 %i, i16 8
  %j1 = insertelement <16 x i16> %g1, i16 %j, i16 9
  %k1 = insertelement <16 x i16> %f1, i16 %k, i16 10
  %l1 = insertelement <16 x i16> %g1, i16 %l, i16 11
  %m1 = insertelement <16 x i16> %f1, i16 %m, i16 12
  %n1 = insertelement <16 x i16> %g1, i16 %n, i16 13
  %o1 = insertelement <16 x i16> %f1, i16 %o, i16 14
  %p1 = insertelement <16 x i16> %g1, i16 %p, i16 15
  %t = trunc <16 x i16> %p1 to <16 x i8>
  %s = add <16 x i16> %p1, %p1
  %t2 = trunc <16 x i16> %s to <16 x i8>
  %pro = xor <16 x i8> %t, %t2
  ret <16 x i8> %pro
}
| 97 | + |
; Builds a <8 x i32> from the eight GPR arguments, then xors trunc(%v)
; with trunc(%v + %v) so both truncates are kept; checks each v8i16 trunc
; lowers to a single uzp1 of the two 4s halves.
define <8 x i16> @lower_trunc_8xi16(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) {
; CHECK-LABEL: lower_trunc_8xi16:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov s0, w4
; CHECK-NEXT: fmov s1, w0
; CHECK-NEXT: mov v0.s[1], w5
; CHECK-NEXT: mov v1.s[1], w1
; CHECK-NEXT: mov v0.s[2], w6
; CHECK-NEXT: mov v1.s[2], w2
; CHECK-NEXT: mov v0.s[3], w7
; CHECK-NEXT: mov v1.s[3], w3
; CHECK-NEXT: add v2.4s, v0.4s, v0.4s
; CHECK-NEXT: add v3.4s, v1.4s, v1.4s
; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
; CHECK-NEXT: uzp1 v1.8h, v3.8h, v2.8h
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
  %a1 = insertelement <8 x i32> poison, i32 %a, i32 0
  %b1 = insertelement <8 x i32> %a1, i32 %b, i32 1
  %c1 = insertelement <8 x i32> %b1, i32 %c, i32 2
  %d1 = insertelement <8 x i32> %c1, i32 %d, i32 3
  %e1 = insertelement <8 x i32> %d1, i32 %e, i32 4
  %f1 = insertelement <8 x i32> %e1, i32 %f, i32 5
  %g1 = insertelement <8 x i32> %f1, i32 %g, i32 6
  %h1 = insertelement <8 x i32> %g1, i32 %h, i32 7
  %t = trunc <8 x i32> %h1 to <8 x i16>
  %s = add <8 x i32> %h1, %h1
  %t2 = trunc <8 x i32> %s to <8 x i16>
  %o = xor <8 x i16> %t, %t2
  ret <8 x i16> %o
}
| 129 | + |
; Builds a <4 x i64> from the four GPR arguments, then xors trunc(%v)
; with trunc(%v + %v); checks each v4i32 trunc lowers to a single uzp1
; of the two 2d halves.
define <4 x i32> @lower_trunc_4xi32(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: lower_trunc_4xi32:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov d0, x2
; CHECK-NEXT: fmov d1, x0
; CHECK-NEXT: mov v0.d[1], x3
; CHECK-NEXT: mov v1.d[1], x1
; CHECK-NEXT: add v2.2d, v0.2d, v0.2d
; CHECK-NEXT: add v3.2d, v1.2d, v1.2d
; CHECK-NEXT: uzp1 v0.4s, v1.4s, v0.4s
; CHECK-NEXT: uzp1 v1.4s, v3.4s, v2.4s
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
  %a1 = insertelement <4 x i64> poison, i64 %a, i64 0
  %b1 = insertelement <4 x i64> %a1, i64 %b, i64 1
  %c1 = insertelement <4 x i64> %b1, i64 %c, i64 2
  %d1 = insertelement <4 x i64> %c1, i64 %d, i64 3
  %t = trunc <4 x i64> %d1 to <4 x i32>
  %s = add <4 x i64> %d1, %d1
  %t2 = trunc <4 x i64> %s to <4 x i32>
  %o = xor <4 x i32> %t, %t2
  ret <4 x i32> %o
}
| 153 | + |
; Builds a <8 x i64> from the eight GPR arguments, then xors trunc(%v)
; with trunc(%v + %v); the v8i32 result spans two q-registers, so each
; trunc should lower to a pair of uzp1 instructions (one per half).
define <8 x i32> @lower_trunc_8xi32(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i64 %h) {
; CHECK-LABEL: lower_trunc_8xi32:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov d0, x2
; CHECK-NEXT: fmov d1, x0
; CHECK-NEXT: fmov d2, x6
; CHECK-NEXT: fmov d3, x4
; CHECK-NEXT: mov v0.d[1], x3
; CHECK-NEXT: mov v1.d[1], x1
; CHECK-NEXT: mov v2.d[1], x7
; CHECK-NEXT: mov v3.d[1], x5
; CHECK-NEXT: add v4.2d, v0.2d, v0.2d
; CHECK-NEXT: add v5.2d, v1.2d, v1.2d
; CHECK-NEXT: add v6.2d, v2.2d, v2.2d
; CHECK-NEXT: add v7.2d, v3.2d, v3.2d
; CHECK-NEXT: uzp1 v2.4s, v3.4s, v2.4s
; CHECK-NEXT: uzp1 v0.4s, v1.4s, v0.4s
; CHECK-NEXT: uzp1 v3.4s, v5.4s, v4.4s
; CHECK-NEXT: uzp1 v1.4s, v7.4s, v6.4s
; CHECK-NEXT: eor v0.16b, v0.16b, v3.16b
; CHECK-NEXT: eor v1.16b, v2.16b, v1.16b
; CHECK-NEXT: ret
  %a1 = insertelement <8 x i64> poison, i64 %a, i64 0
  %b1 = insertelement <8 x i64> %a1, i64 %b, i64 1
  %c1 = insertelement <8 x i64> %b1, i64 %c, i64 2
  %d1 = insertelement <8 x i64> %c1, i64 %d, i64 3
  %e1 = insertelement <8 x i64> %d1, i64 %e, i64 4
  %f1 = insertelement <8 x i64> %e1, i64 %f, i64 5
  %g1 = insertelement <8 x i64> %f1, i64 %g, i64 6
  %h1 = insertelement <8 x i64> %g1, i64 %h, i64 7
  %t = trunc <8 x i64> %h1 to <8 x i32>
  %s = add <8 x i64> %h1, %h1
  %t2 = trunc <8 x i64> %s to <8 x i32>
  %o = xor <8 x i32> %t, %t2
  ret <8 x i32> %o
}
; (removed stray web-page residue "0 commit comments" — not part of the test file)