/***************************************************************************
 * Copyright (c) 2022, The OpenBLAS Project
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the OpenBLAS project nor the names of
 *    its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 ***************************************************************************/

#include <arm_sve.h>
#include "common.h"
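
// SBGEMM kernel built around the SVE BFMMLA instruction: each BFMMLA takes a
// 2x4 bf16 tile of A and a 2x4 bf16 tile of B (used transposed) and
// accumulates their product into a 2x2 f32 tile. The code below assumes a
// 128-bit SVE vector length (e.g. Neoverse N2), so one vector holds exactly
// one 8-element bf16 tile or one 4-element f32 tile.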

int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B,
          FLOAT *C, BLASLONG ldc) {
  // Pad the problem dimensions up to the BFMMLA tile shape: k to a multiple
  // of 4, m and n to multiples of 2 (the packed A and B panels are assumed
  // to be zero-padded to these sizes).
  BLASLONG padk = (k + 3) & ~3;
  BLASLONG padm = (m + 1) & ~1;
  BLASLONG padn = (n + 1) & ~1;
  FLOAT *RC = (FLOAT *) calloc(padm * padn, sizeof(FLOAT));
  BLASLONG nldc = padm;
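  // RC is a zero-initialized staging buffer that collects the padded result
  // in column-major order with leading dimension nldc = padm; the writeback
  // loop at the end folds it into the user's C as C += alpha * RC.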

  IFLOAT *ptr_a = A;
  IFLOAT *ptr_b = B;
  FLOAT *ptr_c = RC;

  IFLOAT *ptr_a0, *ptr_a1, *ptr_a2, *ptr_a3;
  IFLOAT *ptr_b0, *ptr_b1;
  FLOAT *ptr_c00, *ptr_c10, *ptr_c20, *ptr_c30, *ptr_c01, *ptr_c11, *ptr_c21, *ptr_c31;

  svbfloat16_t ma0, ma1, ma2, ma3, mb0, mb1;
  svfloat32_t mc00, mc01, mc10, mc11, mc20, mc21, mc30, mc31;
  svbool_t pg16 = svptrue_b16();
  svbool_t pg32 = svptrue_b32();
  svfloat32_t svalpha = svdup_f32(alpha);

  uint32_t off_c[] = {0, (uint32_t) nldc, 1, (uint32_t) nldc + 1}; // 00 01 10 11
  svuint32_t off_vc = svld1_u32(pg32, off_c);
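  // off_vc spreads the four lanes of a 2x2 BFMMLA result tile (held row-major
  // in the vector as c00 c01 c10 c11, matching the "00 01 10 11" note above)
  // across two adjacent columns of the column-major RC buffer in one scatter
  // store.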

  for (BLASLONG j = 0; j < padn/4; j++) {
    ptr_c00 = ptr_c;
    ptr_c10 = ptr_c00 + 2;
    ptr_c20 = ptr_c10 + 2;
    ptr_c30 = ptr_c20 + 2;
    ptr_c01 = ptr_c + 2 * nldc;
    ptr_c11 = ptr_c01 + 2;
    ptr_c21 = ptr_c11 + 2;
    ptr_c31 = ptr_c21 + 2;
    ptr_c += 4 * nldc;

    ptr_a = A;

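    // Main block: 8 rows of A (four 2-row sub-panels) against 4 columns of B
    // (two 2-column sub-panels), giving eight 2x2 accumulator tiles per
    // iteration.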
    for (BLASLONG i = 0; i < padm/8; i++) {
      ptr_a0 = ptr_a;
      ptr_a1 = ptr_a0 + 2 * padk;
      ptr_a2 = ptr_a1 + 2 * padk;
      ptr_a3 = ptr_a2 + 2 * padk;
      ptr_a += 8 * padk;

      ptr_b0 = ptr_b;
      ptr_b1 = ptr_b0 + 2 * padk;

      mc00 = svdup_f32(0); mc01 = svdup_f32(0);
      mc10 = svdup_f32(0); mc11 = svdup_f32(0);
      mc20 = svdup_f32(0); mc21 = svdup_f32(0);
      mc30 = svdup_f32(0); mc31 = svdup_f32(0);

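      // Each iteration loads one 2x4 bf16 tile (8 elements) from every
      // sub-panel and advances 4 steps along k.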
      for (BLASLONG p = 0; p < padk/4; p++) {
        ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0);
        ma1 = svld1_bf16(pg16, (bfloat16_t *) ptr_a1);
        ma2 = svld1_bf16(pg16, (bfloat16_t *) ptr_a2);
        ma3 = svld1_bf16(pg16, (bfloat16_t *) ptr_a3);
        mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0);
        mb1 = svld1_bf16(pg16, (bfloat16_t *) ptr_b1);

        mc00 = svbfmmla(mc00, ma0, mb0);
        mc10 = svbfmmla(mc10, ma1, mb0);
        mc20 = svbfmmla(mc20, ma2, mb0);
        mc30 = svbfmmla(mc30, ma3, mb0);
        mc01 = svbfmmla(mc01, ma0, mb1);
        mc11 = svbfmmla(mc11, ma1, mb1);
        mc21 = svbfmmla(mc21, ma2, mb1);
        mc31 = svbfmmla(mc31, ma3, mb1);

        ptr_a0 += 8;
        ptr_a1 += 8;
        ptr_a2 += 8;
        ptr_a3 += 8;
        ptr_b0 += 8;
        ptr_b1 += 8;
      }
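      // Scatter the eight 2x2 accumulator tiles to their positions in the
      // 8x4 block of RC.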
      svst1_scatter_index(pg32, ptr_c00, off_vc, mc00);
      svst1_scatter_index(pg32, ptr_c10, off_vc, mc10);
      svst1_scatter_index(pg32, ptr_c20, off_vc, mc20);
      svst1_scatter_index(pg32, ptr_c30, off_vc, mc30);
      svst1_scatter_index(pg32, ptr_c01, off_vc, mc01);
      svst1_scatter_index(pg32, ptr_c11, off_vc, mc11);
      svst1_scatter_index(pg32, ptr_c21, off_vc, mc21);
      svst1_scatter_index(pg32, ptr_c31, off_vc, mc31);

      ptr_c00 += 8;
      ptr_c10 += 8;
      ptr_c20 += 8;
      ptr_c30 += 8;
      ptr_c01 += 8;
      ptr_c11 += 8;
      ptr_c21 += 8;
      ptr_c31 += 8;
    }

    if (padm & 4) {
      // m tail: 4 remaining rows (or the first 4 of 6), i.e. two 2-row
      // sub-panels against the same two 2-column B sub-panels.
      ptr_a0 = ptr_a;
      ptr_a1 = ptr_a0 + 2 * padk;
      ptr_a += 4 * padk;

      ptr_b0 = ptr_b;
      ptr_b1 = ptr_b0 + 2 * padk;

      mc00 = svdup_f32(0); mc01 = svdup_f32(0);
      mc10 = svdup_f32(0); mc11 = svdup_f32(0);
      for (BLASLONG p = 0; p < padk/4; p++) {
        ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0);
        ma1 = svld1_bf16(pg16, (bfloat16_t *) ptr_a1);
        mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0);
        mb1 = svld1_bf16(pg16, (bfloat16_t *) ptr_b1);

        mc00 = svbfmmla(mc00, ma0, mb0);
        mc10 = svbfmmla(mc10, ma1, mb0);
        mc01 = svbfmmla(mc01, ma0, mb1);
        mc11 = svbfmmla(mc11, ma1, mb1);

        ptr_a0 += 8;
        ptr_a1 += 8;
        ptr_b0 += 8;
        ptr_b1 += 8;
      }
      svst1_scatter_index(pg32, ptr_c00, off_vc, mc00);
      svst1_scatter_index(pg32, ptr_c10, off_vc, mc10);
      svst1_scatter_index(pg32, ptr_c01, off_vc, mc01);
      svst1_scatter_index(pg32, ptr_c11, off_vc, mc11);

      ptr_c00 += 4;
      ptr_c10 += 4;
      ptr_c01 += 4;
      ptr_c11 += 4;
    }

    if (padm & 2) {
      // m tail: 2 remaining rows, a single 2-row sub-panel.
      ptr_a0 = ptr_a;

      ptr_b0 = ptr_b;
      ptr_b1 = ptr_b0 + 2 * padk;

      mc00 = svdup_f32(0); mc01 = svdup_f32(0);
      for (BLASLONG p = 0; p < padk/4; p++) {
        ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0);
        mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0);
        mb1 = svld1_bf16(pg16, (bfloat16_t *) ptr_b1);
        mc00 = svbfmmla(mc00, ma0, mb0);
        mc01 = svbfmmla(mc01, ma0, mb1);
        ptr_a0 += 8;
        ptr_b0 += 8;
        ptr_b1 += 8;
      }
      svst1_scatter_index(pg32, ptr_c00, off_vc, mc00);
      svst1_scatter_index(pg32, ptr_c01, off_vc, mc01);
      ptr_c00 += 2;
      ptr_c01 += 2;
    }

    ptr_b += 4 * padk;

  }

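  // n tail: 2 remaining columns, a single 2-column B sub-panel, with the
  // same 8/4/2-row m blocking as above.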
  if (padn & 2) {
    ptr_c00 = ptr_c;
    ptr_c10 = ptr_c00 + 2;
    ptr_c20 = ptr_c10 + 2;
    ptr_c30 = ptr_c20 + 2;
    ptr_c += 2 * nldc;

    ptr_a = A;

    for (BLASLONG i = 0; i < padm/8; i++) {
      ptr_a0 = ptr_a;
      ptr_a1 = ptr_a0 + 2 * padk;
      ptr_a2 = ptr_a1 + 2 * padk;
      ptr_a3 = ptr_a2 + 2 * padk;
      ptr_a += 8 * padk;

      ptr_b0 = ptr_b;

      mc00 = svdup_f32(0);
      mc10 = svdup_f32(0);
      mc20 = svdup_f32(0);
      mc30 = svdup_f32(0);

      for (BLASLONG p = 0; p < padk/4; p++) {
        ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0);
        ma1 = svld1_bf16(pg16, (bfloat16_t *) ptr_a1);
        ma2 = svld1_bf16(pg16, (bfloat16_t *) ptr_a2);
        ma3 = svld1_bf16(pg16, (bfloat16_t *) ptr_a3);
        mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0);
        mc00 = svbfmmla(mc00, ma0, mb0);
        mc10 = svbfmmla(mc10, ma1, mb0);
        mc20 = svbfmmla(mc20, ma2, mb0);
        mc30 = svbfmmla(mc30, ma3, mb0);
        ptr_a0 += 8;
        ptr_a1 += 8;
        ptr_a2 += 8;
        ptr_a3 += 8;
        ptr_b0 += 8;
      }
      svst1_scatter_index(pg32, ptr_c00, off_vc, mc00);
      svst1_scatter_index(pg32, ptr_c10, off_vc, mc10);
      svst1_scatter_index(pg32, ptr_c20, off_vc, mc20);
      svst1_scatter_index(pg32, ptr_c30, off_vc, mc30);
      ptr_c00 += 8;
      ptr_c10 += 8;
      ptr_c20 += 8;
      ptr_c30 += 8;
    }

    if (padm & 4) {
      // m tail: 4 remaining rows.
      ptr_a0 = ptr_a;
      ptr_a1 = ptr_a0 + 2 * padk;
      ptr_a += 4 * padk;

      ptr_b0 = ptr_b;

      mc00 = svdup_f32(0);
      mc10 = svdup_f32(0);
      for (BLASLONG p = 0; p < padk/4; p++) {
        ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0);
        ma1 = svld1_bf16(pg16, (bfloat16_t *) ptr_a1);
        mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0);
        mc00 = svbfmmla(mc00, ma0, mb0);
        mc10 = svbfmmla(mc10, ma1, mb0);
        ptr_a0 += 8;
        ptr_a1 += 8;
        ptr_b0 += 8;
      }
      svst1_scatter_index(pg32, ptr_c00, off_vc, mc00);
      svst1_scatter_index(pg32, ptr_c10, off_vc, mc10);
      ptr_c00 += 4;
      ptr_c10 += 4;
    }

    if (padm & 2) {
      // m tail: 2 remaining rows.
      ptr_a0 = ptr_a;
      ptr_a += 2 * padk;
      ptr_b0 = ptr_b;
      mc00 = svdup_f32(0);
      for (BLASLONG p = 0; p < padk/4; p++) {
        ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0);
        mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0);
        mc00 = svbfmmla(mc00, ma0, mb0);
        ptr_a0 += 8;
        ptr_b0 += 8;
      }
      svst1_scatter_index(pg32, ptr_c00, off_vc, mc00);
      ptr_c00 += 2;
    }

    ptr_b += 2 * padk;
  }

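  // Writeback: fold the padded staging buffer into the user's C as
  // C[i][j] += alpha * RC[i][j], four floats at a time with a scalar loop
  // for the m % 4 leftovers.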
  FLOAT *org_c = C;
  FLOAT *raw_c = RC;
  FLOAT *org_c0, *raw_c0;
  svfloat32_t org_vc0, raw_vc0;
  for (BLASLONG j = 0; j < n; j++) {
    org_c0 = org_c;
    raw_c0 = raw_c;
    org_c += ldc;
    raw_c += nldc;
    BLASLONG i;
    for (i = 0; i < m/4; i++) {
      org_vc0 = svld1_f32(pg32, org_c0);
      raw_vc0 = svld1_f32(pg32, raw_c0);
      org_vc0 = svmad_z(pg32, svalpha, raw_vc0, org_vc0); // alpha * raw + org (raw holds the A*B product)
      svst1_f32(pg32, org_c0, org_vc0);
      org_c0 += 4;
      raw_c0 += 4;
    }
    for (i = 0; i < (m & 3); i++) {
      *org_c0 += alpha * (*raw_c0);
      org_c0++;
      raw_c0++;
    }
  }
  free(RC); // release the staging buffer allocated above
  return 0;
}