/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 for k_count, %5 for c_store */
/* r12 = k << 4 (const), r13 = k (const), r14 = b_head_pos (const), r15 = tmp */
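/* Note: A and B are packed panels of real floats while C is interleaved complex
   (re,im); each SAVE_* below duplicates the real accumulator into both slots of
   a complex pair and scales by ymm0/xmm0 = {alphar, alphai, ...}, so one call
   accumulates (alphar + alphai*I) * (A*B) into C. This matches the
   real-multiplication pass of a 3m-style complex GEMM. */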

#include "common.h"
#include <stdint.h>

// recommended settings: GEMM_P = 320, GEMM_Q = 320.
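/* GEMM_P and GEMM_Q are the driver-level m- and k-blocking parameters; the
   320x320 suggestion presumably sizes the packed panels to stay resident in
   the L2 cache of Haswell-class cores (assumed interpretation, not stated in
   the source). */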

/* m = 8 */ /* ymm0 for alpha, ymm1-ymm3 for temporary use, ymm4-ymm15 for accumulators */
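/* Naming scheme: KERNEL_k1mXnY executes one k-step of an X-row-by-Y-column
   micro-tile; the KERNEL_h_* variants leave the final b_pointer (%1) advance
   to the caller so wider kernels can be built on top of narrower ones.
   INIT_mXnY zeroes the accumulators; SAVE_mXnY applies alpha and writes C. */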
#define KERNEL_k1m8n1 \
    "vmovups (%0),%%ymm1; addq $32,%0;"\
    "vbroadcastss (%1),%%ymm2; vfmadd231ps %%ymm1,%%ymm2,%%ymm4;"\
    "addq $4,%1;"
#define KERNEL_h_k1m8n2 \
    "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2; addq $32,%0;"\
    "vbroadcastsd (%1),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;"
#define KERNEL_k1m8n2 KERNEL_h_k1m8n2 "addq $8,%1;"
#define KERNEL_h_k1m8n4 \
    KERNEL_h_k1m8n2 "vbroadcastsd 8(%1),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;"
#define KERNEL_k1m8n4 KERNEL_h_k1m8n4 "addq $16,%1;"
#define unit_kernel_k1m8n4(c1,c2,c3,c4,...) \
    "vbroadcastsd ("#__VA_ARGS__"),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,"#c1"; vfmadd231ps %%ymm2,%%ymm3,"#c2";"\
    "vbroadcastsd 8("#__VA_ARGS__"),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,"#c3"; vfmadd231ps %%ymm2,%%ymm3,"#c4";"
#define KERNEL_h_k1m8n8 KERNEL_h_k1m8n4 unit_kernel_k1m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,%1,%%r12,1)
#define KERNEL_k1m8n8 KERNEL_h_k1m8n8 "addq $16,%1;"
#define KERNEL_h_k1m8n12 KERNEL_h_k1m8n8 unit_kernel_k1m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,%1,%%r12,2)
#define KERNEL_k1m8n12 KERNEL_h_k1m8n12 "addq $16,%1;"
#define INIT_m8n1 "vpxor %%ymm4,%%ymm4,%%ymm4;"
#define INIT_m8n2 INIT_m8n1 "vpxor %%ymm5,%%ymm5,%%ymm5;"
#define INIT_m8n4 INIT_m8n2 "vpxor %%ymm6,%%ymm6,%%ymm6;vpxor %%ymm7,%%ymm7,%%ymm7;"
#define unit_init_m8n4(c1,c2,c3,c4) \
    "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";vpxor "#c3","#c3","#c3";vpxor "#c4","#c4","#c4";"
#define INIT_m8n8 INIT_m8n4 unit_init_m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11)
#define INIT_m8n12 INIT_m8n8 unit_init_m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15)
#define SAVE_m8n1 \
    "vunpcklps %%ymm4,%%ymm4,%%ymm2; vunpckhps %%ymm4,%%ymm4,%%ymm3;"\
    "vperm2f128 $2,%%ymm2,%%ymm3,%%ymm1; vperm2f128 $19,%%ymm2,%%ymm3,%%ymm2;"\
    "vfmadd213ps (%2),%%ymm0,%%ymm1; vfmadd213ps 32(%2),%%ymm0,%%ymm2; vmovups %%ymm1,(%2); vmovups %%ymm2,32(%2);"
#define unit_save_m8n2(c1,c2) \
    "vunpcklpd "#c2","#c1",%%ymm2; vunpckhpd "#c2","#c1",%%ymm3;"\
    "vperm2f128 $2,%%ymm2,%%ymm3,"#c1"; vperm2f128 $19,%%ymm2,%%ymm3,"#c2";"\
    "vmovsldup "#c1",%%ymm2; vmovsldup "#c2",%%ymm3;"\
    "vfmadd213ps (%5),%%ymm0,%%ymm2; vfmadd213ps 32(%5),%%ymm0,%%ymm3; vmovups %%ymm2,(%5); vmovups %%ymm3,32(%5);"\
    "vmovshdup "#c1",%%ymm2; vmovshdup "#c2",%%ymm3;"\
    "vfmadd213ps (%5,%3,1),%%ymm0,%%ymm2; vfmadd213ps 32(%5,%3,1),%%ymm0,%%ymm3; vmovups %%ymm2,(%5,%3,1); vmovups %%ymm3,32(%5,%3,1);"\
    "leaq (%5,%3,2),%5;"
#define SAVE_m8n2 "movq %2,%5;" unit_save_m8n2(%%ymm4,%%ymm5)
#define SAVE_m8n4 SAVE_m8n2 unit_save_m8n2(%%ymm6,%%ymm7)
#define SAVE_m8n8 SAVE_m8n4 unit_save_m8n2(%%ymm8,%%ymm9) unit_save_m8n2(%%ymm10,%%ymm11)
#define SAVE_m8n12 SAVE_m8n8 unit_save_m8n2(%%ymm12,%%ymm13) unit_save_m8n2(%%ymm14,%%ymm15)
#define COMPUTE_m8(ndim) \
    INIT_m8n##ndim\
    "movq %%r13,%4; movq %%r14,%1; movq %2,%5; xorq %%r15,%%r15;"\
    "cmpq $24,%4; jb "#ndim"882f;"\
    #ndim"881:\n\t"\
    "cmpq $126,%%r15; movq $126,%%r15; cmoveq %3,%%r15;"\
    "prefetcht0 64(%1); prefetcht0 64(%1,%%r12,1); prefetcht0 64(%1,%%r12,2);"\
    "prefetcht0 512(%0);" KERNEL_k1m8n##ndim KERNEL_k1m8n##ndim\
    "prefetcht0 512(%0);" KERNEL_k1m8n##ndim KERNEL_k1m8n##ndim\
    "prefetcht1 (%5); leaq -63(%5,%%r15,1),%5;"\
    "prefetcht0 64(%1); prefetcht0 64(%1,%%r12,1); prefetcht0 64(%1,%%r12,2);"\
    "prefetcht0 512(%0);" KERNEL_k1m8n##ndim KERNEL_k1m8n##ndim\
    "prefetcht0 512(%0);" KERNEL_k1m8n##ndim KERNEL_k1m8n##ndim\
    "prefetcht1 (%8); addq $16,%8;"\
    "subq $8,%4; cmpq $24,%4; jnb "#ndim"881b;"\
    "movq %2,%5;"\
    #ndim"882:\n\t"\
    "testq %4,%4; jz "#ndim"883f;"\
    "prefetcht0 (%5); prefetcht0 63(%5); addq %3,%5;"\
    KERNEL_k1m8n##ndim\
    "decq %4; jmp "#ndim"882b;"\
    #ndim"883:\n\t"\
    "prefetcht0 (%%r14); prefetcht0 64(%%r14);"\
    SAVE_m8n##ndim "addq $64,%2;"
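/* COMPUTE_m8: label ndim881 is the main software-pipelined loop (8 k-steps per
   pass) that prefetches A (%0), all three packed-B slices, the next n-block of
   B (%8), and C; the cmove makes r15 alternate the C-prefetch step between 63
   and ldc-63 bytes, so both cache lines of each 64-byte C row segment get a
   prefetcht1. ndim882 drains the k remainder one step at a time, and ndim883
   prefetches the packed-B head (r14) for the next m-block before the save. */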

/* m = 4 */ /* xmm0 for alpha, xmm1-xmm3 for temporary use, xmm4-xmm15 for accumulators */
#define KERNEL_k1m4n1 \
    "vmovups (%0),%%xmm1; addq $16,%0;"\
    "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\
    "addq $4,%1;"
#define KERNEL_h_k1m4n2 \
    "vmovsldup (%0),%%xmm1; vmovshdup (%0),%%xmm2; addq $16,%0;"\
    "vmovddup (%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm4; vfmadd231ps %%xmm2,%%xmm3,%%xmm5;"
#define KERNEL_k1m4n2 KERNEL_h_k1m4n2 "addq $8,%1;"
#define KERNEL_h_k1m4n4 \
    KERNEL_h_k1m4n2 "vmovddup 8(%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm6; vfmadd231ps %%xmm2,%%xmm3,%%xmm7;"
#define KERNEL_k1m4n4 KERNEL_h_k1m4n4 "addq $16,%1;"
#define unit_kernel_k1m4n4(c1,c2,c3,c4,...) \
    "vmovddup ("#__VA_ARGS__"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,"#c1"; vfmadd231ps %%xmm2,%%xmm3,"#c2";"\
    "vmovddup 8("#__VA_ARGS__"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,"#c3"; vfmadd231ps %%xmm2,%%xmm3,"#c4";"
#define KERNEL_h_k1m4n8 KERNEL_h_k1m4n4 unit_kernel_k1m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11,%1,%%r12,1)
#define KERNEL_k1m4n8 KERNEL_h_k1m4n8 "addq $16,%1;"
#define KERNEL_h_k1m4n12 KERNEL_h_k1m4n8 unit_kernel_k1m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15,%1,%%r12,2)
#define KERNEL_k1m4n12 KERNEL_h_k1m4n12 "addq $16,%1;"
#define INIT_m4n1 "vpxor %%xmm4,%%xmm4,%%xmm4;"
#define INIT_m4n2 INIT_m4n1 "vpxor %%xmm5,%%xmm5,%%xmm5;"
#define INIT_m4n4 INIT_m4n2 "vpxor %%xmm6,%%xmm6,%%xmm6;vpxor %%xmm7,%%xmm7,%%xmm7;"
#define unit_init_m4n4(c1,c2,c3,c4) \
    "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";vpxor "#c3","#c3","#c3";vpxor "#c4","#c4","#c4";"
#define INIT_m4n8 INIT_m4n4 unit_init_m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11)
#define INIT_m4n12 INIT_m4n8 unit_init_m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15)
#define SAVE_m4n1 \
    "vunpcklps %%xmm4,%%xmm4,%%xmm2; vunpckhps %%xmm4,%%xmm4,%%xmm3;"\
    "vfmadd213ps (%2),%%xmm0,%%xmm2; vfmadd213ps 16(%2),%%xmm0,%%xmm3; vmovups %%xmm2,(%2); vmovups %%xmm3,16(%2);"
#define unit_save_m4n2(c1,c2) \
    "vunpcklpd "#c2","#c1",%%xmm2; vunpckhpd "#c2","#c1","#c2"; vmovapd %%xmm2,"#c1";"\
    "vmovsldup "#c1",%%xmm2; vmovsldup "#c2",%%xmm3;"\
    "vfmadd213ps (%5),%%xmm0,%%xmm2; vfmadd213ps 16(%5),%%xmm0,%%xmm3; vmovups %%xmm2,(%5); vmovups %%xmm3,16(%5);"\
    "vmovshdup "#c1",%%xmm2; vmovshdup "#c2",%%xmm3;"\
    "vfmadd213ps (%5,%3,1),%%xmm0,%%xmm2; vfmadd213ps 16(%5,%3,1),%%xmm0,%%xmm3; vmovups %%xmm2,(%5,%3,1); vmovups %%xmm3,16(%5,%3,1);"\
    "leaq (%5,%3,2),%5;"
#define SAVE_m4n2 "movq %2,%5;" unit_save_m4n2(%%xmm4,%%xmm5)
#define SAVE_m4n4 SAVE_m4n2 unit_save_m4n2(%%xmm6,%%xmm7)
#define SAVE_m4n8 SAVE_m4n4 unit_save_m4n2(%%xmm8,%%xmm9) unit_save_m4n2(%%xmm10,%%xmm11)
#define SAVE_m4n12 SAVE_m4n8 unit_save_m4n2(%%xmm12,%%xmm13) unit_save_m4n2(%%xmm14,%%xmm15)
#define COMPUTE_m4(ndim) \
    INIT_m4n##ndim\
    "movq %%r13,%4; movq %%r14,%1;"\
    #ndim"442:\n\t"\
    "testq %4,%4; jz "#ndim"443f;"\
    KERNEL_k1m4n##ndim\
    "decq %4; jmp "#ndim"442b;"\
    #ndim"443:\n\t"\
    SAVE_m4n##ndim "addq $32,%2;"
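/* The m=4 path (and the m=2/m=1 paths below) handles edge tiles that run at
   most once per n-block, so it uses a plain one-step k loop without the
   prefetch pipeline of the m=8 path. */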

/* m = 2 */ /* xmm0 for alpha, xmm1-xmm3 and xmm10 for temporary use, xmm4-xmm9 for accumulators */
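/* For m=2 the n >= 4 kernels below swap the load/broadcast roles: whole
   4-float rows of packed B are loaded with vmovups while individual A elements
   are broadcast, keeping one accumulator register per 1x4 row slice. */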
#define INIT_m2n1 "vpxor %%xmm4,%%xmm4,%%xmm4;"
#define KERNEL_k1m2n1 \
    "vmovsd (%0),%%xmm1; addq $8,%0;"\
    "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\
    "addq $4,%1;"
#define SAVE_m2n1 \
    "vunpcklps %%xmm4,%%xmm4,%%xmm1; vfmadd213ps (%2),%%xmm0,%%xmm1; vmovups %%xmm1,(%2);"
#define INIT_m2n2 INIT_m2n1 "vpxor %%xmm5,%%xmm5,%%xmm5;"
#define KERNEL_k1m2n2 \
    "vmovsd (%0),%%xmm1; addq $8,%0;"\
    "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\
    "vbroadcastss 4(%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm5;"\
    "addq $8,%1;"
#define SAVE_m2n2 SAVE_m2n1 \
    "vunpcklps %%xmm5,%%xmm5,%%xmm1; vfmadd213ps (%2,%3,1),%%xmm0,%%xmm1; vmovups %%xmm1,(%2,%3,1);"
#define INIT_m2n4 INIT_m2n2
#define INIT_m2n8 INIT_m2n4 "vpxor %%xmm6,%%xmm6,%%xmm6; vpxor %%xmm7,%%xmm7,%%xmm7;"
#define INIT_m2n12 INIT_m2n8 "vpxor %%xmm8,%%xmm8,%%xmm8; vpxor %%xmm9,%%xmm9,%%xmm9;"
#define KERNEL_k1m2n4 \
    "vmovups (%1),%%xmm3; addq $16,%1;"\
    "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\
    "vbroadcastss 4(%0),%%xmm2; vfmadd231ps %%xmm3,%%xmm2,%%xmm5;"\
    "addq $8,%0;"
#define KERNEL_k1m2n8 \
    "vmovups (%1),%%xmm3; vmovups (%1,%%r12,1),%%xmm2; addq $16,%1;"\
    "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4; vfmadd231ps %%xmm2,%%xmm1,%%xmm6;"\
    "vbroadcastss 4(%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm5; vfmadd231ps %%xmm2,%%xmm1,%%xmm7;"\
    "addq $8,%0;"
#define KERNEL_k1m2n12 \
    "vmovups (%1),%%xmm3; vmovups (%1,%%r12,1),%%xmm2; vmovups (%1,%%r12,2),%%xmm1; addq $16,%1;"\
    "vbroadcastss (%0),%%xmm10; vfmadd231ps %%xmm3,%%xmm10,%%xmm4; vfmadd231ps %%xmm2,%%xmm10,%%xmm6; vfmadd231ps %%xmm1,%%xmm10,%%xmm8;"\
    "vbroadcastss 4(%0),%%xmm10; vfmadd231ps %%xmm3,%%xmm10,%%xmm5; vfmadd231ps %%xmm2,%%xmm10,%%xmm7; vfmadd231ps %%xmm1,%%xmm10,%%xmm9;"\
    "addq $8,%0;"
#define unit_save_m2n4(c1,c2) \
    "vunpcklpd "#c2","#c1",%%xmm1; vunpckhpd "#c2","#c1",%%xmm2;"\
    "vmovsldup %%xmm1,%%xmm3; vfmadd213ps (%5),%%xmm0,%%xmm3; vmovups %%xmm3,(%5);"\
    "vmovshdup %%xmm1,%%xmm3; vfmadd213ps (%5,%3,1),%%xmm0,%%xmm3; vmovups %%xmm3,(%5,%3,1);"\
    "leaq (%5,%3,2),%5;"\
    "vmovsldup %%xmm2,%%xmm3; vfmadd213ps (%5),%%xmm0,%%xmm3; vmovups %%xmm3,(%5);"\
    "vmovshdup %%xmm2,%%xmm3; vfmadd213ps (%5,%3,1),%%xmm0,%%xmm3; vmovups %%xmm3,(%5,%3,1);"\
    "leaq (%5,%3,2),%5;"
#define SAVE_m2n4 "movq %2,%5;" unit_save_m2n4(%%xmm4,%%xmm5)
#define SAVE_m2n8 SAVE_m2n4 unit_save_m2n4(%%xmm6,%%xmm7)
#define SAVE_m2n12 SAVE_m2n8 unit_save_m2n4(%%xmm8,%%xmm9)
#define COMPUTE_m2(ndim) \
    INIT_m2n##ndim\
    "movq %%r13,%4; movq %%r14,%1;"\
    #ndim"222:\n\t"\
    "testq %4,%4; jz "#ndim"223f;"\
    KERNEL_k1m2n##ndim\
    "decq %4; jmp "#ndim"222b;"\
    #ndim"223:\n\t"\
    SAVE_m2n##ndim "addq $16,%2;"

/* m = 1 */ /* xmm0 for alpha, xmm1-xmm3 and xmm10 for temporary use, xmm4-xmm6 for accumulators */
#define INIT_m1n1 "vpxor %%xmm4,%%xmm4,%%xmm4;"
#define KERNEL_k1m1n1 \
    "vmovss (%1),%%xmm3; addq $4,%1;"\
    "vmovss (%0),%%xmm1; vfmadd231ss %%xmm3,%%xmm1,%%xmm4;"\
    "addq $4,%0;"
#define SAVE_m1n1 \
    "vunpcklps %%xmm4,%%xmm4,%%xmm4; vmovsd (%2),%%xmm1; vfmadd213ps %%xmm1,%%xmm0,%%xmm4; vmovsd %%xmm4,(%2);"
#define INIT_m1n2 INIT_m1n1
#define KERNEL_k1m1n2 \
    "vmovsd (%1),%%xmm3; addq $8,%1;"\
    "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\
    "addq $4,%0;"
#define SAVE_m1n2 \
    "vunpcklps %%xmm4,%%xmm4,%%xmm4; vmovsd (%2),%%xmm3; vmovhpd (%2,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm4;"\
    "vmovsd %%xmm4,(%2); vmovhpd %%xmm4,(%2,%3,1);"
#define INIT_m1n4 INIT_m1n2
#define INIT_m1n8 INIT_m1n4 "vpxor %%xmm5,%%xmm5,%%xmm5;"
#define INIT_m1n12 INIT_m1n8 "vpxor %%xmm6,%%xmm6,%%xmm6;"
#define KERNEL_k1m1n4 \
    "vmovups (%1),%%xmm3; addq $16,%1;"\
    "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\
    "addq $4,%0;"
#define KERNEL_k1m1n8 \
    "vmovups (%1),%%xmm3; vmovups (%1,%%r12,1),%%xmm2; addq $16,%1;"\
    "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4; vfmadd231ps %%xmm2,%%xmm1,%%xmm5;"\
    "addq $4,%0;"
#define KERNEL_k1m1n12 \
    "vmovups (%1),%%xmm3; vmovups (%1,%%r12,1),%%xmm2; vmovups (%1,%%r12,2),%%xmm1; addq $16,%1;"\
    "vbroadcastss (%0),%%xmm10; vfmadd231ps %%xmm3,%%xmm10,%%xmm4; vfmadd231ps %%xmm2,%%xmm10,%%xmm5; vfmadd231ps %%xmm1,%%xmm10,%%xmm6;"\
    "addq $4,%0;"
#define unit_save_m1n4(c1) \
    "vunpcklps "#c1","#c1",%%xmm1; vunpckhps "#c1","#c1",%%xmm2;"\
    "vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm1;"\
    "vmovsd %%xmm1,(%5); vmovhpd %%xmm1,(%5,%3,1); leaq (%5,%3,2),%5;"\
    "vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm2;"\
    "vmovsd %%xmm2,(%5); vmovhpd %%xmm2,(%5,%3,1); leaq (%5,%3,2),%5;"
#define SAVE_m1n4 "movq %2,%5;" unit_save_m1n4(%%xmm4)
#define SAVE_m1n8 SAVE_m1n4 unit_save_m1n4(%%xmm5)
#define SAVE_m1n12 SAVE_m1n8 unit_save_m1n4(%%xmm6)
#define COMPUTE_m1(ndim) \
    INIT_m1n##ndim\
    "movq %%r13,%4; movq %%r14,%1;"\
    #ndim"112:\n\t"\
    "testq %4,%4; jz "#ndim"113f;"\
    KERNEL_k1m1n##ndim\
    "decq %4; jmp "#ndim"112b;"\
    #ndim"113:\n\t"\
    SAVE_m1n##ndim "addq $8,%2;"

/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 = "+r"(K), %5 = "+r"(ctemp) */
/* %6 = "+r"(&alpha), %7 = "+r"(M), %8 = "+r"(next_b) */
/* r11 = m (const), r12 = k << 4 (const), r13 = k (const), r14 = b_head_pos (const), r15 = tmp */
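/* COMPUTE(ndim) sweeps the whole m dimension for one ndim-wide block of
   columns: chunks of 8 rows first (label 33109), then 4/2/1 for the tail.
   The asm restores %1/%4/%7 from r14/r13/r11 on exit, so the C epilogue only
   rewinds a_pointer, advances b_pointer past the consumed block, and steps
   c_pointer to the next column block (the factor 2 converts complex elements
   to floats). */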

#define COMPUTE(ndim) {\
    next_b = b_pointer + ndim * K;\
    __asm__ __volatile__(\
    "vbroadcastsd (%6),%%ymm0;"\
    "movq %4,%%r13; movq %4,%%r12; salq $4,%%r12; movq %1,%%r14; movq %7,%%r11;"\
    "cmpq $8,%7;jb 33101"#ndim"f;"\
    "33109"#ndim":\n\t"\
    COMPUTE_m8(ndim)\
    "subq $8,%7;cmpq $8,%7;jnb 33109"#ndim"b;"\
    "33101"#ndim":\n\t"\
    "cmpq $4,%7;jb 33103"#ndim"f;"\
    COMPUTE_m4(ndim)\
    "subq $4,%7;"\
    "33103"#ndim":\n\t"\
    "cmpq $2,%7;jb 33104"#ndim"f;"\
    COMPUTE_m2(ndim)\
    "subq $2,%7;"\
    "33104"#ndim":\n\t"\
    "testq %7,%7;jz 33105"#ndim"f;"\
    COMPUTE_m1(ndim)\
    "33105"#ndim":\n\t"\
    "movq %%r13,%4; movq %%r14,%1; movq %%r11,%7;"\
    :"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(ldc_in_bytes),"+r"(K),"+r"(ctemp),"+r"(const_val),"+r"(M),"+r"(next_b)\
    ::"r11","r12","r13","r14","r15",\
    "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15","cc","memory");\
    a_pointer -= M * K; b_pointer += ndim * K; c_pointer += 2*(LDC * ndim - M);\
}

int __attribute__ ((noinline))
CNAME(BLASLONG m, BLASLONG n, BLASLONG k, float alphar, float alphai, float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, BLASLONG LDC)
{
    if(m==0||n==0||k==0) return 0;
    int64_t ldc_in_bytes = (int64_t)LDC * sizeof(float) * 2;
    float constval[2]; constval[0] = alphar; constval[1] = alphai;
    float *const_val=constval;
    int64_t M = (int64_t)m, K = (int64_t)k;
    BLASLONG n_count = n;
    float *a_pointer = A,*b_pointer = B,*c_pointer = C,*ctemp = C,*next_b = B;
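    /* Column blocking: consume 12 columns per COMPUTE while possible, then
       8/4/2/1 for the remainder; every call re-sweeps the packed A panel. */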
    for(;n_count>11;n_count-=12) COMPUTE(12)
    for(;n_count>7;n_count-=8) COMPUTE(8)
    for(;n_count>3;n_count-=4) COMPUTE(4)
    for(;n_count>1;n_count-=2) COMPUTE(2)
    if(n_count>0) COMPUTE(1)
    return 0;
}