Commit 0ae49d2

Merge pull request #2344 from wjc404/develop

Optimize AVX2 ZGEMM

2 parents 31d6c2e + 105e26e

3 files changed: +243 -3 lines changed

kernel/x86_64/KERNEL.HASWELL

Lines changed: 1 addition & 1 deletion

@@ -67,7 +67,7 @@ CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX)
 CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX)

 ZTRMMKERNEL = zgemm_kernel_4x2_haswell.S
-ZGEMMKERNEL = zgemm_kernel_4x2_haswell.S
+ZGEMMKERNEL = zgemm_kernel_4x2_haswell.c
 ZGEMMINCOPY = ../generic/zgemm_ncopy_4.c
 ZGEMMITCOPY = ../generic/zgemm_tcopy_4.c
 ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c

kernel/x86_64/zgemm_kernel_4x2_haswell.c (new file)

Lines changed: 240 additions & 0 deletions

@@ -0,0 +1,240 @@
#include "common.h"
#include <stdint.h>

/* recommended settings: GEMM_P = 192, GEMM_Q = 192 */

#if defined(NN) || defined(NT) || defined(TN) || defined(TT)
#define A_CONJ 0
#define B_CONJ 0
#endif
#if defined(RN) || defined(RT) || defined(CN) || defined(CT)
#define A_CONJ 1
#define B_CONJ 0
#endif
#if defined(NR) || defined(NC) || defined(TR) || defined(TC)
#define A_CONJ 0
#define B_CONJ 1
#endif
#if defined(RR) || defined(RC) || defined(CR) || defined(CC)
#define A_CONJ 1
#define B_CONJ 1
#endif

/* %0 = a_ptr, %1 = b_ptr, %2 = c_ptr, %3 = c_tmp, %4 = ldc(bytes), %5 = k_counter, %6 = &alpha, %7 = m_counter, %8 = b_pref */
/* r11 = m, r12 = k << 5, r13 = k, r14 = b_head, r15 = temp */

/* m=4, ymm 0-3 temp, ymm 4-15 acc */
#if A_CONJ == B_CONJ
#define acc_m2n1_exp(ar,ai,b2,cl,cr) "vfmadd231pd %%ymm"#ar",%%ymm"#b2",%%ymm"#cl"; vfmadd231pd %%ymm"#ai",%%ymm"#b2",%%ymm"#cr";"
#define acc_m4n1_con(ua,la,b1,uc,lc) "vfmaddsub231pd %%ymm"#ua",%%ymm"#b1",%%ymm"#uc"; vfmaddsub231pd %%ymm"#la",%%ymm"#b1",%%ymm"#lc";"
#else
#define acc_m2n1_exp(ar,ai,b2,cl,cr) "vfmadd231pd %%ymm"#ar",%%ymm"#b2",%%ymm"#cl"; vfnmadd231pd %%ymm"#ai",%%ymm"#b2",%%ymm"#cr";"
#define acc_m4n1_con(ua,la,b1,uc,lc) "vfmsubadd231pd %%ymm"#ua",%%ymm"#b1",%%ymm"#uc"; vfmsubadd231pd %%ymm"#la",%%ymm"#b1",%%ymm"#lc";"
#endif
/* expanded accumulators for m4n1 and m4n2 */
#define KERNEL_k1m4n1 \
"vbroadcastf128 (%1),%%ymm0; addq $16,%1;"\
"vmovddup (%0),%%ymm1; vmovddup 8(%0),%%ymm2;" acc_m2n1_exp(1,2,0,4,5)\
"vmovddup 32(%0),%%ymm1; vmovddup 40(%0),%%ymm2;" acc_m2n1_exp(1,2,0,6,7)\
"addq $64,%0;"
#define KERNEL_k1m4n2 \
"vbroadcastf128 (%1),%%ymm0; vbroadcastf128 16(%1),%%ymm1; addq $32,%1;"\
"vmovddup (%0),%%ymm2; vmovddup 8(%0),%%ymm3;" acc_m2n1_exp(2,3,0,4,5) acc_m2n1_exp(2,3,1,8,9)\
"vmovddup 32(%0),%%ymm2; vmovddup 40(%0),%%ymm3;" acc_m2n1_exp(2,3,0,6,7) acc_m2n1_exp(2,3,1,10,11)\
"addq $64,%0;"
/* contracted accumulators for m4n4 and m4n6 */
#define acc_m4n2_con(ua,la,luc,llc,ruc,rlc,lboff,rboff,...) \
"vbroadcastsd "#lboff"("#__VA_ARGS__"),%%ymm2;" acc_m4n1_con(ua,la,2,luc,llc)\
"vbroadcastsd "#rboff"("#__VA_ARGS__"),%%ymm3;" acc_m4n1_con(ua,la,3,ruc,rlc)
#define KERNEL_1_k1m4n4 \
"vmovupd (%0),%%ymm0; vmovupd 32(%0),%%ymm1; prefetcht0 512(%0); addq $64,%0;"\
acc_m4n2_con(0,1,4,5,6,7,0,16,%1) acc_m4n2_con(0,1,8,9,10,11,0,16,%1,%%r12,1)
#define KERNEL_2_k1m4n4 \
"vpermilpd $5,%%ymm0,%%ymm0; vpermilpd $5,%%ymm1,%%ymm1;"\
acc_m4n2_con(0,1,4,5,6,7,8,24,%1) acc_m4n2_con(0,1,8,9,10,11,8,24,%1,%%r12,1)
#define KERNEL_1_k1m4n6 KERNEL_1_k1m4n4 acc_m4n2_con(0,1,12,13,14,15,0,16,%1,%%r12,2)
#define KERNEL_2_k1m4n6 KERNEL_2_k1m4n4 acc_m4n2_con(0,1,12,13,14,15,8,24,%1,%%r12,2)
#define KERNEL_k1m4n4 KERNEL_1_k1m4n4 KERNEL_2_k1m4n4 "addq $32,%1;"
#define KERNEL_k1m4n6 KERNEL_1_k1m4n6 KERNEL_2_k1m4n6 "addq $32,%1;"
#define zero_4ymm(no1,no2,no3,no4) \
"vpxor %%ymm"#no1",%%ymm"#no1",%%ymm"#no1"; vpxor %%ymm"#no2",%%ymm"#no2",%%ymm"#no2";"\
"vpxor %%ymm"#no3",%%ymm"#no3",%%ymm"#no3"; vpxor %%ymm"#no4",%%ymm"#no4",%%ymm"#no4";"
/* initialization and storage macros */
#define INIT_m4n1 zero_4ymm(4,5,6,7)
#define INIT_m4n2 zero_4ymm(4,5,6,7) zero_4ymm(8,9,10,11)
#define INIT_m4n4 zero_4ymm(4,5,6,7) zero_4ymm(8,9,10,11)
#define INIT_m4n6 INIT_m4n4 zero_4ymm(12,13,14,15)
#if A_CONJ == B_CONJ
#define cont_expacc(cl,cr,dst) "vpermilpd $5,%%ymm"#cr",%%ymm"#cr"; vaddsubpd %%ymm"#cl",%%ymm"#cr",%%ymm"#dst";"
#else
#define cont_expacc(cl,cr,dst) "vpermilpd $5,%%ymm"#cr",%%ymm"#cr"; vaddsubpd %%ymm"#cr",%%ymm"#cl",%%ymm"#dst";"
#endif
#if A_CONJ == 0
#define save_1ymm(c,tmp,off,alpr,alpi,...) \
"vpermilpd $5,%%ymm"#c",%%ymm"#tmp"; vfmsubadd213pd "#off"("#__VA_ARGS__"),%%ymm"#alpr",%%ymm"#c";"\
"vfmsubadd231pd %%ymm"#tmp",%%ymm"#alpi",%%ymm"#c"; vmovupd %%ymm"#c","#off"("#__VA_ARGS__");"
#else
#define save_1ymm(c,tmp,off,alpr,alpi,...) \
"vpermilpd $5,%%ymm"#c",%%ymm"#tmp"; vfmaddsub213pd "#off"("#__VA_ARGS__"),%%ymm"#alpi",%%ymm"#tmp";"\
"vfmaddsub231pd %%ymm"#c",%%ymm"#alpr",%%ymm"#tmp"; vmovupd %%ymm"#tmp","#off"("#__VA_ARGS__");"
#endif
#define save_init_m4 "movq %2,%3; addq $64,%2; vbroadcastsd (%6),%%ymm0; vbroadcastsd 8(%6),%%ymm1;"
#define SAVE_m4n1 save_init_m4 cont_expacc(4,5,4) cont_expacc(6,7,6) save_1ymm(4,2,0,0,1,%3) save_1ymm(6,3,32,0,1,%3)
#define SAVE_m4n2 SAVE_m4n1\
cont_expacc(8,9,8) cont_expacc(10,11,10) save_1ymm(8,2,0,0,1,%3,%4,1) save_1ymm(10,3,32,0,1,%3,%4,1)
#define SAVE_m4n4 save_init_m4\
save_1ymm(4,2,0,0,1,%3) save_1ymm(5,3,32,0,1,%3) save_1ymm(6,2,0,0,1,%3,%4,1) save_1ymm(7,3,32,0,1,%3,%4,1) "leaq (%3,%4,2),%3;"\
save_1ymm(8,2,0,0,1,%3) save_1ymm(9,3,32,0,1,%3) save_1ymm(10,2,0,0,1,%3,%4,1) save_1ymm(11,3,32,0,1,%3,%4,1)
#define SAVE_m4n6 SAVE_m4n4 "leaq (%3,%4,2),%3;"\
save_1ymm(12,2,0,0,1,%3) save_1ymm(13,3,32,0,1,%3) save_1ymm(14,2,0,0,1,%3,%4,1) save_1ymm(15,3,32,0,1,%3,%4,1)
#define COMPUTE_m4(ndim) \
"movq %%r14,%1;" INIT_m4n##ndim "movq %2,%3; movq %%r13,%5;"\
"testq %5,%5; jz "#ndim"4443f; cmpq $10,%5; jb "#ndim"4442f;"\
"movq $10,%5; movq $84,%%r15;"\
#ndim"4441:\n\t"\
"prefetcht1 (%3); subq $63,%3; addq %%r15,%3;"\
"prefetcht0 96(%1); prefetcht0 96(%1,%%r12,1); prefetcht0 96(%1,%%r12,2);" KERNEL_k1m4n##ndim KERNEL_k1m4n##ndim\
"testq $12,%5; movq $84,%%r15; cmovz %4,%%r15; prefetcht1 (%8); addq $16,%8;"\
"prefetcht0 96(%1); prefetcht0 96(%1,%%r12,1); prefetcht0 96(%1,%%r12,2);" KERNEL_k1m4n##ndim KERNEL_k1m4n##ndim\
"addq $4,%5; cmpq %5,%%r13; jnb "#ndim"4441b;"\
"movq %2,%3; negq %5; leaq 10(%%r13,%5,1),%5; prefetcht0 (%6); prefetcht0 15(%6);"\
#ndim"4442:\n\t"\
"prefetcht0 (%3); prefetcht0 63(%3); addq %4,%3;"\
KERNEL_k1m4n##ndim "decq %5; jnz "#ndim"4442b;"\
#ndim"4443:\n\t"\
"prefetcht0 (%%r14); prefetcht0 64(%%r14);" SAVE_m4n##ndim

/* m=2, ymm 0-3 temp, ymm 4-15 acc, expanded accumulators */
#define KERNEL_k1m2n1 \
"vmovddup (%0),%%ymm1; vmovddup 8(%0),%%ymm2; addq $32,%0;"\
"vbroadcastf128 (%1),%%ymm0;" acc_m2n1_exp(1,2,0,4,5) "addq $16,%1;"
#define acc_m2n2_exp(c1l,c1r,c2l,c2r,...) \
"vbroadcastf128 ("#__VA_ARGS__"),%%ymm2;" acc_m2n1_exp(0,1,2,c1l,c1r)\
"vbroadcastf128 16("#__VA_ARGS__"),%%ymm3;" acc_m2n1_exp(0,1,3,c2l,c2r)
#define KERNEL_h_k1m2n2 \
"vmovddup (%0),%%ymm0; vmovddup 8(%0),%%ymm1; addq $32,%0;" acc_m2n2_exp(4,5,6,7,%1)
#define KERNEL_h_k1m2n4 KERNEL_h_k1m2n2 acc_m2n2_exp(8,9,10,11,%1,%%r12,1)
#define KERNEL_h_k1m2n6 KERNEL_h_k1m2n4 acc_m2n2_exp(12,13,14,15,%1,%%r12,2)
#define KERNEL_k1m2n2 KERNEL_h_k1m2n2 "addq $32,%1;"
#define KERNEL_k1m2n4 KERNEL_h_k1m2n4 "addq $32,%1;"
#define KERNEL_k1m2n6 KERNEL_h_k1m2n6 "addq $32,%1;"
#define INIT_m2n1 "vpxor %%ymm4,%%ymm4,%%ymm4; vpxor %%ymm5,%%ymm5,%%ymm5;"
#define INIT_m2n2 zero_4ymm(4,5,6,7)
#define INIT_m2n4 INIT_m2n2 zero_4ymm(8,9,10,11)
#define INIT_m2n6 INIT_m2n4 zero_4ymm(12,13,14,15)
#define save_init_m2 "movq %2,%3; addq $32,%2; vbroadcastsd (%6),%%ymm0; vbroadcastsd 8(%6),%%ymm1;"
#define SAVE_m2n1 save_init_m2 cont_expacc(4,5,4) save_1ymm(4,2,0,0,1,%3)
#define SAVE_m2n2 SAVE_m2n1 cont_expacc(6,7,6) save_1ymm(6,3,0,0,1,%3,%4,1)
#define SAVE_m2n4 SAVE_m2n2 "leaq (%3,%4,2),%3;"\
cont_expacc(8,9,8) cont_expacc(10,11,10) save_1ymm(8,2,0,0,1,%3) save_1ymm(10,3,0,0,1,%3,%4,1)
#define SAVE_m2n6 SAVE_m2n4 "leaq (%3,%4,2),%3;"\
cont_expacc(12,13,12) cont_expacc(14,15,14) save_1ymm(12,2,0,0,1,%3) save_1ymm(14,3,0,0,1,%3,%4,1)
#define COMPUTE_m2(ndim) \
"movq %%r14,%1;" INIT_m2n##ndim "movq %%r13,%5;"\
"testq %5,%5; jz "#ndim"2222f;"\
#ndim"2221:\n\t"\
KERNEL_k1m2n##ndim\
"decq %5; jnz "#ndim"2221b;"\
#ndim"2222:\n\t"\
SAVE_m2n##ndim

/* m=1, vmm 0-3 temp, vmm 4-15 acc, expanded accumulators */
#if A_CONJ == B_CONJ
#define acc_m1n1_exp(ar,ai,b2,cl,cr) "vfmadd231pd %%xmm"#ar",%%xmm"#b2",%%xmm"#cl"; vfmadd231pd %%xmm"#ai",%%xmm"#b2",%%xmm"#cr";"
#define acc_m1n2_exp(arb,aib,b4,cl,cr) "vfmadd231pd %%ymm"#arb",%%ymm"#b4",%%ymm"#cl"; vfmadd231pd %%ymm"#aib",%%ymm"#b4",%%ymm"#cr";"
#else
#define acc_m1n1_exp(ar,ai,b2,cl,cr) "vfmadd231pd %%xmm"#ar",%%xmm"#b2",%%xmm"#cl"; vfnmadd231pd %%xmm"#ai",%%xmm"#b2",%%xmm"#cr";"
#define acc_m1n2_exp(arb,aib,b4,cl,cr) "vfmadd231pd %%ymm"#arb",%%ymm"#b4",%%ymm"#cl"; vfnmadd231pd %%ymm"#aib",%%ymm"#b4",%%ymm"#cr";"
#endif
#define KERNEL_k1m1n1 \
"vmovddup (%0),%%xmm0; vmovddup 8(%0),%%xmm1; addq $16,%0;"\
"vmovupd (%1),%%xmm2; addq $16,%1;" acc_m1n1_exp(0,1,2,4,5)
#define KERNEL_h_k1m1n2 \
"vbroadcastsd (%0),%%ymm0; vbroadcastsd 8(%0),%%ymm1; addq $16,%0;"\
"vmovupd (%1),%%ymm2;" acc_m1n2_exp(0,1,2,4,5)
#define KERNEL_h_k1m1n4 KERNEL_h_k1m1n2 "vmovupd (%1,%%r12,1),%%ymm2;" acc_m1n2_exp(0,1,2,6,7)
#define KERNEL_h_k1m1n6 KERNEL_h_k1m1n4 "vmovupd (%1,%%r12,2),%%ymm2;" acc_m1n2_exp(0,1,2,8,9)
#define KERNEL_k1m1n2 KERNEL_h_k1m1n2 "addq $32,%1;"
#define KERNEL_k1m1n4 KERNEL_h_k1m1n4 "addq $32,%1;"
#define KERNEL_k1m1n6 KERNEL_h_k1m1n6 "addq $32,%1;"
#define INIT_m1n1 "vpxor %%xmm4,%%xmm4,%%xmm4; vpxor %%xmm5,%%xmm5,%%xmm5;"
#define INIT_m1n2 "vpxor %%ymm4,%%ymm4,%%ymm4; vpxor %%ymm5,%%ymm5,%%ymm5;"
#define INIT_m1n4 INIT_m1n2 "vpxor %%ymm6,%%ymm6,%%ymm6; vpxor %%ymm7,%%ymm7,%%ymm7;"
#define INIT_m1n6 INIT_m1n4 "vpxor %%ymm8,%%ymm8,%%ymm8; vpxor %%ymm9,%%ymm9,%%ymm9;"
#if A_CONJ == B_CONJ
#define cont_expxmmacc(cl,cr,dst) "vpermilpd $5,%%xmm"#cr",%%xmm"#cr"; vaddsubpd %%xmm"#cl",%%xmm"#cr",%%xmm"#dst";"
#else
#define cont_expxmmacc(cl,cr,dst) "vpermilpd $5,%%xmm"#cr",%%xmm"#cr"; vaddsubpd %%xmm"#cr",%%xmm"#cl",%%xmm"#dst";"
#endif
#if A_CONJ == 0
#define save_m1n1(c,tmp,alpr,alpi) \
"vpermilpd $5,%%xmm"#c",%%xmm"#tmp"; vfmsubadd213pd (%3),%%xmm"#alpr",%%xmm"#c";"\
"vfmsubadd231pd %%xmm"#tmp",%%xmm"#alpi",%%xmm"#c"; vmovupd %%xmm"#c",(%3);"
#define save_m1n2(c,tmp1,tmp2,alpr,alpi) \
"vpermilpd $5,%%ymm"#c",%%ymm"#tmp1"; vmovupd (%3),%%xmm"#tmp2"; vinsertf128 $1,(%3,%4,1),%%ymm"#tmp2",%%ymm"#tmp2";"\
"vfmsubadd213pd %%ymm"#tmp2",%%ymm"#alpr",%%ymm"#c"; vfmsubadd231pd %%ymm"#tmp1",%%ymm"#alpi",%%ymm"#c";"\
"vmovupd %%xmm"#c",(%3); vextractf128 $1,%%ymm"#c",(%3,%4,1); leaq (%3,%4,2),%3;"
#else
#define save_m1n1(c,tmp,alpr,alpi) \
"vpermilpd $5,%%xmm"#c",%%xmm"#tmp"; vfmaddsub213pd (%3),%%xmm"#alpi",%%xmm"#tmp";"\
"vfmaddsub231pd %%xmm"#c",%%xmm"#alpr",%%xmm"#tmp"; vmovupd %%xmm"#tmp",(%3);"
#define save_m1n2(c,tmp1,tmp2,alpr,alpi) \
"vpermilpd $5,%%ymm"#c",%%ymm"#tmp1"; vmovupd (%3),%%xmm"#tmp2"; vinsertf128 $1,(%3,%4,1),%%ymm"#tmp2",%%ymm"#tmp2";"\
"vfmaddsub213pd %%ymm"#tmp2",%%ymm"#alpi",%%ymm"#tmp1"; vfmaddsub231pd %%ymm"#c",%%ymm"#alpr",%%ymm"#tmp1";"\
"vmovupd %%xmm"#tmp1",(%3); vextractf128 $1,%%ymm"#tmp1",(%3,%4,1); leaq (%3,%4,2),%3;"
#endif
#define save_init_m1 "movq %2,%3; addq $16,%2; vbroadcastsd (%6),%%ymm0; vbroadcastsd 8(%6),%%ymm1;"
#define SAVE_m1n1 save_init_m1 cont_expxmmacc(4,5,4) save_m1n1(4,2,0,1)
#define SAVE_m1n2 save_init_m1 cont_expacc(4,5,4) save_m1n2(4,2,3,0,1)
#define SAVE_m1n4 SAVE_m1n2 cont_expacc(6,7,6) save_m1n2(6,2,3,0,1)
#define SAVE_m1n6 SAVE_m1n4 cont_expacc(8,9,8) save_m1n2(8,2,3,0,1)
#define COMPUTE_m1(ndim) \
"movq %%r14,%1;" INIT_m1n##ndim "movq %%r13,%5;"\
"testq %5,%5; jz "#ndim"1112f;"\
#ndim"1111:\n\t"\
KERNEL_k1m1n##ndim\
"decq %5; jnz "#ndim"1111b;"\
#ndim"1112:\n\t"\
SAVE_m1n##ndim

#define COMPUTE(ndim) {\
b_pref = b_ptr + ndim * K *2;\
__asm__ __volatile__ (\
"movq %1,%%r14; movq %5,%%r13; movq %5,%%r12; salq $5,%%r12; movq %7,%%r11;"\
"cmpq $4,%7; jb "#ndim"9992f;"\
#ndim"9991:\n\t"\
COMPUTE_m4(ndim)\
"subq $4,%7; cmpq $4,%7; jnb "#ndim"9991b;"\
#ndim"9992:\n\t"\
"cmpq $2,%7; jb "#ndim"9993f;"\
COMPUTE_m2(ndim) "subq $2,%7;"\
#ndim"9993:\n\t"\
"testq %7,%7; jz "#ndim"9994f;"\
COMPUTE_m1(ndim)\
#ndim"9994:\n\t"\
"movq %%r14,%1; movq %%r13,%5; movq %%r11,%7; vzeroupper;"\
:"+r"(a_ptr),"+r"(b_ptr),"+r"(c_ptr),"+r"(c_tmp),"+r"(ldc_in_bytes),"+r"(K),"+r"(alp),"+r"(M),"+r"(b_pref)\
::"cc","memory","r11","r12","r13","r14","r15","xmm0","xmm1","xmm2","xmm3","xmm4","xmm5",\
"xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15");\
a_ptr -= M * K *2; b_ptr += ndim * K *2; c_ptr += (ndim * LDC - M) * 2;\
}

int __attribute__ ((noinline))
CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alphar, double alphai, double * __restrict__ A, double * __restrict__ B, double * __restrict__ C, BLASLONG LDC)
{
    if(m==0||n==0||k==0||(alphar==0.0 && alphai==0.0)) return 0;
    int64_t ldc_in_bytes = (int64_t)LDC * sizeof(double) * 2;
#if A_CONJ == B_CONJ
    double const_val[2] = {-alphar, -alphai};
#else
    double const_val[2] = {alphar, alphai};
#endif
    int64_t M = (int64_t)m, K = (int64_t)k;
    BLASLONG n_count = n;
    double *a_ptr = A,*b_ptr = B,*c_ptr = C,*c_tmp = C,*alp = const_val,*b_pref = B;
    for(;n_count>5;n_count-=6) COMPUTE(6)
    for(;n_count>3;n_count-=4) COMPUTE(4)
    for(;n_count>1;n_count-=2) COMPUTE(2)
    if(n_count>0) COMPUTE(1)
    return 0;
}
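
Editor's note: the "expanded accumulator" macros above (acc_m2n1_exp / cont_expacc, used for the narrower tiles) keep a_real*b and a_imag*b in separate registers so the k loop is made of plain FMAs, and the swap plus add/subtract of the complex product happens only once per tile when the pair is contracted; the wider m4n4/m4n6 tiles use the contracted vfmaddsub/vfmsubadd form directly. The following scalar sketch of the expanded-accumulator idea is for illustration only and is not part of the commit (the kernel additionally folds a sign convention into the alpha it loads, which is why CNAME negates alpha when A_CONJ == B_CONJ).

#include <complex.h>
#include <stdio.h>

/* Expanded-accumulator dot product for one C element, non-conjugated case. */
static double _Complex expanded_cdot(const double _Complex *a,
                                     const double _Complex *b, int k)
{
    /* "cl" accumulates a_re * (b_re, b_im); "cr" accumulates a_im * (b_re, b_im). */
    double cl_re = 0.0, cl_im = 0.0, cr_re = 0.0, cr_im = 0.0;
    for (int i = 0; i < k; i++) {
        double ar = creal(a[i]), ai = cimag(a[i]);
        double br = creal(b[i]), bi = cimag(b[i]);
        cl_re += ar * br; cl_im += ar * bi;   /* plain FMAs, no shuffles */
        cr_re += ai * br; cr_im += ai * bi;   /* second accumulator pair */
    }
    /* contraction: swap the halves of "cr", then one add/subtract */
    return (cl_re - cr_im) + (cl_im + cr_re) * I;
}

int main(void)
{
    double _Complex a[2] = {1 + 2 * I, 3 - 1 * I};
    double _Complex b[2] = {2 - 1 * I, 1 + 4 * I};
    double _Complex c = expanded_cdot(a, b, 2);
    printf("%g%+gi\n", creal(c), cimag(c));   /* equals a[0]*b[0] + a[1]*b[1] */
    return 0;
}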

param.h

Lines changed: 2 additions & 2 deletions

@@ -1572,7 +1572,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define SGEMM_DEFAULT_P 768
 #define DGEMM_DEFAULT_P 512
 #define CGEMM_DEFAULT_P 384
-#define ZGEMM_DEFAULT_P 256
+#define ZGEMM_DEFAULT_P 192

 #ifdef WINDOWS_ABI
 #define SGEMM_DEFAULT_Q 320
@@ -1582,7 +1582,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define DGEMM_DEFAULT_Q 256
 #endif
 #define CGEMM_DEFAULT_Q 192
-#define ZGEMM_DEFAULT_Q 128
+#define ZGEMM_DEFAULT_Q 192

 #define SGEMM_DEFAULT_R sgemm_r
 #define DGEMM_DEFAULT_R 13824
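
Editor's note: the param.h change matches the recommendation in the new kernel's header comment (GEMM_P = 192, GEMM_Q = 192). In OpenBLAS's Goto-style blocking, the *_DEFAULT_P value tiles the M dimension and *_DEFAULT_Q tiles the K dimension of the packed panels handed to the kernel. The outline below is an illustration only, not the actual OpenBLAS level-3 driver.

/* Hypothetical outline: shows which loop each blocking constant tiles. */
#define ZGEMM_DEFAULT_P 192   /* block size along M */
#define ZGEMM_DEFAULT_Q 192   /* block size along K */

static long min_l(long a, long b) { return a < b ? a : b; }

void zgemm_blocking_outline(long m, long n, long k)
{
    for (long kk = 0; kk < k; kk += ZGEMM_DEFAULT_Q) {
        long kb = min_l(ZGEMM_DEFAULT_Q, k - kk);        /* K block, <= GEMM_Q */
        /* ...pack a kb-deep slice of B here... */
        for (long ii = 0; ii < m; ii += ZGEMM_DEFAULT_P) {
            long mb = min_l(ZGEMM_DEFAULT_P, m - ii);    /* M block, <= GEMM_P */
            /* ...pack an mb x kb block of A, then run the micro-kernel
               (the CNAME routine above) across the packed B panel... */
            (void)mb; (void)kb;
        }
    }
    (void)n;
}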
