Skip to content

Commit 0b6f6be

Browse files
committed
ggml-cpu : reorder SVE FMA for consistency with other SIMD arches
1 parent 757aa62 commit 0b6f6be

File tree

4 files changed

+20
-20
lines changed

ggml/src/ggml-cpu/ops.cpp

Lines changed: 1 addition & 1 deletion
@@ -7771,7 +7771,7 @@ static void ggml_compute_forward_ssm_scan_f32(
                         t1 = exp_ps_sve(svptrue_b32(), t1);
                         svfloat32_t t2 = GGML_F32_VEC_MUL(vx_dt, vB);

-                        vs0 = GGML_F32_VEC_FMA(vs0, t1, t2);
+                        vs0 = GGML_F32_VEC_FMA(t2, vs0, t1);
                         r1_vector = GGML_F32_VEC_ADD(GGML_F32_VEC_MUL(vs0, vC), r1_vector);

                         GGML_F32_VEC_STORE(&s[ii*nc + k], vs0);

ggml/src/ggml-cpu/simd-mappings.h

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@
 #define GGML_F32xt_LOAD(...)             GGML_F32xt_LOAD_IMPL(DEFAULT_PG, __VA_ARGS__)
 #define GGML_F32xt_STORE_IMPL(pg,a,b)    svst1_f32(pg, a, b)
 #define GGML_F32xt_STORE(...)            GGML_F32xt_STORE_IMPL(DEFAULT_PG, __VA_ARGS__)
-#define GGML_F32xt_FMA_IMPL(pg, a, b, c) svmad_f32_m(pg, a, b, c)
+#define GGML_F32xt_FMA_IMPL(pg, a, b, c) svmad_f32_m(pg, b, c, a)
 #define GGML_F32xt_FMA(...)              GGML_F32xt_FMA_IMPL(DEFAULT_PG, __VA_ARGS__)
 #define GGML_F32xt_ADD_IMPL(pg, a, b)    svadd_f32_m(pg, a, b)
 #define GGML_F32xt_ADD(...)              GGML_F32xt_ADD_IMPL(DEFAULT_PG, __VA_ARGS__)

ggml/src/ggml-cpu/vec.cpp

Lines changed: 9 additions & 9 deletions
@@ -37,43 +37,43 @@ void ggml_vec_dot_f32(int n, float * GGML_RESTRICT s, size_t bs, const float * G
         for (int i = 0; i < np; i += ggml_f32_step) {
             ax1 = GGML_F32_VEC_LOAD(x + i);
             ay1 = GGML_F32_VEC_LOAD(y + i);
-            sum1 = GGML_F32_VEC_FMA(ax1, ay1, sum1);
+            sum1 = GGML_F32_VEC_FMA(sum1, ax1, ay1);

             ax2 = GGML_F32_VEC_LOAD(x + i + 1*ggml_f32_epr);
             ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr);
-            sum2 = GGML_F32_VEC_FMA(ax2, ay2, sum2);
+            sum2 = GGML_F32_VEC_FMA(sum2, ax2, ay2);

             ax3 = GGML_F32_VEC_LOAD(x + i + 2*ggml_f32_epr);
             ay3 = GGML_F32_VEC_LOAD(y + i + 2*ggml_f32_epr);
-            sum3 = GGML_F32_VEC_FMA(ax3, ay3, sum3);
+            sum3 = GGML_F32_VEC_FMA(sum3, ax3, ay3);

             ax4 = GGML_F32_VEC_LOAD(x + i + 3*ggml_f32_epr);
             ay4 = GGML_F32_VEC_LOAD(y + i + 3*ggml_f32_epr);
-            sum4 = GGML_F32_VEC_FMA(ax4, ay4, sum4);
+            sum4 = GGML_F32_VEC_FMA(sum4, ax4, ay4);

             ax5 = GGML_F32_VEC_LOAD(x + i + 4*ggml_f32_epr);
             ay5 = GGML_F32_VEC_LOAD(y + i + 4*ggml_f32_epr);
-            sum5 = GGML_F32_VEC_FMA(ax5, ay5, sum5);
+            sum5 = GGML_F32_VEC_FMA(sum5, ax5, ay5);

             ax6 = GGML_F32_VEC_LOAD(x + i + 5*ggml_f32_epr);
             ay6 = GGML_F32_VEC_LOAD(y + i + 5*ggml_f32_epr);
-            sum6 = GGML_F32_VEC_FMA(ax6, ay6, sum6);
+            sum6 = GGML_F32_VEC_FMA(sum6, ax6, ay6);

             ax7 = GGML_F32_VEC_LOAD(x + i + 6*ggml_f32_epr);
             ay7 = GGML_F32_VEC_LOAD(y + i + 6*ggml_f32_epr);
-            sum7 = GGML_F32_VEC_FMA(ax7, ay7, sum7);
+            sum7 = GGML_F32_VEC_FMA(sum7, ax7, ay7);

             ax8 = GGML_F32_VEC_LOAD(x + i + 7*ggml_f32_epr);
             ay8 = GGML_F32_VEC_LOAD(y + i + 7*ggml_f32_epr);
-            sum8 = GGML_F32_VEC_FMA(ax8, ay8, sum8);
+            sum8 = GGML_F32_VEC_FMA(sum8, ax8, ay8);
         }
         // leftovers
         // Since 8 unrolls are done in above loop, leftovers lie in range [0, ggml_f32_step] which is handled in below loop
         const int np2 = (n & ~(ggml_f32_epr - 1));
         for (int i = np; i < np2; i += ggml_f32_epr) {
             ax1 = GGML_F32_VEC_LOAD(x + i);
             ay1 = GGML_F32_VEC_LOAD(y + i);
-            sum1 = GGML_F32_VEC_FMA(ax1, ay1, sum1);
+            sum1 = GGML_F32_VEC_FMA(sum1, ax1, ay1);
         }
         // maximum number of leftover elements will be less that ggml_f32_epr. Apply predicated svmad on available elements only
         if (np2 < n) {

ggml/src/ggml-cpu/vec.h

Lines changed: 9 additions & 9 deletions
@@ -163,49 +163,49 @@ inline static void ggml_vec_mad_f32(const int n, float * GGML_RESTRICT y, const

             ax1 = GGML_F32_VEC_LOAD(x + i);
             ay1 = GGML_F32_VEC_LOAD(y + i);
-            ay1 = GGML_F32_VEC_FMA(ax1, vx, ay1);
+            ay1 = GGML_F32_VEC_FMA(ay1, ax1, vx);

             GGML_F32_VEC_STORE(y + i, ay1);

             ax2 = GGML_F32_VEC_LOAD(x + i + 1*ggml_f32_epr);
             ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr);
-            ay2 = GGML_F32_VEC_FMA(ax2, vx, ay2);
+            ay2 = GGML_F32_VEC_FMA(ay2, ax2, vx);

             GGML_F32_VEC_STORE(y + i + 1*ggml_f32_epr, ay2);

             ax3 = GGML_F32_VEC_LOAD(x + i + 2*ggml_f32_epr);
             ay3 = GGML_F32_VEC_LOAD(y + i + 2*ggml_f32_epr);
-            ay3 = GGML_F32_VEC_FMA(ax3, vx, ay3);
+            ay3 = GGML_F32_VEC_FMA(ay3, ax3, vx);

             GGML_F32_VEC_STORE(y + i + 2*ggml_f32_epr, ay3);

             ax4 = GGML_F32_VEC_LOAD(x + i + 3*ggml_f32_epr);
             ay4 = GGML_F32_VEC_LOAD(y + i + 3*ggml_f32_epr);
-            ay4 = GGML_F32_VEC_FMA(ax4, vx, ay4);
+            ay4 = GGML_F32_VEC_FMA(ay4, ax4, vx);

             GGML_F32_VEC_STORE(y + i + 3*ggml_f32_epr, ay4);

             ax5 = GGML_F32_VEC_LOAD(x + i + 4*ggml_f32_epr);
             ay5 = GGML_F32_VEC_LOAD(y + i + 4*ggml_f32_epr);
-            ay5 = GGML_F32_VEC_FMA(ax5, vx, ay5);
+            ay5 = GGML_F32_VEC_FMA(ay5, ax5, vx);

             GGML_F32_VEC_STORE(y + i + 4*ggml_f32_epr, ay5);

             ax6 = GGML_F32_VEC_LOAD(x + i + 5*ggml_f32_epr);
             ay6 = GGML_F32_VEC_LOAD(y + i + 5*ggml_f32_epr);
-            ay6 = GGML_F32_VEC_FMA(ax6, vx, ay6);
+            ay6 = GGML_F32_VEC_FMA(ay6, ax6, vx);

             GGML_F32_VEC_STORE(y + i + 5*ggml_f32_epr, ay6);

             ax7 = GGML_F32_VEC_LOAD(x + i + 6*ggml_f32_epr);
             ay7 = GGML_F32_VEC_LOAD(y + i + 6*ggml_f32_epr);
-            ay7 = GGML_F32_VEC_FMA(ax7, vx, ay7);
+            ay7 = GGML_F32_VEC_FMA(ay7, ax7, vx);

             GGML_F32_VEC_STORE(y + i + 6*ggml_f32_epr, ay7);

             ax8 = GGML_F32_VEC_LOAD(x + i + 7*ggml_f32_epr);
             ay8 = GGML_F32_VEC_LOAD(y + i + 7*ggml_f32_epr);
-            ay8 = GGML_F32_VEC_FMA(ax8, vx, ay8);
+            ay8 = GGML_F32_VEC_FMA(ay8, ax8, vx);

             GGML_F32_VEC_STORE(y + i + 7*ggml_f32_epr, ay8);
         }
@@ -215,7 +215,7 @@ inline static void ggml_vec_mad_f32(const int n, float * GGML_RESTRICT y, const
         for (int i = np; i < np2; i += ggml_f32_epr) {
             ax1 = GGML_F32_VEC_LOAD(x + i);
             ay1 = GGML_F32_VEC_LOAD(y + i);
-            ay1 = GGML_F32_VEC_FMA(ax1, vx, ay1);
+            ay1 = GGML_F32_VEC_FMA(ay1, ax1, vx);

             GGML_F32_VEC_STORE(y + i, ay1);
         }

0 commit comments

Comments
 (0)