@@ -10057,7 +10057,7 @@ struct llm_graph_context_mamba : public llm_graph_context {
         // TODO: skip computing output earlier for unused tokens
 
         y = ggml_add(ctx0, y, ggml_mul(ctx0, cur, layer.ssm_d));
-        y = ggml_mul(ctx0, y, ggml_silu(ctx0, ggml_cont(ctx0, z)));
+        y = ggml_swiglu_split(ctx0, ggml_cont(ctx0, z), y);
 
         // {d_inner, n_embd} @ {d_inner, n_seq_tokens, n_seqs} => {n_embd, n_seq_tokens, n_seqs}
         cur = build_lora_mm(layer.ssm_out, y);
@@ -10181,7 +10181,7 @@ struct llm_graph_context_mamba : public llm_graph_context {
         // TODO: skip computing output earlier for unused tokens
 
         y = ggml_add(ctx0, y, ggml_mul(ctx0, x, model.layers[il].ssm_d));
-        y = ggml_mul(ctx0, y, ggml_silu(ctx0, ggml_cont(ctx0, z)));
+        y = ggml_swiglu_split(ctx0, ggml_cont(ctx0, z), y);
 
         // grouped RMS norm
         y = ggml_reshape_4d(ctx0, y, d_inner / n_group, n_group, n_seq_tokens, n_seqs);
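
Note: in both hunks the separate SiLU-then-multiply is replaced by the fused ggml_swiglu_split op, which (judging from the lines it replaces) is assumed to compute silu(z) * y element-wise in one step. A minimal reference sketch of that assumed semantics follows; the helper names are illustrative only and are not part of ggml or of this commit.

#include <cmath>

// silu(x) = x * sigmoid(x) = x / (1 + exp(-x))
static float silu_ref(float x) {
    return x / (1.0f + std::exp(-x));
}

// old graph: y = ggml_mul(ctx0, y, ggml_silu(ctx0, z))  ->  y[i] = y[i] * silu(z[i])
// new graph: y = ggml_swiglu_split(ctx0, z, y)          ->  assumed y[i] = silu(z[i]) * y[i]
static void swiglu_split_ref(const float * z, float * y, int n) {
    for (int i = 0; i < n; ++i) {
        y[i] = silu_ref(z[i]) * y[i];
    }
}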