From 7ad56c7382290514dc4ca02550e007817fda478b Mon Sep 17 00:00:00 2001
From: 0cc4m
Date: Sat, 24 May 2025 13:27:53 +0000
Subject: [PATCH] Move GLM4 f32 attention fix to the correct function

---
 src/llama-graph.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp
index 13e36d161c614..cdd5887de961c 100644
--- a/src/llama-graph.cpp
+++ b/src/llama-graph.cpp
@@ -1287,6 +1287,10 @@ ggml_tensor * llm_graph_context::build_attn(
 
     if (wo) {
         cur = build_lora_mm(wo, cur);
+        if (arch == LLM_ARCH_GLM4) {
+            // GLM4 seems to have numerical issues with half-precision accumulators
+            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
+        }
     }
 
     if (wo_b) {
@@ -1367,10 +1371,6 @@ ggml_tensor * llm_graph_context::build_attn(
 
     if (wo) {
         cur = build_lora_mm(wo, cur);
-        if (arch == LLM_ARCH_GLM4) {
-            // GLM4 seems to have numerical issues with half-precision accumulators
-            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
-        }
     }
 
     if (wo_b) {
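
For context, a minimal standalone sketch of the ggml call the fix relies on, assuming a plain ggml build; the tensor shapes and names are illustrative only and are not taken from llama.cpp. ggml_mul_mat_set_prec() marks an existing mul-mat node so the backend accumulates that operation in f32 rather than f16, which is what the moved GLM4 workaround requests for the attention output projection.

#include "ggml.h"

int main(void) {
    // small scratch context; the size is arbitrary for this illustration
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // hypothetical projection weight and activation tensors
    struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
    struct ggml_tensor * x = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 8);

    // cur = w @ x, analogous to the attention output projection in build_attn
    struct ggml_tensor * cur = ggml_mul_mat(ctx, w, x);

    // request f32 accumulation for this node, as the GLM4 fix does
    ggml_mul_mat_set_prec(cur, GGML_PREC_F32);

    ggml_free(ctx);
    return 0;
}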