From: 0cc4m
Date: Sat, 24 May 2025 14:49:12 +0000 (+0200)
Subject: Move GLM4 f32 attention fix to the correct function (#13750)
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=259469c4b57c1a32606353bcac52ba683424a990;p=pkg%2Fggml%2Fsources%2Fllama.cpp

Move GLM4 f32 attention fix to the correct function (#13750)
---

diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp
index 13e36d16..cdd5887d 100644
--- a/src/llama-graph.cpp
+++ b/src/llama-graph.cpp
@@ -1287,6 +1287,10 @@ ggml_tensor * llm_graph_context::build_attn(
 
     if (wo) {
         cur = build_lora_mm(wo, cur);
+        if (arch == LLM_ARCH_GLM4) {
+            // GLM4 seems to have numerical issues with half-precision accumulators
+            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
+        }
     }
 
     if (wo_b) {
@@ -1367,10 +1371,6 @@ ggml_tensor * llm_graph_context::build_attn(
 
     if (wo) {
         cur = build_lora_mm(wo, cur);
-        if (arch == LLM_ARCH_GLM4) {
-            // GLM4 seems to have numerical issues with half-precision accumulators
-            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
-        }
     }
 
     if (wo_b) {
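
Not part of the commit above: a minimal, standalone sketch of the ggml precision override that this patch moves into the correct build_attn overload. It assumes only the public ggml.h API; the tensor shapes and the weight/activation names (w, x) are illustrative, not taken from llama.cpp.

// Sketch: request f32 accumulation for a single matmul node, as the GLM4 fix does.
#include "ggml.h"

int main() {
    // Small scratch context; no_alloc = true because we only build the graph here.
    ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ nullptr,
        /*.no_alloc   =*/ true,
    };
    ggml_context * ctx = ggml_init(params);

    // Hypothetical output-projection weight (f16) and activations (f32).
    ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 4096, 4096);
    ggml_tensor * x = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4096, 8);

    ggml_tensor * cur = ggml_mul_mat(ctx, w, x);

    // Mark this matmul so that backends which support it use f32 accumulators
    // instead of half precision, mirroring the GLM4 workaround in the diff.
    ggml_mul_mat_set_prec(cur, GGML_PREC_F32);

    ggml_free(ctx);
    return 0;
}

The override is per-node: only the tagged matmul pays the extra precision cost, while the rest of the graph keeps the backend's default accumulator type, which is why the patch attaches it to the attention output projection rather than changing a global setting.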