Move GLM4 f32 attention fix to the correct function (#13750)
author    0cc4m <redacted>
Sat, 24 May 2025 14:49:12 +0000 (16:49 +0200)
committer GitHub <redacted>
Sat, 24 May 2025 14:49:12 +0000 (16:49 +0200)
src/llama-graph.cpp

index 13e36d161c614c2c6b41a0872dfd5337d5135c63..cdd5887de961c6f622e6c507efcf2775ad75709a 100644
--- a/src/llama-graph.cpp
+++ b/src/llama-graph.cpp
@@ -1287,6 +1287,10 @@ ggml_tensor * llm_graph_context::build_attn(
 
     if (wo) {
         cur = build_lora_mm(wo, cur);
+        if (arch == LLM_ARCH_GLM4) {
+            // GLM4 seems to have numerical issues with half-precision accumulators
+            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
+        }
     }
 
     if (wo_b) {
@@ -1367,10 +1371,6 @@ ggml_tensor * llm_graph_context::build_attn(
 
     if (wo) {
         cur = build_lora_mm(wo, cur);
-        if (arch == LLM_ARCH_GLM4) {
-            // GLM4 seems to have numerical issues with half-precision accumulators
-            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
-        }
     }
 
     if (wo_b) {
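
The change moves the GLM4-specific precision override from one build_attn overload to the other, so that the f32 accumulator is requested on the matmul that actually runs for GLM4. As a minimal sketch (not part of the commit; the helper name, shapes, and the is_glm4 flag are illustrative assumptions), this is how ggml_mul_mat_set_prec is applied to the result tensor of a matmul to request full-precision accumulation:

```c
#include "ggml.h"

// Hypothetical helper: apply the output projection and, for GLM4-like models,
// request f32 accumulation on that matmul to avoid half-precision issues.
static struct ggml_tensor * build_output_proj(
        struct ggml_context * ctx,
        struct ggml_tensor  * wo,    // output projection weight
        struct ggml_tensor  * cur,   // attention output
        bool                  is_glm4) {
    cur = ggml_mul_mat(ctx, wo, cur);
    if (is_glm4) {
        // the precision hint is set on the matmul's result tensor
        ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
    }
    return cur;
}
```

The hint only affects how backends accumulate this particular matmul; the tensor's output type and the rest of the graph are unchanged.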