Force FP32 compute in GLM4 FFN Down (#13101)
author City <redacted>
Fri, 25 Apr 2025 12:38:34 +0000 (14:38 +0200)
committer GitHub <redacted>
Fri, 25 Apr 2025 12:38:34 +0000 (14:38 +0200)
* Force FP32 compute in cuBLAS GEMM

* Revert "Force FP32 compute in cuBLAS GEMM"

This reverts commit 6efd872732159ab88ee7b3c1d77ba5ebc83079bd.

* Force F32 compute in GLM4 ffn down

* Edit comment to clarify issue

Co-authored-by: Johannes Gäßler <redacted>
---------

Co-authored-by: Johannes Gäßler <redacted>
src/llama-graph.cpp

index a85e97288e1aeae9914acea5168ba952715c6d3a..b52e3f6203a4bfdb890ce873ba827ac8d1e7004b 100644
@@ -803,6 +803,10 @@ ggml_tensor * llm_graph_context::build_ffn(
 
     if (down) {
         cur = build_lora_mm(down, cur);
+        if (arch == LLM_ARCH_GLM4) {
+            // GLM4 seems to have numerical issues with half-precision accumulators
+            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
+        }
     }
 
     if (down_b) {
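
For context, a minimal sketch (not part of this commit) of the pattern the diff uses: ggml_mul_mat_set_prec is called on the result tensor of a ggml_mul_mat node to request FP32 accumulation for that node. The helper name and tensor roles below are illustrative; only ggml_mul_mat, ggml_mul_mat_set_prec, and GGML_PREC_F32 come from the ggml API.

    #include "ggml.h"

    // Hypothetical helper: build a matmul node that accumulates in FP32.
    static ggml_tensor * mul_mat_f32_prec(ggml_context * ctx,
                                          ggml_tensor  * w,   // weight matrix
                                          ggml_tensor  * x) { // activations
        ggml_tensor * y = ggml_mul_mat(ctx, w, x);
        // Request FP32 accumulators for this node. Without this, backends
        // such as the CUDA cuBLAS path may accumulate in half precision,
        // which is what triggered the GLM4 numerical issues above.
        ggml_mul_mat_set_prec(y, GGML_PREC_F32);
        return y;
    }

Setting the precision on the single affected node, as the diff does for the GLM4 ffn_down matmul, keeps the rest of the graph on the faster half-precision path instead of forcing FP32 compute globally (the approach tried and reverted in the first two commits).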