graph : add clamping to ffn_moe_weights_sum to avoid div-by-zero (#16655)
author    Sigbjørn Skjæret <redacted>
Sun, 26 Oct 2025 16:20:32 +0000 (17:20 +0100)
committer GitHub <redacted>
Sun, 26 Oct 2025 16:20:32 +0000 (17:20 +0100)
* add missing norm topk bias

* use clamping instead, update number and add comment

src/llama-graph.cpp

index c1b946e3f715dab199c827f55abae8b23ac0f478..112d195f2911e8d5e49552814e50d10ded933242 100644
@@ -1009,10 +1009,9 @@ ggml_tensor * llm_graph_context::build_moe_ffn(
         ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights); // [1, n_tokens]
         cb(weights_sum, "ffn_moe_weights_sum", il);
 
-        if (arch == LLM_ARCH_BAILINGMOE2) {
-            weights_sum = ggml_scale_bias(ctx0, weights_sum, 1.0, 1e-20);
-            cb(weights_sum, "ffn_moe_weights_sum_biased", il);
-        }
+        // Avoid division by zero, clamp to smallest number representable by F16
+        weights_sum = ggml_clamp(ctx0, weights_sum, 6.103515625e-5, INFINITY);
+        cb(weights_sum, "ffn_moe_weights_sum_clamped", il);
 
         weights = ggml_div(ctx0, weights, weights_sum); // [n_expert_used, n_tokens]
         cb(weights, "ffn_moe_weights_norm", il);
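
For context, here is a minimal standalone sketch of the normalization this hunk protects, using plain std::vector<float> instead of ggml tensors (function and variable names are illustrative, not from the commit). The clamp bound 6.103515625e-5 is exactly 2^-14, the smallest normal FP16 value, so the divisor stays representable when the graph runs in half precision; without the clamp, an all-zero row sum would make the division produce 0/0 = NaN.

    #include <algorithm>
    #include <cstdio>
    #include <numeric>
    #include <vector>

    // Normalize per-token expert weights, mirroring weights / weights_sum
    // in build_moe_ffn. Clamping the sum, as ggml_clamp does in the patch,
    // keeps the divisor positive even if every selected weight underflows
    // to zero.
    static void normalize_expert_weights(std::vector<float> & weights) {
        float sum = std::accumulate(weights.begin(), weights.end(), 0.0f);
        sum = std::max(sum, 6.103515625e-5f); // smallest normal F16 value (2^-14)
        for (float & w : weights) {
            w /= sum; // always finite; an unclamped zero sum would yield NaN
        }
    }

    int main() {
        std::vector<float> w = {0.0f, 0.0f, 0.0f}; // degenerate top-k: all scores zero
        normalize_expert_weights(w);
        for (float v : w) {
            printf("%g\n", v); // prints 0, not nan
        }
        return 0;
    }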