git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Fix GLM 4.7 Lite MoE gating func (#18980)
author: Piotr Wilkin (ilintar) <redacted>
Wed, 21 Jan 2026 11:35:20 +0000 (12:35 +0100)
committer: GitHub <redacted>
Wed, 21 Jan 2026 11:35:20 +0000 (12:35 +0100)
* Fix GLM 4.7 MoE gating func

* Update src/models/deepseek2.cpp

Co-authored-by: Sigbjørn Skjæret <redacted>
* Update src/llama-model.cpp

Co-authored-by: Xuan-Son Nguyen <redacted>
---------

Co-authored-by: Sigbjørn Skjæret <redacted>
Co-authored-by: Xuan-Son Nguyen <redacted>
src/llama-model.cpp

index 94c47dc248024532a6e5627d2284240c30ac78ff..255289b7c8848d80f3a1e222558f51babed85643 100644 (file)
@@ -1713,7 +1713,12 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                 if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
                     // for compatibility with existing DeepSeek V2 and V2.5 GGUFs
                     // that have no expert_gating_func model parameter set
-                    hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX;
+                    if ((hparams.n_layer == 47 || hparams.n_layer == 48) && n_vocab == 154880) {
+                        // GLM 4.7 Lite
+                        hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID;
+                    } else {
+                        hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX;
+                    }
                 }
 
                 if (ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul, 0.0f)) {