From: Gabriel Larson
Date: Sun, 27 Jul 2025 08:18:37 +0000 (-0500)
Subject: model : make rope_yarn_log_mul optional for deepseek2 (#14896)
X-Git-Tag: upstream/0.0.6073~73
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=4762ad7316dcdec20016ab5985fb46a27902204d;p=pkg%2Fggml%2Fsources%2Fllama.cpp

model : make rope_yarn_log_mul optional for deepseek2 (#14896)

* make rope_yarn_log_mul optional for deepseek2

* default rope_yarn_log_mul = 0.0f
---

diff --git a/src/llama-hparams.h b/src/llama-hparams.h
index c422cd7b..ec7fd6a4 100644
--- a/src/llama-hparams.h
+++ b/src/llama-hparams.h
@@ -98,7 +98,7 @@ struct llama_hparams {
     float    rope_freq_scale_train;
     float    rope_freq_scale_train_swa;
     uint32_t n_ctx_orig_yarn;
-    float    rope_yarn_log_mul;
+    float    rope_yarn_log_mul = 0.0f;
 
     std::array<int, 4> rope_sections;
 
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index f16789c2..71f89e19 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -1369,7 +1369,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                         // that have no expert_gating_func model parameter set
                         hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX;
                     }
-                    ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);
+                    ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul, false);
 
                     switch (hparams.n_layer) {
                         case 27: type = LLM_TYPE_16B; break;
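
Note on the change: the trailing false passed to ml.get_key() marks the metadata key as optional, so DeepSeek2 GGUF files converted without a YARN log-multiplier entry load cleanly and fall back to the new in-struct default of 0.0f. Below is a minimal, self-contained C++ sketch of that optional-key pattern; it is not llama.cpp's actual loader, and the helper, the map-backed metadata, and the key string are illustrative assumptions.

// Sketch of the optional-key lookup used by the commit above.
// get_key() is a stand-in for llama_model_loader::get_key(): it only
// overwrites the caller's value when the key exists, and only reports
// an error when required == true.
#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>

static bool get_key(const std::map<std::string, float> & kv,
                    const std::string & key, float & result, bool required = true) {
    const auto it = kv.find(key);
    if (it == kv.end()) {
        if (required) {
            throw std::runtime_error("key not found in model: " + key);
        }
        return false; // optional key is missing: keep the caller's default
    }
    result = it->second;
    return true;
}

int main() {
    // Metadata of an older DeepSeek2 conversion that lacks the YARN log-mul key.
    // (The key string below is illustrative, not the exact GGUF key name.)
    const std::map<std::string, float> metadata = {};

    float rope_yarn_log_mul = 0.0f; // matches the new in-struct default
    get_key(metadata, "rope.scaling.yarn_log_multiplier", rope_yarn_log_mul, /*required =*/ false);

    std::printf("rope_yarn_log_mul = %g\n", rope_yarn_log_mul); // stays 0
    return 0;
}

With required defaulting to true, only call sites that explicitly pass false tolerate a missing key, which is why the default initializer added in llama-hparams.h matters: it is the value left in place when the lookup is skipped.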