git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
model: add llama 4 scaling for mistral-large (deepseek arch) (#17744)
authorXuan-Son Nguyen <redacted>
Sun, 7 Dec 2025 21:29:54 +0000 (22:29 +0100)
committerGitHub <redacted>
Sun, 7 Dec 2025 21:29:54 +0000 (22:29 +0100)
src/llama-model.cpp
src/models/deepseek2.cpp

index c3675dbdc414853aff2c03ecd96c3aab69c9b0c7..7d09d7abd514a95e4974e1e77d5be9f86702a5d2 100644 (file)
@@ -1628,6 +1628,10 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                 }
                 ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul, false);
 
+                // (optional) temperature tuning - used by mistral-large
+                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_SCALE,  hparams.f_attn_temp_scale,       false);
+                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_LENGTH, hparams.n_attn_temp_floor_scale, false);
+
                 switch (hparams.n_layer) {
                     case 27: type = LLM_TYPE_16B; break;
                     case 60: type = LLM_TYPE_236B; break;
index 0b41f7ba8eb37963f61e24eab3c2c0ef91beefc7..dbaa8297be926d7f398c0c2915f99d458613383d 100644 (file)
@@ -30,6 +30,12 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_gr
     // {n_embd, n_tokens}
     inpL = build_inp_embd(model.tok_embd);
 
+    // (optional) temperature tuning - used by mistral-large
+    ggml_tensor * inp_attn_scale = nullptr;
+    if (hparams.f_attn_temp_scale != 0.0f) {
+        inp_attn_scale = build_inp_attn_scale();
+    }
+
     // inp_pos - contains the positions
     ggml_tensor * inp_pos = build_inp_pos();
 
@@ -128,6 +134,12 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_gr
                 ggml_tensor * Vcur = kv_cmpr;
                 cb(Vcur, "Vcur", il);
 
+                if (inp_attn_scale) {
+                    // apply llama 4 temperature scaling
+                    Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
+                    cb(Qcur, "Qcur_attn_temp_scaled", il);
+                }
+
                 // note: MLA with the absorption optimzation converts into MQA (ie: GQA with 1 group)
                 cur = build_attn(inp_attn,
                         model.layers[il].wo, NULL,
@@ -160,6 +172,12 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_gr
                 ggml_tensor * Kcur = ggml_concat(ctx0, ggml_repeat(ctx0, k_pe, q_pe), k_nope, 0);
                 cb(Kcur, "Kcur", il);
 
+                if (inp_attn_scale) {
+                    // apply llama 4 temperature scaling
+                    Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
+                    cb(Qcur, "Qcur_attn_temp_scaled", il);
+                }
+
                 // note: MLA without the absorption optimization converts into MHA (ie: GQA with full n_head groups)
                 cur = build_attn(inp_attn,
                             model.layers[il].wo, NULL,