git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
model : support output bias for qwen2 (#14711)
author: tempstudio <redacted>
Wed, 16 Jul 2025 15:02:06 +0000 (10:02 -0500)
committer: GitHub <redacted>
Wed, 16 Jul 2025 15:02:06 +0000 (18:02 +0300)
Co-authored-by: qwaqrm <redacted>
src/llama-model.cpp

index 67cae69579fdb50dc57d9ebac1e6b9f126b8a379..9d8a686e0a571a122313e82461c1105178273a72 100644 (file)
@@ -2692,6 +2692,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     // output
                     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                     output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                    output_b    = create_tensor(tn(LLM_TENSOR_OUTPUT,      "bias"),   {n_vocab}, TENSOR_NOT_REQUIRED);
                     // if output is NULL, init from the input tok embed
                     if (output == NULL) {
                         output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
@@ -7765,6 +7766,10 @@ struct llm_build_qwen2 : public llm_graph_context {
         // lm_head
         cur = build_lora_mm(model.output, cur);
 
+        if (model.output_b != nullptr) {
+            cur = ggml_add(ctx0, cur, model.output_b);
+        }
+
         cb(cur, "result_output", -1);
         res->t_logits = cur;