git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : gemma3 : use output tensor if it exists in model weight (#12506)
author    Xuan-Son Nguyen <redacted>
Sat, 22 Mar 2025 22:28:19 +0000 (23:28 +0100)
committer GitHub <redacted>
Sat, 22 Mar 2025 22:28:19 +0000 (23:28 +0100)
* llama : gemma3 : use output tensor if it exists in model weight

* also add it to the LLM_TENSOR_NAMES map
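
For context, the new loader logic in src/llama-model.cpp below first asks for a dedicated output.weight tensor (TENSOR_NOT_REQUIRED makes create_tensor return NULL instead of failing when the weight is absent) and only falls back to duplicating the token embeddings when none is found. A quick way to check which path a given Gemma 3 GGUF will take is to list its tensor names with gguf-py (the Python package changed in this commit); the following is a minimal sketch, assuming gguf-py's GGUFReader API and a placeholder model path:

    # Sketch: does this GGUF ship a dedicated output head?
    # Assumes gguf-py's GGUFReader; "gemma3.gguf" is a placeholder path.
    from gguf import GGUFReader

    reader = GGUFReader("gemma3.gguf")
    names  = {t.name for t in reader.tensors}   # tensor names stored in the file

    if "output.weight" in names:
        print("dedicated output tensor found: the loader uses it directly")
    else:
        print("no output tensor: the loader duplicates token_embd.weight")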

gguf-py/gguf/constants.py
src/llama-arch.cpp
src/llama-model.cpp

diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index cc48913d9789d5e1ae5d5d7b8441ee74f51c5a99..13cca7ab009bf7f442a66e86b7bb43c482a0cc59 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -1113,6 +1113,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
     ],
     MODEL_ARCH.GEMMA3: [
         MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT,
         MODEL_TENSOR.OUTPUT_NORM,
         MODEL_TENSOR.ATTN_Q,
         MODEL_TENSOR.ATTN_Q_NORM,
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index 9debb56cc80d543c1b13e2e05fba11f1e7d6c233..8664f8963cc185877c8c79f521fbfb02f7aadb9f 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -778,6 +778,7 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
         {
             { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
             { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+            { LLM_TENSOR_OUTPUT,          "output" },
             { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
             { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
             { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 26ac5e99bfc7a6cba1c07d9533f418d3485b859a..0ae754154b0699dbdde74415b35816f243e471d0 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -2571,7 +2571,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
                     // output
                     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,   "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
 
                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];
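
A note on the fallback path: per the comment removed above, the tied case duplicates token_embd (TENSOR_DUPLICATED) rather than aliasing it, so the copy serving as the output head can be offloaded independently of the input embedding lookup; GGUFs converted from checkpoints with a genuinely untied lm_head now load their own output.weight instead.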