From: Xuan-Son Nguyen
Date: Sat, 22 Mar 2025 22:28:19 +0000 (+0100)
Subject: llama : gemma3 : use output tensor if it exists in model weight (#12506)
X-Git-Tag: upstream/0.0.5028~86
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=fbdfefe74e736f1a3687283c25ac21b11ba07b2e;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : gemma3 : use output tensor if it exists in model weight (#12506)

* llama : gemma3 : use output tensor if it exists in model weight

* also add to the llm_tensor_names
---

diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index cc48913d..13cca7ab 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -1113,6 +1113,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
     ],
     MODEL_ARCH.GEMMA3: [
         MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT,
         MODEL_TENSOR.OUTPUT_NORM,
         MODEL_TENSOR.ATTN_Q,
         MODEL_TENSOR.ATTN_Q_NORM,
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index 9debb56c..8664f896 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -778,6 +778,7 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
         {
             { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT,      "output" },
             { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
             { LLM_TENSOR_ATTN_Q,      "blk.%d.attn_q" },
             { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 26ac5e99..0ae75415 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -2571,7 +2571,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
                     // output
                     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
 
                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];
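
For reference, a minimal sketch of how one could check whether a converted Gemma 3 GGUF actually ships the dedicated "output.weight" head that this patch teaches the loader to pick up. It uses GGUFReader from the gguf-py package in this repo; the model path is hypothetical and the printed messages merely paraphrase the loader behavior above.

    # check_gemma3_output.py -- sketch, assumes gguf-py is installed
    from gguf import GGUFReader

    MODEL_PATH = "gemma3.gguf"  # hypothetical path to a converted Gemma 3 model

    reader = GGUFReader(MODEL_PATH)
    tensor_names = {t.name for t in reader.tensors}

    if "output.weight" in tensor_names:
        # separate output head present: the loader uses it directly
        print("model has a dedicated output tensor")
    else:
        # no output head: the loader falls back to token_embd.weight (TENSOR_DUPLICATED)
        print("model relies on tied token embeddings")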