From: piDack
Date: Wed, 7 May 2025 07:23:11 +0000 (+0800)
Subject: llama : support tie embedding for chatglm models (#13328)
X-Git-Tag: upstream/0.0.5318~19
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=6c7fd67b647a76846d1691cd181011dff4549d02;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : support tie embedding for chatglm models (#13328)
---

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 8d25070f..774e343f 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -3510,7 +3510,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
                     // output
                     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
 
                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];
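
For context, the change makes the dedicated `output` (LM head) tensor optional and, when it is absent from the model file, reuses the token-embedding tensor in its place, i.e. weight tying. Below is a minimal, self-contained C++ sketch of that fallback pattern under assumed names: the `Tensor`/`TensorMap` types and the `find_tensor()` helper are hypothetical stand-ins, not llama.cpp API.

```cpp
// Sketch of the tied-embedding fallback (illustrative only; Tensor, TensorMap
// and find_tensor() are hypothetical, not part of llama.cpp).
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Tensor {
    std::vector<float> data; // n_vocab * n_embd values, row-major
};

using TensorMap = std::map<std::string, Tensor>;

// Return the tensor if present, nullptr otherwise
// (analogous to loading with TENSOR_NOT_REQUIRED).
static const Tensor * find_tensor(const TensorMap & tensors, const std::string & name) {
    auto it = tensors.find(name);
    return it == tensors.end() ? nullptr : &it->second;
}

int main() {
    TensorMap tensors;
    tensors["token_embd.weight"] = Tensor{std::vector<float>(8 * 4, 0.1f)};
    // note: no "output.weight" entry -> the model ships tied embeddings

    // try the dedicated LM-head weight first
    const Tensor * output = find_tensor(tensors, "output.weight");

    // fallback: reuse the input token embedding as the output projection
    // (the same idea as the TENSOR_DUPLICATED branch in the diff above)
    if (output == nullptr) {
        output = find_tensor(tensors, "token_embd.weight");
        std::printf("output.weight missing, tying to token_embd.weight\n");
    }

    std::printf("output tensor has %zu elements\n", output ? output->data.size() : (size_t) 0);
    return 0;
}
```

The practical effect for ChatGLM-style checkpoints is that GGUF files without a separate `output.weight` load correctly instead of failing with a missing-tensor error, since the logits projection shares weights with `token_embd.weight`.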