From: Xuan-Son Nguyen
Date: Tue, 18 Mar 2025 16:24:33 +0000 (+0100)
Subject: llama : add support for EXAONE tied word embeddings (#12451)
X-Git-Tag: upstream/0.0.5028~113
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=99aa304fb900654ec338749f64e62895b9a88afd;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : add support for EXAONE tied word embeddings (#12451)
---

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index c571aa69..9171585b 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -3264,7 +3264,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
                     // output
                     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
 
                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];
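
Background (a sketch, not part of the commit): "tied" word embeddings means the
model ships no separate output projection (lm_head); the token embedding matrix
is reused to produce the output logits. The change above therefore loads
LLM_TENSOR_OUTPUT as optional (TENSOR_NOT_REQUIRED) and, when it is absent from
the GGUF file, duplicates LLM_TENSOR_TOKEN_EMBD in its place. A minimal
standalone illustration of the idea, with hypothetical names and plain C++
rather than the ggml API:

    // Hypothetical standalone example; not llama.cpp code.
    // With tied embeddings, logits[v] = <row v of tok_embd, hidden>,
    // i.e. the embedding matrix doubles as the output projection.
    #include <cstddef>
    #include <vector>

    std::vector<float> tied_logits(const std::vector<float> & tok_embd, // n_vocab * n_embd, row-major
                                   const std::vector<float> & hidden,   // n_embd
                                   size_t n_vocab, size_t n_embd) {
        std::vector<float> logits(n_vocab, 0.0f);
        for (size_t v = 0; v < n_vocab; ++v) {
            for (size_t e = 0; e < n_embd; ++e) {
                logits[v] += tok_embd[v * n_embd + e] * hidden[e];
            }
        }
        return logits;
    }

Tying saves the n_vocab * n_embd parameters of a separate output matrix, which
is why such models legitimately omit the output tensor and the loader must fall
back to the token embedding rather than fail.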