From: Xuan-Son Nguyen
Date: Tue, 16 Dec 2025 18:07:43 +0000 (+0100)
Subject: model: fix LFM2 missing tensors (#18105)
X-Git-Tag: upstream/0.0.7446~8
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=ef83fb8601229ff650d952985be47e82d644bfaa;p=pkg%2Fggml%2Fsources%2Fllama.cpp

model: fix LFM2 missing tensors (#18105)
---

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index ae8207ee..c9a3c5df 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -6236,8 +6236,8 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
             {
                 tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

-                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM_LFM2, "weight"), {n_embd}, 0);
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT,           "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);

                 if (output == NULL) {
                     output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
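
Editor's sketch (not part of the commit): a minimal, self-contained illustration of the optional-tensor pattern the hunk touches, where the output head is looked up with TENSOR_NOT_REQUIRED and, if absent from the model file, falls back to a duplicated token-embedding tensor (tied embeddings). The tensor name strings, shapes, flag values, and the create_tensor stand-in below are assumptions for illustration, not llama.cpp's actual API.

// Minimal illustration (assumed names/flags, not llama.cpp's API) of the
// "optional output tensor with tied-embedding fallback" pattern above.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

enum tensor_flags { TENSOR_REQUIRED = 0, TENSOR_NOT_REQUIRED = 1, TENSOR_DUPLICATED = 2 };

struct tensor {
    std::string      name;
    std::vector<int> shape;
};

// Pretend checkpoint contents: an embedding table and its norm, but no
// separate LM head ("output.weight"), as in a tied-embedding model.
static const std::map<std::string, tensor> g_file = {
    { "token_embd.weight",      { "token_embd.weight",      { 2048, 65536 } } },
    { "token_embd_norm.weight", { "token_embd_norm.weight", { 2048 } } },
};

// Simplified stand-in for a tensor loader: a missing tensor is only an
// error when it was not marked as optional.
static const tensor * create_tensor(const std::string & name, int flags) {
    const auto it = g_file.find(name);
    if (it == g_file.end()) {
        if (flags & TENSOR_NOT_REQUIRED) {
            return nullptr; // optional tensor is absent, caller decides on a fallback
        }
        std::fprintf(stderr, "missing required tensor: %s\n", name.c_str());
        return nullptr;
    }
    return &it->second;
}

int main() {
    // Same shape as the hunk: the norm is required, the head is optional,
    // and a missing head is replaced by the (duplicated) embedding table.
    const tensor * output_norm = create_tensor("token_embd_norm.weight", TENSOR_REQUIRED);
    const tensor * output      = create_tensor("output.weight",          TENSOR_NOT_REQUIRED);
    if (output == nullptr) {
        output = create_tensor("token_embd.weight", TENSOR_DUPLICATED);
    }

    std::printf("output_norm -> %s\n", output_norm ? output_norm->name.c_str() : "(missing)");
    std::printf("output      -> %s\n", output      ? output->name.c_str()      : "(missing)");
    return 0;
}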