From: Ren Xuancheng
Date: Thu, 18 Apr 2024 11:38:04 +0000 (+0800)
Subject: Qwen2 : assume tied weights if lm_head/output weights is missing (#6738)
X-Git-Tag: upstream/0.0.4488~1795
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=e11b2e6e1e18522ca7cf129600875a0f6fb9307d;p=pkg%2Fggml%2Fsources%2Fllama.cpp

Qwen2 : assume tied weights if lm_head/output weights is missing (#6738)
---

diff --git a/llama.cpp b/llama.cpp
index 8c144629..05094fec 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5184,7 +5184,13 @@ static bool llm_load_tensors(
                 // output
                 {
                     model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
-                    model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
+                    model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
+                    // if output is NULL, init from the input tok embed
+                    if (model.output == NULL) {
+                        model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+                        ml.n_created--; // artificial tensor
+                        ml.size_data += ggml_nbytes(model.output);
+                    }
                 }
 
                 for (int i = 0; i < n_layer; ++i) {
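
Editor's note: for readers less familiar with the loader, the standalone sketch below shows the tied-weights fallback this patch implements, in isolation: if a checkpoint ships no dedicated lm_head ("output.weight") tensor, the final projection simply reuses the token-embedding matrix. The Checkpoint, Model, and load_output_head names here are hypothetical stand-ins invented for this sketch, not llama.cpp API; only the fallback logic mirrors the patch.

// Minimal sketch of the tied-weights fallback (hypothetical types, not llama.cpp API).
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Tensor {
    std::string name;
    std::vector<float> data; // n_embd * n_vocab floats, row-major
};

// A toy checkpoint: tensor name -> tensor. "output.weight" may be absent.
using Checkpoint = std::map<std::string, Tensor>;

struct Model {
    const Tensor * tok_embd = nullptr; // "token_embd.weight", always present
    const Tensor * output   = nullptr; // lm_head; may alias tok_embd when tied
};

// Load the output head, falling back to the embedding matrix when the
// checkpoint omits it (i.e. the weights are tied).
static bool load_output_head(const Checkpoint & ckpt, Model & model) {
    auto emb = ckpt.find("token_embd.weight");
    if (emb == ckpt.end()) {
        return false; // embeddings are mandatory
    }
    model.tok_embd = &emb->second;

    auto out = ckpt.find("output.weight");
    if (out != ckpt.end()) {
        model.output = &out->second;   // dedicated lm_head present
    } else {
        model.output = model.tok_embd; // tied weights: reuse the embedding
        std::printf("output.weight missing, tying to token_embd.weight\n");
    }
    return true;
}

int main() {
    Checkpoint ckpt;
    ckpt["token_embd.weight"] = Tensor{"token_embd.weight", std::vector<float>(8)};
    // No "output.weight": simulates a Qwen2 checkpoint with tied weights.

    Model model;
    if (load_output_head(ckpt, model)) {
        std::printf("output aliases tok_embd: %s\n",
                    model.output == model.tok_embd ? "yes" : "no");
    }
    return 0;
}

In the actual patch, the extra bookkeeping (ml.n_created--; ml.size_data += ggml_nbytes(model.output);) appears to keep the loader's created-tensor count and byte accounting consistent after token_embd.weight is registered a second time; the in-diff comment calls this duplicate an "artificial tensor".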