git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : support tie embedding for chatglm models (#13328)
author: piDack <redacted>
Wed, 7 May 2025 07:23:11 +0000 (15:23 +0800)
committer: GitHub <redacted>
Wed, 7 May 2025 07:23:11 +0000 (09:23 +0200)
src/llama-model.cpp

index 8d25070f42f7792bd856bfbd56246b4ba4deef5e..774e343fb1f179403f8b3d47b2695dad0dd464ca 100644 (file)
@@ -3510,7 +3510,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
                     // output
                     output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
+                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
 
                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];