git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
gemma : allow offloading the output tensor (#5646)
author slaren <redacted>
Wed, 21 Feb 2024 21:18:23 +0000 (22:18 +0100)
committer GitHub <redacted>
Wed, 21 Feb 2024 21:18:23 +0000 (22:18 +0100)
llama.cpp

index 3a226c4260c0b1841dad490e9d81c1a061bdf836..4054d5da63fc4491dffbbf4acd78dce3ed3006d6 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -4394,6 +4394,8 @@ static bool llm_load_tensors(
 
                     // output
                     model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+                    model.output      = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}); // same as tok_embd, duplicated to allow offloading
+                    ml.n_created--; // artificial tensor
 
                     const int64_t n_ff          = hparams.n_ff;
                     const int64_t n_embd_head_k = hparams.n_embd_head_k;
@@ -7525,7 +7527,7 @@ struct llm_build_context {
         cb(cur, "result_norm", -1);
 
         // lm_head
-        cur = ggml_mul_mat(ctx0, model.tok_embd, cur);
+        cur = ggml_mul_mat(ctx0, model.output, cur);
         cb(cur, "result_output", -1);
 
         ggml_build_forward_expand(gf, cur);