git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama: fix missing k_cache store for rwkv6qwen2 (#11445)
author Molly Sophia <redacted>
Wed, 29 Jan 2025 04:07:21 +0000 (12:07 +0800)
committer GitHub <redacted>
Wed, 29 Jan 2025 04:07:21 +0000 (12:07 +0800)
Signed-off-by: Molly Sophia <redacted>
src/llama.cpp

index 12e8f41fc8614951a392bd41690a0c0f13e39a25..192b20a27e5cab43a81d612b6f811f0f99524783 100644 (file)
@@ -7700,17 +7700,13 @@ struct llm_build_context {
                 1
             );
 
+            struct ggml_tensor * last_norm_att = ggml_view_3d(ctx0, x_norm_att, n_embd, 1, n_seqs, x_norm_att->nb[1], x_norm_att->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(x_norm_att));
             ggml_build_forward_expand(
                 gf,
                 ggml_cpy(
                     ctx0,
-                    wkv_states,
-                    ggml_view_1d(
-                        ctx0,
-                        kv_self.v_l[il],
-                        hparams.n_embd_v_s() * n_seqs,
-                        hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self.v_l[il])
-                    )
+                    ggml_view_1d(ctx0, last_norm_att, n_embd * n_seqs, 0),
+                    ggml_view_1d(ctx0, kv_self.k_l[il], hparams.n_embd_k_s() * n_seqs, hparams.n_embd_k_s() * kv_head * ggml_element_size(kv_self.k_l[il]))
                 )
             );