Avoid the transposed X branch in the Z = X * Y matrix multiplication (#439)
author     Georgi Gerganov <redacted>
           Thu, 23 Mar 2023 21:22:01 +0000 (23:22 +0200)
committer  GitHub <redacted>
           Thu, 23 Mar 2023 21:22:01 +0000 (23:22 +0200)
Should make results reproducible for different numbers of threads and batch sizes

llama.cpp

index 7de3c19c8dcdaae44cdc43f2ffde2d2175dcbc18..d55219256932a49c4f5f7a720719be39df3050c4 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -727,11 +727,13 @@ static bool llama_eval_internal(
 
             // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
             struct ggml_tensor * V_trans =
-                ggml_permute(ctx0,
-                        ggml_reshape_3d(ctx0,
-                            ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
-                            n_embd/n_head, n_head, n_past + N),
-                        1, 2, 0, 3);
+                ggml_cpy(ctx0,
+                    ggml_permute(ctx0,
+                            ggml_reshape_3d(ctx0,
+                                ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
+                                n_embd/n_head, n_head, n_past + N),
+                            1, 2, 0, 3),
+                    ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head));
 
             // KQV = transpose(V) * KQ_soft_max
             struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);
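
For context, here is a minimal, hedged sketch of the pattern this commit applies, written against the ggml API of this era (tensor names and shapes below are illustrative, not taken from the commit): ggml_permute only produces a strided view, so copying that view into a freshly allocated contiguous tensor with ggml_cpy lets ggml_mul_mat always receive a contiguous first operand instead of falling into the transposed-X branch.

// Sketch only: shapes and names are hypothetical; the ggml calls
// (ggml_permute, ggml_cpy, ggml_mul_mat) mirror the diff above.
#include <stdio.h>
#include "ggml.h"

int main(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
    };
    struct ggml_context * ctx = ggml_init(params);

    // X has ne = {8, 5} (8 columns, 5 rows in ggml's layout); we want Z = X^T * Y
    struct ggml_tensor * X = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 5);
    struct ggml_tensor * Y = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 5, 3);
    ggml_set_f32(X, 1.0f);
    ggml_set_f32(Y, 2.0f);

    // ggml_permute does not move any data - it returns a view with
    // swapped strides, so X_t is not contiguous in memory
    struct ggml_tensor * X_t = ggml_permute(ctx, X, 1, 0, 2, 3); // ne = {5, 8}

    // materialize the view into a contiguous tensor, mirroring the
    // ggml_cpy(..., ggml_new_tensor_3d(...)) call added in the diff
    struct ggml_tensor * X_t_cont = ggml_cpy(ctx, X_t,
            ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 5, 8));

    // ggml_mul_mat now sees a contiguous first operand and stays on its
    // regular code path - per the commit message, this is what makes the
    // results reproducible across thread counts and batch sizes
    struct ggml_tensor * Z = ggml_mul_mat(ctx, X_t_cont, Y); // ne = {8, 3}

    struct ggml_cgraph gf = ggml_build_forward(Z);
    gf.n_threads = 1;
    ggml_graph_compute(ctx, &gf);

    // every element of Z is a dot product of 5 ones with 5 twos
    printf("Z[0] = %f\n", ggml_get_f32_1d(Z, 0)); // expect 10.0

    ggml_free(ctx);
    return 0;
}

The same trade-off as in the diff applies here: the ggml_cpy adds one extra copy of the tensor per evaluation, in exchange for keeping ggml_mul_mat on a single, deterministic code path.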