From: Georgi Gerganov
Date: Mon, 5 Jun 2023 07:19:03 +0000 (+0300)
Subject: llama : fix Metal KV cache sync (close #1695)
X-Git-Tag: gguf-v0.4.0~692
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=d1f563a743a83dabc11e125d4a7d64189c16498c;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : fix Metal KV cache sync (close #1695)
---

diff --git a/llama.cpp b/llama.cpp
index bc58ad96..69bfdc1a 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1455,6 +1455,14 @@ static bool llama_eval_internal(
         // When we implement Matrix x Matrix Metal multiplication, we can avoid this branch.
         // But for now, we have focused only on Matrix x Vector Metal multiplication.
         //
+        // TODO: avoid these syncs via shared memory (ref #1696)
+        //
+        if (lctx.ctx_metal) {
+            // We need to sync the GPU KV cache with the CPU KV cache
+            ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k);
+            ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v);
+        }
+
         ggml_graph_compute(ctx0, &gf);
 
         if (lctx.ctx_metal) {
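
For context, a minimal sketch of the full sync pattern this fix completes, pulled out into a standalone helper. The helper name eval_graph_with_kv_sync and its parameter list are hypothetical; the ggml_metal_get_tensor / ggml_metal_graph_compute / ggml_graph_compute calls are the ggml API of this era, and the post-compute push-back via ggml_metal_set_tensor is assumed from the `if (lctx.ctx_metal) {` context line that closes the hunk.

    // Sketch only - assumes a GGML_USE_METAL build of ggml from this era.
    #include "ggml.h"
    #include "ggml-metal.h"

    static void eval_graph_with_kv_sync(
            struct ggml_context       * ctx0,      // CPU compute context
            struct ggml_cgraph        * gf,        // graph to evaluate
            struct ggml_metal_context * ctx_metal, // NULL when Metal is off
            struct ggml_tensor        * kv_k,      // KV cache keys
            struct ggml_tensor        * kv_v,      // KV cache values
            int                         n_tokens) {
        if (ctx_metal && n_tokens == 1) {
            // Matrix x Vector case: the whole graph runs on the GPU, so the
            // GPU-side KV cache is already up to date (fetching the graph's
            // output tensor back to the host is omitted from this sketch)
            ggml_metal_graph_compute(ctx_metal, gf);
            return;
        }

        if (ctx_metal) {
            // the fix: pull the GPU-resident KV cache into host memory
            // before the CPU fallback reads (and appends to) it
            ggml_metal_get_tensor(ctx_metal, kv_k);
            ggml_metal_get_tensor(ctx_metal, kv_v);
        }

        ggml_graph_compute(ctx0, gf);

        if (ctx_metal) {
            // push the CPU-written KV entries back so the next
            // Metal-evaluated token sees a consistent cache
            ggml_metal_set_tensor(ctx_metal, kv_k);
            ggml_metal_set_tensor(ctx_metal, kv_v);
        }
    }

The get/set pair brackets the CPU compute so the two copies of the KV cache stay coherent; the TODO in the hunk notes that backing the cache with shared memory (ref #1696) would remove the need for both syncs.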