Reset schedule earlier to allow overlap with ggml graph computation on device (#6933)
author    agray3 <redacted>
Fri, 26 Apr 2024 18:08:30 +0000 (19:08 +0100)
committer GitHub <redacted>
Fri, 26 Apr 2024 18:08:30 +0000 (20:08 +0200)
* Reset schedule earlier to allow overlap with graph computation on device

ggml-backend.c
llama.cpp

index e91d97cd9dcfce0bb234b2881d6276cf3b4c685f..f5bdcf07838aa45b224a900e1d6b4daf62ffb1c4 100644 (file)
--- a/ggml-backend.c
+++ b/ggml-backend.c
@@ -1784,12 +1784,14 @@ void ggml_backend_sched_free(ggml_backend_sched_t sched) {
 
 void ggml_backend_sched_reset(ggml_backend_sched_t sched) {
     // reset state for the next run
-    size_t hash_size = sched->hash_set.size;
-    memset(sched->hash_set.keys,      0, sizeof(sched->hash_set.keys[0])     * hash_size); // NOLINT
-    memset(sched->tensor_backend_id, -1, sizeof(sched->tensor_backend_id[0]) * hash_size);
-    memset(sched->tensor_copies,      0, sizeof(sched->tensor_copies[0])     * hash_size);
+    if (!sched->is_reset) {
+        size_t hash_size = sched->hash_set.size;
+        memset(sched->hash_set.keys,      0, sizeof(sched->hash_set.keys[0])     * hash_size); // NOLINT
+        memset(sched->tensor_backend_id, -1, sizeof(sched->tensor_backend_id[0]) * hash_size);
+        memset(sched->tensor_copies,      0, sizeof(sched->tensor_copies[0])     * hash_size);
 
-    sched->is_reset = true;
+        sched->is_reset = true;
+    }
     sched->is_alloc = false;
 }
 
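A note on the ggml-backend.c hunk above: it makes ggml_backend_sched_reset() idempotent. The hash-table memsets only run while the scheduler state is actually dirty (is_reset == false), so the extra reset call added in llama.cpp below is nearly free when the state is already clean. A minimal, self-contained sketch of that guard pattern follows; it uses a simplified stand-in struct, not the real ggml_backend_sched:

    #include <stdbool.h>
    #include <string.h>

    struct sched_state {
        int  tensor_backend_id[4096];
        bool is_reset;   // true while the cached state is already clean
        bool is_alloc;
    };

    static void sched_reset(struct sched_state * s) {
        if (!s->is_reset) {  // skip the expensive memset when nothing changed
            memset(s->tensor_backend_id, -1, sizeof(s->tensor_backend_id));
            s->is_reset = true;
        }
        s->is_alloc = false; // the allocation is invalidated unconditionally
    }

With this guard, a second reset before any new graph has been scheduled reduces to two flag writes.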
index dd8b1f2640e4cfc981e8b4d2300c7f3af0007c5d..49f2b559e965e4875b499ea73c781f15ccae4cd7 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -11473,6 +11473,10 @@ static int llama_decode_internal(
         }
     }
 
+    // Reset state for the next token before backend sync, to allow the CPU activities in the reset to
+    // overlap with device computation.
+    ggml_backend_sched_reset(lctx.sched);
+
     return 0;
 }
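Why the call site matters: ggml_backend_sched_reset() only touches host memory, while the device may still be executing the last graph asynchronously. Issuing the reset before the backend synchronize hides its cost behind the device work. Below is a minimal, self-contained sketch of that overlap, with a worker thread standing in for the device; the names are illustrative, not the ggml API:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int tensor_backend_id[4096];

    static void * device_graph_compute(void * arg) {
        (void) arg;
        usleep(1000);       // stand-in for asynchronous device-side graph work
        return NULL;
    }

    int main(void) {
        pthread_t device;
        pthread_create(&device, NULL, device_graph_compute, NULL); // async launch

        // host-side scheduler reset overlaps with the device computation
        memset(tensor_backend_id, -1, sizeof(tensor_backend_id));

        pthread_join(device, NULL); // synchronize only when results are needed
        printf("device done, scheduler already reset\n");
        return 0;
    }

Compile with -lpthread: the memset completes while the stand-in device thread is still busy, which is the window this commit aims to exploit in llama_decode_internal.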