git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : use aligned memory during ggml_init call from loading saved sessions (#1934)
author l3utterfly <redacted>
Mon, 19 Jun 2023 15:20:06 +0000 (23:20 +0800)
committer GitHub <redacted>
Mon, 19 Jun 2023 15:20:06 +0000 (18:20 +0300)
* fixed issue: memory is not guaranteed to be aligned properly during ggml_init call from loading saved sessions

* - removed commented out old code from fix
- updated another instance of same issue below original

llama.cpp

index dad31cbcb14a401caffc142f0677b5f0aee776c3..4a7d01b3297b22aeead3209c671461ebedf3ba01 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -3126,9 +3126,7 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
         if (kv_size) {
             const size_t elt_size = ggml_element_size(kv_self.k);
 
-            char buffer[4096];
-
-            ggml_context * cpy_ctx = ggml_init({ sizeof(buffer), buffer, /* no_alloc */ true });
+            ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
             ggml_cgraph gf{};
             gf.n_threads = 1;
 
@@ -3234,9 +3232,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
 
             const size_t elt_size = ggml_element_size(kv_self.k);
 
-            char buffer[4096];
-
-            ggml_context * cpy_ctx = ggml_init({ sizeof(buffer), buffer, /* no_alloc */ true });
+            ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
             ggml_cgraph gf{};
             gf.n_threads = 1;