git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Fix crash for 65B model with pre-allocated memory (#485)
author: Chris Kuehl <redacted>
Sat, 25 Mar 2023 04:38:14 +0000 (23:38 -0500)
committer: GitHub <redacted>
Sat, 25 Mar 2023 04:38:14 +0000 (06:38 +0200)
llama.cpp

index 9d48ccd4c79e38318d3dea3e8e938ca4f652df45..447fa91f3190b3c995a9bcaba03a9e08f23903c6 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -239,7 +239,7 @@ static bool kv_cache_init(
     const int n_mem      = n_layer*n_ctx;
     const int n_elements = n_embd*n_mem;
 
-    cache.buf.resize(2*n_elements*ggml_type_size(wtype) + 2u*MB);
+    cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
 
     struct ggml_init_params params;
     params.mem_size   = cache.buf.size();