tools : fix uninitialized llama_batch in server (#13436)
author      Anthony Umfer <redacted>
Sun, 11 May 2025 15:08:26 +0000 (11:08 -0400)
committer   GitHub <redacted>
Sun, 11 May 2025 15:08:26 +0000 (17:08 +0200)
* add constructor to initialize server_context::batch, preventing destructor's call to llama_batch_free from causing an invalid free()

* Update tools/server/server.cpp

Co-authored-by: Xuan-Son Nguyen <redacted>
* use C++11 initializer syntax

* switch from Copy-list-initialization to Direct-list-initialization

---------

Co-authored-by: Xuan-Son Nguyen <redacted>
tools/server/server.cpp

index de8ded71fd6adb91ea09396777045ccd463787ea..7169ffdceebf9a43accd37468cd76b80e28e10fb 100644
@@ -1862,7 +1862,7 @@ struct server_context {
 
     llama_context_params cparams_dft;
 
-    llama_batch batch;
+    llama_batch batch {};
 
     bool clean_kv_cache = true;
     bool add_bos_token  = true;
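
For context, here is a minimal C++ sketch of the failure mode this commit fixes and why the one-character change works. The struct and function names (toy_batch, toy_batch_free, toy_context) are illustrative stand-ins, not the real llama.cpp definitions; the only facts taken from the commit are that server_context's destructor calls llama_batch_free on its batch member, and that llama_batch holds pointers that this call frees.

    #include <cstdlib>

    // Stand-in for llama_batch: raw pointers that a matching free function releases.
    struct toy_batch {
        int   *token;
        float *embd;
    };

    // Stand-in for llama_batch_free: calling free() on an indeterminate
    // (never-initialized) pointer is undefined behaviour -- the "invalid free".
    static void toy_batch_free(toy_batch &b) {
        free(b.token);
        free(b.embd);
    }

    // Stand-in for server_context.
    struct toy_context {
        // Direct-list-initialization, mirroring `llama_batch batch {};` in the diff:
        // value-initializes every member, so the pointers start as nullptr.
        toy_batch batch {};

        ~toy_context() {
            toy_batch_free(batch);   // free(nullptr) is a well-defined no-op
        }
    };

    int main() {
        toy_context ctx;   // destructor runs even if batch is never populated
        return 0;
    }

Without the `{}`, `batch` would be default-initialized and its pointer members would hold garbage, so a server_context that is destroyed before the batch is set up would hand garbage pointers to llama_batch_free. The commit's later switch from copy-list-initialization (`= {}`) to direct-list-initialization (`{}`) does not change this behaviour for an aggregate like this; it is a style choice.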