git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
server : fix draft context not being released (#11354)
author Diego Devesa <redacted>
Wed, 22 Jan 2025 16:44:40 +0000 (17:44 +0100)
committer GitHub <redacted>
Wed, 22 Jan 2025 16:44:40 +0000 (17:44 +0100)
examples/server/server.cpp

index 412908aa80cafddbd9392e7cc56e7cb2925ae644..4cfb3c9bbd7d0f0e9e4e83d7f9c55a79b3b7e794 100644 (file)
@@ -1772,6 +1772,9 @@ struct server_context {
             // force F16 KV cache for the draft model for extra performance
             cparams_dft.type_k = GGML_TYPE_F16;
             cparams_dft.type_v = GGML_TYPE_F16;
+
+            // the context is not needed - we will create one for each slot
+            llama_init_dft.context.reset();
         }
 
         chat_templates = common_chat_templates_from_model(model, params_base.chat_template);