server : fix warmup draft cache type (#12446)
author    Georgi Gerganov <redacted>
          Tue, 18 Mar 2025 10:05:42 +0000 (12:05 +0200)
committer GitHub <redacted>
          Tue, 18 Mar 2025 10:05:42 +0000 (12:05 +0200)
ggml-ci
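
This change moves the F16 KV-cache override for the draft model from the derived llama context params (cparams_dft), which are only built after initialization, up into the common params (params_dft) that common_init_from_params() consumes. The warmup decode performed inside that call therefore already runs with the F16 cache types; previously the override was applied only afterwards, so the warmup used the default cache type.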

examples/server/server.cpp

index 71e053b202cd2fcdd8af5d77a526aaf65a9852e1..c2f1afeca450db555c4dbae4a570891f0a79a16e 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -1872,6 +1872,10 @@ struct server_context {
             params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers;
             params_dft.n_parallel   = 1;
 
+            // force F16 KV cache for the draft model for extra performance
+            params_dft.cache_type_k = GGML_TYPE_F16;
+            params_dft.cache_type_v = GGML_TYPE_F16;
+
             llama_init_dft = common_init_from_params(params_dft);
 
             model_dft = llama_init_dft.model.get();
@@ -1892,10 +1896,6 @@ struct server_context {
             cparams_dft = common_context_params_to_llama(params_dft);
             cparams_dft.n_batch = n_ctx_dft;
 
-            // force F16 KV cache for the draft model for extra performance
-            cparams_dft.type_k = GGML_TYPE_F16;
-            cparams_dft.type_v = GGML_TYPE_F16;
-
             // the context is not needed - we will create one for each slot
             llama_init_dft.context.reset();
         }
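
For reference, a minimal sketch of the corrected ordering, assuming the llama.cpp common API that appears in the diff (common_params, common_init_from_params, common_context_params_to_llama); the init_draft_model helper is hypothetical and omits the server's other draft-model setup:

// a minimal sketch (not the actual server code) of the fixed ordering
#include "common.h" // common_params, common_init_from_params, common_context_params_to_llama

static common_init_result init_draft_model(const common_params & params_base) {
    common_params params_dft = params_base;
    // (the real server also copies the draft model path, context size, etc.)

    params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers;
    params_dft.n_parallel   = 1;

    // set the KV cache types BEFORE initialization: the warmup decode that
    // common_init_from_params() performs then already uses the F16 cache
    params_dft.cache_type_k = GGML_TYPE_F16;
    params_dft.cache_type_v = GGML_TYPE_F16;

    common_init_result llama_init_dft = common_init_from_params(params_dft);

    // context params derived afterwards inherit the F16 cache types, so the
    // per-slot contexts created from them need no manual type_k/type_v override
    llama_context_params cparams_dft = common_context_params_to_llama(params_dft);
    (void) cparams_dft; // kept by the real server to create one context per slot

    return llama_init_dft;
}

Setting the types on params_dft is sufficient because common_context_params_to_llama() copies cache_type_k/cache_type_v into the resulting llama_context_params, so every context created later inherits them.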