server : enable -td and -tbd parameters (#15172)
author     Sigbjørn Skjæret <redacted>
           Wed, 13 Aug 2025 13:43:00 +0000 (15:43 +0200)
committer  GitHub <redacted>
           Wed, 13 Aug 2025 13:43:00 +0000 (15:43 +0200)
common/arg.cpp
tools/server/server.cpp

diff --git a/common/arg.cpp b/common/arg.cpp
index 4e4c52b5f8748db54cc5dc244d5fe2d3a8c72ed7..20f72cd724c5302fb889c29ed1088422bc9961cf 100644
@@ -3163,7 +3163,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
                 params.speculative.cpuparams.n_threads = std::thread::hardware_concurrency();
             }
         }
-    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
+    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"-tbd", "--threads-batch-draft"}, "N",
         "number of threads to use during batch and prompt processing (default: same as --threads-draft)",
@@ -3173,7 +3173,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
                 params.speculative.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
             }
         }
-    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
+    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"-Cd", "--cpu-mask-draft"}, "M",
         "Draft model CPU affinity mask. Complements cpu-range-draft (default: same as --cpu-mask)",
diff --git a/tools/server/server.cpp b/tools/server/server.cpp
index f549cda476657bbb018388749a5ab66e8662172b..20fc784a8217b50a1fa47b5c0532629d458bfa09 100644
@@ -2015,6 +2015,8 @@ struct server_context {
             params_dft.cache_type_k = params_base.speculative.cache_type_k;
             params_dft.cache_type_v = params_base.speculative.cache_type_v;
 
+            params_dft.cpuparams.n_threads = params_base.speculative.cpuparams.n_threads;
+            params_dft.cpuparams_batch.n_threads = params_base.speculative.cpuparams_batch.n_threads;
             params_dft.tensor_buft_overrides = params_base.speculative.tensor_buft_overrides;
 
             llama_init_dft = common_init_from_params(params_dft);
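The server-side change copies the draft-specific thread counts from the parsed speculative settings into the parameters used to initialize the draft model, immediately before common_init_from_params(params_dft) is called; otherwise the draft context would keep the main model's thread settings. A rough sketch of the same data flow, using hypothetical simplified structs in place of the real common_params layout:

    #include <cstdio>

    // Hypothetical stand-ins for the relevant parts of common_params.
    struct cpu_params_t { int n_threads = -1; };

    struct params_t {
        cpu_params_t cpuparams;        // generation threads
        cpu_params_t cpuparams_batch;  // batch / prompt-processing threads
        struct { cpu_params_t cpuparams, cpuparams_batch; } speculative;
    };

    int main() {
        params_t params_base;
        params_base.speculative.cpuparams.n_threads       = 4;  // e.g. from -td 4
        params_base.speculative.cpuparams_batch.n_threads = 8;  // e.g. from -tbd 8

        // Mirrors the added lines: the draft params start as a copy of the
        // base params, then the draft thread counts are copied over before
        // the draft model is initialized.
        params_t params_dft = params_base;
        params_dft.cpuparams.n_threads       = params_base.speculative.cpuparams.n_threads;
        params_dft.cpuparams_batch.n_threads = params_base.speculative.cpuparams_batch.n_threads;

        std::printf("draft gen threads: %d, draft batch threads: %d\n",
                    params_dft.cpuparams.n_threads,
                    params_dft.cpuparams_batch.n_threads);
        return 0;
    }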