git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
server : don't overfill the batch during infill (#10018)
author     Georgi Gerganov <redacted>
           Mon, 28 Oct 2024 06:49:32 +0000 (08:49 +0200)
committer  GitHub <redacted>
           Mon, 28 Oct 2024 06:49:32 +0000 (08:49 +0200)
ggml-ci

examples/server/server.cpp
examples/server/utils.hpp

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index ff1d9b03cec5d92e7f50843648dad8e012ef8cdc..077c7ad1adffbe935173d739846d4bae1e7c1b91 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -1880,6 +1880,7 @@ struct server_context {
                     if (slot.state == SLOT_STATE_STARTED) {
                         slot.t_start_process_prompt = ggml_time_us();
                         slot.t_start_generation = 0;
+
                         slot.n_past = 0;
                         slot.n_prompt_tokens = prompt_tokens.size();
                         slot.state = SLOT_STATE_PROCESSING_PROMPT;
diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index 81124206241851e1df27a03706f1bdc0b8841cad..562635555e0abaf5fc2f304332ed0f7f66249cd8 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -266,8 +266,10 @@ static llama_tokens format_infill(
     }
 
     // for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?)
-    const int n_suffix_take = std::min<int>(tokens_suffix.size(),   (n_batch/4));
-    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4) - 3);
+    const int n_prefix_take = std::min<int>(tokens_prefix.size(),                3*(n_batch/4));
+    const int n_suffix_take = std::min<int>(tokens_suffix.size(), std::max<int>(0, (n_batch/4) - (2 + tokens_prompt.size())));
+
+    SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take, (n_prefix_take + n_suffix_take));
 
     // fill the rest of the context with extra chunks
     const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size());