git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
server : fix SWA condition for full context reprocess (#14163)
author: Georgi Gerganov <redacted>
Fri, 13 Jun 2025 08:18:25 +0000 (11:18 +0300)
committer: GitHub <redacted>
Fri, 13 Jun 2025 08:18:25 +0000 (11:18 +0300)
ggml-ci

tools/server/server.cpp

index 8efb8b704141f73d025e00ff24adbf5801c90718..b439d8b19dda3a97e30b562d468e4803bfe5f25e 100644 (file)
@@ -3217,7 +3217,7 @@ struct server_context {
                                 }
 
                                 const auto n_swa = llama_model_n_swa(model);
-                                if (pos_min > slot.n_past - n_swa) {
+                                if (pos_min > std::max(0, slot.n_past - n_swa)) {
                                     SLT_WRN(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d, n_swa = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min, n_swa);
                                     SLT_WRN(slot, "forcing full prompt re-processing due to lack of cache data (likely due to SWA, see %s)\n",
                                             "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055");