git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
server : remove swa_full warning (#15399) upstream/latest
author Georgi Gerganov <redacted>
Tue, 19 Aug 2025 05:45:26 +0000 (08:45 +0300)
committer GitHub <redacted>
Tue, 19 Aug 2025 05:45:26 +0000 (08:45 +0300)
src/llama-context.cpp

index 7d7abad5d4a2dd94ce53038c1ecd5e6b2ed3b2a4..1ebfc88ab651a48f5698ab445dae9e4186e886ac 100644 (file)
@@ -145,11 +145,6 @@ llama_context::llama_context(
                 __func__, n_ctx_per_seq, hparams.n_ctx_train);
     }
 
-    if (!params.swa_full && cparams.n_seq_max > 1 && hparams.is_swa_any()) {
-        LLAMA_LOG_WARN("%s: requested n_seq_max (%u) > 1, but swa_full is not enabled -- performance may be degraded: %s\n",
-                __func__, cparams.n_seq_max, "https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573");
-    }
-
     if (!hparams.vocab_only) {
         // GPU backends
         for (auto * dev : model.devices) {