git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
(revert) kv-cache : do not quantize SWA KV cache (#21332)
authorGeorgi Gerganov <redacted>
Fri, 3 Apr 2026 06:07:01 +0000 (09:07 +0300)
committerGitHub <redacted>
Fri, 3 Apr 2026 06:07:01 +0000 (09:07 +0300)
This reverts commit 17193cce34036a6488b092ca79313d4ee1f895f5.

src/llama-kv-cache-iswa.cpp

index 15b3fe16e8a33782d047ee4598e97df304584bf9..26e2cb4270b08771d0569d0f25ca5b6761aef677 100644 (file)
@@ -66,9 +66,8 @@ llama_kv_cache_iswa::llama_kv_cache_iswa(
 
     LLAMA_LOG_INFO("%s: creating     SWA KV cache, size = %u cells\n", __func__, size_swa);
 
-    // note: the SWA cache is never quantized because it is relatively small
     kv_swa = std::make_unique<llama_kv_cache>(
-            model, GGML_TYPE_F16, GGML_TYPE_F16,
+            model, type_k, type_v,
             v_trans, offload, unified, size_swa, n_seq_max, n_pad,
             hparams.n_swa, hparams.swa_type, filter_swa, reuse);
 }