git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : use Q4_K for attn_v for Q2_K_S when n_gqa >= 4 (#4996)
author Kawrakow <redacted>
Wed, 17 Jan 2024 10:36:37 +0000 (12:36 +0200)
committer GitHub <redacted>
Wed, 17 Jan 2024 10:36:37 +0000 (12:36 +0200)
Co-authored-by: Iwan Kawrakow <redacted>
llama.cpp

index 765d20ddb639a6217a519d44153e9c097028b82e..2c5983c67f67197f131ca0a51ef88192c9324d85 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -8477,7 +8477,12 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty
         }
         else if (name == "token_embd.weight") new_type = GGML_TYPE_Q2_K;
     } else if (name.find("attn_v.weight") != std::string::npos) {
-        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
+            new_type = GGML_TYPE_Q4_K;
+        }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
             new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
         }