llama : only use Q6_K for output weights if tensor size is multiple of 256 (#1932)
author      Kawrakow <redacted>
            Mon, 19 Jun 2023 15:17:03 +0000 (18:17 +0300)
committer   GitHub <redacted>
            Mon, 19 Jun 2023 15:17:03 +0000 (18:17 +0300)
* Only use Q6_K for output weights if tensor size is multiple of 256

* Fixed copy/paste mistake

---------

Co-authored-by: Iwan Kawrakow <redacted>
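
Note (not part of the commit): a minimal standalone sketch of the rule this change introduces. It assumes the k-quant super-block size QK_K is 256, as stated in the commit title; `can_use_k_quant` is a hypothetical helper mirroring the divisibility check the diff adds before selecting Q6_K for `output.weight`.

```cpp
#include <cstdint>
#include <cstdio>

constexpr int QK_K = 256; // k-quant super-block size

// Hypothetical helper: a 2D tensor of shape nx x ny may only be quantized
// with a k-quant type (e.g. Q6_K) if both dimensions are multiples of QK_K.
static bool can_use_k_quant(int64_t nx, int64_t ny) {
    return nx % QK_K == 0 && ny % QK_K == 0;
}

int main() {
    printf("%d\n", can_use_k_quant(32000, 4096)); // 1: both dims divisible by 256
    printf("%d\n", can_use_k_quant(32001, 4096)); // 0: odd vocab size, keep the fallback type
    return 0;
}
```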
llama.cpp

index 5401db00ecfccf49bd0bbb4748a5f5267690dbb8..dad31cbcb14a401caffc142f0677b5f0aee776c3 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2495,7 +2495,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             if (quantized_type == GGML_TYPE_Q2_K || quantized_type == GGML_TYPE_Q3_K || quantized_type == GGML_TYPE_Q4_K ||
                 quantized_type == GGML_TYPE_Q5_K || quantized_type == GGML_TYPE_Q6_K) {
                 int nx = tensor.ne.at(0);
-                int ny = tensor.ne.at(0);
+                int ny = tensor.ne.at(1);
                 if (nx % QK_K != 0 || ny % QK_K != 0) {
                     fprintf(stderr, "\n\n========================= Tensor sizes %d x %d are not divisible by %d\n",nx,ny,QK_K);
                     fprintf(stderr, "This is required to be able to use k-quants for now!\n");
@@ -2504,7 +2504,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
                 }
             }
             if (tensor.name == "output.weight") {
-               new_type = GGML_TYPE_Q6_K;
+                int nx = tensor.ne.at(0);
+                int ny = tensor.ne.at(1);
+                if (nx % QK_K == 0 && ny % QK_K == 0) {
+                    new_type = GGML_TYPE_Q6_K;
+                }
             } else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
                 if      (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                 else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;