git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Allow "quantizing" to f16 and f32 (#1787)
author Kerfuffle <redacted>
Tue, 13 Jun 2023 10:23:23 +0000 (04:23 -0600)
committer GitHub <redacted>
Tue, 13 Jun 2023 10:23:23 +0000 (04:23 -0600)
* Allow "quantizing" to f16 and f32

Fix an issue where quantizing didn't respect LLAMA_NO_K_QUANTS

Add brief help to the list of quantization types in the quantize tool

Ignore case for quantization type arguments in the quantize tool

Makefile
examples/quantize/quantize.cpp
ggml.c
llama.cpp

index 39ebfd04825da1d4bd2fee6180044638b4ad6ad9..9a08d610b220728a6ff390d490967e4562a21383 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -127,6 +127,7 @@ endif
 
 ifndef LLAMA_NO_K_QUANTS
        CFLAGS   += -DGGML_USE_K_QUANTS
+       CXXFLAGS += -DGGML_USE_K_QUANTS
        OBJS     += k_quants.o
 endif
 
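The CXXFLAGS addition matters because llama.cpp and the quantize tool are compiled as C++: a define added only to CFLAGS never reaches those translation units, so their GGML_USE_K_QUANTS guards compile out even when k_quants.o is built, and a LLAMA_NO_K_QUANTS build cannot be told apart from a regular one. A minimal, hypothetical probe (not part of this commit) showing the effect:

// kquants_probe.cpp - hypothetical probe, not part of this commit.
// g++ -DGGML_USE_K_QUANTS kquants_probe.cpp && ./a.out   -> "k-quants enabled"
// g++ kquants_probe.cpp && ./a.out                       -> "k-quants disabled"
#include <cstdio>

int main() {
#ifdef GGML_USE_K_QUANTS
    std::printf("k-quants enabled\n");
#else
    std::printf("k-quants disabled\n");
#endif
    return 0;
}
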
index c6bf1b72362bc79932016892f3dd235d8ed90220..4e8e6f5239c051c8ae16ce8e28f1a00a0b8b0fb9 100644 (file)
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
 
 #include <cstdio>
 #include <cstring>
-#include <map>
+#include <vector>
 #include <string>
 
-static const std::map<std::string, llama_ftype> LLAMA_FTYPE_MAP = {
-  {"q4_0",   LLAMA_FTYPE_MOSTLY_Q4_0},
-  {"q4_1",   LLAMA_FTYPE_MOSTLY_Q4_1},
-  {"q5_0",   LLAMA_FTYPE_MOSTLY_Q5_0},
-  {"q5_1",   LLAMA_FTYPE_MOSTLY_Q5_1},
-  {"q8_0",   LLAMA_FTYPE_MOSTLY_Q8_0},
-  {"q2_K",   LLAMA_FTYPE_MOSTLY_Q2_K},
-  {"q3_K",   LLAMA_FTYPE_MOSTLY_Q3_K_M},
-  {"q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S},
-  {"q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M},
-  {"q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L},
-  {"q4_K",   LLAMA_FTYPE_MOSTLY_Q4_K_M},
-  {"q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S},
-  {"q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M},
-  {"q5_K",   LLAMA_FTYPE_MOSTLY_Q5_K_M},
-  {"q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S},
-  {"q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M},
-  {"q6_K",   LLAMA_FTYPE_MOSTLY_Q6_K},
+struct quant_option {
+    std::string name;
+    llama_ftype ftype;
+    std::string desc;
 };
 
-bool try_parse_ftype(const std::string & ftype_str, llama_ftype & ftype, std::string & ftype_str_out) {
-    auto it = LLAMA_FTYPE_MAP.find(ftype_str);
-    if (it != LLAMA_FTYPE_MAP.end()) {
-        ftype = it->second;
-        ftype_str_out = it->first;
-        return true;
+static const std::vector<struct quant_option> QUANT_OPTIONS = {
+    {
+        "Q4_0",
+        LLAMA_FTYPE_MOSTLY_Q4_0,
+        " 3.50G, +0.2499 ppl @ 7B - small, very high quality loss - legacy, prefer using Q3_K_M",
+    },
+    {
+        "Q4_1",
+        LLAMA_FTYPE_MOSTLY_Q4_1,
+        " 3.90G, +0.1846 ppl @ 7B - small, substantial quality loss - legacy, prefer using Q3_K_L",
+    },
+    {
+        "Q5_0",
+        LLAMA_FTYPE_MOSTLY_Q5_0,
+        " 4.30G, +0.0796 ppl @ 7B - medium, balanced quality - legacy, prefer using Q4_K_M",
+    },
+    {
+        "Q5_1",
+        LLAMA_FTYPE_MOSTLY_Q5_1,
+        " 4.70G, +0.0415 ppl @ 7B - medium, low quality loss - legacy, prefer using Q5_K_M",
+    },
+#ifdef GGML_USE_K_QUANTS
+    {
+        "Q2_K",
+        LLAMA_FTYPE_MOSTLY_Q2_K,
+        " 2.67G, +0.8698 ppl @ 7B - smallest, extreme quality loss - not recommended",
+    },
+    {
+        "Q3_K",
+        LLAMA_FTYPE_MOSTLY_Q3_K_M,
+        "alias for Q3_K_M"
+    },
+    {
+        "Q3_K_S",
+        LLAMA_FTYPE_MOSTLY_Q3_K_S,
+        " 2.75G, +0.5505 ppl @ 7B - very small, very high quality loss",
+    },
+    {
+        "Q3_K_M",
+        LLAMA_FTYPE_MOSTLY_Q3_K_M,
+        " 3.06G, +0.2437 ppl @ 7B - very small, very high quality loss",
+    },
+    {
+        "Q3_K_L",
+        LLAMA_FTYPE_MOSTLY_Q3_K_L,
+        " 3.35G, +0.1803 ppl @ 7B - small, substantial quality loss",
+    },
+    {
+        "Q4_K",
+        LLAMA_FTYPE_MOSTLY_Q4_K_M,
+        "alias for Q4_K_M",
+    },
+    {
+        "Q4_K_S",
+        LLAMA_FTYPE_MOSTLY_Q4_K_S,
+        " 3.56G, +0.1149 ppl @ 7B - small, significant quality loss",
+    },
+    {
+        "Q4_K_M",
+        LLAMA_FTYPE_MOSTLY_Q4_K_M,
+        " 3.80G, +0.0535 ppl @ 7B - medium, balanced quality - *recommended*",
+    },
+    {
+        "Q5_K",
+        LLAMA_FTYPE_MOSTLY_Q5_K_M,
+        "alias for Q5_K_M",
+    },
+    {
+        "Q5_K_S",
+        LLAMA_FTYPE_MOSTLY_Q5_K_S,
+        " 4.33G, +0.0353 ppl @ 7B - large, low quality loss - *recommended*",
+    },
+    {
+        "Q5_K_M",
+        LLAMA_FTYPE_MOSTLY_Q5_K_M,
+        " 4.45G, +0.0142 ppl @ 7B - large, very low quality loss - *recommended*",
+    },
+    {
+        "Q6_K",
+        LLAMA_FTYPE_MOSTLY_Q6_K,
+        " 5.15G, +0.0044 ppl @ 7B - very large, extremely low quality loss",
+    },
+#endif
+    {
+        "Q8_0",
+        LLAMA_FTYPE_MOSTLY_Q8_0,
+        " 6.70G, +0.0004 ppl @ 7B - very large, extremely low quality loss - not recommended",
+    },
+    {
+        "F16",
+        LLAMA_FTYPE_MOSTLY_F16,
+        "13.00G              @ 7B - extremely large, virtually no quality loss - not recommended",
+    },
+    {
+        "F32",
+        LLAMA_FTYPE_ALL_F32,
+        "26.00G              @ 7B - absolutely huge, lossless - not recommended",
+    },
+};
+
+
+bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftype, std::string & ftype_str_out) {
+    std::string ftype_str;
+
+    for (auto ch : ftype_str_in) {
+        ftype_str.push_back(std::toupper(ch));
+    }
+    for (auto & it : QUANT_OPTIONS) {
+        if (it.name == ftype_str) {
+            ftype = it.ftype;
+            ftype_str_out = it.name;
+            return true;
+        }
     }
-    // try to parse as an integer
     try {
         int ftype_int = std::stoi(ftype_str);
-        for (auto it = LLAMA_FTYPE_MAP.begin(); it != LLAMA_FTYPE_MAP.end(); it++) {
-            if (it->second == ftype_int) {
-                ftype = it->second;
-                ftype_str_out = it->first;
+        for (auto & it : QUANT_OPTIONS) {
+            if (it.ftype == ftype_int) {
+                ftype = it.ftype;
+                ftype_str_out = it.name;
                 return true;
             }
         }
@@ -52,15 +144,15 @@ bool try_parse_ftype(const std::string & ftype_str, llama_ftype & ftype, std::st
 }
 
 // usage:
-//  ./quantize models/llama/ggml-model.bin [models/llama/ggml-model-quant.bin] type [nthreads]
+//  ./quantize [--allow-requantize] [--leave-output-tensor] models/llama/ggml-model.bin [models/llama/ggml-model-quant.bin] type [nthreads]
 //
 void usage(const char * executable) {
-    fprintf(stderr, "usage: %s [--help] [--allow-requantize] [--leave-output-tensor] model-f32.bin [model-quant.bin] type [nthreads]\n", executable);
+    fprintf(stderr, "usage: %s [--help] [--allow-requantize] [--leave-output-tensor] model-f32.bin [model-quant.bin] type [nthreads]\n\n", executable);
     fprintf(stderr, "  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     fprintf(stderr, "  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
-    fprintf(stderr, "Allowed quantization types:\n");
-    for (auto it = LLAMA_FTYPE_MAP.begin(); it != LLAMA_FTYPE_MAP.end(); it++) {
-        fprintf(stderr, "  type = \"%s\" or %d\n", it->first.c_str(), it->second);
+    fprintf(stderr, "\nAllowed quantization types:\n");
+    for (auto & it : QUANT_OPTIONS) {
+        printf("  %2d  or  %-6s : %s\n", it.ftype, it.name.c_str(), it.desc.c_str());
     }
     exit(1);
 }
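For illustration only, here is a self-contained sketch of the lookup strategy the new try_parse_ftype uses: uppercase the argument, match it against the table by name, then fall back to interpreting it as a numeric ftype id. The tiny table and ids below are hypothetical stand-ins, not the tool's full QUANT_OPTIONS.

// parse_sketch.cpp - illustrative stand-in for the case-insensitive lookup above.
#include <cctype>
#include <cstdio>
#include <string>
#include <vector>

struct option { std::string name; int id; };

// Hypothetical subset; the real tool maps names to llama_ftype values.
static const std::vector<option> kOptions = {
    {"Q4_0", 2}, {"Q8_0", 7}, {"F16", 1}, {"F32", 0},
};

static bool parse_type(const std::string & in, int & id_out, std::string & name_out) {
    std::string s;
    for (unsigned char ch : in) {
        s.push_back((char) std::toupper(ch));    // case-insensitive name match
    }
    for (const auto & opt : kOptions) {
        if (opt.name == s) { id_out = opt.id; name_out = opt.name; return true; }
    }
    try {                                        // numeric fallback, e.g. "7"
        const int id = std::stoi(s);
        for (const auto & opt : kOptions) {
            if (opt.id == id) { id_out = opt.id; name_out = opt.name; return true; }
        }
    } catch (...) {
        // not a number either
    }
    return false;
}

int main() {
    int id; std::string name;
    if (parse_type("q8_0", id, name)) {
        std::printf("matched %s (id %d)\n", name.c_str(), id);
    }
    return 0;
}
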
diff --git a/ggml.c b/ggml.c
index a13de511527bccda45456856fe6b5435410e3e0c..252edd582c0a0e8442455a534874a3b46be28d67 100644 (file)
--- a/ggml.c
+++ b/ggml.c
@@ -16301,6 +16301,18 @@ size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, i
                 result = ggml_quantize_q6_K(src + start, block, n, n, hist);
             } break;
 #endif
+        case GGML_TYPE_F16:
+            {
+                int elemsize = sizeof(ggml_fp16_t);
+                ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
+                result = n * elemsize;
+            } break;
+        case GGML_TYPE_F32:
+            {
+                int elemsize = sizeof(float);
+                result = n * elemsize;
+                memcpy((uint8_t *)dst + start * elemsize, src + start, result);
+            } break;
         default:
             assert(false);
     }
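As a quick check of the new pass-through paths, a hedged usage sketch (assuming this tree's ggml.h and the signature shown in the hunk header above): for GGML_TYPE_F16 the "quantization" is just a row conversion, so the returned size is n * sizeof(ggml_fp16_t).

// f16_chunk_demo.cpp - illustrative sketch, link against this tree's ggml.
#include "ggml.h"
#include <cstdio>
#include <vector>

int main() {
    const int n = 64;
    std::vector<float> src(n, 1.0f);
    std::vector<ggml_fp16_t> dst(n);
    int64_t hist[16] = {0};  // histogram buffer; required by the API, untouched by the F16/F32 cases

    size_t bytes = ggml_quantize_chunk(GGML_TYPE_F16, src.data(), dst.data(), /*start*/ 0, n, hist);
    std::printf("wrote %zu bytes (expected %zu)\n", bytes, n * sizeof(ggml_fp16_t));
    return 0;
}
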
index f0f9124d8dafd758298df9b797d7c0eb9a425a71..c7a3336426f13f2f11e1cf15d8912a50b7403e18 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -2298,7 +2298,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
         case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
         case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
+        case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
+        case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
 
+#ifdef GGML_USE_K_QUANTS
         // K-quants
         case LLAMA_FTYPE_MOSTLY_Q2_K:   quantized_type = GGML_TYPE_Q2_K; break;
         case LLAMA_FTYPE_MOSTLY_Q3_K_S:
@@ -2309,6 +2312,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         case LLAMA_FTYPE_MOSTLY_Q5_K_S:
         case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
         case LLAMA_FTYPE_MOSTLY_Q6_K:   quantized_type = GGML_TYPE_Q6_K; break;
+#endif
         default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
     }
 
@@ -2320,6 +2324,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
                                                                             /*vocab_only*/ false));
     llama_file_saver file_saver(fname_out.c_str(), model_loader->file_loaders.at(0).get(), params->ftype);
 
+#ifdef GGML_USE_K_QUANTS
     int n_attention_wv    = 0;
     int n_feed_forward_w2 = 0;
     for (auto& tensor : model_loader->tensors_map.tensors) {
@@ -2333,6 +2338,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
     int i_attention_wv = 0;
     int i_feed_forward_w2 = 0;
+#endif
 
     size_t total_size_org = 0;
     size_t total_size_new = 0;
@@ -2358,12 +2364,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
         // quantize only 2D tensors
         quantize &= (tensor.ne.size() == 2);
-
-        // uncomment this to keep the output layer in FP16
-        if (!params->quantize_output_tensor && tensor.name == "output.weight") {
-           quantize = false;
-        }
-        quantize = quantize && quantized_type != tensor.type;
+        quantize &= params->quantize_output_tensor || tensor.name != "output.weight";
+        quantize &= quantized_type != tensor.type;
 
         enum ggml_type new_type;
         void * new_data;
@@ -2377,29 +2379,28 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
         } else {
             new_type = quantized_type;
+#ifdef GGML_USE_K_QUANTS
             if (tensor.name == "output.weight") {
-                new_type = GGML_TYPE_Q6_K;
-            }
-            else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
+               new_type = GGML_TYPE_Q6_K;
+            } else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
                 if      (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                 else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
                 else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                          (i_attention_wv < n_attention_wv/8 || i_attention_wv >= 7*n_attention_wv/8 ||
                          (i_attention_wv - n_attention_wv/8)%3 == 2)) new_type = GGML_TYPE_Q6_K;
                 ++i_attention_wv;
-            }
-            if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
+            } else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
                 if      (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                 else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
                 else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                          (i_feed_forward_w2 < n_feed_forward_w2/8 || i_feed_forward_w2 >= 7*n_feed_forward_w2/8 ||
                          (i_feed_forward_w2 - n_feed_forward_w2/8)%3 == 2)) new_type = GGML_TYPE_Q6_K;
                 ++i_feed_forward_w2;
-            }
-            if (tensor.name.find("attention.wo.weight") != std::string::npos) {
+            } else if (tensor.name.find("attention.wo.weight") != std::string::npos) {
                 if      (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                 else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
             }
+#endif
 
             float * f32_data;
             size_t nelements = tensor.ne.at(0) * tensor.ne.at(1);