From: Georgi Gerganov
Date: Sat, 22 Apr 2023 12:49:15 +0000 (+0300)
Subject: examples : refactor quantization tools
X-Git-Tag: upstream/0.0.1642~1527
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=bbb44f966825c2f37e0b9f7677f9972b186b1897;p=pkg%2Fggml%2Fsources%2Fggml

examples : refactor quantization tools
---

diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 96df1396..f7519cf6 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -2,6 +2,7 @@ add_library(common STATIC common.cpp)
 target_include_directories(common PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
 
 add_library(common-ggml STATIC common-ggml.cpp)
+target_link_libraries(common-ggml PRIVATE ggml)
 target_include_directories(common-ggml PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
 
 add_subdirectory(gpt-2)
diff --git a/examples/common-ggml.cpp b/examples/common-ggml.cpp
index a6e41443..60aaedaf 100644
--- a/examples/common-ggml.cpp
+++ b/examples/common-ggml.cpp
@@ -1 +1,192 @@
 #include "common-ggml.h"
+
+#include "ggml.h"
+
+#include <regex>
+
+bool ggml_common_quantize_0(
+        std::ifstream & finp,
+        std::ofstream & fout,
+        const ggml_mtype mtype,
+        const std::vector<std::string> & to_quant,
+        const std::vector<std::string> & to_skip) {
+
+    ggml_type qtype = GGML_TYPE_F32;
+
+    switch (mtype) {
+        case 2: qtype = GGML_TYPE_Q4_0; break;
+        case 3: qtype = GGML_TYPE_Q4_1; break;
+        case 5: qtype = GGML_TYPE_Q4_2; break;
+        case 6: qtype = GGML_TYPE_Q4_3; break;
+        default:
+            {
+                fprintf(stderr, "%s: invalid model type %d\n", __func__, mtype);
+                return false;
+            }
+    };
+
+    if (!ggml_is_quantized(qtype)) {
+        fprintf(stderr, "%s: invalid quantization type %d (%s)\n", __func__, qtype, ggml_type_name(qtype));
+        return false;
+    }
+
+    size_t total_size_org = 0;
+    size_t total_size_new = 0;
+
+    std::vector<ggml_fp16_t> work;
+
+    std::vector<uint8_t>     data_u8;
+    std::vector<ggml_fp16_t> data_f16;
+    std::vector<float>       data_f32;
+
+    std::vector<int64_t> hist_all(1 << 4, 0);
+
+    while (true) {
+        int32_t n_dims;
+        int32_t length;
+        int32_t ttype;
+
+        finp.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
+        finp.read(reinterpret_cast<char *>(&length), sizeof(length));
+        finp.read(reinterpret_cast<char *>(&ttype),  sizeof(ttype));
+
+        if (finp.eof()) {
+            break;
+        }
+
+        int32_t nelements = 1;
+        int32_t ne[2] = { 1, 1 };
+        for (int i = 0; i < n_dims; ++i) {
+            finp.read (reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
+            nelements *= ne[i];
+        }
+
+        std::string name(length, 0);
+        finp.read (&name[0], length);
+
+        printf("%64s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ggml_type_name((ggml_type) ttype));
+
+        bool quantize = false;
+
+        // check if we should quantize this tensor
+        for (const auto & s : to_quant) {
+            if (std::regex_match(name, std::regex(s))) {
+                quantize = true;
+                break;
+            }
+        }
+
+        // check if we should skip this tensor
+        for (const auto & s : to_skip) {
+            if (std::regex_match(name, std::regex(s))) {
+                quantize = false;
+                break;
+            }
+        }
+
+        // quantize only 2D tensors
+        quantize &= (n_dims == 2);
+
+        if (quantize) {
+            if (ttype != GGML_TYPE_F32 && ttype != GGML_TYPE_F16) {
+                fprintf(stderr, "%s: unsupported ttype %d (%s) for integer quantization\n", __func__, ttype, ggml_type_name((ggml_type) ttype));
+                return false;
+            }
+
+            if (ttype == GGML_TYPE_F16) {
+                data_f16.resize(nelements);
+                finp.read(reinterpret_cast<char *>(data_f16.data()), nelements * sizeof(ggml_fp16_t));
+                data_f32.resize(nelements);
+                for (int i = 0; i < nelements; ++i) {
+                    data_f32[i] = ggml_fp16_to_fp32(data_f16[i]);
+                }
+            } else {
+                data_f32.resize(nelements);
+                finp.read(reinterpret_cast<char *>(data_f32.data()), nelements * sizeof(float));
+            }
+
+            ttype = qtype;
+        } else {
+            const int bpe = (ttype == 0) ? sizeof(float) : sizeof(uint16_t);
+
+            data_u8.resize(nelements*bpe);
+            finp.read(reinterpret_cast<char *>(data_u8.data()), nelements * bpe);
+        }
+
+        fout.write(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
+        fout.write(reinterpret_cast<char *>(&length), sizeof(length));
+        fout.write(reinterpret_cast<char *>(&ttype),  sizeof(ttype));
+        for (int i = 0; i < n_dims; ++i) {
+            fout.write(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
+        }
+        fout.write(&name[0], length);
+
+        if (quantize) {
+            work.resize(nelements); // for quantization
+
+            size_t cur_size = 0;
+            std::vector<int64_t> hist_cur(1 << 4, 0);
+
+            switch (ttype) {
+                case GGML_TYPE_Q4_0:
+                    {
+                        cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+                    } break;
+                case GGML_TYPE_Q4_1:
+                    {
+                        cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+                    } break;
+                case GGML_TYPE_Q4_2:
+                    {
+                        cur_size = ggml_quantize_q4_2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+                    } break;
+                case GGML_TYPE_Q4_3:
+                    {
+                        cur_size = ggml_quantize_q4_3(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+                    } break;
+                default:
+                    {
+                        fprintf(stderr, "%s: unsupported quantization type %d (%s)\n", __func__, ttype, ggml_type_name((ggml_type) ttype));
+                        return false;
+                    }
+            }
+
+            fout.write(reinterpret_cast<char *>(work.data()), cur_size);
+            total_size_new += cur_size;
+
+            printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
+            for (int i = 0; i < hist_cur.size(); ++i) {
+                hist_all[i] += hist_cur[i];
+            }
+
+            for (int i = 0; i < hist_cur.size(); ++i) {
+                printf("%5.3f ", hist_cur[i] / (float)nelements);
+            }
+            printf("\n");
+        } else {
+            printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
+            fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
+            total_size_new += data_u8.size();
+        }
+
+        total_size_org += nelements * sizeof(float);
+    }
+
+    printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
+    printf("%s: quant size = %8.2f MB | mtype = %d (%s)\n", __func__, total_size_new/1024.0/1024.0, mtype, ggml_type_name(qtype));
+
+    {
+        int64_t sum_all = 0;
+        for (int i = 0; i < hist_all.size(); ++i) {
+            sum_all += hist_all[i];
+        }
+
+        printf("%s: hist: ", __func__);
+        for (int i = 0; i < hist_all.size(); ++i) {
+            printf("%5.3f ", hist_all[i] / (float)sum_all);
+        }
+        printf("\n");
+    }
+
+    return true;
+}
diff --git a/examples/common-ggml.h b/examples/common-ggml.h
index 3f59c932..da839bf9 100644
--- a/examples/common-ggml.h
+++ b/examples/common-ggml.h
@@ -1,2 +1,23 @@
 #pragma once
 
+#include <fstream>
+#include <vector>
+#include <string>
+
+// model file types
+enum ggml_mtype {
+    GGML_MTYPE_ALL_F32              = 0,
+    GGML_MTYPE_MOSTLY_F16           = 1, // except 1d tensors
+    GGML_MTYPE_MOSTLY_Q4_0          = 2, // except 1d tensors
+    GGML_MTYPE_MOSTLY_Q4_1          = 3, // except 1d tensors
+    GGML_MTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
+    GGML_MTYPE_MOSTLY_Q4_2          = 5, // except 1d tensors
+    GGML_MTYPE_MOSTLY_Q4_3          = 6, // except 1d tensors
+};
+
+bool ggml_common_quantize_0(
+        std::ifstream & finp,
+        std::ofstream & fout,
+        const ggml_mtype mtype,
+        const std::vector<std::string> & to_quant,
+        const std::vector<std::string> & to_skip);
diff --git a/examples/gpt-2/CMakeLists.txt b/examples/gpt-2/CMakeLists.txt
index 2624ae60..c1917c13 100644
--- a/examples/gpt-2/CMakeLists.txt
+++ b/examples/gpt-2/CMakeLists.txt
@@ -10,4 +10,4 @@ target_link_libraries(${TEST_TARGET} PRIVATE ggml common)
 
 set(TEST_TARGET gpt-2-quantize)
 add_executable(${TEST_TARGET} quantize.cpp)
-target_link_libraries(${TEST_TARGET} PRIVATE ggml common)
+target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml)
diff --git a/examples/gpt-2/main.cpp b/examples/gpt-2/main.cpp
index 60975383..519f901a 100644
--- a/examples/gpt-2/main.cpp
+++ b/examples/gpt-2/main.cpp
@@ -307,22 +307,20 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab &
     while (true) {
         int32_t n_dims;
         int32_t length;
-        int32_t ftype;
+        int32_t ttype;
 
         fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
         fin.read(reinterpret_cast<char *>(&length), sizeof(length));
-        fin.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
+        fin.read(reinterpret_cast<char *>(&ttype),  sizeof(ttype));
 
         if (fin.eof()) {
             break;
         }
 
         int32_t nelements = 1;
-        int64_t ne[2] = { 1, 1 };
+        int32_t ne[2] = { 1, 1 };
         for (int i = 0; i < n_dims; ++i) {
-            int32_t ne_cur;
-            fin.read(reinterpret_cast<char *>(&ne_cur), sizeof(ne_cur));
-            ne[i] = ne_cur;
+            fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
             nelements *= ne[i];
         }
 
@@ -341,31 +339,17 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab &
         }
 
         if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
-            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%lld, %lld], expected [%lld, %lld]\n",
-                    __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
+            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
+                    __func__, name.data(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]);
             return false;
         }
 
+        // for debugging
         if (0) {
-            static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", "q4_2", "q4_3" };
-            printf("%24s - [%5lld, %5lld], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ftype_str[ftype], ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
+            printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
         }
 
-        size_t bpe = 0;
-
-        switch (ftype) {
-            case 0: bpe = ggml_type_size(GGML_TYPE_F32);  break;
-            case 1: bpe = ggml_type_size(GGML_TYPE_F16);  break;
-            case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
-            case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
-            case 5: bpe = ggml_type_size(GGML_TYPE_Q4_2); assert(ne[0] % 64 == 0); break;
-            case 6: bpe = ggml_type_size(GGML_TYPE_Q4_3); assert(ne[0] % 64 == 0); break;
-            default:
-                    {
-                        fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
-                        return false;
-                    }
-        };
+        const size_t bpe = ggml_type_size(ggml_type(ttype));
 
         if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
             fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
diff --git a/examples/gpt-2/quantize.cpp b/examples/gpt-2/quantize.cpp
index 28867d72..f456a396 100644
--- a/examples/gpt-2/quantize.cpp
+++ b/examples/gpt-2/quantize.cpp
@@ -1,6 +1,7 @@
 #include "ggml/ggml.h"
 
 #include "common.h"
+#include "common-ggml.h"
 
 #include <cassert>
 #include <cmath>
@@ -23,22 +24,7 @@ struct gpt2_hparams {
 };
 
 // quantize a model
-bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype) {
-    ggml_type type = GGML_TYPE_Q4_1;
-
-    switch (itype) {
-        case 2: type = GGML_TYPE_Q4_0; break;
-        case 3: type = GGML_TYPE_Q4_1; break;
-        case 5: type = GGML_TYPE_Q4_2; break;
-        case 6: type = GGML_TYPE_Q4_3; break;
-        default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return 1;
type %d\n", __func__, itype); return 1; - }; - - if (!ggml_is_quantized(type)) { - fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type); - return false; - } - +bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_mtype mtype) { gpt_vocab vocab; printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str()); @@ -90,7 +76,7 @@ bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fnam fout.write((char *) &hparams.n_embd, sizeof(hparams.n_embd)); fout.write((char *) &hparams.n_head, sizeof(hparams.n_head)); fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer)); - fout.write((char *) &itype, sizeof(hparams.f16)); + fout.write((char *) &mtype, sizeof(hparams.f16)); } // load vocab @@ -120,166 +106,19 @@ bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fnam } } - // load weights - { - size_t total_size_org = 0; - size_t total_size_new = 0; - - std::vector work; - - std::vector data_u8; - std::vector data_f16; - std::vector data_f32; - - std::vector hist_all(1 << 4, 0); - - while (true) { - int32_t n_dims; - int32_t length; - int32_t ftype; - - finp.read(reinterpret_cast(&n_dims), sizeof(n_dims)); - finp.read(reinterpret_cast(&length), sizeof(length)); - finp.read(reinterpret_cast(&ftype), sizeof(ftype)); - - if (finp.eof()) { - break; - } - - int32_t nelements = 1; - int32_t ne[2] = { 1, 1 }; - for (int i = 0; i < n_dims; ++i) { - finp.read (reinterpret_cast(&ne[i]), sizeof(ne[i])); - nelements *= ne[i]; - } - - std::string name(length, 0); - finp.read (&name[0], length); - - { - static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", "q4_2", "q4_3" }; - printf("%24s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]); - } - - // regexes of tensor names to be quantized - const std::vector k_names = { - "model/wte", - "model/lm_head", - "model/h.*/attn/c_attn/w", - "model/h.*/attn/c_proj/w", - "model/h.*/mlp/c_fc/w", - "model/h.*/mlp/c_proj/w", - }; - - bool quantize = false; - for (const auto & s : k_names) { - if (std::regex_match(name, std::regex(s))) { - quantize = true; - break; - } - } - - if (quantize) { - if (ftype != 0 && ftype != 1) { - fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype); - return false; - } - - if (ftype == 1) { - data_f16.resize(nelements); - finp.read(reinterpret_cast(data_f16.data()), nelements * sizeof(ggml_fp16_t)); - data_f32.resize(nelements); - for (int i = 0; i < nelements; ++i) { - data_f32[i] = ggml_fp16_to_fp32(data_f16[i]); - } - } else { - data_f32.resize(nelements); - finp.read(reinterpret_cast(data_f32.data()), nelements * sizeof(float)); - } - - ftype = itype; - } else { - const int bpe = (ftype == 0) ? sizeof(float) : sizeof(uint16_t); - - data_u8.resize(nelements*bpe); - finp.read(reinterpret_cast(data_u8.data()), nelements * bpe); - } - - fout.write(reinterpret_cast(&n_dims), sizeof(n_dims)); - fout.write(reinterpret_cast(&length), sizeof(length)); - fout.write(reinterpret_cast(&ftype), sizeof(ftype)); - for (int i = 0; i < n_dims; ++i) { - fout.write(reinterpret_cast(&ne[i]), sizeof(ne[i])); - } - fout.write(&name[0], length); - - if (quantize) { - printf("quantizing .. 
"); - work.resize(nelements); // for quantization - - size_t cur_size = 0; - std::vector hist_cur(1 << 4, 0); - - switch (type) { - case GGML_TYPE_Q4_0: - { - cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); - } break; - case GGML_TYPE_Q4_1: - { - cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); - } break; - case GGML_TYPE_Q4_2: - { - cur_size = ggml_quantize_q4_2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); - } break; - case GGML_TYPE_Q4_3: - { - cur_size = ggml_quantize_q4_3(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); - } break; - default: - { - fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type); - return false; - } - } - - fout.write(reinterpret_cast(work.data()), cur_size); - total_size_new += cur_size; - - printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0); - for (int i = 0; i < hist_cur.size(); ++i) { - hist_all[i] += hist_cur[i]; - } - - for (int i = 0; i < hist_cur.size(); ++i) { - printf("%5.3f ", hist_cur[i] / (float)nelements); - } - printf("\n"); - } else { - printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0); - fout.write(reinterpret_cast(data_u8.data()), data_u8.size()); - total_size_new += data_u8.size(); - } - - total_size_org += nelements * sizeof(float); - } - - printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0); - printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0); - - { - int64_t sum_all = 0; - for (int i = 0; i < hist_all.size(); ++i) { - sum_all += hist_all[i]; - } + // regexes of tensor names to be quantized + const std::vector to_quant = { + "model/wte", + "model/lm_head", + "model/h.*/attn/c_attn/w", + "model/h.*/attn/c_proj/w", + "model/h.*/mlp/c_fc/w", + "model/h.*/mlp/c_proj/w", + }; - printf("%s: hist: ", __func__); - for (int i = 0; i < hist_all.size(); ++i) { - printf("%5.3f ", hist_all[i] / (float)sum_all); - } - printf("\n"); - } + if (!ggml_common_quantize_0(finp, fout, mtype, to_quant, {})) { + fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str()); + return false; } finp.close(); @@ -311,7 +150,7 @@ int main(int argc, char ** argv) { const std::string fname_inp = argv[1]; const std::string fname_out = argv[2]; - const int itype = atoi(argv[3]); + const int mtype = atoi(argv[3]); const int64_t t_main_start_us = ggml_time_us(); @@ -321,7 +160,7 @@ int main(int argc, char ** argv) { { const int64_t t_start_us = ggml_time_us(); - if (!gpt2_model_quantize(fname_inp, fname_out, itype)) { + if (!gpt2_model_quantize(fname_inp, fname_out, ggml_mtype(mtype))) { fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); return 1; } diff --git a/examples/gpt-j/CMakeLists.txt b/examples/gpt-j/CMakeLists.txt index 195e813c..c8b5ecfe 100644 --- a/examples/gpt-j/CMakeLists.txt +++ b/examples/gpt-j/CMakeLists.txt @@ -10,4 +10,4 @@ target_link_libraries(${TEST_TARGET} PRIVATE ggml common) set(TEST_TARGET gpt-j-quantize) add_executable(${TEST_TARGET} quantize.cpp) -target_link_libraries(${TEST_TARGET} PRIVATE ggml common) +target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml) diff --git a/examples/gpt-j/main.cpp b/examples/gpt-j/main.cpp index 57e0caf5..eeca87fd 100644 --- a/examples/gpt-j/main.cpp +++ b/examples/gpt-j/main.cpp @@ -303,22 +303,20 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, 
     while (true) {
         int32_t n_dims;
         int32_t length;
-        int32_t ftype;
+        int32_t ttype;
 
         fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
         fin.read(reinterpret_cast<char *>(&length), sizeof(length));
-        fin.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
+        fin.read(reinterpret_cast<char *>(&ttype),  sizeof(ttype));
 
         if (fin.eof()) {
            break;
        }
 
-        int64_t nelements = 1;
-        int64_t ne[2] = { 1, 1 };
+        int32_t nelements = 1;
+        int32_t ne[2] = { 1, 1 };
         for (int i = 0; i < n_dims; ++i) {
-            int32_t ne_cur;
-            fin.read(reinterpret_cast<char *>(&ne_cur), sizeof(ne_cur));
-            ne[i] = ne_cur;
+            fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
             nelements *= ne[i];
         }
 
@@ -337,41 +335,27 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab &
         }
 
         if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
-            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%lld, %lld], expected [%lld, %lld]\n",
-                    __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
+            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
+                    __func__, name.data(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]);
             return false;
         }
 
+        // for debugging
         if (0) {
-            static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", "q4_2", "q4_3" };
-            printf("%24s - [%5lld, %5lld], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ftype_str[ftype], ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
+            printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
         }
 
-        size_t bpe = 0;
-
-        switch (ftype) {
-            case 0: bpe = ggml_type_size(GGML_TYPE_F32);  break;
-            case 1: bpe = ggml_type_size(GGML_TYPE_F16);  break;
-            case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
-            case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
-            case 5: bpe = ggml_type_size(GGML_TYPE_Q4_2); assert(ne[0] % 64 == 0); break;
-            case 6: bpe = ggml_type_size(GGML_TYPE_Q4_3); assert(ne[0] % 64 == 0); break;
-            default:
-                    {
-                        fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
-                        return false;
-                    }
-        };
+        const size_t bpe = ggml_type_size(ggml_type(ttype));
 
         if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
-            fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %llu\n",
+            fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
                     __func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
             return false;
         }
 
         fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
 
-        //printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
+        //printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ttype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
"float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0); total_size += ggml_nbytes(tensor); if (++n_tensors % 8 == 0) { printf("."); diff --git a/examples/gpt-j/quantize.cpp b/examples/gpt-j/quantize.cpp index e706f3a9..08ceabd6 100644 --- a/examples/gpt-j/quantize.cpp +++ b/examples/gpt-j/quantize.cpp @@ -1,6 +1,7 @@ #include "ggml/ggml.h" #include "common.h" +#include "common-ggml.h" #include #include @@ -24,22 +25,7 @@ struct gptj_hparams { }; // quantize a model -bool gptj_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype) { - ggml_type type = GGML_TYPE_Q4_1; - - switch (itype) { - case 2: type = GGML_TYPE_Q4_0; break; - case 3: type = GGML_TYPE_Q4_1; break; - case 5: type = GGML_TYPE_Q4_2; break; - case 6: type = GGML_TYPE_Q4_3; break; - default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return 1; - }; - - if (!ggml_is_quantized(type)) { - fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type); - return false; - } - +bool gptj_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_mtype mtype) { gpt_vocab vocab; printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str()); @@ -93,7 +79,7 @@ bool gptj_model_quantize(const std::string & fname_inp, const std::string & fnam fout.write((char *) &hparams.n_head, sizeof(hparams.n_head)); fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer)); fout.write((char *) &hparams.n_rot, sizeof(hparams.n_rot)); - fout.write((char *) &itype, sizeof(hparams.f16)); + fout.write((char *) &mtype, sizeof(hparams.f16)); } // load vocab @@ -123,164 +109,14 @@ bool gptj_model_quantize(const std::string & fname_inp, const std::string & fnam } } - // load weights - { - size_t total_size_org = 0; - size_t total_size_new = 0; - - std::vector work; - - std::vector data_u8; - std::vector data_f16; - std::vector data_f32; - - std::vector hist_all(1 << 4, 0); - - while (true) { - int32_t n_dims; - int32_t length; - int32_t ftype; - - finp.read(reinterpret_cast(&n_dims), sizeof(n_dims)); - finp.read(reinterpret_cast(&length), sizeof(length)); - finp.read(reinterpret_cast(&ftype), sizeof(ftype)); - - if (finp.eof()) { - break; - } - - int32_t nelements = 1; - int32_t ne[2] = { 1, 1 }; - for (int i = 0; i < n_dims; ++i) { - finp.read (reinterpret_cast(&ne[i]), sizeof(ne[i])); - nelements *= ne[i]; - } - - std::string name(length, 0); - finp.read (&name[0], length); - - { - static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", "q4_2", "q4_3" }; - printf("%48s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]); - } - - // regexes of tensor names to be quantized - const std::vector k_names = { - ".*weight", - }; - - bool quantize = false; - for (const auto & s : k_names) { - if (std::regex_match(name, std::regex(s))) { - quantize = true; - break; - } - } - - // quantize only 2D tensors - quantize &= (n_dims == 2); - - if (quantize) { - if (ftype != 0 && ftype != 1) { - fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype); - return false; - } - - if (ftype == 1) { - data_f16.resize(nelements); - finp.read(reinterpret_cast(data_f16.data()), nelements * sizeof(ggml_fp16_t)); - data_f32.resize(nelements); - for (int i = 0; i < nelements; ++i) { - data_f32[i] = ggml_fp16_to_fp32(data_f16[i]); - } - } else { - data_f32.resize(nelements); - finp.read(reinterpret_cast(data_f32.data()), nelements * sizeof(float)); - } - - ftype = itype; - } else { - const int bpe = (ftype == 0) ? 
-
-                data_u8.resize(nelements*bpe);
-                finp.read(reinterpret_cast<char *>(data_u8.data()), nelements * bpe);
-            }
-
-            fout.write(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
-            fout.write(reinterpret_cast<char *>(&length), sizeof(length));
-            fout.write(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
-            for (int i = 0; i < n_dims; ++i) {
-                fout.write(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
-            }
-            fout.write(&name[0], length);
-
-            if (quantize) {
-                printf("quantizing .. ");
-                work.resize(nelements); // for quantization
-
-                size_t cur_size = 0;
-                std::vector<int64_t> hist_cur(1 << 4, 0);
-
-                switch (type) {
-                    case GGML_TYPE_Q4_0:
-                        {
-                            cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_1:
-                        {
-                            cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_2:
-                        {
-                            cur_size = ggml_quantize_q4_2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_3:
-                        {
-                            cur_size = ggml_quantize_q4_3(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    default:
-                        {
-                            fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type);
-                            return false;
-                        }
-                }
-
-                fout.write(reinterpret_cast<char *>(work.data()), cur_size);
-                total_size_new += cur_size;
-
-                printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
-                for (int i = 0; i < hist_cur.size(); ++i) {
-                    hist_all[i] += hist_cur[i];
-                }
-
-                for (int i = 0; i < hist_cur.size(); ++i) {
-                    printf("%5.3f ", hist_cur[i] / (float)nelements);
-                }
-                printf("\n");
-            } else {
-                printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
-                fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
-                total_size_new += data_u8.size();
-            }
-
-            total_size_org += nelements * sizeof(float);
-        }
-
-        printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
-        printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
-
-        {
-            int64_t sum_all = 0;
-            for (int i = 0; i < hist_all.size(); ++i) {
-                sum_all += hist_all[i];
-            }
+    // regexes of tensor names to be quantized
+    const std::vector<std::string> to_quant = {
+        ".*weight",
+    };
 
-            printf("%s: hist: ", __func__);
-            for (int i = 0; i < hist_all.size(); ++i) {
-                printf("%5.3f ", hist_all[i] / (float)sum_all);
-            }
-            printf("\n");
-        }
+    if (!ggml_common_quantize_0(finp, fout, mtype, to_quant, {})) {
+        fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str());
+        return false;
     }
 
     finp.close();
@@ -312,7 +148,7 @@ int main(int argc, char ** argv) {
     const std::string fname_inp = argv[1];
     const std::string fname_out = argv[2];
 
-    const int itype = atoi(argv[3]);
+    const int mtype = atoi(argv[3]);
 
     const int64_t t_main_start_us = ggml_time_us();
 
@@ -322,7 +158,7 @@ int main(int argc, char ** argv) {
     {
         const int64_t t_start_us = ggml_time_us();
 
-        if (!gptj_model_quantize(fname_inp, fname_out, itype)) {
+        if (!gptj_model_quantize(fname_inp, fname_out, ggml_mtype(mtype))) {
             fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
             return 1;
         }
diff --git a/examples/stablelm/CMakeLists.txt b/examples/stablelm/CMakeLists.txt
index 59e97a9e..58c7d740 100644
--- a/examples/stablelm/CMakeLists.txt
+++ b/examples/stablelm/CMakeLists.txt
@@ -10,4 +10,4 @@ target_link_libraries(${TEST_TARGET} PRIVATE ggml common)
 
 set(TEST_TARGET stablelm-quantize)
 add_executable(${TEST_TARGET} quantize.cpp)
-target_link_libraries(${TEST_TARGET} PRIVATE ggml common)
+target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml)
diff --git a/examples/stablelm/main.cpp b/examples/stablelm/main.cpp
index 879ba115..cd7918be 100644
--- a/examples/stablelm/main.cpp
+++ b/examples/stablelm/main.cpp
@@ -309,11 +309,11 @@ bool stablelm_model_load(const std::string & fname, stablelm_model & model, gpt_
     while (true) {
         int32_t n_dims;
         int32_t length;
-        int32_t ftype;
+        int32_t ttype;
 
         fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
         fin.read(reinterpret_cast<char *>(&length), sizeof(length));
-        fin.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
+        fin.read(reinterpret_cast<char *>(&ttype),  sizeof(ttype));
 
         if (fin.eof()) {
             break;
@@ -346,26 +346,12 @@ bool stablelm_model_load(const std::string & fname, stablelm_model & model, gpt_
             return false;
         }
 
+        // for debugging
         if (0) {
-            static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", "q4_2", "q4_3" };
-            printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ftype_str[ftype], ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
+            printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
         }
 
-        size_t bpe = 0;
-
-        switch (ftype) {
-            case 0: bpe = ggml_type_size(GGML_TYPE_F32);  break;
-            case 1: bpe = ggml_type_size(GGML_TYPE_F16);  break;
-            case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
-            case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
-            case 5: bpe = ggml_type_size(GGML_TYPE_Q4_2); assert(ne[0] % 64 == 0); break;
-            case 6: bpe = ggml_type_size(GGML_TYPE_Q4_3); assert(ne[0] % 64 == 0); break;
-            default:
-                    {
-                        fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
-                        return false;
-                    }
-        };
+        const size_t bpe = ggml_type_size(ggml_type(ttype));
 
         if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
             fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
@@ -375,7 +361,6 @@ bool stablelm_model_load(const std::string & fname, stablelm_model & model, gpt_
 
         fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
 
-        //printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
"float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0); total_size += ggml_nbytes(tensor); if (++n_tensors % 8 == 0) { printf("."); diff --git a/examples/stablelm/quantize.cpp b/examples/stablelm/quantize.cpp index 3c8ca5e7..90a4799d 100644 --- a/examples/stablelm/quantize.cpp +++ b/examples/stablelm/quantize.cpp @@ -1,6 +1,7 @@ #include "ggml/ggml.h" #include "common.h" +#include "common-ggml.h" #include #include @@ -24,22 +25,7 @@ struct stablelm_hparams { }; // quantize a model -bool stablelm_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype) { - ggml_type type = GGML_TYPE_Q4_1; - - switch (itype) { - case 2: type = GGML_TYPE_Q4_0; break; - case 3: type = GGML_TYPE_Q4_1; break; - case 5: type = GGML_TYPE_Q4_2; break; - case 6: type = GGML_TYPE_Q4_3; break; - default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return 1; - }; - - if (!ggml_is_quantized(type)) { - fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type); - return false; - } - +bool stablelm_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_mtype mtype) { gpt_vocab vocab; printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str()); @@ -93,7 +79,7 @@ bool stablelm_model_quantize(const std::string & fname_inp, const std::string & fout.write((char *) &hparams.n_head, sizeof(hparams.n_head)); fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer)); fout.write((char *) &hparams.n_rot, sizeof(hparams.n_rot)); - fout.write((char *) &itype, sizeof(hparams.ftype)); + fout.write((char *) &mtype, sizeof(hparams.ftype)); } // load vocab @@ -115,164 +101,14 @@ bool stablelm_model_quantize(const std::string & fname_inp, const std::string & } } - // load weights - { - size_t total_size_org = 0; - size_t total_size_new = 0; - - std::vector work; - - std::vector data_u8; - std::vector data_f16; - std::vector data_f32; - - std::vector hist_all(1 << 4, 0); - - while (true) { - int32_t n_dims; - int32_t length; - int32_t ftype; - - finp.read(reinterpret_cast(&n_dims), sizeof(n_dims)); - finp.read(reinterpret_cast(&length), sizeof(length)); - finp.read(reinterpret_cast(&ftype), sizeof(ftype)); - - if (finp.eof()) { - break; - } - - int32_t nelements = 1; - int32_t ne[2] = { 1, 1 }; - for (int i = 0; i < n_dims; ++i) { - finp.read (reinterpret_cast(&ne[i]), sizeof(ne[i])); - nelements *= ne[i]; - } - - std::string name(length, 0); - finp.read (&name[0], length); - - { - static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", "q4_2", "q4_3" }; - printf("%64s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]); - } - - // regexes of tensor names to be quantized - const std::vector k_names = { - ".*weight", - }; - - bool quantize = false; - for (const auto & s : k_names) { - if (std::regex_match(name, std::regex(s))) { - quantize = true; - break; - } - } - - // quantize only 2D tensors - quantize &= (n_dims == 2); - - if (quantize) { - if (ftype != 0 && ftype != 1) { - fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype); - return false; - } - - if (ftype == 1) { - data_f16.resize(nelements); - finp.read(reinterpret_cast(data_f16.data()), nelements * sizeof(ggml_fp16_t)); - data_f32.resize(nelements); - for (int i = 0; i < nelements; ++i) { - data_f32[i] = ggml_fp16_to_fp32(data_f16[i]); - } - } else { - data_f32.resize(nelements); - finp.read(reinterpret_cast(data_f32.data()), nelements * sizeof(float)); - } - - ftype = itype; - } else { - const int bpe = 
-
-                data_u8.resize(nelements*bpe);
-                finp.read(reinterpret_cast<char *>(data_u8.data()), nelements * bpe);
-            }
-
-            fout.write(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
-            fout.write(reinterpret_cast<char *>(&length), sizeof(length));
-            fout.write(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
-            for (int i = 0; i < n_dims; ++i) {
-                fout.write(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
-            }
-            fout.write(&name[0], length);
-
-            if (quantize) {
-                printf("quantizing .. ");
-                work.resize(nelements); // for quantization
-
-                size_t cur_size = 0;
-                std::vector<int64_t> hist_cur(1 << 4, 0);
-
-                switch (type) {
-                    case GGML_TYPE_Q4_0:
-                        {
-                            cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_1:
-                        {
-                            cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_2:
-                        {
-                            cur_size = ggml_quantize_q4_2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_3:
-                        {
-                            cur_size = ggml_quantize_q4_3(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    default:
-                        {
-                            fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type);
-                            return false;
-                        }
-                }
-
-                fout.write(reinterpret_cast<char *>(work.data()), cur_size);
-                total_size_new += cur_size;
-
-                printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
-                for (int i = 0; i < hist_cur.size(); ++i) {
-                    hist_all[i] += hist_cur[i];
-                }
-
-                for (int i = 0; i < hist_cur.size(); ++i) {
-                    printf("%5.3f ", hist_cur[i] / (float)nelements);
-                }
-                printf("\n");
-            } else {
-                printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
-                fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
-                total_size_new += data_u8.size();
-            }
-
-            total_size_org += nelements * sizeof(float);
-        }
-
-        printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
-        printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
-
-        {
-            int64_t sum_all = 0;
-            for (int i = 0; i < hist_all.size(); ++i) {
-                sum_all += hist_all[i];
-            }
+    // regexes of tensor names to be quantized
+    const std::vector<std::string> to_quant = {
+        ".*weight",
+    };
 
-            printf("%s: hist: ", __func__);
-            for (int i = 0; i < hist_all.size(); ++i) {
-                printf("%5.3f ", hist_all[i] / (float)sum_all);
-            }
-            printf("\n");
-        }
+    if (!ggml_common_quantize_0(finp, fout, mtype, to_quant, {})) {
+        fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str());
+        return false;
     }
 
     finp.close();
@@ -304,7 +140,7 @@ int main(int argc, char ** argv) {
     const std::string fname_inp = argv[1];
     const std::string fname_out = argv[2];
 
-    const int itype = atoi(argv[3]);
+    const int mtype = atoi(argv[3]);
 
     const int64_t t_main_start_us = ggml_time_us();
 
@@ -314,7 +150,7 @@ int main(int argc, char ** argv) {
     {
         const int64_t t_start_us = ggml_time_us();
 
-        if (!stablelm_model_quantize(fname_inp, fname_out, itype)) {
+        if (!stablelm_model_quantize(fname_inp, fname_out, ggml_mtype(mtype))) {
             fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
             return 1;
         }
diff --git a/examples/whisper/CMakeLists.txt b/examples/whisper/CMakeLists.txt
index 67500672..dd681aad 100644
--- a/examples/whisper/CMakeLists.txt
+++ b/examples/whisper/CMakeLists.txt
@@ -19,4 +19,4 @@ target_include_directories(${TEST_TARGET} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/..
 
 set(TEST_TARGET whisper-quantize)
 add_executable(${TEST_TARGET} quantize.cpp)
-target_link_libraries(${TEST_TARGET} PRIVATE ggml common)
+target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml)
diff --git a/examples/whisper/quantize.cpp b/examples/whisper/quantize.cpp
index 2af2a0d0..e76b8ac4 100644
--- a/examples/whisper/quantize.cpp
+++ b/examples/whisper/quantize.cpp
@@ -1,6 +1,7 @@
 #include "ggml/ggml.h"
 
 #include "common.h"
+#include "common-ggml.h"
 
 #include <cassert>
 #include <cmath>
@@ -35,22 +36,7 @@ struct whisper_filters {
 };
 
 // quantize a model
-bool whisper_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype) {
-    ggml_type type = GGML_TYPE_Q4_1;
-
-    switch (itype) {
-        case 2: type = GGML_TYPE_Q4_0; break;
-        case 3: type = GGML_TYPE_Q4_1; break;
-        case 5: type = GGML_TYPE_Q4_2; break;
-        case 6: type = GGML_TYPE_Q4_3; break;
-        default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return 1;
-    };
-
-    if (!ggml_is_quantized(type)) {
-        fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type);
-        return false;
-    }
-
+bool whisper_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_mtype mtype) {
     gpt_vocab vocab;
 
     printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());
@@ -117,7 +103,7 @@ bool whisper_model_quantize(const std::string & fname_inp, const std::string & f
         fout.write((char *) &hparams.n_text_head,   sizeof(hparams.n_text_head));
         fout.write((char *) &hparams.n_text_layer,  sizeof(hparams.n_text_layer));
         fout.write((char *) &hparams.n_mels,        sizeof(hparams.n_mels));
-        fout.write((char *) &itype,                 sizeof(hparams.f16));
+        fout.write((char *) &mtype,                 sizeof(hparams.f16));
     }
 
     // load mel filters
@@ -161,168 +147,18 @@ bool whisper_model_quantize(const std::string & fname_inp, const std::string & f
         }
     }
 
-    // load weights
-    {
-        size_t total_size_org = 0;
-        size_t total_size_new = 0;
-
-        std::vector<ggml_fp16_t> work;
-
-        std::vector<uint8_t>     data_u8;
-        std::vector<ggml_fp16_t> data_f16;
-        std::vector<float>       data_f32;
-
-        std::vector<int64_t> hist_all(1 << 4, 0);
-
-        while (true) {
-            int32_t n_dims;
-            int32_t length;
-            int32_t ftype;
-
-            finp.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
-            finp.read(reinterpret_cast<char *>(&length), sizeof(length));
-            finp.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
-
-            if (finp.eof()) {
-                break;
-            }
-
-            int32_t nelements = 1;
-            int32_t ne[3] = { 1, 1, 1 };
-            for (int i = 0; i < n_dims; ++i) {
-                finp.read (reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
-                nelements *= ne[i];
-            }
-
-            std::string name(length, 0);
-            finp.read (&name[0], length);
-
-            {
-                static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", "q4_2", "q4_3" };
-                printf("%48s - [%5d, %5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ne[2], ftype_str[ftype]);
-            }
-
-            // regexes of tensor names to not be quantized
-            const std::vector<std::string> k_names = {
-                //"encoder.*",
-                "encoder.conv1.bias",
-                "encoder.conv2.bias",
-                "encoder.positional_embedding",
-                "decoder.positional_embedding",
-            };
-
-            bool quantize = true;
-            for (const auto & s : k_names) {
-                if (std::regex_match(name, std::regex(s))) {
-                    quantize = false;
-                    break;
-                }
-            }
-
-            // quantize only 2D and 3D tensors
-            quantize &= (n_dims == 2);
-
-            if (quantize) {
-                if (ftype != 0 && ftype != 1) {
-                    fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype);
-                    return false;
-                }
-
-                if (ftype == 1) {
-                    data_f16.resize(nelements);
-                    finp.read(reinterpret_cast<char *>(data_f16.data()), nelements * sizeof(ggml_fp16_t));
-                    data_f32.resize(nelements);
-                    for (int i = 0; i < nelements; ++i) {
-                        data_f32[i] = ggml_fp16_to_fp32(data_f16[i]);
-                    }
-                } else {
-                    data_f32.resize(nelements);
-                    finp.read(reinterpret_cast<char *>(data_f32.data()), nelements * sizeof(float));
-                }
-
-                ftype = itype;
-            } else {
-                const int bpe = (ftype == 0) ? sizeof(float) : sizeof(uint16_t);
-
-                data_u8.resize(nelements*bpe);
-                finp.read(reinterpret_cast<char *>(data_u8.data()), nelements * bpe);
-            }
-
-            fout.write(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
-            fout.write(reinterpret_cast<char *>(&length), sizeof(length));
-            fout.write(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
-            for (int i = 0; i < n_dims; ++i) {
-                fout.write(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
-            }
-            fout.write(&name[0], length);
-
-            if (quantize) {
-                printf("quantizing .. ");
-                work.resize(nelements); // for quantization
-
-                size_t cur_size = 0;
-                std::vector<int64_t> hist_cur(1 << 4, 0);
-
-                switch (type) {
-                    case GGML_TYPE_Q4_0:
-                        {
-                            cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_1:
-                        {
-                            cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_2:
-                        {
-                            cur_size = ggml_quantize_q4_2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_3:
-                        {
-                            cur_size = ggml_quantize_q4_3(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    default:
-                        {
-                            fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type);
-                            return false;
-                        }
-                }
-
-                fout.write(reinterpret_cast<char *>(work.data()), cur_size);
-                total_size_new += cur_size;
-
-                printf("size = %8.3f MB -> %8.3f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
-                for (int i = 0; i < hist_cur.size(); ++i) {
-                    hist_all[i] += hist_cur[i];
-                }
-
-                for (int i = 0; i < hist_cur.size(); ++i) {
-                    printf("%5.3f ", hist_cur[i] / (float)nelements);
-                }
-                printf("\n");
-            } else {
-                printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
-                fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
-                total_size_new += data_u8.size();
-            }
-
-            total_size_org += nelements * sizeof(float);
-        }
-
-        printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
-        printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
-
-        {
-            int64_t sum_all = 0;
-            for (int i = 0; i < hist_all.size(); ++i) {
-                sum_all += hist_all[i];
-            }
+    // regexes of tensor names to not be quantized
+    const std::vector<std::string> to_skip = {
+        //"encoder.*",
+        "encoder.conv1.bias",
+        "encoder.conv2.bias",
+        "encoder.positional_embedding",
+        "decoder.positional_embedding",
+    };
 
-            printf("%s: hist: ", __func__);
-            for (int i = 0; i < hist_all.size(); ++i) {
-                printf("%5.3f ", hist_all[i] / (float)sum_all);
-            }
-            printf("\n");
-        }
+    if (!ggml_common_quantize_0(finp, fout, mtype, { ".*" }, to_skip)) {
+        fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str());
+        return false;
     }
 
     finp.close();
@@ -331,9 +167,6 @@ bool whisper_model_quantize(const std::string & fname_inp, const std::string & f
     return true;
 }
 
-// usage:
-//  ./gpt-2-quantize models/gpt-2-117M/ggml-model.bin models/gpt-2-117M/ggml-model-quant.bin type
-//
 int main(int argc, char ** argv) {
     if (argc != 4) {
         fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
@@ -354,7 +187,7 @@ int main(int argc, char ** argv) {
     const std::string fname_inp = argv[1];
     const std::string fname_out = argv[2];
 
-    const int itype = atoi(argv[3]);
+    const int mtype = atoi(argv[3]);
 
     const int64_t t_main_start_us = ggml_time_us();
 
@@ -364,7 +197,7 @@ int main(int argc, char ** argv) {
     {
         const int64_t t_start_us = ggml_time_us();
 
-        if (!whisper_model_quantize(fname_inp, fname_out, itype)) {
+        if (!whisper_model_quantize(fname_inp, fname_out, ggml_mtype(mtype))) {
             fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
             return 1;
         }
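
Usage note (not part of the patch): each *-quantize tool keeps the same command line, "<tool> model-f32.bin model-quant.bin type", where "type" is the integer ggml_mtype value mapped in common-ggml.cpp above (2 = Q4_0, 3 = Q4_1, 5 = Q4_2, 6 = Q4_3), e.g.:

    ./gpt-2-quantize models/gpt-2-117M/ggml-model.bin models/gpt-2-117M/ggml-model-q4_0.bin 2

After this refactor, a new example's quantize tool reduces to copying its hparams/vocab and then calling the shared helper. A minimal sketch, assuming the common-ggml.h above; the function name and regex are hypothetical, and ggml_common_quantize_0 only handles the tensor stream, so the header data must already have been copied from finp to fout, exactly as the tools above do:

    // sketch: quantize every 2D tensor whose name matches ".*weight" to Q4_0,
    // reading the remaining tensor stream from finp and writing it to fout
    #include "common-ggml.h"

    #include <cstdio>

    bool quantize_tensors(std::ifstream & finp, std::ofstream & fout) {
        // to_quant / to_skip are regexes matched against tensor names;
        // GGML_MTYPE_MOSTLY_Q4_0 == 2, the same value passed on the command line
        if (!ggml_common_quantize_0(finp, fout, GGML_MTYPE_MOSTLY_Q4_0,
                                    { ".*weight" }, /*to_skip =*/ {})) {
            fprintf(stderr, "quantization failed\n");
            return false;
        }
        return true;
    }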