From: Georgi Gerganov Date: Thu, 14 Mar 2024 20:58:41 +0000 (+0200) Subject: llama : fix integer overflow during quantization (#6063) X-Git-Tag: upstream/0.0.4488~2056 X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=4755afd1cbd40d93c017e5b98c39796f52345314;p=pkg%2Fggml%2Fsources%2Fllama.cpp llama : fix integer overflow during quantization (#6063) --- diff --git a/llama.cpp b/llama.cpp index 10fd5346..2c384197 100644 --- a/llama.cpp +++ b/llama.cpp @@ -11977,7 +11977,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n return new_type; } -static int32_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int chunk_size, int nrows, int n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) { +static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int chunk_size, int nrows, int n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) { std::mutex mutex; int counter = 0; size_t new_size = 0;