From: Georgi Gerganov
Date: Sat, 8 Feb 2025 14:49:38 +0000 (+0200)
Subject: cont : fix mmap flag print (#11699)
X-Git-Tag: upstream/0.0.4719~47
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=bdcf8b6a56f4d49d3420ae5b21cdf9a116040551;p=pkg%2Fggml%2Fsources%2Fllama.cpp

cont : fix mmap flag print (#11699)
---

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index e30db66e..0f4b62c4 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -1275,7 +1275,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
     const bool use_mmap_buffer = true;
 
-    LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, use_mmap_buffer ? "true" : "false");
+    LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, ml.use_mmap ? "true" : "false");
 
     // build a list of buffer types for the CPU and GPU devices
     pimpl->cpu_buft_list = make_cpu_buft_list(devices);
diff --git a/src/llama.cpp b/src/llama.cpp
index 3b6a21d8..607f2786 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -9430,7 +9430,6 @@ static struct llama_model * llama_model_load_from_file_impl(
         struct llama_model_params params) {
     ggml_time_init();
 
-    unsigned cur_percentage = 0;
     if (params.progress_callback == NULL) {
         params.progress_callback_user_data = &cur_percentage;