From: Johannes Gäßler
Date: Wed, 17 Dec 2025 20:10:03 +0000 (+0100)
Subject: llama-fit-params: fix memory print (#18136)
X-Git-Tag: upstream/0.0.7599~139
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=8dcc3662a292c14f003be2c465895d40c9460511;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama-fit-params: fix memory print (#18136)
---

diff --git a/src/llama.cpp b/src/llama.cpp
index c8b5febe..708d879b 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -542,6 +542,7 @@ static void llama_params_fit_impl(
         } else {
             assert(ngl_per_device_high[id].n_layer == n_unassigned);
             ngl_per_device = ngl_per_device_high;
+            mem = mem_high;
             LLAMA_LOG_DEBUG("%s: set ngl_per_device[%d].n_layer=%" PRIu32 "\n", __func__, id, ngl_per_device[id].n_layer);
         }
     }
@@ -629,6 +630,7 @@ static void llama_params_fit_impl(
             }
         } else {
             ngl_per_device = ngl_per_device_high;
+            mem = mem_high;
             id_dense_start = id_dense_start_high;
             LLAMA_LOG_DEBUG("%s: set ngl_per_device[%zu].(n_layer, n_part)=(%" PRIu32 ", %" PRIu32 "), id_dense_start=%zu\n", __func__, id, ngl_per_device[id].n_layer, ngl_per_device[id].n_part, id_dense_start);
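
For illustration, a minimal standalone sketch of the pattern behind this fix, assuming illustrative names (device_assignment, accept_high, main) that are not the real llama.cpp identifiers: the selected layer assignment and its memory estimate are tracked as a pair, so when the "high" candidate assignment is accepted its memory estimate must be copied along with it, otherwise a later print reports a stale value.

#include <cstdio>
#include <vector>

// Illustrative sketch only; the real llama.cpp types and control flow differ.
struct device_assignment {
    int n_layer; // layers assigned to this device
};

int main() {
    // Current selection and its per-device memory estimate (bytes), kept as a pair.
    std::vector<device_assignment> ngl_per_device = {{10}, {10}};
    std::vector<size_t>            mem            = {4096, 4096};

    // Candidate "high" assignment with the memory estimate computed for it.
    std::vector<device_assignment> ngl_per_device_high = {{12}, {8}};
    std::vector<size_t>            mem_high            = {5120, 3072};

    const bool accept_high = true;
    if (accept_high) {
        ngl_per_device = ngl_per_device_high;
        mem            = mem_high; // without this line the print below shows the stale estimate
    }

    for (size_t id = 0; id < ngl_per_device.size(); ++id) {
        printf("device %zu: n_layer=%d, mem=%zu\n", id, ngl_per_device[id].n_layer, mem[id]);
    }
    return 0;
}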