From: Johannes Gäßler
Date: Tue, 16 Dec 2025 23:50:12 +0000 (+0100)
Subject: llama-fit-params: force disable mlock (#18103)
X-Git-Tag: upstream/0.0.7446~4
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=d0794e89d9444ec970650834007bdf5c8fd4a05c;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama-fit-params: force disable mlock (#18103)
---

diff --git a/src/llama.cpp b/src/llama.cpp
index 85f2ee72..c8b5febe 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -71,8 +71,9 @@ static std::vector llama_get_device_memory_data(
     }, &ud);
 
     llama_model_params mparams_copy = *mparams;
-    mparams_copy.no_alloc = true;
-    mparams_copy.use_mmap = false;
+    mparams_copy.no_alloc  = true;
+    mparams_copy.use_mmap  = false;
+    mparams_copy.use_mlock = false;
 
     llama_model * model = llama_model_load_from_file(path_model, mparams_copy);
     if (model == nullptr) {