git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama-model-loader: print warning when using overrides with mmap (#20978)
author Aman Gupta <redacted>
Mon, 30 Mar 2026 09:40:17 +0000 (17:40 +0800)
committer GitHub <redacted>
Mon, 30 Mar 2026 09:40:17 +0000 (17:40 +0800)
* llama-model-loader: use pinned memory for tensor overrides

* change to warning

src/llama-model-loader.cpp

index 2457a7ed4b70eb8f6973d9c68a307a16c31c7b37..3d549cae5b6b83559d84724f806582ca2d702685 100644 (file)
@@ -1158,6 +1158,12 @@ struct ggml_tensor * llama_model_loader::create_tensor(
                     if (overrides->buft == ggml_backend_cpu_buffer_type()) {
                         // when overriding to a CPU buffer, consider the extra buffer types
                         buft = select_weight_buft(hparams, t_meta, op, buft_list_cpu);
+                        if (use_mmap) {
+                            static std::once_flag once;
+                            std::call_once(once, [] {
+                                LLAMA_LOG_WARN("llama_model_loader: tensor overrides to CPU are used with mmap enabled - consider using --no-mmap for better performance\n");
+                            });
+                        }
                     } else {
                         buft = overrides->buft;
                     }