From: Diego Devesa
Date: Wed, 14 May 2025 14:12:36 +0000 (-0700)
Subject: llama : fix quantize with dl backends (#13539)
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=b7d26720821823e23e2273a99e38398d511242e9;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : fix quantize with dl backends (#13539)
---

diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp
index 4cce5166..ee520833 100644
--- a/src/llama-model-loader.cpp
+++ b/src/llama-model-loader.cpp
@@ -822,13 +822,18 @@ void llama_model_loader::init_mappings(bool prefetch, llama_mlocks * mlock_mmaps
     mappings.reserve(files.size());
     mmaps_used.reserve(files.size());
     for (const auto & file : files) {
-        auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU));
-        if (!reg) {
-            throw std::runtime_error(format("%s: no CPU backend found", __func__));
+        bool is_numa = false;
+
+        auto * dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
+        if (dev) {
+            auto * reg = ggml_backend_dev_backend_reg(dev);
+            auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
+            if (is_numa_fn) {
+                is_numa = is_numa_fn();
+            }
         }
-        auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
-        std::unique_ptr<llama_mmap> mapping = std::make_unique<llama_mmap>(file.get(), prefetch ? -1 : 0, is_numa_fn());
+        std::unique_ptr<llama_mmap> mapping = std::make_unique<llama_mmap>(file.get(), prefetch ? -1 : 0, is_numa);
         mmaps_used.emplace_back(mapping->size(), 0);
         if (mlock_mmaps) {
             std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
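
For reference, the guard pattern introduced above can be exercised outside the
loader: treat both the CPU device and the exported symbol as optional, and fall
back to a safe default instead of throwing. A minimal standalone sketch,
assuming the usual ggml headers and link setup; the helper name
cpu_is_numa_or_default is hypothetical:

    #include <cstdio>

    #include "ggml-backend.h"
    #include "ggml-cpu.h"   // declares ggml_is_numa; used here only for its type

    static bool cpu_is_numa_or_default(bool fallback = false) {
        // with dynamically loaded backends, the CPU backend may be absent entirely
        ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
        if (!dev) {
            return fallback;
        }
        ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
        // the function is resolved by name, so it may be missing as well
        auto * is_numa_fn = (decltype(ggml_is_numa) *)
            ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
        return is_numa_fn ? is_numa_fn() : fallback;
    }

    int main() {
        printf("NUMA detected: %s\n", cpu_is_numa_or_default() ? "yes" : "no");
        return 0;
    }

Per the commit title, the practical effect is that tools such as llama-quantize,
which do not register a compute backend, no longer abort in init_mappings when
backends are built as dynamic libraries and the CPU backend is not loaded.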