git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : print hint when loading a model when no backends are loaded (#13589)
author: Diego Devesa <redacted>
Fri, 16 May 2025 14:38:07 +0000 (07:38 -0700)
committer: GitHub <redacted>
Fri, 16 May 2025 14:38:07 +0000 (16:38 +0200)
src/llama.cpp

index 9fdddf7b071f83925ae7d4662046fa6340f91fd9..2f06e0f8ce12d2d309f5c61cdc8219cec27d06b5 100644 (file)
@@ -140,6 +140,11 @@ static struct llama_model * llama_model_load_from_file_impl(
         struct llama_model_params params) {
     ggml_time_init();
 
+    if (!params.vocab_only && ggml_backend_reg_count() == 0) {
+        LLAMA_LOG_ERROR("%s: no backends are loaded. hint: use ggml_backend_load() or ggml_backend_load_all() to load a backend before calling this function\n", __func__);
+        return nullptr;
+    }
+
     unsigned cur_percentage = 0;
     if (params.progress_callback == NULL) {
         params.progress_callback_user_data = &cur_percentage;