llama : check returned fn ptrs from ggml_backend_reg_get_proc_address (#15893)
author    Daniel Bevenius <redacted>
          Wed, 10 Sep 2025 03:33:58 +0000 (05:33 +0200)
committer GitHub <redacted>
          Wed, 10 Sep 2025 03:33:58 +0000 (05:33 +0200)
This commit adds checks for two function pointers returned from
ggml_backend_reg_get_proc_address.

The motivation for this is that the returned function pointers could be
nullptr if the proc address lookup changes in the future. Checking the
result is also consistent with all the other calls to
ggml_backend_reg_get_proc_address in the code base.
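
For reference, a minimal sketch of the guarded-lookup pattern used here,
based on the numa_init hunk in src/llama.cpp below. It assumes the ggml
backend headers are available; numa_init_checked is a hypothetical helper
name used only for illustration:

    #include "ggml.h"
    #include "ggml-backend.h"
    #include "ggml-cpu.h"

    // hypothetical helper mirroring the change in src/llama.cpp
    static void numa_init_checked(enum ggml_numa_strategy numa) {
        ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
        GGML_ASSERT(dev && "CPU backend is not loaded");

        ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);

        // the lookup may return nullptr if the registry does not export
        // this symbol, so the result is checked before the call
        auto * numa_init_fn = (decltype(ggml_numa_init) *)
            ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_numa_init");

        if (numa_init_fn) {
            numa_init_fn(numa);
        }
    }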

src/llama-context.cpp
src/llama.cpp

index 874c6f82cb95807f392ddfbc3bc2dcfc725698d1..3e163001c180b3f40fec46903c5a9466a59f41ef 100644 (file)
@@ -1447,7 +1447,9 @@ ggml_status llama_context::graph_compute(
     if (backend_cpu != nullptr) {
         auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend_cpu));
         auto * set_threadpool_fn = (decltype(ggml_backend_cpu_set_threadpool) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_set_threadpool");
-        set_threadpool_fn(backend_cpu, tp);
+        if (set_threadpool_fn) {
+            set_threadpool_fn(backend_cpu, tp);
+        }
     }
 
     // set the number of threads for all the backends
index f0d4f5f891cc7106a01e85fc091054fc09a27f7e..92cddccc9944cd1e55e7d2ef93aeb2d3ec02728a 100644 (file)
@@ -83,7 +83,9 @@ void llama_numa_init(enum ggml_numa_strategy numa) {
         GGML_ASSERT(dev && "CPU backend is not loaded");
         auto * reg = ggml_backend_dev_backend_reg(dev);
         auto * numa_init_fn = (decltype(ggml_numa_init) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_numa_init");
-        numa_init_fn(numa);
+        if (numa_init_fn) {
+            numa_init_fn(numa);
+        }
     }
 }