From: Radoslav Gerganov
Date: Wed, 29 May 2024 11:45:44 +0000 (+0300)
Subject: llama-bench : add support for the RPC backend (llama/7435)
X-Git-Tag: upstream/0.0.1642~628
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=cbe312b2ac5679c6d70d0dae7460e3ce6bf8fe59;p=pkg%2Fggml%2Fsources%2Fggml

llama-bench : add support for the RPC backend (llama/7435)
---

diff --git a/include/ggml/ggml.h b/include/ggml/ggml.h
index 3859895b..f9deac7e 100644
--- a/include/ggml/ggml.h
+++ b/include/ggml/ggml.h
@@ -2428,6 +2428,7 @@ extern "C" {
     GGML_API int ggml_cpu_has_sse3       (void);
     GGML_API int ggml_cpu_has_ssse3      (void);
     GGML_API int ggml_cpu_has_sycl      (void);
+    GGML_API int ggml_cpu_has_rpc       (void);
     GGML_API int ggml_cpu_has_vsx       (void);
     GGML_API int ggml_cpu_has_matmul_int8(void);

diff --git a/src/ggml.c b/src/ggml.c
index d8f74f3c..e6e2397b 100644
--- a/src/ggml.c
+++ b/src/ggml.c
@@ -22872,6 +22872,14 @@ int ggml_cpu_has_sycl(void) {
 #endif
 }

+int ggml_cpu_has_rpc(void) {
+#if defined(GGML_USE_RPC)
+    return 1;
+#else
+    return 0;
+#endif
+}
+
 int ggml_cpu_has_gpublas(void) {
     return ggml_cpu_has_cuda() || ggml_cpu_has_clblast() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() ||
            ggml_cpu_has_sycl();