From: Radoslav Gerganov
Date: Mon, 23 Dec 2024 08:39:30 +0000 (+0200)
Subject: rpc-server : add support for the SYCL backend (#10934)
X-Git-Tag: upstream/0.0.4488~106
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=86bf31cfe684849157f0875b4f0ebccac7034547;p=pkg%2Fggml%2Fsources%2Fllama.cpp

rpc-server : add support for the SYCL backend (#10934)
---

diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp
index 5fe70dac..8b1b23ed 100644
--- a/examples/rpc/rpc-server.cpp
+++ b/examples/rpc/rpc-server.cpp
@@ -12,6 +12,10 @@
 #include "ggml-vulkan.h"
 #endif
 
+#ifdef GGML_USE_SYCL
+#include "ggml-sycl.h"
+#endif
+
 #include "ggml-rpc.h"
 #ifdef _WIN32
 #  include <windows.h>
@@ -91,6 +95,12 @@ static ggml_backend_t create_backend() {
     if (!backend) {
         fprintf(stderr, "%s: ggml_backend_vulkan_init() failed\n", __func__);
     }
+#elif GGML_USE_SYCL
+    fprintf(stderr, "%s: using SYCL backend\n", __func__);
+    backend = ggml_backend_sycl_init(0); // init device 0
+    if (!backend) {
+        fprintf(stderr, "%s: ggml_backend_sycl_init() failed\n", __func__);
+    }
 #endif
 
     // if there aren't GPU Backends fallback to CPU backend
@@ -106,6 +116,8 @@ static void get_backend_memory(size_t * free_mem, size_t * total_mem) {
     ggml_backend_cuda_get_device_memory(0, free_mem, total_mem);
 #elif GGML_USE_VULKAN
     ggml_backend_vk_get_device_memory(0, free_mem, total_mem);
+#elif GGML_USE_SYCL
+    ggml_backend_sycl_get_device_memory(0, free_mem, total_mem);
 #else
 #ifdef _WIN32
     MEMORYSTATUSEX status;
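
For reference, below is a minimal standalone C++ sketch (not part of this commit) showing how the SYCL path added above can be exercised on its own. It assumes ggml/llama.cpp was built with the SYCL backend enabled so that "ggml-sycl.h" is available, and it uses only the two calls introduced in this diff, ggml_backend_sycl_init() and ggml_backend_sycl_get_device_memory(), together with the generic ggml-backend API. The hardcoded device index 0 mirrors the choice made in rpc-server.cpp.

    // Hypothetical standalone check, mirroring what rpc-server does with the
    // SYCL backend: initialize device 0 and report its free/total memory.
    // Assumes a ggml build with SYCL support (otherwise ggml-sycl.h is absent).
    #include <cstdio>

    #include "ggml-backend.h"
    #include "ggml-sycl.h"

    int main() {
        // Same call as in create_backend(): device index 0 is hardcoded.
        ggml_backend_t backend = ggml_backend_sycl_init(0);
        if (!backend) {
            fprintf(stderr, "ggml_backend_sycl_init() failed\n");
            return 1;
        }

        // Same call as in get_backend_memory(): query device 0 memory.
        size_t free_mem  = 0;
        size_t total_mem = 0;
        ggml_backend_sycl_get_device_memory(0, &free_mem, &total_mem);
        printf("SYCL device 0: %zu MB free / %zu MB total\n",
               free_mem / (1024 * 1024), total_mem / (1024 * 1024));

        ggml_backend_free(backend);
        return 0;
    }

With this change, an rpc-server binary built against a SYCL-enabled ggml selects the SYCL backend at startup in the same way the existing CUDA and Vulkan paths do, and falls back to the CPU backend if initialization fails.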