git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
rpc-server : add support for the SYCL backend (#10934)
author: Radoslav Gerganov <redacted>
Mon, 23 Dec 2024 08:39:30 +0000 (10:39 +0200)
committer: GitHub <redacted>
Mon, 23 Dec 2024 08:39:30 +0000 (10:39 +0200)
examples/rpc/rpc-server.cpp

index 5fe70dac7f193ea89a47709ba6f38393783e4ac3..8b1b23edad174c4a104f15769dd94bab9c4c6c16 100644 (file)
 #include "ggml-vulkan.h"
 #endif
 
+#ifdef GGML_USE_SYCL
+#include "ggml-sycl.h"
+#endif
+
 #include "ggml-rpc.h"
 #ifdef _WIN32
 #  include <windows.h>
@@ -91,6 +95,12 @@ static ggml_backend_t create_backend() {
     if (!backend) {
         fprintf(stderr, "%s: ggml_backend_vulkan_init() failed\n", __func__);
     }
+#elif GGML_USE_SYCL
+    fprintf(stderr, "%s: using SYCL backend\n", __func__);
+    backend = ggml_backend_sycl_init(0); // init device 0
+    if (!backend) {
+        fprintf(stderr, "%s: ggml_backend_sycl_init() failed\n", __func__);
+    }
 #endif
 
     // if there aren't GPU Backends fallback to CPU backend
@@ -106,6 +116,8 @@ static void get_backend_memory(size_t * free_mem, size_t * total_mem) {
     ggml_backend_cuda_get_device_memory(0, free_mem, total_mem);
 #elif GGML_USE_VULKAN
     ggml_backend_vk_get_device_memory(0, free_mem, total_mem);
+#elif GGML_USE_SYCL
+    ggml_backend_sycl_get_device_memory(0, free_mem, total_mem);
 #else
     #ifdef _WIN32
         MEMORYSTATUSEX status;