clip : add sycl support (#10574)
author     piDack <redacted>
           Wed, 4 Dec 2024 00:26:37 +0000 (08:26 +0800)
committer  GitHub <redacted>
           Wed, 4 Dec 2024 00:26:37 +0000 (01:26 +0100)
Co-authored-by: piDack <redacted>
examples/llava/clip.cpp

index 7ba4cea58e80bc809d8d2d2c63e2e1cdc73e1262..d7c94352b568b3439f595f530f6ca2803a22cdc5 100644
 #include "ggml-cuda.h"
 #endif
 
+#ifdef GGML_USE_SYCL
+#include "ggml-sycl.h"
+#endif
+
 #ifdef GGML_USE_METAL
 #include "ggml-metal.h"
 #endif
@@ -1169,6 +1173,11 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
     LOG_INF("%s: CLIP using Vulkan backend\n", __func__);
 #endif
 
+#ifdef GGML_USE_SYCL
+    new_clip->backend = ggml_backend_sycl_init(0);
+    LOG_INF("%s: CLIP using SYCL backend\n", __func__);
+#endif
+
     if (!new_clip->backend) {
         new_clip->backend = ggml_backend_cpu_init();
         LOG_INF("%s: CLIP using CPU backend\n", __func__);