git.djapps.eu Git - pkg/ggml/sources/ggml/commitdiff
whisper : add SYCL support (whisper/1863)
author: Abhilash Majumder <redacted>
Fri, 23 Feb 2024 07:22:24 +0000 (12:52 +0530)
committer: Georgi Gerganov <redacted>
Wed, 28 Feb 2024 11:04:28 +0000 (13:04 +0200)
* add changes from llama upstream

* add sycl abstraction

* add sycl build

* update cmake

* add sycl build config

* fix bug

* fix bug

* refactor build

* fix bug

* update build

* call build

* use sycl header

* add examples

* add target

* fix typecast in quant.c

* readd fp16 and readme

* fix quant typecast

* add sample

* add readme

* remove cxx file check

examples/whisper/whisper.cpp

index 38c827d2efae801c7174021d9c6045e311c2860c..2e0a6e2e044c7cd75706dd6667261e4e21cdd78b 100644 (file)
 #include "ggml-cuda.h"
 #endif
 
+#ifdef GGML_USE_SYCL
+#include "ggml-sycl.h"
+#endif
+
 #ifdef WHISPER_USE_OPENVINO
 #include "openvino/whisper-openvino-encoder.h"
 #endif
@@ -1052,6 +1056,16 @@ static ggml_backend_t whisper_backend_init(const whisper_context_params & params
     }
 #endif
 
+#ifdef GGML_USE_SYCL
+    if (params.use_gpu) {
+        WHISPER_LOG_INFO("%s: using SYCL backend\n", __func__);
+        backend_gpu = ggml_backend_sycl_init(params.gpu_device);
+        if (!backend_gpu) {
+            WHISPER_LOG_ERROR("%s: ggml_backend_sycl_init() failed\n", __func__);
+        }
+    }
+#endif
+
     if (backend_gpu) {
         return backend_gpu;
     }