Build Llama SYCL Intel with static libs (#8668)
author    Joe Todd <redacted>
          Wed, 24 Jul 2024 13:36:00 +0000 (14:36 +0100)
committer GitHub <redacted>
          Wed, 24 Jul 2024 13:36:00 +0000 (14:36 +0100)
Ensure SYCL CI builds both static & dynamic libs for testing purposes

Signed-off-by: Joe Todd <redacted>
.devops/llama-cli-intel.Dockerfile
.devops/llama-server-intel.Dockerfile

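The two Dockerfiles can be exercised locally to reproduce what the CI builds: the llama-cli image now links statically, while the llama-server image keeps the default dynamic linking. A minimal sketch, assuming the commands run from the repository root with Docker available; the image tags are illustrative:

    # Static build of llama-cli (BUILD_SHARED_LIBS=OFF)
    docker build -f .devops/llama-cli-intel.Dockerfile -t llama-cpp-sycl-cli .

    # Dynamic build of llama-server (default shared libs)
    docker build -f .devops/llama-server-intel.Dockerfile -t llama-cpp-sycl-server .
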
diff --git a/.devops/llama-cli-intel.Dockerfile b/.devops/llama-cli-intel.Dockerfile
index 2bf82bb586e6d64b64b03bf5479ce46ec6ac5d5a..79dba06a77d6eba3c8e2b71d809a4cdf3213d337 100644
--- a/.devops/llama-cli-intel.Dockerfile
+++ b/.devops/llama-cli-intel.Dockerfile
@@ -14,7 +14,9 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
         echo "GGML_SYCL_F16 is set" && \
         export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
     fi && \
-    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
+    echo "Building with static libs" && \
+    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx \
+    ${OPT_SYCL_F16} -DBUILD_SHARED_LIBS=OFF && \
     cmake --build build --config Release --target llama-cli
 
 FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime
diff --git a/.devops/llama-server-intel.Dockerfile b/.devops/llama-server-intel.Dockerfile
index eb9aba618d4d8833f6d623788713496f1834bbca..f525658dddfe5d6beff9c589c36c4500442c5353 100644
--- a/.devops/llama-server-intel.Dockerfile
+++ b/.devops/llama-server-intel.Dockerfile
@@ -14,6 +14,7 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
         echo "GGML_SYCL_F16 is set" && \
         export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
     fi && \
+    echo "Building with dynamic libs" && \
     cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
     cmake --build build --config Release --target llama-server
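
Outside of Docker, the same two configurations can be mirrored with plain CMake and the linkage checked with ldd. A sketch under the assumption that the oneAPI compilers (icx/icpx) are on PATH and that binaries land in build*/bin, as in a standard llama.cpp build:

    # Static variant, mirroring llama-cli-intel.Dockerfile
    cmake -B build-static -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DBUILD_SHARED_LIBS=OFF
    cmake --build build-static --config Release --target llama-cli

    # With BUILD_SHARED_LIBS=OFF, ldd should list no libllama/libggml shared objects
    ldd build-static/bin/llama-cli | grep -E 'libllama|libggml' || echo "statically linked against llama/ggml"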