git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
devops : remove clblast + LLAMA_CUDA -> GGML_CUDA (#8139)
author    Georgi Gerganov <redacted>
Wed, 26 Jun 2024 16:32:07 +0000 (19:32 +0300)
committer GitHub <redacted>
Wed, 26 Jun 2024 16:32:07 +0000 (19:32 +0300)
ggml-ci
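
In short, the build flags that select ggml backends lose their LLAMA_ prefix in favour of GGML_, while llama.cpp-level options such as LLAMA_CURL keep theirs. A minimal before/after sketch using only flags that appear in the diffs below:

    # before this commit
    make -j LLAMA_CUDA=1
    cmake -B build -DLLAMA_VULKAN=1

    # after this commit
    make -j GGML_CUDA=1
    cmake -B build -DGGML_VULKAN=1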

12 files changed:
.devops/full-cuda.Dockerfile
.devops/full-rocm.Dockerfile
.devops/llama-cli-cuda.Dockerfile
.devops/llama-cli-intel.Dockerfile
.devops/llama-cli-rocm.Dockerfile
.devops/llama-cli-vulkan.Dockerfile
.devops/llama-cpp-clblast.srpm.spec [deleted file]
.devops/llama-cpp-cuda.srpm.spec
.devops/llama-server-cuda.Dockerfile
.devops/llama-server-intel.Dockerfile
.devops/llama-server-rocm.Dockerfile
.devops/llama-server-vulkan.Dockerfile

diff --git a/.devops/full-cuda.Dockerfile b/.devops/full-cuda.Dockerfile
index f6073f6622b5abffb4dcb1938fc4c3c2361d4204..2a7da586a7086d242beaf6917b5ac6f65912bf41 100644 (file)
@@ -27,7 +27,7 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
-ENV LLAMA_CUDA=1
+ENV GGML_CUDA=1
 # Enable cURL
 ENV LLAMA_CURL=1
 
diff --git a/.devops/full-rocm.Dockerfile b/.devops/full-rocm.Dockerfile
index 0314d469bc353662c255d6d206dfcc705708fa65..5cbd2e7a131aa9f41dc34a6633fb2fe88ada44aa 100644 (file)
@@ -36,7 +36,7 @@ COPY . .
 # Set nvcc architecture
 ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
-ENV LLAMA_HIPBLAS=1
+ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++
 
diff --git a/.devops/llama-cli-cuda.Dockerfile b/.devops/llama-cli-cuda.Dockerfile
index d5ce538f615545ae18d5fa3dca90ce463b03e060..bff946cbc24057c59ad18685fc45693097cf0034 100644 (file)
@@ -21,7 +21,7 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
-ENV LLAMA_CUDA=1
+ENV GGML_CUDA=1
 
 RUN make -j$(nproc) llama-cli
 
diff --git a/.devops/llama-cli-intel.Dockerfile b/.devops/llama-cli-intel.Dockerfile
index 6789e17afcc6e1e0e007c874ce7a849b74677c42..bd816f9f5a1e6afc8a57b79ade6eed3fa88a3169 100644 (file)
@@ -2,7 +2,7 @@ ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04
 
 FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
 
-ARG LLAMA_SYCL_F16=OFF
+ARG GGML_SYCL_F16=OFF
 RUN apt-get update && \
     apt-get install -y git
 
@@ -10,11 +10,11 @@ WORKDIR /app
 
 COPY . .
 
-RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
-        echo "LLAMA_SYCL_F16 is set" && \
-        export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
+RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
+        echo "GGML_SYCL_F16 is set" && \
+        export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
     fi && \
-    cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
+    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
     cmake --build build --config Release --target llama-cli
 
 FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
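
The renamed GGML_SYCL_F16 build argument stays OFF by default; a hedged usage sketch for enabling it at image build time (the image tag and build context are illustrative, not part of this commit):

    docker build --build-arg GGML_SYCL_F16=ON \
        -t llama-cli-intel -f .devops/llama-cli-intel.Dockerfile .
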
diff --git a/.devops/llama-cli-rocm.Dockerfile b/.devops/llama-cli-rocm.Dockerfile
index 7e8a6f0fa208c1a08a8b71e9c8f777ae4102835c..caa507b08188520983578deecb598c8a09d93cfa 100644 (file)
@@ -36,7 +36,7 @@ COPY . .
 # Set nvcc architecture
 ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
-ENV LLAMA_HIPBLAS=1
+ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++
 
diff --git a/.devops/llama-cli-vulkan.Dockerfile b/.devops/llama-cli-vulkan.Dockerfile
index 7a0abe71f15874e4a23300a440146c646bb0c89e..6155d588147c98787a6f85da6b4b3e31f572c797 100644 (file)
@@ -14,7 +14,7 @@ RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key
 # Build it
 WORKDIR /app
 COPY . .
-RUN cmake -B build -DLLAMA_VULKAN=1 && \
+RUN cmake -B build -DGGML_VULKAN=1 && \
     cmake --build build --config Release --target llama-cli
 
 # Clean up
diff --git a/.devops/llama-cpp-clblast.srpm.spec b/.devops/llama-cpp-clblast.srpm.spec
deleted file mode 100644 (file)
index 0139521..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-# SRPM for building from source and packaging an RPM for RPM-based distros.
-# https://docs.fedoraproject.org/en-US/quick-docs/creating-rpm-packages
-# Built and maintained by John Boero - boeroboy@gmail.com
-# In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal
-
-# Notes for llama.cpp:
-# 1. Tags are currently based on hash - which will not sort asciibetically.
-#    We need to declare standard versioning if people want to sort latest releases.
-# 2. Builds for CUDA/OpenCL support are separate, with different depenedencies.
-# 3. NVidia's developer repo must be enabled with nvcc, cublas, clblas, etc installed.
-#    Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
-# 4. OpenCL/CLBLAST support simply requires the ICD loader and basic opencl libraries.
-#    It is up to the user to install the correct vendor-specific support.
-
-Name:           llama.cpp-clblast
-Version:        %( date "+%%Y%%m%%d" )
-Release:        1%{?dist}
-Summary:        OpenCL Inference of LLaMA model in C/C++
-License:        MIT
-Source0:        https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
-BuildRequires:  coreutils make gcc-c++ git mesa-libOpenCL-devel clblast-devel
-Requires:       clblast
-URL:            https://github.com/ggerganov/llama.cpp
-
-%define debug_package %{nil}
-%define source_date_epoch_from_changelog 0
-
-%description
-CPU inference for Meta's Lllama2 models using default options.
-
-%prep
-%setup -n llama.cpp-master
-
-%build
-make -j LLAMA_CLBLAST=1
-
-%install
-mkdir -p %{buildroot}%{_bindir}/
-cp -p llama-cli %{buildroot}%{_bindir}/llama-clblast-cli
-cp -p llama-server %{buildroot}%{_bindir}/llama-clblast-server
-cp -p llama-simple %{buildroot}%{_bindir}/llama-clblast-simple
-
-mkdir -p %{buildroot}/usr/lib/systemd/system
-%{__cat} <<EOF  > %{buildroot}/usr/lib/systemd/system/llamaclblast.service
-[Unit]
-Description=Llama.cpp server, CPU only (no GPU support in this build).
-After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target
-
-[Service]
-Type=simple
-EnvironmentFile=/etc/sysconfig/llama
-ExecStart=/usr/bin/llama-clblast-server $LLAMA_ARGS
-ExecReload=/bin/kill -s HUP $MAINPID
-Restart=never
-
-[Install]
-WantedBy=default.target
-EOF
-
-mkdir -p %{buildroot}/etc/sysconfig
-%{__cat} <<EOF  > %{buildroot}/etc/sysconfig/llama
-LLAMA_ARGS="-m /opt/llama2/ggml-model-f32.bin"
-EOF
-
-%clean
-rm -rf %{buildroot}
-rm -rf %{_builddir}/*
-
-%files
-%{_bindir}/llama-clblast-cli
-%{_bindir}/llama-clblast-server
-%{_bindir}/llama-clblast-simple
-/usr/lib/systemd/system/llamaclblast.service
-%config /etc/sysconfig/llama
-
-
-%pre
-
-%post
-
-%preun
-%postun
-
-%changelog
diff --git a/.devops/llama-cpp-cuda.srpm.spec b/.devops/llama-cpp-cuda.srpm.spec
index cbdf4362629d36895782aef301ac776cd5b4d432..7425d3a9d7a4027fcff13077ffec3caf6e66d02f 100644 (file)
@@ -32,7 +32,7 @@ CPU inference for Meta's Lllama2 models using default options.
 %setup -n llama.cpp-master
 
 %build
-make -j LLAMA_CUDA=1
+make -j GGML_CUDA=1
 
 %install
 mkdir -p %{buildroot}%{_bindir}/
diff --git a/.devops/llama-server-cuda.Dockerfile b/.devops/llama-server-cuda.Dockerfile
index 7bef07a05f062cb93a7dd61ba4b0694d4169f139..d7eaa0925a4a5df774d0b6e1bcd548f35ae569fe 100644 (file)
@@ -21,7 +21,7 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
-ENV LLAMA_CUDA=1
+ENV GGML_CUDA=1
 # Enable cURL
 ENV LLAMA_CURL=1
 
diff --git a/.devops/llama-server-intel.Dockerfile b/.devops/llama-server-intel.Dockerfile
index 3bf1670ec40a4b354a7396163ecf494bffe9892f..8f8fef8c09730da548c4959f07467047bc568c5c 100644 (file)
@@ -2,7 +2,7 @@ ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04
 
 FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
 
-ARG LLAMA_SYCL_F16=OFF
+ARG GGML_SYCL_F16=OFF
 RUN apt-get update && \
     apt-get install -y git libcurl4-openssl-dev
 
@@ -10,11 +10,11 @@ WORKDIR /app
 
 COPY . .
 
-RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
-        echo "LLAMA_SYCL_F16 is set" && \
-        export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
+RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
+        echo "GGML_SYCL_F16 is set" && \
+        export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
     fi && \
-    cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
+    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
     cmake --build build --config Release --target llama-server
 
 FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
diff --git a/.devops/llama-server-rocm.Dockerfile b/.devops/llama-server-rocm.Dockerfile
index 4b1cdc32090e6fac23c26dac3e0a877240c092c1..af96c332595b1c65a239fb0d61cdaad758903c6e 100644 (file)
@@ -36,7 +36,7 @@ COPY . .
 # Set nvcc architecture
 ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
-ENV LLAMA_HIPBLAS=1
+ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++
 
diff --git a/.devops/llama-server-vulkan.Dockerfile b/.devops/llama-server-vulkan.Dockerfile
index 2bc2e45d3d676230fcfc3c98383984d38fa6d0b6..49062f84bf3141e677961d3e01e0eaeb9e3fc888 100644 (file)
@@ -14,7 +14,7 @@ RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key
 # Build it
 WORKDIR /app
 COPY . .
-RUN cmake -B build -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 && \
+RUN cmake -B build -DGGML_VULKAN=1 -DLLAMA_CURL=1 && \
     cmake --build build --config Release --target llama-server
 
 # Clean up
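
For completeness, a hedged sketch of building the Vulkan server image with the renamed flag already baked into the Dockerfile above (the image tag is illustrative):

    docker build -t llama-server-vulkan -f .devops/llama-server-vulkan.Dockerfile .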