mkdir build
cd build
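+ # "@loader_path" is macOS's counterpart to $ORIGIN: bundled dylibs are looked up relative to
+ # the binary itself, so the packaged zip should be relocatable without DYLD_LIBRARY_PATH tweaks.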
cmake .. \
+ -DCMAKE_BUILD_RPATH="@loader_path" \
-DLLAMA_FATAL_WARNINGS=ON \
-DLLAMA_CURL=ON \
-DGGML_METAL_USE_BF16=ON \
# Metal is disabled due to intermittent failures with Github runners not having a GPU:
# https://github.com/ggerganov/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313
cmake -B build \
+ -DCMAKE_BUILD_RPATH="@loader_path" \
-DLLAMA_FATAL_WARNINGS=ON \
-DLLAMA_CURL=ON \
-DGGML_METAL=OFF \
path: llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip
name: llama-bin-macos-x64.zip
- ubuntu-latest-cmake:
- runs-on: ubuntu-latest
+ ubuntu-cpu-cmake:
+ runs-on: ubuntu-22.04
steps:
- name: Clone
run: |
mkdir build
cd build
- cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON -DGGML_RPC=ON
+ cmake .. \
+ -DLLAMA_FATAL_WARNINGS=ON \
+ -DLLAMA_CURL=ON \
+ -DGGML_RPC=ON
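+ # GGML_RPC=ON also compiles ggml's RPC backend, so the CPU-only runner covers the RPC code paths as well.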
cmake --build . --config Release -j $(nproc)
- name: Test
run: |
mkdir build
cd build
- cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
+ cmake .. \
+ -DLLAMA_FATAL_WARNINGS=ON \
+ -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON \
+ -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
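+ # matrix.sanitizer is expected to expand to ADDRESS, THREAD or UNDEFINED, selecting the
+ # matching LLAMA_SANITIZE_* option.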
cmake --build . --config ${{ matrix.build_type }} -j $(nproc)
- name: Build (no OpenMP)
run: |
mkdir build
cd build
- cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DGGML_OPENMP=OFF
+ cmake .. \
+ -DLLAMA_FATAL_WARNINGS=ON \
+ -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON \
+ -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
+ -DGGML_OPENMP=OFF
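+ # With GGML_OPENMP=OFF ggml should fall back to its own thread pool, so that code path gets
+ # sanitizer coverage too.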
cmake --build . --config ${{ matrix.build_type }} -j $(nproc)
- name: Test
run: |
mkdir build
cd build
- cmake -DGGML_RPC=ON ..
+ cmake .. \
+ -DGGML_RPC=ON
cmake --build . --config Release -j $(nproc)
- name: Test
run: |
mkdir build
cd build
- cmake -DGGML_VULKAN=ON ..
+ cmake .. \
+ -DGGML_VULKAN=ON
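+ # GGML_VULKAN=ON needs the Vulkan SDK (headers and shader compiler), presumably installed by an
+ # earlier dependency step of this job.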
cmake --build . --config Release -j $(nproc)
- name: Test
- name: Build with native CMake HIP support
id: cmake_build
run: |
- cmake -B build -S . -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" -DGGML_HIP=ON
+ cmake -B build -S . \
+ -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" \
+ -DGGML_HIP=ON
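+ # hipconfig -l should print ROCm's LLVM bin directory, so CMAKE_HIP_COMPILER resolves to ROCm's
+ # clang and CMake's native HIP language support gets exercised.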
cmake --build build --config Release -j $(nproc)
- name: Build with legacy HIP support
id: cmake_build_legacy_hip
run: |
- cmake -B build2 -S . -DCMAKE_C_COMPILER=hipcc -DCMAKE_CXX_COMPILER=hipcc -DGGML_HIP=ON
+ cmake -B build2 -S . \
+ -DCMAKE_C_COMPILER=hipcc \
+ -DCMAKE_CXX_COMPILER=hipcc \
+ -DGGML_HIP=ON
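+ # Legacy path: the hipcc wrapper is used as the C/C++ compiler instead of CMake's native HIP support.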
cmake --build build2 --config Release -j $(nproc)
ubuntu-22-cmake-musa:
- name: Build with native CMake MUSA support
id: cmake_build
run: |
- cmake -B build -S . -DGGML_MUSA=ON
+ cmake -B build -S . \
+ -DGGML_MUSA=ON
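+ # GGML_MUSA=ON targets Moore Threads GPUs via the MUSA toolkit, assumed to be present in the
+ # image this job runs in.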
cmake --build build --config Release -j $(nproc)
ubuntu-22-cmake-sycl:
source /opt/intel/oneapi/setvars.sh
mkdir build
cd build
- cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
+ cmake .. \
+ -DGGML_SYCL=ON \
+ -DCMAKE_C_COMPILER=icx \
+ -DCMAKE_CXX_COMPILER=icpx
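+ # icx/icpx come from the oneAPI environment sourced above; the SYCL backend is built with these compilers.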
cmake --build . --config Release -j $(nproc)
ubuntu-22-cmake-sycl-fp16:
source /opt/intel/oneapi/setvars.sh
mkdir build
cd build
- cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON ..
+ cmake .. \
+ -DGGML_SYCL=ON \
+ -DCMAKE_C_COMPILER=icx \
+ -DCMAKE_CXX_COMPILER=icpx \
+ -DGGML_SYCL_F16=ON
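+ # Same configuration as the SYCL job above, plus GGML_SYCL_F16=ON to also cover the FP16 path.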
cmake --build . --config Release -j $(nproc)
- # TODO: build with GGML_METAL=OFF because test-backend-ops fail on "Apple Paravirtual device" and I don't know
- # how to debug it.
- # ref: https://github.com/ggerganov/llama.cpp/actions/runs/7132125951/job/19422043567?pr=4359#step:5:6584
- # would be great if we fix these
- macOS-latest-cmake:
- runs-on: macos-latest
-
- steps:
- - name: Clone
- id: checkout
- uses: actions/checkout@v4
-
- - name: Dependencies
- id: depends
- continue-on-error: true
- run: |
- brew update
-
- - name: Build
- id: cmake_build
- run: |
- sysctl -a
- mkdir build
- cd build
- cmake -DLLAMA_FATAL_WARNINGS=ON -DGGML_METAL=OFF ..
- cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)
-
- - name: Test
- id: cmake_test
- run: |
- cd build
- ctest -L main --verbose --timeout 900
-
macOS-latest-cmake-ios:
runs-on: macos-latest
- name: Build with CMake
run: |
- cmake -S . -B build -G Ninja -DCMAKE_BUILD_TYPE=Release -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DCMAKE_CUDA_ARCHITECTURES=89-real -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined -DLLAMA_FATAL_WARNINGS=ON
+ cmake -S . -B build -G Ninja \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_CUDA_ARCHITECTURES=89-real \
+ -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined \
+ -DLLAMA_FATAL_WARNINGS=ON \
+ -DGGML_NATIVE=OFF \
+ -DGGML_CUDA=ON
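+ # CMAKE_CUDA_ARCHITECTURES=89-real emits SASS only for compute capability 8.9 (no PTX fallback);
+ # --allow-shlib-undefined presumably lets linking succeed against CUDA stubs on a driver-less runner.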
cmake --build build
windows-2019-cmake-cuda:
shell: cmd
run: |
call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
- cmake -S . -B build -G "Ninja Multi-Config" -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON -DGGML_RPC=ON
+ cmake -S . -B build -G "Ninja Multi-Config" ^
+ -DLLAMA_BUILD_SERVER=ON ^
+ -DGGML_NATIVE=OFF ^
+ -DGGML_CUDA=ON ^
+ -DGGML_RPC=ON
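+ REM NINJA_JOBS = cores - 1; the ggml target is built first at this reduced parallelism,
+ REM presumably to keep nvcc memory usage in check before the full Release build.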
set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1
cmake --build build --config Release -j %NINJA_JOBS% -t ggml
cmake --build build --config Release
runs-on: ubuntu-latest
needs:
- - ubuntu-latest-cmake
- - macOS-latest-cmake
+ - ubuntu-cpu-cmake
- windows-latest-cmake
- windows-2019-cmake-cuda
- windows-latest-cmake-hip-release