path: llama-bin-win-sycl-x64.zip
name: llama-bin-win-sycl-x64.zip
+ ubuntu-22-rocm:
+ runs-on: ubuntu-22.04
+
+ strategy:
+ matrix:
+ include:
+ - ROCM_VERSION: "7.2"
+ gpu_targets: "gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1151;gfx1150;gfx1200;gfx1201"
+ build: 'x64'
+
+ steps:
+ - name: Clone
+ id: checkout
+ uses: actions/checkout@v6
+ with:
+ fetch-depth: 0
+
+ - name: ccache
+ uses: ggml-org/ccache-action@v1.2.16
+ with:
+ key: ubuntu-rocm-cmake-${{ matrix.ROCM_VERSION }}-${{ matrix.build }}
+ evict-old-files: 1d
+
+ - name: Dependencies
+ id: depends
+ run: |
+          sudo apt-get install -y build-essential git cmake wget
+
+ - name: Setup Legacy ROCm
+ if: matrix.ROCM_VERSION == '7.2'
+ id: legacy_env
+ run: |
+ sudo mkdir --parents --mode=0755 /etc/apt/keyrings
+ wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | \
+ gpg --dearmor | sudo tee /etc/apt/keyrings/rocm.gpg > /dev/null
+
+ sudo tee /etc/apt/sources.list.d/rocm.list << EOF
+ deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/${{ matrix.ROCM_VERSION }} jammy main
+ EOF
+
+ sudo tee /etc/apt/preferences.d/rocm-pin-600 << EOF
+ Package: *
+ Pin: release o=repo.radeon.com
+ Pin-Priority: 600
+ EOF
+
+          sudo apt-get update
+ sudo apt-get install -y libssl-dev rocm-hip-sdk
+
+ - name: Setup TheRock
+ if: matrix.ROCM_VERSION != '7.2'
+ id: therock_env
+ run: |
+ wget https://repo.amd.com/rocm/tarball/therock-dist-linux-gfx1151-${{ matrix.ROCM_VERSION }}.tar.gz
+ mkdir install
+ tar -xf *.tar.gz -C install
+ export ROCM_PATH=$(pwd)/install
+ echo ROCM_PATH=$ROCM_PATH >> $GITHUB_ENV
+ echo PATH=$PATH:$ROCM_PATH/bin >> $GITHUB_ENV
+ echo LD_LIBRARY_PATH=$ROCM_PATH/lib:$ROCM_PATH/llvm/lib:$ROCM_PATH/lib/rocprofiler-systems >> $GITHUB_ENV
+
+ - name: Build with native CMake HIP support
+ id: cmake_build
+ run: |
+ cmake -B build -S . \
+ -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" \
+ -DCMAKE_HIP_FLAGS="-mllvm --amdgpu-unroll-threshold-local=600" \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DGGML_BACKEND_DL=ON \
+ -DGGML_NATIVE=OFF \
+ -DCMAKE_INSTALL_RPATH='$ORIGIN' \
+ -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \
+ -DGGML_CPU_ALL_VARIANTS=ON \
+ -DGPU_TARGETS="${{ matrix.gpu_targets }}" \
+ -DGGML_HIP=ON \
+ -DHIP_PLATFORM=amd \
+ -DGGML_HIP_ROCWMMA_FATTN=ON \
+ ${{ env.CMAKE_ARGS }}
+ cmake --build build --config Release -j $(nproc)
+
+ - name: Determine tag name
+ id: tag
+ uses: ./.github/actions/get-tag-name
+
+ - name: Pack artifacts
+ id: pack_artifacts
+ run: |
+ cp LICENSE ./build/bin/
+ tar -czvf llama-bin-ubuntu-rocm-${{ matrix.ROCM_VERSION }}-${{ matrix.build }}.tar.gz --transform "s,./,llama-${{ steps.tag.outputs.name }}/," -C ./build/bin .
+
+ - name: Upload artifacts
+ uses: actions/upload-artifact@v6
+ with:
+ path: llama-bin-ubuntu-rocm-${{ matrix.ROCM_VERSION }}-${{ matrix.build }}.tar.gz
+ name: llama-bin-ubuntu-rocm-${{ matrix.ROCM_VERSION }}-${{ matrix.build }}.tar.gz
+
windows-hip:
runs-on: windows-2022
- windows-cuda
- windows-sycl
- windows-hip
+ - ubuntu-22-rocm
- ubuntu-22-cpu
- ubuntu-22-vulkan
- macOS-arm64
**Linux:**
- [Ubuntu x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.tar.gz)
- [Ubuntu x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz)
+        - [Ubuntu x64 (ROCm 7.2)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-rocm-7.2-x64.tar.gz)
- [Ubuntu s390x (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-s390x.tar.gz)
**Windows:**