--- /dev/null
+++ b/.github/workflows/build-amd.yml
+name: CI (AMD)
+
+on:
+ workflow_dispatch: # allows manual triggering
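+ # automatic runs: only pushes to master that touch this workflow, build files, or native sources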
+ push:
+ branches:
+ - master
+ paths: [
+ '.github/workflows/build-amd.yml',
+ '**/CMakeLists.txt',
+ '**/*.cmake',
+ '**/*.h',
+ '**/*.hpp',
+ '**/*.c',
+ '**/*.cpp',
+ '**/*.cu',
+ '**/*.cuh',
+ '**/*.comp'
+ ]
+
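+# group by ref when head_ref is set (pull request events) so new pushes cancel stale runs;
+# otherwise key on run_id so master pushes and manual runs never cancel each other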
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
+ cancel-in-progress: true
+
+jobs:
+ ggml-ci-x64-amd-vulkan:
+ runs-on: [self-hosted, Linux, X64, AMD]
+
+ steps:
+ - name: Clone
+ id: checkout
+ uses: actions/checkout@v4
+
+ - name: Test
+ id: ggml-ci
+ run: |
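+ # print a Vulkan driver/device summary so the log records which GPU the runner exposes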
+ vulkaninfo --summary
+ GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+ ggml-ci-x64-amd-rocm:
+ runs-on: [self-hosted, Linux, X64, AMD]
+
+ steps:
+ - name: Clone
+ id: checkout
+ uses: actions/checkout@v4
+
+ - name: Test
+ id: ggml-ci
+ run: |
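+ # dump static GPU info (ASIC, VBIOS, driver versions) to confirm the ROCm stack sees the card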
+ amd-smi static
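+ # gfx1101 = RDNA3 (Navi 32); set GG_BUILD_AMDGPU_TARGETS to match the runner's GPU architecture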
+ GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
        run: |
          bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-# ggml-ci-x64-amd-vulkan:
-# runs-on: [self-hosted, Linux, X64, AMD]
-#
-# steps:
-# - name: Clone
-# id: checkout
-# uses: actions/checkout@v4
-#
-# - name: Test
-# id: ggml-ci
-# run: |
-# vulkaninfo --summary
-# GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-#
-# ggml-ci-x64-amd-rocm:
-# runs-on: [self-hosted, Linux, X64, AMD]
-#
-# steps:
-# - name: Clone
-# id: checkout
-# uses: actions/checkout@v4
-#
-# - name: Test
-# id: ggml-ci
-# run: |
-# amd-smi static
-# GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
ggml-ci-mac-metal:
runs-on: [self-hosted, macOS, ARM64]
--- a/ci/run.sh
+++ b/ci/run.sh
        # arm 9 and newer enables sve by default, adjust these flags depending on the cpu used
        CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8.5-a+fp16+i8mm"
    fi
+
## helpers
# download a file if it does not exist or if it is outdated