--- a/.github/workflows/build-amd.yml
+++ /dev/null
-name: CI (AMD)
-
-on:
-  workflow_dispatch: # allows manual triggering
-  push:
-    branches:
-      - master
-    paths: [
-      '.github/workflows/build-amd.yml',
-      '**/CMakeLists.txt',
-      '**/.cmake',
-      '**/*.h',
-      '**/*.hpp',
-      '**/*.c',
-      '**/*.cpp',
-      '**/*.cu',
-      '**/*.cuh',
-      '**/*.comp'
-    ]
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  ggml-ci-x64-amd-vulkan:
-    runs-on: [self-hosted, Linux, X64, AMD]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-
-      - name: Test
-        id: ggml-ci
-        run: |
-          vulkaninfo --summary
-          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
-  ggml-ci-x64-amd-rocm:
-    runs-on: [self-hosted, Linux, X64, AMD]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-
-      - name: Test
-        id: ggml-ci
-        run: |
-          amd-smi static
-          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
         run: |
           bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+  ggml-ci-x64-amd-vulkan:
+    runs-on: [self-hosted, Linux, X64, AMD]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
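+          # print a summary of the Vulkan devices visible on this runner as a sanity check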
+          vulkaninfo --summary
+          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+  ggml-ci-x64-amd-rocm:
+    runs-on: [self-hosted, Linux, X64, AMD]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
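+          # print static GPU information (ASIC, VRAM, driver) for the runner as a sanity check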
+          amd-smi static
+          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
   ggml-ci-mac-metal:
     runs-on: [self-hosted, macOS, ARM64]