        run: |
          GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp

-  ggml-ci-x64-nvidia-v100-cuda:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, V100]
+  ggml-ci-x64-nvidia-cuda:
+    runs-on: [self-hosted, Linux, X64, NVIDIA]

    steps:
      - name: Clone
@@ ... @@
          nvidia-smi
          GG_BUILD_CUDA=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp

-  ggml-ci-x64-nvidia-v100-vulkan:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, V100]
+  ggml-ci-x64-nvidia-vulkan-cm:
+    runs-on: [self-hosted, Linux, X64, NVIDIA]

    steps:
      - name: Clone
@@ ... @@
      - name: Test
        id: ggml-ci
        run: |
-          vulkaninfo
-          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
-  ggml-ci-x64-nvidia-t4-cuda:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, T4]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-
-      - name: Test
-        id: ggml-ci
-        run: |
-          nvidia-smi
-          GG_BUILD_CUDA=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+          vulkaninfo --summary
+          GG_BUILD_VULKAN=1 GGML_VK_DISABLE_COOPMAT2=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp

-  ggml-ci-x64-nvidia-t4-vulkan:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, T4]
+  ggml-ci-x64-nvidia-vulkan-cm2:
+    runs-on: [self-hosted, Linux, X64, NVIDIA, COOPMAT2]

    steps:
      - name: Clone
@@ ... @@
      - name: Test
        id: ggml-ci
        run: |
-          vulkaninfo
+          vulkaninfo --summary
          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp

-  ggml-ci-x64-nvidia-t4-vulkan-coopmat1:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, T4]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-
-      - name: Test
-        id: ggml-ci
-        run: |
-          vulkaninfo
-          GG_BUILD_VULKAN=1 GGML_VK_DISABLE_COOPMAT2=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
  ggml-ci-x64-cpu-amx:
    runs-on: [self-hosted, Linux, X64, CPU, AMX]
@@ ... @@
        run: |
          bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp

-  ggml-ci-x64-amd-v710-vulkan:
-    runs-on: [self-hosted, Linux, X64, AMD, V710]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-
-      - name: Test
-        id: ggml-ci
-        run: |
-          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
-  ggml-ci-x64-amd-v710-rocm:
-    runs-on: [self-hosted, Linux, X64, AMD, V710]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-
-      - name: Test
-        id: ggml-ci
-        run: |
-          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+#  ggml-ci-x64-amd-vulkan:
+#    runs-on: [self-hosted, Linux, X64, AMD]
+#
+#    steps:
+#      - name: Clone
+#        id: checkout
+#        uses: actions/checkout@v4
+#
+#      - name: Test
+#        id: ggml-ci
+#        run: |
+#          vulkaninfo --summary
+#          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+#
+#  ggml-ci-x64-amd-rocm:
+#    runs-on: [self-hosted, Linux, X64, AMD]
+#
+#    steps:
+#      - name: Clone
+#        id: checkout
+#        uses: actions/checkout@v4
+#
+#      - name: Test
+#        id: ggml-ci
+#        run: |
+#          amd-smi static
+#          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp

  ggml-ci-mac-metal:
    runs-on: [self-hosted, macOS, ARM64]
@@ ... @@
      - name: Test
        id: ggml-ci
        run: |
+          vulkaninfo --summary
          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp
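
Every job above funnels into the same entry point: ci/run.sh picks the backend from GG_BUILD_* environment variables and takes two positional arguments, a results directory and a mount/model-cache directory (~/results/llama.cpp and /mnt/llama.cpp on these runners). A minimal sketch of reproducing a run locally, assuming a repository checkout and placeholder ./tmp paths:

    # CPU-only run of the same suite the runners execute (paths are placeholders)
    mkdir -p tmp
    bash ./ci/run.sh ./tmp/results ./tmp/mnt

    # backend-specific runs mirror the jobs above, e.g. Vulkan with coopmat2 disabled
    GG_BUILD_VULKAN=1 GGML_VK_DISABLE_COOPMAT2=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt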