git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
ci : disable AMX jobs (#20654)
author: Georgi Gerganov <redacted>
Mon, 16 Mar 2026 20:38:59 +0000 (22:38 +0200)
committer: GitHub <redacted>
Mon, 16 Mar 2026 20:38:59 +0000 (22:38 +0200)
[no ci]

.github/workflows/build-self-hosted.yml

index 7c7710fe453c5e70e72c7642746e0a0206b50cbe..2944cb8401c9c0704281bdcfd362f36461e87ae4 100644 (file)
@@ -97,19 +97,21 @@ jobs:
           vulkaninfo --summary
           GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
-  ggml-ci-cpu-amx:
-    runs-on: [self-hosted, Linux, CPU, AMX]
+  # TODO: provision AMX-compatible machine
+  #ggml-ci-cpu-amx:
+  #  runs-on: [self-hosted, Linux, CPU, AMX]
 
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v6
+  #  steps:
+  #    - name: Clone
+  #      id: checkout
+  #      uses: actions/checkout@v6
 
-      - name: Test
-        id: ggml-ci
-        run: |
-          bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+  #    - name: Test
+  #      id: ggml-ci
+  #      run: |
+  #        bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
+  # TODO: provision AMD GPU machine
   # ggml-ci-amd-vulkan:
   #   runs-on: [self-hosted, Linux, AMD]
 
@@ -124,6 +126,7 @@ jobs:
   #         vulkaninfo --summary
   #         GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
+  # TODO: provision AMD GPU machine
   # ggml-ci-amd-rocm:
   #   runs-on: [self-hosted, Linux, AMD]