From: Nuno Date: Mon, 9 Feb 2026 10:33:06 +0000 (+0100) Subject: ci: add vulkan docker image (#3644) X-Git-Tag: upstream/1.8.3+155~35 X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=764482c3175d9c3bc6089c1ec84df7d1b9537d83;p=pkg%2Fggml%2Fsources%2Fwhisper.cpp ci: add vulkan docker image (#3644) Signed-off-by: rare-magma --- diff --git a/.devops/main-vulkan.Dockerfile b/.devops/main-vulkan.Dockerfile new file mode 100644 index 00000000..2be22e4d --- /dev/null +++ b/.devops/main-vulkan.Dockerfile @@ -0,0 +1,20 @@ +FROM ubuntu:24.04 AS build +WORKDIR /app + +RUN apt-get update && \ + apt-get install -y build-essential wget cmake git libvulkan-dev glslc \ + && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/* + +COPY .. . +RUN make base.en CMAKE_ARGS="-DGGML_VULKAN=1" + +FROM ubuntu:24.04 AS runtime +WORKDIR /app + +RUN apt-get update && \ + apt-get install -y curl ffmpeg libsdl2-dev wget cmake git libvulkan1 mesa-vulkan-drivers \ + && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/* + +COPY --from=build /app /app +ENV PATH=/app/build/bin:$PATH +ENTRYPOINT [ "bash", "-c" ] diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 57f062e9..6c0de0ec 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -22,6 +22,7 @@ jobs: - { tag: "main-musa", dockerfile: ".devops/main-musa.Dockerfile", platform: "linux/amd64" } - { tag: "main-intel", dockerfile: ".devops/main-intel.Dockerfile", platform: "linux/amd64" } - { tag: "main-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platform: "linux/amd64" } + - { tag: "main-vulkan", dockerfile: ".devops/main-vulkan.Dockerfile", platform: "linux/amd64" } steps: - name: Check out the repo diff --git a/README.md b/README.md index 6d4988e6..c0d8edb9 100644 --- a/README.md +++ b/README.md @@ -443,11 +443,12 @@ ffmpeg -i samples/jfk.wav jfk.opus ### Images -We have two Docker images available for this project: +We have multiple Docker images available for this project: 1. 
`ghcr.io/ggml-org/whisper.cpp:main`: This image includes the main executable file as well as `curl` and `ffmpeg`. (platforms: `linux/amd64`, `linux/arm64`) 2. `ghcr.io/ggml-org/whisper.cpp:main-cuda`: Same as `main` but compiled with CUDA support. (platforms: `linux/amd64`) 3. `ghcr.io/ggml-org/whisper.cpp:main-musa`: Same as `main` but compiled with MUSA support. (platforms: `linux/amd64`) +4. `ghcr.io/ggml-org/whisper.cpp:main-vulkan`: Same as `main` but compiled with Vulkan support. (platforms: `linux/amd64`) ### Usage @@ -456,15 +457,27 @@ We have two Docker images available for this project: docker run -it --rm \ -v path/to/models:/models \ whisper.cpp:main "./models/download-ggml-model.sh base /models" + # transcribe an audio file docker run -it --rm \ -v path/to/models:/models \ -v path/to/audios:/audios \ whisper.cpp:main "whisper-cli -m /models/ggml-base.bin -f /audios/jfk.wav" + # transcribe an audio file in samples folder docker run -it --rm \ -v path/to/models:/models \ whisper.cpp:main "whisper-cli -m /models/ggml-base.bin -f ./samples/jfk.wav" + +# run the web server docker run -it --rm -p "8080:8080" \ + -v path/to/models:/models \ + whisper.cpp:main "whisper-server --host 0.0.0.0 -m /models/ggml-base.bin" + +# run the bench tool on the small.en model using 4 threads +docker run -it --rm \ + -v path/to/models:/models \ + whisper.cpp:main "whisper-bench -m /models/ggml-small.en.bin -t 4" ``` ## Installing with Conan