--- /dev/null
+++ b/.devops/main-vulkan.Dockerfile
+FROM ubuntu:24.04 AS build
+WORKDIR /app
+
+RUN apt-get update && \
+ apt-get install -y build-essential wget cmake git libvulkan-dev glslc \
+ && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
+
+COPY . .
+RUN make base.en CMAKE_ARGS="-DGGML_VULKAN=1"
+
+FROM ubuntu:24.04 AS runtime
+WORKDIR /app
+
+RUN apt-get update && \
+ apt-get install -y curl ffmpeg libsdl2-dev wget cmake git libvulkan1 mesa-vulkan-drivers \
+ && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
+
+COPY --from=build /app /app
+ENV PATH=/app/build/bin:$PATH
+ENTRYPOINT [ "bash", "-c" ]
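For a local sanity check of the new Dockerfile, something like the following should work. This is a minimal sketch: the `whisper.cpp:main-vulkan` tag is just a local name, `--device /dev/dri` assumes a Linux host whose GPU is reachable through DRI render nodes (the usual setup for the Mesa Vulkan drivers installed above), and the model path assumes `make base.en` leaves the downloaded model under `/app/models`.

```shell
# build the image from the repository root (same Dockerfile path as the CI matrix entry below)
docker build -f .devops/main-vulkan.Dockerfile -t whisper.cpp:main-vulkan .

# smoke test: pass the host GPU through and benchmark the bundled base.en model
docker run -it --rm --device /dev/dri \
  whisper.cpp:main-vulkan "whisper-bench -m ./models/ggml-base.en.bin"
```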
- { tag: "main-musa", dockerfile: ".devops/main-musa.Dockerfile", platform: "linux/amd64" }
- { tag: "main-intel", dockerfile: ".devops/main-intel.Dockerfile", platform: "linux/amd64" }
- { tag: "main-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platform: "linux/amd64" }
+ - { tag: "main-vulkan", dockerfile: ".devops/main-vulkan.Dockerfile", platform: "linux/amd64" }
steps:
- name: Check out the repo
### Images
-We have two Docker images available for this project:
+We have multiple Docker images available for this project:
1. `ghcr.io/ggml-org/whisper.cpp:main`: This image includes the main executable file as well as `curl` and `ffmpeg`. (platforms: `linux/amd64`, `linux/arm64`)
2. `ghcr.io/ggml-org/whisper.cpp:main-cuda`: Same as `main` but compiled with CUDA support. (platforms: `linux/amd64`)
3. `ghcr.io/ggml-org/whisper.cpp:main-musa`: Same as `main` but compiled with MUSA support. (platforms: `linux/amd64`)
+4. `ghcr.io/ggml-org/whisper.cpp:main-vulkan`: Same as `main` but compiled with Vulkan support. (platforms: `linux/amd64`)
### Usage

```shell
# download model and persist it in a local folder
docker run -it --rm \
-v path/to/models:/models \
whisper.cpp:main "./models/download-ggml-model.sh base /models"
+
# transcribe an audio file
docker run -it --rm \
-v path/to/models:/models \
-v path/to/audios:/audios \
whisper.cpp:main "whisper-cli -m /models/ggml-base.bin -f /audios/jfk.wav"
+
# transcribe an audio file in samples folder
docker run -it --rm \
-v path/to/models:/models \
whisper.cpp:main "whisper-cli -m /models/ggml-base.bin -f ./samples/jfk.wav"
+
+# run the web server
+docker run -it --rm -p "8080:8080" \
+ -v path/to/models:/models \
+ whisper.cpp:main "whisper-server --host 127.0.0.1 -m /models/ggml-base.bin"
+
+# run the bench tool on the small.en model using 4 threads
+docker run -it --rm \
+ -v path/to/models:/models \
+ whisper.cpp:main "whisper-bench -m /models/ggml-small.en.bin -t 4"
```
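For the `main-vulkan` image the container also needs access to the host GPU. As a sketch (the exact flags depend on your GPU and driver), on a Linux host using the Mesa drivers this usually means exposing the DRI render nodes:

```shell
# transcribe an audio file using the Vulkan image, passing the host GPU through
docker run -it --rm --device /dev/dri \
  -v path/to/models:/models \
  -v path/to/audios:/audios \
  ghcr.io/ggml-org/whisper.cpp:main-vulkan "whisper-cli -m /models/ggml-base.bin -f /audios/jfk.wav"
```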
## Installing with Conan