--- /dev/null
+ARG UBUNTU_VERSION=22.04
+# This should generally match the CUDA version available on the container host.
+ARG CUDA_VERSION=12.3.1
+# Target the CUDA build image
+ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
+# Target the CUDA runtime image
+ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
+
+FROM ${BASE_CUDA_DEV_CONTAINER} AS build
+WORKDIR /app
+
+# Unless otherwise specified, we make a fat build.
+ARG CUDA_DOCKER_ARCH=all
+# Set nvcc architecture
+ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
+# Enable cuBLAS
+ENV WHISPER_CUBLAS=1
+
+RUN apt-get update && \
+ apt-get install -y build-essential \
+ && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
+
+COPY . .
+RUN make
+
+FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
+WORKDIR /app
+
+RUN apt-get update && \
+ apt-get install -y curl ffmpeg \
+ && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
+
+COPY --from=build /app /app
+ENTRYPOINT [ "bash", "-c" ]
--- /dev/null
+FROM ubuntu:22.04 AS build
+WORKDIR /app
+
+RUN apt-get update && \
+ apt-get install -y build-essential \
+ && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
+
+COPY . .
+RUN make
+
+FROM ubuntu:22.04 AS runtime
+WORKDIR /app
+
+RUN apt-get update && \
+ apt-get install -y curl ffmpeg \
+ && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
+
+COPY --from=build /app /app
+ENTRYPOINT [ "bash", "-c" ]
--- /dev/null
+name: Publish Docker image
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
+
+jobs:
+  push_to_registry:
+    name: Push Docker image to the GitHub Container Registry
+    if: github.event.pull_request.draft == false
+
+    runs-on: ubuntu-latest
+    env:
+      COMMIT_SHA: ${{ github.sha }}
+    strategy:
+      matrix:
+        config:
+          - { tag: "main",      dockerfile: ".devops/main.Dockerfile",      platforms: "linux/amd64,linux/arm64" }
+          - { tag: "main-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platforms: "linux/amd64" }
+
+    steps:
+      - name: Check out the repo
+        uses: actions/checkout@v3
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Log in to the GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build and push Docker image (versioned)
+        if: github.event_name == 'push'
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+          push: true
+          platforms: ${{ matrix.config.platforms }}
+          tags: "ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}"
+          file: ${{ matrix.config.dockerfile }}
+
+      - name: Build and push Docker image (tagged)
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+          push: ${{ github.event_name == 'push' }}
+          platforms: ${{ matrix.config.platforms }}
+          tags: "ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}"
+          file: ${{ matrix.config.dockerfile }}
- [x] [WebAssembly](examples/whisper.wasm)
- [x] Windows ([MSVC](https://github.com/ggerganov/whisper.cpp/blob/master/.github/workflows/build.yml#L117-L144) and [MinGW](https://github.com/ggerganov/whisper.cpp/issues/168))
- [x] [Raspberry Pi](https://github.com/ggerganov/whisper.cpp/discussions/166)
+- [x] [Docker](https://github.com/ggerganov/whisper.cpp/pkgs/container/whisper.cpp)
The entire high-level implementation of the model is contained in [whisper.h](whisper.h) and [whisper.cpp](whisper.cpp).
The rest of the code is part of the [ggml](https://github.com/ggerganov/ggml) machine learning library.
WHISPER_OPENBLAS=1 make -j
```
+## Docker
+
+### Prerequisites
+* Docker must be installed and running on your system.
+* Create a folder to store large models and intermediate files (e.g. `/whisper/models`; see the example below)
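+
+A one-line sketch; the path is only an illustration and any writable host directory works:
+
+```shell
+# create a host folder that will later be mounted into the container
+mkdir -p /whisper/models
+```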
+
+### Images
+We have two Docker images available for this project:
+
+1. `ghcr.io/ggerganov/whisper.cpp:main`: This image includes the main executable file as well as `curl` and `ffmpeg`. (platforms: `linux/amd64`, `linux/arm64`)
+2. `ghcr.io/ggerganov/whisper.cpp:main-cuda`: Same as `main` but compiled with CUDA support. (platforms: `linux/amd64`)
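+
+Both images are published by the CI workflow above. A minimal sketch for pulling them, or for building them locally from the Dockerfiles in `.devops/` (the `-t` tags in the build commands are just local names chosen for illustration):
+
+```shell
+# pull the prebuilt images from the GitHub Container Registry
+docker pull ghcr.io/ggerganov/whisper.cpp:main
+docker pull ghcr.io/ggerganov/whisper.cpp:main-cuda
+
+# or build them locally from the repository root
+docker build -f .devops/main.Dockerfile      -t whisper.cpp:main .
+docker build -f .devops/main-cuda.Dockerfile -t whisper.cpp:main-cuda .
+```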
+
+### Usage
+
+```shell
+# download a model and persist it in a local folder
+docker run -it --rm \
+  -v path/to/models:/models \
+  ghcr.io/ggerganov/whisper.cpp:main "./models/download-ggml-model.sh base /models"
+
+# transcribe an audio file
+docker run -it --rm \
+  -v path/to/models:/models \
+  -v path/to/audios:/audios \
+  ghcr.io/ggerganov/whisper.cpp:main "./main -m /models/ggml-base.bin -f /audios/jfk.wav"
+
+# transcribe an audio file from the samples folder bundled in the image
+docker run -it --rm \
+  -v path/to/models:/models \
+  ghcr.io/ggerganov/whisper.cpp:main "./main -m /models/ggml-base.bin -f ./samples/jfk.wav"
+```
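+
+The `main-cuda` image only helps when the container can actually see the GPU, which assumes an NVIDIA driver and the NVIDIA Container Toolkit are installed on the host. A sketch using the same placeholder mount paths as above:
+
+```shell
+# transcribe an audio file on the GPU using the CUDA image
+docker run -it --rm --gpus all \
+  -v path/to/models:/models \
+  -v path/to/audios:/audios \
+  ghcr.io/ggerganov/whisper.cpp:main-cuda "./main -m /models/ggml-base.bin -f /audios/jfk.wav"
+```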
+
## Limitations
- Inference only
fi
}
-models_path="$(get_script_path)"
+models_path="${2:-$(get_script_path)}"
# Whisper models
models=(
printf "\n\n"
}
-if [ "$#" -ne 1 ]; then
- printf "Usage: $0 <model>\n"
+if [ "$#" -lt 1 ] || [ "$#" -gt 2 ]; then
+ printf "Usage: $0 <model> [models_path]\n"
list_models
exit 1
exit 1
fi
-printf "Done! Model '$model' saved in 'models/ggml-$model.bin'\n"
+printf "Done! Model '$model' saved in '$models_path/ggml-$model.bin'\n"
printf "You can now use it like this:\n\n"
-printf " $ ./main -m models/ggml-$model.bin -f samples/jfk.wav\n"
+printf " $ ./main -m $models_path/ggml-$model.bin -f samples/jfk.wav\n"
printf "\n"