From: slaren
Date: Mon, 2 Sep 2024 16:11:13 +0000 (+0200)
Subject: docker : fix missing binaries in full-cuda image (#9278)
X-Git-Tag: upstream/0.0.4488~830
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=048de848ee4baad4531fcd6239438f5d55be365c;p=pkg%2Fggml%2Fsources%2Fllama.cpp

docker : fix missing binaries in full-cuda image (#9278)
---

diff --git a/.devops/full-cuda.Dockerfile b/.devops/full-cuda.Dockerfile
index b8a35424..d5acd35e 100644
--- a/.devops/full-cuda.Dockerfile
+++ b/.devops/full-cuda.Dockerfile
@@ -27,7 +27,7 @@ RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
         export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
     fi && \
     cmake -B build -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
-    cmake --build build --config Release --target llama-cli -j$(nproc) && \
+    cmake --build build --config Release -j$(nproc) && \
     cp build/bin/* .

 ENTRYPOINT ["/app/.devops/tools.sh"]
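
The change drops the explicit "--target llama-cli" so the default CMake targets are all built and copied into the image, not just the CLI binary. A minimal sketch of how one might verify this locally (the image tag is an illustrative placeholder, not part of the commit; it assumes the Dockerfile's working directory is /app, where the binaries are copied):

    # Build the full CUDA image from the repository root (tag name is a placeholder).
    docker build -t llama-full-cuda -f .devops/full-cuda.Dockerfile .

    # List the binaries copied into /app; with the old --target llama-cli build,
    # tools such as llama-server or llama-quantize would be missing here.
    docker run --rm --entrypoint /bin/ls llama-full-cuda /app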