From: Mathieu Baudier
Date: Tue, 21 Jan 2025 12:02:53 +0000 (+0100)
Subject: Deploy executables in libexec, so that they can load GGML backends
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=4de5c210630827455520f9778e26231dabb2ba79;p=pkg%2Fggml%2Fsources%2Fllama.cpp

Deploy executables in libexec, so that they can load GGML backends
---

diff --git a/debian/llama-cpp-cli.install b/debian/llama-cpp-cli.install
index c327606c..90fea7db 100644
--- a/debian/llama-cpp-cli.install
+++ b/debian/llama-cpp-cli.install
@@ -1 +1,2 @@
 /usr/bin/llama-cli
+/usr/libexec/*/ggml/llama-cli
diff --git a/debian/llama-cpp-server.install b/debian/llama-cpp-server.install
index 313c4e16..8ba163d5 100644
--- a/debian/llama-cpp-server.install
+++ b/debian/llama-cpp-server.install
@@ -1 +1,2 @@
 /usr/bin/llama-server
+/usr/libexec/*/ggml/llama-server
diff --git a/debian/not-installed b/debian/not-installed
index ff0d08da..e03f0328 100644
--- a/debian/not-installed
+++ b/debian/not-installed
@@ -1,3 +1,6 @@
-/usr/bin/test-*
 /usr/bin/llama-*
+/usr/libexec/*/ggml/llama-*
+
+/usr/bin/test-*
+
 /usr/bin/*.py
diff --git a/debian/rules b/debian/rules
index 6509421b..5ce33023 100755
--- a/debian/rules
+++ b/debian/rules
@@ -6,7 +6,9 @@ include /usr/share/dpkg/architecture.mk
 
 build_multiarch=build/$(DEB_HOST_MULTIARCH)
+install_bin=debian/tmp/usr/bin
 install_lib_multiarch=debian/tmp/usr/lib/$(DEB_HOST_MULTIARCH)
+install_libexec_multiarch=debian/tmp/usr/libexec/$(DEB_HOST_MULTIARCH)
 
 # parallelism
 DEB_BUILD_OPTIONS ?= parallel=8
 
@@ -15,6 +17,8 @@ DEB_BUILD_OPTIONS ?= parallel=8
 # Use build/ for output, so that it is in the .gitignore of upstream
 	dh $@ --buildsystem=cmake --builddirectory=$(build_multiarch)
 
+# FIXME we disable LLAMA_ALL_WARNINGS so that ggml_get_flags() CMake function do not get called
+# as it is available deep in GGML build scripts and not published
 override_dh_auto_configure:
 	dh_auto_configure -- \
 		-DCMAKE_LIBRARY_ARCHITECTURE="$(DEB_HOST_MULTIARCH)" \
@@ -28,8 +32,16 @@ override_dh_auto_configure:
 		-DLLAMA_USE_CURL=ON \
 		-DLLAMA_SERVER_SSL=ON \
 
-# FIXME we disable LLAMA_ALL_WARNINGS so that ggml_get_flags() CMake function do not get called
-# as it is available deep in GGML and not properly published
+override_dh_auto_install:
+	dh_auto_install
+
+	# Move executables to libexec, so that they can load the GGML backends
+	# and link them to bin
+	mkdir -p $(install_libexec_multiarch)/ggml
+	for file in $(install_bin)/llama-*; do \
+		mv $$file $(install_libexec_multiarch)/ggml/$$(basename "$$file"); \
+		ln -s --relative -t $(install_bin) $(install_libexec_multiarch)/ggml/$$(basename "$$file"); \
+	done
 
 override_dh_auto_test:
 # tests which depends on remote location are failing
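
For reference, the override_dh_auto_install recipe above amounts to the following plain-shell sketch (single '$' instead of make's '$$' escaping; the dpkg-architecture call is only an assumed stand-in for the DEB_HOST_MULTIARCH value that architecture.mk provides, while the debian/tmp staging paths come from the variables in the diff):

    #!/bin/sh
    # Plain-shell sketch of the move-and-symlink step from override_dh_auto_install.
    set -e
    multiarch=$(dpkg-architecture -qDEB_HOST_MULTIARCH)   # assumption: same value as DEB_HOST_MULTIARCH in debian/rules
    install_bin=debian/tmp/usr/bin
    install_libexec_multiarch=debian/tmp/usr/libexec/$multiarch

    mkdir -p "$install_libexec_multiarch/ggml"
    for file in "$install_bin"/llama-*; do
        # the real executable ends up under usr/libexec/<multiarch>/ggml/ ...
        mv "$file" "$install_libexec_multiarch/ggml/$(basename "$file")"
        # ... and usr/bin keeps a relative symlink pointing back to it
        ln -s --relative -t "$install_bin" "$install_libexec_multiarch/ggml/$(basename "$file")"
    done

Once the packages are installed, /usr/bin/llama-cli is a relative symlink to /usr/libexec/<multiarch>/ggml/llama-cli, so the directory of the real executable is where GGML's dynamic backend loading can be expected to look for backend libraries, which is the stated point of the move.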