git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Deploy executables in libexec, so that they can load GGML backends
author Mathieu Baudier <redacted>
Tue, 21 Jan 2025 12:02:53 +0000 (13:02 +0100)
committer Mathieu Baudier <redacted>
Tue, 21 Jan 2025 12:02:53 +0000 (13:02 +0100)
debian/llama-cpp-cli.install
debian/llama-cpp-server.install
debian/not-installed
debian/rules
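
For context (an editorial note, not part of the commit): GGML's dynamically loaded backends appear to be looked up relative to the running executable, so the real llama.cpp binaries are moved under the multiarch libexec directory, presumably next to the packaged GGML backend libraries, while /usr/bin only keeps relative symlinks. A rough sketch of the intended layout on an amd64 build (the x86_64-linux-gnu triplet is an assumption, not taken from the commit):

    /usr/libexec/x86_64-linux-gnu/ggml/llama-cli        real executable, next to the GGML backends
    /usr/libexec/x86_64-linux-gnu/ggml/llama-server
    /usr/bin/llama-cli    -> ../libexec/x86_64-linux-gnu/ggml/llama-cli
    /usr/bin/llama-server -> ../libexec/x86_64-linux-gnu/ggml/llama-server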

diff --git a/debian/llama-cpp-cli.install b/debian/llama-cpp-cli.install
index c327606c2c9c5c19436ec7999712bac9a8a2a92d..90fea7db4f7f460ec81e348e487553d32ef4b3a3 100644 (file)
@@ -1 +1,2 @@
 /usr/bin/llama-cli
+/usr/libexec/*/ggml/llama-cli
diff --git a/debian/llama-cpp-server.install b/debian/llama-cpp-server.install
index 313c4e16dadc3fd98dcbe4eae203bded839b2648..8ba163d5c64785fb3e7eb9f2fe6ab2e9b7f275d8 100644 (file)
@@ -1 +1,2 @@
 /usr/bin/llama-server
+/usr/libexec/*/ggml/llama-server
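
Illustrative only: on an amd64 build (multiarch triplet assumed), the new dh_install entries above would be expected to pick up paths such as:

    /usr/bin/llama-cli                                symlink staged by debian/rules below
    /usr/libexec/x86_64-linux-gnu/ggml/llama-cli      real executable
    /usr/bin/llama-server
    /usr/libexec/x86_64-linux-gnu/ggml/llama-server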
diff --git a/debian/not-installed b/debian/not-installed
index ff0d08daf0812cf79bebc11847af47cac919aff6..e03f0328f8ab6fa3b84627b2577a09af8e0ec001 100644 (file)
@@ -1,3 +1,6 @@
-/usr/bin/test-*
 /usr/bin/llama-*
+/usr/libexec/*/ggml/llama-*
+
+/usr/bin/test-*
+
 /usr/bin/*.py
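
debian/not-installed lists build products that are deliberately not shipped by any binary package, so dh_missing does not flag them; the new /usr/libexec/*/ggml/llama-* entry covers the relocated executables that no package picks up (only llama-cli and llama-server are installed by the .install files above). A hedged example of how such a check is typically wired up (this override is hypothetical and not shown in this repository):

    override_dh_missing:
            dh_missing --fail-missing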
diff --git a/debian/rules b/debian/rules
index 6509421bd33cf05dbc7a06626d0cb8129faa4f2d..5ce330232a2c6b6b8730cc8549b756fad32268f8 100755 (executable)
@@ -6,7 +6,9 @@
 include /usr/share/dpkg/architecture.mk
 
 build_multiarch=build/$(DEB_HOST_MULTIARCH)
+install_bin=debian/tmp/usr/bin
 install_lib_multiarch=debian/tmp/usr/lib/$(DEB_HOST_MULTIARCH)
+install_libexec_multiarch=debian/tmp/usr/libexec/$(DEB_HOST_MULTIARCH)
 
 # parallelism
 DEB_BUILD_OPTIONS ?= parallel=8
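
As a worked example of the new path variables (assuming an amd64 build where DEB_HOST_MULTIARCH expands to x86_64-linux-gnu):

    install_bin               = debian/tmp/usr/bin
    install_lib_multiarch     = debian/tmp/usr/lib/x86_64-linux-gnu
    install_libexec_multiarch = debian/tmp/usr/libexec/x86_64-linux-gnu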
@@ -15,6 +17,8 @@ DEB_BUILD_OPTIONS ?= parallel=8
 # Use build/ for output, so that it is in the .gitignore of upstream
        dh $@ --buildsystem=cmake --builddirectory=$(build_multiarch)
 
+# FIXME: we disable LLAMA_ALL_WARNINGS so that the ggml_get_flags() CMake function does not get called,
+# as it is only available deep in the GGML build scripts and is not published
 override_dh_auto_configure:
        dh_auto_configure -- \
        -DCMAKE_LIBRARY_ARCHITECTURE="$(DEB_HOST_MULTIARCH)" \
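
The relocated FIXME above refers to turning off the LLAMA_ALL_WARNINGS CMake option, which would otherwise reach the unexported ggml_get_flags() helper inside GGML's build scripts. The corresponding flag is outside the lines shown in this hunk, so the following is only a sketch of how it would typically appear among the dh_auto_configure arguments:

    dh_auto_configure -- \
            -DLLAMA_ALL_WARNINGS=OFF \
            ...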
@@ -28,8 +32,16 @@ override_dh_auto_configure:
        -DLLAMA_USE_CURL=ON \
        -DLLAMA_SERVER_SSL=ON \
 
-# FIXME we disable LLAMA_ALL_WARNINGS so that ggml_get_flags() CMake function do not get called
-# as it is available deep in GGML and not properly published
+override_dh_auto_install:
+       dh_auto_install
+
+       # Move the executables to libexec, so that they can load the GGML backends,
+       # and symlink them back into bin
+       mkdir -p $(install_libexec_multiarch)/ggml
+       for file in $(install_bin)/llama-*; do \
+               mv $$file $(install_libexec_multiarch)/ggml/$$(basename "$$file"); \
+               ln -s --relative -t $(install_bin) $(install_libexec_multiarch)/ggml/$$(basename "$$file"); \
+       done
 
 override_dh_auto_test:
 # tests which depend on a remote location are failing
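
To make the install override added above concrete, here is one unrolled iteration of the loop for llama-cli, again assuming an amd64 build (paths are illustrative):

    mkdir -p debian/tmp/usr/libexec/x86_64-linux-gnu/ggml
    mv debian/tmp/usr/bin/llama-cli \
        debian/tmp/usr/libexec/x86_64-linux-gnu/ggml/llama-cli
    ln -s --relative -t debian/tmp/usr/bin \
        debian/tmp/usr/libexec/x86_64-linux-gnu/ggml/llama-cli
    # result: debian/tmp/usr/bin/llama-cli -> ../libexec/x86_64-linux-gnu/ggml/llama-cli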