--- a/debian/control
+++ b/debian/control
Section: science
Priority: optional
Maintainer: Mathieu Baudier <mbaudier@argeo.org>
-Build-Depends: debhelper-compat (= 13), pkgconf,
- cmake-data, cmake,
- ggml-dev,
- libcurl4-openssl-dev,
- bash-completion, dh-sequence-bash-completion,
-Standards-Version: 4.7.0
-Vcs-Git: https://git.djapps.eu/pkg/ggml/sources/llama.cpp
+Standards-Version: 4.7.2
Vcs-Browser: https://git.djapps.eu/?p=pkg/ggml/sources/llama.cpp;a=summary
-Homepage: https://github.com/ggml-org/llama.cpp
-Rules-Requires-Root: binary-targets
+Vcs-Git: https://git.djapps.eu/pkg/ggml/sources/llama.cpp
+Homepage: https://github.com/ggml-org/llama.cpp/
+Build-Depends: debhelper-compat (= 13),
+ cmake,
+ dh-sequence-bash-completion,
+ ggml-dev,
+ libcurl4-openssl-dev,
+ pkgconf,
+Rules-Requires-Root: no
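+# Neither root nor fakeroot is needed to build the binary packages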
Package: libllama0
Section: libs
Architecture: any
Multi-Arch: same
Pre-Depends: ${misc:Pre-Depends}
-Depends: ${misc:Depends}, ${shlibs:Depends},
- libggml0
+Depends: libggml0,
+ ${misc:Depends},
+ ${shlibs:Depends},
# Explicitly conflict with the official Debian packaging
Conflicts: llama.cpp
Description: Inference of large language models in pure C/C++ (shared library)
Architecture: any
Multi-Arch: same
Pre-Depends: ${misc:Pre-Depends}
-Depends: ${misc:Depends}, ${shlibs:Depends},
- libllama0
+Depends: libllama0,
+ ${misc:Depends},
+ ${shlibs:Depends},
# Explicitly conflict with the official Debian packaging
Conflicts: llama.cpp
Description: Inference of large language models in pure C/C++ (multimodal library)
- mtmd provides multimodl inference.
+ mtmd provides multimodal inference.
# We only distribute a few very useful tools, with stable CLI options
Package: llama-cpp-tools
Architecture: any
-Depends: ${misc:Depends}, ${shlibs:Depends},
- libllama0 (= ${binary:Version}), libmtmd0 (= ${binary:Version}), ggml, curl
+Depends: libllama0 (= ${binary:Version}),
+ libmtmd0 (= ${binary:Version}),
+ ggml,
+ curl,
+ ${misc:Depends},
+ ${shlibs:Depends},
Description: Inference of large language models in pure C/C++ (tools)
llama-cli: versatile tool wrapping most features provided by libllama.
It typically allows one to run one-shot prompts or to "chat"
Package: libllama0-dev
Section: libdevel
Architecture: any
-Depends: ${misc:Depends},
- ggml-dev, libllama0 (= ${binary:Version})
+Multi-Arch: same
+Depends: libllama0 (= ${binary:Version}),
+ ggml-dev,
+ ${misc:Depends},
Description: Inference of large language models in pure C/C++ (development files)
Development files required for building software based on the
stable and documented llama.cpp API.
Package: libmtmd0-dev
Section: libdevel
Architecture: any
-Depends: ${misc:Depends},
- ggml-dev, libllama0-dev (= ${binary:Version}), libmtmd0 (= ${binary:Version})
-Description: Inference of large language models in pure C/C++ (development files)
+Multi-Arch: same
+Depends: libmtmd0 (= ${binary:Version}),
+ libllama0-dev (= ${binary:Version}),
+ ${misc:Depends},
+Description: Inference of large language models in pure C/C++ (multimodal development files)
Development files required for building software based on the
- stable and documented llama.cpp API.
+ multimodal llama.cpp API.
\ No newline at end of file
--- a/debian/rules
+++ b/debian/rules
#!/usr/bin/make -f
-# See debhelper(7) (uncomment to enable)
#export DH_VERBOSE = 1
-# For multiarch
-include /usr/share/dpkg/architecture.mk
-# For DEB_VERSION_UPSTREAM
-include /usr/share/dpkg/pkg-info.mk
-
-build_multiarch=build/$(DEB_HOST_MULTIARCH)
-install_bin=debian/tmp/usr/bin
-install_lib=debian/tmp/usr/lib
-install_lib_multiarch=debian/tmp/usr/lib/$(DEB_HOST_MULTIARCH)
-install_libexec_multiarch=debian/tmp/usr/libexec/$(DEB_HOST_MULTIARCH)
-
-# parallelism
-DEB_BUILD_OPTIONS ?= parallel=8
-
-# hardening
export DEB_BUILD_MAINT_OPTIONS = hardening=+all
-%:
-# Use build/ for output, so that it is in the .gitignore of upstream
- dh $@ --buildsystem=cmake --builddirectory=$(build_multiarch)
+# For DEB_VERSION_UPSTREAM
+include /usr/share/dpkg/pkg-info.mk
+# For DEB_HOST_MULTIARCH (exported by dpkg-buildpackage, but needed
+# when debian/rules is invoked directly)
+include /usr/share/dpkg/architecture.mk
-override_dh_auto_configure:
- dh_auto_configure -- \
- -DLLAMA_BUILD_NUMBER=$(subst 0.0.,,$(DEB_VERSION_UPSTREAM)) \
- -DCMAKE_LIBRARY_ARCHITECTURE="$(DEB_HOST_MULTIARCH)" \
+CMAKE_FLAGS = \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
- \
- -DBUILD_SHARED_LIBS=ON \
+ -DLLAMA_BUILD_NUMBER=$(subst 0.0.,,$(DEB_VERSION_UPSTREAM)) \
+ -DLLAMA_BUILD_COMMIT=Argeo \
-DLLAMA_USE_SYSTEM_GGML=ON \
+ \
-DCMAKE_LIBRARY_PATH=/usr/libexec/$(DEB_HOST_MULTIARCH)/ggml \
\
-DLLAMA_CURL=ON \
-DLLAMA_BUILD_TOOLS=ON \
-DLLAMA_BUILD_EXAMPLES=OFF \
-DLLAMA_BUILD_TESTS=OFF \
- -DLLAMA_BUILD_SERVER=OFF \
+ -DLLAMA_BUILD_SERVER=OFF
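+# LLAMA_BUILD_NUMBER assumes the 0.0.<upstream build number> version
+# scheme of this packaging, e.g. 0.0.5760 yields build number 5760.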
+
+# Use build/ for output, so that it is in the .gitignore of upstream
+%:
+ dh $@ --buildsystem=cmake \
+ --builddirectory=build/$(DEB_HOST_MULTIARCH)
-override_dh_auto_install:
- dh_auto_install
-
- # Generate bash completion file
- LD_LIBRARY_PATH=$(install_lib_multiarch) \
- $(install_bin)/llama-cli --device none --completion-bash \
- > llama-tools-completion
- sed -i '/complete -F _llama_completions .*/d' llama-tools-completion
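+# The -arch suffixed targets only run for architecture-dependent
+# packages (see debhelper(7)); all packages here are Architecture: any.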
+override_dh_auto_configure-arch:
+ dh_auto_configure -- $(CMAKE_FLAGS)
- # Move executables to ggml's libexec, so that they can load the GGML backends
- # and link them in usr/bin
- mkdir -p $(install_libexec_multiarch)/ggml
+execute_after_dh_auto_install-arch:
+ # Bash completion file
mkdir -p completions
- for file in $(install_bin)/llama-*; do \
- mv $$file $(install_libexec_multiarch)/ggml/$$(basename "$$file"); \
- ln -s --relative -t $(install_bin) $(install_libexec_multiarch)/ggml/$$(basename "$$file"); \
- cp llama-tools-completion completions/$$(basename "$$file"); \
- echo "complete -F _llama_completions $$(basename "$$file")" >> completions/$$(basename "$$file"); \
+	LD_LIBRARY_PATH=debian/tmp/usr/lib/$(DEB_HOST_MULTIARCH) \
+ debian/tmp/usr/bin/llama-cli --device none --completion-bash \
+ > completions/llama-cli
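+# (--device none avoids requiring any compute device; LD_LIBRARY_PATH
+# points llama-cli at the just-installed, not yet packaged, libraries)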
+
+# Move the executables to ggml's libexec directory, so that they can
+# load the GGML backends, and symlink them back into usr/bin.
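+# Every other tool shares llama-cli's generated completion file
+# through a relative symlink in completions/.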
+	mkdir -p debian/tmp/usr/libexec/$(DEB_HOST_MULTIARCH)/ggml
+ for progname in debian/tmp/usr/bin/llama-*; do \
+ progname_base=$$(basename $$progname); \
+ mv $$progname debian/tmp/usr/libexec/$(DEB_HOST_MULTIARCH)/ggml/; \
+ ln -s --relative -t debian/tmp/usr/bin \
+ debian/tmp/usr/libexec/$(DEB_HOST_MULTIARCH)/ggml/$$progname_base; \
+ [ "$$progname_base" != "llama-cli" ] || continue ; \
+ ln -r -s completions/llama-cli completions/$$progname_base; \
done
-
+# No tests for now, as many of them require a model that we do not ship
override_dh_auto_test:
- # tests which depends on remote location are failing
- dh_auto_test || true
+ :
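+
+# Local build sketch (assuming the build dependencies are installed):
+#   dpkg-buildpackage -us -uc -b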