From: Mathieu Baudier Date: Sun, 23 Feb 2025 09:57:15 +0000 (+0100) Subject: Reshuffle package names X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=8016532a91f14564bb0749366cc2275fdc6df707;p=pkg%2Fggml%2Fsources%2Fllama.cpp Reshuffle package names --- diff --git a/debian/control b/debian/control index 0fad808a..ed76d2eb 100644 --- a/debian/control +++ b/debian/control @@ -2,48 +2,57 @@ Source: llama-cpp Section: science Priority: optional Maintainer: Mathieu Baudier -Build-Depends: debhelper-compat (= 13), pkg-config, cmake-data, cmake, cpio, curl, libssl-dev, libcurl4-openssl-dev, file, git, - ggml-dev +Build-Depends: debhelper-compat (= 13), pkgconf, + cmake-data, cmake, + ggml-dev, + curl, +# libssl-dev, libcurl4-openssl-dev Standards-Version: 4.7.0 Vcs-Git: https://git.djapps.eu/pkg/ggml/sources/llama.cpp Vcs-Browser: https://git.djapps.eu/?p=pkg/ggml/sources/llama.cpp;a=summary Homepage: https://github.com/ggml-org/llama.cpp Rules-Requires-Root: binary-targets -Package: libllama +Package: libllama0 +Section: libs Architecture: any Multi-Arch: same Pre-Depends: ${misc:Pre-Depends} Depends: ${misc:Depends}, ${shlibs:Depends}, - libggml -Description: Inference of LLMs in pure C/C++ (shared library) + libggml0 +Description: Inference of large language models in pure C/C++ (shared library) llama.cpp leverages the ggml tensor library in order to run large language models (LLMs) provided in the GGUF file format. -Package: llama-cpp-cli +Package: llama-cpp-tools Architecture: any Depends: ${misc:Depends}, ${shlibs:Depends}, - libllama, ggml, curl -Description: Inference of LLMs in pure C/C++ (CLI) - A command line utility wrapping most features provided by libllama. + libllama0, ggml, curl +Description: Inference of large language models in pure C/C++ (tools) + llama-cli: utility tool wrapping most features provided by libllama. It typically allows one to run one-shot prompts or to "chat" with a large language model. 
- -Package: llama-cpp-quantize -Architecture: any -Depends: ${misc:Depends}, ${shlibs:Depends}, - libllama, ggml -Description: Inference of LLMs in pure C/C++ (quantize) - A command line utility to "quantize" a large language model provided - as a GGUF file. Quantizing is process of reducing the precision of + . + llama-quantize: utility tool to "quantize" a large language model + GGUF file. Quantizing is the process of reducing the precision of the underlying neural-network at aminimal cost to its accuracy. + . + llama-bench: benchmarking of large language models or + ggml backends. + +#Package: llama-cpp-server +#Architecture: any +#Depends: ${misc:Depends}, ${shlibs:Depends}, +# libllama0, ggml, curl, openssl +#Description: Inference of large language models in pure C/C++ (server) +# A simple HTTP server used to remotely run large language models. -Package: libllama-dev +Package: libllama0-dev Section: libdevel Architecture: any Depends: ${misc:Depends}, - ggml-dev, libllama (= ${binary:Version}) -Description: Inference of LLMs in pure C/C++ (development files) + ggml-dev, libllama0 (= ${binary:Version}) +Description: Inference of large language models in pure C/C++ (development files) Development files required for building software based on the stable and documented llama.cpp API. @@ -51,8 +60,8 @@ Package: llama-cpp-dev Section: libdevel Architecture: any Depends: ${misc:Depends}, - libllama-dev (= ${binary:Version}), libcurl4-openssl-dev, libssl-dev -Description: Inference of LLMs in pure C/C++ (common static library) + libllama0-dev (= ${binary:Version}), libcurl4-openssl-dev, libssl-dev +Description: Inference of large language models in pure C/C++ (common static library) Development files and static library providing a framework command to the various examples. It allows one to quickly to develop a command line utility but is expected to provide a less stable API than libllama-dev. 
diff --git a/debian/libllama-dev.install b/debian/libllama-dev.install deleted file mode 100644 index d981e574..00000000 --- a/debian/libllama-dev.install +++ /dev/null @@ -1,5 +0,0 @@ -/usr/include/llama*.h -include/llama-cpp.h /usr/include/ - -/usr/lib/*/pkgconfig/*.pc -/usr/lib/*/cmake/llama/llama-*.cmake diff --git a/debian/libllama.install b/debian/libllama.install deleted file mode 100644 index 54c30f17..00000000 --- a/debian/libllama.install +++ /dev/null @@ -1 +0,0 @@ -/usr/lib/*/libllama* \ No newline at end of file diff --git a/debian/libllama.lintian-overrides b/debian/libllama.lintian-overrides deleted file mode 100644 index bf24dcef..00000000 --- a/debian/libllama.lintian-overrides +++ /dev/null @@ -1 +0,0 @@ -libllama: no-symbols-control-file usr/lib/x86_64-linux-gnu/libllama.so diff --git a/debian/libllama.triggers b/debian/libllama.triggers deleted file mode 100644 index dd866036..00000000 --- a/debian/libllama.triggers +++ /dev/null @@ -1 +0,0 @@ -activate-noawait ldconfig diff --git a/debian/libllama0-dev.install b/debian/libllama0-dev.install new file mode 100644 index 00000000..d981e574 --- /dev/null +++ b/debian/libllama0-dev.install @@ -0,0 +1,5 @@ +/usr/include/llama*.h +include/llama-cpp.h /usr/include/ + +/usr/lib/*/pkgconfig/*.pc +/usr/lib/*/cmake/llama/llama-*.cmake diff --git a/debian/libllama0.install b/debian/libllama0.install new file mode 100644 index 00000000..54c30f17 --- /dev/null +++ b/debian/libllama0.install @@ -0,0 +1 @@ +/usr/lib/*/libllama* \ No newline at end of file diff --git a/debian/libllama0.lintian-overrides b/debian/libllama0.lintian-overrides new file mode 100644 index 00000000..854b71f9 --- /dev/null +++ b/debian/libllama0.lintian-overrides @@ -0,0 +1 @@ +libllama0: no-symbols-control-file usr/lib/x86_64-linux-gnu/libllama.so diff --git a/debian/libllama0.triggers b/debian/libllama0.triggers new file mode 100644 index 00000000..dd866036 --- /dev/null +++ b/debian/libllama0.triggers @@ -0,0 +1 @@ +activate-noawait 
ldconfig diff --git a/debian/llama-cpp-cli.install b/debian/llama-cpp-cli.install deleted file mode 100644 index 90fea7db..00000000 --- a/debian/llama-cpp-cli.install +++ /dev/null @@ -1,2 +0,0 @@ -/usr/bin/llama-cli -/usr/libexec/*/ggml/llama-cli diff --git a/debian/llama-cpp-quantize.install b/debian/llama-cpp-quantize.install deleted file mode 100644 index d72d6969..00000000 --- a/debian/llama-cpp-quantize.install +++ /dev/null @@ -1,2 +0,0 @@ -/usr/bin/llama-quantize -/usr/libexec/*/ggml/llama-quantize diff --git a/debian/llama-cpp-tools.install b/debian/llama-cpp-tools.install new file mode 100644 index 00000000..a028e995 --- /dev/null +++ b/debian/llama-cpp-tools.install @@ -0,0 +1,8 @@ +/usr/bin/llama-cli +/usr/libexec/*/ggml/llama-cli + +/usr/bin/llama-quantize +/usr/libexec/*/ggml/llama-quantize + +/usr/bin/llama-bench +/usr/libexec/*/ggml/llama-bench diff --git a/debian/source/lintian-overrides b/debian/source/lintian-overrides index 56aeffc7..1304de99 100644 --- a/debian/source/lintian-overrides +++ b/debian/source/lintian-overrides @@ -5,6 +5,7 @@ llama-cpp source: source-contains-prebuilt-java-object [examples/llama.android/g llama-cpp source: source-contains-prebuilt-javascript-object [examples/server/public_legacy/index.js] llama-cpp source: source-contains-prebuilt-javascript-object [examples/server/public_legacy/system-prompts.js] llama-cpp source: missing-prerequisite-for-pyproject-backend poetry.core.masonry.api (does not satisfy python3-poetry-core:any, pybuild-plugin-pyproject:any) [pyproject.toml:39] +# some of the "examples" are actually source code from this packaging perspective llama-cpp source: package-does-not-install-examples [examples/] llama-cpp source: package-does-not-install-examples [ggml/src/ggml-kompute/kompute/examples/] llama-cpp source: package-does-not-install-examples [gguf-py/examples/]