]> git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Reshuffle package names
authorMathieu Baudier <redacted>
Sun, 23 Feb 2025 09:57:15 +0000 (10:57 +0100)
committerMathieu Baudier <redacted>
Sun, 23 Feb 2025 10:06:02 +0000 (11:06 +0100)
13 files changed:
debian/control
debian/libllama-dev.install [deleted file]
debian/libllama.install [deleted file]
debian/libllama.lintian-overrides [deleted file]
debian/libllama.triggers [deleted file]
debian/libllama0-dev.install [new file with mode: 0644]
debian/libllama0.install [new file with mode: 0644]
debian/libllama0.lintian-overrides [new file with mode: 0644]
debian/libllama0.triggers [new file with mode: 0644]
debian/llama-cpp-cli.install [deleted file]
debian/llama-cpp-quantize.install [deleted file]
debian/llama-cpp-tools.install [new file with mode: 0644]
debian/source/lintian-overrides

index 0fad808ae1803772acfaae0fff165e5631607e54..ed76d2eb6d9078fd6efbdfc8a767cac86dab9901 100644 (file)
@@ -2,48 +2,57 @@ Source: llama-cpp
 Section: science
 Priority: optional
 Maintainer: Mathieu Baudier <mbaudier@argeo.org>
-Build-Depends: debhelper-compat (= 13), pkg-config, cmake-data, cmake, cpio, curl, libssl-dev, libcurl4-openssl-dev, file, git,
-       ggml-dev
+Build-Depends: debhelper-compat (= 13), pkgconf,
+       cmake-data, cmake,
+       ggml-dev,
+       curl,
+#      libssl-dev, libcurl4-openssl-dev
 Standards-Version: 4.7.0
 Vcs-Git: https://git.djapps.eu/pkg/ggml/sources/llama.cpp
 Vcs-Browser: https://git.djapps.eu/?p=pkg/ggml/sources/llama.cpp;a=summary
 Homepage: https://github.com/ggml-org/llama.cpp
 Rules-Requires-Root: binary-targets
 
-Package: libllama
+Package: libllama0
+Section: libs
 Architecture: any
 Multi-Arch: same
 Pre-Depends: ${misc:Pre-Depends}
 Depends: ${misc:Depends}, ${shlibs:Depends},
- libggml
-Description: Inference of LLMs in pure C/C++ (shared library)
+ libggml0
+Description: Inference of large language models in pure C/C++ (shared library)
  llama.cpp leverages the ggml tensor library in order to run
  large language models (LLMs) provided in the GGUF file format.
 
-Package: llama-cpp-cli
+Package: llama-cpp-tools
 Architecture: any
 Depends: ${misc:Depends}, ${shlibs:Depends},
- libllama, ggml, curl
-Description: Inference of LLMs in pure C/C++ (CLI)
A command line utility wrapping most features provided by libllama.
+ libllama0, ggml, curl
+Description: Inference of large language models in pure C/C++ (tools)
llama-cli: utility tool wrapping most features provided by libllama.
  It typically allows one to run one-shot prompts or to "chat"
  with a large language model.
-
-Package: llama-cpp-quantize
-Architecture: any
-Depends: ${misc:Depends}, ${shlibs:Depends},
- libllama, ggml
-Description: Inference of LLMs in pure C/C++ (quantize)
- A command line utility to "quantize" a large language model provided
- as a GGUF file. Quantizing is process of reducing the precision of
+ .
+ llama-quantize: utility tool to "quantize" a large language model
+ GGUF file. Quantizing is the process of reducing the precision of
 the underlying neural-network at a minimal cost to its accuracy.
+ .
+ llama-bench: benchmarking of large language models or
+ ggml backends.
+
+#Package: llama-cpp-server
+#Architecture: any
+#Depends: ${misc:Depends}, ${shlibs:Depends},
+# libllama0, ggml, curl, openssl
+#Description: Inference of large language models in pure C/C++ (server)
+# A simple HTTP server used to remotely run large language models.
 
-Package: libllama-dev
+Package: libllama0-dev
 Section: libdevel
 Architecture: any
 Depends: ${misc:Depends},
- ggml-dev, libllama (= ${binary:Version})
-Description: Inference of LLMs in pure C/C++ (development files)
+ ggml-dev, libllama0 (= ${binary:Version})
+Description: Inference of large language models in pure C/C++ (development files)
  Development files required for building software based on the
  stable and documented llama.cpp API.
 
@@ -51,8 +60,8 @@ Package: llama-cpp-dev
 Section: libdevel
 Architecture: any
 Depends: ${misc:Depends},
- libllama-dev (= ${binary:Version}), libcurl4-openssl-dev, libssl-dev
-Description: Inference of LLMs in pure C/C++ (common static library)
+ libllama0-dev (= ${binary:Version}), libcurl4-openssl-dev, libssl-dev
+Description: Inference of large language models in pure C/C++ (common static library)
  Development files and static library providing a framework command to the
  various examples. It allows one to quickly to develop a command line utility
  but is expected to provide a less stable API than libllama-dev.
diff --git a/debian/libllama-dev.install b/debian/libllama-dev.install
deleted file mode 100644 (file)
index d981e57..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-/usr/include/llama*.h
-include/llama-cpp.h /usr/include/
-
-/usr/lib/*/pkgconfig/*.pc
-/usr/lib/*/cmake/llama/llama-*.cmake
diff --git a/debian/libllama.install b/debian/libllama.install
deleted file mode 100644 (file)
index 54c30f1..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/usr/lib/*/libllama*
\ No newline at end of file
diff --git a/debian/libllama.lintian-overrides b/debian/libllama.lintian-overrides
deleted file mode 100644 (file)
index bf24dce..0000000
+++ /dev/null
@@ -1 +0,0 @@
-libllama: no-symbols-control-file usr/lib/x86_64-linux-gnu/libllama.so
diff --git a/debian/libllama.triggers b/debian/libllama.triggers
deleted file mode 100644 (file)
index dd86603..0000000
+++ /dev/null
@@ -1 +0,0 @@
-activate-noawait ldconfig
diff --git a/debian/libllama0-dev.install b/debian/libllama0-dev.install
new file mode 100644 (file)
index 0000000..d981e57
--- /dev/null
@@ -0,0 +1,5 @@
+/usr/include/llama*.h
+include/llama-cpp.h /usr/include/
+
+/usr/lib/*/pkgconfig/*.pc
+/usr/lib/*/cmake/llama/llama-*.cmake
diff --git a/debian/libllama0.install b/debian/libllama0.install
new file mode 100644 (file)
index 0000000..54c30f1
--- /dev/null
@@ -0,0 +1 @@
+/usr/lib/*/libllama*
\ No newline at end of file
diff --git a/debian/libllama0.lintian-overrides b/debian/libllama0.lintian-overrides
new file mode 100644 (file)
index 0000000..854b71f
--- /dev/null
@@ -0,0 +1 @@
+libllama0: no-symbols-control-file usr/lib/x86_64-linux-gnu/libllama.so
diff --git a/debian/libllama0.triggers b/debian/libllama0.triggers
new file mode 100644 (file)
index 0000000..dd86603
--- /dev/null
@@ -0,0 +1 @@
+activate-noawait ldconfig
diff --git a/debian/llama-cpp-cli.install b/debian/llama-cpp-cli.install
deleted file mode 100644 (file)
index 90fea7d..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/usr/bin/llama-cli
-/usr/libexec/*/ggml/llama-cli
diff --git a/debian/llama-cpp-quantize.install b/debian/llama-cpp-quantize.install
deleted file mode 100644 (file)
index d72d696..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/usr/bin/llama-quantize
-/usr/libexec/*/ggml/llama-quantize
diff --git a/debian/llama-cpp-tools.install b/debian/llama-cpp-tools.install
new file mode 100644 (file)
index 0000000..a028e99
--- /dev/null
@@ -0,0 +1,8 @@
+/usr/bin/llama-cli
+/usr/libexec/*/ggml/llama-cli
+
+/usr/bin/llama-quantize
+/usr/libexec/*/ggml/llama-quantize
+
+/usr/bin/llama-bench
+/usr/libexec/*/ggml/llama-bench
index 56aeffc7921ea2a4f5ebc4ec6169791372cb6a09..1304de993399770265aaf99b45c920e1034dfdd1 100644 (file)
@@ -5,6 +5,7 @@ llama-cpp source: source-contains-prebuilt-java-object [examples/llama.android/g
 llama-cpp source: source-contains-prebuilt-javascript-object [examples/server/public_legacy/index.js]
 llama-cpp source: source-contains-prebuilt-javascript-object [examples/server/public_legacy/system-prompts.js]
 llama-cpp source: missing-prerequisite-for-pyproject-backend poetry.core.masonry.api (does not satisfy python3-poetry-core:any, pybuild-plugin-pyproject:any) [pyproject.toml:39]
+# some of the "examples" are actually source code from this packaging perspective
 llama-cpp source: package-does-not-install-examples [examples/]
 llama-cpp source: package-does-not-install-examples [ggml/src/ggml-kompute/kompute/examples/]
 llama-cpp source: package-does-not-install-examples [gguf-py/examples/]
\ No newline at end of file