git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Align package names with Debian official
author Mathieu Baudier <redacted>
Sat, 18 Oct 2025 08:57:19 +0000 (10:57 +0200)
committer Mathieu Baudier <redacted>
Sat, 18 Oct 2025 10:45:11 +0000 (12:45 +0200)
debian/changelog
debian/control
debian/llama-cpp-tools-multimodal.bash-completion [deleted file]
debian/llama-cpp-tools-multimodal.install [deleted file]
debian/llama-cpp-tools.bash-completion [deleted file]
debian/llama-cpp-tools.install [deleted file]
debian/llama.cpp-tools-multimodal.bash-completion [new file with mode: 0644]
debian/llama.cpp-tools-multimodal.install [new file with mode: 0644]
debian/llama.cpp-tools.bash-completion [new file with mode: 0644]
debian/llama.cpp-tools.install [new file with mode: 0644]
debian/source/lintian-overrides

diff --git a/debian/changelog b/debian/changelog
index aa0e221840f899888f1da8816cf8194e1a8f793e..2715de05849abc8763b662cb45b0b0e102ab48e2 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,12 +1,7 @@
-llama-cpp (0.0.6641-1) unstable; urgency=medium
+llama.cpp (0.0.6764-1) unstable; urgency=medium
 
   * Update upstream
+  * Align package names with Debian official
 
- -- Mathieu Baudier <mbaudier@argeo.org>  Fri, 10 Oct 2025 14:03:01 +0000
-
-llama-cpp (0.0.6527-1) unstable; urgency=medium
-
-  * Update upstream
-
- -- Mathieu Baudier <mbaudier@argeo.org>  Mon, 22 Sep 2025 12:46:43 +0000
+ -- Mathieu Baudier <mbaudier@argeo.org>  Sat, 18 Oct 2025 08:51:56 +0000
  
\ No newline at end of file
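Note: the name on the first line of each changelog entry is the Debian source package name and must match the Source: field in debian/control, which is why this entry switches from llama-cpp to llama.cpp. A hypothetical way of recording such an entry with devscripts (a sketch only; it relies on dch's documented --package/--newversion options, and the version shown is the one from this commit):

# Sketch: add the renamed source package entry to debian/changelog via dch.
dch --package llama.cpp --newversion 0.0.6764-1 \
    "Align package names with Debian official"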
diff --git a/debian/control b/debian/control
index 3d580627a7a83fca9e81c3e620e4d83fe8d5ba9f..4e5acb6f110d9fca2c59cfccebada28fc2a0b85c 100644
--- a/debian/control
+++ b/debian/control
@@ -1,4 +1,4 @@
-Source: llama-cpp
+Source: llama.cpp
 Section: science
 Priority: optional
 Maintainer: Mathieu Baudier <mbaudier@argeo.org>
@@ -43,7 +43,7 @@ Description: Inference of large language models in pure C/C++ (multimodal library)
  mtmd provides multimodal inference.
 
 # We only distribute a few useful tools, with stable CLI options
-Package: llama-cpp-tools
+Package: llama.cpp-tools
 Architecture: any
 Depends: libllama0 (= ${binary:Version}),
          curl,
@@ -61,7 +61,7 @@ Description: Inference of large language models in pure C/C++ (tools)
  llama-bench: benchmarking of large language models or
  ggml backends.
 
-Package: llama-cpp-tools-multimodal
+Package: llama.cpp-tools-multimodal
 Architecture: any
 Depends: libmtmd0 (= ${binary:Version}),
          curl,
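Only the packaging-level names change in debian/control; the shipped executables keep their upstream llama-* names. Assuming this repository's packages are published under the new names and the repository is already configured, installation would look like this (hypothetical commands):

# Hypothetical: install the renamed binary packages.
sudo apt update
sudo apt install llama.cpp-tools llama.cpp-tools-multimodal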
diff --git a/debian/llama-cpp-tools-multimodal.bash-completion b/debian/llama-cpp-tools-multimodal.bash-completion
deleted file mode 100644
index 3ae0a65..0000000
--- a/debian/llama-cpp-tools-multimodal.bash-completion
+++ /dev/null
@@ -1 +0,0 @@
-completions/llama-mtmd-cli
diff --git a/debian/llama-cpp-tools-multimodal.install b/debian/llama-cpp-tools-multimodal.install
deleted file mode 100644
index 5aae377..0000000
--- a/debian/llama-cpp-tools-multimodal.install
+++ /dev/null
@@ -1,2 +0,0 @@
-/usr/bin/llama-mtmd-cli
-/usr/libexec/*/ggml/llama-mtmd-cli
diff --git a/debian/llama-cpp-tools.bash-completion b/debian/llama-cpp-tools.bash-completion
deleted file mode 100644
index 432aa94..0000000
--- a/debian/llama-cpp-tools.bash-completion
+++ /dev/null
@@ -1,3 +0,0 @@
-completions/llama-cli
-# completions/llama-quantize does not actually use these options
-completions/llama-bench
diff --git a/debian/llama-cpp-tools.install b/debian/llama-cpp-tools.install
deleted file mode 100644
index a028e99..0000000
--- a/debian/llama-cpp-tools.install
+++ /dev/null
@@ -1,8 +0,0 @@
-/usr/bin/llama-cli
-/usr/libexec/*/ggml/llama-cli
-
-/usr/bin/llama-quantize
-/usr/libexec/*/ggml/llama-quantize
-
-/usr/bin/llama-bench
-/usr/libexec/*/ggml/llama-bench
diff --git a/debian/llama.cpp-tools-multimodal.bash-completion b/debian/llama.cpp-tools-multimodal.bash-completion
new file mode 100644
index 0000000..3ae0a65
--- /dev/null
+++ b/debian/llama.cpp-tools-multimodal.bash-completion
@@ -0,0 +1 @@
+completions/llama-mtmd-cli
diff --git a/debian/llama.cpp-tools-multimodal.install b/debian/llama.cpp-tools-multimodal.install
new file mode 100644
index 0000000..5aae377
--- /dev/null
+++ b/debian/llama.cpp-tools-multimodal.install
@@ -0,0 +1,2 @@
+/usr/bin/llama-mtmd-cli
+/usr/libexec/*/ggml/llama-mtmd-cli
diff --git a/debian/llama.cpp-tools.bash-completion b/debian/llama.cpp-tools.bash-completion
new file mode 100644
index 0000000..432aa94
--- /dev/null
+++ b/debian/llama.cpp-tools.bash-completion
@@ -0,0 +1,3 @@
+completions/llama-cli
+# completions/llama-quantize does not actually use these options
+completions/llama-bench
diff --git a/debian/llama.cpp-tools.install b/debian/llama.cpp-tools.install
new file mode 100644
index 0000000..a028e99
--- /dev/null
+++ b/debian/llama.cpp-tools.install
@@ -0,0 +1,8 @@
+/usr/bin/llama-cli
+/usr/libexec/*/ggml/llama-cli
+
+/usr/bin/llama-quantize
+/usr/libexec/*/ggml/llama-quantize
+
+/usr/bin/llama-bench
+/usr/libexec/*/ggml/llama-bench
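Debhelper locates these files by binary package name: dh_install copies the paths listed in debian/<package>.install into that package, and dh_bash-completion installs the scripts named in debian/<package>.bash-completion. That naming contract is why the files are recreated under the new llama.cpp-tools* names with identical contents (the blob hashes 3ae0a65, 5aae377, 432aa94 and a028e99 are unchanged). One way to verify the result after a rebuild (a sketch with hypothetical .deb paths and version):

# Sketch: confirm the renamed packages still ship the expected binaries.
dpkg-deb --contents ../llama.cpp-tools_0.0.6764-1_amd64.deb | grep llama-cli
dpkg-deb --contents ../llama.cpp-tools-multimodal_0.0.6764-1_amd64.deb | grep llama-mtmd-cli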
diff --git a/debian/source/lintian-overrides b/debian/source/lintian-overrides
index 1304de993399770265aaf99b45c920e1034dfdd1..30edff27a90cfc984687a636433018ad1d33ce87 100644
--- a/debian/source/lintian-overrides
+++ b/debian/source/lintian-overrides
@@ -1,11 +1,11 @@
-llama-cpp source: source-is-missing [examples/server/public_legacy/index-new.html]
-llama-cpp source: source-is-missing [examples/server/public_legacy/index.js]
-llama-cpp source: source-is-missing [examples/server/public_legacy/system-prompts.js]
-llama-cpp source: source-contains-prebuilt-java-object [examples/llama.android/gradle/wrapper/gradle-wrapper.jar]
-llama-cpp source: source-contains-prebuilt-javascript-object [examples/server/public_legacy/index.js]
-llama-cpp source: source-contains-prebuilt-javascript-object [examples/server/public_legacy/system-prompts.js]
-llama-cpp source: missing-prerequisite-for-pyproject-backend poetry.core.masonry.api (does not satisfy python3-poetry-core:any, pybuild-plugin-pyproject:any) [pyproject.toml:39]
+llama.cpp source: source-is-missing [examples/server/public_legacy/index-new.html]
+llama.cpp source: source-is-missing [examples/server/public_legacy/index.js]
+llama.cpp source: source-is-missing [examples/server/public_legacy/system-prompts.js]
+llama.cpp source: source-contains-prebuilt-java-object [examples/llama.android/gradle/wrapper/gradle-wrapper.jar]
+llama.cpp source: source-contains-prebuilt-javascript-object [examples/server/public_legacy/index.js]
+llama.cpp source: source-contains-prebuilt-javascript-object [examples/server/public_legacy/system-prompts.js]
+llama.cpp source: missing-prerequisite-for-pyproject-backend poetry.core.masonry.api (does not satisfy python3-poetry-core:any, pybuild-plugin-pyproject:any) [pyproject.toml:39]
 # some of the "examples" are actually source code from this packaging perspective
-llama-cpp source: package-does-not-install-examples [examples/]
-llama-cpp source: package-does-not-install-examples [ggml/src/ggml-kompute/kompute/examples/]
-llama-cpp source: package-does-not-install-examples [gguf-py/examples/]
\ No newline at end of file
+llama.cpp source: package-does-not-install-examples [examples/]
+llama.cpp source: package-does-not-install-examples [ggml/src/ggml-kompute/kompute/examples/]
+llama.cpp source: package-does-not-install-examples [gguf-py/examples/]
\ No newline at end of file
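Lintian matches each override against the package name at the start of the line, so once the source package is renamed to llama.cpp, overrides still reading llama-cpp would no longer apply; every line has to be updated, as above. A sketch of checking this after a rebuild (hypothetical .changes file name and version):

# Sketch: confirm the renamed overrides are still picked up
# and no new lintian tags appear.
lintian ../llama.cpp_0.0.6764-1_source.changes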