git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Improve packaging based on Debian mentoring feedback
author Mathieu Baudier <redacted>
Sun, 23 Feb 2025 06:42:42 +0000 (07:42 +0100)
committer Mathieu Baudier <redacted>
Sun, 23 Feb 2025 08:34:30 +0000 (09:34 +0100)
debian/changelog
debian/control
debian/copyright
debian/libllama.lintian-overrides [new file with mode: 0644]
debian/llama-cpp-dev.install
debian/rules
debian/source/lintian-overrides
debian/watch [new file with mode: 0644]

index 634213ac647d2d7281adeed5f33cba0d50c89b09..72cda300eb11dfb4cdb545575145bc197c8182e8 100644 (file)
@@ -1,5 +1,5 @@
-llama-cpp (0.0.4719-1) unstable; urgency=medium
+llama-cpp (0.0.4719-2) unstable; urgency=medium
 
-  * Update upstream
+  * Improve packaging based on mentoring feedback
 
- -- Mathieu Baudier <mbaudier@argeo.org>  Sat, 15 Feb 2025 07:24:06 +0000
+ -- Mathieu Baudier <mbaudier@argeo.org>  Sun, 23 Feb 2025 06:21:05 +0000
index 530f8f4c471893b6d16e0ce96b766ee97022b18f..0fad808ae1803772acfaae0fff165e5631607e54 100644 (file)
@@ -4,56 +4,55 @@ Priority: optional
 Maintainer: Mathieu Baudier <mbaudier@argeo.org>
 Build-Depends: debhelper-compat (= 13), pkg-config, cmake-data, cmake, cpio, curl, libssl-dev, libcurl4-openssl-dev, file, git,
        ggml-dev
-Standards-Version: 4.5.1
-Homepage: https://github.com/ggerganov/llama.cpp
+Standards-Version: 4.7.0
+Vcs-Git: https://git.djapps.eu/pkg/ggml/sources/llama.cpp
+Vcs-Browser: https://git.djapps.eu/?p=pkg/ggml/sources/llama.cpp;a=summary
+Homepage: https://github.com/ggml-org/llama.cpp
 Rules-Requires-Root: binary-targets
 
 Package: libllama
-Priority: optional
 Architecture: any
 Multi-Arch: same
 Pre-Depends: ${misc:Pre-Depends}
 Depends: ${misc:Depends}, ${shlibs:Depends},
  libggml
 Description: Inference of LLMs in pure C/C++ (shared library)
- Llama.cpp inference of LLMs in pure C/C++ (shared library).
+ llama.cpp leverages the ggml tensor library in order to run
+ large language models (LLMs) provided in the GGUF file format.
 
 Package: llama-cpp-cli
 Architecture: any
-Priority: optional
 Depends: ${misc:Depends}, ${shlibs:Depends},
  libllama, ggml, curl
 Description: Inference of LLMs in pure C/C++ (CLI)
- Llama.cpp inference of LLMs in pure C/C++ (CLI).
-
-Package: llama-cpp-server
-Architecture: any
-Priority: optional
-Depends: ${misc:Depends}, ${shlibs:Depends},
- libllama, ggml, curl, openssl
-Description: Inference of LLMs in pure C/C++ (server)
- Llama.cpp inference of LLMs in pure C/C++ (server).
+ A command line utility wrapping most features provided by libllama.
+ It typically allows one to run one-shot prompts or to "chat"
+ with a large language model.
 
 Package: llama-cpp-quantize
 Architecture: any
-Priority: optional
 Depends: ${misc:Depends}, ${shlibs:Depends},
  libllama, ggml
 Description: Inference of LLMs in pure C/C++ (quantize)
- Llama.cpp inference of LLMs in pure C/C++ (quantize).
+ A command line utility to "quantize" a large language model provided
+ as a GGUF file. Quantizing is the process of reducing the precision of
+ the underlying neural network at a minimal cost to its accuracy.
 
 Package: libllama-dev
+Section: libdevel
 Architecture: any
-Priority: optional
 Depends: ${misc:Depends},
  ggml-dev, libllama (= ${binary:Version})
 Description: Inference of LLMs in pure C/C++ (development files)
- Llama.cpp inference of LLMs in pure C/C++ (development files).
+ Development files required for building software based on the
+ stable and documented llama.cpp API.
 
 Package: llama-cpp-dev
+Section: libdevel
 Architecture: any
-Priority: optional
 Depends: ${misc:Depends},
  libllama-dev (= ${binary:Version}), libcurl4-openssl-dev, libssl-dev
 Description: Inference of LLMs in pure C/C++ (common static library)
- Llama.cpp inference of LLMs in pure C/C++ (common static library).
+ Development files and static library providing a framework common to the
+ various examples. It allows one to quickly develop a command line utility
+ but is expected to provide a less stable API than libllama-dev.
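
Note: the quantize utility described above is driven entirely from the
command line: it takes an input GGUF file, an output path and a target
quantization type. A sketch, assuming the upstream binary name
llama-quantize and hypothetical file names:

    $ llama-quantize model-f16.gguf model-q4_0.gguf Q4_0
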
index d690de2991b3f162bfd7719fd04c9f4146b7f676..f236ff03c9112355b5882cd9379400bad9d75390 100644 (file)
@@ -1,11 +1,22 @@
 Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
 Upstream-Name: llama.cpp
-Upstream-Contact: https://github.com/ggerganov/llama.cpp/issues
-Source: https://github.com/ggerganov/llama.cpp
+Upstream-Contact: https://github.com/ggml-org/llama.cpp/issues
+Source: https://github.com/ggml-org/llama.cpp
+#Files-Excluded: 
+# models/*
+# ggml/*
+# ggml/src/ggml-kompute/kompute/*
+# usr/share/doc/llama-cpp-dev/examples/llama.android/gradlew
+#Comment: The models/ directory contains the "vocabularies" (mappings between token IDs and strings)
+# used by some publicly available large language models, most of which are
+# not DFSG compliant.
+# We use the version of ggml packaged separately and
+# thus consider ggml/ as third-party code here.
+# Other excludes are third-party code (which is not used here).
 
 Files: *
 Copyright: Copyright (c) 2023-2025 The llama.cpp authors
-License: MIT 
+License: Expat 
  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
  in the Software without restriction, including without limitation the rights
diff --git a/debian/libllama.lintian-overrides b/debian/libllama.lintian-overrides
new file mode 100644 (file)
index 0000000..bf24dce
--- /dev/null
@@ -0,0 +1 @@
+libllama: no-symbols-control-file usr/lib/x86_64-linux-gnu/libllama.so
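
Note: this override silences lintian's no-symbols-control-file warning,
which fires because the package ships a shared library without a symbols
file. Whether the override is picked up can be checked after a build; a
sketch, with a hypothetical .changes file name:

    $ lintian --show-overrides ../llama-cpp_0.0.4719-2_amd64.changes

Overridden tags are then listed with an "O:" prefix instead of a warning.
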
index bec882ac142da593fecbb1f079e48804b0bfa176..156491bb3cc48bf7a686621bcc41b028c86f51eb 100644 (file)
@@ -1,2 +1,2 @@
-/usr/include/llama.cpp/common
-/usr/lib/*/llama.cpp/*
+/usr/include/llama.cpp/common/*.h
+/usr/lib/*/llama.cpp/common/libcommon.a
index 276617ea789599ea502455083c36d2e145a5a37e..d0b858bfb146f754eeeceedfdbed120bab0f17ef 100755 (executable)
@@ -14,12 +14,15 @@ install_libexec_multiarch=debian/tmp/usr/libexec/$(DEB_HOST_MULTIARCH)
 # parallelism
 DEB_BUILD_OPTIONS ?= parallel=8
 
+# hardening
+export DEB_BUILD_MAINT_OPTIONS = hardening=+all
+
 %:
 # Use build/ for output, so that it is in the .gitignore of upstream
        dh $@ --buildsystem=cmake --builddirectory=$(build_multiarch)
 
-# FIXME we disable LLAMA_ALL_WARNINGS so that ggml_get_flags() CMake function do not get called
-# as it is available deep in GGML build scripts and not published
+# Note: we disable LLAMA_ALL_WARNINGS so that the ggml_get_flags() CMake function
+# does not get called, as it is defined deep in the GGML build scripts and resets
+# the compiler/linker flags
 override_dh_auto_configure:
        dh_auto_configure -- \
        -DCMAKE_LIBRARY_ARCHITECTURE="$(DEB_HOST_MULTIARCH)" \
@@ -29,9 +32,9 @@ override_dh_auto_configure:
        -DGGML_BACKEND_DL=ON \
        -DLLAMA_ALL_WARNINGS=OFF \
        -DLLAMA_BUILD_TESTS=OFF \
-       -DLLAMA_BUILD_SERVER=ON \
+       -DLLAMA_BUILD_SERVER=OFF \
        -DLLAMA_CURL=ON \
-       -DLLAMA_SERVER_SSL=ON \
+       -DLLAMA_SERVER_SSL=OFF \
 
 override_dh_auto_install:
        dh_auto_install
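
Note: exporting DEB_BUILD_MAINT_OPTIONS with hardening=+all asks
dpkg-buildflags to enable all hardening feature areas (e.g. bindnow on
top of the defaults). The resulting flags can be inspected outside of a
build; a sketch (output varies by architecture and dpkg version):

    $ DEB_BUILD_MAINT_OPTIONS=hardening=+all dpkg-buildflags --get CFLAGS
    $ DEB_BUILD_MAINT_OPTIONS=hardening=+all dpkg-buildflags --get LDFLAGS
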
index 3cf26c03a02049c14cd98764a7815939c2ed392a..56aeffc7921ea2a4f5ebc4ec6169791372cb6a09 100644 (file)
@@ -1,4 +1,10 @@
 llama-cpp source: source-is-missing [examples/server/public_legacy/index-new.html]
 llama-cpp source: source-is-missing [examples/server/public_legacy/index.js]
 llama-cpp source: source-is-missing [examples/server/public_legacy/system-prompts.js]
-llama-cpp source: source-is-missing [examples/server/webui/index.html]
+llama-cpp source: source-contains-prebuilt-java-object [examples/llama.android/gradle/wrapper/gradle-wrapper.jar]
+llama-cpp source: source-contains-prebuilt-javascript-object [examples/server/public_legacy/index.js]
+llama-cpp source: source-contains-prebuilt-javascript-object [examples/server/public_legacy/system-prompts.js]
+llama-cpp source: missing-prerequisite-for-pyproject-backend poetry.core.masonry.api (does not satisfy python3-poetry-core:any, pybuild-plugin-pyproject:any) [pyproject.toml:39]
+llama-cpp source: package-does-not-install-examples [examples/]
+llama-cpp source: package-does-not-install-examples [ggml/src/ggml-kompute/kompute/examples/]
+llama-cpp source: package-does-not-install-examples [gguf-py/examples/]
\ No newline at end of file
diff --git a/debian/watch b/debian/watch
new file mode 100644 (file)
index 0000000..2beb463
--- /dev/null
@@ -0,0 +1,3 @@
+version=4
+opts="mode=git" \
+https://git.djapps.eu/pkg/ggml/sources/llama.cpp refs/tags/upstream/([\d\.]+)
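
Note: with mode=git, uscan clones the repository and derives upstream
versions from the tags matching the pattern above. The watch file can be
tested from the unpacked source tree; a sketch:

    $ uscan --no-download --verbose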