git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Simplify packaging debian/latest
authorMathieu Baudier <redacted>
Mon, 16 Feb 2026 06:55:00 +0000 (07:55 +0100)
committerMathieu Baudier <redacted>
Mon, 16 Feb 2026 07:36:35 +0000 (08:36 +0100)
debian/control
debian/rules

index 8ef264390e2686464caac55cbdc5cdbbcb0f5500..f62866369897437cc128487903dce717ea67a4f1 100644 (file)
@@ -20,9 +20,7 @@ Section: libs
 Architecture: any
 Multi-Arch: same
 Pre-Depends: ${misc:Pre-Depends}
-Depends: libggml0,
-         libggml0-backend-cpu,
-         ${misc:Depends},
+Depends: ${misc:Depends},
          ${shlibs:Depends},
 Description: Inference of large language models in pure C/C++ (shared library)
  llama.cpp leverages the ggml tensor library in order to run
@@ -42,7 +40,8 @@ Description: Inference of large language models in pure C/C++ (multimodal librar
 # We only distribute a few useful tools, with stable CLI options
 Package: llama.cpp-tools
 Architecture: any
-Depends: libllama0 (= ${binary:Version}),
+Depends: libggml0-backend-cpu,
+         libllama0 (= ${binary:Version}),
          libmtmd0 (= ${binary:Version}),
          ${misc:Depends},
          ${shlibs:Depends},
index 84c822df0e78e7dbe8d6e9c1f2b8f925f74d8607..c21e0e1bec275353c1d94a5c1d7aef376dd5f00e 100755 (executable)
@@ -32,24 +32,8 @@ CMAKE_FLAGS = \
               -DLLAMA_BUILD_TESTS=OFF \
               -DLLAMA_BUILD_SERVER=ON \
 
-ifeq ($(DEB_TARGET_ARCH),arm64)
 FLAGS_ARGEO=-O3 -g -DNDEBUG
 
-ifeq ($(distribution),Ubuntu)
-# Optimize for Raspberry Pi 5 on Ubuntu
-#FLAGS_ARGEO += -mcpu=cortex-a76+crc+crypto
-CMAKE_FLAGS += -DGGML_CPU_ARM_ARCH=armv8-a
-#CMAKE_FLAGS += -DGGML_NATIVE=ON
-else
-#CMAKE_FLAGS += -DGGML_CPU_ARM_ARCH=armv8-a
-CMAKE_FLAGS += -DGGML_CPU_ARM_ARCH=armv8-a
-endif
-
-else
-FLAGS_ARGEO=-O3 -g -DNDEBUG
-
-endif
-
 CMAKE_FLAGS += -DCMAKE_C_FLAGS_RELWITHDEBINFO="$(FLAGS_ARGEO)" \
                -DCMAKE_CXX_FLAGS_RELWITHDEBINFO="$(FLAGS_ARGEO)" \