git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Adapt and improve build after recent CMake config enhancements
author Mathieu Baudier <redacted>
Mon, 23 Jun 2025 10:20:49 +0000 (12:20 +0200)
committer Mathieu Baudier <redacted>
Mon, 23 Jun 2025 11:45:30 +0000 (13:45 +0200)
debian/changelog
debian/cmake/debian-llama-cpp.cmake [deleted file]
debian/control
debian/llama-cpp-dev.install [deleted file]
debian/not-installed
debian/rules

index 85cf5eb921e858904a3d44d74920a21f0380477c..eb248e01e783f3f4b3be6cd94a04b488f2d79a4e 100644 (file)
@@ -1,5 +1,5 @@
-llama-cpp (0.0.5318-1) unstable; urgency=medium
+llama-cpp (0.0.5713-1) unstable; urgency=medium
 
   * Update upstream
 
- -- Mathieu Baudier <mbaudier@argeo.org>  Fri, 30 May 2025 06:48:43 +0000
+ -- Mathieu Baudier <mbaudier@argeo.org>  Mon, 23 Jun 2025 10:23:17 +0000
diff --git a/debian/cmake/debian-llama-cpp.cmake b/debian/cmake/debian-llama-cpp.cmake
deleted file mode 100644 (file)
index 932662c..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-# GGML dependencies
-# libggml-base as external library
-find_library(GGML_BASE_LOCATION ggml-base)
-message (STATUS "Found GGML base library: ${GGML_BASE_LOCATION}") 
-add_library(ggml-base SHARED IMPORTED GLOBAL)
-set_target_properties(ggml-base PROPERTIES IMPORTED_LOCATION ${GGML_BASE_LOCATION})
-
-# libggml as external library
-# defines GGML as target so that it is disabled in llama.cpp build
-find_library(GGML_LOCATION ggml)
-message (STATUS "Found GGML library: ${GGML_LOCATION}") 
-add_library(ggml SHARED IMPORTED GLOBAL)
-set_target_properties(ggml PROPERTIES IMPORTED_LOCATION ${GGML_LOCATION})
-# transitive dependency
-target_link_libraries(ggml INTERFACE ${GGML_BASE_LOCATION})
-
-add_compile_definitions(NDEBUG)
-
-install(DIRECTORY ${CMAKE_BINARY_DIR}/common/ DESTINATION lib/${CMAKE_LIBRARY_ARCHITECTURE}/llama.cpp/common FILES_MATCHING PATTERN "*.a" )
-install(DIRECTORY ${CMAKE_SOURCE_DIR}/common/ DESTINATION include/llama.cpp/common FILES_MATCHING PATTERN "*.h" )
-
-# build number, in line with changelog
-set(BUILD_NUMBER 5318)
-
index 5d6c24f3470ad4924554e977a85a903b54fbf309..93075afdbf01b5ffb415308824bfdaeaec320c74 100644 (file)
@@ -49,13 +49,3 @@ Depends: ${misc:Depends},
 Description: Inference of large language models in pure C/C++ (development files)
  Development files required for building software based on the
  stable and documented llama.cpp API.
-
-Package: llama-cpp-dev
-Section: libdevel
-Architecture: any
-Depends: ${misc:Depends},
- libllama0-dev (= ${binary:Version}), libcurl4-openssl-dev, libssl-dev
-Description: Inference of large language models in pure C/C++ (common static library)
- Development files and static library providing a framework command to the
- various examples. It allows one to quickly to develop a command line utility
- but is expected to provide a less stable API than libllama-dev.
diff --git a/debian/llama-cpp-dev.install b/debian/llama-cpp-dev.install
deleted file mode 100644 (file)
index 156491b..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/usr/include/llama.cpp/common/*.h
-/usr/lib/*/llama.cpp/common/libcommon.a
index d6d4b19ef5487b1e30441d2d5c240189a09d8118..05710147885c1efced9c47566dce43ba394525bc 100644 (file)
@@ -1,6 +1,6 @@
-# Ignore multimodal until the mtmd approach seem stable enough
-/usr/lib/*/libllava_shared.so
-/usr/lib/*/libmtmd_shared.so
+# Ignore multimodal until the mtmd approach seems stable enough
+/usr/lib/*/libmtmd.so
+/usr/include/mtmd*.h
 
 # Most executables produced are not stable enough to be distributed
 /usr/bin/llama-*
index 811e1879c80a16dfb90b0f27f53205d1272cc743..352d27f512b532eef58d64fed269640ddeed50a7 100755 (executable)
@@ -2,8 +2,10 @@
 # See debhelper(7) (uncomment to enable)
 #export DH_VERBOSE = 1
 
-# multiarch
+# For multiarch
 include /usr/share/dpkg/architecture.mk
+# For DEB_VERSION_UPSTREAM
+include /usr/share/dpkg/pkg-info.mk
 
 build_multiarch=build/$(DEB_HOST_MULTIARCH)
 install_bin=debian/tmp/usr/bin
@@ -21,20 +23,21 @@ export DEB_BUILD_MAINT_OPTIONS = hardening=+all
 # Use build/ for output, so that it is in the .gitignore of upstream
        dh $@ --buildsystem=cmake --builddirectory=$(build_multiarch)
 
-# Note: we disable LLAMA_ALL_WARNINGS so that ggml_get_flags() CMake function do not get called
-# as it is available deep in GGML build scripts and reset the compiler/linker flags
 override_dh_auto_configure:
        dh_auto_configure -- \
+       -DBUILD_NUMBER=$(subst 0.0.,,$(DEB_VERSION_UPSTREAM)) \
        -DCMAKE_LIBRARY_ARCHITECTURE="$(DEB_HOST_MULTIARCH)" \
-       -DCMAKE_BUILD_TYPE=Release \
-       -DCMAKE_PROJECT_llama.cpp_INCLUDE=debian/cmake/debian-llama-cpp.cmake \
+       -DCMAKE_BUILD_TYPE=RelWithDebInfo \
+       \
        -DBUILD_SHARED_LIBS=ON \
-       -DGGML_BACKEND_DL=ON \
-       -DLLAMA_ALL_WARNINGS=OFF \
+       -DLLAMA_USE_SYSTEM_GGML=ON \
+       -DCMAKE_LIBRARY_PATH=/usr/libexec/$(DEB_HOST_MULTIARCH)/ggml \
+       \
+       -DLLAMA_CURL=ON \
+       -DLLAMA_BUILD_TOOLS=ON \
+       -DLLAMA_BUILD_EXAMPLES=OFF \
        -DLLAMA_BUILD_TESTS=OFF \
        -DLLAMA_BUILD_SERVER=OFF \
-       -DLLAMA_CURL=ON \
-       -DLLAMA_SERVER_SSL=OFF \
 
 override_dh_auto_install:
        dh_auto_install
@@ -45,8 +48,8 @@ override_dh_auto_install:
                > llama-tools-completion
        sed -i '/complete -F _llama_completions .*/d' llama-tools-completion
 
-       # Move executables to libexec, so that they can load the GGML backends
-       # and link them to bin
+       # Move executables to ggml's libexec, so that they can load the GGML backends
+       # and link them in usr/bin
        mkdir -p $(install_libexec_multiarch)/ggml
        mkdir -p completions
        for file in $(install_bin)/llama-*; do \