-llama-cpp (0.0.5318-1) unstable; urgency=medium
+llama-cpp (0.0.5713-1) unstable; urgency=medium
* Update upstream
- -- Mathieu Baudier <mbaudier@argeo.org> Fri, 30 May 2025 06:48:43 +0000
+ -- Mathieu Baudier <mbaudier@argeo.org> Mon, 23 Jun 2025 10:23:17 +0000
+++ /dev/null
-# GGML dependencies
-# libggml-base as external library
-find_library(GGML_BASE_LOCATION ggml-base)
-message (STATUS "Found GGML base library: ${GGML_BASE_LOCATION}")
-add_library(ggml-base SHARED IMPORTED GLOBAL)
-set_target_properties(ggml-base PROPERTIES IMPORTED_LOCATION ${GGML_BASE_LOCATION})
-
-# libggml as external library
-# defining the ggml target here disables the bundled GGML in the llama.cpp build
-find_library(GGML_LOCATION ggml)
-message (STATUS "Found GGML library: ${GGML_LOCATION}")
-add_library(ggml SHARED IMPORTED GLOBAL)
-set_target_properties(ggml PROPERTIES IMPORTED_LOCATION ${GGML_LOCATION})
-# transitive dependency
-target_link_libraries(ggml INTERFACE ${GGML_BASE_LOCATION})
-
-add_compile_definitions(NDEBUG)
-
-install(DIRECTORY ${CMAKE_BINARY_DIR}/common/ DESTINATION lib/${CMAKE_LIBRARY_ARCHITECTURE}/llama.cpp/common FILES_MATCHING PATTERN "*.a" )
-install(DIRECTORY ${CMAKE_SOURCE_DIR}/common/ DESTINATION include/llama.cpp/common FILES_MATCHING PATTERN "*.h" )
-
-# build number, in line with changelog
-set(BUILD_NUMBER 5318)
-
Description: Inference of large language models in pure C/C++ (development files)
Development files required for building software based on the
stable and documented llama.cpp API.
-
-Package: llama-cpp-dev
-Section: libdevel
-Architecture: any
-Depends: ${misc:Depends},
- libllama0-dev (= ${binary:Version}), libcurl4-openssl-dev, libssl-dev
-Description: Inference of large language models in pure C/C++ (common static library)
- Development files and static library providing a framework common to the
- various examples. It allows one to quickly develop a command line utility
- but is expected to provide a less stable API than libllama-dev.
+++ /dev/null
-/usr/include/llama.cpp/common/*.h
-/usr/lib/*/llama.cpp/common/libcommon.a
-# Ignore multimodal until the mtmd approach seem stable enough
-/usr/lib/*/libllava_shared.so
-/usr/lib/*/libmtmd_shared.so
+# Ignore multimodal until the mtmd approach seems stable enough
+/usr/lib/*/libmtmd.so
+/usr/include/mtmd*.h
# Most executables produced are not stable enough to be distributed
/usr/bin/llama-*
# See debhelper(7) (uncomment to enable)
#export DH_VERBOSE = 1
-# multiarch
+# For multiarch
include /usr/share/dpkg/architecture.mk
+# For DEB_VERSION_UPSTREAM
+include /usr/share/dpkg/pkg-info.mk
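+# BUILD_NUMBER is derived from DEB_VERSION_UPSTREAM in the configure step below,
+# in line with the changelog: e.g. 0.0.5713 yields BUILD_NUMBER=5713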
build_multiarch=build/$(DEB_HOST_MULTIARCH)
install_bin=debian/tmp/usr/bin
# Use build/ for output, so that it is in the .gitignore of upstream
dh $@ --buildsystem=cmake --builddirectory=$(build_multiarch)
-# Note: we disable LLAMA_ALL_WARNINGS so that the ggml_get_flags() CMake function
-# does not get called, as it lives deep in the GGML build scripts and resets the compiler/linker flags
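+# LLAMA_USE_SYSTEM_GGML links against the distribution's libggml instead of the
+# bundled copy; CMAKE_LIBRARY_PATH points find_library() at ggml's private
+# directory under /usr/libexec (see the flags below)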
override_dh_auto_configure:
dh_auto_configure -- \
+ -DBUILD_NUMBER=$(subst 0.0.,,$(DEB_VERSION_UPSTREAM)) \
-DCMAKE_LIBRARY_ARCHITECTURE="$(DEB_HOST_MULTIARCH)" \
- -DCMAKE_BUILD_TYPE=Release \
- -DCMAKE_PROJECT_llama.cpp_INCLUDE=debian/cmake/debian-llama-cpp.cmake \
+ -DCMAKE_BUILD_TYPE=RelWithDebInfo \
+ \
-DBUILD_SHARED_LIBS=ON \
- -DGGML_BACKEND_DL=ON \
- -DLLAMA_ALL_WARNINGS=OFF \
+ -DLLAMA_USE_SYSTEM_GGML=ON \
+ -DCMAKE_LIBRARY_PATH=/usr/libexec/$(DEB_HOST_MULTIARCH)/ggml \
+ \
+ -DLLAMA_CURL=ON \
+ -DLLAMA_BUILD_TOOLS=ON \
+ -DLLAMA_BUILD_EXAMPLES=OFF \
-DLLAMA_BUILD_TESTS=OFF \
-DLLAMA_BUILD_SERVER=OFF \
- -DLLAMA_CURL=ON \
- -DLLAMA_SERVER_SSL=OFF \
override_dh_auto_install:
dh_auto_install
> llama-tools-completion
sed -i '/complete -F _llama_completions .*/d' llama-tools-completion
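+	# The sed above drops the blanket 'complete -F _llama_completions' registration
+	# from the generated script (presumably so completions are only bound per tool,
+	# via the completions/ directory populated below)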
- # Move executables to libexec, so that they can load the GGML backends
- # and link them to bin
+ # Move executables to ggml's libexec directory, so that they can load the
+ # GGML backends, and link to them from /usr/bin
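+ # e.g. /usr/libexec/$(DEB_HOST_MULTIARCH)/ggml/llama-cli, linked from /usr/bin/llama-cli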
mkdir -p $(install_libexec_multiarch)/ggml
mkdir -p completions
for file in $(install_bin)/llama-*; do \