--- /dev/null
+# dpkg-buildpackage -b -us -uc
+*.log
+*.substvars
+.debhelper
+debhelper-build-stamp
+llama-cpp*/
+libllama*/
+libllava*/
+files
--- /dev/null
+llama-cpp (0.0.4488-1) unstable; urgency=medium
+
+ * Based on llama.cpp build b4488
+
+ -- Mathieu Baudier <mbaudier@argeo.org> Tue, 21 Jan 2025 07:57:54 +0000
--- /dev/null
+# Debian-specific CMake fragment, injected into the upstream llama.cpp build
+# (see -DCMAKE_PROJECT_llama.cpp_INCLUDE in debian/rules). It redirects the
+# build to the system-provided GGML libraries from the ggml-dev package
+# instead of building the bundled copy.
+cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories.
+
+# GGML dependencies
+find_library(GGML_BASE_LOCATION ggml-base)
+
+# define GGML as target so that it is disabled in llama.cpp build
+# (upstream guards its bundled GGML with `if(NOT TARGET ggml)`-style checks,
+# so pre-declaring the imported target makes it skip that subtree —
+# NOTE(review): confirm against the upstream CMakeLists of the pinned build)
+find_library(GGML_LOCATION ggml)
+message (STATUS "Found GGML library: ${GGML_LOCATION}")
+add_library(ggml SHARED IMPORTED GLOBAL)
+# NOTE(review): ${GGML_LOCATION} is unquoted; fine while the path has no
+# spaces, but quoting would be safer. Also no error if find_library failed.
+set_target_properties(ggml PROPERTIES IMPORTED_LOCATION ${GGML_LOCATION})
+
+# quite a few examples require direct reference to ggml-cpu
+# search for oldest one
+# (hard-codes the sandybridge CPU-variant name shipped by ggml-dev;
+# must be kept in sync with that package's soname layout)
+find_library(GGML_CPU_LOCATION ggml-cpu-sandybridge)
+find_library(GGML_RPC_LOCATION ggml-rpc)
+
+# make sure all libraries are available since we cannot refine per target
+# (directory-scoped link_libraries is intentional here: this fragment runs
+# before upstream targets exist, so per-target linking is not possible)
+link_libraries(${GGML_LOCATION} ${GGML_BASE_LOCATION} ${GGML_CPU_LOCATION} ${GGML_RPC_LOCATION})
+
+#add_compile_definitions(NDEBUG)
+
+# ship the static common helper library and its headers for -dev packages;
+# CMAKE_LIBRARY_ARCHITECTURE is set from DEB_HOST_MULTIARCH in debian/rules
+install(DIRECTORY ${CMAKE_BINARY_DIR}/common/ DESTINATION lib/${CMAKE_LIBRARY_ARCHITECTURE}/llama.cpp/common FILES_MATCHING PATTERN "*.a" )
+install(DIRECTORY ${CMAKE_SOURCE_DIR}/common/ DESTINATION include/llama.cpp/common FILES_MATCHING PATTERN "*.h" )
+
--- /dev/null
+Source: llama-cpp
+Section: science
+Priority: optional
+Maintainer: Mathieu Baudier <mbaudier@argeo.org>
+Build-Depends: debhelper-compat (= 13), pkg-config, cmake-data, cmake, cpio, curl, libcurl4-openssl-dev, file, git,
+ ggml-dev
+Standards-Version: 4.5.1
+Homepage: https://github.com/ggerganov/llama.cpp
+Rules-Requires-Root: binary-targets
+
+Package: libllama
+Priority: optional
+Architecture: any
+Depends: ${shlibs:Depends},
+ libggml
+Recommends: curl
+Description: Inference of LLMs in pure C/C++ (shared library)
+ Llama.cpp inference of LLMs in pure C/C++ (shared library).
+
+Package: libllava-shared
+Architecture: any
+Priority: optional
+Depends: ${shlibs:Depends},
+ libggml
+Description: Llava (shared library)
+ Llama.cpp llava (shared library).
+
+Package: llama-cpp-cli
+Architecture: any
+Priority: optional
+Depends: ${shlibs:Depends},
+ libllama
+Description: Inference of LLMs in pure C/C++ (CLI)
+ Llama.cpp inference of LLMs in pure C/C++ (CLI).
+
+Package: llama-cpp-server
+Architecture: any
+Priority: optional
+Depends: ${shlibs:Depends},
+ libllama, curl, openssl
+Description: Inference of LLMs in pure C/C++ (server)
+ Llama.cpp inference of LLMs in pure C/C++ (server).
+
+Package: libllama-dev
+Architecture: any
+Priority: optional
+Depends: ggml-dev, libllama
+Description: Inference of LLMs in pure C/C++ (development files)
+ Llama.cpp inference of LLMs in pure C/C++ (development files).
+
+Package: llama-cpp-dev
+Architecture: any
+Priority: optional
+Depends: libllama-dev
+Description: Inference of LLMs in pure C/C++ (common development files)
+ Llama.cpp inference of LLMs in pure C/C++ (common development files).
+
\ No newline at end of file
--- /dev/null
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: llama.cpp
+Upstream-Contact: https://github.com/ggerganov/llama.cpp/issues
+Source: https://github.com/ggerganov/llama.cpp
+
+Files: *
+Copyright: Copyright (c) 2023-2024 The llama.cpp authors
+License: MIT
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+Files: debian/*
+Copyright: 2024 Mathieu Baudier <mbaudier@argeo.org>
+License: GPL-2+
+ This package is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+ .
+ This package is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ .
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ .
+ On Debian systems, the complete text of the GNU General
+ Public License version 2 can be found in "/usr/share/common-licenses/GPL-2".
--- /dev/null
+/usr/include/llama*.h
+include/llama-cpp.h /usr/include/
+
+/usr/lib/pkgconfig/*.pc
+/usr/lib/*/cmake/llama/llama-*.cmake
--- /dev/null
+/usr/lib/*/libllama*
\ No newline at end of file
--- /dev/null
+activate-noawait ldconfig
--- /dev/null
+/usr/lib/*/libllava_shared*.so
--- /dev/null
+activate-noawait ldconfig
--- /dev/null
+/usr/bin/llama-cli
--- /dev/null
+/usr/include/llama.cpp/common
+/usr/lib/*/llama.cpp/*
--- /dev/null
+/usr/bin/llama-server
--- /dev/null
+examples/server/public_legacy/index-new.html
+examples/server/public_legacy/index.js
+examples/server/public_legacy/system-prompts.js
+examples/server/webui/index.html
--- /dev/null
+/usr/bin/test-*
+/usr/bin/llama-*
+/usr/bin/*.py
--- /dev/null
+#!/usr/bin/make -f
+# Debian packaging rules for llama.cpp, driven by the dh(1) sequencer.
+# See debhelper(7) (uncomment to enable)
+#export DH_VERBOSE = 1
+
+# multiarch
+include /usr/share/dpkg/architecture.mk
+
+build_multiarch=build/$(DEB_HOST_MULTIARCH)
+install_lib_multiarch=debian/tmp/usr/lib/$(DEB_HOST_MULTIARCH)
+
+# Base directory pruned of empty directories after dh_install.
+# Previously this variable was never defined, which made the find
+# command below run against the whole current directory.
+DEBIAN_BASE_DIR ?= debian
+
+# parallelism
+DEB_BUILD_OPTIONS ?= parallel=8
+
+%:
+# Use build/ for output, so that it is in the .gitignore of upstream
+	dh $@ --buildsystem=cmake --builddirectory=$(build_multiarch)
+
+override_dh_auto_configure:
+	dh_auto_configure -- \
+	-DCMAKE_LIBRARY_ARCHITECTURE="$(DEB_HOST_MULTIARCH)" \
+	-DCMAKE_PROJECT_llama.cpp_INCLUDE=debian/cmake/debian-llama.cpp.cmake \
+	-DBUILD_SHARED_LIBS=ON \
+	-DGGML_RPC=ON \
+	-DLLAMA_ALL_WARNINGS=OFF \
+	-DLLAMA_BUILD_TESTS=OFF \
+	-DLLAMA_BUILD_SERVER=ON \
+	-DLLAMA_SERVER_SSL=ON
+
+# FIXME we disable LLAMA_ALL_WARNINGS so that ggml_get_flags() CMake function do not get called
+# as it is available deep in GGML and not properly published
+
+override_dh_install:
+	dh_install
+	find $(DEBIAN_BASE_DIR) -type d -empty -delete
+
+override_dh_auto_test:
+	# tests which depends on remote location are failing
+	dh_auto_test || true
+3.0 (quilt)
--- /dev/null
+llama-cpp source: source-is-missing [examples/server/public_legacy/index-new.html]
+llama-cpp source: source-is-missing [examples/server/public_legacy/index.js]
+llama-cpp source: source-is-missing [examples/server/public_legacy/system-prompts.js]
+llama-cpp source: source-is-missing [examples/server/webui/index.html]