git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Disable utilities not compatible with backend loading
authorMathieu Baudier <redacted>
Tue, 21 Jan 2025 10:15:39 +0000 (11:15 +0100)
committerMathieu Baudier <redacted>
Tue, 21 Jan 2025 10:15:39 +0000 (11:15 +0100)
debian/cmake/debian-llama.cpp.cmake
debian/control
debian/libllava-shared.install [deleted file]
debian/libllava-shared.triggers [deleted file]
debian/rules

index 0d776ec705b82b1a7de410d4c2d902773eebb32e..36214c6a4e4c9356d621dbddabcee1c58ae2fdb2 100644 (file)
@@ -1,23 +1,22 @@
 cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories.
 
 # GGML dependencies
+# libggml-base as external library
 find_library(GGML_BASE_LOCATION ggml-base)
+message (STATUS "Found GGML base library: ${GGML_BASE_LOCATION}") 
+add_library(ggml-base SHARED IMPORTED GLOBAL)
+set_target_properties(ggml-base PROPERTIES IMPORTED_LOCATION ${GGML_BASE_LOCATION})
 
-# define GGML as target so that it is disabled in llama.cpp build
+# libggml as external library
+# defines GGML as target so that it is disabled in llama.cpp build
 find_library(GGML_LOCATION ggml)
 message (STATUS "Found GGML library: ${GGML_LOCATION}") 
 add_library(ggml SHARED IMPORTED GLOBAL)
 set_target_properties(ggml PROPERTIES IMPORTED_LOCATION ${GGML_LOCATION})
+# transitive dependency
+target_link_libraries(ggml INTERFACE ${GGML_BASE_LOCATION})
 
-# quite a few examples require direct reference to ggml-cpu
-# search for oldest one
-find_library(GGML_CPU_LOCATION ggml-cpu-sandybridge)
-find_library(GGML_RPC_LOCATION ggml-rpc)
-
-# make sure all libraries are available since we cannot refine per target
-link_libraries(${GGML_LOCATION} ${GGML_BASE_LOCATION} ${GGML_CPU_LOCATION} ${GGML_RPC_LOCATION})
-
-#add_compile_definitions(NDEBUG)
+add_compile_definitions(NDEBUG)
 
 install(DIRECTORY ${CMAKE_BINARY_DIR}/common/ DESTINATION lib/${CMAKE_LIBRARY_ARCHITECTURE}/llama.cpp/common FILES_MATCHING PATTERN "*.a" )
 install(DIRECTORY ${CMAKE_SOURCE_DIR}/common/ DESTINATION include/llama.cpp/common FILES_MATCHING PATTERN "*.h" )
index 5af1c562d805173df01a101ba272b9f4617c1d1f..aa8e5189734710f0cc03f3f56d725d401b49f3a3 100644 (file)
@@ -17,14 +17,6 @@ Recommends: curl
 Description: Inference of LLMs in pure C/C++ (shared library)
  Llama.cpp inference of LLMs in pure C/C++ (shared library).
 
-Package: libllava-shared
-Architecture: any
-Priority: optional
-Depends: ${shlibs:Depends},
- libggml
-Description: Llava (shared library)
- Llama.cpp llava (shared library).
-
 Package: llama-cpp-cli
 Architecture: any
 Priority: optional
diff --git a/debian/libllava-shared.install b/debian/libllava-shared.install
deleted file mode 100644 (file)
index 002166e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/usr/lib/*/libllava_shared*.so
diff --git a/debian/libllava-shared.triggers b/debian/libllava-shared.triggers
deleted file mode 100644 (file)
index dd86603..0000000
+++ /dev/null
@@ -1 +0,0 @@
-activate-noawait ldconfig
index 9fa14ce07b154fafbcf362a2e6195cf2082a40f9..1277119c5fc2a3a3fd93929c5bbb6a579ceff53d 100755 (executable)
@@ -18,9 +18,10 @@ DEB_BUILD_OPTIONS ?= parallel=8
 override_dh_auto_configure:
        dh_auto_configure -- \
        -DCMAKE_LIBRARY_ARCHITECTURE="$(DEB_HOST_MULTIARCH)" \
+       -DCMAKE_BUILD_TYPE=Release \
        -DCMAKE_PROJECT_llama.cpp_INCLUDE=debian/cmake/debian-llama.cpp.cmake \
        -DBUILD_SHARED_LIBS=ON \
-       -DGGML_RPC=ON \
+       -DGGML_BACKEND_DL=ON \
        -DLLAMA_ALL_WARNINGS=OFF \
        -DLLAMA_BUILD_TESTS=OFF \
        -DLLAMA_BUILD_SERVER=ON \
@@ -30,10 +31,6 @@ override_dh_auto_configure:
 # FIXME we disable LLAMA_ALL_WARNINGS so that ggml_get_flags() CMake function do not get called
 # as it is available deep in GGML and not properly published
 
-override_dh_install:
-       dh_install
-       find $(DEBIAN_BASE_DIR) -type d -empty -delete
-
 override_dh_auto_test:
        # tests which depends on remote location are failing
        dh_auto_test || true