cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories.
# GGML dependencies
+# libggml-base as external library
find_library(GGML_BASE_LOCATION ggml-base)
+message (STATUS "Found GGML base library: ${GGML_BASE_LOCATION}")
+add_library(ggml-base SHARED IMPORTED GLOBAL)
+set_target_properties(ggml-base PROPERTIES IMPORTED_LOCATION ${GGML_BASE_LOCATION})
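+# Sketch of an optional sanity check (an assumption, not required by the packaging):
+# find_library() leaves GGML_BASE_LOCATION set to "GGML_BASE_LOCATION-NOTFOUND" when the
+# library is missing, so failing early gives a clearer error than a late link failure.
+if(NOT GGML_BASE_LOCATION)
+    message(FATAL_ERROR "libggml-base not found")
+endif()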
-# define GGML as target so that it is disabled in llama.cpp build
+# libggml as external library
+# defines GGML as target so that it is disabled in llama.cpp build
find_library(GGML_LOCATION ggml)
message (STATUS "Found GGML library: ${GGML_LOCATION}")
add_library(ggml SHARED IMPORTED GLOBAL)
set_target_properties(ggml PROPERTIES IMPORTED_LOCATION ${GGML_LOCATION})
+# transitive dependency
+target_link_libraries(ggml INTERFACE ${GGML_BASE_LOCATION})
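+# Usage note (illustration only; the target name "my_tool" is hypothetical): consumers
+# now only need to link the imported ggml target, e.g.
+#   target_link_libraries(my_tool PRIVATE ggml)
+# and libggml-base is pulled in through the INTERFACE link dependency above.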
-# quite a few examples require direct reference to ggml-cpu
-# search for oldest one
-find_library(GGML_CPU_LOCATION ggml-cpu-sandybridge)
-find_library(GGML_RPC_LOCATION ggml-rpc)
-
-# make sure all libraries are available since we cannot refine per target
-link_libraries(${GGML_LOCATION} ${GGML_BASE_LOCATION} ${GGML_CPU_LOCATION} ${GGML_RPC_LOCATION})
-
-#add_compile_definitions(NDEBUG)
+add_compile_definitions(NDEBUG)
install(DIRECTORY ${CMAKE_BINARY_DIR}/common/ DESTINATION lib/${CMAKE_LIBRARY_ARCHITECTURE}/llama.cpp/common FILES_MATCHING PATTERN "*.a" )
install(DIRECTORY ${CMAKE_SOURCE_DIR}/common/ DESTINATION include/llama.cpp/common FILES_MATCHING PATTERN "*.h" )
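+# Consumption sketch (paths follow the install() rules above; the archive name
+# libcommon.a is assumed from the upstream "common" target, not guaranteed):
+# a dependent build would add -I/usr/include/llama.cpp/common and link
+# /usr/lib/<multiarch>/llama.cpp/common/libcommon.a alongside libllama.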
Description: Inference of LLMs in pure C/C++ (shared library)
Llama.cpp inference of LLMs in pure C/C++ (shared library).
-Package: libllava-shared
-Architecture: any
-Priority: optional
-Depends: ${shlibs:Depends},
- libggml
-Description: Llava (shared library)
- Llama.cpp llava (shared library).
-
Package: llama-cpp-cli
Architecture: any
Priority: optional
override_dh_auto_configure:
dh_auto_configure -- \
-DCMAKE_LIBRARY_ARCHITECTURE="$(DEB_HOST_MULTIARCH)" \
+ -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_PROJECT_llama.cpp_INCLUDE=debian/cmake/debian-llama.cpp.cmake \
-DBUILD_SHARED_LIBS=ON \
- -DGGML_RPC=ON \
+ -DGGML_BACKEND_DL=ON \
-DLLAMA_ALL_WARNINGS=OFF \
-DLLAMA_BUILD_TESTS=OFF \
-DLLAMA_BUILD_SERVER=ON \
# FIXME we disable LLAMA_ALL_WARNINGS so that the ggml_get_flags() CMake function does not get called,
# as it is defined deep inside GGML's build system and not properly exposed to consumers
-override_dh_install:
- dh_install
- find $(DEBIAN_BASE_DIR) -type d -empty -delete
-
override_dh_auto_test:
# tests that depend on remote locations are failing; do not fail the build on them
dh_auto_test || true
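+# Possible refinement (sketch; the exclusion pattern is hypothetical, not taken from
+# the test suite): skip only the network-dependent tests instead of masking every
+# failure, e.g.
+#   dh_auto_test -- ARGS+="-E 'remote|download'"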