git.djapps.eu Git - pkg/ggml/sources/whisper.cpp/commitdiff
Adapt to changes upstream and in dependencies
authorMathieu Baudier <redacted>
Tue, 24 Jun 2025 06:05:55 +0000 (08:05 +0200)
committerMathieu Baudier <redacted>
Tue, 24 Jun 2025 06:29:18 +0000 (08:29 +0200)
debian/cmake/debian-whisper.cpp.cmake
debian/control
debian/not-installed
debian/patches/0001-disable-some-examples.patch
debian/patches/0002-use-llama-cpp-library.patch [new file with mode: 0644]
debian/patches/0003-load-ggml-backends-cli.patch [new file with mode: 0644]
debian/patches/0003-load-ggml-backends.patch [deleted file]
debian/patches/0004-load-ggml-backends-cli.patch [deleted file]
debian/patches/0004-use-llama-cpp-library.patch [deleted file]
debian/patches/series
debian/rules

index 1bc06cfff46558b8c5029bd4d261a3a4aa79a73b..281fdd4d65a1226236be8fe9e999aed99d133c91 100644 (file)
@@ -1,29 +1,3 @@
-cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories.
-
-include(GNUInstallDirs)
-include(CMakePackageConfigHelpers)
-
-# GGML dependencies
-if (NOT TARGET ggml)
-# libggml-base as external library
-find_library(GGML_BASE_LOCATION ggml-base)
-message (STATUS "Found GGML base library: ${GGML_BASE_LOCATION}") 
-add_library(ggml-base SHARED IMPORTED GLOBAL)
-set_target_properties(ggml-base PROPERTIES IMPORTED_LOCATION ${GGML_BASE_LOCATION})
-
-# libggml as external library
-# defines GGML as target so that it is disabled in whisper.cpp build
-find_library(GGML_LOCATION ggml)
-message (STATUS "Found GGML library: ${GGML_LOCATION}") 
-add_library(ggml SHARED IMPORTED GLOBAL)
-set_target_properties(ggml PROPERTIES IMPORTED_LOCATION ${GGML_LOCATION})
-# transitive dependency
-target_link_libraries(ggml INTERFACE ${GGML_BASE_LOCATION})
-endif()
-
-add_compile_definitions(NDEBUG)
-
-
 # install common
 install(DIRECTORY ${CMAKE_BINARY_DIR}/examples/ DESTINATION lib/${CMAKE_LIBRARY_ARCHITECTURE}/whisper.cpp/common FILES_MATCHING PATTERN "libcommon*.a" )
 install(DIRECTORY ${CMAKE_SOURCE_DIR}/examples/ DESTINATION include/whisper.cpp/common FILES_MATCHING PATTERN "common*.h" )
index 3e6ca94ed4bc9cb0c72e06678841a8adadce58ae..cb288b46e6251df7babc8ae67b2ad3257fa9d398 100644 (file)
@@ -5,7 +5,7 @@ Maintainer: Mathieu Baudier <mbaudier@argeo.org>
 Build-Depends: debhelper-compat (= 13), pkgconf,
        cmake-data, cmake,
        libsdl2-dev,
-       ggml-dev, libggml-cpu,
+       ggml-dev,
        libllama0-dev,
 Standards-Version: 4.7.0
 Vcs-Git: https://git.djapps.eu/pkg/ggml/sources/whisper.cpp
@@ -19,7 +19,7 @@ Architecture: any
 Multi-Arch: same
 Pre-Depends: ${misc:Pre-Depends}
 Depends: ${misc:Depends}, ${shlibs:Depends},
- libggml0, libggml-cpu
+ libggml0
 Description: Inference of Whisper in pure C/C++ (shared library)
  The shared library provides the core of whisper.cpp
  speech-to-text capabilities using a Whisper model.
index 9cdfeab6d56fd8f4d4a6fd163a95a60368a8f0fe..10f64cc92ebb1738c9031557543f7452c88f191b 100644 (file)
@@ -3,3 +3,5 @@
 
 /usr/bin/whisper-talk-llama
 /usr/libexec/*/ggml/whisper-talk-llama
+
+/usr/bin/vad-speech-segments
index 357505a61938deb2b55afcf34ceced4f309d4eba..c7ddeafa627623a9cc6718caaf991a5a4db31826 100644 (file)
@@ -8,10 +8,10 @@ Disable some unused examples
  1 file changed, 6 insertions(+), 6 deletions(-)
 
 diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
-index e4265af..0109886 100644
+index c37a2e6..f2588b0 100644
 --- a/examples/CMakeLists.txt
 +++ b/examples/CMakeLists.txt
-@@ -102,16 +102,16 @@ elseif(CMAKE_JS_VERSION)
+@@ -102,17 +102,17 @@ elseif(CMAKE_JS_VERSION)
      add_subdirectory(addon.node)
  else()
      add_subdirectory(cli)
@@ -21,6 +21,7 @@ index e4265af..0109886 100644
 +    #add_subdirectory(bench)
 +    #add_subdirectory(server)
 +    #add_subdirectory(quantize)
+     add_subdirectory(vad-speech-segments)
      if (WHISPER_SDL2)
          add_subdirectory(stream)
          add_subdirectory(command)
@@ -33,7 +34,7 @@ index e4265af..0109886 100644
          endif()
      endif (WHISPER_SDL2)
  
-@@ -119,5 +119,5 @@ else()
+@@ -120,5 +120,5 @@ else()
  endif()
  
  if (WHISPER_SDL2)
diff --git a/debian/patches/0002-use-llama-cpp-library.patch b/debian/patches/0002-use-llama-cpp-library.patch
new file mode 100644 (file)
index 0000000..24ba64f
--- /dev/null
@@ -0,0 +1,53 @@
+From: Mathieu Baudier <mbaudier@argeo.org>
+Date: Tue, 11 Mar 2025 15:40:11 +0100
+Subject: use-llama-cpp-library
+
+---
+ examples/talk-llama/CMakeLists.txt | 32 ++++----------------------------
+ 1 file changed, 4 insertions(+), 28 deletions(-)
+
+diff --git a/examples/talk-llama/CMakeLists.txt b/examples/talk-llama/CMakeLists.txt
+index 13ecced..f86a338 100644
+--- a/examples/talk-llama/CMakeLists.txt
++++ b/examples/talk-llama/CMakeLists.txt
+@@ -3,36 +3,12 @@ if (WHISPER_SDL2)
+     set(CMAKE_CXX_STANDARD_REQUIRED ON)
+     set(TARGET whisper-talk-llama)
+-    add_executable(${TARGET} talk-llama.cpp
+-        llama.cpp
+-        llama-adapter.cpp
+-        llama-arch.cpp
+-        llama-batch.cpp
+-        llama-chat.cpp
+-        llama-context.cpp
+-        llama-cparams.cpp
+-        llama-grammar.cpp
+-        llama-graph.cpp
+-        llama-hparams.cpp
+-        llama-impl.cpp
+-        llama-io.cpp
+-        llama-kv-cache-unified.cpp
+-        llama-kv-cache-unified-iswa.cpp
+-        llama-memory-recurrent.cpp
+-        llama-memory-hybrid.cpp
+-        llama-memory.cpp
+-        llama-mmap.cpp
+-        llama-model-loader.cpp
+-        llama-model-saver.cpp
+-        llama-model.cpp
+-        llama-quant.cpp
+-        llama-sampling.cpp
+-        llama-vocab.cpp
+-        unicode.cpp
+-        unicode-data.cpp)
++    add_executable(${TARGET} talk-llama.cpp)
+     target_include_directories(${TARGET} PRIVATE ${SDL2_INCLUDE_DIRS})
+-    target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
++    target_link_libraries(${TARGET} PRIVATE common common-sdl whisper llama ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
++
++      install(TARGETS ${TARGET} RUNTIME)
+     if(WIN32)
+         # It requires Windows 8.1 or later for PrefetchVirtualMemory
diff --git a/debian/patches/0003-load-ggml-backends-cli.patch b/debian/patches/0003-load-ggml-backends-cli.patch
new file mode 100644 (file)
index 0000000..903787a
--- /dev/null
@@ -0,0 +1,22 @@
+From: Mathieu Baudier <mbaudier@argeo.org>
+Date: Tue, 11 Mar 2025 16:22:18 +0100
+Subject: load-ggml-backends-cli
+
+---
+ examples/cli/cli.cpp | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/examples/cli/cli.cpp b/examples/cli/cli.cpp
+index f73ed9a..4640bdd 100644
+--- a/examples/cli/cli.cpp
++++ b/examples/cli/cli.cpp
+@@ -918,6 +918,9 @@ int main(int argc, char ** argv) {
+     SetConsoleOutputCP(CP_UTF8);
+ #endif
++      // make sure GGML backends are loaded
++      ggml_backend_load_all();
++
+     whisper_params params;
+     // If the only argument starts with "@", read arguments line-by-line
diff --git a/debian/patches/0003-load-ggml-backends.patch b/debian/patches/0003-load-ggml-backends.patch
deleted file mode 100644 (file)
index 8be3db1..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-From: Mathieu Baudier <mbaudier@argeo.org>
-Date: Mon, 27 Jan 2025 09:34:50 +0100
-Subject: load-ggml-backends
-
-Make sure GGML backends are loaded in talk-llama
----
- examples/talk-llama/talk-llama.cpp | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
-diff --git a/examples/talk-llama/talk-llama.cpp b/examples/talk-llama/talk-llama.cpp
-index 9097c49..f12a82c 100644
---- a/examples/talk-llama/talk-llama.cpp
-+++ b/examples/talk-llama/talk-llama.cpp
-@@ -273,7 +273,10 @@ The transcript only includes text, it does not include markup like HTML and Mark
- {0}{4})";
- int main(int argc, char ** argv) {
--    whisper_params params;
-+      // make sure GGML backends are loaded
-+      ggml_backend_load_all();
-+
-+      whisper_params params;
-     if (whisper_params_parse(argc, argv, params) == false) {
-         return 1;
diff --git a/debian/patches/0004-load-ggml-backends-cli.patch b/debian/patches/0004-load-ggml-backends-cli.patch
deleted file mode 100644 (file)
index 841c301..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-From: Mathieu Baudier <mbaudier@argeo.org>
-Date: Tue, 11 Mar 2025 16:22:18 +0100
-Subject: load-ggml-backends-cli
-
----
- examples/cli/cli.cpp | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/examples/cli/cli.cpp b/examples/cli/cli.cpp
-index fccfd13..732ff22 100644
---- a/examples/cli/cli.cpp
-+++ b/examples/cli/cli.cpp
-@@ -861,6 +861,9 @@ int main(int argc, char ** argv) {
-     SetConsoleOutputCP(CP_UTF8);
- #endif
-+      // make sure GGML backends are loaded
-+      ggml_backend_load_all();
-+
-     whisper_params params;
-     // If the only argument starts with "@", read arguments line-by-line
diff --git a/debian/patches/0004-use-llama-cpp-library.patch b/debian/patches/0004-use-llama-cpp-library.patch
deleted file mode 100644 (file)
index 17ecb3c..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-From: Mathieu Baudier <mbaudier@argeo.org>
-Date: Tue, 11 Mar 2025 15:40:11 +0100
-Subject: use-llama-cpp-library
-
----
- examples/talk-llama/CMakeLists.txt | 28 ++++------------------------
- 1 file changed, 4 insertions(+), 24 deletions(-)
-
-diff --git a/examples/talk-llama/CMakeLists.txt b/examples/talk-llama/CMakeLists.txt
-index 3e3971a..f86a338 100644
---- a/examples/talk-llama/CMakeLists.txt
-+++ b/examples/talk-llama/CMakeLists.txt
-@@ -3,32 +3,12 @@ if (WHISPER_SDL2)
-     set(CMAKE_CXX_STANDARD_REQUIRED ON)
-     set(TARGET whisper-talk-llama)
--    add_executable(${TARGET} talk-llama.cpp
--        llama.cpp
--        llama-adapter.cpp
--        llama-arch.cpp
--        llama-batch.cpp
--        llama-chat.cpp
--        llama-context.cpp
--        llama-cparams.cpp
--        llama-grammar.cpp
--        llama-graph.cpp
--        llama-hparams.cpp
--        llama-impl.cpp
--        llama-io.cpp
--        llama-kv-cache.cpp
--        llama-memory.cpp
--        llama-mmap.cpp
--        llama-model-loader.cpp
--        llama-model.cpp
--        llama-quant.cpp
--        llama-sampling.cpp
--        llama-vocab.cpp
--        unicode.cpp
--        unicode-data.cpp)
-+    add_executable(${TARGET} talk-llama.cpp)
-     target_include_directories(${TARGET} PRIVATE ${SDL2_INCLUDE_DIRS})
--    target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
-+    target_link_libraries(${TARGET} PRIVATE common common-sdl whisper llama ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
-+
-+      install(TARGETS ${TARGET} RUNTIME)
-     if(WIN32)
-         # It requires Windows 8.1 or later for PrefetchVirtualMemory
index 51c4c7bd91e22af0dd4a02d753b9da161124b681..676e523615dcd099a4b51bcf2d8fbc9e117cd039 100644 (file)
@@ -1,4 +1,3 @@
 0001-disable-some-examples.patch
-0003-load-ggml-backends.patch
-0004-use-llama-cpp-library.patch
-0004-load-ggml-backends-cli.patch
+0002-use-llama-cpp-library.patch
+0003-load-ggml-backends-cli.patch
index 5790272d79e7feabf56c45ac73ff1559c2cf6f0c..fdba3daca9f2e437975612053def3a28c5f2c0ef 100755 (executable)
@@ -24,9 +24,13 @@ export DEB_BUILD_MAINT_OPTIONS = hardening=+all
 override_dh_auto_configure:
        dh_auto_configure -- \
        -DCMAKE_LIBRARY_ARCHITECTURE="$(DEB_HOST_MULTIARCH)" \
+       -DCMAKE_BUILD_TYPE=RelWithDebInfo \
        -DCMAKE_PROJECT_whisper.cpp_INCLUDE=debian/cmake/debian-whisper.cpp.cmake \
-       -DCMAKE_BUILD_TYPE=Release \
+       \
        -DBUILD_SHARED_LIBS=ON \
+       -DWHISPER_USE_SYSTEM_GGML=ON \
+       -DCMAKE_LIBRARY_PATH=/usr/libexec/$(DEB_HOST_MULTIARCH)/ggml \
+       \
        -DWHISPER_BUILD_TESTS=OFF \
        -DWHISPER_BUILD_EXAMPLES=ON \
        -DWHISPER_FFMPEG=OFF \