From: Mathieu Baudier
Date: Tue, 11 Mar 2025 14:36:22 +0000 (+0100)
Subject: Merge tag 'upstream/1.7.4+203' into patch-queue/debian/latest
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=4844c1dfe6f26b6b2e9cf7d0c33ddacbae2d443a;p=pkg%2Fggml%2Fsources%2Fwhisper.cpp

Merge tag 'upstream/1.7.4+203' into patch-queue/debian/latest
---

4844c1dfe6f26b6b2e9cf7d0c33ddacbae2d443a
diff --cc debian/changelog
index 8fc9b9d2,00000000..534e55ac
mode 100644,000000..100644
--- a/debian/changelog
+++ b/debian/changelog
@@@ -1,5 -1,0 +1,5 @@@
- whisper-cpp (1.7.4+95-2) unstable; urgency=medium
++whisper-cpp (1.7.4+203-1) unstable; urgency=medium
 +
-  * Improve packaging based on mentoring feedback
++ * Update upstream
 +
-  -- Mathieu Baudier  Sun, 23 Feb 2025 11:39:50 +0000
++ -- Mathieu Baudier  Tue, 11 Mar 2025 14:06:57 +0000
diff --cc debian/cmake/debian-whisper.cpp.cmake
index e9b7025d,00000000..1bc06cff
mode 100644,000000..100644
--- a/debian/cmake/debian-whisper.cpp.cmake
+++ b/debian/cmake/debian-whisper.cpp.cmake
@@@ -1,42 -1,0 +1,30 @@@
 +cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories.
 +
 +include(GNUInstallDirs)
 +include(CMakePackageConfigHelpers)
 +
 +# GGML dependencies
 +if (NOT TARGET ggml)
 +# libggml-base as external library
 +find_library(GGML_BASE_LOCATION ggml-base)
 +message (STATUS "Found GGML base library: ${GGML_BASE_LOCATION}")
 +add_library(ggml-base SHARED IMPORTED GLOBAL)
 +set_target_properties(ggml-base PROPERTIES IMPORTED_LOCATION ${GGML_BASE_LOCATION})
 +
 +# libggml as external library
 +# defines GGML as target so that it is disabled in whisper.cpp build
 +find_library(GGML_LOCATION ggml)
 +message (STATUS "Found GGML library: ${GGML_LOCATION}")
 +add_library(ggml SHARED IMPORTED GLOBAL)
 +set_target_properties(ggml PROPERTIES IMPORTED_LOCATION ${GGML_LOCATION})
 +# transitive dependency
 +target_link_libraries(ggml INTERFACE ${GGML_BASE_LOCATION})
- 
- # libwhisper actually link against a CPU backend
- #find_library(GGML_CPU_LOCATION ggml-cpu)
- # FIXME better way to find CPU backend
- set(GGML_CPU_LOCATION /usr/libexec/${CMAKE_LIBRARY_ARCHITECTURE}/ggml/libggml-cpu-sandybridge.so)
- message (STATUS "Found GGML CPU library: ${GGML_CPU_LOCATION}")
- link_libraries(${GGML_CPU_LOCATION})
- 
- # Not clear whether a libggml-cpu is actually needed.
- # LD_LIBRARY_PATH=/usr/libexec/*/ggml would have to be used in that case
- # It could be more robust to set RPATH, but it causes lintian errors:
- #set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/libexec/${CMAKE_LIBRARY_ARCHITECTURE}/ggml")
 +endif()
 +
 +add_compile_definitions(NDEBUG)
 +
 +
 +# install common
 +install(DIRECTORY ${CMAKE_BINARY_DIR}/examples/ DESTINATION lib/${CMAKE_LIBRARY_ARCHITECTURE}/whisper.cpp/common FILES_MATCHING PATTERN "libcommon*.a" )
 +install(DIRECTORY ${CMAKE_SOURCE_DIR}/examples/ DESTINATION include/whisper.cpp/common FILES_MATCHING PATTERN "common*.h" )
 +
diff --cc debian/patches/0001-disable-some-examples.patch
index c8b75256,00000000..c4cc5880
mode 100644,000000..100644
--- a/debian/patches/0001-disable-some-examples.patch
+++ b/debian/patches/0001-disable-some-examples.patch
@@@ -1,42 -1,0 +1,42 @@@
 +From: Mathieu Baudier
 +Date: Mon, 27 Jan 2025 08:08:56 +0100
 +Subject: disable-some-examples
 +
 +Disable some unused examples
 +---
 + examples/CMakeLists.txt | 12 ++++++------
 + 1 file changed, 6 insertions(+), 6 deletions(-)
 +
 +diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
- index d509626..6d0ba25 100644
++index e1b083d..3d45d4c 100644
 +--- a/examples/CMakeLists.txt
 ++++ b/examples/CMakeLists.txt
- @@ -104,16 +104,16 @@ elseif(CMAKE_JS_VERSION)
++@@ -102,16 +102,16 @@ elseif(CMAKE_JS_VERSION)
 +     add_subdirectory(addon.node)
 + else()
 +     add_subdirectory(cli)
 +-    add_subdirectory(bench)
 +-    add_subdirectory(server)
 +-    add_subdirectory(quantize)
 ++    #add_subdirectory(bench)
 ++    #add_subdirectory(server)
 ++    #add_subdirectory(quantize)
 +     if (WHISPER_SDL2)
 +         add_subdirectory(stream)
 +         add_subdirectory(command)
 +         add_subdirectory(talk-llama)
 +-        add_subdirectory(lsp)
 ++        #add_subdirectory(lsp)
 +         if (GGML_SYCL)
 +-            add_subdirectory(sycl)
 ++            #add_subdirectory(sycl)
 +         endif()
 +     endif (WHISPER_SDL2)
 +
- @@ -121,5 +121,5 @@ else()
++@@ -119,5 +119,5 @@ else()
 + endif()
 +
 + if (WHISPER_SDL2)
 +-    add_subdirectory(wchess)
 ++    #add_subdirectory(wchess)
 + endif (WHISPER_SDL2)
diff --cc debian/patches/0003-load-ggml-backends.patch
index da11d079,00000000..8be3db12
mode 100644,000000..100644
--- a/debian/patches/0003-load-ggml-backends.patch
+++ b/debian/patches/0003-load-ggml-backends.patch
@@@ -1,25 -1,0 +1,25 @@@
 +From: Mathieu Baudier
 +Date: Mon, 27 Jan 2025 09:34:50 +0100
 +Subject: load-ggml-backends
 +
 +Make sure GGML backends are loaded in talk-llama
 +---
 + examples/talk-llama/talk-llama.cpp | 5 ++++-
 + 1 file changed, 4 insertions(+), 1 deletion(-)
 +
 +diff --git a/examples/talk-llama/talk-llama.cpp b/examples/talk-llama/talk-llama.cpp
- index dcdaec4..43bd58b 100644
++index 9097c49..f12a82c 100644
 +--- a/examples/talk-llama/talk-llama.cpp
 ++++ b/examples/talk-llama/talk-llama.cpp
- @@ -272,7 +272,10 @@ The transcript only includes text, it does not include markup like HTML and Mark
++@@ -273,7 +273,10 @@ The transcript only includes text, it does not include markup like HTML and Mark
 + {0}{4})";
 +
 + int main(int argc, char ** argv) {
 +-    whisper_params params;
 ++    // make sure GGML backends are loaded
 ++    ggml_backend_load_all();
 ++
 ++    whisper_params params;
 +
 +     if (whisper_params_parse(argc, argv, params) == false) {
 +         return 1;
diff --cc debian/patches/0004-load-ggml-backends-cli.patch
index 00000000,00000000..b5c8f453
new file mode 100644
--- /dev/null
+++ b/debian/patches/0004-load-ggml-backends-cli.patch
@@@ -1,0 -1,0 +1,22 @@@
++From: Mathieu Baudier
++Date: Tue, 11 Mar 2025 16:22:18 +0100
++Subject: load-ggml-backends-cli
++
++---
++ examples/cli/cli.cpp | 3 +++
++ 1 file changed, 3 insertions(+)
++
++diff --git a/examples/cli/cli.cpp b/examples/cli/cli.cpp
++index a84d3cb..d891c82 100644
++--- a/examples/cli/cli.cpp
+++++ b/examples/cli/cli.cpp
++@@ -929,6 +929,9 @@ int main(int argc, char ** argv) {
++     SetConsoleOutputCP(CP_UTF8);
++ #endif
++
+++    // make sure GGML backends are loaded
+++    ggml_backend_load_all();
+++
++     whisper_params params;
++
++     // If the only argument starts with "@", read arguments line-by-line
diff --cc debian/patches/0004-use-llama-cpp-library.patch
index 00000000,00000000..004fa985
new file mode 100644
--- /dev/null
+++ b/debian/patches/0004-use-llama-cpp-library.patch
@@@ -1,0 -1,0 +1,46 @@@
++From: Mathieu Baudier
++Date: Tue, 11 Mar 2025 15:40:11 +0100
++Subject: use-llama-cpp-library
++
++---
++ examples/talk-llama/CMakeLists.txt | 25 ++++---------------------
++ 1 file changed, 4 insertions(+), 21 deletions(-)
++
++diff --git a/examples/talk-llama/CMakeLists.txt b/examples/talk-llama/CMakeLists.txt
++index aea1ae6..f86a338 100644
++--- a/examples/talk-llama/CMakeLists.txt
+++++ b/examples/talk-llama/CMakeLists.txt
++@@ -3,29 +3,12 @@ if (WHISPER_SDL2)
++     set(CMAKE_CXX_STANDARD_REQUIRED ON)
++
++     set(TARGET whisper-talk-llama)
++-    add_executable(${TARGET} talk-llama.cpp
++-        llama.cpp
++-        llama-adapter.cpp
++-        llama-arch.cpp
++-        llama-batch.cpp
++-        llama-chat.cpp
++-        llama-context.cpp
++-        llama-cparams.cpp
++-        llama-grammar.cpp
++-        llama-hparams.cpp
++-        llama-impl.cpp
++-        llama-kv-cache.cpp
++-        llama-mmap.cpp
++-        llama-model-loader.cpp
++-        llama-model.cpp
++-        llama-quant.cpp
++-        llama-sampling.cpp
++-        llama-vocab.cpp
++-        unicode.cpp
++-        unicode-data.cpp)
+++    add_executable(${TARGET} talk-llama.cpp)
++     target_include_directories(${TARGET} PRIVATE ${SDL2_INCLUDE_DIRS})
++
++-    target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+++    target_link_libraries(${TARGET} PRIVATE common common-sdl whisper llama ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+++
+++    install(TARGETS ${TARGET} RUNTIME)
++
++     if(WIN32)
++         # It requires Windows 8.1 or later for PrefetchVirtualMemory
diff --cc debian/patches/series
index aef67717,00000000..51c4c7bd
mode 100644,000000..100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@@ -1,3 -1,0 +1,4 @@@
 +0001-disable-some-examples.patch
- 0002-use-llama-cpp-library.patch
 +0003-load-ggml-backends.patch
++0004-use-llama-cpp-library.patch
++0004-load-ggml-backends-cli.patch
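
For context on the two backend-loading patches above (0003-load-ggml-backends.patch and
0004-load-ggml-backends-cli.patch): both insert the same call, ggml_backend_load_all(), at the
top of main(), so that GGML backends shipped as loadable modules (such as the CPU backend under
/usr/libexec/<multiarch>/ggml handled in debian-whisper.cpp.cmake) are registered before whisper
selects a compute backend. What follows is a minimal standalone sketch of that pattern, not part
of the patches themselves; it assumes only the public whisper.h and ggml-backend.h APIs, and the
model path is a placeholder:

    #include <cstdio>

    #include "ggml-backend.h"
    #include "whisper.h"

    int main() {
        // Register every GGML backend found in the default search paths.
        // With a libggml built for dynamic backend loading, skipping this
        // step can leave whisper with no compute backend to run on.
        ggml_backend_load_all();

        // Placeholder model path, for illustration only.
        struct whisper_context_params cparams = whisper_context_default_params();
        struct whisper_context * ctx =
                whisper_init_from_file_with_params("/path/to/ggml-base.en.bin", cparams);
        if (ctx == nullptr) {
            fprintf(stderr, "failed to initialize whisper context\n");
            return 1;
        }

        whisper_free(ctx);
        return 0;
    }

Adding the call in each shipped executable, rather than patching libwhisper itself, presumably
keeps the Debian-specific behaviour local to the binaries this package actually installs.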