--- a/debian/changelog
+++ b/debian/changelog
++whisper-cpp (1.8.0-1) unstable; urgency=medium
++
++ * Update upstream
++
++ -- Mathieu Baudier <mbaudier@argeo.org> Fri, 10 Oct 2025 14:19:55 +0000
++
+whisper-cpp (1.7.6-1) unstable; urgency=medium
+
+ * Update upstream
+
+ -- Mathieu Baudier <mbaudier@argeo.org> Fri, 15 Aug 2025 18:33:41 +0000
++
--- a/debian/patches/disable-some-examples.patch
+++ b/debian/patches/disable-some-examples.patch
+From: Mathieu Baudier <mbaudier@argeo.org>
+Date: Mon, 27 Jan 2025 08:08:56 +0100
+Subject: disable-some-examples
+
+Disable some unused examples
+---
+ examples/CMakeLists.txt | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
- index c37a2e6..f2588b0 100644
++index b202ca0..1e0c9f6 100644
+--- a/examples/CMakeLists.txt
++++ b/examples/CMakeLists.txt
- @@ -102,17 +102,17 @@ elseif(CMAKE_JS_VERSION)
++@@ -103,17 +103,17 @@ elseif(CMAKE_JS_VERSION)
+ add_subdirectory(addon.node)
+ else()
+ add_subdirectory(cli)
+- add_subdirectory(bench)
+- add_subdirectory(server)
+- add_subdirectory(quantize)
++ #add_subdirectory(bench)
++ #add_subdirectory(server)
++ #add_subdirectory(quantize)
+ add_subdirectory(vad-speech-segments)
+ if (WHISPER_SDL2)
+ add_subdirectory(stream)
+ add_subdirectory(command)
+ add_subdirectory(talk-llama)
+- add_subdirectory(lsp)
++ #add_subdirectory(lsp)
+ if (GGML_SYCL)
+- add_subdirectory(sycl)
++ #add_subdirectory(sycl)
+ endif()
+ endif (WHISPER_SDL2)
+
- @@ -120,5 +120,5 @@ else()
++@@ -121,5 +121,5 @@ else()
+ endif()
+
+ if (WHISPER_SDL2)
+- add_subdirectory(wchess)
++ #add_subdirectory(wchess)
+ endif (WHISPER_SDL2)
--- a/debian/patches/use-llama-cpp-library.patch
+++ b/debian/patches/use-llama-cpp-library.patch
+From: Mathieu Baudier <mbaudier@argeo.org>
+Date: Tue, 11 Mar 2025 15:40:11 +0100
+Subject: use-llama-cpp-library
+
+---
+ examples/talk-llama/CMakeLists.txt | 32 ++++----------------------------
+ 1 file changed, 4 insertions(+), 28 deletions(-)
+
+diff --git a/examples/talk-llama/CMakeLists.txt b/examples/talk-llama/CMakeLists.txt
- index 13ecced..f86a338 100644
++index 182114c..f86a338 100644
+--- a/examples/talk-llama/CMakeLists.txt
++++ b/examples/talk-llama/CMakeLists.txt
+@@ -3,36 +3,12 @@ if (WHISPER_SDL2)
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+ set(TARGET whisper-talk-llama)
+- add_executable(${TARGET} talk-llama.cpp
+- llama.cpp
+- llama-adapter.cpp
+- llama-arch.cpp
+- llama-batch.cpp
+- llama-chat.cpp
+- llama-context.cpp
+- llama-cparams.cpp
+- llama-grammar.cpp
+- llama-graph.cpp
+- llama-hparams.cpp
+- llama-impl.cpp
+- llama-io.cpp
- - llama-kv-cache-unified.cpp
- - llama-kv-cache-unified-iswa.cpp
++- llama-kv-cache.cpp
++- llama-kv-cache-iswa.cpp
+- llama-memory-recurrent.cpp
+- llama-memory-hybrid.cpp
+- llama-memory.cpp
+- llama-mmap.cpp
+- llama-model-loader.cpp
+- llama-model-saver.cpp
+- llama-model.cpp
+- llama-quant.cpp
+- llama-sampling.cpp
+- llama-vocab.cpp
+- unicode.cpp
+- unicode-data.cpp)
++ add_executable(${TARGET} talk-llama.cpp)
+ target_include_directories(${TARGET} PRIVATE ${SDL2_INCLUDE_DIRS})
+
+- target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
++ target_link_libraries(${TARGET} PRIVATE common common-sdl whisper llama ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
++
++ install(TARGETS ${TARGET} RUNTIME)
+
+ if(WIN32)
+ # It requires Windows 8.1 or later for PrefetchVirtualMemory
--- a/debian/patches/load-ggml-backends-cli.patch
+++ b/debian/patches/load-ggml-backends-cli.patch
+From: Mathieu Baudier <mbaudier@argeo.org>
+Date: Tue, 11 Mar 2025 16:22:18 +0100
+Subject: load-ggml-backends-cli
+
+---
+ examples/cli/cli.cpp | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/examples/cli/cli.cpp b/examples/cli/cli.cpp
- index f73ed9a..4640bdd 100644
++index 457a1ff..04cacc9 100644
+--- a/examples/cli/cli.cpp
++++ b/examples/cli/cli.cpp
- @@ -918,6 +918,9 @@ int main(int argc, char ** argv) {
++@@ -920,6 +920,9 @@ int main(int argc, char ** argv) {
+ SetConsoleOutputCP(CP_UTF8);
+ #endif
+
++ // make sure GGML backends are loaded
++ ggml_backend_load_all();
++
+ whisper_params params;
+
+ // If the only argument starts with "@", read arguments line-by-line