git.djapps.eu Git - pkg/ggml/sources/whisper.cpp/commitdiff
CUDA: compress mode option and default to size (llama/12029)
author: Erik Scholz <redacted>
Sat, 1 Mar 2025 11:57:22 +0000 (12:57 +0100)
committer: Georgi Gerganov <redacted>
Sat, 8 Mar 2025 13:13:01 +0000 (15:13 +0200)
CUDA 12.8 added the option to specify stronger compression for binaries, so we now default to "size".

ggml/CMakeLists.txt
ggml/src/ggml-cuda/CMakeLists.txt

index 359f98513bc00a20af151b1449c77d15b14afd9f..835bf16e55ac34983a911b226cca2b38a3b2ff7e 100644 (file)
@@ -155,6 +155,9 @@ option(GGML_CUDA_NO_VMM                     "ggml: do not try to use CUDA VMM"
 option(GGML_CUDA_FA                         "ggml: compile ggml FlashAttention CUDA kernels"  ON)
 option(GGML_CUDA_FA_ALL_QUANTS              "ggml: compile all quants for FlashAttention"     OFF)
 option(GGML_CUDA_GRAPHS                     "ggml: use CUDA graphs (llama.cpp only)"          ${GGML_CUDA_GRAPHS_DEFAULT})
+set   (GGML_CUDA_COMPRESSION_MODE "size" CACHE STRING
+                                            "ggml: cuda link binary compression mode; requires cuda 12.8+")
+set_property(CACHE GGML_CUDA_COMPRESSION_MODE PROPERTY STRINGS "none;speed;balance;size")
 
 option(GGML_HIP                             "ggml: use HIP"                                   OFF)
 option(GGML_HIP_GRAPHS                      "ggml: use HIP graph, experimental, slow"         OFF)
index 96bd5a0be297663d882eb1a22feb5a79f5180910..8623214c78a72986b056f5a0d8844adbb504a50b 100644 (file)
@@ -102,6 +102,15 @@ if (CUDAToolkit_FOUND)
 
     set(CUDA_FLAGS -use_fast_math)
 
+    if (CUDAToolkit_VERSION VERSION_GREATER_EQUAL "12.8")
+        # Options are:
+        # - none (not recommended)
+        # - speed (nvcc's default)
+        # - balance
+        # - size
+        list(APPEND CUDA_FLAGS -compress-mode=${GGML_CUDA_COMPRESSION_MODE})
+    endif()
+
     if (GGML_FATAL_WARNINGS)
         list(APPEND CUDA_FLAGS -Werror all-warnings)
     endif()