git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
CUDA: compress mode option and default to size (#12029)
authorErik Scholz <redacted>
Sat, 1 Mar 2025 11:57:22 +0000 (12:57 +0100)
committerGitHub <redacted>
Sat, 1 Mar 2025 11:57:22 +0000 (12:57 +0100)
CUDA 12.8 added the option to specify stronger compression for binaries, so we now default to "size".

ggml/CMakeLists.txt
ggml/src/ggml-cuda/CMakeLists.txt

index 610010da8dd77841449064bb9619d2b54b034acc..9e7db35602ecb91cd74a8db821990c20a7845129 100644 (file)
@@ -155,6 +155,9 @@ option(GGML_CUDA_NO_VMM                     "ggml: do not try to use CUDA VMM"
 option(GGML_CUDA_FA                         "ggml: compile ggml FlashAttention CUDA kernels"  ON)
 option(GGML_CUDA_FA_ALL_QUANTS              "ggml: compile all quants for FlashAttention"     OFF)
 option(GGML_CUDA_GRAPHS                     "ggml: use CUDA graphs (llama.cpp only)"          ${GGML_CUDA_GRAPHS_DEFAULT})
+set   (GGML_CUDA_COMPRESSION_MODE "size" CACHE STRING
+                                            "ggml: cuda link binary compression mode; requires cuda 12.8+")
+set_property(CACHE GGML_CUDA_COMPRESSION_MODE PROPERTY STRINGS "none;speed;balance;size")
 
 option(GGML_HIP                             "ggml: use HIP"                                   OFF)
 option(GGML_HIP_GRAPHS                      "ggml: use HIP graph, experimental, slow"         OFF)
index 96bd5a0be297663d882eb1a22feb5a79f5180910..8623214c78a72986b056f5a0d8844adbb504a50b 100644 (file)
@@ -102,6 +102,15 @@ if (CUDAToolkit_FOUND)
 
     set(CUDA_FLAGS -use_fast_math)
 
+    if (CUDAToolkit_VERSION VERSION_GREATER_EQUAL "12.8")
+        # Options are:
+        # - none (not recommended)
+        # - speed (nvcc's default)
+        # - balance
+        # - size
+        list(APPEND CUDA_FLAGS -compress-mode=${GGML_CUDA_COMPRESSION_MODE})
+    endif()
+
     if (GGML_FATAL_WARNINGS)
         list(APPEND CUDA_FLAGS -Werror all-warnings)
     endif()