git.djapps.eu Git - pkg/ggml/sources/whisper.cpp/commitdiff
ggml-cuda: use CMAKE_CUDA_ARCHITECTURES if set when GGML_NATIVE=ON (llama/18413)
author    QDelta <redacted>
Sun, 28 Dec 2025 01:33:14 +0000 (20:33 -0500)
committer Georgi Gerganov <redacted>
Wed, 31 Dec 2025 15:52:09 +0000 (17:52 +0200)
ggml/src/ggml-cuda/CMakeLists.txt

index 3b438c30ce64b6928960293d3adc8de3ca981798..f49121754544e2e2aceae553c2771227a9881aa5 100644 (file)
@@ -37,14 +37,13 @@ if (CUDAToolkit_FOUND)
             endif()
         endif()
     endif()
-    message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
 
     enable_language(CUDA)
 
     # Replace any 12x-real architectures with 12x{a}-real. FP4 ptx instructions are not available in just 12x
     if (GGML_NATIVE)
         set(PROCESSED_ARCHITECTURES "")
-        if (CMAKE_CUDA_ARCHITECTURES_NATIVE)
+        if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES AND CMAKE_CUDA_ARCHITECTURES_NATIVE)
             set(ARCH_LIST ${CMAKE_CUDA_ARCHITECTURES_NATIVE})
         else()
             set(ARCH_LIST ${CMAKE_CUDA_ARCHITECTURES})
@@ -66,6 +65,7 @@ if (CUDAToolkit_FOUND)
             endif()
         endforeach()
     endif()
+    message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
 
     file(GLOB   GGML_HEADERS_CUDA "*.cuh")
     list(APPEND GGML_HEADERS_CUDA "../../include/ggml-cuda.h")
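
For context, a minimal standalone CMake sketch of the precedence this commit is after: an explicitly passed -DCMAKE_CUDA_ARCHITECTURES=... is respected even when GGML_NATIVE=ON, and the natively detected architecture list is only a fallback. The demo project name, the helper variable CUDA_ARCHS_EXPLICIT, and the placement of the check before enable_language are illustrative assumptions, not the structure of the upstream file.

    cmake_minimum_required(VERSION 3.24)   # CMAKE_CUDA_ARCHITECTURES_NATIVE requires CMake >= 3.24
    project(cuda_arch_precedence_demo LANGUAGES CXX)

    option(GGML_NATIVE "Optimize the build for the host machine" ON)

    # Remember whether the user passed -DCMAKE_CUDA_ARCHITECTURES=... explicitly,
    # before enabling CUDA (which may otherwise fill in a compiler default).
    if (DEFINED CMAKE_CUDA_ARCHITECTURES)
        set(CUDA_ARCHS_EXPLICIT TRUE)
    else()
        set(CUDA_ARCHS_EXPLICIT FALSE)
    endif()

    enable_language(CUDA)   # makes CMAKE_CUDA_ARCHITECTURES_NATIVE available

    if (GGML_NATIVE AND NOT CUDA_ARCHS_EXPLICIT AND CMAKE_CUDA_ARCHITECTURES_NATIVE)
        # No explicit request: build only for the GPUs detected on this machine.
        set(CMAKE_CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES_NATIVE})
    endif()
    # Otherwise the explicitly requested (or default) CMAKE_CUDA_ARCHITECTURES is kept as-is.

    # Printed only after the list is final, mirroring the moved message(STATUS ...) call in the diff.
    message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")

With this behavior, a configure invocation along the lines of cmake -B build -DGGML_CUDA=ON -DGGML_NATIVE=ON -DCMAKE_CUDA_ARCHITECTURES=86 should build only for the requested architecture rather than the detected one; the exact set of other flags used with whisper.cpp is assumed here, not taken from this commit.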