From: QDelta
Date: Sun, 28 Dec 2025 01:33:14 +0000 (-0500)
Subject: ggml-cuda: use CMAKE_CUDA_ARCHITECTURES if set when GGML_NATIVE=ON (llama/18413)
X-Git-Tag: upstream/1.8.3~77
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=31fc2c37c88afbc0ef3caa330fa092d67ff757f9;p=pkg%2Fggml%2Fsources%2Fwhisper.cpp

ggml-cuda: use CMAKE_CUDA_ARCHITECTURES if set when GGML_NATIVE=ON (llama/18413)
---

diff --git a/ggml/src/ggml-cuda/CMakeLists.txt b/ggml/src/ggml-cuda/CMakeLists.txt
index 3b438c30..f4912175 100644
--- a/ggml/src/ggml-cuda/CMakeLists.txt
+++ b/ggml/src/ggml-cuda/CMakeLists.txt
@@ -37,14 +37,13 @@ if (CUDAToolkit_FOUND)
             endif()
         endif()
     endif()
-    message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
 
     enable_language(CUDA)
 
     # Replace any 12x-real architectures with 12x{a}-real. FP4 ptx instructions are not available in just 12x
     if (GGML_NATIVE)
         set(PROCESSED_ARCHITECTURES "")
-        if (CMAKE_CUDA_ARCHITECTURES_NATIVE)
+        if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES AND CMAKE_CUDA_ARCHITECTURES_NATIVE)
             set(ARCH_LIST ${CMAKE_CUDA_ARCHITECTURES_NATIVE})
         else()
             set(ARCH_LIST ${CMAKE_CUDA_ARCHITECTURES})
@@ -66,6 +65,7 @@ if (CUDAToolkit_FOUND)
             endif()
         endforeach()
     endif()
+    message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
 
     file(GLOB GGML_HEADERS_CUDA "*.cuh")
     list(APPEND GGML_HEADERS_CUDA "../../include/ggml-cuda.h")
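
For context, a minimal standalone sketch of the selection logic this patch changes. The script name arch_select.cmake, the stand-in value 89-real, and the GGML_NATIVE default shown here are illustrative, not part of the commit; the guard itself matches the patched CMakeLists.txt:

    # arch_select.cmake -- illustrative only; run with `cmake -P arch_select.cmake`
    # or `cmake -DCMAKE_CUDA_ARCHITECTURES=86 -P arch_select.cmake`.
    set(CMAKE_CUDA_ARCHITECTURES_NATIVE "89-real")  # stand-in for CMake's natively detected value
    set(GGML_NATIVE ON)

    if (GGML_NATIVE)
        if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES AND CMAKE_CUDA_ARCHITECTURES_NATIVE)
            # No explicit list was passed on the command line: keep the natively detected GPUs.
            set(ARCH_LIST ${CMAKE_CUDA_ARCHITECTURES_NATIVE})
        else()
            # An explicit -DCMAKE_CUDA_ARCHITECTURES=... now takes precedence even with GGML_NATIVE=ON;
            # before this patch the natively detected value would have been used instead.
            set(ARCH_LIST ${CMAKE_CUDA_ARCHITECTURES})
        endif()
    endif()
    message(STATUS "Selected CUDA architectures: ${ARCH_LIST}")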