CUDA: fix FA logic for PTX 7.0 and CC >= 7.5 (llama/12222)
author Johannes Gäßler <redacted>
Thu, 6 Mar 2025 17:45:09 +0000 (18:45 +0100)
committer Georgi Gerganov <redacted>
Sat, 8 Mar 2025 13:13:01 +0000 (15:13 +0200)
ggml/src/ggml-cuda/fattn.cu

index 24f973056aa9a78a3cf1347e64adba2bb678ec00..2e72fc8fd380bb68d7cbdb2500b056925c5504a7 100644
@@ -310,7 +310,7 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst
     }
 
     // The MMA implementation needs Turing or newer, use the old WMMA code for Volta:
-    if (cc == GGML_CUDA_CC_VOLTA) {
+    if (fp16_mma_available(cc) && !new_mma_available(cc)) {
         ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst);
         return;
     }
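
The change dispatches on capability flags instead of matching one exact architecture, so that FP16 tensor-core hardware without the newer MMA path still falls back to the WMMA kernel. The following is a minimal, self-contained sketch of that dispatch idea only; it is not the ggml-cuda implementation. fp16_mma_available() and new_mma_available() are approximated here as plain compute-capability thresholds (Volta/CC 7.0 for FP16 WMMA, Turing/CC 7.5 for the newer MMA instructions); in the real code the MMA check presumably also depends on which architectures the binary was compiled for, which is the "PTX 7.0 and CC >= 7.5" case named in the commit title.

// Hypothetical sketch, not the actual ggml-cuda helpers or constants.
#include <cstdio>

constexpr int CC_VOLTA  = 700; // assumed threshold: FP16 WMMA tensor cores
constexpr int CC_TURING = 750; // assumed threshold: newer MMA PTX instructions

static bool fp16_mma_available_sketch(int cc) { return cc >= CC_VOLTA;  }
static bool new_mma_available_sketch (int cc) { return cc >= CC_TURING; }

// Choose a flash-attention kernel family from capability flags rather than
// an exact architecture match (the old `cc == VOLTA` style check).
static const char * pick_fattn_path(int cc) {
    if (fp16_mma_available_sketch(cc) && !new_mma_available_sketch(cc)) {
        return "WMMA (FP16 tensor cores, no new MMA path)";
    }
    if (new_mma_available_sketch(cc)) {
        return "MMA (Turing or newer)";
    }
    return "vector/tile fallback (no FP16 tensor cores)";
}

int main() {
    for (int cc : {610, 700, 750, 800}) {
        std::printf("CC %d -> %s\n", cc, pick_fattn_path(cc));
    }
    return 0;
}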