From: Johannes Gäßler
Date: Fri, 30 May 2025 19:22:03 +0000 (+0200)
Subject: CUDA: fix typo in FlashAttention code (llama/13926)
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=a5aff2819890eb270c0c325e8d440de4232e1243;p=pkg%2Fggml%2Fsources%2Fwhisper.cpp

CUDA: fix typo in FlashAttention code (llama/13926)
---

diff --git a/ggml/src/ggml-cuda/fattn-mma-f16.cuh b/ggml/src/ggml-cuda/fattn-mma-f16.cuh
index 7120053b..925f39e8 100644
--- a/ggml/src/ggml-cuda/fattn-mma-f16.cuh
+++ b/ggml/src/ggml-cuda/fattn-mma-f16.cuh
@@ -1246,7 +1246,7 @@ static __global__ void flash_attn_ext_f16(
         NO_DEVICE_CODE;
         return;
     }
-#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING
+#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING

     static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV");
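
The fixed line reflects a general preprocessor rule: #endif takes no operands, so any
trailing tokens other than a comment are ill-formed, and compilers such as GCC and Clang
typically diagnose them with a warning along the lines of "extra tokens at end of #endif
directive" while still compiling. A minimal sketch of the idiom, using a hypothetical
macro (EXAMPLE_CC is an illustrative stand-in, not from the patch):

    #define EXAMPLE_CC 750              // hypothetical stand-in for GGML_CUDA_CC_TURING

    #if EXAMPLE_CC == 750
    static const int turing_path = 1;   // code compiled only when the condition holds
    #endif // EXAMPLE_CC == 750         // trailing text is inside a comment, so the
                                        // preprocessor ignores it

Writing "#endif EXAMPLE_CC == 750" without the "//" leaves stray tokens on the directive
line, which is exactly the typo the patch above corrects by turning the repeated
condition into a comment.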