From: Johannes Gäßler
Date: Fri, 30 May 2025 19:22:03 +0000 (+0200)
Subject: CUDA: fix typo in FlashAttention code (llama/13926)
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=432d0b8b4c10f44249d5732b854cc3bae0099a1d;p=pkg%2Fggml%2Fsources%2Fggml

CUDA: fix typo in FlashAttention code (llama/13926)
---

diff --git a/src/ggml-cuda/fattn-mma-f16.cuh b/src/ggml-cuda/fattn-mma-f16.cuh
index 7120053b..925f39e8 100644
--- a/src/ggml-cuda/fattn-mma-f16.cuh
+++ b/src/ggml-cuda/fattn-mma-f16.cuh
@@ -1246,7 +1246,7 @@ static __global__ void flash_attn_ext_f16(
         NO_DEVICE_CODE;
         return;
     }
-#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING
+#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING
 
     static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV");
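
Why the added "//" matters: the preprocessor does not allow extra tokens
after an #endif directive, so the old line drew a compiler diagnostic
(e.g. GCC's and Clang's "extra tokens at end of #endif directive") instead
of acting as the intended label for the conditional block. A minimal
standalone sketch of the before/after behavior, assuming
GGML_CUDA_CC_TURING expands to an integer compute-capability constant as
in ggml's CUDA headers (the value 750 here is illustrative, not taken
from this diff):

    // sketch.cu -- illustrative only, not part of this commit
    #define GGML_CUDA_CC_TURING 750 // assumed value for illustration

    // Before the fix: bare tokens trail the #endif, which is ill-formed
    // and triggers an "extra tokens at end of #endif directive" warning.
    #if __CUDA_ARCH__ == GGML_CUDA_CC_TURING
        // Turing-only code path
    #endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING

    // After the fix: the condition is repeated as a comment, the idiomatic
    // way to label which #if a distant #endif closes.
    #if __CUDA_ARCH__ == GGML_CUDA_CC_TURING
        // Turing-only code path
    #endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING

Repeating the condition as a comment is purely a readability aid; the
preprocessor ignores it either way once the "//" is present.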