From: Johannes Gäßler
Date: Fri, 30 May 2025 19:22:03 +0000 (+0200)
Subject: CUDA: fix typo in FlashAttention code (#13926)
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=e562eece7cb476276bfc4cbb18deb7c0369b2233;p=pkg%2Fggml%2Fsources%2Fllama.cpp

CUDA: fix typo in FlashAttention code (#13926)
---

diff --git a/ggml/src/ggml-cuda/fattn-mma-f16.cuh b/ggml/src/ggml-cuda/fattn-mma-f16.cuh
index 7120053b..925f39e8 100644
--- a/ggml/src/ggml-cuda/fattn-mma-f16.cuh
+++ b/ggml/src/ggml-cuda/fattn-mma-f16.cuh
@@ -1246,7 +1246,7 @@ static __global__ void flash_attn_ext_f16(
         NO_DEVICE_CODE;
         return;
     }
-#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING
+#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING
 
     static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV");
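
Note (not part of the patch): the preprocessor allows nothing after #endif except a
comment, so the old line makes GCC and Clang warn along the lines of "extra tokens at
end of #endif directive"; the stray tokens are otherwise ignored. Repeating the #if
condition inside a "//" comment keeps the annotation without the warning. A minimal
standalone sketch of the pattern; the file name and the value of GGML_CUDA_CC_TURING
here are illustrative placeholders, not taken from the llama.cpp tree:

    // sketch.cu -- hypothetical example; compile with: nvcc -c sketch.cu
    #define GGML_CUDA_CC_TURING 750  // placeholder value for this sketch only

    __global__ void sketch_kernel(int *out) {
    #if __CUDA_ARCH__ == GGML_CUDA_CC_TURING
        // Turing-only fallback path would go here.
        *out = 1;
    #endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING  (condition repeated inside a comment: OK)
        // Writing "#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING" instead (without the
        // "//") still compiles, but triggers the "extra tokens" preprocessor warning.
    }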