From: Johannes Gäßler
Date: Sat, 1 Jun 2024 21:26:10 +0000 (+0200)
Subject: Fix FlashAttention debug test, FP32 assert (llama/7684)
X-Git-Tag: upstream/1.7.4~659
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=9b3d7840209d522803139f293aa6e629626561a2;p=pkg%2Fggml%2Fsources%2Fwhisper.cpp

Fix FlashAttention debug test, FP32 assert (llama/7684)
---

diff --git a/ggml-cuda/fattn-vec-f32.cuh b/ggml-cuda/fattn-vec-f32.cuh
index ce23a4eb..ddf0c837 100644
--- a/ggml-cuda/fattn-vec-f32.cuh
+++ b/ggml-cuda/fattn-vec-f32.cuh
@@ -278,14 +278,10 @@ void ggml_cuda_flash_attn_ext_vec_f32_case_impl(ggml_backend_cuda_context & ctx,
 
 template <int D, ggml_type type_K, ggml_type type_V>
 void ggml_cuda_flash_attn_ext_vec_f32_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    ggml_tensor * KQV = dst;
     ggml_tensor * Q   = dst->src[0];
     ggml_tensor * K   = dst->src[1];
     ggml_tensor * V   = dst->src[2];
 
-    const int32_t precision = KQV->op_params[2];
-    GGML_ASSERT(precision == GGML_PREC_DEFAULT);
-
     GGML_ASSERT(K->type == type_K);
     GGML_ASSERT(V->type == type_V);
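
Note on the removed assert (a minimal sketch, not part of the patch): ggml_cuda_flash_attn_ext_vec_f32_case launches the FP32 vector kernel, and the host-side dispatch can presumably route to it not only for the default precision setting but also when FP32 accumulation is requested explicitly (as in the FlashAttention debug test named in the subject), so asserting GGML_PREC_DEFAULT here could fire on a valid code path. The sketch below uses hypothetical names (dispatch_flash_attn, launch_fattn_vec_f32, ...) rather than ggml's actual functions, only to illustrate that dispatch idea.

    // Minimal sketch with hypothetical names, not ggml's real dispatch code.
    #include <cstdio>

    enum prec { PREC_DEFAULT, PREC_F32 };   // stand-in for GGML_PREC_*

    static void launch_fattn_vec_f16() { std::puts("FP16 vector kernel"); }
    static void launch_fattn_vec_f32() { std::puts("FP32 vector kernel"); }

    static void dispatch_flash_attn(prec precision, bool fast_fp16_available) {
        if (precision == PREC_F32 || !fast_fp16_available) {
            // The FP32 kernel is also reached with precision == PREC_F32,
            // so the kernel launcher must not assert PREC_DEFAULT internally.
            launch_fattn_vec_f32();
        } else {
            launch_fattn_vec_f16();
        }
    }

    int main() {
        dispatch_flash_attn(PREC_F32, true);       // e.g. a precision/debug test
        dispatch_flash_attn(PREC_DEFAULT, false);  // GPU without fast FP16
        return 0;
    }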