From: Georgi Gerganov
Date: Mon, 17 Jun 2024 16:40:01 +0000 (+0300)
Subject: llama : disable FA if KV head size do not match (#7982)
X-Git-Tag: upstream/0.0.4488~1316
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=7c26775adb579e92b59c82e8084c07a1d0f75e9c;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : disable FA if KV head size do not match (#7982)
---

diff --git a/llama.cpp b/llama.cpp
index dd7020dc..61948751 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -16260,6 +16260,11 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
+    if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
+        LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
+        params.flash_attn = false;
+    }
+
     if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
         LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
        return nullptr;
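
Editor's note: the sketch below is not part of the commit. It illustrates, under the assumption of the standard llama.cpp C API as of mid-2024 (llama_context_params, llama_new_context_with_model, the flash_attn flag), how a caller is affected by this change: requesting flash attention on a model whose K and V head sizes differ now logs a warning and continues with flash attention disabled, rather than building an unsupported graph. The model path is hypothetical.

// build: link against llama.cpp (e.g. via its provided headers/library)
#include "llama.h"

int main() {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    // hypothetical model path for illustration only
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == nullptr) {
        return 1;
    }

    llama_context_params cparams = llama_context_default_params();
    cparams.flash_attn = true; // caller requests flash attention

    // With this commit, if the model has n_embd_head_k != n_embd_head_v,
    // llama_new_context_with_model warns
    // "flash_attn requires n_embd_head_k == n_embd_head_v - forcing off"
    // and proceeds with flash_attn forced to false instead of failing.
    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == nullptr) {
        llama_free_model(model);
        return 1;
    }

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}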