git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : disable FA if KV head sizes do not match (#7982)
author    Georgi Gerganov <redacted>
          Mon, 17 Jun 2024 16:40:01 +0000 (19:40 +0300)
committer GitHub <redacted>
          Mon, 17 Jun 2024 16:40:01 +0000 (19:40 +0300)
llama.cpp

index dd7020dc0eeabc5e1d788ea289bd3058f21ee072..61948751558519ea5fa5cd00ef46a692af81a117 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -16260,6 +16260,11 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
+    if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
+        LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
+        params.flash_attn = false;
+    }
+
     if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
         LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
         return nullptr;
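
For readers outside the diff context: the added guard refuses to enable flash attention when the per-head K and V embedding sizes differ, since the FA path assumes they match, and it downgrades gracefully (warn and force the flag off) rather than failing. Below is a minimal standalone sketch of that check; hparams_t and validate_flash_attn are simplified, hypothetical stand-ins for illustration, not llama.cpp's real API.

#include <cstdio>

// Hypothetical, simplified stand-in for llama.cpp's hparams.
struct hparams_t {
    int n_embd_head_k; // per-head embedding size of K
    int n_embd_head_v; // per-head embedding size of V
};

// Mirrors the guard added by this commit: flash attention requires
// n_embd_head_k == n_embd_head_v, otherwise the flag is forced off.
static bool validate_flash_attn(bool flash_attn, const hparams_t & hp) {
    if (flash_attn && hp.n_embd_head_k != hp.n_embd_head_v) {
        fprintf(stderr, "flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n");
        return false;
    }
    return flash_attn;
}

int main() {
    hparams_t hp = { 128, 64 };                              // mismatched head sizes
    bool fa = validate_flash_attn(/*flash_attn=*/true, hp);  // warns, returns false
    printf("flash_attn: %s\n", fa ? "on" : "off");           // prints "off"
    return 0;
}

Note that in the actual diff this check runs before the V-cache quantization check, so a context requested with a quantized V cache plus FA on a mismatched-head model still fails cleanly with the "V cache quantization requires flash_attn" error rather than silently producing wrong results.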