llama : use F32 precision in Qwen2 attention and no FA (#8412)
author    Georgi Gerganov <redacted>
          Thu, 11 Jul 2024 07:21:30 +0000 (10:21 +0300)
committer GitHub <redacted>
          Thu, 11 Jul 2024 07:21:30 +0000 (10:21 +0300)
src/llama.cpp

index b19d786e23643ba08fdf53c86535e3c1cca614c5..ed77ed918e554e1d961ca6559de86b0bcbf16f21 100644 (file)
@@ -8134,7 +8134,7 @@ static struct ggml_tensor * llm_build_kqv(
         struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
         cb(kq, "kq", il);
 
-        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX) {
+        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2) {
             // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
             // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
             ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
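
For context, the call the patch enables, ggml_mul_mat_set_prec(kq, GGML_PREC_F32), marks a single matmul node so its accumulation runs in F32 rather than the backend's default (possibly reduced) precision. Below is a minimal standalone sketch of that pattern using the public ggml C API; the tensor shapes, memory budget, and driver code are illustrative assumptions, not the real attention dimensions from llama.cpp.

    #include "ggml.h"

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,   // illustrative scratch size
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        // stand-ins for one layer's K and Q tensors (shapes are placeholders)
        struct ggml_tensor * k = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 128, 64);
        struct ggml_tensor * q = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 128, 64);

        // KQ attention scores, as in llm_build_kqv
        struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);

        // force F32 precision for this node -- the override this commit
        // extends to LLM_ARCH_QWEN2 to avoid NaNs
        ggml_mul_mat_set_prec(kq, GGML_PREC_F32);

        // build and run the graph on the CPU backend
        struct ggml_cgraph * gf = ggml_new_graph(ctx);
        ggml_build_forward_expand(gf, kq);
        ggml_graph_compute_with_ctx(ctx, gf, /*n_threads=*/ 1);

        ggml_free(ctx);
        return 0;
    }

The precision flag lives on the destination tensor, so only the marked KQ node pays the F32 cost; other matmuls in the graph keep the backend default.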