From: Georgi Gerganov
Date: Thu, 11 Jul 2024 07:21:30 +0000 (+0300)
Subject: llama : use F32 precision in Qwen2 attention and no FA (#8412)
X-Git-Tag: upstream/0.0.4488~1117
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=7a221b672e49dfae459b1af27210ba3f2b5419b6;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : use F32 precision in Qwen2 attention and no FA (#8412)
---

diff --git a/src/llama.cpp b/src/llama.cpp
index b19d786e..ed77ed91 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -8134,7 +8134,7 @@ static struct ggml_tensor * llm_build_kqv(
         struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
         cb(kq, "kq", il);
 
-        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX) {
+        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2) {
             // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
             // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
             ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
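
Note (not part of the commit): the change adds LLM_ARCH_QWEN2 to the set of architectures whose KQ matmul is forced to F32 accumulation via ggml_mul_mat_set_prec. Below is a minimal, standalone C sketch of how that call is used on a single ggml matmul node; the tensor shapes, memory size, and thread count are illustrative assumptions, not values from llama.cpp.

#include <stdio.h>
#include "ggml.h"

int main(void) {
    // small scratch context; in llama.cpp this is managed by the graph builder
    struct ggml_init_params params = {
        /*.mem_size   =*/ 64u * 1024 * 1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // stand-ins for K and Q; in llama.cpp K often comes from an F16 KV cache,
    // which is where the extra precision on the product matters
    struct ggml_tensor * k = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 128, 32);
    struct ggml_tensor * q = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 128, 32);

    struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);

    // request F32 precision for this one node, mirroring the diff above
    ggml_mul_mat_set_prec(kq, GGML_PREC_F32);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, kq);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads=*/ 4);

    printf("kq shape: %lld x %lld\n", (long long) kq->ne[0], (long long) kq->ne[1]);

    ggml_free(ctx);
    return 0;
}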