git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : use F32 precision in GLM4 attention and no FA (#9130)
author: piDack <redacted>
Fri, 23 Aug 2024 07:27:17 +0000 (15:27 +0800)
committer: GitHub <redacted>
Fri, 23 Aug 2024 07:27:17 +0000 (10:27 +0300)
src/llama.cpp

index bd7f1508b26445ea0ccfd477cf2655eec04a5ef3..869b584aa28620938d3afb95dd0908cf5453a17a 100644 (file)
@@ -8885,7 +8885,7 @@ static struct ggml_tensor * llm_build_kqv(
         struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
         cb(kq, "kq", il);
 
-        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2 || model.arch == LLM_ARCH_NEMOTRON) {
+        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2 || model.arch == LLM_ARCH_NEMOTRON || model.arch == LLM_ARCH_CHATGLM) {
             // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
             // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
             ggml_mul_mat_set_prec(kq, GGML_PREC_F32);