From: compilade
Date: Tue, 27 Aug 2024 10:09:23 +0000 (-0400)
Subject: llama : fix qs.n_attention_wv for DeepSeek-V2 (#9156)
X-Git-Tag: upstream/0.0.4488~851
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=78eb487bb0038eae95506d3d832b94c979185b09;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : fix qs.n_attention_wv for DeepSeek-V2 (#9156)
---

diff --git a/src/llama.cpp b/src/llama.cpp
index f5097224..8d5f2478 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -16822,7 +16822,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
         // TODO: avoid hardcoded tensor names - use the TN_* constants
         if (name.find("attn_v.weight") != std::string::npos ||
-            name.find("attn_qkv.weight") != std::string::npos) {
+            name.find("attn_qkv.weight") != std::string::npos ||
+            name.find("attn_kv_b.weight")!= std::string::npos) {
             ++qs.n_attention_wv;
         } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
             qs.has_output = true;
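
For context: DeepSeek-V2 uses multi-head latent attention, so its attention
blocks carry attn_kv_b.weight rather than attn_v.weight or attn_qkv.weight;
without the added clause, qs.n_attention_wv stayed at zero for such models even
though the per-layer quantization-type heuristics key off that count. What
follows is a minimal standalone sketch of the patched counting logic, not code
from llama.cpp; the helper name and the sample tensor names are illustrative.

    #include <cstdio>
    #include <string>
    #include <vector>

    // Sketch of the patched counter: any of the three tensor-name patterns
    // marks a tensor as an attention V (or fused KV) projection weight.
    static bool counts_as_attention_wv(const std::string & name) {
        return name.find("attn_v.weight")    != std::string::npos ||
               name.find("attn_qkv.weight")  != std::string::npos ||
               name.find("attn_kv_b.weight") != std::string::npos;
    }

    int main() {
        // Illustrative tensor names, one per attention layout.
        const std::vector<std::string> names = {
            "blk.0.attn_v.weight",    // classic separate V projection
            "blk.1.attn_qkv.weight",  // fused QKV projection
            "blk.2.attn_kv_b.weight", // DeepSeek-V2 latent KV projection
            "blk.2.ffn_down.weight",  // not an attention weight
        };

        int n_attention_wv = 0;
        for (const auto & name : names) {
            if (counts_as_attention_wv(name)) {
                ++n_attention_wv;
            }
        }
        printf("n_attention_wv = %d\n", n_attention_wv); // prints 3
        return 0;
    }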