llama : fix qs.n_attention_wv for DeepSeek-V2 (#9156)
author     compilade <redacted>
Tue, 27 Aug 2024 10:09:23 +0000 (06:09 -0400)
committer  GitHub <redacted>
Tue, 27 Aug 2024 10:09:23 +0000 (13:09 +0300)
src/llama.cpp

index f50972249baa7d5325025f88a13902e025e252f7..8d5f24783d6aba1d5fad15718f74c4586aaf1410 100644
@@ -16822,7 +16822,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
         // TODO: avoid hardcoded tensor names - use the TN_* constants
         if (name.find("attn_v.weight")   != std::string::npos ||
-            name.find("attn_qkv.weight") != std::string::npos) {
+            name.find("attn_qkv.weight") != std::string::npos ||
+            name.find("attn_kv_b.weight")!= std::string::npos) {
             ++qs.n_attention_wv;
         } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
             qs.has_output = true;
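
Context for the hunk above: the quantizer uses `qs.n_attention_wv` to count the per-layer attention value-projection tensors so it can choose quantization types for them, and recent versions also sanity-check the count against the model's layer count. DeepSeek-V2 stores those weights in `attn_kv_b.weight` rather than `attn_v.weight`, so before this patch the counter missed every layer of that architecture. Below is a minimal, self-contained sketch of the name-matching logic; the tensor names in the list are hypothetical examples, and the plain loop stands in for llama.cpp's iteration over the model's tensors.

// A minimal, self-contained sketch of the tensor-name matching shown in the
// hunk above. The tensor names below are hypothetical examples; in llama.cpp
// the loop runs over the model's actual tensors loaded from the GGUF file.
#include <cstdio>
#include <string>
#include <vector>

int main() {
    const std::vector<std::string> names = {
        "blk.0.attn_v.weight",    // classic attention value projection
        "blk.1.attn_qkv.weight",  // fused QKV projection
        "blk.2.attn_kv_b.weight", // DeepSeek-V2 style; missed before the fix
        "blk.2.ffn_down.weight",  // not an attention tensor, never counted
    };

    int n_attention_wv = 0;
    for (const auto & name : names) {
        // The same substring test the patch extends with "attn_kv_b.weight":
        if (name.find("attn_v.weight")    != std::string::npos ||
            name.find("attn_qkv.weight")  != std::string::npos ||
            name.find("attn_kv_b.weight") != std::string::npos) {
            ++n_attention_wv;
        }
    }

    // With the fix all three attention tensors are counted; without the last
    // condition the DeepSeek-V2 tensor would be skipped and this would print 2.
    printf("n_attention_wv = %d\n", n_attention_wv); // prints: n_attention_wv = 3
    return 0;
}

As the TODO in the hunk itself notes, the substring matching on hardcoded tensor names is a stopgap; the stated intent is to replace it with the TN_* constants (via LLM_TN) used elsewhere in the file.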