From: Jie Fu (傅杰)
Date: Wed, 17 Sep 2025 07:30:55 +0000 (+0800)
Subject: llama-quant : fix the verification of attention layers for encoder-decoder models (#16023)
X-Git-Tag: upstream/0.0.6527~33
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=745cbcf2fe1eb88f8db615ac622f0b944d924ad6;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama-quant : fix the verification of attention layers for encoder-decoder models (#16023)

Signed-off-by: Jie Fu
---

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index c93e8065..97228b2a 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -725,7 +725,9 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         // attention layers have a non-zero number of kv heads
         int32_t n_attn_layer = model.hparams.n_layer - std::count(n_head_kv_iter, n_head_kv_iter + model.hparams.n_layer, 0);
         if (llama_model_has_encoder(&model)) {
-            n_attn_layer *= 3;
+            // now n_attn_layer is the number of attention layers in the encoder
+            // for each decoder block, there are 2 attention layers
+            n_attn_layer += 2 * model.hparams.dec_n_layer;
         }
         GGML_ASSERT((qs.n_attention_wv == n_attn_layer - pruned_attention_w) && "n_attention_wv is unexpected");
     }
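
For context, here is a minimal standalone sketch (illustrative only, not llama.cpp code; the helper name and the layer counts are hypothetical) of why the old "n_attn_layer *= 3" only holds when the encoder and decoder have the same depth, while adding 2 * dec_n_layer counts each decoder block's self-attention and cross-attention layers explicitly:

// sketch.cpp -- illustrative only, not part of llama.cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// An encoder block contributes 1 self-attention layer; a decoder block
// contributes a self-attention layer plus a cross-attention layer,
// hence the "2 *" on the decoder depth.
static int32_t expected_attn_layers(int32_t enc_n_layer, int32_t dec_n_layer) {
    return enc_n_layer + 2 * dec_n_layer;
}

int main() {
    // symmetric case (e.g. 12 encoder blocks, 12 decoder blocks):
    // the old "enc_n_layer * 3" happens to give the same result
    assert(expected_attn_layers(12, 12) == 36);

    // asymmetric case (12 encoder blocks, 6 decoder blocks):
    // correct count is 12 + 2*6 = 24, while "12 * 3" would claim 36
    assert(expected_attn_layers(12, 6) == 24);

    printf("ok\n");
    return 0;
}

Using the decoder depth from model.hparams.dec_n_layer keeps the GGML_ASSERT on qs.n_attention_wv valid for encoder-decoder models whose encoder and decoder do not have the same number of blocks.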