From: Georgi Gerganov
Date: Mon, 8 Apr 2024 19:25:49 +0000 (+0300)
Subject: llama : fix attention layer count sanity check (#6550)
X-Git-Tag: upstream/0.0.4488~1853
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=cc4a95426d17417d3c83f12bdb514fbe8abe2a88;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : fix attention layer count sanity check (#6550)

* llama : fix attention layer count sanity check

* llama : fix parentheses in attention layer count sanity check

There was otherwise a warning when compiling.

---------

Co-authored-by: Francis Couture-Harpin
---

diff --git a/llama.cpp b/llama.cpp
index 89ea3fe1..b16ddc64 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -13468,7 +13468,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         const std::string name = ggml_get_name(meta);
 
         // TODO: avoid hardcoded tensor names - use the TN_* constants
-        if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
+        if (name.find("attn_v.weight") != std::string::npos ||
+            name.find("attn_qkv.weight") != std::string::npos) {
             ++qs.n_attention_wv;
         } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
             qs.has_output = true;
@@ -13478,7 +13479,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer;
 
     // sanity checks
-    GGML_ASSERT(qs.n_attention_wv == (int)model.hparams.n_layer && "n_attention_wv != n_layer is unexpected");
+    //
+    //  - qs.n_attention_wv == 0                     for Mamba       models
+    //  - qs.n_attention_wv == model.hparams.n_layer for Transformer models
+    //
+    GGML_ASSERT((qs.n_attention_wv == 0 || qs.n_attention_wv == (int)model.hparams.n_layer) && "n_attention_wv is unexpected");
 
     size_t total_size_org = 0;
     size_t total_size_new = 0;
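
A minimal sketch of why the second commit adds parentheses: '&&' binds tighter
than '||', so an assert condition written as  a == 0 || a == n && "msg"  makes
GCC/Clang emit -Wparentheses ("suggest parentheses around '&&' within '||'");
grouping the disjunction first avoids the warning. The values below are
illustrative only, and plain assert() stands in for GGML_ASSERT.

    #include <cassert>

    int main() {
        int n_attention_wv = 0;  // e.g. a Mamba model: no attention layers counted
        int n_layer        = 32; // hypothetical Transformer layer count

        // pre-fix form (same truth value, but -Wall emits -Wparentheses):
        //   assert(n_attention_wv == 0 || n_attention_wv == n_layer && "n_attention_wv is unexpected");
        //
        // parenthesized form, matching the diff above:
        assert((n_attention_wv == 0 || n_attention_wv == n_layer) &&
               "n_attention_wv is unexpected");
        return 0;
    }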