From: compilade
Date: Wed, 28 Feb 2024 08:52:56 +0000 (-0500)
Subject: llama : fix non-quantization of expert gating tensors (#5754)
X-Git-Tag: upstream/0.0.4488~2200
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=adcb12a9bad87bc96f2f158c95892b3d04aa7ffb;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : fix non-quantization of expert gating tensors (#5754)

This reverts a single line from #5475
---

diff --git a/llama.cpp b/llama.cpp
index 356ca107..893bcdbc 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -11162,7 +11162,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         quantize &= !params->only_copy;

         // do not quantize expert gating tensors
-        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_FFN_GATE_INP, "weight");
+        // NOTE: can't use LLM_TN here because the layer number is not known
+        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;

         // do not quantize positional embeddings and token types (BERT)
         quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight");
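
A minimal sketch of why the patched line uses a substring search instead of an exact name comparison, assuming the usual per-layer GGUF naming "blk.<n>.ffn_gate_inp.weight" (the sample names below are illustrative, not read from a model): a name built without a layer number can never equal a per-layer tensor name, while the substring check skips the gating tensor in every layer.

// sketch.cpp -- illustrative only; tensor names are assumed examples
#include <iostream>
#include <string>
#include <vector>

int main() {
    const std::vector<std::string> names = {
        "blk.0.ffn_gate_inp.weight", // expert gating tensor, layer 0
        "blk.1.ffn_gate_inp.weight", // expert gating tensor, layer 1
        "blk.0.ffn_down.weight",     // regular FFN tensor
        "token_embd.weight",         // token embeddings
    };
    for (const std::string & name : names) {
        bool quantize = true;
        // same check as the patched line: false only for the gating tensors
        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
        std::cout << name << " -> " << (quantize ? "quantize" : "skip") << "\n";
    }
    return 0;
}

The trade-off, as the NOTE in the diff states, is giving up the LLM_TN name builder (which needs a layer number) in exchange for a layer-agnostic match.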