llama : fix non-quantization of expert gating tensors (#5754)
author     compilade <redacted>
           Wed, 28 Feb 2024 08:52:56 +0000 (03:52 -0500)
committer  GitHub <redacted>
           Wed, 28 Feb 2024 08:52:56 +0000 (10:52 +0200)
This reverts a single line from #5475

llama.cpp

index 356ca107670c8a1b225b2315045137a1c6ba5be3..893bcdbc0147dd8f2313174ed2297e15798c2d6d 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -11162,7 +11162,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         quantize &= !params->only_copy;
 
         // do not quantize expert gating tensors
-        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_FFN_GATE_INP, "weight");
+        // NOTE: can't use LLM_TN here because the layer number is not known
+        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
 
         // do not quantize positional embeddings and token types (BERT)
         quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD,    "weight");
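For context, a minimal standalone sketch (not part of the patch) of why the reverted line matters, assuming per-layer GGUF tensor names of the form "blk.N.ffn_gate_inp.weight": an exact comparison against a pattern built without the layer number never matches, so the expert gating tensors were still being quantized, while the substring check restored here excludes them as intended. The tensor names below are illustrative assumptions, not taken from a real model file.

// standalone illustration; compile with: g++ -std=c++11 example.cpp
#include <iostream>
#include <string>
#include <vector>

int main() {
    // the pattern available at quantization time has no layer number
    const std::string pattern = "ffn_gate_inp.weight";

    // hypothetical per-layer tensor names of a MoE model
    const std::vector<std::string> names = {
        "blk.0.ffn_gate_inp.weight", // expert gating tensor, should NOT be quantized
        "blk.1.ffn_gate_inp.weight",
        "blk.0.attn_q.weight",       // regular tensor, should be quantized
    };

    for (const std::string & name : names) {
        // exact comparison (the behaviour introduced in #5475): never true for
        // per-layer names, so gating tensors slipped through and were quantized
        const bool skipped_by_exact_match = (name == pattern);

        // substring search (the behaviour restored by this commit)
        const bool skipped_by_substring = name.find(pattern) != std::string::npos;

        std::cout << name
                  << "  exact-match skip: " << std::boolalpha << skipped_by_exact_match
                  << "  substring skip: "   << skipped_by_substring << '\n';
    }
    return 0;
}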