From: Xuan-Son Nguyen
Date: Tue, 2 Dec 2025 10:48:31 +0000 (+0100)
Subject: convert: add error message for mistral3 quantized weight (#17686)
X-Git-Tag: upstream/0.0.7446~214
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=2c453c6c7786df267b3ae6fd5019eee126a35a29;p=pkg%2Fggml%2Fsources%2Fllama.cpp

convert: add error message for mistral3 quantized weight (#17686)
---

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index a54cce88..8ddb6d04 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -2842,6 +2842,10 @@ class Mistral3Model(LlamaModel):
         self.gguf_writer.add_attn_temperature_scale(rope_params["llama_4_scaling_beta"])
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
+        # TODO: probably not worth supporting quantized weight, as official BF16 is also available
+        if name.endswith("weight_scale_inv"):
+            raise ValueError("This is a quantized weight, please use BF16 weight instead")
+
         name = name.replace("language_model.", "")
         if "multi_modal_projector" in name or "vision_tower" in name:
             return []
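
For context, a minimal standalone sketch of the check introduced above, assuming the common FP8 checkpoint layout in which each quantized "*.weight" tensor is stored alongside a "*.weight_scale_inv" dequantization-scale tensor; the tensor names below are illustrative, not taken from a real Mistral checkpoint:

    # Hedged sketch, not part of the commit: reject FP8-quantized checkpoints
    # by spotting their "*.weight_scale_inv" scale tensors, mirroring the
    # guard added to Mistral3Model.modify_tensors above.
    def reject_quantized(name: str) -> None:
        if name.endswith("weight_scale_inv"):
            raise ValueError("This is a quantized weight, please use BF16 weight instead")

    reject_quantized("model.layers.0.mlp.down_proj.weight")  # a BF16 weight passes
    try:
        reject_quantized("model.layers.0.mlp.down_proj.weight_scale_inv")
    except ValueError as err:
        print(err)  # conversion aborts with this message for quantized checkpoints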