convert : force patch_merger tensors to f16/f32 (#18124)
author    Sigbjørn Skjæret <redacted>
          Wed, 17 Dec 2025 21:15:53 +0000 (22:15 +0100)
committer GitHub <redacted>
          Wed, 17 Dec 2025 21:15:53 +0000 (22:15 +0100)
convert_hf_to_gguf.py

index bd16ba312ff5f4415a3f223e6df7a608d41ccffc..806b3d7b4709158cece84bc49b83e7d9b1b947e7 100755 (executable)
@@ -1838,7 +1838,7 @@ class MmprojModel(ModelBase):
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
         del bid, name, n_dims  # unused
-        if ".patch_embd.weight" in new_name:
+        if ".patch_embd.weight" in new_name or ".patch_merger.weight" in new_name:
             return gguf.GGMLQuantizationType.F16 if self.ftype == gguf.LlamaFileType.MOSTLY_F16 else gguf.GGMLQuantizationType.F32
         return False
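
The change extends the existing `tensor_force_quant` override so that `.patch_merger.weight` tensors, like `.patch_embd.weight`, are never quantized below F16/F32. Below is a minimal standalone sketch of that override logic; the enum classes stand in for `gguf.GGMLQuantizationType` and `gguf.LlamaFileType`, and the example tensor names and surrounding calls are hypothetical, not the actual convert_hf_to_gguf.py flow.

```python
from enum import IntEnum


class QuantType(IntEnum):   # stand-in for gguf.GGMLQuantizationType
    F32 = 0
    F16 = 1


class FileType(IntEnum):    # stand-in for gguf.LlamaFileType
    MOSTLY_F16 = 1


def tensor_force_quant(new_name: str, ftype: FileType):
    # patch_embd and patch_merger weights keep full/half precision
    # regardless of the requested output quantization
    if ".patch_embd.weight" in new_name or ".patch_merger.weight" in new_name:
        return QuantType.F16 if ftype == FileType.MOSTLY_F16 else QuantType.F32
    return False


# Hypothetical tensor names for illustration only:
print(tensor_force_quant("v.patch_merger.weight", FileType.MOSTLY_F16))  # QuantType.F16 (forced)
print(tensor_force_quant("v.blk.0.attn_q.weight", FileType.MOSTLY_F16))  # False (no override)
```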