From: Daniel Han
Date: Sat, 21 Jun 2025 04:32:01 +0000 (-0700)
Subject: convert : fix Llama 4 conversion (#14311)
X-Git-Tag: upstream/0.0.5760~33
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=b23fa0b3f40165ca3aae8ad4ee756e72f9a130dd;p=pkg%2Fggml%2Fsources%2Fllama.cpp

convert : fix Llama 4 conversion (#14311)
---

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 2fe76589..bbf8b30f 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -2193,7 +2193,7 @@ class Llama4VisionModel(MmprojModel):
                 name += ".weight"
             if "multi_modal_projector.linear_1" in name:
                 # despite the name with number postfix, this is a single fully connected layer
-                return [(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC], data_torch)]
+                return [(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC] + '.weight', data_torch)]
             return [(self.map_tensor_name(name), data_torch)]
         return []
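
Note (editor's sketch, not part of the patch): gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC] yields only the base name of the projector tensor, whereas the other code path, self.map_tensor_name(name), produces names that keep their ".weight"/".bias" suffix. Appending '.weight' explicitly keeps the tensor name written to the GGUF file consistent with that convention, which is presumably what the mmproj loader expects. A minimal illustration, assuming the gguf-py package from this repository is importable; the concrete base-name string shown in the comments is an assumption, not taken from the patch:

    import gguf

    # Base name registered for the vision multimodal-projector FC tensor.
    base = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC]

    # Before the fix the tensor was written under the bare base name;
    # after the fix it carries the ".weight" suffix, matching the names
    # produced by self.map_tensor_name(name) for the other vision tensors.
    old_name = base                  # e.g. something like "mm.model.fc" (assumed)
    new_name = base + ".weight"      # e.g. "mm.model.fc.weight" (assumed)

    print(old_name, "->", new_name)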