From: Csaba Kecskemeti
Date: Thu, 27 Mar 2025 10:11:23 +0000 (-0700)
Subject: convert : Support Qwen2_5_VLForConditionalGeneration (#12595)
X-Git-Tag: upstream/0.0.5028~53
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=d5c6309d91cb22ebc947920f92eb686d92f84eae;p=pkg%2Fggml%2Fsources%2Fllama.cpp

convert : Support Qwen2_5_VLForConditionalGeneration (#12595)
---

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 52637c42..a06010a7 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -2269,7 +2269,7 @@ class Qwen2Model(Model):
             self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])


-@Model.register("Qwen2VLForConditionalGeneration")
+@Model.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
 class Qwen2VLModel(Model):
     model_arch = gguf.MODEL_ARCH.QWEN2VL
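
For context, `@Model.register(...)` in `convert_hf_to_gguf.py` maps Hugging Face architecture strings (the `architectures` field in a checkpoint's `config.json`) to converter classes, so listing `"Qwen2_5_VLForConditionalGeneration"` lets the existing `Qwen2VLModel` converter handle Qwen2.5-VL checkpoints as well. The snippet below is a minimal, illustrative sketch of such a decorator-based registry; the helper names and lookup method are simplified stand-ins, not the actual llama.cpp implementation.

```python
# Illustrative sketch of a decorator-based model registry
# (simplified; not the actual convert_hf_to_gguf.py code).
from __future__ import annotations


class Model:
    # Maps HF architecture names (config.json "architectures") to converter classes.
    _model_classes: dict[str, type["Model"]] = {}

    @classmethod
    def register(cls, *names: str):
        # Accepts several architecture names so one converter class can
        # serve multiple closely related model families.
        def wrapper(model_cls: type["Model"]) -> type["Model"]:
            for name in names:
                cls._model_classes[name] = model_cls
            return model_cls
        return wrapper

    @classmethod
    def from_model_architecture(cls, arch: str) -> type["Model"]:
        # Hypothetical lookup helper: resolve an architecture string to its converter.
        try:
            return cls._model_classes[arch]
        except KeyError:
            raise NotImplementedError(f"Architecture {arch!r} not supported") from None


@Model.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
class Qwen2VLModel(Model):
    pass


# Both architecture names now resolve to the same converter class.
assert Model.from_model_architecture("Qwen2_5_VLForConditionalGeneration") is Qwen2VLModel
```

With this registration in place, a Qwen2.5-VL checkpoint whose `config.json` declares `"Qwen2_5_VLForConditionalGeneration"` is routed to the same conversion path as Qwen2-VL, without any other converter changes.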