From: Sigbjørn Skjæret
Date: Mon, 24 Nov 2025 14:50:55 +0000 (+0100)
Subject: convert : allow quantizing lora again (#17453)
X-Git-Tag: upstream/0.0.7446~299
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=b61de2b2df4ff07e6d6de96320fb311d96908b7a;p=pkg%2Fggml%2Fsources%2Fllama.cpp

convert : allow quantizing lora again (#17453)
---

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 6cbaee03..d24a4682 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -565,7 +565,7 @@ class ModelBase:
                             gguf.MODEL_TENSOR.ALTUP_PREDICT_COEF,
                         )
                     )
-                    or not new_name.endswith(".weight")
+                    or new_name[-7:] not in (".weight", ".lora_a", ".lora_b")
                 ):
                     data_qtype = gguf.GGMLQuantizationType.F32

diff --git a/convert_lora_to_gguf.py b/convert_lora_to_gguf.py
index 57c6cd0d..b0adde8a 100755
--- a/convert_lora_to_gguf.py
+++ b/convert_lora_to_gguf.py
@@ -242,7 +242,7 @@ def parse_args() -> argparse.Namespace:
         help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
     )
     parser.add_argument(
-        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "auto"], default="f16",
+        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "auto"], default="f32",
         help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
     )
     parser.add_argument(
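
Note on the first hunk: all three suffixes accepted by the new check are exactly seven characters long, which is what makes the new_name[-7:] slice equivalent to calling endswith() with each of them; the old check forced every tensor not ending in ".weight" to F32, which is why LoRA factor tensors could not be quantized. A minimal standalone sketch of just this clause follows; the tensor names are illustrative examples, not taken from the commit, and in convert_hf_to_gguf.py this clause is only one arm of a larger "or" chain (tensor kind and dimensionality checks, shown above, can also force F32):

    # Sketch of the suffix gate introduced by this commit.
    QUANTIZABLE_SUFFIXES = (".weight", ".lora_a", ".lora_b")  # each exactly 7 chars

    def forced_to_f32(new_name: str) -> bool:
        # Old check: not new_name.endswith(".weight")
        # New check: LoRA factor tensors may also keep the requested qtype.
        return new_name[-7:] not in QUANTIZABLE_SUFFIXES

    for name in (
        "blk.0.attn_q.weight",         # regular weight -> quantizable before and after
        "blk.0.attn_q.weight.lora_a",  # LoRA factor    -> quantizable again with this fix
        "blk.0.attn_q.weight.lora_b",  # LoRA factor    -> quantizable again with this fix
        "blk.0.attn_norm.bias",        # bias           -> still forced to F32
    ):
        print(f"{name:28}  forced_to_f32={forced_to_f32(name)}")

With this gate open, passing --outtype q8_0 to convert_lora_to_gguf.py can again produce a quantized adapter, while the second hunk's new f32 default keeps adapters at full precision unless a lower-precision type is requested explicitly.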