lora : raise error if lm_head is ignored (#9103)
author     Xuan Son Nguyen <redacted>
           Thu, 12 Sep 2024 11:33:57 +0000 (13:33 +0200)
committer  GitHub <redacted>
           Thu, 12 Sep 2024 11:33:57 +0000 (14:33 +0300)
* lora : raise error if lm_head is ignored

* fix style

* clarify comment

convert_lora_to_gguf.py

index ddd347a2abd2ad72e73bde4fda2c4655dc3e861c..d1c94e58034b3b24225812c46e2945dd5926d47c 100755
@@ -363,7 +363,13 @@ if __name__ == '__main__':
                     yield (name, cast(torch.Tensor, LoraTorchTensor(tensor.A, tensor.B)))
 
             def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-                dest = super().modify_tensors(data_torch, name, bid)
+                dest = list(super().modify_tensors(data_torch, name, bid))
+                # some archs may have the same tensor for lm_head and output (tie word embeddings)
+                # in this case, adapters targeting lm_head will fail when using llama-export-lora
+                # therefore, we ignore them for now
+                # see: https://github.com/ggerganov/llama.cpp/issues/9065
+                if name == "lm_head.weight" and len(dest) == 0:
+                    raise ValueError("lm_head is present in adapter, but is ignored in base model")
                 for dest_name, dest_data in dest:
                     assert isinstance(dest_data, LoraTorchTensor)
                     lora_a, lora_b = dest_data.get_lora_A_B()
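
For context, below is a minimal standalone sketch of the guard this commit adds. It is not part of the converter; the real check lives inside modify_tensors in convert_lora_to_gguf.py. The helper name check_lm_head and the bare 'object' value type are hypothetical stand-ins used only for illustration; the condition and the error message match the diff above.

    # Minimal sketch (hypothetical helper) of the guard introduced by this commit.
    # When the base model ties word embeddings, its converter maps lm_head.weight
    # to nothing, so an adapter targeting lm_head cannot be applied later by
    # llama-export-lora; raising at conversion time surfaces the problem early.

    from typing import Iterable


    def check_lm_head(name: str, dest: Iterable[tuple[str, object]]) -> list[tuple[str, object]]:
        # 'dest' is whatever the base converter produced for this adapter tensor;
        # an empty result for lm_head.weight means the base model ignored it.
        dest = list(dest)
        if name == "lm_head.weight" and len(dest) == 0:
            raise ValueError("lm_head is present in adapter, but is ignored in base model")
        return dest


    # Example: adapter trained against a tied-embeddings base model
    try:
        check_lm_head("lm_head.weight", [])  # base model produced no mapping
    except ValueError as e:
        print(e)  # lm_head is present in adapter, but is ignored in base model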