git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
add safetensors support to convert-lora-to-ggml.py (#5062)
author: kuronekosaiko <redacted>
Sun, 21 Jan 2024 16:28:14 +0000 (00:28 +0800)
committer: GitHub <redacted>
Sun, 21 Jan 2024 16:28:14 +0000 (17:28 +0100)
* add safetensors support to convert-lora-to-ggml.py

* Update convert-lora-to-ggml.py

Remove white space in line 69.

convert-lora-to-ggml.py

index 4904bf128774f60e8f99f232e95f472183e88c7f..9a9936dec8b0ac0fed53240cdf62a7daf213f54c 100755 (executable)
@@ -59,7 +59,14 @@ if __name__ == '__main__':
     input_model = os.path.join(sys.argv[1], "adapter_model.bin")
     output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
 
-    model = torch.load(input_model, map_location="cpu")
+    if os.path.exists(input_model):
+        model = torch.load(input_model, map_location="cpu")
+    else:
+        input_model = os.path.join(sys.argv[1], "adapter_model.safetensors")
+        # lazy import load_file only if lora is in safetensors format.
+        from safetensors.torch import load_file
+        model = load_file(input_model, device="cpu")
+
     arch_name = sys.argv[2] if len(sys.argv) == 3 else "llama"
 
     if arch_name not in gguf.MODEL_ARCH_NAMES.values():