py : cast lora_alpha to int in convert-lora-to-ggml (#1170)
author	ostix360 <redacted>
Tue, 25 Apr 2023 21:33:08 +0000 (23:33 +0200)
committer	GitHub <redacted>
Tue, 25 Apr 2023 21:33:08 +0000 (23:33 +0200)
Co-authored-by: Pavol Rusnak <redacted>
convert-lora-to-ggml.py

index 8a2085c2511a1eafefa19cf435a4591585bad824..9090e8d6dd55a76c3816f9985e9c036a195a6210 100644 (file)
@@ -49,7 +49,12 @@ def translate_tensor_name(t: str) -> str:
 def write_file_header(fout: TextIO, params: Dict[str, Any]) -> None:
     fout.write(b"ggla"[::-1])  # magic (ggml lora)
     fout.write(struct.pack("i", 1))  # file version
-    fout.write(struct.pack("ii", params["r"], params["lora_alpha"]))
+    fout.write(struct.pack("i", params["r"]))
+    # https://opendelta.readthedocs.io/en/latest/modules/deltas.html says that `lora_alpha` is an int
+    # but some models ship a float value instead
+    # let's convert to int, but fail if lossless conversion is not possible
+    assert int(params["lora_alpha"]) == params["lora_alpha"], "cannot convert float to int losslessly"
+    fout.write(struct.pack("i", int(params["lora_alpha"])))
 
 
 def write_tensor_header(
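For reference, here is a minimal sketch of the header writer after this hunk is applied. It is not the file verbatim: it adds the imports needed to run standalone, uses a BinaryIO hint (the stream is written in binary mode, whereas the script itself annotates it as TextIO), and the output path in the usage line is hypothetical.

import struct
from typing import Any, BinaryIO, Dict

def write_file_header(fout: BinaryIO, params: Dict[str, Any]) -> None:
    fout.write(b"ggla"[::-1])        # magic bytes: "ggla" reversed (ggml lora)
    fout.write(struct.pack("i", 1))  # file format version
    fout.write(struct.pack("i", params["r"]))
    # lora_alpha is documented as an int, but some adapters ship a float
    # such as 32.0; accept it only when the cast to int loses no information
    alpha = params["lora_alpha"]
    assert int(alpha) == alpha, "cannot convert float to int losslessly"
    fout.write(struct.pack("i", int(alpha)))

# usage: a float alpha of 32.0 converts cleanly; 32.5 would trip the assert
with open("adapter.ggla", "wb") as fout:  # hypothetical output path
    write_file_header(fout, {"r": 8, "lora_alpha": 32.0})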
@@ -89,7 +94,7 @@ if params["peft_type"] != "LORA":
     print(f"Error: unsupported adapter type {params['peft_type']}, expected LORA")
     sys.exit(1)
 
-if params["fan_in_fan_out"] == True:
+if params["fan_in_fan_out"] is True:
     print("Error: param fan_in_fan_out is not supported")
     sys.exit(1)
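The second hunk tightens the comparison from `== True` to `is True`. A minimal sketch of the semantic difference, using a hypothetical config value:

fan_in_fan_out = 1             # a truthy non-bool a config could carry
print(fan_in_fan_out == True)  # True  -- equality coerces, since 1 == True
print(fan_in_fan_out is True)  # False -- identity matches only the bool singleton

`is True` restricts the error path to a genuine boolean True, avoiding the implicit numeric coercion that `==` performs.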