git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
py : cleanup the code
author    Pavol Rusnak <redacted>
Wed, 29 Mar 2023 19:31:24 +0000 (21:31 +0200)
committer Pavol Rusnak <redacted>
Fri, 31 Mar 2023 08:32:01 +0000 (10:32 +0200)
- use f-strings where possible
- drop first param of encode/decode functions since "utf-8" is the default
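
Both cleanups rely on documented Python defaults; a minimal sketch (plain Python, not part of the commit):

    # bytes.decode(encoding="utf-8", errors="strict") -- both parameters
    # default, so the explicit "utf-8" argument is redundant:
    data = "héllo".encode()                    # same as .encode("utf-8")
    text = data.decode()                       # same as .decode("utf-8")
    lossy = b"\xff".decode(errors="replace")   # keyword arg keeps the default encoding

    # f-strings instead of concatenation / %-formatting:
    name, part = "tok_embeddings.weight", 2
    print(f"Processing part {part}: {name}")   # was: "Processing part %d: %s" % (part, name)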

convert-ggml-to-pth.py
convert-gpt4all-to-ggml.py
convert-gptq-to-ggml.py
convert-pth-to-ggml.py
convert-unversioned-ggml-to-ggml.py
migrate-ggml-2023-03-30-pr613.py

diff --git a/convert-ggml-to-pth.py b/convert-ggml-to-pth.py
index 8ab17410d792907ebdb10e1c2aeb81efa4434b85..7ddfe3a1b20a6fc23ad8624ce4b72dadcff7a3bf 100644
@@ -27,9 +27,9 @@ def read_tokens(fin, vocab_size):
         text_len = struct.unpack("i", fin.read(4))[0]
         text_bytes = fin.read(text_len)
         try:
-            text = text_bytes.decode("utf-8")
+            text = text_bytes.decode()
         except UnicodeDecodeError:
-            text = text_bytes.decode("utf-8", "replace")
+            text = text_bytes.decode(errors="replace")
         score = struct.unpack("f", fin.read(4))[0]
         tokens.append((text, score))
     return tokens
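
The loop above reads the vocab records that the write_tokens functions further down in this commit produce: an int32 byte length, the raw UTF-8 bytes, then a float32 score. A minimal writer sketch of that layout (write_token is a hypothetical helper, not in the repo):

    import struct

    def write_token(fout, text, score):
        data = text.encode()                     # UTF-8 by default
        fout.write(struct.pack("i", len(data)))  # int32 byte length
        fout.write(data)                         # raw token bytes
        fout.write(struct.pack("f", score))      # float32 score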
@@ -82,7 +82,7 @@ def read_variables(fin):
 
         shape = tuple(struct.unpack("i" * n_dims, fin.read(4 * n_dims)))
         shape = shape[::-1]
-        name = fin.read(name_length).decode("utf-8")
+        name = fin.read(name_length).decode()
 
         # ensure tensor data is aligned
         tensor_data_offset = fin.tell()
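
The alignment step itself falls outside this hunk; it typically rounds the current file offset up to a power-of-two boundary. A sketch of the usual idiom (the 32-byte constant is an assumption, not shown in the diff):

    ALIGN = 32                                       # assumed alignment boundary
    offset = 1000                                    # e.g. the value of fin.tell()
    aligned = (offset + ALIGN - 1) & ~(ALIGN - 1)    # -> 1024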
@@ -199,7 +199,7 @@ def chat(model, hparams, llama_dir):
     device = torch.device("cpu")
     llama = llama.to(device)
 
-    ctx = """You are AI. 
+    ctx = """You are AI.
 This is a dialog, where User interacts with AI. AI is helpful, kind, obedient, honest, respectful, direct, concise, should try to protect User's privacy, and knows its own limits. Also, AI must answer User and AI cannot stop the conversation by itself.
 User: Hello, AI.
 AI: Hello! How can I assist you today?
@@ -207,11 +207,11 @@ AI: Hello! How can I assist you today?
     print(ctx.rstrip("\n"))
     while True:
         print("-" * 60)
-        prompt = input(f"User: ")
+        prompt = input("User: ")
         if ctx != "":
-            ctx = ctx + "User: " + prompt + "\n"
+            ctx = f"{ctx}User: {prompt}\n"
         else:
-            ctx = prompt + "\nAI:"
+            ctx = f"{prompt}\nAI:"
 
         ctx = (ctx[-1920:]) if len(ctx) >= 2048 else ctx
 
@@ -236,7 +236,7 @@ AI: Hello! How can I assist you today?
                 )
             s = generation_output.sequences[0]
             decoded = tokenizer.decode(s)
-            ctx = decoded + "\n"
+            ctx = f"{decoded}\n"
 
 
 def main():
diff --git a/convert-gpt4all-to-ggml.py b/convert-gpt4all-to-ggml.py
index f1d9d7aefe3e09c9e38f4a292984ae5c827d169f..b1a5e0560e08389cb426dd3b1bdf82787913fedb 100644
@@ -49,7 +49,7 @@ def write_header(f_out, header):
 def write_tokens(fout, tokenizer):
     for i in range(tokenizer.vocab_size()):
         if tokenizer.is_unknown(i):
-            text = " \u2047 ".encode("utf-8")
+            text = " \u2047 ".encode()
         elif tokenizer.is_control(i):
             text = b""
         elif tokenizer.is_byte(i):
@@ -60,13 +60,13 @@ def write_tokens(fout, tokenizer):
             byte_value = int(piece[3:-1], 16)
             text = struct.pack("B", byte_value)
         else:
-            text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
+            text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode()
         fout.write(struct.pack("i", len(text)))
         fout.write(text)
         fout.write(struct.pack("f", tokenizer.get_score(i)))
 
     # TODO: GPT4All - add extra <pad> token
-    text = "<pad>".encode("utf-8")
+    text = "<pad>".encode()
     fout.write(struct.pack("i", len(text)))
     fout.write(text)
     fout.write(struct.pack("f", 0.0))
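
The is_byte branch above handles SentencePiece byte-fallback tokens of the form "<0xHH>"; piece[3:-1] slices out the two hex digits. A quick illustration with an example value:

    import struct

    piece = "<0x0A>"                     # byte-fallback token for "\n"
    byte_value = int(piece[3:-1], 16)    # "0A" -> 10
    text = struct.pack("B", byte_value)  # b"\n"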
diff --git a/convert-gptq-to-ggml.py b/convert-gptq-to-ggml.py
index 860eb148b2a86286d640cd6641e5c2d11ae26909..42e99c2ff8d3b89c2441c003bff502df66a2172d 100644
@@ -50,7 +50,7 @@ fout.write(struct.pack("i", 4))
 # This loop unchanged from convert-pth-to-ggml.py:
 for i in range(tokenizer.vocab_size()):
     if tokenizer.is_unknown(i):
-        text = " \u2047 ".encode("utf-8")
+        text = " \u2047 ".encode()
     elif tokenizer.is_control(i):
         text = b""
     elif tokenizer.is_byte(i):
@@ -61,13 +61,13 @@ for i in range(tokenizer.vocab_size()):
         byte_value = int(piece[3:-1], 16)
         text = struct.pack("B", byte_value)
     else:
-        text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
+        text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode()
     fout.write(struct.pack("i", len(text)))
     fout.write(text)
     fout.write(struct.pack("f", tokenizer.get_score(i)))
 
 def write_header(shape, dst_name, ftype_cur):
-    sname = dst_name.encode('utf-8')
+    sname = dst_name.encode()
     fout.write(struct.pack("iii", len(shape), len(sname), ftype_cur))
     fout.write(struct.pack("i" * len(shape), *shape[::-1]))
     fout.write(sname)
@@ -80,7 +80,7 @@ def write_header(shape, dst_name, ftype_cur):
 def convert_non_q4(src_name, dst_name):
     v = model[src_name]
     shape = v.shape
-    print("Processing non-Q4 variable: " + src_name + " with shape: ", shape, " and type: ", v.dtype)
+    print(f"Processing non-Q4 variable: {src_name} with shape: {shape} and type: {v.dtype}")
     if len(shape) == 1:
         print("  Converting to float32")
         v = v.to(torch.float32)
@@ -105,7 +105,7 @@ def convert_q4(src_name, dst_name, permute=False):
     # Each int32 item is actually 8 int4 items packed together, and it's transposed.
     shape = (qweight.shape[0], qweight.shape[1] * 8)
 
-    print("Processing Q4 variable: " + src_name + " with shape: ", shape)
+    print(f"Processing Q4 variable: {src_name} with shape: {shape}")
 
     # The output format has the int4 weights in groups of 32 rather than 8.
     # It looks like this:
@@ -168,5 +168,5 @@ for i in range(n_layer):
 
 fout.close()
 
-print("Done. Output file: " + fname_out)
-print("")
+print(f"Done. Output file: {fname_out}")
+print()
diff --git a/convert-pth-to-ggml.py b/convert-pth-to-ggml.py
index df42e76bdd0d2cc7cf211bad7cc15ca445cdbcb7..dcef2f6a3221391d5cac0997227851d5f3053a70 100644
@@ -120,7 +120,7 @@ def write_header(fout, hparams, ftype):
 def write_tokens(fout, tokenizer):
     for i in range(tokenizer.vocab_size()):
         if tokenizer.is_unknown(i):
-            text = " \u2047 ".encode("utf-8")
+            text = " \u2047 ".encode()
         elif tokenizer.is_control(i):
             text = b""
         elif tokenizer.is_byte(i):
@@ -131,7 +131,7 @@ def write_tokens(fout, tokenizer):
             byte_value = int(piece[3:-1], 16)
             text = struct.pack("B", byte_value)
         else:
-            text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
+            text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode()
         fout.write(struct.pack("i", len(text)))
         fout.write(text)
         fout.write(struct.pack("f", tokenizer.get_score(i)))
@@ -191,7 +191,7 @@ def process_and_write_variables(fout, model, ftype, part_id, n_parts):
         fullshape = list(partshape)
         if n_dims > 1:
             fullshape[split_dim] *= n_parts
-        sname = name.encode('utf-8')
+        sname = name.encode()
         fout.write(struct.pack("iii", n_dims, len(sname), ftype_cur))
         for dim in reversed(fullshape):
             fout.write(struct.pack("i", dim))
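
This writer mirrors read_variables in convert-ggml-to-pth.py earlier in this commit: three int32s (n_dims, name length, ftype), the dims in reverse order, then the name bytes. A minimal reader sketch (read_tensor_header is a hypothetical helper, mirroring the code shown):

    import struct

    def read_tensor_header(fin):
        n_dims, name_len, ftype = struct.unpack("iii", fin.read(12))
        shape = struct.unpack("i" * n_dims, fin.read(4 * n_dims))[::-1]
        name = fin.read(name_len).decode()   # UTF-8 by default
        return shape, name, ftype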
diff --git a/convert-unversioned-ggml-to-ggml.py b/convert-unversioned-ggml-to-ggml.py
index 33b6243bd94e0837a8d3864e3c2eef01eba6f095..5151d9081a6d39d6fea95bc0b23bcac4cf57cb93 100644
@@ -44,7 +44,7 @@ def write_header(f_out, header):
 def write_tokens(fout, tokenizer):
     for i in range(tokenizer.vocab_size()):
         if tokenizer.is_unknown(i):
-            text = " \u2047 ".encode("utf-8")
+            text = " \u2047 ".encode()
         elif tokenizer.is_control(i):
             text = b""
         elif tokenizer.is_byte(i):
@@ -55,7 +55,7 @@ def write_tokens(fout, tokenizer):
             byte_value = int(piece[3:-1], 16)
             text = struct.pack("B", byte_value)
         else:
-            text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
+            text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode()
         fout.write(struct.pack("i", len(text)))
         fout.write(text)
         fout.write(struct.pack("f", tokenizer.get_score(i)))
diff --git a/migrate-ggml-2023-03-30-pr613.py b/migrate-ggml-2023-03-30-pr613.py
index 5596f6c5513a59fb53bde618247f4b4f73f1621f..b6ef2476e052df287335990f130c78c7ad0226d2 100644
@@ -272,13 +272,11 @@ def main():
         tokens = read_tokens(fin, hparams)
 
     if hparams['magic'] == 0x67676a74:  # ggjt
-        print("%s: input ggml has already been converted to 'ggjt' magic\n" %
-              (args.fin_path))
+        print(f"{args.fin_path}: input ggml has already been converted to 'ggjt' magic\n")
         sys.exit(1)
 
     if hparams['magic'] != 0x67676d66:  # ggmf
-        print("%s: input ggml file doesn't have expected 'ggmf' magic: %#x\n" %
-              (args.fin_path, hparams['magic']))
+        print(f"{args.fin_path}: input ggml file doesn't have expected 'ggmf' magic: {hparams['magic']:#x}\n")
         sys.exit(1)
 
     hparams['magic'] = 0x67676a74  # ggjt
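
The two magic numbers are the formats' ASCII tags; packing the constants big-endian makes that visible:

    import struct

    struct.pack(">I", 0x67676a74)  # b'ggjt' -- the format this script migrates to
    struct.pack(">I", 0x67676d66)  # b'ggmf' -- the older format it expects as input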
@@ -286,7 +284,7 @@ def main():
     # count number of multipart files by convention
     n_parts = 1
     while True:
-        if os.path.exists("%s.%d" % (args.fin_path, n_parts)):
+        if os.path.exists(f"{args.fin_path}.{n_parts}"):
             n_parts += 1
         else:
             break
@@ -302,7 +300,7 @@ def main():
             print(f"Processing part {part_id+1} of {n_parts}\n")
             fin_path = args.fin_path
             if part_id > 0:
-                fin_path += ".%d" % (part_id)
+                fin_path += f".{part_id}"
             with open(fin_path, "rb") as fin:
                 read_tokens(fin, read_hparams(fin))
                 copy_tensors(fin, fout, part_id, n_parts)