convert : fix set_vocab_sentencepiece (#6866)
author    Georgi Gerganov <redacted>
          Sat, 18 May 2024 05:46:20 +0000 (08:46 +0300)
committer GitHub <redacted>
          Sat, 18 May 2024 05:46:20 +0000 (08:46 +0300)
* convert : fix set_vocab_sentencepiece

* Update convert-hf-to-gguf.py

convert-hf-to-gguf.py

index 5ba3161c76b96179e5d306c723d53fa6b93d862a..cd1750aa3f3ba27d605ec74876b16426e39d14ca 100755 (executable)
@@ -573,6 +573,10 @@ class Model:
 
         vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
 
+        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
+        scores: list[float] = [-10000.0] * vocab_size
+        toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+
         for token_id in range(tokenizer.vocab_size()):
             piece = tokenizer.IdToPiece(token_id)
             text = piece.encode("utf-8")
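
The hunk above pre-sizes the vocab arrays so entries can be written by token id instead of being appended in iteration order. A minimal sketch of that pattern outside the converter (the SentencePieceTokenTypes enum below is an illustrative stand-in, not imported from the real script):

    from enum import IntEnum

    class SentencePieceTokenTypes(IntEnum):
        # illustrative stand-in for the converter's token-type enum
        NORMAL = 1
        UNKNOWN = 2
        USER_DEFINED = 4

    vocab_size = 8  # toy value; the real script takes it from hparams / the tokenizer

    # every slot starts as an UNKNOWN "[PADi]" placeholder with a very low score
    tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
    scores: list[float] = [-10000.0] * vocab_size
    toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size

    # writing by index keeps ids stable even when some slots are never filled
    tokens[3] = b"hello"
    scores[3] = 0.0
    toktypes[3] = SentencePieceTokenTypes.NORMAL
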
@@ -588,21 +592,23 @@ class Model:
             elif tokenizer.IsByte(token_id):
                 toktype = SentencePieceTokenTypes.BYTE
 
-            tokens.append(text)
-            scores.append(score)
-            toktypes.append(toktype)
+            tokens[token_id] = text
+            scores[token_id] = score
+            toktypes[token_id] = toktype
 
         added_tokens_file = self.dir_model / 'added_tokens.json'
         if added_tokens_file.is_file():
             with open(added_tokens_file, "r", encoding="utf-8") as f:
                 added_tokens_json = json.load(f)
-
                 for key in added_tokens_json:
-                    key = key.encode("utf-8")
-                    if key not in tokens:
-                        tokens.append(key)
-                        scores.append(-1000.0)
-                        toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
+                    token_id = added_tokens_json[key]
+                    if (token_id >= vocab_size):
+                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
+                        continue
+
+                    tokens[token_id] = key.encode("utf-8")
+                    scores[token_id] = -1000.0
+                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
 
         if vocab_size > len(tokens):
             pad_count = vocab_size - len(tokens)
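
The same index-by-id rule is what fixes the added_tokens.json handling: each added token is written into the slot named by its id, and ids beyond the model's vocab are logged and skipped instead of appended. A sketch of that logic as a standalone helper (place_added_tokens is a hypothetical name, not part of the converter):

    import logging

    logger = logging.getLogger(__name__)

    def place_added_tokens(tokens: list[bytes], scores: list[float], toktypes: list[int],
                           added_tokens_json: dict[str, int], vocab_size: int) -> None:
        # hypothetical helper mirroring the logic in the hunk above
        for key, token_id in added_tokens_json.items():
            if token_id >= vocab_size:
                logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                continue
            tokens[token_id] = key.encode("utf-8")
            scores[token_id] = -1000.0
            toktypes[token_id] = 4  # USER_DEFINED in the converter's token-type enum

    # toy usage: id 2 is placed in its slot, id 9 is out of range and skipped
    tokens = [f"[PAD{i}]".encode("utf-8") for i in range(4)]
    scores = [-10000.0] * 4
    toktypes = [2] * 4
    place_added_tokens(tokens, scores, toktypes, {"<extra>": 2, "<oops>": 9}, 4)
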
@@ -612,8 +618,6 @@ class Model:
                 scores.append(-1000.0)
                 toktypes.append(SentencePieceTokenTypes.UNUSED)
 
-        assert len(tokens) == vocab_size
-
         self.gguf_writer.add_tokenizer_model("llama")
         self.gguf_writer.add_tokenizer_pre("default")
         self.gguf_writer.add_token_list(tokens)
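
Since the lists are now pre-allocated to vocab_size and only written in place, len(tokens) can no longer drift, the trailing padding branch can no longer fire, and the dropped assert would hold by construction (presumably why it was removed). A quick check with toy numbers:

    vocab_size = 8
    tokens = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
    # nothing in the new flow appends to the list, so this always holds
    assert len(tokens) == vocab_size
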