convert-hf : fix exception in sentencepiece with added tokens (#6320)
author Pedro Cuenca <redacted>
Tue, 26 Mar 2024 12:32:19 +0000 (13:32 +0100)
committer GitHub <redacted>
Tue, 26 Mar 2024 12:32:19 +0000 (14:32 +0200)
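
The script builds the sentencepiece vocabulary by iterating token ids up to the vocab_size reported in the model's hparams (config.json). When a model ships added tokens, that value exceeds the base sentencepiece vocabulary, so id_to_piece was called with out-of-range ids and sentencepiece raised. A minimal sketch of the failure mode, assuming a tokenizer.model whose extra tokens live only in added_tokens.json (the path and counts are illustrative):

    from sentencepiece import SentencePieceProcessor

    sp = SentencePieceProcessor("tokenizer.model")  # illustrative path
    base_size = sp.vocab_size()       # base vocab only; added tokens are not counted here
    hparams_vocab = base_size + 2     # config.json reports the full size incl. 2 added tokens

    for token_id in range(hparams_vocab):
        piece = sp.id_to_piece(token_id)  # raises once token_id >= base_size

The fix below caps the loop at tokenizer.vocab_size() and folds the added tokens in afterwards, skipping any piece the base vocabulary already contains.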
convert-hf-to-gguf.py

index 723ea18e34c658dcaedda8e13f53084e0eb5b084..c5d2d0b7813d1c517e6b76642972ee10fd57fd89 100755 (executable)
@@ -331,7 +331,7 @@ class Model(ABC):
         tokenizer = SentencePieceProcessor(str(tokenizer_path))
         vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
 
-        for token_id in range(vocab_size):
+        for token_id in range(tokenizer.vocab_size()):
             piece = tokenizer.id_to_piece(token_id)
             text = piece.encode("utf-8")
             score = tokenizer.get_score(token_id)
@@ -356,9 +356,13 @@ class Model(ABC):
                 added_tokens_json = json.load(f)
 
                 for key in added_tokens_json:
-                    tokens.append(key.encode("utf-8"))
-                    scores.append(-1000.0)
-                    toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
+                    key = key.encode("utf-8")
+                    if key not in tokens:
+                        tokens.append(key)
+                        scores.append(-1000.0)
+                        toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
+
+        assert len(tokens) == vocab_size
 
         self.gguf_writer.add_tokenizer_model("llama")
         self.gguf_writer.add_token_list(tokens)
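
The duplicate check matters because some models relist base-vocab pieces in added_tokens.json; appending them unconditionally would grow the token list past vocab_size and corrupt the id-to-token mapping, while the new assert makes the size invariant explicit. A self-contained sketch of the fixed flow with toy values (all names and numbers below are illustrative, including the inline token-type codes):

    # toy stand-ins for the state built from the sentencepiece model
    tokens = [b"<s>", b"</s>", b"hello"]          # base sentencepiece pieces
    scores = [0.0, 0.0, -5.0]
    toktypes = [1, 1, 1]                          # 1 ~ NORMAL in SentencePieceTokenTypes
    vocab_size = 4                                # as reported by hparams['vocab_size']
    added_tokens_json = {"hello": 2, "<pad>": 3}  # "hello" duplicates a base piece

    for key in added_tokens_json:
        key = key.encode("utf-8")
        if key not in tokens:                     # skip duplicates of base pieces
            tokens.append(key)
            scores.append(-1000.0)
            toktypes.append(4)                    # 4 ~ USER_DEFINED

    assert len(tokens) == vocab_size              # 3 base pieces + 1 genuinely new token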