From: Georgi Gerganov
Date: Sat, 18 May 2024 05:46:20 +0000 (+0300)
Subject: convert : fix set_vocab_sentencepiece (#6866)
X-Git-Tag: upstream/0.0.4488~1568
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=b49a13dd2fa9c94c2c19a8c248bb7fa45499f9a8;p=pkg%2Fggml%2Fsources%2Fllama.cpp

convert : fix set_vocab_sentencepiece (#6866)

* convert : fix set_vocab_sentencepiece

* Update convert-hf-to-gguf.py
---

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 5ba3161c..cd1750aa 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -573,6 +573,10 @@ class Model:
 
         vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
 
+        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
+        scores: list[float] = [-10000.0] * vocab_size
+        toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+
         for token_id in range(tokenizer.vocab_size()):
             piece = tokenizer.IdToPiece(token_id)
             text = piece.encode("utf-8")
@@ -588,21 +592,23 @@ class Model:
             elif tokenizer.IsByte(token_id):
                 toktype = SentencePieceTokenTypes.BYTE
 
-            tokens.append(text)
-            scores.append(score)
-            toktypes.append(toktype)
+            tokens[token_id] = text
+            scores[token_id] = score
+            toktypes[token_id] = toktype
 
         added_tokens_file = self.dir_model / 'added_tokens.json'
         if added_tokens_file.is_file():
             with open(added_tokens_file, "r", encoding="utf-8") as f:
                 added_tokens_json = json.load(f)
-
                 for key in added_tokens_json:
-                    key = key.encode("utf-8")
-                    if key not in tokens:
-                        tokens.append(key)
-                        scores.append(-1000.0)
-                        toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
+                    token_id = added_tokens_json[key]
+                    if (token_id >= vocab_size):
+                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
+                        continue
+
+                    tokens[token_id] = key.encode("utf-8")
+                    scores[token_id] = -1000.0
+                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
 
         if vocab_size > len(tokens):
             pad_count = vocab_size - len(tokens)
@@ -612,8 +618,6 @@ class Model:
                 scores.append(-1000.0)
                 toktypes.append(SentencePieceTokenTypes.UNUSED)
 
-        assert len(tokens) == vocab_size
-
         self.gguf_writer.add_tokenizer_model("llama")
         self.gguf_writer.add_tokenizer_pre("default")
         self.gguf_writer.add_token_list(tokens)
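
For illustration, the pattern this patch adopts — pre-size the vocab tables to
vocab_size and write entries by token id, instead of appending in iteration
order — can be exercised standalone. Below is a minimal sketch of that
pattern; base_vocab, added, and VOCAB_SIZE are hypothetical stand-ins for the
real SentencePiece model and added_tokens.json, not part of the patch.

# Minimal standalone sketch of the fix's pattern: pre-size the vocab tables
# and assign by token id instead of appending in iteration order.
# base_vocab, added, and VOCAB_SIZE are hypothetical stand-ins for the real
# SentencePiece model and added_tokens.json contents.
import logging

logger = logging.getLogger(__name__)

VOCAB_SIZE = 8                                           # declared vocab size
base_vocab = ["<unk>", "<s>", "</s>", "hello"]           # ids 0..3
added = {"<|user|>": 6, "<|bot|>": 5, "<|oops|>": 99}    # sparse, out of order

# Pre-fill every slot, so unassigned ids become [PADn] placeholders instead of
# shifting later entries out of place.
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(VOCAB_SIZE)]
scores: list[float] = [-10000.0] * VOCAB_SIZE

for token_id, piece in enumerate(base_vocab):
    tokens[token_id] = piece.encode("utf-8")
    scores[token_id] = 0.0

for key, token_id in added.items():
    if token_id >= VOCAB_SIZE:
        # same guard as the patch: out-of-range ids are skipped, not appended
        logger.warning(f"ignore token {token_id}: id is out of range, max={VOCAB_SIZE - 1}")
        continue
    tokens[token_id] = key.encode("utf-8")
    scores[token_id] = -1000.0

# len(tokens) == VOCAB_SIZE now holds by construction, which is why the patch
# can drop the trailing assert.
print([t.decode("utf-8") for t in tokens])
# ['<unk>', '<s>', '</s>', 'hello', '[PAD4]', '<|bot|>', '<|user|>', '[PAD7]']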