from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(dir_model)
vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
- assert max(tokenizer.vocab.values()) < vocab_size
+ # The maximum token index must be strictly less than vocab_size: a vocabulary of
+ # N tokens uses indices 0..N-1, since vocab_size counts tokens and indexing starts at 0.
+ max_vocab_index = max(tokenizer.get_vocab().values())
+ if max_vocab_index >= vocab_size:
+     raise ValueError(f"Maximum token index ({max_vocab_index}) is out of range for vocab_size {vocab_size}.")
+
reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
added_vocab = tokenizer.get_added_vocab()
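
For reference, a minimal standalone sketch of the same bounds check, using a toy vocabulary instead of the real tokenizer (the names below are illustrative only, not part of the conversion script):

    # A vocabulary of 3 tokens uses indices 0, 1, 2, so vocab_size == 3.
    toy_vocab = {"<s>": 0, "</s>": 1, "hello": 2}
    vocab_size = len(toy_vocab)
    max_vocab_index = max(toy_vocab.values())
    # 2 < 3 passes; a stray token mapped to index 3 would trigger the error.
    if max_vocab_index >= vocab_size:
        raise ValueError(f"Maximum token index ({max_vocab_index}) is out of range for vocab_size {vocab_size}.")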