except KeyError:
raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
+ def does_token_look_special(self, token: str | bytes) -> bool:
+ if isinstance(token, (bytes, bytearray)):
+ token_text = token.decode(encoding="utf-8")
+ elif isinstance(token, memoryview):
+ token_text = token.tobytes().decode(encoding="utf-8")
+ else:
+ token_text = token
+
+ # Some models mark some added tokens which ought to be control tokens as not special.
+ # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
+ seems_special = token_text in (
+ "<pad>", # deepseek-coder
+ "<mask>", "<2mass>", "[@BOS@]", # gemma{,-2}
+ )
+
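+ # <|...|>-style wrappers (e.g. <|endoftext|>, <|im_start|>) conventionally mark control tokens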
+ seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
+ seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>")) # deepseek-coder
+
+ # TODO: should these be marked as UNUSED instead? (maybe not)
+ seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">")) # gemma{,-2}
+
+ return seems_special
+
# used for GPT-2 BPE and WordPiece vocabs
def get_vocab_base(self) -> tuple[list[str], list[int], str]:
tokens: list[str] = []
for i in range(vocab_size):
if i not in reverse_vocab:
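+ # fill gaps in the token id space with [PAD{i}] placeholders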
tokens.append(f"[PAD{i}]")
- toktypes.append(gguf.TokenType.USER_DEFINED)
- elif reverse_vocab[i] in added_vocab:
- tokens.append(reverse_vocab[i])
- if tokenizer.added_tokens_decoder[i].special:
- toktypes.append(gguf.TokenType.CONTROL)
- else:
- toktypes.append(gguf.TokenType.USER_DEFINED)
+ toktypes.append(gguf.TokenType.UNUSED)
else:
- tokens.append(reverse_vocab[i])
- toktypes.append(gguf.TokenType.NORMAL)
+ token: str = reverse_vocab[i]
+ if token in added_vocab:
+ if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token):
+ toktypes.append(gguf.TokenType.CONTROL)
+ else:
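+ # b"\xe2\x96\x81" is U+2581 ("▁"), the SentencePiece whitespace marker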
+ token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ") # pre-normalize user-defined spaces
+ toktypes.append(gguf.TokenType.USER_DEFINED)
+ else:
+ toktypes.append(gguf.TokenType.NORMAL)
+ tokens.append(token)
return tokens, toktypes, tokpre
for i in range(vocab_size):
if i not in reverse_vocab:
tokens.append(f"[PAD{i}]")
- toktypes.append(gguf.TokenType.USER_DEFINED)
+ toktypes.append(gguf.TokenType.UNUSED)
elif reverse_vocab[i] in added_vocab:
tokens.append(reverse_vocab[i])
toktypes.append(gguf.TokenType.CONTROL)
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
scores: list[float] = [-10000.0] * vocab_size
- toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+ toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
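+ # default to UNUSED (not UNKNOWN) so ids never assigned below are not mistaken for real unknown tokens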
for token_id in range(tokenizer.vocab_size()):
piece = tokenizer.IdToPiece(token_id)
scores[token_id] = -1000.0
toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
+ tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
+ if tokenizer_config_file.is_file():
+ with open(tokenizer_config_file, "r", encoding="utf-8") as f:
+ tokenizer_config_json = json.load(f)
+ added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
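+ # added_tokens_decoder maps id strings to entries like {"content": "...", "special": bool}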
+ for token_id, token_data in added_tokens_decoder.items():
+ token_id = int(token_id)
+ token: str = token_data["content"]
+ if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
+ assert tokens[token_id] == token.encode("utf-8")
+ if token_data.get("special") or self.does_token_look_special(token):
+ toktypes[token_id] = SentencePieceTokenTypes.CONTROL
+ else:
+ token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ") # pre-normalize user-defined spaces
+ toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
+
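+ # use a low placeholder score; added tokens have no meaningful SentencePiece score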
+ scores[token_id] = -1000.0
+ tokens[token_id] = token.encode("utf-8")
+
if vocab_size > len(tokens):
pad_count = vocab_size - len(tokens)
logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
if (self.dir_model / "tokenizer.json").is_file():
self._set_vocab_gpt2()
else:
- # StableLM 2 1.6B uses a vocab in a similar format to Qwen's vocab
+ # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
self._set_vocab_qwen()
def set_gguf_parameters(self):
self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])
self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])
- self.gguf_writer.add_file_type(self.ftype)
self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
scores: list[float] = [-10000.0] * vocab_size
- toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+ toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
for token_id in range(tokenizer.vocab_size()):
for token_id, token_data in added_tokens_decoder.items():
token_id = int(token_id)
token = token_data["content"].encode("utf-8")
- if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
+ if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
assert tokens[token_id] == token
tokens[token_id] = token
scores[token_id] = -1000.0
for token_data in added_tokens:
token_id = int(token_data["id"])
token = token_data["content"].encode("utf-8")
- if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
+ if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
assert tokens[token_id] == token
tokens[token_id] = token
scores[token_id] = -1000.0
toktype = SentencePieceTokenTypes.BYTE
# take care of unused raw tokens
if piece.startswith('[UNUSED'):
- toktype = SentencePieceTokenTypes.UNKNOWN
+ toktype = SentencePieceTokenTypes.UNUSED
tokens.append(text)
scores.append(score)
if token == chat_eos_token:
chat_eos_token_id = token_id
token = token.encode("utf-8")
- if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
+ if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
assert tokens[token_id] == token
tokens[token_id] = token
scores[token_id] = -1000.0
if token == chat_eos_token:
chat_eos_token_id = token_id
token = token.encode("utf-8")
- if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
+ if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
assert tokens[token_id] == token
tokens[token_id] = token
scores[token_id] = -1000.0
model_arch = gguf.MODEL_ARCH.GEMMA2
def set_vocab(self):
- tokens, scores, toktypes = self._create_vocab_sentencepiece()
- # hack: This is required so that we can properly use start/end-of-turn for chat template
- for i in range(108):
- # including <unusedX>, <start_of_turn>, <end_of_turn>
- toktypes[i] = SentencePieceTokenTypes.CONTROL
- self.gguf_writer.add_tokenizer_model("llama")
- self.gguf_writer.add_tokenizer_pre("default")
- self.gguf_writer.add_token_list(tokens)
- self.gguf_writer.add_token_scores(scores)
- self.gguf_writer.add_token_types(toktypes)
-
- special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
- special_vocab.add_to_gguf(self.gguf_writer)
+ self._set_vocab_sentencepiece()
self.gguf_writer.add_add_space_prefix(False)
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
scores: list[float] = [-10000.0] * vocab_size
- toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+ toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
for token_id in range(tokenizer.vocab_size()):
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
scores: list[float] = [-10000.0] * vocab_size
- toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+ toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
for token_id in range(tokenizer.vocab_size()):
piece = tokenizer.IdToPiece(token_id)
if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
score = tokenizer.tokenizer.sp_model.get_score(token_id)
- if len(piece) == 0:
- text = f"[PAD{token_id}]".encode("utf-8")
-
if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
if piece in special_tokens:
- # show special tokens in prompt
- toktype = SentencePieceTokenTypes.USER_DEFINED
+ toktype = SentencePieceTokenTypes.CONTROL
+ elif len(piece) == 0:
+ text = f"[PAD{token_id}]".encode("utf-8")
+ toktype = SentencePieceTokenTypes.UNUSED
else:
- toktype = SentencePieceTokenTypes.UNKNOWN
+ toktype = SentencePieceTokenTypes.USER_DEFINED
tokens.append(text)
scores.append(score)
toktypes.append(toktype)
for i in range(vocab_size):
if i not in reverse_vocab:
tokens.append(f"[PAD{i}]")
- toktypes.append(gguf.TokenType.USER_DEFINED)
+ toktypes.append(gguf.TokenType.UNUSED)
elif reverse_vocab[i] in added_vocab:
tokens.append(reverse_vocab[i])
if tokenizer.added_tokens_decoder[i].special:
} else if (
tokenizer_pre == "command-r") {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
+ vocab.tokenizer_clean_spaces = false;
} else if (
tokenizer_pre == "qwen2") {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
// build special tokens cache
{
for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
- if (!(vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_NORMAL)) {
+ if (vocab.id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED | LLAMA_TOKEN_ATTR_UNKNOWN)) {
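+ // only control, user-defined and unknown tokens go in the cache; byte and unused tokens are deliberately excluded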
vocab.cache_special_tokens.push_back(id);
}
}
"[0-9][0-9][0-9]",
};
break;
- case LLAMA_VOCAB_PRE_TYPE_MPT:
- // TODO: MPT pre-tokenization regexes are unknown
- // the following are close, but not exact. run the following:
- // ./bin/test-tokenizer-0 ../models/ggml-vocab-mpt.gguf
- GGML_ASSERT("MPT pre-tokenization regexes are unknown - fixes needed");
- regex_exprs = {
- "\\s?\\p{L}+",
- "\\s?\\p{P}+",
- "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
- };
- break;
case LLAMA_VOCAB_PRE_TYPE_STARCODER:
case LLAMA_VOCAB_PRE_TYPE_REFACT:
case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
};
break;
case LLAMA_VOCAB_PRE_TYPE_GPT2:
+ case LLAMA_VOCAB_PRE_TYPE_MPT:
case LLAMA_VOCAB_PRE_TYPE_OLMO:
case LLAMA_VOCAB_PRE_TYPE_JAIS:
regex_exprs = {
break;
case LLAMA_VOCAB_PRE_TYPE_VIKING:
regex_exprs = {
- "\\p{N}",
" ?[^(\\s|.,!?…。,、।۔،)]+",
+ "\\p{N}",
};
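+ // note: the regexes above run in order, each pass splitting the fragments produced by the previous one, so \p{N} now only sees fragments already split by the first expression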
break;
default:
// #define PRETOKENIZERDEBUG
-static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer) {
+static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer, bool parse_special) {
// for each special token
for (const llama_vocab::id special_id : vocab.cache_special_tokens) {
const auto & data = vocab.id_to_token[special_id];
const auto & special_token = data.text;
+ if (!parse_special && (data.attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_UNKNOWN))) {
+ // Ignore control and unknown tokens when parse_special == false.
+ // User-defined tokens are still pre-tokenized before everything else
+ // ref: https://github.com/huggingface/tokenizers/blob/fdd26ba9a3f0c133427aab0423888cbde91362d7/tokenizers/src/tokenizer/mod.rs#L726
+ // This is mostly relevant for neox-style tokenizers (mpt, olmo, stablelm, etc.)
+ continue;
+ }
+
// for each text fragment
std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
while (it != buffer.end()) {
if (!raw_text.empty()) {
fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
- if (parse_special) tokenizer_st_partition(vocab, fragment_buffer);
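+ // always partition; tokenizer_st_partition now skips control/unknown tokens itself when parse_special == false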
+ tokenizer_st_partition(vocab, fragment_buffer, parse_special);
}
switch (vocab.type) {