From: Georgi Gerganov <ggerganov@gmail.com>
Date: Fri, 10 Oct 2025 14:17:31 +0000 (+0300)
Subject: vocab : mark EOT token for Granite models (#16499)
X-Git-Tag: upstream/0.0.6764~35
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=81086cd6a3ca1252f0dc0f938171648399179c53;p=pkg%2Fggml%2Fsources%2Fllama.cpp

vocab : mark EOT token for Granite models (#16499)

* vocab : mark EOT token for Granite models

* sampling : fallback to EOS when EOT is not found
---

diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp
index 2186f827..55d2e355 100644
--- a/src/llama-sampling.cpp
+++ b/src/llama-sampling.cpp
@@ -2541,8 +2541,13 @@ static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
     if (n_non_eog == 0) {
         cur_p->size = 1;
         cur_p->data[0].id = ctx->vocab->token_eot();
+        if (cur_p->data[0].id == LLAMA_TOKEN_NULL) {
+            cur_p->data[0].id = ctx->vocab->token_eos();
+        }
         cur_p->data[0].logit = 1.0f;

+        GGML_ASSERT(cur_p->data[0].id != LLAMA_TOKEN_NULL);
+
         return;
     }

diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index f965752a..7fffd171 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -2171,6 +2171,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                     || t.first == "<|end|>"
                     || t.first == "<end_of_turn>"
                     || t.first == "<|endoftext|>"
+                    || t.first == "<|end_of_text|>" // granite
                     || t.first == "<EOT>"
                     || t.first == "_<EOT>"
                     || t.first == "<｜end▁of▁sentence｜>" // DeepSeek