git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
vocab : mark EOT token for Granite models (#16499)
author: Georgi Gerganov <redacted>
Fri, 10 Oct 2025 14:17:31 +0000 (17:17 +0300)
committer: GitHub <redacted>
Fri, 10 Oct 2025 14:17:31 +0000 (17:17 +0300)
* vocab : mark EOT token for Granite models

* sampling : fallback to EOS when EOT is not found

src/llama-sampling.cpp
src/llama-vocab.cpp

index 2186f827bf54307731d1fbb57e9b38b380415f94..55d2e355fd8bb50f4257ad652add533856390298 100644 (file)
@@ -2541,8 +2541,13 @@ static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_
     if (n_non_eog == 0) {
         cur_p->size = 1;
         cur_p->data[0].id = ctx->vocab->token_eot();
+        if (cur_p->data[0].id == LLAMA_TOKEN_NULL) {
+            cur_p->data[0].id = ctx->vocab->token_eos();
+        }
         cur_p->data[0].logit = 1.0f;
 
+        GGML_ASSERT(cur_p->data[0].id != LLAMA_TOKEN_NULL);
+
         return;
     }
 
index f965752a84970c35642e6e254693ad86cc80148d..7fffd171491aa31f8f063c8a8daba946f72ac4f8 100644 (file)
@@ -2171,6 +2171,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                         || t.first == "<|end|>"
                         || t.first == "<end_of_turn>"
                         || t.first == "<|endoftext|>"
+                        || t.first == "<|end_of_text|>" // granite
                         || t.first == "<EOT>"
                         || t.first == "_<EOT>"
                         || t.first == "<|end▁of▁sentence|>" // DeepSeek