convert : set "add bos" == True for Gemma 4 (#21500)
author     Georgi Gerganov <redacted>
           Mon, 6 Apr 2026 10:52:07 +0000 (13:52 +0300)
committer  GitHub <redacted>
           Mon, 6 Apr 2026 10:52:07 +0000 (13:52 +0300)
* convert : set "add bos" == True for Gemma 4

* cont : handle old GGUFs

convert_hf_to_gguf.py
src/llama-vocab.cpp

index 7ba6f6a7425f56b77e1aceded16a47a576abb358..c1737bb2c31fbd00504a38a4a52e44844a154f05 100755
@@ -7472,7 +7472,7 @@ class Gemma4Model(Gemma3Model):
         special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
         special_vocab.add_to_gguf(self.gguf_writer)
         self.gguf_writer.add_add_space_prefix(False)
-        self.gguf_writer.add_add_bos_token(False) # already added via the chat template
+        self.gguf_writer.add_add_bos_token(True)
 
     def set_gguf_parameters(self):
         super().set_gguf_parameters()
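
For a freshly converted file, the change can be verified by reading the metadata back with gguf-py's GGUFReader; a minimal sketch, not part of this commit (the model path is hypothetical; the key "tokenizer.ggml.add_bos_token" is the one written by add_add_bos_token above):

    from gguf import GGUFReader

    reader = GGUFReader("gemma4.gguf")  # hypothetical path
    field = reader.fields["tokenizer.ggml.add_bos_token"]
    # the field's value lives in the part indexed by field.data[0]
    print(bool(field.parts[field.data[0]][0]))  # expect: True
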
index cb55b46b7211acd663e4c2066d291a6bd4ee76dd..75dbaa91ee43fa36654ad5b1767ab7d63c590bc6 100644
@@ -2325,6 +2325,14 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             if (ml.get_key(LLM_KV_TOKENIZER_ADD_SEP, temp, false)) {
                 add_sep = temp;
             }
+
+            // workaround for Gemma 4
+            // ref: https://github.com/ggml-org/llama.cpp/pull/21500
+            if (pre_type == LLAMA_VOCAB_PRE_TYPE_GEMMA4 && !add_bos) {
+                add_bos = true;
+
+                LLAMA_LOG_WARN("%s: override '%s' to 'true' for Gemma4\n", __func__, kv(LLM_KV_TOKENIZER_ADD_BOS).c_str());
+            }
         }
 
         // auto-detect special tokens by text
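
GGUFs converted before this commit still store add_bos == false, so the loader above flips the flag at runtime rather than forcing a re-conversion. Any tool that consumes GGUF metadata directly would need to mirror the same override; a sketch under stated assumptions (the diff only shows the enum LLAMA_VOCAB_PRE_TYPE_GEMMA4, so "gemma4" as the pre-tokenizer string stored under "tokenizer.ggml.pre" is an assumption):

    from gguf import GGUFReader

    def effective_add_bos(path: str) -> bool:
        reader = GGUFReader(path)

        def value(key: str):
            field = reader.fields.get(key)
            return None if field is None else field.parts[field.data[0]]

        raw = value("tokenizer.ggml.add_bos_token")
        add_bos = bool(raw[0]) if raw is not None else False

        raw = value("tokenizer.ggml.pre")
        pre = bytes(raw).decode("utf-8") if raw is not None else ""

        # old Gemma 4 conversions stored false; force true, as the
        # llama.cpp loader now does ("gemma4" is an assumed pre string)
        if pre == "gemma4" and not add_bos:
            return True
        return add_bos
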