From: Daniel Bevenius
Date: Mon, 31 Mar 2025 16:40:56 +0000 (+0200)
Subject: vocab : add special infill tokens for CodeLlama (#11850)
X-Git-Tag: upstream/0.0.5028~15
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=c80a7759dab10657b9b6c3e87eef988a133b9b6a;p=pkg%2Fggml%2Fsources%2Fllama.cpp

vocab : add special infill tokens for CodeLlama (#11850)

* vocab : add special infill tokens for CodeLlama

The commit adds the following special tokens for CodeLlama infill:
- `▁<PRE>`
- `▁<SUF>`
- `▁<MID>`

The motivation for this is that the infill example currently suggests
CodeLlama as a model to use. But when using this model, the following
error is generated:
```console
/llama.cpp-debug/examples/infill/infill.cpp:165: GGML_ASSERT(llama_vocab_fim_pre(vocab) >= 0) failed

Could not attach to process.  If your uid matches the uid of the target
process, check the setting of /proc/sys/kernel/yama/ptrace_scope, or try
again as the root user.  For more details, see /etc/sysctl.d/10-ptrace.conf
ptrace: Operation not permitted.
No stack.
The program is not being run.
305251 Aborted                 (core dumped)
./build/bin/llama-infill -t 10 -ngl 0 -m models/codellama-13b.Q5_K_S.gguf \
  -c 4096 --temp 0.7 --repeat_penalty 1.1 -n 20 \
  --in-prefix "def helloworld():\n    print(\"hell" \
  --in-suffix "\n   print(\"goodbye world\")\n    "
```
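
With the tokens mapped, the FIM token ids resolve and the assertion no
longer triggers. A minimal sketch of how this can be verified against a
CodeLlama GGUF (assuming the `llama.h` API of this revision; error
handling kept to a minimum):

```cpp
// Sketch: print the special FIM token ids for a loaded model. After this
// change they should be valid ids (>= 0) for CodeLlama instead of -1
// (LLAMA_TOKEN_NULL), which is what the GGML_ASSERT above checks for.
#include "llama.h"

#include <cstdio>

int main(int argc, char ** argv) {
    if (argc < 2) {
        std::fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }

    llama_backend_init();

    llama_model * model = llama_model_load_from_file(argv[1], llama_model_default_params());
    if (model == nullptr) {
        return 1;
    }

    const llama_vocab * vocab = llama_model_get_vocab(model);

    std::printf("fim_pre: %d\n", llama_vocab_fim_pre(vocab)); // ▁<PRE>
    std::printf("fim_suf: %d\n", llama_vocab_fim_suf(vocab)); // ▁<SUF>
    std::printf("fim_mid: %d\n", llama_vocab_fim_mid(vocab)); // ▁<MID>

    llama_model_free(model);
    llama_backend_free();
    return 0;
}
```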

* squash! vocab : add special infill tokens for CodeLlama

Add _<EOT> as well.
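
For context on why these tokens matter: CodeLlama's fill-in-the-middle
format wraps the surrounding code in `▁<PRE>`/`▁<SUF>` markers and asks
the model to generate at `▁<MID>`, terminating with its EOT variant,
which is why the latter also belongs in the end-of-generation set. A
rough sketch of how such a prompt can be assembled from the resolved
token ids (hypothetical helper, not part of this commit):

```cpp
// Sketch of FIM prompt assembly: [FIM_PRE] prefix [FIM_SUF] suffix [FIM_MID],
// after which the model generates the infill and is expected to stop at its
// EOT token (now registered as end-of-generation).
#include "llama.h"

#include <vector>

static std::vector<llama_token> build_fim_prompt(
        const llama_vocab              * vocab,
        const std::vector<llama_token> & prefix,
        const std::vector<llama_token> & suffix) {
    std::vector<llama_token> out;
    out.push_back(llama_vocab_fim_pre(vocab));           // ▁<PRE>
    out.insert(out.end(), prefix.begin(), prefix.end());
    out.push_back(llama_vocab_fim_suf(vocab));           // ▁<SUF>
    out.insert(out.end(), suffix.begin(), suffix.end());
    out.push_back(llama_vocab_fim_mid(vocab));           // ▁<MID>
    return out;
}
```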
---

diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 78072e17..31e2055f 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -1807,6 +1807,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                         || t.first == ""
                         || t.first == "<|endoftext|>"
                         || t.first == ""
+                        || t.first == "_"
                         || t.first == "<|end▁of▁sentence|>" // DeepSeek
                    ) {
                     special_eot_id = t.second;
@@ -1839,6 +1840,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                         || t.first == ""
                         || t.first == "<|fim▁begin|>" // DeepSeek
                         || t.first == "
"
+                        || t.first == "▁
"          // CodeLlama
                         ) {
                     special_fim_pre_id = t.second;
                     if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1856,6 +1858,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                         || t.first == ""
                         || t.first == "<|fim▁hole|>" // DeepSeek
                         || t.first == ""
+                        || t.first == "▁"         // CodeLlama
                         ) {
                     special_fim_suf_id = t.second;
                     if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1873,6 +1876,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                         || t.first == ""
                         || t.first == "<|fim▁end|>"  // DeepSeek
                         || t.first == ""
+                        || t.first == "▁"         // CodeLlama
                         ) {
                     special_fim_mid_id = t.second;
                     if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1957,6 +1961,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                     || t.first == "<|endoftext|>"
                     || t.first == "<|eom_id|>"
                     || t.first == ""
+                    || t.first == "_"
                ) {
                 special_eog_ids.insert(t.second);
                 if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {