git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
cli : provide model with text filename (#19783)
author Sigbjørn Skjæret <redacted>
Sun, 22 Feb 2026 21:33:49 +0000 (22:33 +0100)
committer GitHub <redacted>
Sun, 22 Feb 2026 21:33:49 +0000 (22:33 +0100)
tools/cli/cli.cpp
tools/server/server-context.cpp
tools/server/server-context.h

index ad421e63261f53bae95878e0da802308c9d55caa..e57bf52e36c1f95933b9bb2ca838b448bf6745c1 100644 (file)
@@ -380,6 +380,15 @@ int main(int argc, char ** argv) {
                 console::error("file does not exist or cannot be opened: '%s'\n", fname.c_str());
                 continue;
             }
+            if (inf.fim_sep_token != LLAMA_TOKEN_NULL) {
+                cur_msg += common_token_to_piece(ctx_cli.ctx_server.get_llama_context(), inf.fim_sep_token, true);
+                cur_msg += fname;
+                cur_msg.push_back('\n');
+            } else {
+                cur_msg += "--- File: ";
+                cur_msg += fname;
+                cur_msg += " ---\n";
+            }
             cur_msg += marker;
             console::log("Loaded text from '%s'\n", fname.c_str());
             continue;
index 8aab0d4c1b12fec7dd7128d053a1c8d2927151e4..0f2f3a45aaae6460cc2d21436b7e907b0e90e46d 100644 (file)
@@ -2911,6 +2911,9 @@ server_context_meta server_context::get_meta() const {
         /* fim_pre_token          */ llama_vocab_fim_pre(impl->vocab),
         /* fim_sub_token          */ llama_vocab_fim_suf(impl->vocab),
         /* fim_mid_token          */ llama_vocab_fim_mid(impl->vocab),
+        /* fim_pad_token          */ llama_vocab_fim_pad(impl->vocab),
+        /* fim_rep_token          */ llama_vocab_fim_rep(impl->vocab),
+        /* fim_sep_token          */ llama_vocab_fim_sep(impl->vocab),
 
         /* model_vocab_type       */ llama_vocab_type(impl->vocab),
         /* model_vocab_n_tokens   */ llama_vocab_n_tokens(impl->vocab),
index c0b5d373ff9255f2e7a387c88d78d900a53f1177..03c29f513bf847acb4273b06c42e0cecabf38eed 100644 (file)
@@ -30,6 +30,9 @@ struct server_context_meta {
     llama_token fim_pre_token;
     llama_token fim_sub_token;
     llama_token fim_mid_token;
+    llama_token fim_pad_token;
+    llama_token fim_rep_token;
+    llama_token fim_sep_token;
 
     // model meta
     enum llama_vocab_type model_vocab_type;