git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Only use FIM middle token if it exists (#7648)
author: Sigbjørn Skjæret <redacted>
Tue, 18 Jun 2024 12:19:45 +0000 (14:19 +0200)
committer: GitHub <redacted>
Tue, 18 Jun 2024 12:19:45 +0000 (22:19 +1000)
* Only use FIM middle if it exists

* Only use FIM middle if it exists

examples/infill/infill.cpp
examples/server/server.cpp

index 0e4ec79c693fa84cfad2dd795747f8d966d1db18..3e82e4a81a20bca745b004264eaace04a2283af3 100644 (file)
@@ -223,7 +223,11 @@ int main(int argc, char ** argv) {
     inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
     embd_inp = inp_pfx;
     embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
-    embd_inp.push_back(llama_token_middle(model));
+
+    const llama_token middle_token = llama_token_middle(model);
+    if (middle_token >= 0) {
+        embd_inp.push_back(middle_token);
+    }
 
     LOG("prefix: \"%s\"\n", log_tostr(params.input_prefix));
     LOG("suffix: \"%s\"\n", log_tostr(params.input_suffix));
@@ -528,7 +532,12 @@ int main(int argc, char ** argv) {
                 inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
                 embd_inp = inp_pfx;
                 embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
-                embd_inp.push_back(llama_token_middle(model));
+
+                const llama_token middle_token = llama_token_middle(model);
+                if (middle_token >= 0) {
+                    embd_inp.push_back(middle_token);
+                }
+
                 embd.clear();
                 n_remain = params.n_predict;
                 n_past = 0;
index 919078f2bd920553c75944e81954167dbf0dc083..ec59307b2881b35f31803125a49ec0112b0d3336 100644 (file)
@@ -2038,7 +2038,12 @@ struct server_context {
                             prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(model)); // always add BOS
                             prefix_tokens.insert(prefix_tokens.end(),   llama_token_suffix(model));
                             prefix_tokens.insert(prefix_tokens.end(),   suffix_tokens.begin(), suffix_tokens.end());
-                            prefix_tokens.push_back(llama_token_middle(model));
+
+                            const llama_token middle_token = llama_token_middle(model);
+                            if (middle_token >= 0) {
+                                prefix_tokens.push_back(middle_token);
+                            }
+
                             prompt_tokens = prefix_tokens;
                         } else {
                             prompt_tokens = tokenize(slot.prompt, system_prompt.empty()); // add BOS if there isn't system prompt