git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
server : avoid antiprompt in probabilities of final response (#2849)
author Jhen-Jie Hong <redacted>
Sat, 2 Sep 2023 00:31:46 +0000 (08:31 +0800)
committer GitHub <redacted>
Sat, 2 Sep 2023 00:31:46 +0000 (08:31 +0800)
examples/server/server.cpp

index 09eac2ec266be258055fd0bee53b84d413471e96..94def943b9a0a3ac323fb0bb445f5cad543c5b35 100644 (file)
@@ -1379,7 +1379,13 @@ int main(int argc, char **argv)
                 }
             }
 
-            const json data = format_final_response(llama, llama.generated_text, llama.generated_token_probs);
+            auto probs = llama.generated_token_probs;
+            if (llama.params.n_probs > 0 && llama.stopped_word) {
+                const std::vector<llama_token> stop_word_toks = llama_tokenize(llama.ctx, llama.stopping_word, false);
+                probs = std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.end() - stop_word_toks.size());
+            }
+
+            const json data = format_final_response(llama, llama.generated_text, probs);
 
             llama_print_timings(llama.ctx);
 
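In the non-streaming path above, when n_probs > 0 and generation stopped on a stop word, the stopping word is re-tokenized and that many trailing entries are dropped from generated_token_probs before format_final_response builds the JSON. A minimal standalone sketch of that trimming step, using hypothetical names (TokenProb, trim_stop_word_probs) in place of completion_token_output and the inline code from the hunk:

    #include <cstddef>
    #include <vector>

    // Stand-in for completion_token_output (hypothetical, for illustration only).
    struct TokenProb { int tok; float prob; };

    // Drop the trailing entries that correspond to the tokenized stop word,
    // guarding against a stop word longer than the generated sequence.
    std::vector<TokenProb> trim_stop_word_probs(const std::vector<TokenProb> & probs,
                                                std::size_t n_stop_word_tokens) {
        const std::size_t n_keep = n_stop_word_tokens < probs.size()
                                       ? probs.size() - n_stop_word_tokens
                                       : 0;
        return std::vector<TokenProb>(probs.begin(), probs.begin() + n_keep);
    }
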
@@ -1456,7 +1462,11 @@ int main(int argc, char **argv)
 
                     if (!llama.has_next_token) {
                         // Generation is done, send extra information.
-                        const json data = format_final_response(llama, "", llama.generated_token_probs);
+                        const json data = format_final_response(
+                            llama,
+                            "",
+                            std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.begin() + sent_token_probs_index)
+                        );
 
                         const std::string str =
                             "data: " +