git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
server : send token probs for "stream == false" (#4714)
authorGeorgi Gerganov <redacted>
Thu, 4 Jan 2024 17:56:33 +0000 (19:56 +0200)
committerGitHub <redacted>
Thu, 4 Jan 2024 17:56:33 +0000 (19:56 +0200)
examples/server/server.cpp

index e45ea809a098a79abbbf56d228e9560707d984d1..d1469fb0833edc4235596d98b348cdb9fd4418d4 100644 (file)
@@ -1265,7 +1265,7 @@ struct llama_server_context
         {
             std::vector<completion_token_output> probs_output = {};
             const std::vector<llama_token> to_send_toks = llama_tokenize(ctx, tkn.text_to_send, false);
-            size_t probs_pos = std::min(slot.sent_token_probs_index, slot.generated_token_probs.size());
+            size_t probs_pos      = std::min(slot.sent_token_probs_index,                       slot.generated_token_probs.size());
             size_t probs_stop_pos = std::min(slot.sent_token_probs_index + to_send_toks.size(), slot.generated_token_probs.size());
             if (probs_pos < probs_stop_pos)
             {
@@ -1325,7 +1325,7 @@ struct llama_server_context
             {
                 probs = std::vector<completion_token_output>(
                                     slot.generated_token_probs.begin(),
-                                    slot.generated_token_probs.begin() + slot.sent_token_probs_index);
+                                    slot.generated_token_probs.end());
             }
             res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs);
         }