git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
server : fix incorrect num_tokens_predicted (#3480)
author: Jhen-Jie Hong <redacted>
Thu, 5 Oct 2023 14:02:55 +0000 (09:02 -0500)
committer: GitHub <redacted>
Thu, 5 Oct 2023 14:02:55 +0000 (17:02 +0300)
examples/server/server.cpp

index 921eb5da4812d0d2b45d61d7f9afef2bfc04fd87..6e31e1332e1925d17a5c89c3c682e8cfc5cd133a 100644 (file)
@@ -504,9 +504,11 @@ struct llama_server_context
                                            });
         }
 
+        bool tg = true;
         while (n_past < embd.size())
         {
             int n_eval = (int)embd.size() - n_past;
+            tg = n_eval == 1;
             if (n_eval > params.n_batch)
             {
                 n_eval = params.n_batch;
@@ -633,7 +635,9 @@ struct llama_server_context
 
             last_n_tokens.erase(last_n_tokens.begin());
             last_n_tokens.push_back(result.tok);
-            num_tokens_predicted++;
+            if (tg) {
+                num_tokens_predicted++;
+            }
         }
 
         // add it to the context
@@ -1124,8 +1128,6 @@ static json format_timings(llama_server_context &llama)
 {
     const auto timings = llama_get_timings(llama.ctx);
 
-    assert(timings.n_eval == ptrdiff_t(llama.num_tokens_predicted));
-
     return json{
         {"prompt_n", timings.n_p_eval},
         {"prompt_ms", timings.t_p_eval_ms},