git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
server : simplify logic for empty prompts (#5953)
author: Georgi Gerganov <redacted>
Sat, 9 Mar 2024 10:34:18 +0000 (12:34 +0200)
committer: GitHub <redacted>
Sat, 9 Mar 2024 10:34:18 +0000 (12:34 +0200)
examples/server/server.cpp

index 6e0f8328cdf5a6e2c42f1f532d30b9b07a272528..aedf0afc603c349e145bcdd7acc3a7385922e372 100644 (file)
@@ -1704,19 +1704,6 @@ struct server_context {
         // next, batch any pending prompts without exceeding n_batch
         if (params.cont_batching || batch.n_tokens == 0) {
             for (auto & slot : slots) {
-                const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get<std::string>().empty());
-
-                // empty prompt passed -> release the slot and send empty response
-                // note: infill mode allows empty prompt
-                if (slot.state == SLOT_STATE_IDLE && slot.command == SLOT_COMMAND_LOAD_PROMPT && !has_prompt && !slot.infill) {
-                    slot.state = SLOT_STATE_PROCESSING;
-                    slot.command = SLOT_COMMAND_NONE;
-                    slot.release();
-                    slot.print_timings();
-                    send_final_response(slot);
-                    continue;
-                }
-
                 // this slot still has a prompt to be processed
                 if (slot.state == SLOT_STATE_IDLE && slot.command == SLOT_COMMAND_LOAD_PROMPT) {
                     auto & prompt_tokens = slot.prompt_tokens;
@@ -1768,6 +1755,21 @@ struct server_context {
                             {"prompt_tokens",   tokens_to_str(ctx, prompt_tokens.cbegin(), prompt_tokens.cend())},
                         });
 
+                        // empty prompt passed -> release the slot and send empty response
+                        if (prompt_tokens.empty()) {
+                            LOG_INFO("empty prompt - releasing slot", {
+                                {"id_slot", slot.id},
+                                {"id_task", slot.id_task}
+                            });
+
+                            slot.state = SLOT_STATE_PROCESSING;
+                            slot.command = SLOT_COMMAND_NONE;
+                            slot.release();
+                            slot.print_timings();
+                            send_final_response(slot);
+                            continue;
+                        }
+
                         if (slot.embedding) {
                             // this prompt is too large to process - discard it
                             if (slot.n_prompt_tokens > n_batch) {