git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
server : handle content array in chat API (#8449)
author Georgi Gerganov <redacted>
Fri, 12 Jul 2024 11:48:15 +0000 (14:48 +0300)
committer GitHub <redacted>
Fri, 12 Jul 2024 11:48:15 +0000 (14:48 +0300)
* server : handle content array in chat API

* Update examples/server/utils.hpp

Co-authored-by: Xuan Son Nguyen <redacted>
---------

Co-authored-by: Xuan Son Nguyen <redacted>
examples/server/utils.hpp

index 7ef2a519a10c76f22fe47219aecee366aafc6d82..db6b3b74d1dd2fe3395dc5faeb3b4a630c3a5b10 100644 (file)
@@ -122,8 +122,26 @@ inline std::string format_chat(const struct llama_model * model, const std::stri
 
     for (size_t i = 0; i < messages.size(); ++i) {
         const auto & curr_msg = messages[i];
-        std::string role    = json_value(curr_msg, "role",    std::string(""));
-        std::string content = json_value(curr_msg, "content", std::string(""));
+
+        std::string role = json_value(curr_msg, "role", std::string(""));
+
+        std::string content;
+        if (curr_msg.contains("content")) {
+            if (curr_msg["content"].is_string()) {
+                content = curr_msg["content"].get<std::string>();
+            } else if (curr_msg["content"].is_array()) {
+                for (const auto & part : curr_msg["content"]) {
+                    if (part.contains("text")) {
+                        content += "\n" + part["text"].get<std::string>();
+                    }
+                }
+            } else {
+                throw std::runtime_error("Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
+            }
+        } else {
+            throw std::runtime_error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
+        }
+
         chat.push_back({role, content});
     }