From 4e24cffd8cccd653634e24ee461c252bd77b1426 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 12 Jul 2024 14:48:15 +0300
Subject: [PATCH] server : handle content array in chat API (#8449)

* server : handle content array in chat API

* Update examples/server/utils.hpp

Co-authored-by: Xuan Son Nguyen

---------

Co-authored-by: Xuan Son Nguyen
---
 examples/server/utils.hpp | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index 7ef2a519a..db6b3b74d 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -122,8 +122,26 @@ inline std::string format_chat(const struct llama_model * model, const std::stri
     for (size_t i = 0; i < messages.size(); ++i) {
         const auto & curr_msg = messages[i];
-        std::string role = json_value(curr_msg, "role", std::string(""));
-        std::string content = json_value(curr_msg, "content", std::string(""));
+
+        std::string role = json_value(curr_msg, "role", std::string(""));
+
+        std::string content;
+        if (curr_msg.contains("content")) {
+            if (curr_msg["content"].is_string()) {
+                content = curr_msg["content"].get<std::string>();
+            } else if (curr_msg["content"].is_array()) {
+                for (const auto & part : curr_msg["content"]) {
+                    if (part.contains("text")) {
+                        content += "\n" + part["text"].get<std::string>();
+                    }
+                }
+            } else {
+                throw std::runtime_error("Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
+            }
+        } else {
+            throw std::runtime_error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
+        }
+
         chat.push_back({role, content});
     }
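
Note (not part of the patch): the sketch below is a minimal standalone illustration of the content handling this change adds to format_chat(). It accepts either a plain string "content" or an OpenAI-style array of text parts and flattens it into one string. It assumes nlohmann::json is available; extract_content is a hypothetical helper name used only for this example.

// illustration only -- mirrors the logic added to format_chat() in utils.hpp
#include <iostream>
#include <stdexcept>
#include <string>

#include <nlohmann/json.hpp>

using json = nlohmann::json;

// hypothetical helper, not part of the llama.cpp server code
static std::string extract_content(const json & msg) {
    if (!msg.contains("content")) {
        throw std::runtime_error("Missing 'content'");
    }

    std::string content;
    if (msg["content"].is_string()) {
        // simple case: "content": "Hello"
        content = msg["content"].get<std::string>();
    } else if (msg["content"].is_array()) {
        // OpenAI-style parts: "content": [{"type": "text", "text": "..."}, ...]
        for (const auto & part : msg["content"]) {
            if (part.contains("text")) {
                content += "\n" + part["text"].get<std::string>();
            }
        }
    } else {
        throw std::runtime_error("Invalid 'content' type");
    }
    return content;
}

int main() {
    // string form of "content"
    json m1 = {{"role", "user"}, {"content", "Hello"}};

    // array-of-parts form, as sent by some OpenAI-compatible clients
    json m2 = {
        {"role", "user"},
        {"content", json::array({
            {{"type", "text"}, {"text", "Hello"}},
            {{"type", "text"}, {"text", "How are you?"}}
        })}
    };

    std::cout << extract_content(m1) << "\n";  // prints: Hello
    std::cout << extract_content(m2) << "\n";  // prints each text part on its own line (with a leading newline)
    return 0;
}

Both message forms end up as a single string that is then pushed into the chat vector, so the downstream template application code is unchanged.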