Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-25)
5b7b0ac8df
* json: fix arrays (disallow `[,1]`)
* json: support tuple types (`[number, string]`)
* json: support additionalProperties (`{[k: string]: [string,number][]}`)
* json: support required / optional properties
* json: add support for pattern
* json: resolve $ref (and support https schema urls)
* json: fix $ref resolution
* join: support union types (mostly for nullable types I think)
* json: support allOf + nested anyOf
* json: support any (`{}` or `{type: object}`)
* json: fix merge
* json: temp fix for escapes
* json: spaces in output and unrestricted output spaces
* json: add typings
* json:fix typo
* Create ts-type-to-grammar.sh
* json: fix _format_literal (json.dumps already escapes quotes)
* json: merge lit sequences and handle negatives {"type": "string", "pattern": "^({\"question\": \"[^\"]+\", \"response\": \"[^\"]+\"}\\n)+$"}
* json: handle pattern repetitions
* Update json-schema-to-grammar.mjs
* Create regex-to-grammar.py
* json: extract repeated regexp patterns to subrule
* Update json-schema-to-grammar.py
* Update json-schema-to-grammar.py
* Update json-schema-to-grammar.py
* json: handle schema from pydantic Optional fields
* Update json-schema-to-grammar.py
* Update json-schema-to-grammar.py
* Update ts-type-to-grammar.sh
* Update ts-type-to-grammar.sh
* json: simplify nullable fields handling
* json: accept duplicate identical rules
* json: revert space to 1 at most
* json: reuse regexp pattern subrules
* json: handle uuid string format
* json: fix literal escapes
* json: add --allow-fetch
* json: simplify range escapes
* json: support negative ranges in patterns
* Delete commit.txt
* json: custom regex parser, adds dot support & JS-portable
* json: rm trailing spaces
* Update json-schema-to-grammar.mjs
* json: updated server & chat `( cd examples/server && ./deps.sh )`
* json: port fixes from mjs to python
* Update ts-type-to-grammar.sh
* json: support prefixItems alongside array items
* json: add date format + fix uuid
* json: add date, time, date-time formats
* json: preserve order of props from TS defs
* json: port schema converter to C++, wire in ./server
* json: nits
* Update json-schema-to-grammar.cpp
* Update json-schema-to-grammar.cpp
* Update json-schema-to-grammar.cpp
* json: fix mjs implementation + align outputs
* Update json-schema-to-grammar.mjs.hpp
* json: test C++, JS & Python versions
* json: nits + regen deps
* json: cleanup test
* json: revert from c++17 to 11
* json: nit fixes
* json: dirty include for test
* json: fix zig build
* json: pass static command to std::system in tests (fixed temp files)
* json: fix top-level $refs
* json: don't use c++20 designated initializers
* nit
* json: basic support for reserved names `{number:{number:{root:number}}}`
* Revamp test cmake to allow args (WORKING_DIRECTORY needed for JSON test)
* json: re-ran server deps.sh
* json: simplify test
* json: support mix of additional props & required/optional
* json: add tests for some expected failures
* json: fix type=const in c++, add failure expectations for non-str const&enum
* json: test (& simplify output of) empty schema
* json: check parsing in test + fix value & string refs
* json: add server tests for OAI JSON response_format
* json: test/fix top-level anyOf
* json: improve grammar parsing failures
* json: test/fix additional props corner cases
* json: fix string patterns (was missing quotes)
* json: ws nit
* json: fix json handling in server when there's no response_format
* json: catch schema conversion errors in server
* json: don't complain about unknown format type in server if unset
* json: cleaner build of test
* json: create examples/json-schema-pydantic-example.py
* json: fix date pattern
* json: move json.hpp & json-schema-to-grammar.{cpp,h} to common
* json: indent 4 spaces
* json: fix naming of top-level c++ function (+ drop unused one)
* json: avoid using namespace std
* json: fix zig build
* Update server.feature
* json: iostream -> fprintf
* json: space before & refs for consistency
* json: nits
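To illustrate the server-side JSON-schema wiring these commits describe, the sketch below (illustrative, not part of the repository; the schema contents and program are made up) builds an OpenAI-style request body with a `response_format` field and extracts the schema the same way `oaicompat_completion_params_parse()` in the header below does, before the server converts it to a GBNF grammar.

// Minimal sketch (illustrative, not part of llama.cpp): how a "response_format"
// field in an OpenAI-style request maps to the server's "json_schema" parameter.
#include <nlohmann/json.hpp>
#include <cstdio>

using json = nlohmann::json;

int main() {
    // Hypothetical schema: an object with a required array of strings
    json schema;
    schema["type"]                                  = "object";
    schema["properties"]["colors"]["type"]          = "array";
    schema["properties"]["colors"]["items"]["type"] = "string";
    schema["required"]                              = json::array({"colors"});

    json body;
    body["model"]    = "gpt-3.5-turbo";
    body["messages"] = json::array({ json{{"role", "user"}, {"content", "List three colors as JSON."}} });
    body["response_format"] = json{{"type", "json_object"}, {"schema", schema}};

    // Same check as oaicompat_completion_params_parse() below: the schema is copied
    // into "json_schema", which the server then turns into a GBNF grammar.
    json llama_params;
    const json response_format = body["response_format"];
    if (response_format.value("type", std::string()) == "json_object") {
        llama_params["json_schema"] = response_format.value("schema", json::object());
    }

    printf("%s\n", llama_params.dump(2).c_str());
    return 0;
}
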
618 lines | 21 KiB | C++
#pragma once

#include "llama.h"
#include "common.h"

#include "json.hpp"

#include <string>
#include <vector>
#include <sstream>
#include <random>

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"

using json = nlohmann::json;

// https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11
enum error_type {
    ERROR_TYPE_INVALID_REQUEST,
    ERROR_TYPE_AUTHENTICATION,
    ERROR_TYPE_SERVER,
    ERROR_TYPE_NOT_FOUND,
    ERROR_TYPE_PERMISSION,
    ERROR_TYPE_UNAVAILABLE,   // custom error
    ERROR_TYPE_NOT_SUPPORTED, // custom error
};

extern bool server_verbose;
extern bool server_log_json;

#ifndef SERVER_VERBOSE
#define SERVER_VERBOSE 1
#endif

#if SERVER_VERBOSE != 1
#define LOG_VERBOSE(MSG, ...)
#else
#define LOG_VERBOSE(MSG, ...)                                          \
    do                                                                 \
    {                                                                  \
        if (server_verbose)                                            \
        {                                                              \
            server_log("VERB", __func__, __LINE__, MSG, __VA_ARGS__);  \
        }                                                              \
    } while (0)
#endif

#define LOG_ERROR(  MSG, ...) server_log("ERR",  __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_WARNING(MSG, ...) server_log("WARN", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_INFO(   MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)

template <typename T>
static T json_value(const json &body, const std::string &key, const T &default_value) {
    // Fallback null to default value
    return body.contains(key) && !body.at(key).is_null()
        ? body.value(key, default_value)
        : default_value;
}

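// Usage sketch (illustrative key and default): reads a field with a fallback when
// the key is missing or null, e.g.
//   int n_predict = json_value(body, "n_predict", 128);   // 128 if absent or null
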
static inline void server_log(const char *level, const char *function, int line, const char *message, const nlohmann::ordered_json &extra) {
    std::stringstream ss_tid;
    ss_tid << std::this_thread::get_id();
    json log = nlohmann::ordered_json{
        {"tid",       ss_tid.str()},
        {"timestamp", time(nullptr)},
    };

    if (server_log_json) {
        log.merge_patch( {
            {"level",    level},
            {"function", function},
            {"line",     line},
            {"msg",      message},
        });

        if (!extra.empty()) {
            log.merge_patch(extra);
        }

        printf("%s\n", log.dump(-1, ' ', false, json::error_handler_t::replace).c_str());
    } else {
        char buf[1024];
        snprintf(buf, 1024, "%4s [%24s] %s", level, function, message);

        if (!extra.empty()) {
            log.merge_patch(extra);
        }
        std::stringstream ss;
        ss << buf << " |";
        for (const auto& el : log.items())
        {
            const std::string value = el.value().dump(-1, ' ', false, json::error_handler_t::replace);
            ss << " " << el.key() << "=" << value;
        }

        const std::string str = ss.str();
        printf("%.*s\n", (int)str.size(), str.data());
        fflush(stdout);
    }
}

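// Illustrative output of server_log (values made up for this example):
//   - with server_log_json == false (plain text, spacing approximate):
//       INFO [             my_function] my message | request_id=42
//   - with server_log_json == true: a single JSON object per line containing
//     tid, timestamp, level, function, line, msg and any fields passed in `extra`.
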
//
// chat template utils
//

// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
inline bool verify_custom_template(const std::string & tmpl) {
    llama_chat_message chat[] = {{"user", "test"}};
    int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
    return res >= 0;
}

// Format given chat. If tmpl is empty, we take the template from model metadata
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
    size_t alloc_size = 0;
    // vector holding all allocated strings to be passed to llama_chat_apply_template
    std::vector<std::string> str(messages.size() * 2);
    std::vector<llama_chat_message> chat(messages.size());

    for (size_t i = 0; i < messages.size(); ++i) {
        const auto & curr_msg = messages[i];
        str[i*2 + 0]    = json_value(curr_msg, "role",    std::string(""));
        str[i*2 + 1]    = json_value(curr_msg, "content", std::string(""));
        alloc_size     += str[i*2 + 1].length();
        chat[i].role    = str[i*2 + 0].c_str();
        chat[i].content = str[i*2 + 1].c_str();
    }

    const char * ptr_tmpl = tmpl.empty() ? nullptr : tmpl.c_str();
    std::vector<char> buf(alloc_size * 2);

    // run the first time to get the total output length
    int32_t res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), true, buf.data(), buf.size());

    // if it turns out that our buffer is too small, we resize it
    if ((size_t) res > buf.size()) {
        buf.resize(res);
        res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), true, buf.data(), buf.size());
    }

    const std::string formatted_chat(buf.data(), res);

    LOG_VERBOSE("formatted_chat", {{"text", formatted_chat.c_str()}});

    return formatted_chat;
}

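// Illustrative example (hypothetical; the exact text depends on the model's chat template):
//   messages = [ {"role": "user", "content": "Hello"} ]
//   format_chat(model, "", messages) could yield, for a ChatML-style template:
//   "<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n"
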
//
// base64 utils (TODO: move to common in the future)
//

static const std::string base64_chars =
             "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
             "abcdefghijklmnopqrstuvwxyz"
             "0123456789+/";

static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    std::vector<uint8_t> ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

            for (i = 0; (i < 3); i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

    if (i) {
        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}

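// Usage sketch (illustrative): base64_decode("aGVsbG8=") returns the bytes
// {'h','e','l','l','o'}; the server uses this for base64-encoded binary payloads
// (e.g. image data) received in requests.
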
//
// random string / id
//

static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
    std::mt19937 generator(rd());

    std::string result(32, ' ');

    for (int i = 0; i < 32; ++i) {
        result[i] = str[generator() % str.size()];
    }

    return result;
}

static std::string gen_chatcmplid() {
    std::stringstream chatcmplid;
    chatcmplid << "chatcmpl-" << random_string();

    return chatcmplid.str();
}

//
// other common utils
//

static size_t common_part(const std::vector<llama_token> & a, const std::vector<llama_token> & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}

    return i;
}

static bool ends_with(const std::string & str, const std::string & suffix) {
    return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

static size_t find_partial_stop_string(const std::string &stop, const std::string &text) {
    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }

    return std::string::npos;
}

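// Example (illustrative): find_partial_stop_string("</s>", "hello <") == 6, i.e. the
// generated text ends with a prefix ("<") of the stop string, so the caller can hold
// back the trailing characters until the match is resolved.
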
// TODO: reuse llama_detokenize
template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += llama_token_to_piece(ctx, *begin);
    }

    return ret;
}

// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);

    // if the size is 1 and first bit is 1, meaning it's a partial character
    // (size > 1 meaning it's already a known token)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}

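// Illustrative example: if a token detokenizes to the single byte 0xE6 (the first byte
// of a multi-byte UTF-8 character), the function returns the string "byte: \xe6"
// instead of emitting an invalid partial character.
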
struct completion_token_output {
    llama_token tok;
    std::string text_to_send;

    struct token_prob {
        llama_token tok;
        float prob;
    };

    std::vector<token_prob> probs;
};

// convert a vector of completion_token_output to json
static json probs_vector_to_json(const llama_context * ctx, const std::vector<completion_token_output> & probs) {
    json out = json::array();

    for (const auto & prob : probs) {
        json probs_for_token = json::array();

        for (const auto & p : prob.probs) {
            const std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
            probs_for_token.push_back(json {
                {"tok_str", tok_str},
                {"prob",    p.prob},
            });
        }

        const std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
        out.push_back(json {
            {"content", tok_str},
            {"probs",   probs_for_token},
        });
    }

    return out;
}

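// Resulting JSON shape, one entry per generated token (values illustrative):
//   [ {"content": "Hello", "probs": [ {"tok_str": "Hello", "prob": 0.42}, ... ]}, ... ]
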
//
// OAI utils
//

static json oaicompat_completion_params_parse(
    const struct llama_model * model,
    const json & body, /* openai api json semantics */
    const std::string & chat_template) {
    json llama_params;

    llama_params["__oaicompat"] = true;

    // Map OpenAI parameters to llama.cpp parameters
    //
    // For parameters that are defined by the OpenAI documentation (e.g.
    // temperature), we explicitly specify OpenAI's intended default; we
    // need to do that because sometimes OpenAI disagrees with llama.cpp
    //
    // https://platform.openai.com/docs/api-reference/chat/create
    llama_sampling_params default_sparams;
    llama_params["model"]             = json_value(body, "model", std::string("unknown"));
    llama_params["prompt"]            = format_chat(model, chat_template, body["messages"]);
    llama_params["cache_prompt"]      = json_value(body, "cache_prompt", false);
    llama_params["temperature"]       = json_value(body, "temperature", 0.0);
    llama_params["top_k"]             = json_value(body, "top_k", default_sparams.top_k);
    llama_params["top_p"]             = json_value(body, "top_p", 1.0);
    llama_params["n_predict"]         = json_value(body, "max_tokens", -1);
    llama_params["logit_bias"]        = json_value(body, "logit_bias", json::object());
    llama_params["frequency_penalty"] = json_value(body, "frequency_penalty", 0.0);
    llama_params["presence_penalty"]  = json_value(body, "presence_penalty", 0.0);
    llama_params["seed"]              = json_value(body, "seed", LLAMA_DEFAULT_SEED);
    llama_params["stream"]            = json_value(body, "stream", false);
    llama_params["mirostat"]          = json_value(body, "mirostat", default_sparams.mirostat);
    llama_params["mirostat_tau"]      = json_value(body, "mirostat_tau", default_sparams.mirostat_tau);
    llama_params["mirostat_eta"]      = json_value(body, "mirostat_eta", default_sparams.mirostat_eta);
    llama_params["penalize_nl"]       = json_value(body, "penalize_nl", default_sparams.penalize_nl);
    llama_params["typical_p"]         = json_value(body, "typical_p", default_sparams.typical_p);
    llama_params["repeat_last_n"]     = json_value(body, "repeat_last_n", default_sparams.penalty_last_n);
    llama_params["ignore_eos"]        = json_value(body, "ignore_eos", false);
    llama_params["tfs_z"]             = json_value(body, "tfs_z", default_sparams.tfs_z);
    llama_params["n_keep"]            = json_value(body, "n_keep", 0);

    if (body.contains("grammar")) {
        llama_params["grammar"] = json_value(body, "grammar", json::object());
    }

    if (body.contains("response_format")) {
        auto response_format = json_value(body, "response_format", json::object());
        if (response_format.contains("type")) {
            if (response_format["type"] == "json_object") {
                llama_params["json_schema"] = json_value(response_format, "schema", json::object());
            } else {
                throw std::runtime_error("response_format type not supported: " + response_format["type"].dump());
            }
        }
    }

    // Handle 'stop' field
    if (body.contains("stop") && body["stop"].is_string()) {
        llama_params["stop"] = json::array({body["stop"].get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // Ensure there is a ChatML-specific end sequence among the stop words
    llama_params["stop"].push_back("<|im_end|>");

    return llama_params;
}

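// Mapping sketch (illustrative values): an OpenAI-style body such as
//   {"messages": [...], "max_tokens": 64, "stop": "###", "response_format": {"type": "json_object", "schema": {...}}}
// is translated into llama.cpp parameters along the lines of
//   {"prompt": <formatted chat>, "n_predict": 64, "stop": ["###", "<|im_end|>"], "json_schema": {...}, ...}
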
static json format_final_response_oaicompat(const json & request, json result, const std::string & completion_id, bool streaming = false) {
    bool stopped_word        = result.count("stopped_word") != 0;
    bool stopped_eos         = json_value(result, "stopped_eos", false);
    int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
    int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
    std::string content      = json_value(result, "content", std::string(""));

    std::string finish_reason = "length";
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }

    json choices =
        streaming ? json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"delta", json::object()}}})
                  : json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"message", json{{"content", content},
                                                       {"role", "assistant"}}}}});

    std::time_t t = std::time(0);

    json res = json {
        {"choices", choices},
        {"created", t},
        {"model",
            json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", streaming ? "chat.completion.chunk" : "chat.completion"},
        {"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens",     num_prompt_tokens},
            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
        }},
        {"id", completion_id}
    };

    if (server_verbose) {
        res["__verbose"] = result;
    }

    if (result.contains("completion_probabilities")) {
        res["completion_probabilities"] = json_value(result, "completion_probabilities", json::array());
    }

    return res;
}

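// Illustrative (non-streaming) response, matching the OpenAI "chat.completion" object
// (values made up for this example):
//   {"id": "chatcmpl-...", "object": "chat.completion", "created": 1710000000,
//    "model": "gpt-3.5-turbo-0613",
//    "choices": [{"index": 0, "finish_reason": "stop",
//                 "message": {"role": "assistant", "content": "..."}}],
//    "usage": {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}}
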
// return value is vector as there is one case where we might need to generate two responses
static std::vector<json> format_partial_response_oaicompat(json result, const std::string & completion_id) {
    if (!result.contains("model") || !result.contains("oaicompat_token_ctr")) {
        return std::vector<json>({result});
    }

    bool first = json_value(result, "oaicompat_token_ctr", 0) == 0;
    std::string modelname = json_value(result, "model", std::string(DEFAULT_OAICOMPAT_MODEL));

    bool stopped_word   = json_value(result, "stopped_word",  false);
    bool stopped_eos    = json_value(result, "stopped_eos",   false);
    bool stopped_limit  = json_value(result, "stopped_limit", false);
    std::string content = json_value(result, "content",       std::string(""));

    std::string finish_reason;
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }
    if (stopped_limit) {
        finish_reason = "length";
    }

    std::time_t t = std::time(0);

    json choices;

    if (!finish_reason.empty()) {
        choices = json::array({json{{"finish_reason", finish_reason},
                                    {"index", 0},
                                    {"delta", json::object()}}});
    } else {
        if (first) {
            if (content.empty()) {
                choices = json::array({json{{"finish_reason", nullptr},
                                            {"index", 0},
                                            {"delta", json{{"role", "assistant"}}}}});
            } else {
                // We have to send this as two updates to conform to openai behavior
                json initial_ret = json{{"choices", json::array({json{
                                            {"finish_reason", nullptr},
                                            {"index", 0},
                                            {"delta", json{
                                                {"role", "assistant"}
                                            }}}})},
                                        {"created", t},
                                        {"id", completion_id},
                                        {"model", modelname},
                                        {"object", "chat.completion.chunk"}};

                json second_ret = json{
                                        {"choices", json::array({json{{"finish_reason", nullptr},
                                                                      {"index", 0},
                                                                      {"delta", json{
                                                                          {"content", content}}}
                                                                      }})},
                                        {"created", t},
                                        {"id", completion_id},
                                        {"model", modelname},
                                        {"object", "chat.completion.chunk"}};

                return std::vector<json>({initial_ret, second_ret});
            }
        } else {
            // Some idiosyncrasy in task processing logic makes several trailing calls
            // with empty content; we ignore these at the callee site.
            if (content.empty()) {
                return std::vector<json>({json::object()});
            }

            choices = json::array({json{
                {"finish_reason", nullptr},
                {"index", 0},
                {"delta",
                    json{
                        {"content", content},
                    }},
            }});
        }
    }

    json ret = json {
        {"choices", choices},
        {"created", t},
        {"id",      completion_id},
        {"model",   modelname},
        {"object",  "chat.completion.chunk"}
    };

    return std::vector<json>({ret});
}

static json format_embeddings_response_oaicompat(const json & request, const json & embeddings) {
    json data = json::array();
    int i = 0;
    for (auto & elem : embeddings) {
        data.push_back(json{
            {"embedding", json_value(elem, "embedding", json::array())},
            {"index",     i++},
            {"object",    "embedding"}
        });
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json {
            {"prompt_tokens", 0},
            {"total_tokens",  0}
        }},
        {"data", data}
    };

    return res;
}

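// Illustrative response shape, following the OpenAI embeddings API (values made up):
//   {"object": "list", "model": "gpt-3.5-turbo-0613",
//    "data": [{"object": "embedding", "index": 0, "embedding": [0.01, -0.02, ...]}],
//    "usage": {"prompt_tokens": 0, "total_tokens": 0}}
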
static json format_tokenizer_response(const std::vector<llama_token> & tokens) {
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(const std::string & content) {
    return json {
        {"content", content}
    };
}

static json format_error_response(const std::string & message, const enum error_type type) {
    std::string type_str;
    int code = 500;
    switch (type) {
        case ERROR_TYPE_INVALID_REQUEST:
            type_str = "invalid_request_error";
            code = 400;
            break;
        case ERROR_TYPE_AUTHENTICATION:
            type_str = "authentication_error";
            code = 401;
            break;
        case ERROR_TYPE_NOT_FOUND:
            type_str = "not_found_error";
            code = 404;
            break;
        case ERROR_TYPE_SERVER:
            type_str = "server_error";
            code = 500;
            break;
        case ERROR_TYPE_PERMISSION:
            type_str = "permission_error";
            code = 403;
            break;
        case ERROR_TYPE_NOT_SUPPORTED:
            type_str = "not_supported_error";
            code = 501;
            break;
        case ERROR_TYPE_UNAVAILABLE:
            type_str = "unavailable_error";
            code = 503;
            break;
    }
    return json {
        {"code",    code},
        {"message", message},
        {"type",    type_str},
    };
}

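// Example (message text illustrative): format_error_response("model not found", ERROR_TYPE_NOT_FOUND)
// returns {"code": 404, "message": "model not found", "type": "not_found_error"}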