From cb4d86c4d723af87d3d7e3177e9485f200391384 Mon Sep 17 00:00:00 2001
From: Bjarke Viksøe <164612031+bviksoe@users.noreply.github.com>
Date: Sun, 7 Jul 2024 11:10:38 +0200
Subject: [PATCH] server: Retrieve prompt template in /props (#8337)

* server: Retrieve prompt template in /props

This PR adds the following:
- Expose the model's Jinja2 prompt template in the /props endpoint.
- Change the log level from Error to Warning for the template-mismatch
  warning.

The front end stands a better chance of executing the Jinja template
format correctly; the server is currently just guessing it.

Ideally this should have been inside a JSON block that exposes the same
key/value pairs as listed during startup by the "llm_load_print_meta"
function.

* Make string buffer dynamic

* Add doc and better string handling

* Using chat_template naming convention

* Use intermediate vector for string assignment

---
 examples/server/README.md  |  4 +++-
 examples/server/server.cpp | 13 +++++++++++--
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/examples/server/README.md b/examples/server/README.md
index aa4cbbe63..cb45ee06d 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -366,7 +366,8 @@ Notice that each `probs` is an array of length `n_probs`.
   "assistant_name": "",
   "user_name": "",
   "default_generation_settings": { ... },
-  "total_slots": 1
+  "total_slots": 1,
+  "chat_template": ""
 }
 ```
@@ -374,6 +375,7 @@ Notice that each `probs` is an array of length `n_probs`.
 - `user_name` - the required anti-prompt to generate the prompt in case you have specified a system prompt for all slots.
 - `default_generation_settings` - the default generation settings for the `/completion` endpoint, which has the same fields as the `generation_settings` response object from the `/completion` endpoint.
 - `total_slots` - the total number of slots for process requests (defined by `--parallel` option)
+- `chat_template` - the model's original Jinja2 prompt template
 
 - **POST** `/v1/chat/completions`: OpenAI-compatible Chat Completions API. Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming mode are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. Only models with a [supported chat template](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, the ChatML template will be used.

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index d7fb61812..47bea1591 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -2605,7 +2605,7 @@ int main(int argc, char ** argv) {
     // if a custom chat template is not supplied, we will use the one that comes with the model (if any)
     if (params.chat_template.empty()) {
         if (!ctx_server.validate_model_chat_template()) {
-            LOG_ERROR("The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {});
+            LOG_WARNING("The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {});
             params.chat_template = "chatml";
         }
     }
@@ -2967,11 +2967,20 @@ int main(int argc, char ** argv) {
     };
 
     const auto handle_props = [&ctx_server](const httplib::Request & req, httplib::Response & res) {
+        std::string template_key = "tokenizer.chat_template", curr_tmpl;
+        int32_t tlen = llama_model_meta_val_str(ctx_server.model, template_key.c_str(), nullptr, 0);
+        if (tlen > 0) {
+            std::vector<char> curr_tmpl_buf(tlen + 1, 0);
+            if (llama_model_meta_val_str(ctx_server.model, template_key.c_str(), curr_tmpl_buf.data(), curr_tmpl_buf.size()) == tlen) {
+                curr_tmpl = std::string(curr_tmpl_buf.data(), tlen);
+            }
+        }
         res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
         json data = {
             { "system_prompt",               ctx_server.system_prompt.c_str() },
             { "default_generation_settings", ctx_server.default_generation_settings_for_props },
-            { "total_slots",                 ctx_server.params.n_parallel }
+            { "total_slots",                 ctx_server.params.n_parallel },
+            { "chat_template",               curr_tmpl.c_str() }
         };
 
         res.set_content(data.dump(), "application/json; charset=utf-8");
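For context, the retrieval logic in `handle_props` uses the standard two-call pattern for `llama_model_meta_val_str`: a first call with a null buffer reports the length of the value, and a second call fills a buffer sized from that result. The sketch below restates the pattern as a standalone helper under those same assumptions; the function name `model_meta_string` is illustrative and not part of the patch.

```cpp
// Sketch only: fetch a GGUF metadata string (e.g. "tokenizer.chat_template")
// from a loaded model, mirroring the handle_props logic in the patch above.
// The helper name model_meta_string is hypothetical.
#include <string>
#include <vector>

#include "llama.h"

static std::string model_meta_string(const llama_model * model, const char * key) {
    // First call with a null buffer: returns the value's length,
    // or a negative value if the key is absent.
    const int32_t len = llama_model_meta_val_str(model, key, nullptr, 0);
    if (len <= 0) {
        return "";
    }
    // Second call fills a buffer sized from the first result
    // (+1 leaves room for the null terminator).
    std::vector<char> buf(len + 1, 0);
    if (llama_model_meta_val_str(model, key, buf.data(), buf.size()) != len) {
        return "";
    }
    // Build the string from the known length rather than trusting the terminator.
    return std::string(buf.data(), len);
}
```

The intermediate `std::vector<char>` (see the "Make string buffer dynamic" and "Use intermediate vector for string assignment" commits in the squash list) keeps the read correctly sized for arbitrarily long templates instead of relying on a fixed-size stack buffer.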