// Chat template utils
//
+std::string common_get_builtin_chat_template(const struct llama_model * model) {
+    static const char * template_key = "tokenizer.chat_template";
+    // call with NULL buffer to get the total size of the string
+    int32_t res = llama_model_meta_val_str(model, template_key, NULL, 0);
+    if (res > 0) {
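+        // second call fills the buffer; the trailing '\0' is dropped when building the string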
+        std::vector<char> model_template(res + 1, 0);
+        llama_model_meta_val_str(model, template_key, model_template.data(), model_template.size());
+        return std::string(model_template.data(), model_template.size() - 1);
+    }
+    return "";
+}
+
bool common_chat_verify_template(const std::string & tmpl) {
    llama_chat_message chat[] = {{"user", "test"}};
    int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
    std::string content;
};
+// Get the built-in chat template for the model. Returns an empty string if none is present.
+std::string common_get_builtin_chat_template(const struct llama_model * model);
+
// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
bool common_chat_verify_template(const std::string & tmpl);
        return true;
    }
-    bool validate_model_chat_template() const {
-        std::vector<char> model_template(2048, 0); // longest known template is about 1200 bytes
-        std::string template_key = "tokenizer.chat_template";
-        int32_t res = llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
-        if (res >= 0) {
-            llama_chat_message chat[] = {{"user", "test"}};
-            std::string tmpl = std::string(model_template.data(), model_template.size());
-            int32_t chat_res = llama_chat_apply_template(model, tmpl.c_str(), chat, 1, true, nullptr, 0);
-            return chat_res > 0;
-        }
-        return false;
+    bool validate_builtin_chat_template() const {
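+        // passing a null template makes llama_chat_apply_template fall back to the model's built-in template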
+        llama_chat_message chat[] = {{"user", "test"}};
+        int32_t chat_res = llama_chat_apply_template(model, nullptr, chat, 1, true, nullptr, 0);
+        return chat_res > 0;
    }
    void init() {
            { "default_generation_settings", ctx_server.default_generation_settings_for_props },
            { "total_slots", ctx_server.params_base.n_parallel },
            { "model_path", ctx_server.params_base.model },
-           { "chat_template", llama_get_chat_template(ctx_server.model) },
+           { "chat_template", common_get_builtin_chat_template(ctx_server.model) },
            { "build_info", build_info },
        };
    // if a custom chat template is not supplied, we will use the one that comes with the model (if any)
    if (params.chat_template.empty()) {
-        if (!ctx_server.validate_model_chat_template()) {
+        if (!ctx_server.validate_builtin_chat_template()) {
            LOG_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n", __func__);
            params.chat_template = "chatml";
        }
    }
    // print sample chat example to make it clear which template is used
-    LOG_INF("%s: chat template, built_in: %d, chat_example: '%s'\n", __func__, params.chat_template.empty(), common_chat_format_example(ctx_server.model, params.chat_template).c_str());
+    LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
+        params.chat_template.empty() ? "(built-in)" : params.chat_template.c_str(),
+        common_chat_format_example(ctx_server.model, params.chat_template).c_str());
    ctx_server.queue_tasks.on_new_task(std::bind(
        &server_context::process_single_task, &ctx_server, std::placeholders::_1));
    assert match_regex("(Suddenly)+", res.choices[0].message.content)
+def test_chat_template():
+    global server
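+    # use the predefined "llama3" template instead of the model's built-in one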
+    server.chat_template = "llama3"
+    server.debug = True  # to get the "__verbose" object in the response
+    server.start()
+    res = server.make_request("POST", "/chat/completions", data={
+        "max_tokens": 8,
+        "messages": [
+            {"role": "system", "content": "Book"},
+            {"role": "user", "content": "What is the best book"},
+        ]
+    })
+    assert res.status_code == 200
+    assert "__verbose" in res.body
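+    # the llama3 template wraps each message in <|start_header_id|>...<|eot_id|> markers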
+    assert res.body["__verbose"]["prompt"] == "<s> <|start_header_id|>system<|end_header_id|>\n\nBook<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the best book<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
+
+
@pytest.mark.parametrize("response_format,n_predicted,re_content", [
    ({"type": "json_object", "schema": {"const": "42"}}, 6, "\"42\""),
    ({"type": "json_object", "schema": {"items": [{"type": "integer"}]}}, 10, "[ -3000 ]"),
    draft_min: int | None = None
    draft_max: int | None = None
    no_webui: bool | None = None
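+    # custom chat template passed to the server via --chat-template (e.g. "llama3")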
+    chat_template: str | None = None
    # session variables
    process: subprocess.Popen | None = None
            server_args.extend(["--draft-min", self.draft_min])
        if self.no_webui:
            server_args.append("--no-webui")
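+        # forward the custom chat template, if one was set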
+        if self.chat_template:
+            server_args.extend(["--chat-template", self.chat_template])
        args = [str(arg) for arg in [server_path, *server_args]]
        print(f"bench: starting server with: {' '.join(args)}")
    return formatted_chat;
}
-static std::string llama_get_chat_template(const struct llama_model * model) {
-    std::string template_key = "tokenizer.chat_template";
-    // call with NULL buffer to get the total size of the string
-    int32_t res = llama_model_meta_val_str(model, template_key.c_str(), NULL, 0);
-    if (res < 2) {
-        return "";
-    } else {
-        std::vector<char> model_template(res + 1, 0);
-        llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
-        return std::string(model_template.data(), model_template.size() - 1);
-    }
-}
-
//
// base64 utils (TODO: move to common in the future)
//