case COMMON_CHAT_FORMAT_COMMAND_R7B: return "Command R7B";
case COMMON_CHAT_FORMAT_GRANITE: return "Granite";
case COMMON_CHAT_FORMAT_GPT_OSS: return "GPT-OSS";
+ case COMMON_CHAT_FORMAT_SEED_OSS: return "Seed-OSS";
default:
throw std::runtime_error("Unknown chat format");
}
}
}
+static void common_chat_parse_seed_oss(common_chat_msg_parser & builder) {
+ // Parse thinking tags first - this handles the main reasoning content
+ builder.try_parse_reasoning("<seed:think>", "</seed:think>");
+
+ if (!builder.syntax().parse_tool_calls) {
+ builder.add_content(builder.consume_rest());
+ return;
+ }
+
+ // Parse tool calls - Seed-OSS uses <seed:tool_call> format
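+ // Expected wire format, as prescribed by the chat template:
+ //   <seed:tool_call>
+ //   <function=example_function_name>
+ //   <parameter=example_parameter_1>value_1</parameter>
+ //   </function>
+ //   </seed:tool_call>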
+ static const common_regex tool_call_begin_regex("<seed:tool_call>");
+ static const common_regex tool_call_end_regex("</seed:tool_call>");
+ static const common_regex function_regex("<function=([^>]+)>");
+ static const common_regex param_regex("<parameter=([^>]+)>");
+
+ while (auto tool_res = builder.try_find_regex(tool_call_begin_regex)) {
+ builder.consume_spaces(); // Consume whitespace after <seed:tool_call>
+
+ // Look for function call inside tool call, ignore any content before it
+ if (auto func_res = builder.try_find_regex(function_regex, std::string::npos, false)) {
+ auto function_name = builder.str(func_res->groups[1]);
+
+ // Parse Seed-OSS parameters <parameter=name>value</parameter>
+ json args = json::object();
+ // Parse all parameters
+ while (auto param_res = builder.try_find_regex(param_regex, std::string::npos, false)) {
+ // again, ignore noise around parameters
+ auto param_name = builder.str(param_res->groups[1]);
+ builder.move_to(param_res->groups[0].end);
+ builder.consume_spaces(); // Consume whitespace after parameter
+ auto saved_pos = builder.pos();
+ if (auto param_parse = builder.try_find_literal("</parameter>")) {
+ auto param = param_parse->prelude;
+ builder.move_to(saved_pos);
+ try {
+ if (auto json_res = builder.try_consume_json()) {
+ args[param_name] = json_res->json;
+ } else {
+ args[param_name] = param;
+ }
+ } catch (json::exception &) {
+ args[param_name] = param;
+ }
+ } else {
+ throw common_chat_msg_partial_exception("Incomplete tool parameter");
+ }
+ }
+ // Look for closing function tag
+ auto end_func = builder.try_find_literal("</function>");
+ if (end_func) {
+ builder.move_to(end_func->groups[0].end);
+ builder.consume_spaces(); // Consume whitespace after </function>
+
+ // Add the tool call with parsed arguments, but only if the closing </function> literal was fully matched
+ auto eaten_fragment = builder.input().substr(end_func->groups[0].begin, end_func->groups[0].end - end_func->groups[0].begin);
+ auto funlen = std::string("</function>").length();
+ if (eaten_fragment.length() >= funlen && eaten_fragment.substr(0, funlen) == std::string("</function>")) {
+ if (!builder.add_tool_call(function_name, "", args.dump())) {
+ throw common_chat_msg_partial_exception("Incomplete tool call");
+ }
+ } else {
+ throw common_chat_msg_partial_exception("Incomplete tool call");
+ }
+ } else {
+ throw common_chat_msg_partial_exception("Incomplete tool call");
+ }
+ // Look for closing tool call tag
+ if (auto end_tool = builder.try_find_regex(tool_call_end_regex, std::string::npos, false)) {
+ builder.move_to(end_tool->groups[0].end);
+ builder.consume_spaces(); // Consume trailing whitespace after tool call
+ } else {
+ throw common_chat_msg_partial_exception("Incomplete tool call");
+ }
+ } else {
+ // No function found - don't consume content here, let it be handled at the end
+ break;
+ }
+ }
+
+ // Consume any remaining whitespace after all tool call processing
+ builder.consume_spaces();
+ auto remaining = builder.consume_rest();
+ // If there's any non-whitespace content remaining, add it as content
+ if (!string_strip(remaining).empty()) {
+ builder.add_content(remaining);
+ }
+}
+
static common_chat_params common_chat_params_init_without_tools(const common_chat_template & tmpl, const struct templates_params & inputs) {
common_chat_params data;
data.prompt = apply(tmpl, inputs);
return data;
}
+static common_chat_params common_chat_params_init_seed_oss(
+ const common_chat_template & tmpl,
+ templates_params & params,
+ const common_chat_templates_inputs & inputs)
+{
+ common_chat_params data;
+ data.prompt = apply(tmpl, params);
+ data.format = COMMON_CHAT_FORMAT_SEED_OSS;
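+ // The template may leave the prompt ending in an open <seed:think> tag: close it
+ // immediately when thinking is disabled, otherwise mark the reasoning block as forced open.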
+ if (string_ends_with(data.prompt, "<seed:think>")) {
+ if (!inputs.enable_thinking) {
+ data.prompt += "</seed:think>";
+ } else {
+ data.thinking_forced_open = true;
+ }
+ }
+
+ if (params.tools.is_array() && !params.tools.empty()) {
+ data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
+ data.grammar = build_grammar([&](const common_grammar_builder & builder) {
+ std::vector<std::string> tool_rules;
+ foreach_function(params.tools, [&](const json & tool) {
+ const auto & function = tool.at("function");
+ std::string name = function.at("name");
+ auto parameters = function.at("parameters");
+ builder.resolve_refs(parameters);
+
+ // Create rule for Seed-OSS function call format
+ std::string param_rules;
+ if (parameters.contains("properties")) {
+ for (const auto & [key, value] : parameters.at("properties").items()) {
+ param_rules += "\"<parameter=" + key + ">\"" + builder.add_schema(name + "-arg-" + key, value) +
+ "\"</parameter>\"";
+ }
+ }
+
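+ // One rule per tool, covering the whole <seed:tool_call>...</seed:tool_call> block.
+ // Note: every parameter listed under "properties" is required by the grammar, in schema order.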
+ tool_rules.push_back(builder.add_rule(name + "-call",
+ "\"<seed:tool_call>\" space \"<function=" + name + ">\" space " +
+ param_rules +
+ " \"</function>\" space \"</seed:tool_call>\""));
+ });
+
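+ // With a lazy grammar (tool choice not required), constrained sampling only
+ // activates once the model emits the <seed:tool_call> trigger word.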
+ data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<seed:tool_call>" });
+
+ data.preserved_tokens = {
+ "<seed:think>", "</seed:think>", "<seed:tool_call>", "</seed:tool_call>",
+ "<function=", "</function>", "<parameter=", "</parameter>",
+ };
+
+ builder.add_rule("root", string_join(tool_rules, " | "));
+ });
+ }
+ return data;
+}
+
static common_chat_params common_chat_templates_apply_jinja(
- const struct common_chat_templates * tmpls,
+ const struct common_chat_templates * tmpls,
const struct common_chat_templates_inputs & inputs)
{
templates_params params;
return common_chat_params_init_gpt_oss(tmpl, params);
}
+ // Seed-OSS
+ if (src.find("<seed:think>") != std::string::npos) {
+ return common_chat_params_init_seed_oss(tmpl, params, inputs);
+ }
+
// Use generic handler when mixing tools + JSON schema.
// TODO: support that mix in handlers below.
if ((params.tools.is_array() && params.json_schema.is_object())) {
case COMMON_CHAT_FORMAT_GPT_OSS:
common_chat_parse_gpt_oss(builder);
break;
+ case COMMON_CHAT_FORMAT_SEED_OSS:
+ common_chat_parse_seed_oss(builder);
+ break;
default:
throw std::runtime_error(std::string("Unsupported format: ") + common_chat_format_name(builder.syntax().format));
}
COMMON_CHAT_FORMAT_COMMAND_R7B,
COMMON_CHAT_FORMAT_GRANITE,
COMMON_CHAT_FORMAT_GPT_OSS,
+ COMMON_CHAT_FORMAT_SEED_OSS,
COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats
};
--- /dev/null
+{# ---------- special token variables ---------- #}
+{%- set bos_token = '<seed:bos>' -%}
+{%- set eos_token = '<seed:eos>' -%}
+{%- set pad_token = '<seed:pad>' -%}
+{%- set toolcall_begin_token = '<seed:tool_call>' -%}
+{%- set toolcall_end_token = '</seed:tool_call>' -%}
+{%- set think_begin_token = '<seed:think>' -%}
+{%- set think_end_token = '</seed:think>' -%}
+{%- set budget_begin_token = '<seed:cot_budget_reflect>' -%}
+{%- set budget_end_token = '</seed:cot_budget_reflect>' -%}
+{# -------------- reflection-interval lookup -------------- #}
+{%- if not thinking_budget is defined %}
+{%- set thinking_budget = -1 -%}
+{%- endif -%}
+{%- set budget_reflections_v05 = {
+ 0: 0,
+ 512: 128,
+ 1024: 256,
+ 2048: 512,
+ 4096: 512,
+ 8192: 1024,
+ 16384: 1024
+} -%}
+{# Find the first budget tier that is greater than or equal to the thinking_budget. #}
+{%- set ns = namespace(interval = None) -%}
+{%- for k, v in budget_reflections_v05 | dictsort -%}
+ {%- if ns.interval is none and thinking_budget <= k -%}
+ {%- set ns.interval = v -%}
+ {%- endif -%}
+{%- endfor -%}
+{# If it exceeds the largest tier, use the interval of the last tier #}
+{%- if ns.interval is none -%}
+ {%- set ns.interval = budget_reflections_v05[16384] -%}
+{%- endif -%}
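+{# Example: thinking_budget = 1000 falls under the 1024 tier, so the reflection interval is 256. #}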
+{# ---------- Preprocess the system message ---------- #}
+{%- if messages[0]["role"] == "system" %}
+{%- set system_message = messages[0]["content"] %}
+{%- set loop_messages = messages[1:] %}
+{%- else %}
+{%- set loop_messages = messages %}
+{%- endif %}
+{# ---------- Ensure tools exist ---------- #}
+{%- if not tools is defined or tools is none %}
+{%- set tools = [] %}
+{%- endif %}
+{# tools2doc.jinja #}
+{%- macro py_type(t) -%}
+ {%- if t == "string" -%}str
+ {%- elif t in ("number", "integer") -%}int
+ {%- elif t == "boolean" -%}bool
+ {%- elif t == "array" -%}list
+ {%- else -%}Any{%- endif -%}
+{%- endmacro -%}
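+{# e.g. py_type("integer") -> int, py_type("array") -> list, unknown types -> Any #}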
+{# ---------- Output the system block ---------- #}
+{%- if system_message is defined %}
+{{ bos_token + "system\n" + system_message }}
+{%- else %}
+{%- if tools is iterable and tools | length > 0 %}
+{{ bos_token + "system\nYou are Doubao, a helpful AI assistant. You may call one or more functions to assist with the user query." }}
+{%- endif %}
+{%- endif %}
+{%- if use_json_tooldef is defined and use_json_tooldef %}
+
+{{"Tool List:\nYou are authorized to use the following tools (described in JSON Schema format). Before performing any task, you must decide how to call them based on the descriptions and parameters of these tools."}}
+{{ tools | tojson(ensure_ascii=False) }}
+{%- else %}
+{%- for item in tools if item.type == "function" %}
+
+
+Function:
+def {{ item.function.name }}(
+{%- for name, spec in item.function.parameters.properties.items() %}
+ {{- name }}: {{ py_type(spec.type) }}{% if not loop.last %},{% endif %}
+{%- endfor %}):
+ """
+ {{ item.function.description | trim }}
+
+ {# ---------- Args ---------- #}
+ {%- if item.function.parameters.properties %}
+ Args:
+ {%- for name, spec in item.function.parameters.properties.items() %}
+
+ - {{ name }} ({{ py_type(spec.type) }})
+ {%- if name in item.function.parameters.required %} [必填]{% else %} [选填]{% endif %}:
+ {{- " " ~ (spec.description or "") }}
+ {%- endfor %}
+ {%- endif %}
+
+ {# ---------- Returns ---------- #}
+ {%- if item.function.returns is defined
+ and item.function.returns.properties is defined
+ and item.function.returns.properties %}
+ Returns:
+ {%- for name, spec in item.function.returns.properties.items() %}
+
+ - {{ name }} ({{ py_type(spec.type) }}):
+ {{- " " ~ (spec.description or "") }}
+ {%- endfor %}
+ {%- endif %}
+
+ """
+{%- endfor %}
+{%- endif %}
+{%- if tools is iterable and tools | length > 0 %}
+
+{{"工具调用请遵循如下格式:\n<seed:tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>value_1</parameter>\n<parameter=example_parameter_2>This is the value for the second parameter\nthat can span\nmultiple lines</parameter>\n</function>\n</seed:tool_call>\n"}}
+{%- endif %}
+{# End the system block line #}
+{%- if system_message is defined or tools is iterable and tools | length > 0 %}
+{{ eos_token }}
+{%- endif %}
+{# ---------- Thinking Budget ---------- #}
+{%- if thinking_budget is defined %}
+{%- if thinking_budget == 0 %}
+{{ bos_token+"system" }}
+{{ "You are an intelligent assistant that can answer questions in one step without the need for reasoning and thinking, that is, your thinking budget is 0. Next, please skip the thinking process and directly start answering the user's questions." }}
+{{ eos_token }}
+{%- elif not thinking_budget == -1 %}
+{{ bos_token+"system" }}
+{{ "You are an intelligent assistant with reflective ability. In the process of thinking and reasoning, you need to strictly follow the thinking budget, which is "}}{{thinking_budget}}{{". That is, you need to complete your thinking within "}}{{thinking_budget}}{{" tokens and start answering the user's questions. You will reflect on your thinking process every "}}{{ns.interval}}{{" tokens, stating how many tokens have been used and how many are left."}}
+{{ eos_token }}
+{%- endif %}
+{%- endif %}
+{# ---------- List the historical messages one by one ---------- #}
+{%- for message in loop_messages %}
+{%- if message.role == "assistant"
+ and message.tool_calls is defined
+ and message.tool_calls is iterable
+ and message.tool_calls | length > 0 %}
+{{ bos_token + message.role }}
+{%- if message.reasoning_content is defined and message.reasoning_content is string and message.reasoning_content | trim | length > 0 %}
+{{ "\n" + think_begin_token + message.reasoning_content | trim + think_end_token }}
+{%- endif %}
+{%- if message.content is defined and message.content is string and message.content | trim | length > 0 %}
+{{ "\n" + message.content | trim + "\n" }}
+{%- endif %}
+{%- for tool_call in message.tool_calls %}
+{%- if tool_call.function is defined %}{% set tool_call = tool_call.function %}{% endif %}
+{{ "\n" + toolcall_begin_token + "\n<function=" + tool_call.name + ">\n" }}
+{%- if tool_call.arguments is defined %}
+{%- for arg_name, arg_value in tool_call.arguments | items %}
+{{ "<parameter=" + arg_name + ">" }}
+{%- set arg_value = arg_value if arg_value is string else arg_value | string %}
+{{ arg_value+"</parameter>\n" }}
+{%- endfor %}
+{%- endif %}
+{{ "</function>\n" + toolcall_end_token }}
+{%- endfor %}
+{{ eos_token }}
+{%- elif message.role in ["user", "system"] %}
+{{ bos_token + message.role + "\n" + message.content + eos_token }}
+{%- elif message.role == "assistant" %}
+{{ bos_token + message.role }}
+{%- if message.reasoning_content is defined and message.reasoning_content is string and message.reasoning_content | trim | length > 0 %}
+{{ "\n" + think_begin_token + message.reasoning_content | trim + think_end_token }}
+{%- endif %}
+{%- if message.content is defined and message.content is string and message.content | trim | length > 0 %}
+{{ "\n" + message.content | trim + eos_token }}
+{%- endif %}
+{# Include the tool role #}
+{%- else %}
+{{ bos_token + message.role + "\n" + message.content + eos_token }}
+{%- endif %}
+{%- endfor %}
+{# ---------- Control the model to start continuation ---------- #}
+{%- if add_generation_prompt %}
+{{ bos_token+"assistant\n" }}
+{%- if thinking_budget == 0 %}
+{{ think_begin_token + "\n" + budget_begin_token + "The current thinking budget is 0, so I will directly start answering the question." + budget_end_token + "\n" + think_end_token }}
+{%- endif %}
+{%- endif %}
\ No newline at end of file
/* .reasoning_format = */ COMMON_REASONING_FORMAT_AUTO,
}));
}
+ {
+ // Seed-OSS format tests
+ auto tmpls = read_templates("models/templates/ByteDance-Seed-OSS.jinja");
+ std::vector<std::string> end_tokens{ "<seed:eos>" };
+
+ assert_equals(COMMON_CHAT_FORMAT_SEED_OSS, common_chat_templates_apply(tmpls.get(), inputs_no_tools).format);
+ assert_equals(COMMON_CHAT_FORMAT_SEED_OSS, common_chat_templates_apply(tmpls.get(), inputs_tools).format);
+
+ test_templates(tmpls.get(), end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
+
+ // Test simple reasoning content
+ assert_msg_equals(
+ simple_assist_msg("Hello, world!", "I'm thinking about the answer"),
+ common_chat_parse(
+ "<seed:think>I'm thinking about the answer</seed:think>Hello, world!",
+ /* is_partial= */ false,
+ {
+ /* .format = */ COMMON_CHAT_FORMAT_SEED_OSS,
+ /* .reasoning_format = */ COMMON_REASONING_FORMAT_DEEPSEEK,
+ }));
+
+ // Test budget reflection tags
+ common_chat_msg msg_budget_reflect;
+ msg_budget_reflect.role = "assistant";
+ msg_budget_reflect.content = "<seed:cot_budget_reflect>Token usage: 45/1000\nI should continue thinking to find the best solution.</seed:cot_budget_reflect>I need to calculate this step by step.";
+ msg_budget_reflect.reasoning_content = "Token usage: 45/1000\nI should continue thinking to find the best solution.";
+ assert_msg_equals(
+ msg_budget_reflect,
+ common_chat_parse(
+ "<seed:think>Token usage: 45/1000\nI should continue thinking to find the best solution.</seed:think>"
+ "<seed:cot_budget_reflect>Token usage: 45/1000\nI should continue thinking to find the best solution.</seed:cot_budget_reflect>"
+ "I need to calculate this step by step.",
+ /* is_partial= */ false,
+ {
+ /* .format = */ COMMON_CHAT_FORMAT_SEED_OSS,
+ /* .reasoning_format = */ COMMON_REASONING_FORMAT_DEEPSEEK,
+ }));
+
+ // Test tool calls with Seed-OSS format
+ common_chat_msg msg_tool_call;
+ msg_tool_call.role = "assistant";
+ msg_tool_call.tool_calls.push_back({"calculate_sum", "{\"numbers\": [1, 2, 3]}", ""});
+ assert_msg_equals(
+ msg_tool_call,
+ common_chat_parse(
+ "<seed:tool_call>\n"
+ "<function=calculate_sum>\n"
+ "<parameter=numbers>[1, 2, 3]</parameter>\n"
+ "</function>\n"
+ "</seed:tool_call>",
+ /* is_partial= */ false,
+ {COMMON_CHAT_FORMAT_SEED_OSS}));
+
+ // Test reasoning + tool call combination
+ common_chat_msg msg_reasoning_tool;
+ msg_reasoning_tool.role = "assistant";
+ msg_reasoning_tool.content = "";
+ msg_reasoning_tool.reasoning_content = "I need to calculate the sum of these numbers";
+ msg_reasoning_tool.tool_calls.push_back({"calculate_sum", "{\"numbers\": [1, 2, 3]}", ""});
+ assert_msg_equals(
+ msg_reasoning_tool,
+ common_chat_parse(
+ "<seed:think>I need to calculate the sum of these numbers</seed:think>"
+ "<seed:tool_call>\n"
+ "<function=calculate_sum>\n"
+ "<parameter=numbers>[1, 2, 3]</parameter>\n"
+ "</function>\n"
+ "</seed:tool_call>",
+ /* is_partial= */ false,
+ {
+ /* .format = */ COMMON_CHAT_FORMAT_SEED_OSS,
+ /* .reasoning_format = */ COMMON_REASONING_FORMAT_DEEPSEEK,
+ }));
+
+ // Test deltas: the number of tool calls in partial parses should never decrease
+ std::string tool_msg = "<seed:tool_call>\n"
+ "<function=fun>\n"
+ "<parameter=smth>[1, 2, 3]</parameter>\n"
+ "</function>";
+ std::size_t previous_tool_calls = 0;
+ for (std::size_t i = std::string("<seed:tool_call>").length(); i < tool_msg.length() - 1; i++) {
+ auto partial = tool_msg.substr(0, i);
+ auto partial_res = common_chat_parse(partial, true, { COMMON_CHAT_FORMAT_SEED_OSS, COMMON_REASONING_FORMAT_DEEPSEEK });
+ if (partial_res.tool_calls.size() < previous_tool_calls) {
+ throw std::runtime_error("Tool call size decreased on partial: " + partial + " from " + std::to_string(previous_tool_calls) + " to " + std::to_string(partial_res.tool_calls.size()));
+ }
+ previous_tool_calls = partial_res.tool_calls.size();
+ }
+
+ // Test multiple parameters in tool call
+ common_chat_msg msg_multi_param;
+ msg_multi_param.role = "assistant";
+ msg_multi_param.tool_calls.push_back({"process_data", "{\"input\": \"test\", \"format\": \"json\"}", ""});
+ assert_msg_equals(
+ msg_multi_param,
+ common_chat_parse(
+ "<seed:tool_call>\n"
+ "<function=process_data>\n"
+ "<parameter=input>test</parameter>\n"
+ "<parameter=format>json</parameter>\n"
+ "</function>\n"
+ "</seed:tool_call>",
+ /* is_partial= */ false,
+ {COMMON_CHAT_FORMAT_SEED_OSS}));
+
+ // Test partial parsing for incomplete tool call - don't actually add the call until parsing parameters is done
+ assert_msg_equals(
+ simple_assist_msg("", ""),
+ common_chat_parse(
+ "<seed:tool_call>\n"
+ "<function=calculate_sum>\n"
+ "<parameter=numbers>[1,\n",
+ /* is_partial= */ true,
+ {COMMON_CHAT_FORMAT_SEED_OSS}));
+
+ // Test incomplete reasoning tag
+ assert_msg_equals(
+ simple_assist_msg("", "I was thinking"),
+ common_chat_parse(
+ "<seed:think>I was thinking",
+ /* is_partial= */ true,
+ {
+ /* .format = */ COMMON_CHAT_FORMAT_SEED_OSS,
+ /* .reasoning_format = */ COMMON_REASONING_FORMAT_DEEPSEEK,
+ }));
+
+ // Test content without reasoning
+ assert_msg_equals(
+ simple_assist_msg("This is a simple response without reasoning."),
+ common_chat_parse(
+ "This is a simple response without reasoning.",
+ /* is_partial= */ false,
+ {COMMON_CHAT_FORMAT_SEED_OSS}));
+ }
}
static void test_msg_diffs_compute() {