From: Daniel Bevenius Date: Tue, 11 Feb 2025 13:06:45 +0000 (+0100) Subject: server : use common_token_to_piece instead of common_detokenize (#11740) X-Git-Tag: upstream/0.0.4719~31 X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=a18f481f99962638092d6f1c98b1d34d3e3256de;p=pkg%2Fggml%2Fsources%2Fllama.cpp server : use common_token_to_piece instead of common_detokenize (#11740) * server : use common_token_to_piece instead of common_detokenize This commit replaces the call to common_detokenize with common_token_to_piece in the populate_token_probs function. The motivation for this change is to avoid an issue where common_detokenize would remove the word boundary character for tokens, which caused a regression in the server-generated token probabilities. Resolves: https://github.com/ggerganov/llama.cpp/issues/11728 * squash! server : use common_token_to_piece instead of common_detokenize Use common_token_to_piece for post_sampling_probs as well. --- diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 5da3e187..d320e9d6 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -2279,7 +2279,7 @@ struct server_context { for (size_t i = 0; i < std::min(max_probs, n_probs); i++) { result.probs.push_back({ cur_p->data[i].id, - common_detokenize(ctx, {cur_p->data[i].id}, special), + common_token_to_piece(ctx, cur_p->data[i].id, special), cur_p->data[i].p }); } @@ -2301,7 +2301,7 @@ struct server_context { for (size_t i = 0; i < std::min(n_vocab, n_probs); i++) { result.probs.push_back({ cur[i].id, - common_detokenize(ctx, {cur[i].id}, special), + common_token_to_piece(ctx, cur[i].id, special), cur[i].p }); }