server : use common_token_to_piece instead of common_detokenize (#11740)
author      Daniel Bevenius <redacted>
            Tue, 11 Feb 2025 13:06:45 +0000 (14:06 +0100)
committer   GitHub <redacted>
            Tue, 11 Feb 2025 13:06:45 +0000 (14:06 +0100)
* server : use common_token_to_piece instead of common_detokenize

This commit replaces the call to common_detokenize with
common_token_to_piece in the populate_token_probs function.

The motivation for this change is to avoid an issue where
common_detokenize would strip the word-boundary character from individual
tokens, which caused a regression in the server-generated token
probabilities.

Resolves: https://github.com/ggerganov/llama.cpp/issues/11728

* squash! server : use common_token_to_piece instead of common_detokenize

Use common_token_to_piece for post_sampling_probs as well.
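
A minimal sketch of the difference the commit message describes, assuming an
already initialized llama_context * ctx and a hypothetical token whose piece
starts with a word-boundary space (e.g. " world"): common_detokenize takes a
token sequence and may drop that leading boundary character, while
common_token_to_piece returns the raw piece for a single token.

    // Sketch only: assumes ctx points to an initialized llama_context with a
    // loaded model, and tok is a token whose piece begins with a boundary space.
    #include <cstdio>
    #include <string>
    #include <vector>

    #include "common.h" // common_detokenize, common_token_to_piece
    #include "llama.h"

    static void compare_token_text(llama_context * ctx, llama_token tok, bool special) {
        // Detokenizing a one-token sequence can strip the leading word-boundary
        // character, e.g. yielding "world".
        std::string detok = common_detokenize(ctx, std::vector<llama_token>{tok}, special);

        // Converting the single token to its piece keeps the boundary
        // character, e.g. yielding " world".
        std::string piece = common_token_to_piece(ctx, tok, special);

        printf("detokenize: '%s'\n", detok.c_str());
        printf("piece     : '%s'\n", piece.c_str());
    }

This is why the server's per-token probability entries switch to
common_token_to_piece below: each entry corresponds to exactly one candidate
token, and its text should keep the boundary character.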

examples/server/server.cpp

index 5da3e187f740aac54cc4d7c93fef15d4a7194f94..d320e9d6b4c08f6d74ddbf55160c16c75dd9f01d 100644
@@ -2279,7 +2279,7 @@ struct server_context {
             for (size_t i = 0; i < std::min(max_probs, n_probs); i++) {
                 result.probs.push_back({
                     cur_p->data[i].id,
-                    common_detokenize(ctx, {cur_p->data[i].id}, special),
+                    common_token_to_piece(ctx, cur_p->data[i].id, special),
                     cur_p->data[i].p
                 });
             }
@@ -2301,7 +2301,7 @@ struct server_context {
             for (size_t i = 0; i < std::min(n_vocab, n_probs); i++) {
                 result.probs.push_back({
                     cur[i].id,
-                    common_detokenize(ctx, {cur[i].id}, special),
+                    common_token_to_piece(ctx, cur[i].id, special),
                     cur[i].p
                 });
             }