llama : fix bpe tokenize from byte (#2889)
author     opparco <redacted>
           Sun, 3 Sep 2023 10:18:09 +0000 (19:18 +0900)
committer  GitHub <redacted>
           Sun, 3 Sep 2023 10:18:09 +0000 (13:18 +0300)
llama.cpp

index 2b0cf30f6ec0d2815cfb3781789aa1fb164fcf9b..c97c1462f6d14522d2039ce8aa90a5ed44fef224 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3366,9 +3366,15 @@ struct llm_tokenizer_bpe {
                         std::string byte_str(1, *j);
                         auto token_multibyte = vocab.token_to_id.find(byte_str);
                         if (token_multibyte == vocab.token_to_id.end()) {
-                            fprintf(stderr,"ERROR: byte not found in vocab: '%s'\n", byte_str.c_str());
+                            try {
+                                llama_token token_byte = llama_byte_to_token(vocab, *j);
+                                output.push_back(token_byte);
+                            } catch (const std::out_of_range & err) {
+                                fprintf(stderr,"ERROR: byte not found in vocab: '%s'\n", byte_str.c_str());
+                            }
+                        } else {
+                            output.push_back((*token_multibyte).second);
                         }
-                        output.push_back((*token_multibyte).second);
                     }
                 } else {
                     output.push_back((*token).second);
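
For context, the patch replaces a hard failure with a byte-level fallback: previously the tokenizer printed the error and then still dereferenced the end() iterator; now, when a single byte of an unmatched symbol is missing from token_to_id, it asks llama_byte_to_token for a byte token and only reports an error if that lookup also throws. Below is a minimal standalone sketch of the same pattern; the vocab_map type and the byte_to_token helper are simplified stand-ins for illustration, not llama.cpp's actual llama_vocab or llama_byte_to_token.

// Sketch of the byte-fallback pattern introduced by this commit.
// vocab_map and byte_to_token() are simplified stand-ins, not llama.cpp types.
#include <cstdio>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>

using token_id  = int;
using vocab_map = std::unordered_map<std::string, token_id>;

// Hypothetical helper standing in for llama_byte_to_token: looks the byte up
// under a "<0xNN>" key and throws std::out_of_range when it is missing,
// which is the behavior the patch relies on.
static token_id byte_to_token(const vocab_map & vocab, unsigned char byte) {
    char buf[8];
    snprintf(buf, sizeof(buf), "<0x%02X>", (unsigned) byte);
    return vocab.at(buf); // throws std::out_of_range if absent
}

// Byte-level fallback as in the patch: try the raw byte as a vocab string
// first, then fall back to its byte token, and only report an error if both fail.
static void tokenize_bytes(const vocab_map & vocab, const std::string & symbol,
                           std::vector<token_id> & output) {
    for (unsigned char c : symbol) {
        std::string byte_str(1, (char) c);
        auto it = vocab.find(byte_str);
        if (it == vocab.end()) {
            try {
                output.push_back(byte_to_token(vocab, c));
            } catch (const std::out_of_range &) {
                fprintf(stderr, "ERROR: byte not found in vocab: '%s'\n", byte_str.c_str());
            }
        } else {
            output.push_back(it->second);
        }
    }
}

int main() {
    // Toy vocab: "a" has a plain entry; 0xC3 and 0xA9 only have byte tokens.
    vocab_map vocab = { {"a", 1}, {"<0xC3>", 2}, {"<0xA9>", 3} };
    std::vector<token_id> out;
    tokenize_bytes(vocab, "a\xC3\xA9", out); // UTF-8 "aé" -> 1 2 3
    for (token_id t : out) printf("%d ", t);
    printf("\n");
    return 0;
}

With a vocab like the one above, a byte missing from the plain single-character entries still resolves to its dedicated byte token; only bytes absent from both lookups produce the error message.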