Add more tokenizer tests (#3742)
author Galunid <redacted>
Tue, 24 Oct 2023 07:17:17 +0000 (09:17 +0200)
committer GitHub <redacted>
Tue, 24 Oct 2023 07:17:17 +0000 (09:17 +0200)
* Add more tokenizer tests

* Add starcoder

* Update test vocab files

* Restrict bpe tokenizer tests to unicode planes

* Update comment

* Comment cosmetics

* Remove bloom vocab/test

models/ggml-vocab-baichuan.gguf [new file with mode: 0644]
models/ggml-vocab-gpt-neox.gguf [new file with mode: 0644]
models/ggml-vocab-refact.gguf [new file with mode: 0644]
models/ggml-vocab-starcoder.gguf [new file with mode: 0644]
tests/CMakeLists.txt
tests/test-tokenizer-1-bpe.cpp

diff --git a/models/ggml-vocab-baichuan.gguf b/models/ggml-vocab-baichuan.gguf
new file mode 100644 (file)
index 0000000..7caaf82
Binary files /dev/null and b/models/ggml-vocab-baichuan.gguf differ
diff --git a/models/ggml-vocab-gpt-neox.gguf b/models/ggml-vocab-gpt-neox.gguf
new file mode 100644 (file)
index 0000000..b9af168
Binary files /dev/null and b/models/ggml-vocab-gpt-neox.gguf differ
diff --git a/models/ggml-vocab-refact.gguf b/models/ggml-vocab-refact.gguf
new file mode 100644 (file)
index 0000000..8f26cfb
Binary files /dev/null and b/models/ggml-vocab-refact.gguf differ
diff --git a/models/ggml-vocab-starcoder.gguf b/models/ggml-vocab-starcoder.gguf
new file mode 100644 (file)
index 0000000..a52983f
Binary files /dev/null and b/models/ggml-vocab-starcoder.gguf differ
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 1c73de0a3e92eb8e201680038a402c8f4606b512..6757ad1cca1a23998a56bfcb461841ded0f74bc9 100644 (file)
@@ -28,10 +28,14 @@ llama_build_executable(test-tokenizer-0-falcon.cpp)
 llama_test_executable (test-tokenizer-0-falcon test-tokenizer-0-falcon.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
 llama_build_executable(test-tokenizer-1-llama.cpp)
 llama_test_executable (test-tokenizer-1-llama test-tokenizer-1-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
+llama_test_executable(test-tokenizer-1-baichuan test-tokenizer-1-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)
 llama_build_executable(test-tokenizer-1-bpe.cpp)
 llama_test_executable (test-tokenizer-1-falcon test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
 llama_test_executable(test-tokenizer-1-aquila test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
 llama_test_executable(test-tokenizer-1-mpt test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
+llama_test_executable(test-tokenizer-1-gpt-neox test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf)
+llama_test_executable(test-tokenizer-1-refact test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
+llama_test_executable(test-tokenizer-1-starcoder test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
 llama_build_and_test_executable(test-grammar-parser.cpp)
 llama_build_and_test_executable(test-llama-grammar.cpp)
 llama_build_and_test_executable(test-grad0.cpp) # SLOW
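
The CMake hunk reuses a single test source against several vocabularies: test-tokenizer-1-llama.cpp is built once and re-run with the baichuan vocab, and test-tokenizer-1-bpe.cpp gains gpt-neox, refact, and starcoder runs, each invocation receiving a different .gguf vocab file as its test argument. Inside the test binary that path arrives as argv[1]; a minimal sketch of the argument handling (illustrative only, not the test's exact code):

#include <cstdio>
#include <string>

// Each llama_test_executable() line runs the same binary against a different
// vocab file, so the test reads the model path from its first argument.
int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <vocab-file.gguf>\n", argv[0]);
        return 1;
    }
    const std::string fname = argv[1]; // e.g. models/ggml-vocab-gpt-neox.gguf
    fprintf(stdout, "testing vocab: %s\n", fname.c_str());
    return 0;
}
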
diff --git a/tests/test-tokenizer-1-bpe.cpp b/tests/test-tokenizer-1-bpe.cpp
index 85a59a14dcd53621c5b4d3e4eb7cfa6bcfd55c6c..386530f23f92cce2d85c74cb1bf01f7af61dc776 100644 (file)
@@ -91,9 +91,19 @@ int main(int argc, char **argv) {
             }
         }
     }
-    // TODO: why doesn't this work for the full range of Unicodes?
+    // Restrict to assigned unicode planes
     // for (uint32_t cp = 0x10000; cp < 0x0010ffff; ++cp) {
-    for (uint32_t cp = 0x10000; cp < 0x00080000; ++cp) {
+    for (uint32_t cp = 0x10000; cp < 0x00040000; ++cp) {
+        std::string str = codepoint_to_utf8(cp);
+        std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
+        std::string check = llama_detokenize_bpe(ctx, tokens);
+        if (str != check) {
+            fprintf(stderr, "%s : error: codepoint %x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
+                __func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
+            return 4;
+        }
+    }
+    for (uint32_t cp = 0x000e0000; cp < 0x0010ffff; ++cp) {
         std::string str = codepoint_to_utf8(cp);
         std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
         std::string check = llama_detokenize_bpe(ctx, tokens);
@@ -103,7 +113,6 @@ int main(int argc, char **argv) {
             return 4;
         }
     }
-
     llama_free_model(model);
     llama_free(ctx);
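
The two new loops restrict the round-trip check to assigned Unicode planes: 0x10000..0x3FFFF covers planes 1-3 (SMP, SIP, TIP), and 0xE0000..0x10FFFF covers planes 14-16 (SSP plus the two private-use planes), skipping the unassigned planes 4-13. Each codepoint is encoded to UTF-8, tokenized, detokenized, and compared against the original string. For reference, the codepoint_to_utf8 helper the loops depend on is standard RFC 3629 encoding; a self-contained sketch (the real helper lives in the shared test code):

#include <cstdint>
#include <stdexcept>
#include <string>

// Standard UTF-8 encoding of one codepoint (RFC 3629). The test only feeds
// codepoints >= 0x10000, so the four-byte branch is the one exercised here.
static std::string codepoint_to_utf8(uint32_t cp) {
    std::string out;
    if (cp <= 0x7f) {
        out.push_back(static_cast<char>(cp));
    } else if (cp <= 0x7ff) {
        out.push_back(static_cast<char>(0xc0 | (cp >> 6)));
        out.push_back(static_cast<char>(0x80 | (cp & 0x3f)));
    } else if (cp <= 0xffff) {
        out.push_back(static_cast<char>(0xe0 | (cp >> 12)));
        out.push_back(static_cast<char>(0x80 | ((cp >> 6) & 0x3f)));
        out.push_back(static_cast<char>(0x80 | (cp & 0x3f)));
    } else if (cp <= 0x10ffff) {
        out.push_back(static_cast<char>(0xf0 | (cp >> 18)));
        out.push_back(static_cast<char>(0x80 | ((cp >> 12) & 0x3f)));
        out.push_back(static_cast<char>(0x80 | ((cp >> 6) & 0x3f)));
        out.push_back(static_cast<char>(0x80 | (cp & 0x3f)));
    } else {
        throw std::invalid_argument("invalid codepoint");
    }
    return out;
}
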