git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : add pre-tokenizer regexes for BLOOM and gpt3-finnish (#8850)
authorEsko Toivonen <redacted>
Thu, 15 Aug 2024 07:17:12 +0000 (10:17 +0300)
committerGitHub <redacted>
Thu, 15 Aug 2024 07:17:12 +0000 (10:17 +0300)
convert_hf_to_gguf.py
convert_hf_to_gguf_update.py
include/llama.h
src/llama-vocab.cpp
src/llama.cpp

index 550dd5cfda99f083ee0c2c24a6d288b31be8296f..41063d94b684e47dc607003304bfcdd7c1f3baaa 100755 (executable)
@@ -590,6 +590,12 @@ class Model:
         if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249":
             # ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M
             res = "smollm"
+        if chkhsh == "3c30d3ad1d6b64202cd222813e7736c2db6e1bd6d67197090fc1211fbc612ae7":
+            # ref: https://huggingface.co/bigscience/bloom
+            res = "bloom"
+        if chkhsh == "bc01ce58980e1db43859146dc51b1758b3b88729b217a74792e9f8d43e479d21":
+            # ref: https://huggingface.co/TurkuNLP/gpt3-finnish-small
+            res = "gpt3-finnish"
 
         if res is None:
             logger.warning("\n")
@@ -893,7 +899,7 @@ class GPTNeoXModel(Model):
         return tensors
 
 
-@Model.register("BloomForCausalLM")
+@Model.register("BloomForCausalLM", "BloomModel")
 class BloomModel(Model):
     model_arch = gguf.MODEL_ARCH.BLOOM
 
index d5a2d925eaef5582fb68984e0ffed718a61134b2..ba98f5c88990e4d68e47ccf128e1ed80e6755815 100755 (executable)
@@ -94,6 +94,8 @@ models = [
     {"name": "codeshell",      "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/WisdomShell/CodeShell-7B", },
     {"name": "tekken",         "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistralai/Mistral-Nemo-Base-2407", },
     {"name": "smollm",         "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/HuggingFaceTB/SmolLM-135M", },
+    {'name': "bloom",          "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigscience/bloom", },
+    {'name': "gpt3-finnish",   "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/TurkuNLP/gpt3-finnish-small", },
 ]
 
 
index 3c28cf0b509fb39e582ad69a612447fa6b9811da..fda68da851408742f763025fb7644b335f6b5655 100644 (file)
@@ -93,6 +93,8 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_TEKKEN         = 20,
         LLAMA_VOCAB_PRE_TYPE_SMOLLM         = 21,
         LLAMA_VOCAB_PRE_TYPE_CODESHELL      = 22,
+        LLAMA_VOCAB_PRE_TYPE_BLOOM          = 23,
+        LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH   = 24,
     };
 
     enum llama_rope_type {
index 749f8571829dfc5dacdb7fc4f5ea3a592ff0a9b9..063af648eb3578585b030ad5487356b1b910f50b 100644 (file)
@@ -410,6 +410,8 @@ struct llm_tokenizer_bpe {
                 };
                 break;
             case LLAMA_VOCAB_PRE_TYPE_PORO:
+            case LLAMA_VOCAB_PRE_TYPE_BLOOM:
+            case LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH:
                 regex_exprs = {
                     " ?[^(\\s|.,!?…。,、।۔،)]+",
                 };
index 7f2f0003142a3f03ef7979149ce38d101aa84140..bf7a57c79905d12277c5407b8e57cd00969ecc82 100644 (file)
@@ -5467,6 +5467,12 @@ static void llm_load_vocab(
             } else if (
                 tokenizer_pre == "codeshell") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CODESHELL;
+            } else if (
+                tokenizer_pre == "bloom") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_BLOOM;
+            } else if (
+                tokenizer_pre == "gpt3-finnish") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH;
             } else {
                 throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
             }