llama : add Smaug 70B support (#7402)
author    Bartowski <redacted>
Sun, 26 May 2024 12:28:35 +0000 (08:28 -0400)
committer GitHub <redacted>
Sun, 26 May 2024 12:28:35 +0000 (15:28 +0300)
convert-hf-to-gguf-update.py
convert-hf-to-gguf.py
llama.cpp
llama.h

diff --git a/convert-hf-to-gguf-update.py b/convert-hf-to-gguf-update.py
index 1923b88ba2a802da2710ca82ebb3a56c7d658486..84b72348dc579e8b4c81994f5c867e40837fe6ed 100755 (executable)
--- a/convert-hf-to-gguf-update.py
+++ b/convert-hf-to-gguf-update.py
@@ -81,6 +81,7 @@ models = [
     {"name": "jina-v2-en",     "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", }, # WPM!
     {"name": "jina-v2-es",     "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", },
     {"name": "jina-v2-de",     "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", },
+    {"name": "smaug-bpe",      "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct", },
 ]
 
 
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 51549ac72f8e71d136555a47bd2a9103fdcf27fa..bfccf8623a175637037e3014ca5ca9a829d30628 100755 (executable)
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -473,6 +473,9 @@ class Model:
         if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
             # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
             res = "jina-v2-de"
+        if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
+            # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
+            res = "smaug-bpe"
 
         if res is None:
             logger.warning("\n")
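
For reference, the chkhsh compared above is a SHA-256 over the stringified token-ID list that the tokenizer produces for a fixed test string. A minimal sketch of reproducing it, assuming the transformers package is installed and the repo is accessible; chktxt below is a short placeholder for the long multilingual test string defined in convert-hf-to-gguf-update.py:

import hashlib

from transformers import AutoTokenizer

# Placeholder; the real chktxt lives in convert-hf-to-gguf-update.py.
chktxt = "Hello world! 123 éè"

tokenizer = AutoTokenizer.from_pretrained("abacusai/Smaug-Llama-3-70B-Instruct")

# Hash the string form of the token-ID list, as both scripts do.
chktok = tokenizer.encode(chktxt)
chkhsh = hashlib.sha256(str(chktok).encode()).hexdigest()

# With the real chktxt this should print
# c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d
print(chkhsh)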
diff --git a/llama.cpp b/llama.cpp
index 989d27b9dfb3a3982fc15fde14936fa1e4621bb2..f67cb7e2329459a4fef1942e089eb4a201692e43 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -4593,6 +4593,9 @@ static void llm_load_vocab(
             } else if (
                 tokenizer_pre == "dbrx") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
+            } else if (
+                tokenizer_pre == "smaug-bpe") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG;
             } else {
                 throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
             }
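
On the loading side, llm_load_vocab matches the tokenizer.ggml.pre string that the converter wrote into the GGUF metadata. A hedged sketch of inspecting that field with the gguf-py package that ships with llama.cpp (the field-decoding detail is an assumption about the reader's internal layout; the path is hypothetical):

from gguf import GGUFReader

reader = GGUFReader("smaug-70b.gguf")  # hypothetical path

# tokenizer.ggml.pre holds the pre-tokenizer name matched above.
field = reader.fields["tokenizer.ggml.pre"]
pre = bytes(field.parts[-1]).decode("utf-8")  # assumption: last part is the string bytes

print(pre)  # expected: smaug-bpe for a model converted with res = "smaug-bpe"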
@@ -12512,6 +12515,7 @@ struct llm_tokenizer_bpe {
                         });
                         break;
                     case LLAMA_VOCAB_PRE_TYPE_DBRX:
+                    case LLAMA_VOCAB_PRE_TYPE_SMAUG:
                         word_collection = unicode_regex_split(text, {
                             // same as llama3
                             "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
diff --git a/llama.h b/llama.h
index 16676269dd38ad2f1fb6206eef331b486bbf36d0..7671b8a57f4e79c2ef83be71482a217c2ba52b78 100644 (file)
--- a/llama.h
+++ b/llama.h
@@ -85,6 +85,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_QWEN2          = 11,
         LLAMA_VOCAB_PRE_TYPE_OLMO           = 12,
         LLAMA_VOCAB_PRE_TYPE_DBRX           = 13,
+        LLAMA_VOCAB_PRE_TYPE_SMAUG          = 14,
     };
 
     // note: these values should be synchronized with ggml_rope
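
The enum value itself stays inside the C API, but the string-to-enum pairing has to remain consistent across the three files touched by this commit. A hand-maintained, purely illustrative mirror of the tail of the enum shown above:

# Illustrative mirror of the LLAMA_VOCAB_PRE_TYPE_* values above;
# the keys are the tokenizer_pre strings matched in llm_load_vocab.
VOCAB_PRE_TYPES = {
    "qwen2":     11,
    "olmo":      12,
    "dbrx":      13,
    "smaug-bpe": 14,  # added by this commit
}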