]> git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : add Trillion 7B model support (#12556)
authorJuyoung Suk <redacted>
Sun, 30 Mar 2025 18:38:33 +0000 (03:38 +0900)
committerGitHub <redacted>
Sun, 30 Mar 2025 18:38:33 +0000 (20:38 +0200)
* Support Trillion 7B

* Update llama.h

* Update llama.h

* Update llama-vocab.cpp for Trillion

* Update llama-vocab.cpp

README.md
convert_hf_to_gguf.py
convert_hf_to_gguf_update.py
include/llama.h
src/llama-vocab.cpp

index 1eec944f273a84a68304ce9a3f427c24393ddf22..b637fe2ef9ee94ad531410259088de549fad1028 100644 (file)
--- a/README.md
+++ b/README.md
@@ -112,6 +112,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 - [x] [RWKV-6](https://github.com/BlinkDL/RWKV-LM)
 - [x] [QRWKV-6](https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1)
 - [x] [GigaChat-20B-A3B](https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct)
+- [x] [Trillion-7B-preview](https://huggingface.co/trillionlabs/Trillion-7B-preview)
 
 #### Multimodal
 
index c605e4d052d9c22c7e9af9bf0e9937c84d25c974..c322edc414ed8463a22dbb0383e9da43f2b4a60f 100755 (executable)
@@ -708,6 +708,9 @@ class Model:
         if chkhsh == "7dec86086fcc38b66b7bc1575a160ae21cf705be7718b9d5598190d7c12db76f":
             # ref: https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k
             res = "superbpe"
+        if chkhsh == "1994ffd01900cfb37395608534236ecd63f2bd5995d6cb1004dda1af50240f15":
+            # ref: https://huggingface.co/trillionlabs/Trillion-7B-preview
+            res = "trillion"
 
         if res is None:
             logger.warning("\n")
index ca90cf592932ba96305f35ae522c5042f86b7607..a3a64712536f1d038b9aca1a4df30f718f348f81 100755 (executable)
@@ -111,6 +111,7 @@ models = [
     {"name": "deepseek-r1-qwen", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"},
     {"name": "gpt-4o",           "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Xenova/gpt-4o", },
     {"name": "superbpe",         "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k", },
+    {"name": "trillion",         "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/trillionlabs/Trillion-7B-preview", },
 ]
 
 
index c66a23709dbf9c5e8d9e485d9d8bf1cc0d78c08c..4eb70ec99e523c8db1e5620a3247f4a754bdc9a0 100644 (file)
@@ -108,6 +108,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM  = 28,
         LLAMA_VOCAB_PRE_TYPE_GPT4O          = 29,
         LLAMA_VOCAB_PRE_TYPE_SUPERBPE       = 30,
+        LLAMA_VOCAB_PRE_TYPE_TRILLION       = 31,
     };
 
     enum llama_rope_type {
index 2ddc8108f4cb43ce2d31cce45c2097e83c3c2e03..5ace5e385a5d1fd005168c0028b5191c70bee927 100644 (file)
@@ -342,6 +342,7 @@ struct llm_tokenizer_bpe : llm_tokenizer {
             case LLAMA_VOCAB_PRE_TYPE_MPT:
             case LLAMA_VOCAB_PRE_TYPE_OLMO:
             case LLAMA_VOCAB_PRE_TYPE_JAIS:
+            case LLAMA_VOCAB_PRE_TYPE_TRILLION:
                 regex_exprs = {
                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                 };
@@ -1614,6 +1615,10 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                 tokenizer_pre == "superbpe") {
                 pre_type = LLAMA_VOCAB_PRE_TYPE_SUPERBPE;
                 clean_spaces = false;
+            } else if (
+                tokenizer_pre == "trillion") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_TRILLION;
+                clean_spaces = false;
             } else {
                 throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
             }