git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
convert : add support of codeqwen due to tokenizer (#6707)
authorJunyang Lin <redacted>
Wed, 24 Apr 2024 07:16:21 +0000 (15:16 +0800)
committerGitHub <redacted>
Wed, 24 Apr 2024 07:16:21 +0000 (10:16 +0300)
* add support of codeqwen due to tokenizer

* override load_hparams

* fix typo

* fix load_params

* convert : fix whitespace

---------

Co-authored-by: Georgi Gerganov <redacted>
convert-hf-to-gguf.py

index 4ace13eb631492747f077d802a82c08568ace061..5763b6664e832e9d8345cf74fc3e5cfda5926b36 100755 (executable)
@@ -363,6 +363,16 @@ class Model(ABC):
                         scores.append(-1000.0)
                         toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
 
+        if vocab_size > len(tokens):
+            pad_count = vocab_size - len(tokens)
+            print(
+                f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]"
+            )
+            for i in range(1, pad_count + 1):
+                tokens.append(f"[PAD{i}]")
+                scores.append(-1000.0)
+                toktypes.append(SentencePieceTokenTypes.UNUSED)
+
         assert len(tokens) == vocab_size
 
         self.gguf_writer.add_tokenizer_model("llama")
@@ -1789,6 +1799,12 @@ class QwenModel(Model):
 class Qwen2Model(Model):
     model_arch = gguf.MODEL_ARCH.QWEN2
 
+    def set_vocab(self):
+        try:
+            self._set_vocab_sentencepiece()
+        except FileNotFoundError:
+            self._set_vocab_gpt2()
+
 
 @Model.register("Qwen2MoeForCausalLM")
 class Qwen2MoeModel(Model):