git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : add 128k yarn context for Qwen (#10698)
author: Robert Collins <redacted>
Sat, 7 Dec 2024 21:12:27 +0000 (16:12 -0500)
committer: GitHub <redacted>
Sat, 7 Dec 2024 21:12:27 +0000 (23:12 +0200)
* add 128k yarn context for Qwen

* added property for model tensors

* removing useless line

convert_hf_to_gguf.py
gguf-py/gguf/constants.py

index a4eece934021fb517f19ccc619db3e191b0ae643..c63d929c187a83c16b7c6ff0521dbe22e8c330f2 100755 (executable)
@@ -1992,6 +1992,14 @@ class Qwen2Model(Model):
         except FileNotFoundError:
             self._set_vocab_gpt2()
 
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
+            if self.hparams["rope_scaling"].get("type") == "yarn":
+                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
+                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
+                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
+
 
 @Model.register("Qwen2MoeForCausalLM")
 class Qwen2MoeModel(Model):
index 66247b80302e6560251e61d18c03c4d460a8d56a..4c8710b39e8301a4dc00c161d7a8e30a7a175027 100644 (file)
@@ -761,6 +761,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.TOKEN_EMBD,
         MODEL_TENSOR.OUTPUT_NORM,
         MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.ROPE_FREQS,
         MODEL_TENSOR.ATTN_NORM,
         MODEL_TENSOR.ATTN_Q,
         MODEL_TENSOR.ATTN_K,