git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
convert : add custom attention mapping
author Georgi Gerganov <redacted>
Fri, 6 Dec 2024 19:33:15 +0000 (21:33 +0200)
committer Georgi Gerganov <redacted>
Fri, 6 Dec 2024 19:33:49 +0000 (21:33 +0200)
gguf-py/gguf/tensor_mapping.py

index 1b6a3f4add875f1b1231bb26f9585c44e4901940..f0a7b6478508efdce02de2dc65492e8c6e603526 100644 (file)
@@ -146,6 +146,7 @@ class TensorNameMap:
         # Attention query
         MODEL_TENSOR.ATTN_Q: (
             "model.layers.{bid}.self_attn.q_proj",                       # llama-hf nemotron olmoe olmo2
+            "model.layers.{bid}.self_attn.q_proj_no_perm",               # llama-custom
             "layers.{bid}.attention.wq",                                 # llama-pth
             "encoder.layer.{bid}.attention.self.query",                  # bert
             "transformer.h.{bid}.attn.q_proj",                           # gpt-j
@@ -158,6 +159,7 @@ class TensorNameMap:
         # Attention key
         MODEL_TENSOR.ATTN_K: (
             "model.layers.{bid}.self_attn.k_proj",                     # llama-hf nemotron olmoe olmo2
+            "model.layers.{bid}.self_attn.k_proj_no_perm",             # llama-custom
             "layers.{bid}.attention.wk",                               # llama-pth
             "encoder.layer.{bid}.attention.self.key",                  # bert
             "transformer.h.{bid}.attn.k_proj",                         # gpt-j