git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
convert : fix autoawq gemma (#6704)
author     Zheng.Deng <redacted>
           Tue, 16 Apr 2024 20:51:07 +0000 (04:51 +0800)
committer  GitHub <redacted>
           Tue, 16 Apr 2024 20:51:07 +0000 (23:51 +0300)
* fix convert error for autoawq-quantized gemma models

Using AutoAWQ to quantize a Gemma model writes an lm_head.weight tensor into model-00001-of-00002.safetensors. convert-hf-to-gguf.py has no mapping for lm_head.weight (llama.cpp's Gemma reuses the token embeddings as the output head), so the conversion fails. Skipping this tensor during loading avoids the error.
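
For context, this is how such a checkpoint typically comes about. A minimal sketch of an AutoAWQ quantization run, assuming the awq and transformers packages; the model id and quantization settings here are illustrative, not taken from this report:

    # Hypothetical sketch: quantize a Gemma checkpoint with AutoAWQ.
    # The saved safetensors shards then contain lm_head.weight, which
    # triggered the conversion error described above.
    from awq import AutoAWQForCausalLM
    from transformers import AutoTokenizer

    model_path = "google/gemma-7b"  # assumed model id
    model = AutoAWQForCausalLM.from_pretrained(model_path)
    tokenizer = AutoTokenizer.from_pretrained(model_path)

    # Typical AutoAWQ settings; the exact values are assumptions.
    quant_config = {"zero_point": True, "q_group_size": 128, "w_bit": 4, "version": "GEMM"}
    model.quantize(tokenizer, quant_config=quant_config)
    model.save_quantized("gemma-7b-awq")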

* change the check to a full string match and print an informative message

Change the check to a full string match and print a short message informing users that lm_head.weight has been skipped.
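
To check whether a given checkpoint is affected, one can list the tensor names in the first shard. A minimal sketch, assuming the safetensors package is installed; the file path is illustrative:

    # List tensor names in a safetensors shard (path is illustrative).
    from safetensors import safe_open

    with safe_open("model-00001-of-00002.safetensors", framework="pt") as f:
        names = list(f.keys())

    # AutoAWQ-quantized Gemma checkpoints include lm_head.weight here,
    # which convert-hf-to-gguf.py previously could not map.
    print("lm_head.weight" in names)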

---------

Co-authored-by: Zheng.Deng <redacted>
convert-hf-to-gguf.py

index f321d77de11f87ca6296d1a7accdac7034542271..c14186abbc2a6cb6df585e6cf1bd2c195a960397 100755 (executable)
@@ -2458,6 +2458,12 @@ class GemmaModel(Model):
         tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
 
         for name, data_torch in self.get_tensors():
+            # lm_head is not used by llama.cpp, but AutoAWQ includes this tensor in the model.
+            # To prevent errors, skip loading lm_head.weight.
+            if name == "lm_head.weight":
+                print(f"Skipping tensor {name!r} in safetensors so that the conversion can finish normally.")
+                continue
+
             old_dtype = data_torch.dtype
 
             # convert any unsupported data types to float32