]> git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
falcon : use stated vocab size (#2914)
authorakawrykow <redacted>
Thu, 14 Sep 2023 17:19:42 +0000 (10:19 -0700)
committerGitHub <redacted>
Thu, 14 Sep 2023 17:19:42 +0000 (20:19 +0300)
convert-falcon-hf-to-gguf.py

index 6ed2b88c6712cb0935bcae9b953802a2b06611cb..5d4ad04a44032ab558f2ded01173ee91d3ffb6b1 100755 (executable)
@@ -137,7 +137,9 @@ with open(tokenizer_json_file, "r", encoding="utf-8") as f:
 
 print("gguf: get gpt2 tokenizer vocab")
 
-vocab_size = len(tokenizer_json["model"]["vocab"])
+# The number of tokens in tokenizer.json can differ from the expected vocab size.
+# This causes downstream issues with mismatched tensor sizes when running inference
+vocab_size = hparams["vocab_size"] if "vocab_size" in hparams else len(tokenizer_json["model"]["vocab"])
 
 # ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py
 tokenizer = AutoTokenizer.from_pretrained(dir_model)