requirements : update transformers/torch for Embedding Gemma (#15828)
author    Daniel Bevenius <redacted>
          Tue, 9 Sep 2025 04:06:52 +0000 (06:06 +0200)
committer GitHub <redacted>
          Tue, 9 Sep 2025 04:06:52 +0000 (06:06 +0200)
* requirements : update transformers/torch for Embedding Gemma

This commit updates the requirements to support converting
Embedding Gemma 300m models.

The motivation for this change is that during development I converted
the models using a local copy of the transformers package. This was a
mistake on my part; I should also have updated my transformers version
to the official release.

I had checked the requirements/requirements-convert_legacy_llama.txt
file and noted that the version was >=4.45.1,<5.0.0, and came to the
conclusion that no update would be needed. This assumed that
Embedding Gemma would be in an official transformers release by the
time commit fb15d649ed14ab447eeab911e0c9d21e35fb243e ("llama : add
support for EmbeddingGemma 300m (#15798)") was merged, so anyone
wanting to convert the models themselves would be able to do so.
However, Embedding Gemma is currently only available as a preview
release, and this commit updates the requirements to use that preview
release.

* resolve additional python dependencies

* fix pyright errors in tokenizer test and remove unused import
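
For anyone converting locally, a quick environment check confirms the
updated pins are in effect. A minimal sketch, assuming the `packaging`
library is installed (the preview build's exact version string is an
assumption; check your install):

    import importlib.metadata as md

    from packaging.version import Version

    # this commit pins torch ~= 2.6.0 and a transformers preview tag
    torch_ver = Version(md.version("torch"))
    if torch_ver < Version("2.6.0"):
        raise SystemExit(f"Embedding Gemma conversion needs torch >= 2.6.0, got {torch_ver}")
    print("torch:", torch_ver)
    print("transformers:", md.version("transformers"))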

requirements/requirements-convert_hf_to_gguf.txt
requirements/requirements-convert_legacy_llama.txt
requirements/requirements-tool_bench.txt
tests/test-tokenizer-random.py
tools/mtmd/legacy-models/minicpmv-convert-image-encoder-to-gguf.py
tools/mtmd/requirements.txt
tools/server/tests/requirements.txt

diff --git a/requirements/requirements-convert_hf_to_gguf.txt b/requirements/requirements-convert_hf_to_gguf.txt
index 766745f42f7ea14224c57c4eec1e882936dd96dc..90c98c3ffe5266d4a93a7b84cf7115f674f92044 100644
@@ -2,7 +2,9 @@ mistral-common>=1.8.3
 
 -r ./requirements-convert_legacy_llama.txt
 --extra-index-url https://download.pytorch.org/whl/cpu
-torch~=2.4.0; platform_machine != "s390x"
+
+## Embedding Gemma requires PyTorch 2.6.0 or later
+torch~=2.6.0; platform_machine != "s390x"
 
 # torch s390x packages can only be found from nightly builds
 --extra-index-url https://download.pytorch.org/whl/nightly
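
The `platform_machine != "s390x"` suffix above is a PEP 508 environment
marker: pip installs the pinned torch wheel only where the marker
evaluates true on the host. A minimal sketch of the evaluation, using
the `packaging` library (installed separately; pip only vendors it):

    from packaging.markers import Marker

    # pip skips the requirement when this evaluates to False (e.g. on s390x)
    marker = Marker('platform_machine != "s390x"')
    print(marker.evaluate())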
diff --git a/requirements/requirements-convert_legacy_llama.txt b/requirements/requirements-convert_legacy_llama.txt
index 859204b27ebb80da3d441f252cccd7894577aaf8..f6076142cee5e169dcbf2da2a5c86071ee70e5f6 100644
@@ -1,5 +1,14 @@
 numpy~=1.26.4
 sentencepiece~=0.2.0
-transformers>=4.45.1,<5.0.0
+
+# Embedding Gemma is currently a preview release:
+# https://github.com/huggingface/transformers/releases/tag/v4.56.0-Embedding-Gemma-preview
+
+# The version is needed to be able to convert Embedding Gemma models to GGUF format:
+git+https://github.com/huggingface/transformers@v4.56.0-Embedding-Gemma-preview
+
+# Once Embedding Gemma is officially released, we can switch to:
+#transformers>=4.57.1,<5.0.0
+
 gguf>=0.1.0
 protobuf>=4.21.0,<5.0.0
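
The git tag pin above is what pip calls a direct reference: instead of
resolving a PyPI release, pip clones the repository at
v4.56.0-Embedding-Gemma-preview and builds from it. A sketch of the
equivalent PEP 508 `name @ url` form, parsed with `packaging`:

    from packaging.requirements import Requirement

    # direct reference: a package name plus the VCS URL pip will clone
    req = Requirement(
        "transformers @ git+https://github.com/huggingface/transformers"
        "@v4.56.0-Embedding-Gemma-preview"
    )
    print(req.name, req.url)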
diff --git a/requirements/requirements-tool_bench.txt b/requirements/requirements-tool_bench.txt
index b94521fc7fa728a4e72268f25c2947f977de4353..f7912aff724f303e89a9866b7659b268d29deee2 100644
@@ -1,6 +1,6 @@
 aiohttp~=3.9.3
 pytest~=8.3.3
-huggingface_hub~=0.23.2
+huggingface_hub>=0.34.0,<1.0
 matplotlib~=3.10.0
 numpy~=1.26.4
 openai~=1.55.3
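
The huggingface_hub change is more than a newer floor: `~=0.23.2` caps
the acceptable range at the 0.23 series, while `>=0.34.0,<1.0` accepts
any 0.x release from 0.34.0 upward. A small illustration with
`packaging.specifiers`:

    from packaging.specifiers import SpecifierSet

    old = SpecifierSet("~=0.23.2")     # shorthand for >=0.23.2, ==0.23.*
    new = SpecifierSet(">=0.34.0,<1.0")
    print("0.34.0" in old, "0.34.0" in new)  # False True
    print("0.23.5" in old, "0.23.5" in new)  # True False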
diff --git a/tests/test-tokenizer-random.py b/tests/test-tokenizer-random.py
index c6cdcb55482e7df8cb239f8e7e68bc68db903289..93e697607e6689a32284a8a7c9f772f8b4cd85fc 100644
@@ -421,10 +421,10 @@ def compare_tokenizers(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLl
         if text1 == text2:  # equal to TokenizerGroundtruth?
             return True
         # equal to source text?
-        if tokenizer1.add_bos_token:  # remove BOS
+        if tokenizer1.add_bos_token and tokenizer1.bos_token and isinstance(tokenizer1.bos_token, str):  # remove BOS
             if text2.startswith(tokenizer1.bos_token):
                 text2 = text2[len(tokenizer1.bos_token):]
-        if tokenizer1.add_eos_token:  # remove EOS
+        if tokenizer1.add_eos_token and tokenizer1.eos_token and isinstance(tokenizer1.eos_token, str):  # remove EOS
             if text2.endswith(tokenizer1.eos_token):
                 text2 = text2[:-len(tokenizer1.eos_token)]
         return text == text2
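
The added guards address the pyright errors mentioned in the commit
message: `bos_token`/`eos_token` on Hugging Face tokenizers are typed
as optional and may be an `AddedToken` rather than a plain `str`, so
`str.startswith`/`str.endswith` do not type-check without narrowing. A
self-contained sketch of the same pattern (names are illustrative, not
the test's actual API):

    from typing import Optional, Union

    class AddedToken:  # stand-in for tokenizers.AddedToken
        pass

    def strip_bos(text: str, bos_token: Optional[Union[str, AddedToken]]) -> str:
        # the isinstance check narrows bos_token to str for pyright
        if bos_token and isinstance(bos_token, str):
            if text.startswith(bos_token):
                return text[len(bos_token):]
        return text

    print(strip_bos("<s>hello", "<s>"))  # hello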
diff --git a/tools/mtmd/legacy-models/minicpmv-convert-image-encoder-to-gguf.py b/tools/mtmd/legacy-models/minicpmv-convert-image-encoder-to-gguf.py
index f34d858d675bcce811dca54bcc1f3c162e64edac..bb2cc4e4ea52ae0a4b911418ff67c178ab51120a 100644
@@ -23,7 +23,6 @@ import warnings
 import numpy as np
 import torch
 import torch.nn.functional as F
-import torch.utils.checkpoint
 from torch import nn
 from torch.nn.init import _calculate_fan_in_and_fan_out
 
@@ -413,7 +412,8 @@ import re
 
 import numpy as np
 from gguf import *
-from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer, Idefics2VisionConfig
+from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer
+from transformers.models.idefics2.configuration_idefics2 import Idefics2VisionConfig
 
 TEXT = "clip.text"
 VISION = "clip.vision"
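
The import is split because `Idefics2VisionConfig` lives in the
`configuration_idefics2` module and is no longer importable from
`modeling_idefics2` in the newer transformers release pinned above. A
minimal sketch of the corrected imports (the constructor call uses
default arguments for illustration; the converter builds its config
from the checkpoint):

    from transformers.models.idefics2.configuration_idefics2 import Idefics2VisionConfig
    from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer

    # instantiating via the configuration module works on the pinned release
    config = Idefics2VisionConfig()
    model = Idefics2VisionTransformer(config)
    print(config.hidden_size)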
diff --git a/tools/mtmd/requirements.txt b/tools/mtmd/requirements.txt
index a9d788f2653ad012c9f1777b619abdb1428d91f2..0a1f4e8647765c0c975f067221301205dc5a061f 100644
@@ -1,5 +1,5 @@
 -r ../../requirements/requirements-convert_legacy_llama.txt
 --extra-index-url https://download.pytorch.org/whl/cpu
 pillow~=11.3.0
-torch~=2.4.0
-torchvision~=0.19.1
+torch~=2.6.0
+torchvision~=0.21.0
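
torch and torchvision are released in lockstep, which is why both pins
move together here: the 2.6.x torch series pairs with torchvision
0.21.x. A quick runtime check:

    import torch
    import torchvision

    # matched pair expected: torch 2.6.x with torchvision 0.21.x
    print(torch.__version__, torchvision.__version__)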
diff --git a/tools/server/tests/requirements.txt b/tools/server/tests/requirements.txt
index 15d024914e8412a414f367c732e14042f87e640e..4ea7f19f77fdbed7795a45617c577ac84ad37e05 100644
@@ -1,6 +1,6 @@
 aiohttp~=3.9.3
 pytest~=8.3.3
-huggingface_hub~=0.23.2
+huggingface_hub>=0.34.0,<1.0
 numpy~=1.26.4
 openai~=1.55.3
 prometheus-client~=0.20.0