* requirements : update transformers/torch for Embedding Gemma
This commit updates the requirements to support converting
Embedding Gemma 300m models.
The motivation for this change is that during development I used a
local copy of the transformers package to convert the models. This was
a mistake on my part: I should also have updated my transformers
version to the official release.
I had checked the requirements/requirements-convert_legacy_llama.txt
file, noted that the version constraint was >=4.45.1,<5.0.0, and came
to the conclusion that no update would be needed. This assumed that
Embedding Gemma would be included in an official transformers release
by the time commit fb15d649ed14ab447eeab911e0c9d21e35fb243e ("llama :
add support for EmbeddingGemma 300m (#15798)") was merged, so that
anyone wanting to convert the models themselves would be able to do
so. However, Embedding Gemma is currently only available in a preview
release of transformers, and this commit updates the requirements to
use that preview release.
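
For reference, after installing the updated requirements, the new
minimum versions can be sanity-checked from Python. This is a minimal
sketch; the exact version string reported by the preview tag is an
assumption:

    # Rough check that the pinned versions are active. The preview tag
    # v4.56.0-Embedding-Gemma-preview is assumed to report a 4.56.x
    # version string; torch must be 2.6.0 or later.
    import torch
    import transformers

    print("torch:", torch.__version__)
    print("transformers:", transformers.__version__)
    assert transformers.__version__.startswith("4.56")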
* resolve additional python dependencies
* fix pyright errors in tokenizer test and remove unused import
-r ./requirements-convert_legacy_llama.txt
--extra-index-url https://download.pytorch.org/whl/cpu
-torch~=2.4.0; platform_machine != "s390x"
+
+## Embedding Gemma requires PyTorch 2.6.0 or later
+torch~=2.6.0; platform_machine != "s390x"
# torch s390x packages can only be found from nightly builds
--extra-index-url https://download.pytorch.org/whl/nightly
numpy~=1.26.4
sentencepiece~=0.2.0
-transformers>=4.45.1,<5.0.0
+
+# Embedding Gemma is currently a preview release:
+# https://github.com/huggingface/transformers/releases/tag/v4.56.0-Embedding-Gemma-preview
+
+# This version is needed to be able to convert Embedding Gemma models to GGUF format:
+git+https://github.com/huggingface/transformers@v4.56.0-Embedding-Gemma-preview
+
+# Once Embedding Gemma is officially released, we can switch to:
+#transformers>=4.57.1,<5.0.0
+
gguf>=0.1.0
protobuf>=4.21.0,<5.0.0
aiohttp~=3.9.3
pytest~=8.3.3
-huggingface_hub~=0.23.2
+huggingface_hub>=0.34.0,<1.0
matplotlib~=3.10.0
numpy~=1.26.4
openai~=1.55.3
if text1 == text2: # equal to TokenizerGroundtruth?
return True
# equal to source text?
- if tokenizer1.add_bos_token: # remove BOS
+ if tokenizer1.add_bos_token and tokenizer1.bos_token and isinstance(tokenizer1.bos_token, str): # remove BOS
if text2.startswith(tokenizer1.bos_token):
text2 = text2[len(tokenizer1.bos_token):]
- if tokenizer1.add_eos_token: # remove EOS
+ if tokenizer1.add_eos_token and tokenizer1.eos_token and isinstance(tokenizer1.eos_token, str): # remove EOS
if text2.endswith(tokenizer1.eos_token):
text2 = text2[:-len(tokenizer1.eos_token)]
return text == text2
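
These guards fix the pyright errors: bos_token and eos_token can be
None (and are not guaranteed to be plain strings), so calling str
methods on them unconditionally does not type-check. A standalone
sketch of the same stripping logic, with a hypothetical helper name:

    # Hypothetical helper mirroring the guarded BOS/EOS stripping above.
    # bos/eos may be None or a non-str token object, so a truthiness and
    # an isinstance check precede the str operations.
    def strip_special_tokens(text: str, bos, eos, add_bos: bool, add_eos: bool) -> str:
        if add_bos and bos and isinstance(bos, str) and text.startswith(bos):
            text = text[len(bos):]
        if add_eos and eos and isinstance(eos, str) and text.endswith(eos):
            text = text[:-len(eos)]
        return text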
import numpy as np
import torch
import torch.nn.functional as F
-import torch.utils.checkpoint
from torch import nn
from torch.nn.init import _calculate_fan_in_and_fan_out
import numpy as np
from gguf import *
-from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer, Idefics2VisionConfig
+from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer
+from transformers.models.idefics2.configuration_idefics2 import Idefics2VisionConfig
TEXT = "clip.text"
VISION = "clip.vision"
-r ../../requirements/requirements-convert_legacy_llama.txt
--extra-index-url https://download.pytorch.org/whl/cpu
pillow~=11.3.0
-torch~=2.4.0
-torchvision~=0.19.1
+torch~=2.6.0
+torchvision~=0.21.0
aiohttp~=3.9.3
pytest~=8.3.3
-huggingface_hub~=0.23.2
+huggingface_hub>=0.34.0,<1.0
numpy~=1.26.4
openai~=1.55.3
prometheus-client~=0.20.0