import os
import re
import sys
-from abc import ABC, abstractmethod
from enum import IntEnum
from pathlib import Path
from hashlib import sha256
-from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterator, Sequence, TypeVar, cast
+from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Sequence, TypeVar, cast, overload
import numpy as np
import torch
sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf
-from convert import LlamaHfVocab, permute
+from convert import LlamaHfVocab
logger = logging.getLogger("hf-to-gguf")
AnyModel = TypeVar("AnyModel", bound="type[Model]")
-class Model(ABC):
+class Model:
_model_classes: dict[str, type[Model]] = {}
- def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool):
+ dir_model: Path
+ ftype: int
+ fname_out: Path
+ is_big_endian: bool
+ endianess: gguf.GGUFEndian
+ use_temp_file: bool
+ lazy: bool
+ part_names: list[str]
+ is_safetensors: bool
+ hparams: dict[str, Any]
+ gguf_writer: gguf.GGUFWriter
+ block_count: int
+ tensor_map: gguf.TensorNameMap
+ tensor_names: set[str] | None
+
+ # subclasses should define this!
+ model_arch: gguf.MODEL_ARCH
+
+ def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool, eager: bool):
+ if self.__class__ == Model:
+ raise TypeError(f"{self.__class__.__name__!r} should not be directly instantiated")
self.dir_model = dir_model
self.ftype = ftype
self.fname_out = fname_out
self.is_big_endian = is_big_endian
self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
self.use_temp_file = use_temp_file
- self.is_safetensors = self._is_model_safetensors()
- self.num_parts = Model.count_model_parts(self.dir_model, ".safetensors" if self.is_safetensors else ".bin")
- self.part_names = self._get_part_names()
+ self.lazy = not eager
+ self.part_names = Model.get_model_part_names(self.dir_model, ".safetensors")
+ self.is_safetensors = len(self.part_names) > 0
+ if not self.is_safetensors:
+ self.part_names = Model.get_model_part_names(self.dir_model, ".bin")
self.hparams = Model.load_hparams(self.dir_model)
self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file)
self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer"])
+ self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
+ self.tensor_names = None
- @property
- @abstractmethod
- def model_arch(self) -> gguf.MODEL_ARCH:
- pass
+ @classmethod
+ def __init_subclass__(cls):
+ # can't use an abstract property, because overriding it without type errors
+ # would require using decorated functions instead of simply defining the property
+ if "model_arch" not in cls.__dict__:
+ raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")
- def find_hparam(self, keys: Sequence[str], optional: bool = False) -> Any:
+ def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
key = next((k for k in keys if k in self.hparams), None)
if key is not None:
return self.hparams[key]
self._set_vocab_gpt2()
def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
+ tensor_names_from_parts: set[str] = set()
+
+ if len(self.part_names) > 1:
+ self.tensor_names = set()
+ index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin"
+ index_name += ".index.json"
+ logger.info(f"gguf: loading model weight map from '{index_name}'")
+ with open(self.dir_model / index_name, "r", encoding="utf-8") as f:
+ index: dict[str, Any] = json.load(f)
+ weight_map = index.get("weight_map")
+ if weight_map is None or not isinstance(weight_map, dict):
+ raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
+ self.tensor_names.update(weight_map.keys())
+ else:
+ self.tensor_names = tensor_names_from_parts
+
for part_name in self.part_names:
logger.info(f"gguf: loading model part '{part_name}'")
ctx: ContextManager[Any]
ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True))
with ctx as model_part:
+ tensor_names_from_parts.update(model_part.keys())
+
for name in model_part.keys():
data = model_part.get_tensor(name) if self.is_safetensors else model_part[name]
+ if self.lazy:
+ data = LazyTorchTensor.from_eager(data)
yield name, data
+ # only verify tensor name presence; it doesn't matter if they are not in the right files
+ if len(sym_diff := tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0:
+ raise ValueError(f"Mismatch between weight map and model parts for tensor names: {sym_diff}")
+
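For orientation, a hedged sketch of the index file read above when the model ships as multiple parts; the layout follows the usual Hugging Face sharded-checkpoint convention and the tensor/shard names are made up:

example_index = {
    "metadata": {"total_size": 0},  # placeholder; real indexes carry the total byte count
    "weight_map": {
        # tensor name -> shard file that contains it (entries invented for this sketch)
        "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
        "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
        "lm_head.weight": "model-00002-of-00002.safetensors",
    },
}
# get_tensors() records weight_map.keys() in self.tensor_names and later verifies that the
# union of names actually found in the shards matches that set exactly.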
+ def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
+ name: str = gguf.TENSOR_NAMES[key]
+ if key not in gguf.MODEL_TENSORS[self.model_arch]:
+ raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
+ if "{bid}" in name:
+ assert bid is not None
+ name = name.format(bid=bid)
+ return name + suffix
+
+ def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
+ new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
+ if new_name is None:
+ raise ValueError(f"Can not map tensor {name!r}")
+ return new_name
+
def set_gguf_parameters(self):
self.gguf_writer.add_name(self.dir_model.name)
self.gguf_writer.add_block_count(self.block_count)
self.gguf_writer.add_file_type(self.ftype)
logger.info(f"gguf: file type = {self.ftype}")
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
+
+ return [(self.map_tensor_name(name), data_torch)]
+
+ def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+ del name, new_name, bid, n_dims # unused
+
+ return False
+
+ def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+ del name, new_name, bid, n_dims # unused
+
+ return False
+
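To show how the hooks above are meant to be used together, here is a hedged sketch of a hypothetical converter subclass; the dropped buffer suffix and the F32-forced tensor name are assumptions, not taken from any real model handler:

class ExampleModel(Model):  # illustration only
    model_arch = gguf.MODEL_ARCH.LLAMA

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # skip a buffer the GGUF file does not need, rename everything else via the tensor map
        if name.endswith(".rotary_emb.inv_freq"):
            return []
        return [(self.map_tensor_name(name), data_torch)]

    def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
        del name, bid, n_dims  # unused in this sketch
        # keep MoE routing weights in F32 even when F16 output was requested
        return new_name.endswith("ffn_gate_inp.weight")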
def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+ max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")
+
for name, data_torch in self.get_tensors():
# we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
+ if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
continue
old_dtype = data_torch.dtype
if data_torch.dtype not in (torch.float16, torch.float32):
data_torch = data_torch.to(torch.float32)
- data = data_torch.squeeze().numpy()
+ # use the first number-like part of the tensor name as the block id
+ bid = None
+ for part in name.split("."):
+ if part.isdecimal():
+ bid = int(part)
+ break
+
+ for new_name, data in ((n, d.squeeze().numpy()) for n, d in self.modify_tensors(data_torch, name, bid)):
+ data: np.ndarray = data # type hint
+ n_dims = len(data.shape)
+ data_dtype = data.dtype
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
+ # if f32 desired, convert any float16 to float32
+ if self.ftype == 0 and data_dtype == np.float16:
+ data = data.astype(np.float32)
- n_dims = len(data.shape)
- data_dtype = data.dtype
+ # when both are True, f32 should win
+ extra_f32 = self.extra_f32_tensors(name, new_name, bid, n_dims)
+ extra_f16 = self.extra_f16_tensors(name, new_name, bid, n_dims)
+
+ # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
+ extra_f32 = extra_f32 or n_dims == 1 or new_name.endswith("_norm.weight")
+
+ # if f16 desired, convert any float32 weight tensor with 2 or more dims to float16
+ extra_f16 = extra_f16 or (name.endswith(".weight") and n_dims >= 2)
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ # when both extra_f32 and extra_f16 are False, convert to float32 by default
+ if self.ftype == 1 and data_dtype == np.float16 and (extra_f32 or not extra_f16):
+ data = data.astype(np.float32)
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
- data = data.astype(np.float32)
+ if self.ftype == 1 and data_dtype == np.float32 and extra_f16 and not extra_f32:
+ data = data.astype(np.float16)
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
+ # reverse shape to make it similar to the internal ggml dimension order
+ shape_str = f"{{{', '.join(str(n) for n in reversed(data.shape))}}}"
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+ # n_dims is implicit in the shape
+ logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data.dtype}, shape = {shape_str}")
- self.gguf_writer.add_tensor(new_name, data)
+ self.gguf_writer.add_tensor(new_name, data)
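To make the precedence above easier to audit, here is a standalone sketch (not used by the script) of the final dtype for tensors that are already float16 or float32, assuming ftype 0 means F32 output and ftype 1 means F16 output:

def choose_dtype(ftype: int, extra_f32: bool, extra_f16: bool) -> type:
    # F32 wins whenever both flags end up set; otherwise follow the requested output type
    if ftype == 1 and extra_f16 and not extra_f32:
        return np.float16
    return np.float32

assert choose_dtype(1, extra_f32=True, extra_f16=True) is np.float32    # both set: f32 wins
assert choose_dtype(1, extra_f32=False, extra_f16=True) is np.float16   # ordinary 2D .weight
assert choose_dtype(1, extra_f32=True, extra_f16=False) is np.float32   # 1D tensors and norms
assert choose_dtype(0, extra_f32=False, extra_f16=True) is np.float32   # F32 output requested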
def write(self):
self.write_tensors()
self.gguf_writer.write_header_to_file()
self.gguf_writer.write_kv_data_to_file()
- self.gguf_writer.write_tensors_to_file()
+ self.gguf_writer.write_tensors_to_file(progress=True)
self.gguf_writer.close()
def write_vocab(self):
self.gguf_writer.close()
@staticmethod
- def count_model_parts(dir_model: Path, prefix: str) -> int:
- num_parts = 0
+ def get_model_part_names(dir_model: Path, suffix: str) -> list[str]:
+ part_names: list[str] = []
for filename in os.listdir(dir_model):
- if filename.endswith(prefix):
- num_parts += 1
+ if filename.endswith(suffix):
+ part_names.append(filename)
- return num_parts
+ part_names.sort()
+
+ return part_names
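A small usage sketch (shard names follow the common Hugging Face pattern but are otherwise invented) showing that only files with the requested suffix are returned, in sorted order:

import tempfile

with tempfile.TemporaryDirectory() as tmp:
    for fname in ("model-00002-of-00002.safetensors", "model-00001-of-00002.safetensors", "config.json"):
        (Path(tmp) / fname).touch()
    parts = Model.get_model_part_names(Path(tmp), ".safetensors")
    assert parts == ["model-00001-of-00002.safetensors", "model-00002-of-00002.safetensors"]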
@staticmethod
- def load_hparams(dir_model):
+ def load_hparams(dir_model: Path):
with open(dir_model / "config.json", "r", encoding="utf-8") as f:
return json.load(f)
def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
assert names
- def func(modelcls: type[Model]):
+ def func(modelcls: AnyModel) -> AnyModel:
for name in names:
cls._model_classes[name] = modelcls
return modelcls
return func
@classmethod
- def from_model_architecture(cls, arch):
+ def from_model_architecture(cls, arch: str) -> type[Model]:
try:
return cls._model_classes[arch]
except KeyError:
raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
- def _is_model_safetensors(self) -> bool:
- return Model.count_model_parts(self.dir_model, ".safetensors") > 0
-
- def _get_part_names(self):
- if self.is_safetensors:
- if self.num_parts == 1: # there's only one .safetensors file
- return ("model.safetensors",)
- return (f"model-{n:05}-of-{self.num_parts:05}.safetensors" for n in range(1, self.num_parts + 1))
-
- if self.num_parts == 1: # there's only one .bin file
- return ("pytorch_model.bin",)
- return (f"pytorch_model-{n:05}-of-{self.num_parts:05}.bin" for n in range(1, self.num_parts + 1))
-
# used for GPT-2 BPE and WordPiece vocabs
def get_vocab_base(self) -> tuple[list[str], list[int], str]:
tokens: list[str] = []
if not tokenizer_path.is_file():
raise FileNotFoundError(f"File not found: {tokenizer_path}")
- tokenizer = SentencePieceProcessor(str(tokenizer_path))
+ tokenizer = SentencePieceProcessor()
+ tokenizer.LoadFromFile(str(tokenizer_path))
+
vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
for token_id in range(tokenizer.vocab_size()):
- piece = tokenizer.id_to_piece(token_id)
+ piece = tokenizer.IdToPiece(token_id)
text = piece.encode("utf-8")
- score = tokenizer.get_score(token_id)
+ score = tokenizer.GetScore(token_id)
toktype = SentencePieceTokenTypes.NORMAL
- if tokenizer.is_unknown(token_id):
+ if tokenizer.IsUnknown(token_id):
toktype = SentencePieceTokenTypes.UNKNOWN
- elif tokenizer.is_control(token_id):
+ elif tokenizer.IsControl(token_id):
toktype = SentencePieceTokenTypes.CONTROL
- elif tokenizer.is_unused(token_id):
+ elif tokenizer.IsUnused(token_id):
toktype = SentencePieceTokenTypes.UNUSED
- elif tokenizer.is_byte(token_id):
+ elif tokenizer.IsByte(token_id):
toktype = SentencePieceTokenTypes.BYTE
tokens.append(text)
pad_count = vocab_size - len(tokens)
logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
for i in range(1, pad_count + 1):
- tokens.append(f"[PAD{i}]")
+ tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
scores.append(-1000.0)
toktypes.append(SentencePieceTokenTypes.UNUSED)
self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
self.gguf_writer.add_file_type(self.ftype)
- def write_tensors(self):
- block_count = self.hparams["n_layer"]
- tensors = dict(self.get_tensors())
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- has_lm_head = True
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
+
n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
- for name, data_torch in tensors.items():
- if "lm_head.weight" not in tensors.keys() and "output.weight" not in tensors.keys():
- has_lm_head = False
+ name = re.sub(r'transformer\.', '', name)
+
+ tensors: list[tuple[str, Tensor]] = []
+
+ if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
+ # Map bloom-style qkv_linear to gpt-style qkv_linear
+ # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
+ # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
+ qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
+ data_torch = torch.cat(
+ (
+ qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
+ qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
+ qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
+ ),
+ dim=0,
+ )
+ logger.info("re-format attention.linear_qkv.weight")
+ elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
+ qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
+ data_torch = torch.cat(
+ (
+ qkv_bias[:, 0, :].reshape((n_embed,)),
+ qkv_bias[:, 1, :].reshape((n_embed,)),
+ qkv_bias[:, 2, :].reshape((n_embed,)),
+ ),
+ dim=0,
+ )
+ logger.info("re-format attention.linear_qkv.bias")
- name = re.sub(r'transformer\.', '', name)
+ tensors.append((self.map_tensor_name(name), data_torch))
- old_dtype = data_torch.dtype
+ if name == "word_embeddings.weight":
+ assert self.tensor_names is not None
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ # TODO: tie them at runtime, don't duplicate in the model file
+ if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
- data = data_torch.squeeze().numpy()
-
- if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
- # Map bloom-style qkv_linear to gpt-style qkv_linear
- # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
- # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
- qkv_weights = data.reshape((n_head, 3, n_embed // n_head, n_embed))
- data = np.concatenate(
- (
- qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
- qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
- qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
- ),
- axis=0,
- )
- logger.info("re-format attention.linear_qkv.weight")
- elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
- qkv_bias = data.reshape((n_head, 3, n_embed // n_head))
- data = np.concatenate(
- (
- qkv_bias[:, 0, :].reshape((n_embed,)),
- qkv_bias[:, 1, :].reshape((n_embed,)),
- qkv_bias[:, 2, :].reshape((n_embed,)),
- ),
- axis=0,
- )
- logger.info("re-format attention.linear_qkv.bias")
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- logger.info(f"=> {new_name}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
-
- if not has_lm_head and name == "word_embeddings.weight":
- self.gguf_writer.add_tensor("output.weight", data)
- logger.info(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+ return tensors
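A toy-sized sketch (dimensions invented) of the Bloom-to-GPT-2 QKV reordering performed above: the per-head [q, k, v] blocks are regrouped into all queries, then all keys, then all values:

n_head, head_dim = 2, 3
n_embed = n_head * head_dim
fused = torch.arange(3 * n_embed * n_embed, dtype=torch.float32).reshape(3 * n_embed, n_embed)
grouped = fused.reshape(n_head, 3, n_embed // n_head, n_embed)
reordered = torch.cat([grouped[:, i, :, :].reshape(-1, n_embed) for i in range(3)], dim=0)
assert reordered.shape == fused.shape  # rows now laid out as [all Q | all K | all V]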
@Model.register("MPTForCausalLM")
else:
self.gguf_writer.add_max_alibi_bias(0.0)
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers"))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
- continue
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- if "scales" in name:
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias", ".scales"))
- if new_name is not None:
- new_name = new_name.replace("scales", "act.scales")
- else:
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+ if "scales" in name:
+ new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales"))
+ new_name = new_name.replace("scales", "act.scales")
+ else:
+ new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias"))
- self.gguf_writer.add_tensor(new_name, data)
+ return [(new_name, data_torch)]
@Model.register("OrionForCausalLM")
# ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571
self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"])
- def write_tensors(self):
- # Collect tensors from generator object
- model_kv = dict(self.get_tensors())
- block_count = self.hparams["num_hidden_layers"]
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
- for name, data_torch in model_kv.items():
- # we don't need these
- if name.endswith(".rotary_emb.inv_freq"):
- continue
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- logger.info(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor(new_name, data)
-
@Model.register("BaichuanForCausalLM", "BaiChuanForCausalLM")
class BaichuanModel(Model):
self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
- def write_tensors(self):
- # Collect tensors from generator object
- model_kv = dict(self.get_tensors())
- block_count = self.hparams["num_hidden_layers"]
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
head_count = self.hparams["num_attention_heads"]
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
head_count_kv = self.hparams.get("num_key_value_heads", head_count)
- for i in range(block_count):
- if (w := model_kv.get(f"model.layers.{i}.self_attn.W_pack.weight")) is not None:
- logger.info(f"Unpacking and permuting layer {i}")
- model_kv[f"model.layers.{i}.self_attn.q_proj.weight"] = \
- self._reverse_hf_permute_part(w, 0, head_count, head_count)
- model_kv[f"model.layers.{i}.self_attn.k_proj.weight"] = \
- self._reverse_hf_permute_part(w, 1, head_count, head_count_kv)
- model_kv[f"model.layers.{i}.self_attn.v_proj.weight"] = \
- self._reverse_hf_part(w, 2)
- del model_kv[f"model.layers.{i}.self_attn.W_pack.weight"]
-
- for name, data_torch in model_kv.items():
- # we don't need these
- if name.endswith(".rotary_emb.inv_freq"):
- continue
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
+ tensors: list[tuple[str, Tensor]] = []
+
+ if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight":
+ logger.info(f"Unpacking and permuting layer {bid}")
+ tensors = [
+ (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid),
+ self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)),
+ (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid),
+ self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)),
+ (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid),
+ self._reverse_hf_part(data_torch, 2)),
+ ]
+ else:
+ tensors = [(self.map_tensor_name(name), data_torch)]
- logger.info(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor(new_name, data)
+ return tensors
def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
if n_kv_head is not None and n_head != n_kv_head:
dir_model = self.dir_model
hparams = self.hparams
- tokens: list[bytearray] = []
+ tokens: list[bytes] = []
toktypes: list[int] = []
from transformers import AutoTokenizer
vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
assert max(tokenizer.vocab.values()) < vocab_size
- reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
+ reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
added_vocab = tokenizer.get_added_vocab()
for token_id in range(vocab_size):
self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
- def write_tensors(self):
- # Collect tensors from generator object
- model_kv = dict(self.get_tensors())
- block_count = self.hparams["num_hidden_layers"]
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
+
head_count = self.hparams["num_attention_heads"]
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
head_count_kv = self.hparams.get("num_key_value_heads", head_count)
- for name, data_torch in model_kv.items():
- # we don't need these
- if name.endswith(".rotary_emb.inv_freq"):
- continue
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- # HF models permute some of the tensors, so we need to undo that
- if name.endswith(("q_proj.weight")):
- data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
- if name.endswith(("k_proj.weight")):
- data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
+ # HF models permute some of the tensors, so we need to undo that
+ if name.endswith("q_proj.weight"):
+ data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
+ if name.endswith("k_proj.weight"):
+ data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- logger.info(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor(new_name, data)
+ return [(self.map_tensor_name(name), data_torch)]
def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
if n_kv_head is not None and n_head != n_kv_head:
self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
self.gguf_writer.add_file_type(self.ftype)
- def write_tensors(self):
- block_count = self.hparams.get("num_hidden_layers")
- if block_count is None:
- block_count = self.hparams["n_layer"] # old name
-
- n_head = self.hparams.get("num_attention_heads")
- if n_head is None:
- n_head = self.hparams["n_head"] # old name
-
- n_head_kv = self.hparams.get("num_kv_heads")
- if n_head_kv is None:
- n_head_kv = self.hparams.get("n_head_kv", 1) # old name
-
- head_dim = self.hparams["hidden_size"] // n_head
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
- for name, data_torch in self.get_tensors():
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- # QKV tensor transform
- # The original query_key_value tensor contains n_head_kv "kv groups",
- # each consisting of n_head/n_head_kv query weights followed by one key
- # and one value weight (shared by all query heads in the kv group).
- # This layout makes it a big pain to work with in GGML.
- # So we rearrange them here,, so that we have n_head query weights
- # followed by n_head_kv key weights followed by n_head_kv value weights,
- # in contiguous fashion.
- # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
-
- if "query_key_value" in name:
- qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
- q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
- k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
- v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
- data_torch = torch.cat((q, k, v)).reshape_as(data_torch)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- n_dims = len(data.shape)
- data_dtype = data.dtype
+ # QKV tensor transform
+ # The original query_key_value tensor contains n_head_kv "kv groups",
+ # each consisting of n_head/n_head_kv query weights followed by one key
+ # and one value weight (shared by all query heads in the kv group).
+ # This layout makes it a big pain to work with in GGML.
+ # So we rearrange them here, so that we have n_head query weights
+ # followed by n_head_kv key weights followed by n_head_kv value weights,
+ # in a contiguous fashion.
+ # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ if "query_key_value" in name:
+ n_head = self.find_hparam(["num_attention_heads", "n_head"])
+ n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1
+ head_dim = self.hparams["hidden_size"] // n_head
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
+ qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
+ q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
+ k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
+ v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
+ data_torch = torch.cat((q, k, v)).reshape_as(data_torch)
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ return [(self.map_tensor_name(name), data_torch)]
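A toy-sized sketch (head counts invented) of the regrouping above: each kv group's [q..., k, v] blocks are pulled apart and laid out contiguously as all queries, then all keys, then all values:

n_head, n_head_kv, head_dim = 4, 2, 2
width = head_dim * n_head  # matches the last dimension of the view above
fused = torch.zeros((n_head + 2 * n_head_kv) * head_dim, width)
qkv = fused.view(n_head_kv, n_head // n_head_kv + 2, head_dim, width)
q = qkv[:, :-2].reshape(n_head * head_dim, width)
k = qkv[:, [-2]].reshape(n_head_kv * head_dim, width)
v = qkv[:, [-1]].reshape(n_head_kv * head_dim, width)
assert torch.cat((q, k, v)).shape == fused.shape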
@Model.register("GPTBigCodeForCausalLM")
self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
self.gguf_writer.add_file_type(self.ftype)
- def write_tensors(self):
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
hidden_dim = self.hparams["n_embd"]
inner_dim = 4 * hidden_dim
hidden_dim = int(2 * inner_dim / 3)
multiple_of = 256
ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
n_head = self.hparams["n_head"]
n_head_kv = 1
head_dim = self.hparams["n_embd"] // n_head
- block_count = self.hparams["n_layer"]
-
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
- tensors = dict(self.get_tensors())
- for i in range(block_count):
- if (w := tensors.get(f"transformer.h.{i}.attn.kv.weight")) is not None:
- tensors[f"model.layers.{i}.self_attn.k_proj.weight"] = w[:n_head_kv * head_dim]
- tensors[f"model.layers.{i}.self_attn.v_proj.weight"] = w[n_head_kv * head_dim:]
- del tensors[f"transformer.h.{i}.attn.kv.weight"]
- if (w := tensors.get(f"transformer.h.{i}.attn.q.weight")) is not None:
- tensors[f"model.layers.{i}.self_attn.q_proj.weight"] = w
- del tensors[f"transformer.h.{i}.attn.q.weight"]
- if (w := tensors.get(f"transformer.h.{i}.mlp.gate_up_proj.weight")) is not None:
- tensors[f"model.layers.{i}.mlp.gate_proj.weight"] = w[:ff_dim]
- tensors[f"model.layers.{i}.mlp.up_proj.weight"] = w[ff_dim:]
- del tensors[f"transformer.h.{i}.mlp.gate_up_proj.weight"]
-
- for name, data_torch in tensors.items():
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight",))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ tensors: list[tuple[str, Tensor]] = []
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
+ if bid is not None:
+ if name == f"transformer.h.{bid}.attn.kv.weight":
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim]))
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:]))
+ elif name == f"transformer.h.{bid}.attn.q.weight":
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch))
+ elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight":
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]))
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]))
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
+ if len(tensors) == 0:
+ tensors.append((self.map_tensor_name(name), data_torch))
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ return tensors
@Model.register("PersimmonForCausalLM")
# self.gguf_writer.add_bos_token_id(71013)
# self.gguf_writer.add_eos_token_id(71013)
- def write_tensors(self):
- block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers"))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+ def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+ del name, new_name, bid, n_dims # unused
- for name, data_torch in self.get_tensors():
- if name.endswith(".self_attention.rotary_emb.inv_freq"):
- continue
- old_dtype = data_torch.dtype
- # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?)
- data = data_torch.to(torch.float32).squeeze().numpy()
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
- n_dims = len(data.shape)
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor(new_name, data)
+ # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?)
+ return True
@Model.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM")
self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"]))
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- n_head = self.hparams.get("num_attention_heads")
- n_kv_head = self.hparams.get("num_key_value_heads")
- q_norms = dict()
- k_norms = dict()
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
- continue
+ _q_norms: list[dict[str, Tensor]] | None = None
+ _k_norms: list[dict[str, Tensor]] | None = None
- old_dtype = data_torch.dtype
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ n_head = self.hparams["num_attention_heads"]
+ n_kv_head = self.hparams["num_key_value_heads"]
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ if name.find("q_layernorm.norms") != -1:
+ assert bid is not None
- data = data_torch.squeeze().numpy()
- n_dims = len(data.shape)
- if name.find("q_layernorm.norms") != -1:
- q_norms[name] = data
- if len(q_norms) >= (block_count * n_head):
- self._stack_qk_norm(block_count, name, tensor_map, n_head, q_norms, n_dims, layer_name="q_layernorm")
- continue
- if name.find("k_layernorm.norms") != -1:
- k_norms[name] = data
- if len(k_norms) >= (block_count * n_kv_head):
- self._stack_qk_norm(block_count, name, tensor_map, n_kv_head, k_norms, n_dims, layer_name="k_layernorm")
- continue
+ if self._q_norms is None:
+ self._q_norms = [{} for _ in range(self.block_count)]
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
+ self._q_norms[bid][name] = data_torch
- n_dims = len(data.shape)
- data_dtype = data.dtype
+ if len(self._q_norms[bid]) >= n_head:
+ return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm")
+ else:
+ return []
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ if name.find("k_layernorm.norms") != -1:
+ assert bid is not None
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
- data = data.astype(np.float32)
+ if self._k_norms is None:
+ self._k_norms = [{} for _ in range(self.block_count)]
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2:
- data = data.astype(np.float16)
+ self._k_norms[bid][name] = data_torch
- logger.debug(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+ if len(self._k_norms[bid]) >= n_kv_head:
+ return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm")
+ else:
+ return []
- self.gguf_writer.add_tensor(new_name, data)
+ return [(self.map_tensor_name(name), data_torch)]
- def _stack_qk_norm(self, block_count, name, tensor_map, n_head, norms, n_dims, layer_name="q_layernorm"):
- for bid in range(block_count):
- datas = []
- for xid in range(n_head):
- ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
- datas.append(norms[ename])
- del norms[ename]
- data = np.stack(datas, axis=0)
- data_dtype = data.dtype
- merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
- new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
- if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
- data = data.astype(np.float32)
+ def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"):
+ datas: list[Tensor] = []
+ # extract the norms in order
+ for xid in range(n_head):
+ ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
+ datas.append(norms[ename])
+ del norms[ename]
+ data_torch = torch.stack(datas, dim=0)
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2:
- data = data.astype(np.float16)
+ merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
+ new_name = self.map_tensor_name(merged_name)
- logger.debug(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+ return [(new_name, data_torch)]
- self.gguf_writer.add_tensor(new_name, data)
+ def write_tensors(self):
+ super().write_tensors()
+
+ if self._q_norms is not None or self._k_norms is not None:
+ # flatten two `list[dict[str, Tensor]]` into a single `list[str]`
+ norms = (
+ [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else []
+ ) + (
+ [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else []
+ )
+ if len(norms) > 0:
+ raise ValueError(f"Unprocessed norms: {norms}")
@Model.register("LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM")
self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
- # Same as super class, but permuting q_proj, k_proj
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- n_head = self.hparams.get("num_attention_heads")
- n_kv_head = self.hparams.get("num_key_value_heads")
- n_experts = self.hparams.get("num_local_experts")
- experts = dict()
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
- continue
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.numpy()
-
- if name.endswith("q_proj.weight"):
- data = permute(data, n_head, n_head)
- if name.endswith("k_proj.weight"):
- data = permute(data, n_head, n_kv_head)
-
- data = data.squeeze()
+ @staticmethod
+ def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
+ if n_head_kv is not None and n_head != n_head_kv:
+ n_head = n_head_kv
+ return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
+ .swapaxes(1, 2)
+ .reshape(weights.shape))
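A toy-sized check (shapes assumed) of what permute does to q/k projection rows, called below as LlamaModel.permute: within each head, rows from the first and second halves are interleaved, which is commonly described as undoing the Hugging Face checkpoint's RoPE-related permutation:

w = torch.arange(4, dtype=torch.float32).reshape(4, 1)  # one head, head_dim 4, one input column
out = LlamaModel.permute(w, n_head=1, n_head_kv=None)
assert out.flatten().tolist() == [0.0, 2.0, 1.0, 3.0]   # halves of the head interleaved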
- # process the experts separately
- if name.find("block_sparse_moe.experts") != -1:
- experts[name] = data
- if len(experts) >= n_experts:
- # merge the experts into a single 3d tensor
- for bid in range(block_count):
- for wid in range(1, 4):
- full = True
- for xid in range(n_experts):
- ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight"
- if ename not in experts:
- full = False
- break
- if not full:
- continue
+ _experts: list[dict[str, Tensor]] | None = None
- datas = []
- for xid in range(n_experts):
- ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight"
- datas.append(experts[ename])
- del experts[ename]
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ n_head = self.hparams["num_attention_heads"]
+ n_kv_head = self.hparams.get("num_key_value_heads")
- data = np.stack(datas, axis=0)
- data_dtype = data.dtype
+ if name.endswith("q_proj.weight"):
+ data_torch = LlamaModel.permute(data_torch, n_head, n_head)
+ if name.endswith("k_proj.weight"):
+ data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ # process the experts separately
+ if name.find("block_sparse_moe.experts") != -1:
+ n_experts = self.hparams["num_local_experts"]
- if self.ftype == 1 and data_dtype == np.float32:
- data = data.astype(np.float16)
+ assert bid is not None
- merged_name = f"layers.{bid}.feed_forward.experts.w{wid}.weight"
+ if self._experts is None:
+ self._experts = [{} for _ in range(self.block_count)]
- new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
+ self._experts[bid][name] = data_torch
- logger.info(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+ if len(self._experts[bid]) >= n_experts * 3:
+ tensors: list[tuple[str, Tensor]] = []
- self.gguf_writer.add_tensor(new_name, data)
- continue
+ # merge the experts into a single 3d tensor
+ for wid in ["w1", "w2", "w3"]:
+ datas: list[Tensor] = []
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
+ for xid in range(n_experts):
+ ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
+ datas.append(self._experts[bid][ename])
+ del self._experts[bid][ename]
- n_dims = len(data.shape)
- data_dtype = data.dtype
+ data_torch = torch.stack(datas, dim=0)
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"
- # 1d tensors need to be converted to float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
+ new_name = self.map_tensor_name(merged_name)
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
+ tensors.append((new_name, data_torch))
+ return tensors
+ else:
+ return []
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+ return [(self.map_tensor_name(name), data_torch)]
- self.gguf_writer.add_tensor(new_name, data)
+ def write_tensors(self):
+ super().write_tensors()
- if len(experts) > 0:
- raise ValueError(f"Unprocessed experts: {experts.keys()}")
+ if self._experts is not None:
+ # flatten `list[dict[str, Tensor]]` into `list[str]`
+ experts = [k for d in self._experts for k in d.keys()]
+ if len(experts) > 0:
+ raise ValueError(f"Unprocessed experts: {experts}")
@Model.register("GrokForCausalLM")
super().set_gguf_parameters()
self.gguf_writer.add_name("Grok")
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- n_experts = self.hparams.get("num_local_experts")
- experts = dict()
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
- continue
-
- old_dtype = data_torch.dtype
+ _experts: list[dict[str, Tensor]] | None = None
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ # process the experts separately
+ if name.find(".moe.") != -1:
+ n_experts = self.hparams["num_local_experts"]
- # process the experts separately
- if name.find(".moe.") != -1:
- experts[name] = data
- if len(experts) >= n_experts:
- # merge the experts into a single 3d tensor
- for bid in range(block_count):
- for wid in ["linear", "linear_1", "linear_v"]:
- full = True
- for xid in range(n_experts):
- ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
- if ename not in experts:
- full = False
- break
- if not full:
- continue
+ assert bid is not None
- datas = []
- for xid in range(n_experts):
- ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
- datas.append(experts[ename])
- del experts[ename]
+ if self._experts is None:
+ self._experts = [{} for _ in range(self.block_count)]
- data = np.stack(datas, axis=0)
- data_dtype = data.dtype
+ self._experts[bid][name] = data_torch
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ if len(self._experts[bid]) >= n_experts * 3:
+ tensors: list[tuple[str, Tensor]] = []
- if self.ftype == 1 and data_dtype == np.float32:
- data = data.astype(np.float16)
+ # merge the experts into a single 3d tensor
+ for wid in ["linear", "linear_1", "linear_v"]:
+ datas: list[Tensor] = []
- merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"
+ for xid in range(n_experts):
+ ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
+ datas.append(self._experts[bid][ename])
+ del self._experts[bid][ename]
- new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
+ data_torch = torch.stack(datas, dim=0)
- logger.info(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+ merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"
- self.gguf_writer.add_tensor(new_name, data)
- continue
+ new_name = self.map_tensor_name(merged_name)
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+ tensors.append((new_name, data_torch))
+ return tensors
+ else:
+ return []
- self.gguf_writer.add_tensor(new_name, data)
+ return [(self.map_tensor_name(name), data_torch)]
@Model.register("DbrxForCausalLM")
self.gguf_writer.add_file_type(self.ftype)
logger.info(f"gguf: file type = {self.ftype}")
- def write_tensors(self):
- block_count = self.hparams.get("n_layers")
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- for name, data_torch in self.get_tensors():
- n_expert = self.hparams["ffn_config"]["moe_num_experts"]
- n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
- n_embd = self.hparams["d_model"]
-
- # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose
- # original implementation expects (n_expert, n_ff, n_embd) for all experts weights
- # But llama.cpp moe graph works differently
- # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions
- # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
- exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
- "ffn.experts.mlp.w2": (0, 2, 1), # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert}
- "ffn.experts.mlp.v1": None} # LLM_TENSOR_FFN_UP_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
- experts = False
- for exp_tensor_name in exp_tensor_names.keys():
- if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
- experts = True
- data_torch = data_torch.view(n_expert, n_ff, n_embd)
- if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
- data_torch = data_torch.permute(*permute_tensor)
- break
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- # In MoE models the ffn tensors are typically most of the model weights,
- # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
- # Every other model has the weight names ending in .weight,
- # let's assume that is the convention which is not the case for dbrx:
- # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
- new_name = tensor_map.get_name(name if not experts else name + ".weight", try_suffixes=(".weight",))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # Most of the codebase that takes in 1D tensors only handles F32 tensors
- # and most of the outputs tensors are F32.
- if data_dtype != np.float32 and n_dims == 1:
- raise ValueError(f"Can not map tensor {name!r}: all 1D tensors must be F32")
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
+
+ n_expert = self.hparams["ffn_config"]["moe_num_experts"]
+ n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
+ n_embd = self.hparams["d_model"]
+
+ # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose
+ # original implementation expects (n_expert, n_ff, n_embd) for all experts weights
+ # But llama.cpp moe graph works differently
+ # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions
+ # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
+ exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
+ "ffn.experts.mlp.w2": (0, 2, 1), # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert}
+ "ffn.experts.mlp.v1": None} # LLM_TENSOR_FFN_UP_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
+ experts = False
+
+ for exp_tensor_name in exp_tensor_names.keys():
+ if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
+ experts = True
+ data_torch = data_torch.view(n_expert, n_ff, n_embd)
+ if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
+ data_torch = data_torch.permute(*permute_tensor)
+ break
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ # map tensor names
+ # In MoE models the ffn tensors are typically most of the model weights,
+ # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
+ # Every other model has the weight names ending in .weight,
+ # so let's assume that is the convention, which is not the case for dbrx:
+ # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
+ new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",))
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and n_dims > 1:
- data = data.astype(np.float16)
+ return [(new_name, data_torch)]
- logger.debug(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+ def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+ del name, new_name, bid # unused
- self.gguf_writer.add_tensor(new_name, data)
+ return n_dims > 1
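A toy-shape check (sizes invented) of the expert handling above: the raw expert tensor is viewed as 3D, and w2 additionally swaps its last two axes so that, after ggml's usual dimension reversal, it ends up as {n_ff, n_embd, n_expert}:

n_expert, n_ff, n_embd = 2, 6, 4
flat = torch.zeros(n_expert * n_ff * n_embd)  # expert weights arrive without a .weight suffix
w1 = flat.view(n_expert, n_ff, n_embd)                   # FFN_GATE_EXPS / FFN_UP_EXPS layout
w2 = flat.view(n_expert, n_ff, n_embd).permute(0, 2, 1)  # FFN_DOWN_EXPS layout
assert w1.shape == (n_expert, n_ff, n_embd) and w2.shape == (n_expert, n_embd, n_ff)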
@Model.register("MiniCPMForCausalLM")
.reshape(weights.shape)
)
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- n_head = self.hparams.get("num_attention_heads")
- n_kv_head = self.hparams.get("num_key_value_heads")
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
- continue
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- # HF models permute some of the tensors, so we need to undo that
- if name.endswith(("q_proj.weight")):
- data_torch = self._reverse_hf_permute(data_torch, n_head, n_head)
- if name.endswith(("k_proj.weight")):
- data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
+ n_head = self.hparams["num_attention_heads"]
+ n_kv_head = self.hparams.get("num_key_value_heads")
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+ # HF models permute some of the tensors, so we need to undo that
+ if name.endswith(("q_proj.weight")):
+ data_torch = self._reverse_hf_permute(data_torch, n_head, n_head)
+ if name.endswith(("k_proj.weight")):
+ data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head)
- self.gguf_writer.add_tensor(new_name, data)
+ return [(self.map_tensor_name(name), data_torch)]
@Model.register("QWenLMHeadModel")
self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
- def write_tensors(self):
- block_count = self.hparams["num_hidden_layers"]
- model_kv = dict(self.get_tensors())
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- for name, data_torch in model_kv.items():
- # we don't need these
- if name.endswith(".rotary_emb.inv_freq"):
- continue
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor(new_name, data)
-
@Model.register("Qwen2ForCausalLM")
class Qwen2Model(Model):
if (n_experts := self.hparams.get("num_experts")) is not None:
self.gguf_writer.add_expert_count(n_experts)
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- n_experts = self.hparams.get("num_experts")
- experts = dict()
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
- continue
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # process the experts separately
- if name.find("experts") != -1:
- experts[name] = data
- if len(experts) >= n_experts * 3:
- # merge the experts into a single 3d tensor
- for bid in range(block_count):
- for w_name in ["down_proj", "gate_proj", "up_proj"]:
- full = True
- for xid in range(n_experts):
- ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
- if ename not in experts:
- full = False
- break
- if not full:
- continue
-
- datas = []
- for xid in range(n_experts):
- ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
- datas.append(experts[ename])
- del experts[ename]
+ _experts: list[dict[str, Tensor]] | None = None
- data = np.stack(datas, axis=0)
- data_dtype = data.dtype
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ # process the experts separately
+ if name.find("experts") != -1:
+ n_experts = self.hparams["num_experts"]
+ assert bid is not None
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ if self._experts is None:
+ self._experts = [{} for _ in range(self.block_count)]
- if self.ftype == 1 and data_dtype == np.float32:
- data = data.astype(np.float16)
+ self._experts[bid][name] = data_torch
- merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
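+ # once all n_experts * 3 split weights (down/gate/up per expert) of this block are buffered, merge them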
+ if len(self._experts[bid]) >= n_experts * 3:
+ tensors: list[tuple[str, Tensor]] = []
- new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- logger.debug(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
- continue
+ # merge the experts into a single 3d tensor
+ for w_name in ["down_proj", "gate_proj", "up_proj"]:
+ datas: list[Tensor] = []
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
+ for xid in range(n_experts):
+ ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
+ datas.append(self._experts[bid][ename])
+ del self._experts[bid][ename]
- n_dims = len(data.shape)
- data_dtype = data.dtype
+ data_torch = torch.stack(datas, dim=0)
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
- data = data.astype(np.float32)
+ new_name = self.map_tensor_name(merged_name)
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
+ tensors.append((new_name, data_torch))
+ return tensors
+ else:
+ return []
- logger.debug(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+ return [(self.map_tensor_name(name), data_torch)]
- self.gguf_writer.add_tensor(new_name, data)
+ def write_tensors(self):
+ super().write_tensors()
- if len(experts) > 0:
- raise ValueError(f"Unprocessed experts: {experts.keys()}")
+ if self._experts is not None:
+ # flatten `list[dict[str, Tensor]]` into `list[str]`
+ experts = [k for d in self._experts for k in d.keys()]
+ if len(experts) > 0:
+ raise ValueError(f"Unprocessed experts: {experts}")
@Model.register("GPT2LMHeadModel")
self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
self.gguf_writer.add_file_type(self.ftype)
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
- for name, data_torch in self.get_tensors():
- # we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq", ".attn.bias", ".attn.masked_bias")):
- continue
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")):
- data_torch = data_torch.transpose(1, 0)
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ tensors: list[tuple[str, Tensor]] = []
- data = data_torch.squeeze().numpy()
+ # we don't need these
+ if name.endswith((".attn.bias", ".attn.masked_bias")):
+ return tensors
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
+ if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight")):
+ data_torch = data_torch.transpose(1, 0)
- n_dims = len(data.shape)
- data_dtype = data.dtype
+ new_name = self.map_tensor_name(name)
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ tensors.append((new_name, data_torch))
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
+ # note: GPT2 output is tied to (same as) wte in the original model
+ if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
-
- # note: GPT2 output is tied to (same as) wte in original model
- if new_name == "token_embd.weight":
- logger.info(f"output.weight, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor("output.weight", data)
+ return tensors
@Model.register("PhiForCausalLM")
if not tokenizer_path.is_file():
raise ValueError(f'Error: Missing {tokenizer_path}')
- tokenizer = SentencePieceProcessor(str(tokenizer_path))
+ tokenizer = SentencePieceProcessor()
+ tokenizer.LoadFromFile(str(tokenizer_path))
vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
for token_id in range(tokenizer.vocab_size()):
- piece = tokenizer.id_to_piece(token_id)
+ piece = tokenizer.IdToPiece(token_id)
text = piece.encode("utf-8")
- score = tokenizer.get_score(token_id)
+ score = tokenizer.GetScore(token_id)
toktype = SentencePieceTokenTypes.NORMAL
- if tokenizer.is_unknown(token_id):
+ if tokenizer.IsUnknown(token_id):
toktype = SentencePieceTokenTypes.UNKNOWN
- elif tokenizer.is_control(token_id):
+ elif tokenizer.IsControl(token_id):
toktype = SentencePieceTokenTypes.CONTROL
- elif tokenizer.is_unused(token_id):
+ elif tokenizer.IsUnused(token_id):
toktype = SentencePieceTokenTypes.UNUSED
- elif tokenizer.is_byte(token_id):
+ elif tokenizer.IsByte(token_id):
toktype = SentencePieceTokenTypes.BYTE
tokens[token_id] = text
data_torch = torch.reshape(data_torch, (5120, 5120))
return data_torch
- def write_tensors(self):
- block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers"))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
- for name, data_torch in self.get_tensors():
- if "self_attn.rotary_emb.inv_freq" in name:
- continue
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- # shuffle for broadcasting of gqa in ggml_mul_mat
- if new_name.endswith("attn_q.weight"):
- data_torch = self.shuffle_attn_q_weight(data_torch)
- elif new_name.endswith("attn_output.weight"):
- data_torch = self.shuffle_attn_output_weight(data_torch)
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
+ new_name = self.map_tensor_name(name)
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+ # shuffle for broadcasting of gqa in ggml_mul_mat
+ if new_name.endswith("attn_q.weight"):
+ data_torch = self.shuffle_attn_q_weight(data_torch)
+ elif new_name.endswith("attn_output.weight"):
+ data_torch = self.shuffle_attn_output_weight(data_torch)
- self.gguf_writer.add_tensor(new_name, data)
+ return [(new_name, data_torch)]
@Model.register("CodeShellForCausalLM")
self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
self.gguf_writer.add_rope_scaling_factor(1.0)
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- tensors = dict(self.get_tensors())
- has_lm_head = "lm_head.weight" in tensors.keys() or "output.weight" in tensors.keys()
- for name, data_torch in tensors.items():
- # we don't need these
- if name.endswith((".attn.rotary_emb.inv_freq")):
- continue
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- old_dtype = data_torch.dtype
+ new_name = self.map_tensor_name(name)
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
+ tensors: list[tuple[str, Tensor]] = [(new_name, data_torch)]
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+ if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
+ assert self.tensor_names is not None
- self.gguf_writer.add_tensor(new_name, data)
+ if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
+ # copy tok_embd.weight to output.weight
+ tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
- if not has_lm_head and name == "transformer.wte.weight":
- self.gguf_writer.add_tensor("output.weight", data)
- logger.info(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+ return tensors
@Model.register("InternLM2ForCausalLM")
sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
- tokenizer = SentencePieceProcessor(str(tokenizer_path))
+ tokenizer = SentencePieceProcessor()
+ tokenizer.LoadFromFile(str(tokenizer_path))
+
vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
for token_id in range(vocab_size):
- piece = tokenizer.id_to_piece(token_id)
+ piece = tokenizer.IdToPiece(token_id)
text = piece.encode("utf-8")
- score = tokenizer.get_score(token_id)
+ score = tokenizer.GetScore(token_id)
if text == b"\x00":
# (TODO): fixme
# Hack here and replace the \x00 characters.
- logger.debug(f"InternLM2 convert token '{text}' to '🐉'!")
- text = "🐉"
+ logger.warning(f"InternLM2 converts token '{text}' to '🐉'!")
+ text = "🐉".encode("utf-8")
toktype = SentencePieceTokenTypes.NORMAL
- if tokenizer.is_unknown(token_id):
+ if tokenizer.IsUnknown(token_id):
toktype = SentencePieceTokenTypes.UNKNOWN
- elif tokenizer.is_control(token_id):
+ elif tokenizer.IsControl(token_id):
toktype = SentencePieceTokenTypes.CONTROL
- elif tokenizer.is_unused(token_id):
+ elif tokenizer.IsUnused(token_id):
toktype = SentencePieceTokenTypes.UNUSED
- elif tokenizer.is_byte(token_id):
+ elif tokenizer.IsByte(token_id):
toktype = SentencePieceTokenTypes.BYTE
tokens.append(text)
special_vocab.add_to_gguf(self.gguf_writer)
def _try_get_sft_eos(self, tokenizer):
- unused_145_list = tokenizer.encode('[UNUSED_TOKEN_145]')
- im_end_list = tokenizer.encode('<|im_end|>')
+ unused_145_list = tokenizer.Encode('[UNUSED_TOKEN_145]')
+ im_end_list = tokenizer.Encode('<|im_end|>')
+ eos_token = None
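+ # exactly one of the two EOS candidates is expected to encode to a single token id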
assert (len(unused_145_list) == 1) ^ (len(im_end_list) == 1)
if len(unused_145_list) == 1:
eos_token = unused_145_list[0]
if len(im_end_list) == 1:
eos_token = im_end_list[0]
+ assert eos_token is not None
return eos_token
def _hf_permute_qk(self, weights, n_head: int, n_head_kv: int):
self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
- def post_write_tensors(self, tensor_map, name, data_torch):
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- data = data_torch.squeeze().numpy()
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
-
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
-
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
- self.gguf_writer.add_tensor(new_name, data)
-
- def write_tensors(self):
- from einops import rearrange
-
- num_heads = self.hparams.get("num_attention_heads")
- num_kv_heads = self.hparams.get("num_key_value_heads")
- hidden_size = self.hparams.get("hidden_size")
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ num_heads = self.hparams["num_attention_heads"]
+ num_kv_heads = self.hparams["num_key_value_heads"]
+ hidden_size = self.hparams["hidden_size"]
q_per_kv = num_heads // num_kv_heads
head_dim = hidden_size // num_heads
num_groups = num_heads // q_per_kv
- block_count = self.hparams["num_hidden_layers"]
- model_kv = dict(self.get_tensors())
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
qkv_pattern = r"model\.layers\.(\d+)\.attention\.wqkv"
- for name, data_torch in model_kv.items():
- # we don't need these
- if name.endswith(".rotary_emb.inv_freq"):
- continue
- if re.match(qkv_pattern, name):
- bid = re.findall(qkv_pattern, name)[0]
- qkv = data_torch
- qkv = rearrange(qkv.T, " o (g n i) ->o g n i", g=num_groups, n=q_per_kv + 2, i=head_dim)
- q, k, v = qkv[..., : q_per_kv, :], qkv[..., q_per_kv: q_per_kv + 1, :], qkv[..., q_per_kv + 1: q_per_kv + 2, :]
- # The model weights of q and k equire additional reshape.
- q = self._hf_permute_qk(rearrange(q, " o g n i -> o (g n i)").T, num_heads, num_heads)
- k = self._hf_permute_qk(rearrange(k, " o g n i -> o (g n i)").T, num_heads, num_kv_heads)
- v = rearrange(v, " o g n i -> o (g n i)").T
- self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wq.weight", q)
- self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wk.weight", k)
- self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wv.weight", v)
- else:
- self.post_write_tensors(tensor_map, name, data_torch)
+ if re.match(qkv_pattern, name):
+ bid = re.findall(qkv_pattern, name)[0]
+ qkv = data_torch
+ # qkv = rearrange(qkv.T, " o (g n i) ->o g n i", g=num_groups, n=q_per_kv + 2, i=head_dim)
+ qkv = qkv.T.reshape((-1, num_groups, q_per_kv + 2, head_dim))
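+ # each group packs q_per_kv query heads followed by one key head and one value head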
+ q, k, v = qkv[..., : q_per_kv, :], qkv[..., q_per_kv: q_per_kv + 1, :], qkv[..., q_per_kv + 1: q_per_kv + 2, :]
+ # The model weights of q and k require an additional reshape.
+ # q = self._hf_permute_qk(rearrange(q, " o g n i -> o (g n i)").T, num_heads, num_heads)
+ q = self._hf_permute_qk(q.reshape((q.shape[0], -1)).T, num_heads, num_heads)
+ # k = self._hf_permute_qk(rearrange(k, " o g n i -> o (g n i)").T, num_heads, num_kv_heads)
+ k = self._hf_permute_qk(k.reshape((k.shape[0], -1)).T, num_heads, num_kv_heads)
+ # v = rearrange(v, " o g n i -> o (g n i)").T
+ v = v.reshape((v.shape[0], -1)).T
+ return [
+ (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
+ (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
+ (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v),
+ ]
+ else:
+ return [(self.map_tensor_name(name), data_torch)]
@Model.register("BertModel", "CamembertModel")
special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
special_vocab.add_to_gguf(self.gguf_writer)
- def write_tensors(self):
- tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
- tensors = dict(self.get_tensors())
- for name, data_torch in tensors.items():
- # we are only using BERT for embeddings so we don't need the pooling layer
- if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
- continue # we don't need these
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ # we are only using BERT for embeddings so we don't need the pooling layer
+ if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
+ return [] # we don't need these
- data = data_torch.squeeze().numpy()
- n_dims = len(data.shape)
- new_dtype: type[np.floating[Any]]
+ return [(self.map_tensor_name(name), data_torch)]
- if (
- self.ftype == 1 and name.endswith(".weight") and n_dims == 2
- and name != "embeddings.token_type_embeddings.weight" # not used with get_rows, must be F32
- ):
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- new_dtype = np.float16
- else:
- # if f32 desired, convert any float16 to float32
- new_dtype = np.float32
-
- logger.info(f"{new_name}, n_dims = {n_dims}, {data_torch.dtype} --> {new_dtype}")
-
- if data.dtype != new_dtype:
- data = data.astype(new_dtype)
+ def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+ del new_name, bid, n_dims # unused
- self.gguf_writer.add_tensor(new_name, data)
+ # not used with get_rows, must be F32
+ return name == "embeddings.token_type_embeddings.weight"
@Model.register("NomicBertModel")
self.gguf_writer.add_value_length(hparams["head_dim"])
self.gguf_writer.add_file_type(self.ftype)
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
- for name, data_torch in self.get_tensors():
- # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
- # To prevent errors, skip loading lm_head.weight.
- if name == "lm_head.weight":
- logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
- continue
-
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
- if name.endswith("norm.weight"):
- data_torch = data_torch + 1
- data = data_torch.squeeze().numpy()
+ # lm_head is not used by llama.cpp, but autoawq will include this tensor in the model.
+ # To prevent errors, skip loading lm_head.weight.
+ if name == "lm_head.weight":
+ logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
+ return []
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
+ # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
+ if name.endswith("norm.weight"):
+ data_torch = data_torch + 1
- n_dims = len(data.shape)
- data_dtype = data.dtype
-
- data = data.astype(np.float32)
-
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
- data = data.astype(np.float16)
-
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
+ return [(self.map_tensor_name(name), data_torch)]
@Model.register("Starcoder2ForCausalLM")
if (self.dir_model / "tokenizer.json").is_file():
self._set_vocab_gpt2()
+ elif (self.dir_model / "tokenizer.model").is_file():
+ self._set_vocab_sentencepiece()
else:
# Use the GPT-NeoX tokenizer when no tokenizer files are present
tokenizer_path = Path(sys.path[0]) / "models" / "ggml-vocab-gpt-neox.gguf"
neox_reader = gguf.GGUFReader(tokenizer_path, "r")
field = neox_reader.get_field(gguf.Keys.Tokenizer.MODEL)
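+ # fall back to default values whenever a field is missing from the reference vocab file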
- self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]))
+ self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8") if field else "gpt2")
field = neox_reader.get_field(gguf.Keys.Tokenizer.PRE)
- self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]))
+ self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else "mpt")
field = neox_reader.get_field(gguf.Keys.Tokenizer.LIST)
+ assert field
self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])
field = neox_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
+ assert field
self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])
field = neox_reader.get_field(gguf.Keys.Tokenizer.MERGES)
+ assert field
self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])
field = neox_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)
- self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
+ self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0] if field else 1)
field = neox_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)
- self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
+ self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0] if field else 0)
field = neox_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)
- self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
+ self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0] if field else 0)
+
+ field = neox_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)
+ self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0] if field else 0)
def set_gguf_parameters(self):
d_model = self.find_hparam(["hidden_size", "d_model"])
self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
self.gguf_writer.add_file_type(self.ftype)
- def write_tensors(self):
- block_count = self.hparams["n_layer"]
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
- tok_embd = None
- tok_embd_name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.TOKEN_EMBD] + ".weight"
- output_name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.OUTPUT] + ".weight"
-
- for name, data_torch in self.get_tensors():
- old_dtype = data_torch.dtype
-
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
-
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
+ _tok_embd = None
- if name.endswith(".A_log"):
- logger.debug("A_log --> A ==> " + new_name)
- data_torch = -torch.exp(data_torch)
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
- # assuming token_embd.weight is seen before output.weight
- if tok_embd is not None and new_name == output_name:
- if torch.equal(tok_embd, data_torch):
- logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
- continue
- if new_name == tok_embd_name:
- tok_embd = data_torch
+ output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
+ tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)
- data = data_torch.squeeze().numpy()
+ new_name = self.map_tensor_name(name)
- n_dims = len(data.shape)
- data_dtype = data.dtype
+ if name.endswith(".A_log"):
+ logger.debug("A_log --> A ==> " + new_name)
+ data_torch = -torch.exp(data_torch)
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ # assuming token_embd.weight is seen before output.weight
+ if self._tok_embd is not None and new_name == output_name:
+ if torch.equal(self._tok_embd, data_torch):
+ logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
+ return []
+ elif new_name == tok_embd_name:
+ self._tok_embd = data_torch
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
+ return [(new_name, data_torch)]
- # if f16 desired, convert big float32 2-dim weight tensors to float16
- new_weight_name = new_name[:-len(".weight")] if new_name.endswith(".weight") else ""
- if self.ftype == 1 and data_dtype == np.float32 and new_weight_name.endswith((".ssm_in", ".ssm_out", "token_embd", "output")) and n_dims == 2:
- data = data.astype(np.float16)
+ def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+ del n_dims # unused
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
- self.gguf_writer.add_tensor(new_name, data)
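+ # keep these per-block SSM tensors in F32
+ # (previously only the ssm_in, ssm_out, token_embd and output 2D weights were converted to F16)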
+ return bid is not None and new_name in (
+ self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [
+ gguf.MODEL_TENSOR.SSM_CONV1D,
+ gguf.MODEL_TENSOR.SSM_X,
+ gguf.MODEL_TENSOR.SSM_DT,
+ gguf.MODEL_TENSOR.SSM_A,
+ gguf.MODEL_TENSOR.SSM_D,
+ ]
+ )
@Model.register("CohereForCausalLM")
# Same as super class, but permuting q_proj, k_proj
# Copied from: LlamaModel
- def write_tensors(self):
- block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
- tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
- n_head = self.hparams.get("num_attention_heads")
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+ del bid # unused
+
+ n_head = self.hparams["num_attention_heads"]
n_kv_head = self.hparams.get("num_key_value_heads")
- for name, data_torch in self.get_tensors():
- old_dtype = data_torch.dtype
- # convert any unsupported data types to float32
- if data_torch.dtype not in (torch.float16, torch.float32):
- data_torch = data_torch.to(torch.float32)
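+ # apply the same q_proj/k_proj permutation as LlamaModel (see the comment above)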
+ if name.endswith("q_proj.weight"):
+ data_torch = LlamaModel.permute(data_torch, n_head, n_head)
+ if name.endswith("k_proj.weight"):
+ data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
- data = data_torch.numpy()
+ return [(self.map_tensor_name(name), data_torch)]
- if name.endswith("q_proj.weight"):
- data = permute(data, n_head, n_head)
- if name.endswith("k_proj.weight"):
- data = permute(data, n_head, n_kv_head)
- data = data.squeeze()
+###### CONVERSION LOGIC ######
- # map tensor names
- new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
- if new_name is None:
- raise ValueError(f"Can not map tensor {name!r}")
- n_dims = len(data.shape)
- data_dtype = data.dtype
+# tree of lazy tensors
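+# each lazy tensor wraps a meta tensor (for shape/dtype tracking) and records the function
+# and arguments of the operation that produced it, so the real data can be computed later
+# in to_eager()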
+class LazyTorchTensor:
+ _meta: Tensor
+ _data: Tensor | None
+ _args: tuple
+ _func: Callable[[tuple], Tensor] | None
- # if f32 desired, convert any float16 to float32
- if self.ftype == 0 and data_dtype == np.float16:
- data = data.astype(np.float32)
+ def __init__(self, *, meta: Tensor, data: Tensor | None = None, args: tuple = (), func: Callable[[tuple], Tensor] | None = None):
+ self._meta = meta
+ self._data = data
+ self._args = args
+ self._func = func
- # 1d tensors need to be converted to float32
- if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
- data = data.astype(np.float32)
+ @staticmethod
+ def _recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any:
+ # TODO: dict and set
+ if isinstance(o, (list, tuple)):
+ L = []
+ for item in o:
+ L.append(LazyTorchTensor._recurse_apply(item, fn))
+ if isinstance(o, tuple):
+ L = tuple(L)
+ return L
+ elif isinstance(o, LazyTorchTensor):
+ return fn(o)
+ else:
+ return o
+
+ def _wrap_fn(self, fn: Callable, use_self: bool = False) -> Callable[[Any], LazyTorchTensor]:
+ def wrapped_fn(*args, **kwargs):
+ if kwargs is None:
+ kwargs = {}
+ args = ((self,) if use_self else ()) + args
+
+ meta_args = LazyTorchTensor._recurse_apply(args, lambda t: t._meta)
+
+ return LazyTorchTensor(meta=fn(*meta_args, **kwargs), args=args, func=lambda a: fn(*a, **kwargs))
+ return wrapped_fn
+
+ def __getattr__(self, __name: str) -> Any:
+ meta_attr = getattr(self._meta, __name)
+ if callable(meta_attr):
+ return self._wrap_fn(getattr(torch.Tensor, __name), use_self=True)
+ elif isinstance(meta_attr, torch.Tensor):
+ # for things like self.T
+ return self._wrap_fn(lambda s: getattr(s, __name))(self)
+ else:
+ return meta_attr
- # if f16 desired, convert any float32 2-dim weight tensors to float16
- if self.ftype == 1 and data_dtype == np.float32 and n_dims == 2:
- data = data.astype(np.float16)
+ _dtype_map: dict[torch.dtype, type] = {
+ torch.float16: np.float16,
+ torch.float32: np.float32,
+ }
- logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+ def numpy(self) -> gguf.LazyTensor:
+ dtype = self._dtype_map[self.dtype]
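+ # wrap in a gguf.LazyTensor so the conversion to numpy is deferred until the data is actually needed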
+ return gguf.LazyTensor(lambda: LazyTorchTensor.to_eager(self).numpy(), dtype=dtype, shape=self.shape)
- self.gguf_writer.add_tensor(new_name, data)
+ @overload
+ @staticmethod
+ def to_eager(t: Tensor | LazyTorchTensor) -> Tensor: ...
+ @overload
+ @staticmethod
+ def to_eager(t: tuple) -> tuple: ...
-###### CONVERSION LOGIC ######
+ @staticmethod
+ def to_eager(t: Any) -> Any:
+ def simple_to_eager(_t: LazyTorchTensor) -> Tensor:
+ # wake up the lazy tensor
+ if _t._data is None and _t._func is not None:
+ # recurse into its arguments
+ _t._args = LazyTorchTensor.to_eager(_t._args)
+ _t._data = _t._func(_t._args)
+ if _t._data is not None:
+ return _t._data
+ else:
+ raise ValueError(f"Could not compute lazy tensor {_t!r} with args {_t._args!r}")
+
+ # recurse into lists and/or tuples, keeping their structure
+ return LazyTorchTensor._recurse_apply(t, simple_to_eager)
+
+ @staticmethod
+ def from_eager(t: Tensor) -> Tensor:
+ if t.__class__ == LazyTorchTensor:
+ return t
+ return LazyTorchTensor(meta=t.detach().to("meta"), data=t) # type: ignore
+
+ @classmethod
+ def __torch_function__(cls, func, types, args=(), kwargs=None):
+ del types # unused
+
+ if kwargs is None:
+ kwargs = {}
+
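+ # torch.equal needs real data, so it is evaluated eagerly; numpy() is handled by
+ # LazyTorchTensor.numpy(); every other torch function stays lazy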
+ if func is torch.Tensor.numpy:
+ return args[0].numpy()
+ if func is torch.equal:
+ eager_args = LazyTorchTensor.to_eager(args)
+ return func(*eager_args, **kwargs)
+
+ return LazyTorchTensor._wrap_fn(args[0], func)(*args, **kwargs)
+
+ # special methods bypass __getattr__, so they need to be added manually
+ # ref: https://docs.python.org/3/reference/datamodel.html#special-lookup
+ # NOTE: LazyTorchTensor can't be a subclass of Tensor (and then be used
+ # as self._meta is currently used), because then the following
+ # operations would by default not be wrapped, and so not propagated
+ # when the tensor is made eager.
+ # It's better to get non-silent errors for not-yet-supported operators.
+ # TODO: add more when needed to avoid clutter, or find a more concise way
+ def __neg__(self, *args): # mamba
+ return self._wrap_fn(torch.Tensor.__neg__)(self, *args)
+
+ def __add__(self, *args): # gemma
+ return self._wrap_fn(torch.Tensor.__add__)(self, *args)
+
+ def __getitem__(self, *args): # bloom falcon refact internlm2
+ return self._wrap_fn(torch.Tensor.__getitem__)(self, *args)
def parse_args() -> argparse.Namespace:
)
parser.add_argument(
"--awq-path", type=Path, default=None,
- help="Path to scale awq cache file")
+ help="Path to scale awq cache file",
+ )
parser.add_argument(
"--outfile", type=Path,
help="path to write to; default: based on input",
"--outtype", type=str, choices=["f32", "f16"], default="f16",
help="output format - use f32 for float32, f16 for float16",
)
- parser.add_argument("--bigendian", action="store_true", help="model is executed on big endian machine")
+ parser.add_argument(
+ "--bigendian", action="store_true",
+ help="model is executed on big endian machine",
+ )
parser.add_argument(
"model", type=Path,
help="directory containing model file",
)
- parser.add_argument("--use-temp-file", action="store_true", help="use the tempfile library while processing (helpful when running out of memory, process killed)")
- parser.add_argument("--model-name", type=str, default=None, help="name of the model")
- parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
+ parser.add_argument(
+ "--use-temp-file", action="store_true",
+ help="use the tempfile library while processing (helpful when running out of memory, process killed)",
+ )
+ parser.add_argument(
+ "--no-lazy", action="store_true",
+ help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
+ )
+ parser.add_argument(
+ "--model-name", type=str, default=None,
+ help="name of the model",
+ )
+ parser.add_argument(
+ "--verbose", action="store_true",
+ help="increase output verbosity",
+ )
return parser.parse_args()
with torch.inference_mode():
model_class = Model.from_model_architecture(hparams["architectures"][0])
- model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian, args.use_temp_file)
+ model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian, args.use_temp_file, args.no_lazy)
logger.info("Set model parameters")
model_instance.set_gguf_parameters()