convert-hf : save memory with lazy evaluation (#7075)
author    compilade <redacted>
Wed, 8 May 2024 22:16:38 +0000 (18:16 -0400)
committer GitHub <redacted>
Wed, 8 May 2024 22:16:38 +0000 (18:16 -0400)
* convert-hf : begin refactoring write_tensor

* convert : upgrade to sentencepiece v0.2.0

* convert-hf : remove unused n_dims in extra_*_tensors

* convert-hf : simplify MoE weights stacking

* convert-hf : flake8 linter doesn't like semicolons

* convert-hf : allow unusual model part names

For example, loading `model-00001-of-00001.safetensors` now works.

* convert-hf : fix stacking MoE expert tensors

`torch.stack` and `torch.cat` don't do the same thing.

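A quick standalone illustration of the difference (hypothetical shapes, not taken from the script): `torch.stack` adds a new leading dimension, which is what the merged MoE expert tensors need, while `torch.cat` joins along an existing dimension.

    import torch

    experts = [torch.zeros(4, 8) for _ in range(3)]  # three per-expert weight matrices

    stacked = torch.stack(experts, dim=0)  # new leading dim -> shape (3, 4, 8)
    joined = torch.cat(experts, dim=0)     # along existing dim -> shape (12, 8)

    assert stacked.shape == (3, 4, 8)
    assert joined.shape == (12, 8)
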
* convert-hf : fix Mamba conversion

Tested to work even with a SentencePiece-based tokenizer.

* convert : use a string for the SentencePiece tokenizer path

* convert-hf : display tensor shape

* convert-hf : convert norms to f32 by default

* convert-hf : sort model part names

`os.listdir` is said to list files in arbitrary order.
Sorting the file names should let "model-00009-of-00042.safetensors"
be loaded before "model-00010-of-00042.safetensors".

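Because the part indices are zero-padded, a plain lexicographic sort already yields numeric order; a tiny sketch with hypothetical file names:

    names = ["model-00010-of-00042.safetensors", "model-00009-of-00042.safetensors"]
    assert sorted(names) == ["model-00009-of-00042.safetensors", "model-00010-of-00042.safetensors"]
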
* convert-hf : use an ABC for Model again

It seems Protocol can't be used as a statically type-checked ABC,
because its subclasses also can't be instantiated. (why did it seem to work?)

At least there's still a way to throw an error when a registered
Model subclass forgets to define the `model_arch` property.

* convert-hf : use a plain class for Model, and forbid direct instantiation

There are no abstract methods used anyway,
so using ABC isn't really necessary.

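A minimal standalone sketch of both ideas (simplified names, not the actual Model class): `__init_subclass__` rejects any subclass that forgets to define `model_arch` at class-definition time, and a check in `__init__` forbids instantiating the base class directly.

    class Base:
        # subclasses must define this class attribute
        model_arch: str

        def __init__(self):
            if self.__class__ == Base:
                raise TypeError(f"{self.__class__.__name__!r} should not be directly instantiated")

        @classmethod
        def __init_subclass__(cls):
            # runs when a subclass is defined, not when it is instantiated
            if "model_arch" not in cls.__dict__:
                raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")

    class Good(Base):
        model_arch = "llama"

    Good()                   # fine
    # Base()                 # TypeError: should not be directly instantiated
    # class Bad(Base): pass  # TypeError: Missing property 'model_arch' for 'Bad'
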
* convert-hf : more consistent formatting of cmdline args

* convert-hf : align the message logged for converted tensors

* convert-hf : fix Refact conversion

* convert-hf : save memory with lazy evaluation

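The core idea, as a simplified sketch with assumed names (the commit's actual LazyTorchTensor is more elaborate and hooks into torch's operator dispatch): tensors read from the model parts are wrapped in an object that only records pending work, and the data is materialized one tensor at a time when the writer finally asks for it, so the whole model never has to sit in memory at once.

    import torch

    class LazyTensorSketch:
        # hypothetical, minimal stand-in for the commit's LazyTorchTensor
        def __init__(self, thunk):
            self._thunk = thunk  # zero-argument callable producing a torch.Tensor

        @classmethod
        def from_eager(cls, tensor: torch.Tensor):
            return cls(lambda: tensor)

        def map(self, fn):
            # queue another transformation without evaluating anything yet
            thunk = self._thunk
            return LazyTensorSketch(lambda: fn(thunk()))

        def numpy(self):
            # evaluation happens only here, when the tensor is about to be written
            return self._thunk().numpy()

    lazy = LazyTensorSketch.from_eager(torch.ones(2, 2)).map(lambda t: t.to(torch.float32) * 2)
    print(lazy.numpy())  # the conversion work runs only at this point
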
* convert-hf : flake8 doesn't like lowercase L as a variable name

* convert-hf : remove einops requirement for InternLM2

* convert-hf : faster model parts loading

Instead of pre-loading them all into a dict, iterate over the tensors
in the model parts progressively, as they are needed in Model.write_tensors.

Conversion for some architectures relies on checking for the presence
of specific tensor names, so for multi-part models, the weight map is read
from the relevant json file to quickly get these names up-front.

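A sketch of that lookup (hypothetical helper name, mirroring what get_tensors does in the diff below): the standard Hugging Face index file maps each tensor name to the part that contains it, so the keys alone give the full set of names without loading any weights.

    import json
    from pathlib import Path

    def read_tensor_names(dir_model: Path) -> set[str]:
        # multi-part safetensors models ship model.safetensors.index.json
        with open(dir_model / "model.safetensors.index.json", "r", encoding="utf-8") as f:
            index = json.load(f)
        weight_map = index.get("weight_map")
        if not isinstance(weight_map, dict):
            raise ValueError("Can't load 'weight_map' from the index file")
        return set(weight_map.keys())
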
* convert-hf : minor changes for consistency

* gguf-py : add tqdm as a dependency

It's small, and it's used for a progress bar
in GGUFWriter.write_tensors_to_file.

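The pattern is as small as it sounds; a sketch (hypothetical function, not the exact GGUFWriter code):

    from tqdm import tqdm

    def write_all(tensors, progress: bool = False):
        if progress:
            tensors = tqdm(tensors, desc="Writing", unit="tensor")
        for tensor in tensors:
            ...  # serialize each tensor to the output file
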
14 files changed:
convert-hf-to-gguf.py
convert.py
examples/server/tests/features/steps/steps.py
gguf-py/gguf/constants.py
gguf-py/gguf/gguf_reader.py
gguf-py/gguf/gguf_writer.py
gguf-py/gguf/vocab.py
gguf-py/pyproject.toml
gguf-py/scripts/gguf-dump.py
gguf-py/scripts/gguf-new-metadata.py
pyrightconfig.json [new file with mode: 0644]
requirements/requirements-convert-hf-to-gguf-update.txt
requirements/requirements-convert-hf-to-gguf.txt
requirements/requirements-convert.txt

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 454e9fcdd784e5825de16287296d47fb88ea6bcd..1dc18b2a5772189314f5bafd01f8e044f9b3c227 100755 (executable)
@@ -9,11 +9,10 @@ import json
 import os
 import re
 import sys
-from abc import ABC, abstractmethod
 from enum import IntEnum
 from pathlib import Path
 from hashlib import sha256
-from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterator, Sequence, TypeVar, cast
+from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Sequence, TypeVar, cast, overload
 
 import numpy as np
 import torch
@@ -25,7 +24,7 @@ if 'NO_LOCAL_GGUF' not in os.environ:
     sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
 import gguf
 
-from convert import LlamaHfVocab, permute
+from convert import LlamaHfVocab
 
 logger = logging.getLogger("hf-to-gguf")
 
@@ -44,29 +43,55 @@ class SentencePieceTokenTypes(IntEnum):
 AnyModel = TypeVar("AnyModel", bound="type[Model]")
 
 
-class Model(ABC):
+class Model:
     _model_classes: dict[str, type[Model]] = {}
 
-    def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool):
+    dir_model: Path
+    ftype: int
+    fname_out: Path
+    is_big_endian: bool
+    endianess: gguf.GGUFEndian
+    use_temp_file: bool
+    lazy: bool
+    part_names: list[str]
+    is_safetensors: bool
+    hparams: dict[str, Any]
+    gguf_writer: gguf.GGUFWriter
+    block_count: int
+    tensor_map: gguf.TensorNameMap
+    tensor_names: set[str] | None
+
+    # subclasses should define this!
+    model_arch: gguf.MODEL_ARCH
+
+    def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool, eager: bool):
+        if self.__class__ == Model:
+            raise TypeError(f"{self.__class__.__name__!r} should not be directly instantiated")
         self.dir_model = dir_model
         self.ftype = ftype
         self.fname_out = fname_out
         self.is_big_endian = is_big_endian
         self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
         self.use_temp_file = use_temp_file
-        self.is_safetensors = self._is_model_safetensors()
-        self.num_parts = Model.count_model_parts(self.dir_model, ".safetensors" if self.is_safetensors else ".bin")
-        self.part_names = self._get_part_names()
+        self.lazy = not eager
+        self.part_names = Model.get_model_part_names(self.dir_model, ".safetensors")
+        self.is_safetensors = len(self.part_names) > 0
+        if not self.is_safetensors:
+            self.part_names = Model.get_model_part_names(self.dir_model, ".bin")
         self.hparams = Model.load_hparams(self.dir_model)
         self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file)
         self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer"])
+        self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
+        self.tensor_names = None
 
-    @property
-    @abstractmethod
-    def model_arch(self) -> gguf.MODEL_ARCH:
-        pass
+    @classmethod
+    def __init_subclass__(cls):
+        # can't use an abstract property, because overriding it without type errors
+        # would require using decorated functions instead of simply defining the property
+        if "model_arch" not in cls.__dict__:
+            raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")
 
-    def find_hparam(self, keys: Sequence[str], optional: bool = False) -> Any:
+    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
         key = next((k for k in keys if k in self.hparams), None)
         if key is not None:
             return self.hparams[key]
@@ -78,6 +103,22 @@ class Model(ABC):
         self._set_vocab_gpt2()
 
     def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
+        tensor_names_from_parts: set[str] = set()
+
+        if len(self.part_names) > 1:
+            self.tensor_names = set()
+            index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin"
+            index_name += ".index.json"
+            logger.info(f"gguf: loading model weight map from '{index_name}'")
+            with open(self.dir_model / index_name, "r", encoding="utf-8") as f:
+                index: dict[str, Any] = json.load(f)
+                weight_map = index.get("weight_map")
+                if weight_map is None or not isinstance(weight_map, dict):
+                    raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
+                self.tensor_names.update(weight_map.keys())
+        else:
+            self.tensor_names = tensor_names_from_parts
+
         for part_name in self.part_names:
             logger.info(f"gguf: loading model part '{part_name}'")
             ctx: ContextManager[Any]
@@ -88,10 +129,33 @@ class Model(ABC):
                 ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True))
 
             with ctx as model_part:
+                tensor_names_from_parts.update(model_part.keys())
+
                 for name in model_part.keys():
                     data = model_part.get_tensor(name) if self.is_safetensors else model_part[name]
+                    if self.lazy:
+                        data = LazyTorchTensor.from_eager(data)
                     yield name, data
 
+        # only verify tensor name presence; it doesn't matter if they are not in the right files
+        if len(sym_diff := tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0:
+            raise ValueError(f"Mismatch between weight map and model parts for tensor names: {sym_diff}")
+
+    def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
+        name: str = gguf.TENSOR_NAMES[key]
+        if key not in gguf.MODEL_TENSORS[self.model_arch]:
+            raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
+        if "{bid}" in name:
+            assert bid is not None
+            name = name.format(bid=bid)
+        return name + suffix
+
+    def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
+        new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
+        if new_name is None:
+            raise ValueError(f"Can not map tensor {name!r}")
+        return new_name
+
     def set_gguf_parameters(self):
         self.gguf_writer.add_name(self.dir_model.name)
         self.gguf_writer.add_block_count(self.block_count)
@@ -135,12 +199,27 @@ class Model(ABC):
         self.gguf_writer.add_file_type(self.ftype)
         logger.info(f"gguf: file type = {self.ftype}")
 
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
+
+        return [(self.map_tensor_name(name), data_torch)]
+
+    def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+        del name, new_name, bid, n_dims  # unused
+
+        return False
+
+    def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+        del name, new_name, bid, n_dims  # unused
+
+        return False
+
     def write_tensors(self):
-        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+        max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")
+
         for name, data_torch in self.get_tensors():
             # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
+            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                 continue
 
             old_dtype = data_torch.dtype
@@ -149,37 +228,52 @@ class Model(ABC):
             if data_torch.dtype not in (torch.float16, torch.float32):
                 data_torch = data_torch.to(torch.float32)
 
-            data = data_torch.squeeze().numpy()
+            # use the first number-like part of the tensor name as the block id
+            bid = None
+            for part in name.split("."):
+                if part.isdecimal():
+                    bid = int(part)
+                    break
+
+            for new_name, data in ((n, d.squeeze().numpy()) for n, d in self.modify_tensors(data_torch, name, bid)):
+                data: np.ndarray = data  # type hint
+                n_dims = len(data.shape)
+                data_dtype = data.dtype
 
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
+                # if f32 desired, convert any float16 to float32
+                if self.ftype == 0 and data_dtype == np.float16:
+                    data = data.astype(np.float32)
 
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
+                # when both are True, f32 should win
+                extra_f32 = self.extra_f32_tensors(name, new_name, bid, n_dims)
+                extra_f16 = self.extra_f16_tensors(name, new_name, bid, n_dims)
+
+                # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
+                extra_f32 = extra_f32 or n_dims == 1 or new_name.endswith("_norm.weight")
+
+                # if f16 desired, convert any float32 2-dim weight tensors to float16
+                extra_f16 = extra_f16 or (name.endswith(".weight") and n_dims >= 2)
 
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
+                # when both extra_f32 and extra_f16 are False, convert to float32 by default
+                if self.ftype == 1 and data_dtype == np.float16 and (extra_f32 or not extra_f16):
+                    data = data.astype(np.float32)
 
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
-                data = data.astype(np.float32)
+                if self.ftype == 1 and data_dtype == np.float32 and extra_f16 and not extra_f32:
+                    data = data.astype(np.float16)
 
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
+                # reverse shape to make it similar to the internal ggml dimension order
+                shape_str = f"{{{', '.join(str(n) for n in reversed(data.shape))}}}"
 
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+                # n_dims is implicit in the shape
+                logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data.dtype}, shape = {shape_str}")
 
-            self.gguf_writer.add_tensor(new_name, data)
+                self.gguf_writer.add_tensor(new_name, data)
 
     def write(self):
         self.write_tensors()
         self.gguf_writer.write_header_to_file()
         self.gguf_writer.write_kv_data_to_file()
-        self.gguf_writer.write_tensors_to_file()
+        self.gguf_writer.write_tensors_to_file(progress=True)
         self.gguf_writer.close()
 
     def write_vocab(self):
@@ -188,16 +282,18 @@ class Model(ABC):
         self.gguf_writer.close()
 
     @staticmethod
-    def count_model_parts(dir_model: Path, prefix: str) -> int:
-        num_parts = 0
+    def get_model_part_names(dir_model: Path, suffix: str) -> list[str]:
+        part_names: list[str] = []
         for filename in os.listdir(dir_model):
-            if filename.endswith(prefix):
-                num_parts += 1
+            if filename.endswith(suffix):
+                part_names.append(filename)
 
-        return num_parts
+        part_names.sort()
+
+        return part_names
 
     @staticmethod
-    def load_hparams(dir_model):
+    def load_hparams(dir_model: Path):
         with open(dir_model / "config.json", "r", encoding="utf-8") as f:
             return json.load(f)
 
@@ -205,32 +301,19 @@ class Model(ABC):
     def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
         assert names
 
-        def func(modelcls: type[Model]):
+        def func(modelcls: AnyModel) -> AnyModel:
             for name in names:
                 cls._model_classes[name] = modelcls
             return modelcls
         return func
 
     @classmethod
-    def from_model_architecture(cls, arch):
+    def from_model_architecture(cls, arch: str) -> type[Model]:
         try:
             return cls._model_classes[arch]
         except KeyError:
             raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
 
-    def _is_model_safetensors(self) -> bool:
-        return Model.count_model_parts(self.dir_model, ".safetensors") > 0
-
-    def _get_part_names(self):
-        if self.is_safetensors:
-            if self.num_parts == 1:  # there's only one .safetensors file
-                return ("model.safetensors",)
-            return (f"model-{n:05}-of-{self.num_parts:05}.safetensors" for n in range(1, self.num_parts + 1))
-
-        if self.num_parts == 1:  # there's only one .bin file
-            return ("pytorch_model.bin",)
-        return (f"pytorch_model-{n:05}-of-{self.num_parts:05}.bin" for n in range(1, self.num_parts + 1))
-
     # used for GPT-2 BPE and WordPiece vocabs
     def get_vocab_base(self) -> tuple[list[str], list[int], str]:
         tokens: list[str] = []
@@ -420,22 +503,24 @@ class Model(ABC):
         if not tokenizer_path.is_file():
             raise FileNotFoundError(f"File not found: {tokenizer_path}")
 
-        tokenizer = SentencePieceProcessor(str(tokenizer_path))
+        tokenizer = SentencePieceProcessor()
+        tokenizer.LoadFromFile(str(tokenizer_path))
+
         vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
 
         for token_id in range(tokenizer.vocab_size()):
-            piece = tokenizer.id_to_piece(token_id)
+            piece = tokenizer.IdToPiece(token_id)
             text = piece.encode("utf-8")
-            score = tokenizer.get_score(token_id)
+            score = tokenizer.GetScore(token_id)
 
             toktype = SentencePieceTokenTypes.NORMAL
-            if tokenizer.is_unknown(token_id):
+            if tokenizer.IsUnknown(token_id):
                 toktype = SentencePieceTokenTypes.UNKNOWN
-            elif tokenizer.is_control(token_id):
+            elif tokenizer.IsControl(token_id):
                 toktype = SentencePieceTokenTypes.CONTROL
-            elif tokenizer.is_unused(token_id):
+            elif tokenizer.IsUnused(token_id):
                 toktype = SentencePieceTokenTypes.UNUSED
-            elif tokenizer.is_byte(token_id):
+            elif tokenizer.IsByte(token_id):
                 toktype = SentencePieceTokenTypes.BYTE
 
             tokens.append(text)
@@ -458,7 +543,7 @@ class Model(ABC):
             pad_count = vocab_size - len(tokens)
             logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
             for i in range(1, pad_count + 1):
-                tokens.append(f"[PAD{i}]")
+                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                 scores.append(-1000.0)
                 toktypes.append(SentencePieceTokenTypes.UNUSED)
 
@@ -533,81 +618,52 @@ class BloomModel(Model):
         self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
         self.gguf_writer.add_file_type(self.ftype)
 
-    def write_tensors(self):
-        block_count = self.hparams["n_layer"]
-        tensors = dict(self.get_tensors())
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        has_lm_head = True
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
+
         n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
         n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
 
-        for name, data_torch in tensors.items():
-            if "lm_head.weight" not in tensors.keys() and "output.weight" not in tensors.keys():
-                has_lm_head = False
+        name = re.sub(r'transformer\.', '', name)
+
+        tensors: list[tuple[str, Tensor]] = []
+
+        if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
+            # Map bloom-style qkv_linear to gpt-style qkv_linear
+            # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
+            # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
+            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
+            data_torch = torch.cat(
+                (
+                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
+                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
+                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
+                ),
+                dim=0,
+            )
+            logger.info("re-format attention.linear_qkv.weight")
+        elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
+            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
+            data_torch = torch.cat(
+                (
+                    qkv_bias[:, 0, :].reshape((n_embed,)),
+                    qkv_bias[:, 1, :].reshape((n_embed,)),
+                    qkv_bias[:, 2, :].reshape((n_embed,)),
+                ),
+                dim=0,
+            )
+            logger.info("re-format attention.linear_qkv.bias")
 
-            name = re.sub(r'transformer\.', '', name)
+        tensors.append((self.map_tensor_name(name), data_torch))
 
-            old_dtype = data_torch.dtype
+        if name == "word_embeddings.weight":
+            assert self.tensor_names is not None
 
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
+            # TODO: tie them at runtime, don't duplicate in the model file
+            if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
+                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
 
-            data = data_torch.squeeze().numpy()
-
-            if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
-                # Map bloom-style qkv_linear to gpt-style qkv_linear
-                # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
-                # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
-                qkv_weights = data.reshape((n_head, 3, n_embed // n_head, n_embed))
-                data = np.concatenate(
-                    (
-                        qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
-                        qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
-                        qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
-                    ),
-                    axis=0,
-                )
-                logger.info("re-format attention.linear_qkv.weight")
-            elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
-                qkv_bias = data.reshape((n_head, 3, n_embed // n_head))
-                data = np.concatenate(
-                    (
-                        qkv_bias[:, 0, :].reshape((n_embed,)),
-                        qkv_bias[:, 1, :].reshape((n_embed,)),
-                        qkv_bias[:, 2, :].reshape((n_embed,)),
-                    ),
-                    axis=0,
-                )
-                logger.info("re-format attention.linear_qkv.bias")
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
-
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
-
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
-
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
-
-            logger.info(f"=> {new_name}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
-
-            self.gguf_writer.add_tensor(new_name, data)
-
-            if not has_lm_head and name == "word_embeddings.weight":
-                self.gguf_writer.add_tensor("output.weight", data)
-                logger.info(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+        return tensors
 
 
 @Model.register("MPTForCausalLM")
@@ -643,50 +699,16 @@ class MPTModel(Model):
         else:
             self.gguf_writer.add_max_alibi_bias(0.0)
 
-    def write_tensors(self):
-        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers"))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        for name, data_torch in self.get_tensors():
-            # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
-                continue
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
 
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            data = data_torch.squeeze().numpy()
-
-            # map tensor names
-            if "scales" in name:
-                new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias", ".scales"))
-                if new_name is not None:
-                    new_name = new_name.replace("scales", "act.scales")
-            else:
-                new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
-
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
-
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
-
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
-
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+        if "scales" in name:
+            new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales"))
+            new_name = new_name.replace("scales", "act.scales")
+        else:
+            new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias"))
 
-            self.gguf_writer.add_tensor(new_name, data)
+        return [(new_name, data_torch)]
 
 
 @Model.register("OrionForCausalLM")
@@ -726,48 +748,6 @@ class OrionModel(Model):
         # ref:  https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571
         self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"])
 
-    def write_tensors(self):
-        # Collect tensors from generator object
-        model_kv = dict(self.get_tensors())
-        block_count = self.hparams["num_hidden_layers"]
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
-        for name, data_torch in model_kv.items():
-            # we don't need these
-            if name.endswith(".rotary_emb.inv_freq"):
-                continue
-
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            data = data_torch.squeeze().numpy()
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
-
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
-
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
-
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
-
-            logger.info(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-            self.gguf_writer.add_tensor(new_name, data)
-
 
 @Model.register("BaichuanForCausalLM", "BaiChuanForCausalLM")
 class BaichuanModel(Model):
@@ -809,60 +789,26 @@ class BaichuanModel(Model):
                 self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                 self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
 
-    def write_tensors(self):
-        # Collect tensors from generator object
-        model_kv = dict(self.get_tensors())
-        block_count = self.hparams["num_hidden_layers"]
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         head_count = self.hparams["num_attention_heads"]
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
         head_count_kv = self.hparams.get("num_key_value_heads", head_count)
 
-        for i in range(block_count):
-            if (w := model_kv.get(f"model.layers.{i}.self_attn.W_pack.weight")) is not None:
-                logger.info(f"Unpacking and permuting layer {i}")
-                model_kv[f"model.layers.{i}.self_attn.q_proj.weight"] = \
-                    self._reverse_hf_permute_part(w, 0, head_count, head_count)
-                model_kv[f"model.layers.{i}.self_attn.k_proj.weight"] = \
-                    self._reverse_hf_permute_part(w, 1, head_count, head_count_kv)
-                model_kv[f"model.layers.{i}.self_attn.v_proj.weight"] = \
-                    self._reverse_hf_part(w, 2)
-                del model_kv[f"model.layers.{i}.self_attn.W_pack.weight"]
-
-        for name, data_torch in model_kv.items():
-            # we don't need these
-            if name.endswith(".rotary_emb.inv_freq"):
-                continue
-
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            data = data_torch.squeeze().numpy()
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
-
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
-
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
-
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
+        tensors: list[tuple[str, Tensor]] = []
+
+        if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight":
+            logger.info(f"Unpacking and permuting layer {bid}")
+            tensors = [
+                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid),
+                    self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)),
+                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid),
+                    self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)),
+                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid),
+                    self._reverse_hf_part(data_torch, 2)),
+            ]
+        else:
+            tensors = [(self.map_tensor_name(name), data_torch)]
 
-            logger.info(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-            self.gguf_writer.add_tensor(new_name, data)
+        return tensors
 
     def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
         if n_kv_head is not None and n_head != n_kv_head:
@@ -894,7 +840,7 @@ class XverseModel(Model):
         dir_model = self.dir_model
         hparams = self.hparams
 
-        tokens: list[bytearray] = []
+        tokens: list[bytes] = []
         toktypes: list[int] = []
 
         from transformers import AutoTokenizer
@@ -902,7 +848,7 @@ class XverseModel(Model):
         vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
         assert max(tokenizer.vocab.values()) < vocab_size
 
-        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
+        reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
         added_vocab = tokenizer.get_added_vocab()
 
         for token_id in range(vocab_size):
@@ -965,55 +911,19 @@ class XverseModel(Model):
                 self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                 self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
 
-    def write_tensors(self):
-        # Collect tensors from generator object
-        model_kv = dict(self.get_tensors())
-        block_count = self.hparams["num_hidden_layers"]
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
+
         head_count = self.hparams["num_attention_heads"]
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
         head_count_kv = self.hparams.get("num_key_value_heads", head_count)
 
-        for name, data_torch in model_kv.items():
-            # we don't need these
-            if name.endswith(".rotary_emb.inv_freq"):
-                continue
-
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            # HF models permute some of the tensors, so we need to undo that
-            if name.endswith(("q_proj.weight")):
-                data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
-            if name.endswith(("k_proj.weight")):
-                data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)
-
-            data = data_torch.squeeze().numpy()
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
+        # HF models permute some of the tensors, so we need to undo that
+        if name.endswith("q_proj.weight"):
+            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
+        if name.endswith("k_proj.weight"):
+            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)
 
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
-
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
-
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
-
-            logger.info(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-            self.gguf_writer.add_tensor(new_name, data)
+        return [(self.map_tensor_name(name), data_torch)]
 
     def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
         if n_kv_head is not None and n_head != n_kv_head:
@@ -1054,71 +964,31 @@ class FalconModel(Model):
         self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
         self.gguf_writer.add_file_type(self.ftype)
 
-    def write_tensors(self):
-        block_count = self.hparams.get("num_hidden_layers")
-        if block_count is None:
-            block_count = self.hparams["n_layer"]  # old name
-
-        n_head = self.hparams.get("num_attention_heads")
-        if n_head is None:
-            n_head = self.hparams["n_head"]  # old name
-
-        n_head_kv = self.hparams.get("num_kv_heads")
-        if n_head_kv is None:
-            n_head_kv = self.hparams.get("n_head_kv", 1)  # old name
-
-        head_dim = self.hparams["hidden_size"] // n_head
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
-        for name, data_torch in self.get_tensors():
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            # QKV tensor transform
-            # The original query_key_value tensor contains n_head_kv "kv groups",
-            # each consisting of n_head/n_head_kv query weights followed by one key
-            # and one value weight (shared by all query heads in the kv group).
-            # This layout makes it a big pain to work with in GGML.
-            # So we rearrange them here,, so that we have n_head query weights
-            # followed by n_head_kv key weights followed by n_head_kv value weights,
-            # in contiguous fashion.
-            # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
-
-            if "query_key_value" in name:
-                qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
-                q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
-                k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
-                v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
-                data_torch = torch.cat((q, k, v)).reshape_as(data_torch)
-
-            data = data_torch.squeeze().numpy()
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
 
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
+        # QKV tensor transform
+        # The original query_key_value tensor contains n_head_kv "kv groups",
+        # each consisting of n_head/n_head_kv query weights followed by one key
+        # and one value weight (shared by all query heads in the kv group).
+        # This layout makes it a big pain to work with in GGML.
+        # So we rearrange them here,, so that we have n_head query weights
+        # followed by n_head_kv key weights followed by n_head_kv value weights,
+        # in contiguous fashion.
+        # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
 
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
+        if "query_key_value" in name:
+            n_head = self.find_hparam(["num_attention_heads", "n_head"])
+            n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1
+            head_dim = self.hparams["hidden_size"] // n_head
 
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
+            qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
+            q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
+            k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
+            v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
+            data_torch = torch.cat((q, k, v)).reshape_as(data_torch)
 
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
-
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
-            self.gguf_writer.add_tensor(new_name, data)
+        return [(self.map_tensor_name(name), data_torch)]
 
 
 @Model.register("GPTBigCodeForCausalLM")
@@ -1164,7 +1034,7 @@ class RefactModel(Model):
         self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
         self.gguf_writer.add_file_type(self.ftype)
 
-    def write_tensors(self):
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         hidden_dim = self.hparams["n_embd"]
         inner_dim = 4 * hidden_dim
         hidden_dim = int(2 * inner_dim / 3)
@@ -1173,56 +1043,23 @@ class RefactModel(Model):
         n_head = self.hparams["n_head"]
         n_head_kv = 1
         head_dim = self.hparams["n_embd"] // n_head
-        block_count = self.hparams["n_layer"]
-
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
-        tensors = dict(self.get_tensors())
-        for i in range(block_count):
-            if (w := tensors.get(f"transformer.h.{i}.attn.kv.weight")) is not None:
-                tensors[f"model.layers.{i}.self_attn.k_proj.weight"] = w[:n_head_kv * head_dim]
-                tensors[f"model.layers.{i}.self_attn.v_proj.weight"] = w[n_head_kv * head_dim:]
-                del tensors[f"transformer.h.{i}.attn.kv.weight"]
-            if (w := tensors.get(f"transformer.h.{i}.attn.q.weight")) is not None:
-                tensors[f"model.layers.{i}.self_attn.q_proj.weight"] = w
-                del tensors[f"transformer.h.{i}.attn.q.weight"]
-            if (w := tensors.get(f"transformer.h.{i}.mlp.gate_up_proj.weight")) is not None:
-                tensors[f"model.layers.{i}.mlp.gate_proj.weight"] = w[:ff_dim]
-                tensors[f"model.layers.{i}.mlp.up_proj.weight"] = w[ff_dim:]
-                del tensors[f"transformer.h.{i}.mlp.gate_up_proj.weight"]
-
-        for name, data_torch in tensors.items():
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            data = data_torch.squeeze().numpy()
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight",))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
 
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
+        tensors: list[tuple[str, Tensor]] = []
 
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
+        if bid is not None:
+            if name == f"transformer.h.{bid}.attn.kv.weight":
+                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim]))
+                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:]))
+            elif name == f"transformer.h.{bid}.attn.q.weight":
+                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch))
+            elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight":
+                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]))
+                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]))
 
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
+        if len(tensors) == 0:
+            tensors.append((self.map_tensor_name(name), data_torch))
 
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
-            self.gguf_writer.add_tensor(new_name, data)
+        return tensors
 
 
 @Model.register("PersimmonForCausalLM")
@@ -1257,22 +1094,11 @@ class PersimmonModel(Model):
         # self.gguf_writer.add_bos_token_id(71013)
         # self.gguf_writer.add_eos_token_id(71013)
 
-    def write_tensors(self):
-        block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers"))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+    def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+        del name, new_name, bid, n_dims  # unused
 
-        for name, data_torch in self.get_tensors():
-            if name.endswith(".self_attention.rotary_emb.inv_freq"):
-                continue
-            old_dtype = data_torch.dtype
-            # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?)
-            data = data_torch.to(torch.float32).squeeze().numpy()
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-            n_dims = len(data.shape)
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-            self.gguf_writer.add_tensor(new_name, data)
+        # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?)
+        return True
 
 
 @Model.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM")
@@ -1302,84 +1128,67 @@ class StableLMModel(Model):
         self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
         self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"]))
 
-    def write_tensors(self):
-        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        n_head = self.hparams.get("num_attention_heads")
-        n_kv_head = self.hparams.get("num_key_value_heads")
-        q_norms = dict()
-        k_norms = dict()
-        for name, data_torch in self.get_tensors():
-            # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
-                continue
+    _q_norms: list[dict[str, Tensor]] | None = None
+    _k_norms: list[dict[str, Tensor]] | None = None
 
-            old_dtype = data_torch.dtype
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        n_head = self.hparams["num_attention_heads"]
+        n_kv_head = self.hparams["num_key_value_heads"]
 
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
+        if name.find("q_layernorm.norms") != -1:
+            assert bid is not None
 
-            data = data_torch.squeeze().numpy()
-            n_dims = len(data.shape)
-            if name.find("q_layernorm.norms") != -1:
-                q_norms[name] = data
-                if len(q_norms) >= (block_count * n_head):
-                    self._stack_qk_norm(block_count, name, tensor_map, n_head, q_norms, n_dims, layer_name="q_layernorm")
-                continue
-            if name.find("k_layernorm.norms") != -1:
-                k_norms[name] = data
-                if len(k_norms) >= (block_count * n_kv_head):
-                    self._stack_qk_norm(block_count, name, tensor_map, n_kv_head, k_norms, n_dims, layer_name="k_layernorm")
-                continue
+            if self._q_norms is None:
+                self._q_norms = [{} for _ in range(self.block_count)]
 
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
+            self._q_norms[bid][name] = data_torch
 
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
+            if len(self._q_norms[bid]) >= n_head:
+                return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm")
+            else:
+                return []
 
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
+        if name.find("k_layernorm.norms") != -1:
+            assert bid is not None
 
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
-                data = data.astype(np.float32)
+            if self._k_norms is None:
+                self._k_norms = [{} for _ in range(self.block_count)]
 
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2:
-                data = data.astype(np.float16)
+            self._k_norms[bid][name] = data_torch
 
-            logger.debug(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+            if len(self._k_norms[bid]) >= n_kv_head:
+                return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm")
+            else:
+                return []
 
-            self.gguf_writer.add_tensor(new_name, data)
+        return [(self.map_tensor_name(name), data_torch)]
 
-    def _stack_qk_norm(self, block_count, name, tensor_map, n_head, norms, n_dims, layer_name="q_layernorm"):
-        for bid in range(block_count):
-            datas = []
-            for xid in range(n_head):
-                ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
-                datas.append(norms[ename])
-                del norms[ename]
-            data = np.stack(datas, axis=0)
-            data_dtype = data.dtype
-            merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
-            new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-            if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
-                data = data.astype(np.float32)
+    def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"):
+        datas: list[Tensor] = []
+        # extract the norms in order
+        for xid in range(n_head):
+            ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
+            datas.append(norms[ename])
+            del norms[ename]
+        data_torch = torch.stack(datas, dim=0)
 
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2:
-                data = data.astype(np.float16)
+        merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
+        new_name = self.map_tensor_name(merged_name)
 
-            logger.debug(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+        return [(new_name, data_torch)]
 
-            self.gguf_writer.add_tensor(new_name, data)
+    def write_tensors(self):
+        super().write_tensors()
+
+        if self._q_norms is not None or self._k_norms is not None:
+            # flatten two `list[dict[str, Tensor]]` into a single `list[str]`
+            norms = (
+                [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else []
+            ) + (
+                [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else []
+            )
+            if len(norms) > 0:
+                raise ValueError(f"Unprocessed norms: {norms}")
 
 
 @Model.register("LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM")
@@ -1419,102 +1228,69 @@ class LlamaModel(Model):
                 self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                 self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
 
-    # Same as super class, but permuting q_proj, k_proj
-    def write_tensors(self):
-        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        n_head = self.hparams.get("num_attention_heads")
-        n_kv_head = self.hparams.get("num_key_value_heads")
-        n_experts = self.hparams.get("num_local_experts")
-        experts = dict()
-        for name, data_torch in self.get_tensors():
-            # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
-                continue
-
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            data = data_torch.numpy()
-
-            if name.endswith("q_proj.weight"):
-                data = permute(data, n_head, n_head)
-            if name.endswith("k_proj.weight"):
-                data = permute(data, n_head, n_kv_head)
-
-            data = data.squeeze()
+    @staticmethod
+    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
+        if n_head_kv is not None and n_head != n_head_kv:
+            n_head = n_head_kv
+        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
+                .swapaxes(1, 2)
+                .reshape(weights.shape))
 
-            # process the experts separately
-            if name.find("block_sparse_moe.experts") != -1:
-                experts[name] = data
-                if len(experts) >= n_experts:
-                    # merge the experts into a single 3d tensor
-                    for bid in range(block_count):
-                        for wid in range(1, 4):
-                            full = True
-                            for xid in range(n_experts):
-                                ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight"
-                                if ename not in experts:
-                                    full = False
-                                    break
-                            if not full:
-                                continue
+    _experts: list[dict[str, Tensor]] | None = None
 
-                            datas = []
-                            for xid in range(n_experts):
-                                ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight"
-                                datas.append(experts[ename])
-                                del experts[ename]
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        n_head = self.hparams["num_attention_heads"]
+        n_kv_head = self.hparams.get("num_key_value_heads")
 
-                            data = np.stack(datas, axis=0)
-                            data_dtype = data.dtype
+        if name.endswith("q_proj.weight"):
+            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
+        if name.endswith("k_proj.weight"):
+            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
 
-                            if self.ftype == 0 and data_dtype == np.float16:
-                                data = data.astype(np.float32)
+        # process the experts separately
+        if name.find("block_sparse_moe.experts") != -1:
+            n_experts = self.hparams["num_local_experts"]
 
-                            if self.ftype == 1 and data_dtype == np.float32:
-                                data = data.astype(np.float16)
+            assert bid is not None
 
-                            merged_name = f"layers.{bid}.feed_forward.experts.w{wid}.weight"
+            if self._experts is None:
+                self._experts = [{} for _ in range(self.block_count)]
 
-                            new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
-                            if new_name is None:
-                                raise ValueError(f"Can not map tensor {name!r}")
+            self._experts[bid][name] = data_torch
 
-                            logger.info(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+            if len(self._experts[bid]) >= n_experts * 3:
+                tensors: list[tuple[str, Tensor]] = []
 
-                            self.gguf_writer.add_tensor(new_name, data)
-                continue
+                # merge the experts into a single 3d tensor
+                for wid in ["w1", "w2", "w3"]:
+                    datas: list[Tensor] = []
 
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
+                    for xid in range(n_experts):
+                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
+                        datas.append(self._experts[bid][ename])
+                        del self._experts[bid][ename]
 
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
+                    data_torch = torch.stack(datas, dim=0)
 
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
+                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"
 
-            # 1d tensors need to be converted to float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
+                    new_name = self.map_tensor_name(merged_name)
 
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
+                    tensors.append((new_name, data_torch))
+                return tensors
+            else:
+                return []
 
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+        return [(self.map_tensor_name(name), data_torch)]
 
-            self.gguf_writer.add_tensor(new_name, data)
+    def write_tensors(self):
+        super().write_tensors()
 
-        if len(experts) > 0:
-            raise ValueError(f"Unprocessed experts: {experts.keys()}")
+        if self._experts is not None:
+            # flatten `list[dict[str, Tensor]]` into `list[str]`
+            experts = [k for d in self._experts for k in d.keys()]
+            if len(experts) > 0:
+                raise ValueError(f"Unprocessed experts: {experts}")
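
A hedged sketch of the expert merge above, with toy sizes: each expert's 2-D weight becomes one slice of a single 3-D tensor. `torch.stack` adds the new expert dimension, while `torch.cat` would only grow an existing one, which is why the two are not interchangeable here:

import torch

n_experts, n_ff, n_embd = 3, 16, 8  # made-up sizes
expert_weights = [torch.randn(n_ff, n_embd) for _ in range(n_experts)]

stacked = torch.stack(expert_weights, dim=0)  # new leading dim: (n_experts, n_ff, n_embd)
catted = torch.cat(expert_weights, dim=0)     # no new dim: (n_experts * n_ff, n_embd)

assert stacked.shape == (n_experts, n_ff, n_embd)
assert catted.shape == (n_experts * n_ff, n_embd)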
 
 
 @Model.register("GrokForCausalLM")
@@ -1531,89 +1307,44 @@ class GrokModel(Model):
         super().set_gguf_parameters()
         self.gguf_writer.add_name("Grok")
 
-    def write_tensors(self):
-        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        n_experts = self.hparams.get("num_local_experts")
-        experts = dict()
-        for name, data_torch in self.get_tensors():
-            # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
-                continue
-
-            old_dtype = data_torch.dtype
+    _experts: list[dict[str, Tensor]] | None = None
 
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            data = data_torch.squeeze().numpy()
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        # process the experts separately
+        if name.find(".moe.") != -1:
+            n_experts = self.hparams["num_local_experts"]
 
-            # process the experts separately
-            if name.find(".moe.") != -1:
-                experts[name] = data
-                if len(experts) >= n_experts:
-                    # merge the experts into a single 3d tensor
-                    for bid in range(block_count):
-                        for wid in ["linear", "linear_1", "linear_v"]:
-                            full = True
-                            for xid in range(n_experts):
-                                ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
-                                if ename not in experts:
-                                    full = False
-                                    break
-                            if not full:
-                                continue
+            assert bid is not None
 
-                            datas = []
-                            for xid in range(n_experts):
-                                ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
-                                datas.append(experts[ename])
-                                del experts[ename]
+            if self._experts is None:
+                self._experts = [{} for _ in range(self.block_count)]
 
-                            data = np.stack(datas, axis=0)
-                            data_dtype = data.dtype
+            self._experts[bid][name] = data_torch
 
-                            if self.ftype == 0 and data_dtype == np.float16:
-                                data = data.astype(np.float32)
+            if len(self._experts[bid]) >= n_experts * 3:
+                tensors: list[tuple[str, Tensor]] = []
 
-                            if self.ftype == 1 and data_dtype == np.float32:
-                                data = data.astype(np.float16)
+                # merge the experts into a single 3d tensor
+                for wid in ["linear", "linear_1", "linear_v"]:
+                    datas: list[Tensor] = []
 
-                            merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"
+                    for xid in range(n_experts):
+                        ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
+                        datas.append(self._experts[bid][ename])
+                        del self._experts[bid][ename]
 
-                            new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
-                            if new_name is None:
-                                raise ValueError(f"Can not map tensor {name!r}")
+                    data_torch = torch.stack(datas, dim=0)
 
-                            logger.info(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+                    merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"
 
-                            self.gguf_writer.add_tensor(new_name, data)
-                continue
+                    new_name = self.map_tensor_name(merged_name)
 
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
-
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
-
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
-
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
-
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+                    tensors.append((new_name, data_torch))
+                return tensors
+            else:
+                return []
 
-            self.gguf_writer.add_tensor(new_name, data)
+        return [(self.map_tensor_name(name), data_torch)]
 
 
 @Model.register("DbrxForCausalLM")
@@ -1646,68 +1377,45 @@ class DbrxModel(Model):
         self.gguf_writer.add_file_type(self.ftype)
         logger.info(f"gguf: file type = {self.ftype}")
 
-    def write_tensors(self):
-        block_count = self.hparams.get("n_layers")
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        for name, data_torch in self.get_tensors():
-            n_expert = self.hparams["ffn_config"]["moe_num_experts"]
-            n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
-            n_embd = self.hparams["d_model"]
-
-            # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose
-            # original implementation expects (n_expert, n_ff, n_embd) for all experts weights
-            # But llama.cpp moe graph works differently
-            # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions
-            # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
-            exp_tensor_names = {"ffn.experts.mlp.w1": None,       # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff,   n_expert}
-                                "ffn.experts.mlp.w2": (0, 2, 1),  # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff,   n_embd, n_expert}
-                                "ffn.experts.mlp.v1": None}       # LLM_TENSOR_FFN_UP_EXPS   ggml_tensor->ne{n_embd, n_ff,   n_expert}
-            experts = False
-            for exp_tensor_name in exp_tensor_names.keys():
-                if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
-                    experts = True
-                    data_torch = data_torch.view(n_expert, n_ff, n_embd)
-                    if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
-                        data_torch = data_torch.permute(*permute_tensor)
-                    break
-
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            data = data_torch.squeeze().numpy()
-
-            # map tensor names
-            # In MoE models the ffn tensors are typically most of the model weights,
-            # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
-            # Every other model has the weight names ending in .weight,
-            # let's assume that is the convention which is not the case for dbrx:
-            # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
-            new_name = tensor_map.get_name(name if not experts else name + ".weight", try_suffixes=(".weight",))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
-
-            # Most of the codebase that takes in 1D tensors only handles F32 tensors
-            # and most of the outputs tensors are F32.
-            if data_dtype != np.float32 and n_dims == 1:
-                raise ValueError(f"Can not map tensor {name!r}: all 1D tensors must be F32")
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
+
+        n_expert = self.hparams["ffn_config"]["moe_num_experts"]
+        n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
+        n_embd = self.hparams["d_model"]
+
+        # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose
+        # original implementation expects (n_expert, n_ff, n_embd) for all experts weights
+        # But llama.cpp moe graph works differently
+        # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions
+        # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
+        exp_tensor_names = {"ffn.experts.mlp.w1": None,       # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff,   n_expert}
+                            "ffn.experts.mlp.w2": (0, 2, 1),  # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff,   n_embd, n_expert}
+                            "ffn.experts.mlp.v1": None}       # LLM_TENSOR_FFN_UP_EXPS   ggml_tensor->ne{n_embd, n_ff,   n_expert}
+        experts = False
+
+        for exp_tensor_name in exp_tensor_names.keys():
+            if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
+                experts = True
+                data_torch = data_torch.view(n_expert, n_ff, n_embd)
+                if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
+                    data_torch = data_torch.permute(*permute_tensor)
+                break
 
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
+        # map tensor names
+        # In MoE models the ffn tensors are typically most of the model weights,
+        # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
+        # Every other model has the weight names ending in .weight,
+        # so assume that is the convention, even though dbrx does not follow it:
+        # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
+        new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",))
 
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and n_dims > 1:
-                data = data.astype(np.float16)
+        return [(new_name, data_torch)]
 
-            logger.debug(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+    def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+        del name, new_name, bid  # unused
 
-            self.gguf_writer.add_tensor(new_name, data)
+        return n_dims > 1
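
A small sketch of the dbrx expert reshape above, with toy sizes: the flat expert tensor is viewed as (n_expert, n_ff, n_embd), and the w2 (down) tensor additionally has its last two dims swapped, matching the comment on the expected ggml layout:

import torch

n_expert, n_ff, n_embd = 2, 6, 4  # made-up sizes
flat = torch.randn(n_expert * n_ff * n_embd)

w1 = flat.view(n_expert, n_ff, n_embd)                    # gate/up: used as-is
w2 = flat.view(n_expert, n_ff, n_embd).permute(0, 2, 1)   # down: last two dims swapped

assert w1.shape == (n_expert, n_ff, n_embd)
assert w2.shape == (n_expert, n_embd, n_ff)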
 
 
 @Model.register("MiniCPMForCausalLM")
@@ -1740,53 +1448,19 @@ class MiniCPMModel(Model):
             .reshape(weights.shape)
         )
 
-    def write_tensors(self):
-        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        n_head = self.hparams.get("num_attention_heads")
-        n_kv_head = self.hparams.get("num_key_value_heads")
-        for name, data_torch in self.get_tensors():
-            # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
-                continue
-
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            # HF models permute some of the tensors, so we need to undo that
-            if name.endswith(("q_proj.weight")):
-                data_torch = self._reverse_hf_permute(data_torch, n_head, n_head)
-            if name.endswith(("k_proj.weight")):
-                data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head)
-
-            data = data_torch.squeeze().numpy()
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
 
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
-
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
-
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
+        n_head = self.hparams["num_attention_heads"]
+        n_kv_head = self.hparams.get("num_key_value_heads")
 
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+        # HF models permute some of the tensors, so we need to undo that
+        if name.endswith(("q_proj.weight")):
+            data_torch = self._reverse_hf_permute(data_torch, n_head, n_head)
+        if name.endswith(("k_proj.weight")):
+            data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head)
 
-            self.gguf_writer.add_tensor(new_name, data)
+        return [(self.map_tensor_name(name), data_torch)]
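
The q/k permutation used by the Llama-family and MiniCPM conversions above can be exercised on its own; a self-contained sketch with toy sizes (the function body mirrors `LlamaModel.permute`):

import torch

def permute(weights: torch.Tensor, n_head: int, n_head_kv: int | None) -> torch.Tensor:
    # regroup rows so the HF interleaved RoPE layout matches what llama.cpp expects
    if n_head_kv is not None and n_head != n_head_kv:
        n_head = n_head_kv
    return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape))

n_head, head_dim, n_embd = 4, 6, 8  # made-up sizes (head_dim must be even)
w_q = torch.randn(n_head * head_dim, n_embd)

out = permute(w_q, n_head, n_head)
assert out.shape == w_q.shape  # same shape, rows reordered within each head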
 
 
 @Model.register("QWenLMHeadModel")
@@ -1830,46 +1504,6 @@ class QwenModel(Model):
         self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
         self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
 
-    def write_tensors(self):
-        block_count = self.hparams["num_hidden_layers"]
-        model_kv = dict(self.get_tensors())
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        for name, data_torch in model_kv.items():
-            # we don't need these
-            if name.endswith(".rotary_emb.inv_freq"):
-                continue
-
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            data = data_torch.squeeze().numpy()
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
-
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
-
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
-
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
-
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-            self.gguf_writer.add_tensor(new_name, data)
-
 
 @Model.register("Qwen2ForCausalLM")
 class Qwen2Model(Model):
@@ -1891,92 +1525,52 @@ class Qwen2MoeModel(Model):
         if (n_experts := self.hparams.get("num_experts")) is not None:
             self.gguf_writer.add_expert_count(n_experts)
 
-    def write_tensors(self):
-        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        n_experts = self.hparams.get("num_experts")
-        experts = dict()
-        for name, data_torch in self.get_tensors():
-            # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
-                continue
-
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            data = data_torch.squeeze().numpy()
-
-            # process the experts separately
-            if name.find("experts") != -1:
-                experts[name] = data
-                if len(experts) >= n_experts * 3:
-                    # merge the experts into a single 3d tensor
-                    for bid in range(block_count):
-                        for w_name in ["down_proj", "gate_proj", "up_proj"]:
-                            full = True
-                            for xid in range(n_experts):
-                                ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
-                                if ename not in experts:
-                                    full = False
-                                    break
-                            if not full:
-                                continue
-
-                            datas = []
-                            for xid in range(n_experts):
-                                ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
-                                datas.append(experts[ename])
-                                del experts[ename]
+    _experts: list[dict[str, Tensor]] | None = None
 
-                            data = np.stack(datas, axis=0)
-                            data_dtype = data.dtype
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        # process the experts separately
+        if name.find("experts") != -1:
+            n_experts = self.hparams["num_experts"]
+            assert bid is not None
 
-                            if self.ftype == 0 and data_dtype == np.float16:
-                                data = data.astype(np.float32)
+            if self._experts is None:
+                self._experts = [{} for _ in range(self.block_count)]
 
-                            if self.ftype == 1 and data_dtype == np.float32:
-                                data = data.astype(np.float16)
+            self._experts[bid][name] = data_torch
 
-                            merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
+            if len(self._experts[bid]) >= n_experts * 3:
+                tensors: list[tuple[str, Tensor]] = []
 
-                            new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
-                            if new_name is None:
-                                raise ValueError(f"Can not map tensor {name!r}")
-
-                            logger.debug(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
-
-                            self.gguf_writer.add_tensor(new_name, data)
-                continue
+                # merge the experts into a single 3d tensor
+                for w_name in ["down_proj", "gate_proj", "up_proj"]:
+                    datas: list[Tensor] = []
 
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
+                    for xid in range(n_experts):
+                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
+                        datas.append(self._experts[bid][ename])
+                        del self._experts[bid][ename]
 
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
+                    data_torch = torch.stack(datas, dim=0)
 
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
+                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
 
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
-                data = data.astype(np.float32)
+                    new_name = self.map_tensor_name(merged_name)
 
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
+                    tensors.append((new_name, data_torch))
+                return tensors
+            else:
+                return []
 
-            logger.debug(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+        return [(self.map_tensor_name(name), data_torch)]
 
-            self.gguf_writer.add_tensor(new_name, data)
+    def write_tensors(self):
+        super().write_tensors()
 
-        if len(experts) > 0:
-            raise ValueError(f"Unprocessed experts: {experts.keys()}")
+        if self._experts is not None:
+            # flatten `list[dict[str, Tensor]]` into `list[str]`
+            experts = [k for d in self._experts for k in d.keys()]
+            if len(experts) > 0:
+                raise ValueError(f"Unprocessed experts: {experts}")
 
 
 @Model.register("GPT2LMHeadModel")
@@ -1993,54 +1587,27 @@ class GPT2Model(Model):
         self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
         self.gguf_writer.add_file_type(self.ftype)
 
-    def write_tensors(self):
-        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
-        for name, data_torch in self.get_tensors():
-            # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq", ".attn.bias", ".attn.masked_bias")):
-                continue
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
 
-            if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")):
-                data_torch = data_torch.transpose(1, 0)
-
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
+        tensors: list[tuple[str, Tensor]] = []
 
-            data = data_torch.squeeze().numpy()
+        # we don't need these
+        if name.endswith((".attn.bias", ".attn.masked_bias")):
+            return tensors
 
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
+        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")):
+            data_torch = data_torch.transpose(1, 0)
 
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
+        new_name = self.map_tensor_name(name)
 
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
+        tensors.append((new_name, data_torch))
 
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
+        # note: GPT2 output is tied to (same as) wte in original model
+        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
+            tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
 
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
-
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
-            self.gguf_writer.add_tensor(new_name, data)
-
-            # note: GPT2 output is tied to (same as) wte in original model
-            if new_name == "token_embd.weight":
-                logger.info(f"output.weight, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-                self.gguf_writer.add_tensor("output.weight", data)
+        return tensors
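
A toy sketch of the handling above: HF GPT-2 stores Conv1D weights as (in, out) so they get transposed, and the token embedding is returned a second time under the output name because `lm_head` is tied to `wte`. The tensor names and sizes below are illustrative only:

import torch

n_vocab, n_embd = 10, 4  # made-up sizes
wte = torch.randn(n_vocab, n_embd)
c_fc = torch.randn(n_embd, 4 * n_embd)  # HF Conv1D layout (in, out); ggml wants (out, in)

tensors = [("ffn_up.weight", c_fc.transpose(1, 0))]
tensors.append(("token_embd.weight", wte))
tensors.append(("output.weight", wte))  # same tensor, written under both names

assert tensors[0][1].shape == (4 * n_embd, n_embd)
assert tensors[1][1] is tensors[2][1]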
 
 
 @Model.register("PhiForCausalLM")
@@ -2080,7 +1647,8 @@ class Phi3MiniModel(Model):
         if not tokenizer_path.is_file():
             raise ValueError(f'Error: Missing {tokenizer_path}')
 
-        tokenizer = SentencePieceProcessor(str(tokenizer_path))
+        tokenizer = SentencePieceProcessor()
+        tokenizer.LoadFromFile(str(tokenizer_path))
 
         vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
 
@@ -2090,18 +1658,18 @@ class Phi3MiniModel(Model):
 
         for token_id in range(tokenizer.vocab_size()):
 
-            piece = tokenizer.id_to_piece(token_id)
+            piece = tokenizer.IdToPiece(token_id)
             text = piece.encode("utf-8")
-            score = tokenizer.get_score(token_id)
+            score = tokenizer.GetScore(token_id)
 
             toktype = SentencePieceTokenTypes.NORMAL
-            if tokenizer.is_unknown(token_id):
+            if tokenizer.IsUnknown(token_id):
                 toktype = SentencePieceTokenTypes.UNKNOWN
-            elif tokenizer.is_control(token_id):
+            elif tokenizer.IsControl(token_id):
                 toktype = SentencePieceTokenTypes.CONTROL
-            elif tokenizer.is_unused(token_id):
+            elif tokenizer.IsUnused(token_id):
                 toktype = SentencePieceTokenTypes.UNUSED
-            elif tokenizer.is_byte(token_id):
+            elif tokenizer.IsByte(token_id):
                 toktype = SentencePieceTokenTypes.BYTE
 
             tokens[token_id] = text
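
The token-type classification above relies on the SentencePiece v0.2 API; a hedged, self-contained sketch of the same logic as a helper (the `tokenizer.model` path passed in is hypothetical):

from sentencepiece import SentencePieceProcessor

def classify_tokens(model_path: str) -> list[tuple[bytes, float, str]]:
    # load the model and classify every piece the same way the converter does
    sp = SentencePieceProcessor()
    sp.LoadFromFile(model_path)
    out: list[tuple[bytes, float, str]] = []
    for token_id in range(sp.vocab_size()):
        piece = sp.IdToPiece(token_id)
        score = sp.GetScore(token_id)
        if sp.IsUnknown(token_id):
            kind = "unknown"
        elif sp.IsControl(token_id):
            kind = "control"
        elif sp.IsUnused(token_id):
            kind = "unused"
        elif sp.IsByte(token_id):
            kind = "byte"
        else:
            kind = "normal"
        out.append((piece.encode("utf-8"), score, kind))
    return out

# usage, assuming a local SentencePiece model file exists:
# classify_tokens("tokenizer.model")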
@@ -2187,51 +1755,18 @@ class PlamoModel(Model):
         data_torch = torch.reshape(data_torch, (5120, 5120))
         return data_torch
 
-    def write_tensors(self):
-        block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers"))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
-        for name, data_torch in self.get_tensors():
-            if "self_attn.rotary_emb.inv_freq" in name:
-                continue
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-
-            # shuffle for broadcasting of gqa in ggml_mul_mat
-            if new_name.endswith("attn_q.weight"):
-                data_torch = self.shuffle_attn_q_weight(data_torch)
-            elif new_name.endswith("attn_output.weight"):
-                data_torch = self.shuffle_attn_output_weight(data_torch)
-
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            data = data_torch.squeeze().numpy()
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
-
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
-
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
 
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
+        new_name = self.map_tensor_name(name)
 
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+        # shuffle for broadcasting of gqa in ggml_mul_mat
+        if new_name.endswith("attn_q.weight"):
+            data_torch = self.shuffle_attn_q_weight(data_torch)
+        elif new_name.endswith("attn_output.weight"):
+            data_torch = self.shuffle_attn_output_weight(data_torch)
 
-            self.gguf_writer.add_tensor(new_name, data)
+        return [(new_name, data_torch)]
 
 
 @Model.register("CodeShellForCausalLM")
@@ -2254,51 +1789,21 @@ class CodeShellModel(Model):
         self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
         self.gguf_writer.add_rope_scaling_factor(1.0)
 
-    def write_tensors(self):
-        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        tensors = dict(self.get_tensors())
-        has_lm_head = "lm_head.weight" in tensors.keys() or "output.weight" in tensors.keys()
-        for name, data_torch in tensors.items():
-            # we don't need these
-            if name.endswith((".attn.rotary_emb.inv_freq")):
-                continue
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
 
-            old_dtype = data_torch.dtype
+        new_name = self.map_tensor_name(name)
 
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            data = data_torch.squeeze().numpy()
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
-
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
-
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
-
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
+        tensors: list[tuple[str, Tensor]] = [(new_name, data_torch)]
 
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
+            assert self.tensor_names is not None
 
-            self.gguf_writer.add_tensor(new_name, data)
+            if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
+                # copy tok_embd.weight to output.weight
+                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
 
-            if not has_lm_head and name == "transformer.wte.weight":
-                self.gguf_writer.add_tensor("output.weight", data)
-                logger.info(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+        return tensors
 
 
 @Model.register("InternLM2ForCausalLM")
@@ -2327,27 +1832,29 @@ class InternLM2Model(Model):
         sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
         add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
 
-        tokenizer = SentencePieceProcessor(str(tokenizer_path))
+        tokenizer = SentencePieceProcessor()
+        tokenizer.LoadFromFile(str(tokenizer_path))
+
         vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
 
         for token_id in range(vocab_size):
-            piece = tokenizer.id_to_piece(token_id)
+            piece = tokenizer.IdToPiece(token_id)
             text = piece.encode("utf-8")
-            score = tokenizer.get_score(token_id)
+            score = tokenizer.GetScore(token_id)
             if text == b"\x00":
                 # (TODO): fixme
                 # Hack here and replace the \x00 characters.
-                logger.debug(f"InternLM2 convert token '{text}' to '🐉'!")
-                text = "🐉"
+                logger.warning(f"InternLM2 convert token '{text}' to '🐉'!")
+                text = "🐉".encode("utf-8")
 
             toktype = SentencePieceTokenTypes.NORMAL
-            if tokenizer.is_unknown(token_id):
+            if tokenizer.IsUnknown(token_id):
                 toktype = SentencePieceTokenTypes.UNKNOWN
-            elif tokenizer.is_control(token_id):
+            elif tokenizer.IsControl(token_id):
                 toktype = SentencePieceTokenTypes.CONTROL
-            elif tokenizer.is_unused(token_id):
+            elif tokenizer.IsUnused(token_id):
                 toktype = SentencePieceTokenTypes.UNUSED
-            elif tokenizer.is_byte(token_id):
+            elif tokenizer.IsByte(token_id):
                 toktype = SentencePieceTokenTypes.BYTE
 
             tokens.append(text)
@@ -2384,13 +1891,15 @@ in chat mode so that the conversation can end normally.")
         special_vocab.add_to_gguf(self.gguf_writer)
 
     def _try_get_sft_eos(self, tokenizer):
-        unused_145_list = tokenizer.encode('[UNUSED_TOKEN_145]')
-        im_end_list = tokenizer.encode('<|im_end|>')
+        unused_145_list = tokenizer.Encode('[UNUSED_TOKEN_145]')
+        im_end_list = tokenizer.Encode('<|im_end|>')
+        eos_token = None
         assert (len(unused_145_list) == 1) ^ (len(im_end_list) == 1)
         if len(unused_145_list) == 1:
             eos_token = unused_145_list[0]
         if len(im_end_list) == 1:
             eos_token = im_end_list[0]
+        assert eos_token is not None
         return eos_token
 
     def _hf_permute_qk(self, weights, n_head: int, n_head_kv: int):
@@ -2411,71 +1920,36 @@ in chat mode so that the conversation can end normally.")
         self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
         self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
 
-    def post_write_tensors(self, tensor_map, name, data_torch):
-        old_dtype = data_torch.dtype
-
-        # convert any unsupported data types to float32
-        if data_torch.dtype not in (torch.float16, torch.float32):
-            data_torch = data_torch.to(torch.float32)
-
-        data = data_torch.squeeze().numpy()
-
-        # map tensor names
-        new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-        if new_name is None:
-            raise ValueError(f"Can not map tensor {name!r}")
-
-        n_dims = len(data.shape)
-        data_dtype = data.dtype
-
-        # if f32 desired, convert any float16 to float32
-        if self.ftype == 0 and data_dtype == np.float16:
-            data = data.astype(np.float32)
-
-        # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-        if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-            data = data.astype(np.float32)
-
-        # if f16 desired, convert any float32 2-dim weight tensors to float16
-        if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-            data = data.astype(np.float16)
-
-        logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-        self.gguf_writer.add_tensor(new_name, data)
-
-    def write_tensors(self):
-        from einops import rearrange
-
-        num_heads = self.hparams.get("num_attention_heads")
-        num_kv_heads = self.hparams.get("num_key_value_heads")
-        hidden_size = self.hparams.get("hidden_size")
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        num_heads = self.hparams["num_attention_heads"]
+        num_kv_heads = self.hparams["num_key_value_heads"]
+        hidden_size = self.hparams["hidden_size"]
         q_per_kv = num_heads // num_kv_heads
         head_dim = hidden_size // num_heads
         num_groups = num_heads // q_per_kv
 
-        block_count = self.hparams["num_hidden_layers"]
-        model_kv = dict(self.get_tensors())
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
         qkv_pattern = r"model\.layers\.(\d+)\.attention\.wqkv"
-        for name, data_torch in model_kv.items():
-            # we don't need these
-            if name.endswith(".rotary_emb.inv_freq"):
-                continue
 
-            if re.match(qkv_pattern, name):
-                bid = re.findall(qkv_pattern, name)[0]
-                qkv = data_torch
-                qkv = rearrange(qkv.T, " o (g n i) ->o g n i", g=num_groups, n=q_per_kv + 2, i=head_dim)
-                q, k, v = qkv[..., : q_per_kv, :], qkv[..., q_per_kv: q_per_kv + 1, :], qkv[..., q_per_kv + 1: q_per_kv + 2, :]
-                # The model weights of q and k equire additional reshape.
-                q = self._hf_permute_qk(rearrange(q, " o g n i ->  o (g n i)").T, num_heads, num_heads)
-                k = self._hf_permute_qk(rearrange(k, " o g n i ->  o (g n i)").T, num_heads, num_kv_heads)
-                v = rearrange(v, " o g n i ->  o (g n i)").T
-                self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wq.weight", q)
-                self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wk.weight", k)
-                self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wv.weight", v)
-            else:
-                self.post_write_tensors(tensor_map, name, data_torch)
+        if re.match(qkv_pattern, name):
+            bid = re.findall(qkv_pattern, name)[0]
+            qkv = data_torch
+            # qkv = rearrange(qkv.T, " o (g n i) ->o g n i", g=num_groups, n=q_per_kv + 2, i=head_dim)
+            qkv = qkv.T.reshape((-1, num_groups, q_per_kv + 2, head_dim))
+            q, k, v = qkv[..., : q_per_kv, :], qkv[..., q_per_kv: q_per_kv + 1, :], qkv[..., q_per_kv + 1: q_per_kv + 2, :]
+            # The model weights of q and k require additional reshape.
+            # q = self._hf_permute_qk(rearrange(q, " o g n i ->  o (g n i)").T, num_heads, num_heads)
+            q = self._hf_permute_qk(q.reshape((q.shape[0], -1)).T, num_heads, num_heads)
+            # k = self._hf_permute_qk(rearrange(k, " o g n i ->  o (g n i)").T, num_heads, num_kv_heads)
+            k = self._hf_permute_qk(k.reshape((k.shape[0], -1)).T, num_heads, num_kv_heads)
+            # v = rearrange(v, " o g n i ->  o (g n i)").T
+            v = v.reshape((v.shape[0], -1)).T
+            return [
+                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
+                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
+                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v),
+            ]
+        else:
+            return [(self.map_tensor_name(name), data_torch)]
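
A quick sketch, with toy sizes, of why the plain `reshape` above can stand in for the removed `einops.rearrange("o (g n i) -> o g n i")` call: splitting the last axis into (g, n, i) is just a reshape:

import torch

o, g, n, i = 5, 2, 4, 3  # made-up sizes
x = torch.arange(o * g * n * i, dtype=torch.float32).reshape(o, g * n * i)

y = x.reshape((-1, g, n, i))  # what the converter now does
z = x.reshape((o, g, n, i))   # same thing with the first dim spelled out

assert torch.equal(y, z)
assert y.shape == (o, g, n, i)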
 
 
 @Model.register("BertModel", "CamembertModel")
@@ -2540,43 +2014,20 @@ class BertModel(Model):
         special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
         special_vocab.add_to_gguf(self.gguf_writer)
 
-    def write_tensors(self):
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
-        tensors = dict(self.get_tensors())
-        for name, data_torch in tensors.items():
-            # we are only using BERT for embeddings so we don't need the pooling layer
-            if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
-                continue  # we don't need these
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
 
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
+        # we are only using BERT for embeddings so we don't need the pooling layer
+        if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
+            return []  # we don't need these
 
-            data = data_torch.squeeze().numpy()
-            n_dims = len(data.shape)
-            new_dtype: type[np.floating[Any]]
+        return [(self.map_tensor_name(name), data_torch)]
 
-            if (
-                self.ftype == 1 and name.endswith(".weight") and n_dims == 2
-                and name != "embeddings.token_type_embeddings.weight"  # not used with get_rows, must be F32
-            ):
-                # if f16 desired, convert any float32 2-dim weight tensors to float16
-                new_dtype = np.float16
-            else:
-                # if f32 desired, convert any float16 to float32
-                new_dtype = np.float32
-
-            logger.info(f"{new_name}, n_dims = {n_dims}, {data_torch.dtype} --> {new_dtype}")
-
-            if data.dtype != new_dtype:
-                data = data.astype(new_dtype)
+    def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+        del new_name, bid, n_dims  # unused
 
-            self.gguf_writer.add_tensor(new_name, data)
+        # not used with get_rows, must be F32
+        return name == "embeddings.token_type_embeddings.weight"
 
 
 @Model.register("NomicBertModel")
@@ -2642,45 +2093,20 @@ class GemmaModel(Model):
         self.gguf_writer.add_value_length(hparams["head_dim"])
         self.gguf_writer.add_file_type(self.ftype)
 
-    def write_tensors(self):
-        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
-        for name, data_torch in self.get_tensors():
-            # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
-            # To prevent errors, skip loading lm_head.weight.
-            if name == "lm_head.weight":
-                logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
-                continue
-
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
 
-            # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
-            if name.endswith("norm.weight"):
-                data_torch = data_torch + 1
-            data = data_torch.squeeze().numpy()
+        # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
+        # To prevent errors, skip loading lm_head.weight.
+        if name == "lm_head.weight":
+            logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
+            return []
 
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
+        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
+        if name.endswith("norm.weight"):
+            data_torch = data_torch + 1
 
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
-
-            data = data.astype(np.float32)
-
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
-
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
-            self.gguf_writer.add_tensor(new_name, data)
+        return [(self.map_tensor_name(name), data_torch)]
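
A tiny sketch of the norm adjustment above, with toy values: the referenced HF Gemma modeling code multiplies by (1 + weight), so the checkpoint stores gamma - 1 and the converter adds the 1 back before writing:

import torch

stored = torch.zeros(4)  # what the HF checkpoint holds for an identity RMSNorm
gamma = stored + 1       # what gets written out for llama.cpp

assert torch.equal(gamma, torch.ones(4))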
 
 
 @Model.register("Starcoder2ForCausalLM")
@@ -2703,6 +2129,8 @@ class MambaModel(Model):
 
         if (self.dir_model / "tokenizer.json").is_file():
             self._set_vocab_gpt2()
+        elif (self.dir_model / "tokenizer.model").is_file():
+            self._set_vocab_sentencepiece()
         else:
             # Use the GPT-NeoX tokenizer when no tokenizer files are present
             tokenizer_path = Path(sys.path[0]) / "models" / "ggml-vocab-gpt-neox.gguf"
@@ -2710,28 +2138,34 @@ class MambaModel(Model):
             neox_reader = gguf.GGUFReader(tokenizer_path, "r")
 
             field = neox_reader.get_field(gguf.Keys.Tokenizer.MODEL)
-            self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]))
+            self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8") if field else "gpt2")
 
             field = neox_reader.get_field(gguf.Keys.Tokenizer.PRE)
-            self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]))
+            self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else "mpt")
 
             field = neox_reader.get_field(gguf.Keys.Tokenizer.LIST)
+            assert field
             self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])
 
             field = neox_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
+            assert field
             self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])
 
             field = neox_reader.get_field(gguf.Keys.Tokenizer.MERGES)
+            assert field
             self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])
 
             field = neox_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)
-            self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
+            self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0] if field else 1)
 
             field = neox_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)
-            self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
+            self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0] if field else 0)
 
             field = neox_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)
-            self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
+            self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0] if field else 0)
+
+            field = neox_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)
+            self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0] if field else 0)
 
     def set_gguf_parameters(self):
         d_model = self.find_hparam(["hidden_size",       "d_model"])
@@ -2760,59 +2194,42 @@ class MambaModel(Model):
         self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
         self.gguf_writer.add_file_type(self.ftype)
 
-    def write_tensors(self):
-        block_count = self.hparams["n_layer"]
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-
-        tok_embd = None
-        tok_embd_name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.TOKEN_EMBD] + ".weight"
-        output_name   = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.OUTPUT]     + ".weight"
-
-        for name, data_torch in self.get_tensors():
-            old_dtype = data_torch.dtype
-
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
+    _tok_embd = None
 
-            if name.endswith(".A_log"):
-                logger.debug("A_log --> A ==> " + new_name)
-                data_torch = -torch.exp(data_torch)
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
 
-            # assuming token_embd.weight is seen before output.weight
-            if tok_embd is not None and new_name == output_name:
-                if torch.equal(tok_embd, data_torch):
-                    logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
-                    continue
-            if new_name == tok_embd_name:
-                tok_embd = data_torch
+        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
+        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)
 
-            data = data_torch.squeeze().numpy()
+        new_name = self.map_tensor_name(name)
 
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
+        if name.endswith(".A_log"):
+            logger.debug("A_log --> A ==> " + new_name)
+            data_torch = -torch.exp(data_torch)
 
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
+        # assuming token_embd.weight is seen before output.weight
+        if self._tok_embd is not None and new_name == output_name:
+            if torch.equal(self._tok_embd, data_torch):
+                logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
+                return []
+        elif new_name == tok_embd_name:
+            self._tok_embd = data_torch
 
-            # TODO: Why can't we use these float16 as-is? There should be no reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
+        return [(new_name, data_torch)]
 
-            # if f16 desired, convert big float32 2-dim weight tensors to float16
-            new_weight_name = new_name[:-len(".weight")] if new_name.endswith(".weight") else ""
-            if self.ftype == 1 and data_dtype == np.float32 and new_weight_name.endswith((".ssm_in", ".ssm_out", "token_embd", "output")) and n_dims == 2:
-                data = data.astype(np.float16)
+    def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
+        del n_dims  # unused
 
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
-
-            self.gguf_writer.add_tensor(new_name, data)
+        return bid is not None and new_name in (
+            self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [
+                gguf.MODEL_TENSOR.SSM_CONV1D,
+                gguf.MODEL_TENSOR.SSM_X,
+                gguf.MODEL_TENSOR.SSM_DT,
+                gguf.MODEL_TENSOR.SSM_A,
+                gguf.MODEL_TENSOR.SSM_D,
+            ]
+        )
 
 
 @Model.register("CohereForCausalLM")
@@ -2846,53 +2263,142 @@ class OlmoModel(Model):
 
     # Same as super class, but permuting q_proj, k_proj
     # Copied from: LlamaModel
-    def write_tensors(self):
-        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        n_head = self.hparams.get("num_attention_heads")
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
+
+        n_head = self.hparams["num_attention_heads"]
         n_kv_head = self.hparams.get("num_key_value_heads")
-        for name, data_torch in self.get_tensors():
-            old_dtype = data_torch.dtype
 
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
+        if name.endswith("q_proj.weight"):
+            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
+        if name.endswith("k_proj.weight"):
+            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
 
-            data = data_torch.numpy()
+        return [(self.map_tensor_name(name), data_torch)]
 
-            if name.endswith("q_proj.weight"):
-                data = permute(data, n_head, n_head)
-            if name.endswith("k_proj.weight"):
-                data = permute(data, n_head, n_kv_head)
 
-            data = data.squeeze()
+###### CONVERSION LOGIC ######
 
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                raise ValueError(f"Can not map tensor {name!r}")
 
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
+# tree of lazy tensors
+class LazyTorchTensor:
+    _meta: Tensor
+    _data: Tensor | None
+    _args: tuple
+    _func: Callable[[tuple], Tensor] | None
 
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
+    def __init__(self, *, meta: Tensor, data: Tensor | None = None, args: tuple = (), func: Callable[[tuple], Tensor] | None = None):
+        self._meta = meta
+        self._data = data
+        self._args = args
+        self._func = func
 
-            # 1d tensors need to be converted to float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
+    @staticmethod
+    def _recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any:
+        # TODO: dict and set
+        if isinstance(o, (list, tuple)):
+            L = []
+            for item in o:
+                L.append(LazyTorchTensor._recurse_apply(item, fn))
+            if isinstance(o, tuple):
+                L = tuple(L)
+            return L
+        elif isinstance(o, LazyTorchTensor):
+            return fn(o)
+        else:
+            return o
+
+    def _wrap_fn(self, fn: Callable, use_self: bool = False) -> Callable[[Any], LazyTorchTensor]:
+        def wrapped_fn(*args, **kwargs):
+            if kwargs is None:
+                kwargs = {}
+            args = ((self,) if use_self else ()) + args
+
+            meta_args = LazyTorchTensor._recurse_apply(args, lambda t: t._meta)
+
+            return LazyTorchTensor(meta=fn(*meta_args, **kwargs), args=args, func=lambda a: fn(*a, **kwargs))
+        return wrapped_fn
+
+    def __getattr__(self, __name: str) -> Any:
+        meta_attr = getattr(self._meta, __name)
+        if callable(meta_attr):
+            return self._wrap_fn(getattr(torch.Tensor, __name), use_self=True)
+        elif isinstance(meta_attr, torch.Tensor):
+            # for things like self.T
+            return self._wrap_fn(lambda s: getattr(s, __name))(self)
+        else:
+            return meta_attr
 
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and n_dims == 2:
-                data = data.astype(np.float16)
+    _dtype_map: dict[torch.dtype, type] = {
+        torch.float16: np.float16,
+        torch.float32: np.float32,
+    }
 
-            logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+    def numpy(self) -> gguf.LazyTensor:
+        dtype = self._dtype_map[self.dtype]
+        return gguf.LazyTensor(lambda: LazyTorchTensor.to_eager(self).numpy(), dtype=dtype, shape=self.shape)
 
-            self.gguf_writer.add_tensor(new_name, data)
+    @overload
+    @staticmethod
+    def to_eager(t: Tensor | LazyTorchTensor) -> Tensor: ...
 
+    @overload
+    @staticmethod
+    def to_eager(t: tuple) -> tuple: ...
 
-###### CONVERSION LOGIC ######
+    @staticmethod
+    def to_eager(t: Any) -> Any:
+        def simple_to_eager(_t: LazyTorchTensor) -> Tensor:
+            # wake up the lazy tensor
+            if _t._data is None and _t._func is not None:
+                # recurse into its arguments
+                _t._args = LazyTorchTensor.to_eager(_t._args)
+                _t._data = _t._func(_t._args)
+            if _t._data is not None:
+                return _t._data
+            else:
+                raise ValueError(f"Could not compute lazy tensor {_t!r} with args {_t._args!r}")
+
+        # recurse into lists and/or tuples, keeping their structure
+        return LazyTorchTensor._recurse_apply(t, simple_to_eager)
+
+    @staticmethod
+    def from_eager(t: Tensor) -> Tensor:
+        if (t.__class__ == LazyTorchTensor):
+            return t
+        return LazyTorchTensor(meta=t.detach().to("meta"), data=t)  # type: ignore
+
+    @classmethod
+    def __torch_function__(cls, func, types, args=(), kwargs=None):
+        del types  # unused
+
+        if kwargs is None:
+            kwargs = {}
+
+        if func is torch.Tensor.numpy:
+            return args[0].numpy()
+        if func is torch.equal:
+            eager_args = LazyTorchTensor.to_eager(args)
+            return func(*eager_args, **kwargs)
+
+        return LazyTorchTensor._wrap_fn(args[0], func)(*args, **kwargs)
+
+    # special methods bypass __getattr__, so they need to be added manually
+    # ref: https://docs.python.org/3/reference/datamodel.html#special-lookup
+    # NOTE: LazyTorchTensor can't be a subclass of Tensor (and then be used
+    #       as self._meta is currently used), because then the following
+    #       operations would by default not be wrapped, and so not propagated
+    #       when the tensor is made eager.
+    #       It's better to get non-silent errors for not-yet-supported operators.
+    # TODO: add more when needed to avoid clutter, or find a more concise way
+    def __neg__(self, *args):  # mamba
+        return self._wrap_fn(torch.Tensor.__neg__)(self, *args)
+
+    def __add__(self, *args):  # gemma
+        return self._wrap_fn(torch.Tensor.__add__)(self, *args)
+
+    def __getitem__(self, *args):  # bloom falcon refact internlm2
+        return self._wrap_fn(torch.Tensor.__getitem__)(self, *args)
 
 
 def parse_args() -> argparse.Namespace:
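
The lazy wrapper above relies on PyTorch's "meta" device: a meta tensor carries only shape and dtype, no storage, so shape and dtype propagation is free. A small sketch (sizes are arbitrary):

    import torch

    m = torch.empty(4096, 4096, dtype=torch.float16, device="meta")
    a = -torch.exp(m.float())          # nothing is computed, only metadata flows
    print(a.shape, a.dtype, a.device)  # torch.Size([4096, 4096]) torch.float32 meta
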
@@ -2904,7 +2410,8 @@ def parse_args() -> argparse.Namespace:
     )
     parser.add_argument(
         "--awq-path", type=Path, default=None,
-        help="Path to scale awq cache file")
+        help="Path to scale awq cache file",
+    )
     parser.add_argument(
         "--outfile", type=Path,
         help="path to write to; default: based on input",
@@ -2913,14 +2420,30 @@ def parse_args() -> argparse.Namespace:
         "--outtype", type=str, choices=["f32", "f16"], default="f16",
         help="output format - use f32 for float32, f16 for float16",
     )
-    parser.add_argument("--bigendian", action="store_true", help="model is executed on big endian machine")
+    parser.add_argument(
+        "--bigendian", action="store_true",
+        help="model is executed on big endian machine",
+    )
     parser.add_argument(
         "model", type=Path,
         help="directory containing model file",
     )
-    parser.add_argument("--use-temp-file", action="store_true", help="use the tempfile library while processing (helpful when running out of memory, process killed)")
-    parser.add_argument("--model-name", type=str, default=None, help="name of the model")
-    parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
+    parser.add_argument(
+        "--use-temp-file", action="store_true",
+        help="use the tempfile library while processing (helpful when running out of memory, process killed)",
+    )
+    parser.add_argument(
+        "--no-lazy", action="store_true",
+        help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
+    )
+    parser.add_argument(
+        "--model-name", type=str, default=None,
+        help="name of the model",
+    )
+    parser.add_argument(
+        "--verbose", action="store_true",
+        help="increase output verbosity",
+    )
 
     return parser.parse_args()
 
@@ -2966,7 +2489,7 @@ def main() -> None:
 
     with torch.inference_mode():
         model_class = Model.from_model_architecture(hparams["architectures"][0])
-        model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian, args.use_temp_file)
+        model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian, args.use_temp_file, args.no_lazy)
 
         logger.info("Set model parameters")
         model_instance.set_gguf_parameters()
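
Lazy evaluation is now the default; a hypothetical invocation that opts back into eager conversion (the model directory is a placeholder) would be:

    python convert-hf-to-gguf.py ./models/example --outtype f16 --no-lazy
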
index aebfc50f3b028ea49d9a99211d77d207be52a59e..148bfd66ad65b0780039a05a417806851e24662d 100755 (executable)
@@ -284,6 +284,7 @@ class Params:
         n_experts      = None
         n_experts_used = None
         f_rope_freq_base = None
+        n_ff = None
 
         # hack to determine LLaMA v1 vs v2 vs CodeLlama
         if config.get("moe"):
@@ -308,6 +309,8 @@ class Params:
             n_experts_used = config["moe"]["num_experts_per_tok"]
             f_rope_freq_base = 1e6
 
+        assert n_ff is not None
+
         return Params(
             n_vocab          = model["tok_embeddings.weight"].shape[0],
             n_embd           = config["dim"],
@@ -462,7 +465,8 @@ class SentencePieceVocab(Vocab):
             # not found in alternate location either
             raise FileNotFoundError('Cannot find tokenizer.model')
 
-        self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
+        self.sentencepiece_tokenizer = SentencePieceProcessor()
+        self.sentencepiece_tokenizer.LoadFromFile(str(fname_tokenizer))
         vocab_size = self.sentencepiece_tokenizer.vocab_size()
 
         new_tokens       = {id: piece for piece, id in added_tokens.items() if id >= vocab_size}
@@ -482,23 +486,23 @@ class SentencePieceVocab(Vocab):
     def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
         tokenizer = self.sentencepiece_tokenizer
         for i in range(tokenizer.vocab_size()):
-            piece = tokenizer.id_to_piece(i)
+            piece = tokenizer.IdToPiece(i)
             text         = piece.encode("utf-8")
-            score: float = tokenizer.get_score(i)
+            score: float = tokenizer.GetScore(i)
 
             toktype = gguf.TokenType.NORMAL
-            if tokenizer.is_unknown(i):
+            if tokenizer.IsUnknown(i):
                 toktype = gguf.TokenType.UNKNOWN
-            if tokenizer.is_control(i):
+            if tokenizer.IsControl(i):
                 toktype = gguf.TokenType.CONTROL
 
             # NOTE: I think added_tokens are user defined.
             # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
             # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED
 
-            if tokenizer.is_unused(i):
+            if tokenizer.IsUnused(i):
                 toktype = gguf.TokenType.UNUSED
-            if tokenizer.is_byte(i):
+            if tokenizer.IsByte(i):
                 toktype = gguf.TokenType.BYTE
 
             yield text, score, toktype
@@ -906,7 +910,7 @@ class LazyUnpickler(pickle.Unpickler):
     def rebuild_from_type_v2(func, new_type, args, state):
         return func(*args)
 
-    CLASSES = {
+    CLASSES: dict[tuple[str, str], type[LazyTensor] | LazyStorageKind] = {
         # getattr used here as a workaround for mypy not being smart enough to determine
         # the staticmethods have a __func__ attribute.
         ('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'),
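
A minimal sketch of the sentencepiece >= 0.2.0 calls used above; the tokenizer path is a placeholder and must point to a real SentencePiece model file:

    from sentencepiece import SentencePieceProcessor

    sp = SentencePieceProcessor()
    sp.LoadFromFile("tokenizer.model")  # placeholder path
    for i in range(min(5, sp.vocab_size())):
        print(sp.IdToPiece(i), sp.GetScore(i), sp.IsControl(i), sp.IsByte(i))
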
index 0882a5d36193b9cb8ccec9ac6102c4514bc7355f..f4b1ac1d718407199182b91faab4eff8559a5c3a 100644 (file)
@@ -939,7 +939,7 @@ async def oai_chat_completions(user_prompt,
                     while event_received:
                         event_received = False
                         async for line_in_bytes in response.content:
-                            line = line_in_bytes.decode('utf8')
+                            line = line_in_bytes.decode('utf-8')
                             line = line.rstrip('\n').rstrip('\r')
                             if line == '':
                                 continue
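
The 'utf8' to 'utf-8' renames throughout this change are purely for consistency; both spellings alias the same codec:

    print("ß".encode("utf8") == "ß".encode("utf-8"))  # True
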
index 6e968fc4e598ffdaf5a73ed3a688d68df5fb0b32..5951c0bb0fb5ead7949b6d8008052fa83e015ba1 100644 (file)
@@ -860,7 +860,7 @@ class GGUFValueType(IntEnum):
 # Note: Does not support GGML_QKK_64
 QK_K = 256
 # Items here are (block size, type size)
-GGML_QUANT_SIZES = {
+GGML_QUANT_SIZES: dict[GGMLQuantizationType, tuple[int, int]] = {
     GGMLQuantizationType.F32:     (1, 4),
     GGMLQuantizationType.F16:     (1, 2),
     GGMLQuantizationType.Q4_0:    (32, 2 + 16),
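
A worked example of how these (block size, type size) pairs are consumed; the byte count follows the same formula the reader uses, n_bytes = n_elems * type_size // block_size:

    block_size, type_size = 32, 2 + 16        # Q4_0, from the table above
    n_elems = 4096 * 4096
    print(n_elems * type_size // block_size)  # 9437184 bytes
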
index db8525d85b4504feb0579617a08ecee6a87dab9b..21b089f8a29371edad360c9209139c8c3d54c02d 100644 (file)
@@ -65,7 +65,7 @@ class ReaderTensor(NamedTuple):
 
 class GGUFReader:
     # I - same as host, S - swapped
-    byte_order: Literal['I' | 'S'] = 'I'
+    byte_order: Literal['I'] | Literal['S'] = 'I'
     alignment: int = GGUF_DEFAULT_ALIGNMENT
 
     # Note: Internal helper, API may change.
@@ -83,7 +83,7 @@ class GGUFReader:
         GGUFValueType.BOOL:    np.bool_,
     }
 
-    def __init__(self, path: os.PathLike[str] | str, mode: Literal['r' | 'r+' | 'c'] = 'r'):
+    def __init__(self, path: os.PathLike[str] | str, mode: Literal['r'] | Literal['r+'] | Literal['c'] = 'r'):
         self.data = np.memmap(path, mode = mode)
         offs = 0
         if self._get(offs, np.uint32, override_order = '<')[0] != GGUF_MAGIC:
@@ -128,7 +128,7 @@ class GGUFReader:
         return self.tensors[idx]
 
     def _get(
-        self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I' | 'S' | '<'] = None,
+        self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I'] | Literal['S'] | Literal['<'] = None,
     ) -> npt.NDArray[Any]:
         count = int(count)
         itemsize = int(np.empty([], dtype = dtype).itemsize)
@@ -250,7 +250,7 @@ class GGUFReader:
                 raise ValueError(f'Found duplicated tensor with name {tensor_name}')
             tensor_names.add(tensor_name)
             ggml_type = GGMLQuantizationType(raw_dtype[0])
-            n_elems = np.prod(dims)
+            n_elems = int(np.prod(dims))
             block_size, type_size = GGML_QUANT_SIZES[ggml_type]
             n_bytes = n_elems * type_size // block_size
             data_offs = int(start_offs + offset_tensor[0])
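
The Literal rewrites above exist because a union cannot be spelled inside a single Literal subscription; the equivalent shorthand is Literal['r', 'r+', 'c']. A hypothetical signature showing the accepted form, kept runnable on Python 3.8 via deferred annotation evaluation:

    from __future__ import annotations

    from typing import Literal

    def open_gguf(path: str, mode: Literal['r'] | Literal['r+'] | Literal['c'] = 'r') -> None:
        ...  # open_gguf is a made-up name, for illustration only
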
index d9cfbf71160d3d40280ead3a49d9446d8e6c60a9..8dcf9330b076fb35b875fb9600a268364bed73a1 100644 (file)
@@ -7,7 +7,7 @@ import struct
 import tempfile
 from enum import Enum, auto
 from io import BufferedWriter
-from typing import IO, Any, Sequence, Mapping
+from typing import IO, Any, Callable, Sequence, Mapping
 from string import ascii_letters, digits
 
 import numpy as np
@@ -28,6 +28,47 @@ from .constants import (
 logger = logging.getLogger(__name__)
 
 
+class LazyTensor:
+    data: Callable[[], np.ndarray[Any, Any]]
+    # to avoid too deep recursion
+    functions: list[Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]]]
+    dtype: np.dtype[Any]
+    shape: tuple[int, ...]
+
+    def __init__(self, data: Callable[[], np.ndarray[Any, Any]], *, dtype: type, shape: tuple[int, ...]):
+        self.data = data
+        self.functions = []
+        self.dtype = np.dtype(dtype)
+        self.shape = shape
+
+    def astype(self, dtype: type, **kwargs) -> LazyTensor:
+        self.functions.append(lambda n: n.astype(dtype, **kwargs))
+        self.dtype = np.dtype(dtype)
+        return self
+
+    @property
+    def nbytes(self) -> int:
+        size = 1
+        for n in self.shape:
+            size *= n
+        return size * self.dtype.itemsize
+
+    def tofile(self, *args, **kwargs) -> None:
+        data = self.data()
+        for f in self.functions:
+            data = f(data)
+        assert data.shape == self.shape
+        assert data.dtype == self.dtype
+        assert data.nbytes == self.nbytes
+        self.functions = []
+        self.data = lambda: data
+        data.tofile(*args, **kwargs)
+
+    def byteswap(self, *args, **kwargs) -> LazyTensor:
+        self.functions.append(lambda n: n.byteswap(*args, **kwargs))
+        return self
+
+
 class WriterState(Enum):
     EMPTY   = auto()
     HEADER  = auto()
@@ -38,7 +79,7 @@ class WriterState(Enum):
 class GGUFWriter:
     fout: BufferedWriter
     temp_file: tempfile.SpooledTemporaryFile[bytes] | None
-    tensors: list[np.ndarray[Any, Any]]
+    tensors: list[np.ndarray[Any, Any] | LazyTensor]
     _simple_value_packing = {
         GGUFValueType.UINT8:   "B",
         GGUFValueType.INT8:    "b",
@@ -176,7 +217,7 @@ class GGUFWriter:
         if pack_fmt is not None:
             self.kv_data += self._pack(pack_fmt, val, skip_pack_prefix = vtype == GGUFValueType.BOOL)
         elif vtype == GGUFValueType.STRING:
-            encoded_val = val.encode("utf8") if isinstance(val, str) else val
+            encoded_val = val.encode("utf-8") if isinstance(val, str) else val
             self.kv_data += self._pack("Q", len(encoded_val))
             self.kv_data += encoded_val
         elif vtype == GGUFValueType.ARRAY and isinstance(val, Sequence) and val:
@@ -205,7 +246,7 @@ class GGUFWriter:
             raise ValueError(f'Duplicated tensor name {name}')
         self.ti_names.add(name)
 
-        encoded_name = name.encode("utf8")
+        encoded_name = name.encode("utf-8")
         self.ti_data += self._pack("Q", len(encoded_name))
         self.ti_data += encoded_name
         n_dims = len(tensor_shape)
@@ -237,7 +278,7 @@ class GGUFWriter:
         self.ti_data_count += 1
 
     def add_tensor(
-        self, name: str, tensor: np.ndarray[Any, Any], raw_shape: Sequence[int] | None = None,
+        self, name: str, tensor: np.ndarray[Any, Any] | LazyTensor, raw_shape: Sequence[int] | None = None,
         raw_dtype: GGMLQuantizationType | None = None,
     ) -> None:
         if self.endianess == GGUFEndian.BIG:
@@ -262,7 +303,7 @@ class GGUFWriter:
         if pad != 0:
             fp.write(bytes([0] * pad))
 
-    def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None:
+    def write_tensor_data(self, tensor: np.ndarray[Any, Any] | LazyTensor) -> None:
         if self.state is not WriterState.TI_DATA:
             raise ValueError(f'Expected output file to contain tensor info, got {self.state}')
 
@@ -272,15 +313,33 @@ class GGUFWriter:
         tensor.tofile(self.fout)
         self.write_padding(self.fout, tensor.nbytes)
 
-    def write_tensors_to_file(self) -> None:
+    def write_tensors_to_file(self, *, progress: bool = False) -> None:
         self.write_ti_data_to_file()
 
         self.write_padding(self.fout, self.fout.tell())
 
         if self.temp_file is None:
+            self.tensors.reverse()  # to pop from the "beginning" in constant time
+
+            if progress:
+                from tqdm import tqdm
+
+                total_bytes = sum(t.nbytes for t in self.tensors)
+
+                bar = tqdm(desc="Writing", total=total_bytes, unit="byte", unit_scale=True)
+
+                while True:
+                    try:
+                        tensor = self.tensors.pop()
+                    except IndexError:
+                        break
+                    tensor.tofile(self.fout)
+                    bar.update(tensor.nbytes)
+                    self.write_padding(self.fout, tensor.nbytes)
+                return
             while True:
                 try:
-                    tensor = self.tensors.pop(0)
+                    tensor = self.tensors.pop()
                 except IndexError:
                     break
                 tensor.tofile(self.fout)
@@ -479,7 +538,7 @@ class GGUFWriter:
         self.add_bool(Keys.Tokenizer.ADD_PREFIX, value)
 
     def add_chat_template(self, value: str | Sequence[Mapping[str, str]]) -> None:
-        if isinstance(value, list):
+        if not isinstance(value, str):
             template_default = None
             template_names = set()
 
index c97a78f3944f4b509accda91181f5ec105db49d7..3ba99be4f44895b888c27be4b397587e99032b09 100644 (file)
@@ -4,7 +4,7 @@ import logging
 import json
 import os
 from pathlib import Path
-from typing import Any, Callable
+from typing import Any, Callable, Sequence, Mapping, Iterable
 
 from .gguf_writer import GGUFWriter
 
@@ -15,11 +15,11 @@ class SpecialVocab:
     merges: list[str]
     add_special_token: dict[str, bool]
     special_token_ids: dict[str, int]
-    chat_template: str | None
+    chat_template: str | Sequence[Mapping[str, str]] | None
 
     def __init__(
         self, path: str | os.PathLike[str], load_merges: bool = False,
-        special_token_types: tuple[str, ...] | None = None,
+        special_token_types: Iterable[str] | None = None,
         n_vocab: int | None = None,
     ):
         self.special_token_ids = {}
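
The broadened chat_template type mirrors how tokenizer_config.json may hold either a single template string or a list of named templates; an illustrative (made-up) value of the latter form:

    chat_template = [
        {"name": "default",  "template": "{{ bos_token }}{% for m in messages %}...{% endfor %}"},
        {"name": "tool_use", "template": "..."},
    ]
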
index d1d876d6dffa6bb973af8508e7bcaf1586eca0f0..36e63ee3b7cd6e23dc15478683f3c72c064ac37f 100644 (file)
@@ -21,6 +21,7 @@ classifiers = [
 [tool.poetry.dependencies]
 python = ">=3.8"
 numpy = ">=1.17"
+tqdm = ">=4.27"
 
 [tool.poetry.dev-dependencies]
 pytest = "^5.2"
index 2d3c3943f64cd6fa01c0a7dbe21b49e5f7412eaa..1a37a7b91409defbc970e0b5e4ffd3a9316319ec 100755 (executable)
@@ -47,7 +47,7 @@ def dump_metadata(reader: GGUFReader, args: argparse.Namespace) -> None:
         if len(field.types) == 1:
             curr_type = field.types[0]
             if curr_type == GGUFValueType.STRING:
-                log_message += ' = {0}'.format(repr(str(bytes(field.parts[-1]), encoding='utf8')[:60]))
+                log_message += ' = {0}'.format(repr(str(bytes(field.parts[-1]), encoding='utf-8')[:60]))
             elif field.types[0] in reader.gguf_scalar_to_np:
                 log_message += ' = {0}'.format(field.parts[-1][0])
         print(log_message)  # noqa: NP100
index 3444ab41802c5ee431f6959e0c98f30f10e0d9f1..c8e3a83dfbd78ad15b14880188736931e5460d0b 100644 (file)
@@ -7,7 +7,7 @@ import json
 from pathlib import Path
 
 import numpy as np
-from typing import Any, Mapping, Sequence
+from typing import Any, Sequence
 
 # Necessary to load the local gguf package
 if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists():
@@ -34,7 +34,7 @@ def get_byteorder(reader: gguf.GGUFReader) -> gguf.GGUFEndian:
         return host_endian
 
 
-def decode_field(field: gguf.ReaderField) -> Any:
+def decode_field(field: gguf.ReaderField | None) -> Any:
     if field and field.types:
         main_type = field.types[0]
 
@@ -42,11 +42,11 @@ def decode_field(field: gguf.ReaderField) -> Any:
             sub_type = field.types[-1]
 
             if sub_type == gguf.GGUFValueType.STRING:
-                return [str(bytes(field.parts[idx]), encoding='utf8') for idx in field.data]
+                return [str(bytes(field.parts[idx]), encoding='utf-8') for idx in field.data]
             else:
                 return [pv for idx in field.data for pv in field.parts[idx].tolist()]
         if main_type == gguf.GGUFValueType.STRING:
-            return str(bytes(field.parts[-1]), encoding='utf8')
+            return str(bytes(field.parts[-1]), encoding='utf-8')
         else:
             return field.parts[-1][0]
 
@@ -59,7 +59,7 @@ def get_field_data(reader: gguf.GGUFReader, key: str) -> Any:
     return decode_field(field)
 
 
-def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new_metadata: Mapping[str, str], remove_metadata: Sequence[str]) -> None:
+def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new_metadata: dict[str, str], remove_metadata: Sequence[str]) -> None:
     for field in reader.fields.values():
         # Suppress virtual fields and fields written by GGUFWriter
         if field.name == gguf.Keys.General.ARCHITECTURE or field.name.startswith('GGUF.'):
@@ -101,7 +101,7 @@ def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new
 
     for tensor in reader.tensors:
         # Dimensions are written in reverse order, so flip them first
-        shape = np.flipud(tensor.shape)
+        shape = np.flipud(tensor.shape).tolist()
         writer.add_tensor_info(tensor.name, shape, tensor.data.dtype, tensor.data.nbytes, tensor.tensor_type)
 
     writer.write_header_to_file()
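
GGUF stores tensor dimensions in reverse order; converting the flipped shape to a plain list of ints is what add_tensor_info expects. A small check with arbitrary dimensions:

    import numpy as np

    shape = np.flipud(np.array([4096, 32000])).tolist()
    print(shape)  # [32000, 4096]
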
diff --git a/pyrightconfig.json b/pyrightconfig.json
new file mode 100644 (file)
index 0000000..020a71a
--- /dev/null
@@ -0,0 +1,3 @@
+{
+  "extraPaths": ["gguf-py"],
+}
index 6ce840d73cb73d1e44127ac4d11360807d76d22b..6ac4026107fbeb7706d1e23f3bda1067bfff5b49 100644 (file)
@@ -1,3 +1,2 @@
 -r ./requirements-convert.txt
 torch~=2.1.1
-einops~=0.7.0
index 6ce840d73cb73d1e44127ac4d11360807d76d22b..6ac4026107fbeb7706d1e23f3bda1067bfff5b49 100644 (file)
@@ -1,3 +1,2 @@
 -r ./requirements-convert.txt
 torch~=2.1.1
-einops~=0.7.0
index 5520ba732ffcafeca43b5bbe635cee7dfd4f6040..7ab1228cb33ffd6170910e05c2665380613d6661 100644 (file)
@@ -1,5 +1,5 @@
 numpy~=1.24.4
-sentencepiece~=0.1.98
+sentencepiece~=0.2.0
 transformers>=4.40.1,<5.0.0
 gguf>=0.1.0
 protobuf>=4.21.0,<5.0.0