From: Daniel Bevenius Date: Wed, 28 Jan 2026 15:49:36 +0000 (+0100) Subject: convert : yield Mamba2Model/GraniteMoeModel modify_tensors (#19157) X-Git-Tag: upstream/0.0.8067~204 X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=ebf57258702b23098d7bdcbd46008a95b2401075;p=pkg%2Fggml%2Fsources%2Fllama.cpp convert : yield Mamba2Model/GraniteMoeModel modify_tensors (#19157) * convert : yield Mamba2Model/GraniteMoeModel modify_tensors This commit updates the `GraniteHybridModel` class' modify_tensors function to properly delegate to `Mamba2Model.modify_tensors` and `GraniteMoeModel.modify_tensors` using 'yield from' instead of 'return'. The motivation for this is that modify_tensors is a generator function (it uses 'yield from'), but the two calls above used return statements and did not yield anything, which means that the caller of this function will not receive any yielded values from it. And this causes layer tensors to be silently dropped during conversion. --- diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 6e6e61898..a391717e3 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -8912,13 +8912,16 @@ class GraniteHybridModel(Mamba2Model, GraniteMoeModel): name.endswith("block_sparse_moe.input_linear.weight") or "shared_mlp" in name ): - return GraniteMoeModel.modify_tensors(self, data_torch, name, bid) + yield from GraniteMoeModel.modify_tensors(self, data_torch, name, bid) + return # Determine whether this is a mamba layer or an attention layer if bid in self._ssm_layers: - return Mamba2Model.modify_tensors(self, data_torch, name, bid) + yield from Mamba2Model.modify_tensors(self, data_torch, name, bid) + return elif bid in self._attn_layers: - return GraniteMoeModel.modify_tensors(self, data_torch, name, bid) + yield from GraniteMoeModel.modify_tensors(self, data_torch, name, bid) + return yield from ModelBase.modify_tensors(self, data_torch, name, bid) def set_gguf_parameters(self):