]> git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
gguf-py : fix passing non-native endian tensors (editor-gui and new-metadata) (#17553)
authorAleksei Nikiforov <redacted>
Fri, 28 Nov 2025 19:53:01 +0000 (20:53 +0100)
committerGitHub <redacted>
Fri, 28 Nov 2025 19:53:01 +0000 (20:53 +0100)
gguf_new_metadata.py reads data from the reader.
The reader doesn't byteswap tensors to native endianness,
but the writer expects tensors in native endianness so that it can
convert them into the requested endianness.

There are two ways to fix this: update the reader to convert tensors to native endianness and back,
or skip the endianness conversion in the writer in this particular use case.

gguf_editor_gui.py doesn't allow editing or viewing tensor data.
Let's go with skipping excessive byteswapping.

If eventually capability to view or edit tensor data is added,
tensor data should be instead byteswapped when reading it.

gguf-py/gguf/gguf_writer.py
gguf-py/gguf/scripts/gguf_editor_gui.py
gguf-py/gguf/scripts/gguf_new_metadata.py

index 57ca2035fe2363b003155e1900e16ee979a55be1..8ddd895cb7c6d97103ef103cc85211c331069a1e 100644 (file)
@@ -371,10 +371,13 @@ class GGUFWriter:
 
     def add_tensor(
         self, name: str, tensor: np.ndarray[Any, Any], raw_shape: Sequence[int] | None = None,
-        raw_dtype: GGMLQuantizationType | None = None,
+        raw_dtype: GGMLQuantizationType | None = None, tensor_endianess: GGUFEndian | None = None
     ) -> None:
-        if (self.endianess == GGUFEndian.BIG and sys.byteorder != 'big') or \
-                (self.endianess == GGUFEndian.LITTLE and sys.byteorder != 'little'):
+        # if tensor endianness is not passed, assume it's native to system
+        if tensor_endianess is None:
+            tensor_endianess = GGUFEndian.BIG if sys.byteorder == 'big' else GGUFEndian.LITTLE
+
+        if tensor_endianess != self.endianess:
             # Don't byteswap inplace since lazy copies cannot handle it
             tensor = tensor.byteswap(inplace=False)
         if self.use_temp_file and self.temp_file is None:
@@ -397,13 +400,16 @@ class GGUFWriter:
         if pad != 0:
             fp.write(bytes([0] * pad))
 
-    def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None:
+    def write_tensor_data(self, tensor: np.ndarray[Any, Any], tensor_endianess: GGUFEndian | None = None) -> None:
         if self.state is not WriterState.TI_DATA and self.state is not WriterState.WEIGHTS:
             raise ValueError(f'Expected output file to contain tensor info or weights, got {self.state}')
         assert self.fout is not None
 
-        if (self.endianess == GGUFEndian.BIG and sys.byteorder != 'big') or \
-                (self.endianess == GGUFEndian.LITTLE and sys.byteorder != 'little'):
+        # if tensor endianness is not passed, assume it's native to system
+        if tensor_endianess is None:
+            tensor_endianess = GGUFEndian.BIG if sys.byteorder == 'big' else GGUFEndian.LITTLE
+
+        if tensor_endianess != self.endianess:
             # Don't byteswap inplace since lazy copies cannot handle it
             tensor = tensor.byteswap(inplace=False)
 
index 05f4db0f8cdc89049a7e8719fcaa1fe741391413..293316afed4e497ce3e426243d9fbd30193f5c30 100755 (executable)
@@ -1552,7 +1552,7 @@ class GGUFEditorWindow(QMainWindow):
 
             # Add tensors (including data)
             for tensor in self.reader.tensors:
-                writer.add_tensor(tensor.name, tensor.data, raw_shape=tensor.data.shape, raw_dtype=tensor.tensor_type)
+                writer.add_tensor(tensor.name, tensor.data, raw_shape=tensor.data.shape, raw_dtype=tensor.tensor_type, tensor_endianess=self.reader.endianess)
 
             # Write header and metadata
             writer.open_output_file(Path(file_path))
index 2fa5800cf7485fe893c488c2de812f6501629f7b..c67436bad4af6dfcf662b23aff3d3a5bb1d26257 100755 (executable)
@@ -94,7 +94,7 @@ def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new
     writer.write_ti_data_to_file()
 
     for tensor in reader.tensors:
-        writer.write_tensor_data(tensor.data)
+        writer.write_tensor_data(tensor.data, tensor_endianess=reader.endianess)
         bar.update(tensor.n_bytes)
 
     writer.close()