logger = logging.getLogger("gguf-convert-endian")
+def byteswap_noop(tensor, block_offs):
+    # No-op for quant types that need no byteswapping: their blocks contain
+    # only single-byte fields (e.g. MXFP4), so there is nothing to swap.
+    pass
+
+
def byteswap_q4_0(tensor, block_offs):
    # Each block_q4_0 consists of an f16 delta (scaling factor) followed by 16 bytes holding 32 packed 4-bit quantizations.
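The MXFP4 entry added below maps to `byteswap_noop` because an MXFP4 block packs 32 values into 17 bytes, a single e8m0 scale byte followed by 16 bytes of packed 4-bit values, so every field is one byte wide and there is nothing to swap. A throwaway sketch of inspecting that layout, assuming a gguf-py version that already defines MXFP4:

```python
import gguf

# GGML_QUANT_SIZES maps a quant type to (elements per block, bytes per block).
n_elems, n_bytes = gguf.constants.GGML_QUANT_SIZES[gguf.GGMLQuantizationType.MXFP4]
print(n_elems, n_bytes)  # expected 32 17: one scale byte + 16 quant bytes, no multi-byte fields
```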
byteswap_tensors = {
-    gguf.GGMLQuantizationType.Q4_0: {
-        "block_size": 18, # 18 bytes = <f16 delta scaling factor> + 16 * <int8 quant>
-        "byteswap_func": byteswap_q4_0,
-    },
-    gguf.GGMLQuantizationType.Q8_0: {
-        "block_size": 34, # 34 bytes = <f16 delta scaling factor> + 32 * <int8 quant>
-        "byteswap_func": byteswap_q8_0,
-    },
-    gguf.GGMLQuantizationType.Q4_K: {
-        "block_size": 144, # 144 bytes = 2 * <f16 delta scaling factor> + 140 * <int8 quant>
-        "byteswap_func": byteswap_q4_k,
-    },
-    gguf.GGMLQuantizationType.Q6_K: {
-        "block_size": 210, # 210 bytes = <f16 delta scaling factor> + 208 * <int8 quant>
-        "byteswap_func": byteswap_q6_k,
-    },
+    gguf.GGMLQuantizationType.Q4_0: byteswap_q4_0,
+    gguf.GGMLQuantizationType.Q8_0: byteswap_q8_0,
+    gguf.GGMLQuantizationType.Q4_K: byteswap_q4_k,
+    gguf.GGMLQuantizationType.Q6_K: byteswap_q6_k,
+    gguf.GGMLQuantizationType.MXFP4: byteswap_noop,
}
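Dropping the hard-coded sizes is safe because `GGML_QUANT_SIZES` carries the same per-block byte counts; a hypothetical one-off check (not part of the script) along these lines would confirm the refactor preserves the old values:

```python
import gguf

# Bytes per block that were previously hard-coded in byteswap_tensors.
expected_bytes = {
    gguf.GGMLQuantizationType.Q4_0: 18,
    gguf.GGMLQuantizationType.Q8_0: 34,
    gguf.GGMLQuantizationType.Q4_K: 144,
    gguf.GGMLQuantizationType.Q6_K: 210,
}
for qtype, n_bytes in expected_bytes.items():
    # Index [1] is the block's byte size; [0] is the element count.
    assert gguf.constants.GGML_QUANT_SIZES[qtype][1] == n_bytes
```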
tensor.data.resize(newshape)
-block_size = byteswap_tensors[tensor.tensor_type]["block_size"]
-byteswap_func = byteswap_tensors[tensor.tensor_type]["byteswap_func"]
+# GGML_QUANT_SIZES maps each qtype to (elements per block, bytes per block);
+# index [1] is the block's byte size, replacing the hard-coded sizes above.
+block_size = gguf.constants.GGML_QUANT_SIZES[tensor.tensor_type][1]
+byteswap_func = byteswap_tensors[tensor.tensor_type]
n_blocks = len(tensor.data) // block_size
for block_num in (inner_pbar := tqdm(range(n_blocks), desc="Byte-swapping Blocks", leave=False)):
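For context on what the dispatched functions do: the loop presumably hands each function the tensor and the block's byte offset, and for the simple layouts only the leading f16 delta is a multi-byte field. A hedged sketch of such a function (the name `byteswap_f16_delta` is illustrative, not from the script), assuming `tensor.data` is a flat `np.uint8` array as the GGUF reader provides:

```python
import numpy as np

def byteswap_f16_delta(tensor, block_offs):
    # View the block's first two bytes (the f16 delta) as one uint16 and
    # swap it in place; the packed quant bytes after it are endian-neutral.
    delta = tensor.data[block_offs:block_offs + 2].view(dtype=np.uint16)
    delta.byteswap(inplace=True)

# The loop body would then dispatch by byte offset, e.g.:
#     byteswap_func(tensor, block_num * block_size)
```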