* vulkan (DRAFT): split shader generation by GLSL source file, to improve incremental build times
* support dep-files so shaders are recompiled if their included files change
* rename shader files which are used as "headers" to use .glsl extension
* move glslc extension detection shaders to separate folders
* the above is to prevent them from getting glob'd with the actual compute shaders that need to be compiled
* vulkan : only write embedded shader .hpp/.cpp when they change
* avoid recompiling ggml-vulkan.cpp when editing shaders
* pass single --source argument instead of --input-dir & --filter to shader gen
* check for source file match earlier
* fix hang in vulkan-shaders-gen when there are compilation errors
* early out did not decrement compile_count
* clean up
* fix glslc integer dot product test
* unconditionally write the embedded shader cpp output
* replace output filepath in generated dep-files to match output in CMakeLists
---------
Co-authored-by: Jeff Bolz <redacted>
cmake_minimum_required(VERSION 3.19)
cmake_policy(SET CMP0114 NEW)
+cmake_policy(SET CMP0116 NEW)
find_package(Vulkan COMPONENTS glslc REQUIRED)
# Test all shader extensions
test_shader_extension_support(
"GL_KHR_cooperative_matrix"
- "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/test_coopmat_support.comp"
+ "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/feature-tests/coopmat.comp"
"GGML_VULKAN_COOPMAT_GLSLC_SUPPORT"
)
test_shader_extension_support(
"GL_NV_cooperative_matrix2"
- "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/test_coopmat2_support.comp"
+ "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/feature-tests/coopmat2.comp"
"GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT"
)
test_shader_extension_support(
"GL_EXT_integer_dot_product"
- "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/test_integer_dot_support.comp"
+ "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/feature-tests/integer_dot.comp"
"GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT"
)
test_shader_extension_support(
"GL_EXT_bfloat16"
- "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/test_bfloat16_support.comp"
+ "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/feature-tests/bfloat16.comp"
"GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT"
)
set (_ggml_vk_genshaders_dir "${CMAKE_BINARY_DIR}/$<CONFIG>")
set (_ggml_vk_genshaders_cmd "${_ggml_vk_genshaders_dir}/vulkan-shaders-gen${_ggml_vk_host_suffix}")
set (_ggml_vk_header "${CMAKE_CURRENT_BINARY_DIR}/ggml-vulkan-shaders.hpp")
- set (_ggml_vk_source "${CMAKE_CURRENT_BINARY_DIR}/ggml-vulkan-shaders.cpp")
set (_ggml_vk_input_dir "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders")
set (_ggml_vk_output_dir "${CMAKE_CURRENT_BINARY_DIR}/vulkan-shaders.spv")
add_custom_command(
OUTPUT ${_ggml_vk_header}
- ${_ggml_vk_source}
-
COMMAND ${_ggml_vk_genshaders_cmd}
- --glslc ${Vulkan_GLSLC_EXECUTABLE}
- --input-dir ${_ggml_vk_input_dir}
--output-dir ${_ggml_vk_output_dir}
--target-hpp ${_ggml_vk_header}
- --target-cpp ${_ggml_vk_source}
- --no-clean
-
- DEPENDS ${_ggml_vk_shader_files}
- ${_ggml_vk_shaders_gen_sources}
+ DEPENDS ${_ggml_vk_shaders_gen_sources}
vulkan-shaders-gen
-
- COMMENT "Generate vulkan shaders"
+ COMMENT "Generate vulkan shaders header"
)
-
- target_sources(ggml-vulkan PRIVATE ${_ggml_vk_source} ${_ggml_vk_header})
+ target_sources(ggml-vulkan PRIVATE ${_ggml_vk_header})
+
+ foreach (file_full ${_ggml_vk_shader_files})
+ get_filename_component(file ${file_full} NAME)
+ set (_ggml_vk_target_cpp "${CMAKE_CURRENT_BINARY_DIR}/${file}.cpp")
+
+ add_custom_command(
+ OUTPUT ${_ggml_vk_target_cpp}
+ DEPFILE ${_ggml_vk_target_cpp}.d
+ COMMAND ${_ggml_vk_genshaders_cmd}
+ --glslc ${Vulkan_GLSLC_EXECUTABLE}
+ --source ${file_full}
+ --output-dir ${_ggml_vk_output_dir}
+ --target-hpp ${_ggml_vk_header}
+ --target-cpp ${_ggml_vk_target_cpp}
+ DEPENDS ${file_full}
+ ${_ggml_vk_shaders_gen_sources}
+ vulkan-shaders-gen
+ COMMENT "Generate vulkan shaders for ${file}"
+ )
+ target_sources(ggml-vulkan PRIVATE ${_ggml_vk_target_cpp})
+ endforeach()
else()
message(WARNING "Vulkan not found")
#version 450
-#include "types.comp"
-#include "generic_binary_head.comp"
+#include "types.glsl"
+#include "generic_binary_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
#extension GL_KHR_shader_subgroup_basic : enable
#endif
-#include "types.comp"
-#include "generic_binary_head.comp"
+#include "types.glsl"
+#include "generic_binary_head.glsl"
const uint num_threads = 256;
#extension GL_EXT_control_flow_attributes : require
-#include "types.comp"
+#include "types.glsl"
layout (push_constant) uniform parameter
{
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#version 450
#extension GL_EXT_control_flow_attributes : enable
-#include "types.comp"
+#include "types.glsl"
layout(constant_id = 0) const int BLOCK_SIZE = 1024;
layout(constant_id = 1) const int BLOCK_SIZE_LOG2 = 10;
#version 450
-#include "types.comp"
-#include "generic_unary_head.comp"
+#include "types.glsl"
+#include "generic_unary_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "types.comp"
-#include "generic_binary_head.comp"
+#include "types.glsl"
+#include "generic_binary_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "types.comp"
-#include "generic_unary_head.comp"
+#include "types.glsl"
+#include "generic_unary_head.glsl"
#extension GL_EXT_control_flow_attributes : require
#version 450
-#include "types.comp"
+#include "types.glsl"
layout (push_constant) uniform parameter
{
# extension GL_KHR_shader_subgroup_shuffle : enable
#endif
-#include "types.comp"
+#include "types.glsl"
// shape notation: [dim(N), ..., dim(0)] -- stride(dim(j)) >= stride(dim(i)) if i > j
layout(binding = 0) readonly buffer A {
#version 450
-#include "types.comp"
+#include "types.glsl"
layout (binding = 0) readonly buffer A {A_TYPE data_a[];}; // src0 - kernel: [K, Cout, Cin]
layout (binding = 1) readonly buffer B {B_TYPE data_b[];}; // src1 - input: [L, Cin]
#version 450
-#include "types.comp"
-#include "generic_unary_head.comp"
+#include "types.glsl"
+#include "generic_unary_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "types.comp"
-#include "generic_unary_head.comp"
-#include "dequant_funcs.comp"
+#include "types.glsl"
+#include "generic_unary_head.glsl"
+#include "dequant_funcs.glsl"
#if defined(DATA_A_IQ4_NL) || defined(DATA_A_MXFP4)
// 16 invocations needed for init_iq_shmem
#version 450
-#include "rte.comp"
-#include "types.comp"
+#include "rte.glsl"
+#include "types.glsl"
#if defined(SET_ROWS) && QUANT_K == 1
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
layout (binding = 0) readonly buffer S {float data_s[];};
#if defined(SET_ROWS)
-#include "generic_binary_head.comp"
+#include "generic_binary_head.glsl"
layout (binding = 1) readonly buffer C {B_TYPE data_i[];};
layout (binding = 2) writeonly buffer Q {A_TYPE data_q[];};
#endif
#else
-#include "generic_unary_head.comp"
+#include "generic_unary_head.glsl"
layout (binding = 1) writeonly buffer Q {A_TYPE data_q[];};
#endif
#version 450
-#include "types.comp"
-#include "generic_unary_head.comp"
+#include "types.glsl"
+#include "generic_unary_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
#extension GL_EXT_control_flow_attributes : enable
-#include "types.comp"
-#include "generic_head.comp"
+#include "types.glsl"
+#include "generic_head.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
+++ /dev/null
-#if !defined(DATA_A_F32) && !defined(DATA_A_F16)
-#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
-#endif
-
-#include "types.comp"
-
-#if defined(A_TYPE_PACKED16)
-layout (binding = 0) readonly buffer A_PACKED16 {A_TYPE_PACKED16 data_a_packed16[];};
-#endif
-#if defined(A_TYPE_PACKED32)
-layout (binding = 0) readonly buffer A_PACKED32 {A_TYPE_PACKED32 data_a_packed32[];};
-#endif
-
-#if defined(DATA_A_F32)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- return vec2(data_a[a_offset + ib], data_a[a_offset + ib + 1]);
-}
-#endif
-
-#if defined(DATA_A_F16)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- return vec2(data_a[a_offset + ib], data_a[a_offset + ib + 1]);
-}
-#endif
-
-#if defined(DATA_A_BF16)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- return vec2(bf16_to_fp32(data_a[a_offset + ib]), bf16_to_fp32(data_a[a_offset + ib + 1]));
-}
-#endif
-
-#if defined(DATA_A_Q4_0)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
- return (vec2(vui & 0xF, vui >> 4) - 8.0f);
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
- return (vec4(vui & 0xF, (vui >> 4) & 0xF, (vui >> 8) & 0xF, vui >> 12) - 8.0f);
-}
-#endif
-
-#if defined(DATA_A_Q4_1)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
- return vec2(vui & 0xF, vui >> 4);
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
- return vec4(vui & 0xF, (vui >> 4) & 0xF, (vui >> 8) & 0xF, vui >> 12);
-}
-#endif
-
-#if defined(DATA_A_Q5_0)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint uint_qh = uint(data_a[a_offset + ib].qh[1]) << 16 | data_a[a_offset + ib].qh[0];
- const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
- const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
- return (vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y) - 16.0f);
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint uint_qh = uint(data_a_packed16[a_offset + ib].qh[1]) << 16 | data_a_packed16[a_offset + ib].qh[0];
- const ivec2 qh0 = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
- const ivec2 qh1 = ivec2(((uint_qh >> (iqs + 1)) << 4) & 0x10, (uint_qh >> (iqs + 13)) & 0x10);
- const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
- return (vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y) - 16.0f);
-}
-#endif
-
-#if defined(DATA_A_Q5_1)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint uint_qh = data_a[a_offset + ib].qh;
- const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
- const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
- return vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y);
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint uint_qh = data_a_packed16[a_offset + ib].qh;
- const ivec2 qh0 = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
- const ivec2 qh1 = ivec2(((uint_qh >> (iqs + 1)) << 4) & 0x10, (uint_qh >> (iqs + 13)) & 0x10);
- const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
- return vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y);
-}
-#endif
-
-#if defined(DATA_A_Q8_0)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- return vec2(int(data_a[a_offset + ib].qs[iqs]), int(data_a[a_offset + ib].qs[iqs + 1]));
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const i8vec2 v0 = unpack8(int32_t(data_a_packed16[a_offset + ib].qs[iqs/2])).xy; // vec4 used due to #12147
- const i8vec2 v1 = unpack8(int32_t(data_a_packed16[a_offset + ib].qs[iqs/2 + 1])).xy;
- return vec4(v0.x, v0.y, v1.x, v1.y);
-}
-#endif
-
-#if defined(DATA_A_IQ1_S)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint ib32 = iqs / 32;
- const uint ib8 = iqs / 8;
- const int i8 = int(iqs % 8);
- const uint qh = data_a[a_offset + ib].qh[ib32];
- const uint qs = data_a[a_offset + ib].qs[ib8];
- const float dl = float(2 * bitfieldExtract(qh, 12, 3) + 1);
- const float delta = ((qh & 0x8000) != 0) ? -IQ1S_DELTA : IQ1S_DELTA;
- const uint idxhi = bitfieldExtract(qh, 3 * int(ib8 & 3), 3);
- const int16_t grid = int16_t(iq1s_grid[qs | (idxhi << 8)]);
- // Signed bitfield extract.
- const ivec2 gvec = ivec2(
- bitfieldExtract(grid, 2 * (i8), 2),
- bitfieldExtract(grid, 2 * (i8 + 1), 2)
- );
- return dl * (vec2(gvec) + delta);
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint ib32 = iqs / 32;
- const uint ib8 = iqs / 8;
- const int i8 = int(iqs % 8);
- const uint qh = data_a[a_offset + ib].qh[ib32];
- const uint qs = data_a[a_offset + ib].qs[ib8];
- const float dl = 2 * bitfieldExtract(qh, 12, 3) + 1;
- const float delta = ((qh & 0x8000) != 0) ? -IQ1S_DELTA : IQ1S_DELTA;
- const int16_t grid = int16_t(iq1s_grid[qs | (bitfieldExtract(qh, 3 * int(ib8 & 3), 3) << 8)]);
- // Signed bitfield extract.
- const ivec4 gvec = ivec4(
- bitfieldExtract(grid, 2 * (i8), 2),
- bitfieldExtract(grid, 2 * (i8 + 1), 2),
- bitfieldExtract(grid, 2 * (i8 + 2), 2),
- bitfieldExtract(grid, 2 * (i8 + 3), 2)
- );
- return dl * (vec4(gvec) + delta);
-}
-#endif
-
-#if defined(DATA_A_IQ1_M)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint ib8 = iqs / 8;
- const uint ib16 = iqs / 16;
- const int i8 = int(iqs % 8);
- const uint sc = data_a[a_offset + ib].scales[iqs / 64];
- const uint qs = data_a[a_offset + ib].qs[ib8];
- const uint qh = data_a[a_offset + ib].qh[ib16] >> (4 * (ib8 & 1));
- const float dl = 2 * bitfieldExtract(sc, 3 * int(ib16 & 3), 3) + 1;
- const float delta = ((qh & 8) != 0) ? -IQ1M_DELTA : IQ1M_DELTA;
- const int16_t grid = int16_t(iq1s_grid[qs | ((qh & 7) << 8)]);
- // Signed bitfield extract.
- const ivec2 gvec = ivec2(
- bitfieldExtract(grid, 2 * (i8), 2),
- bitfieldExtract(grid, 2 * (i8 + 1), 2)
- );
- return dl * (vec2(gvec) + delta);
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint ib8 = iqs / 8;
- const uint ib16 = iqs / 16;
- const int i8 = int(iqs % 8);
- const uint sc = data_a[a_offset + ib].scales[iqs / 64];
- const uint qs = data_a[a_offset + ib].qs[ib8];
- const uint qh = data_a[a_offset + ib].qh[ib16] >> (4 * (ib8 & 1));
- const float dl = 2 * bitfieldExtract(sc, 3 * int(ib16 & 3), 3) + 1;
- const float delta = ((qh & 8) != 0) ? -IQ1M_DELTA : IQ1M_DELTA;
- const int16_t grid = int16_t(iq1s_grid[qs | ((qh & 7) << 8)]);
- // Signed bitfield extract.
- const ivec4 gvec = ivec4(
- bitfieldExtract(grid, 2 * (i8), 2),
- bitfieldExtract(grid, 2 * (i8 + 1), 2),
- bitfieldExtract(grid, 2 * (i8 + 2), 2),
- bitfieldExtract(grid, 2 * (i8 + 3), 2)
- );
- return dl * (vec4(gvec) + delta);
-}
-#endif
-
-#if defined(DATA_A_IQ2_XXS)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint ib32 = iqs / 32;
- const uint ib8 = (iqs / 8) % 4;
- const uint qs = data_a[a_offset + ib].qs[8 * ib32 + ib8];
- // Scales are stored as packed 7+7+7+7+4 bits (4 sign tuples and 1 int4 scale)
- const uint signs = pack32(u16vec2(data_a_packed16[a_offset + ib].qs[4 * ib32 + 2],
- data_a_packed16[a_offset + ib].qs[4 * ib32 + 3]));
- const float db = 0.25 * (0.5 + (signs >> 28));
- const uint sign7 = bitfieldExtract(signs, 7 * int(ib8), 7);
- // Add parity bit
- const uint sign8 = sign7 | (bitCount(sign7) << 7);
- const uint sign = sign8 >> (iqs % 8);
- const u8vec4 grid = unpack8(iq2xxs_grid[qs][(iqs % 8) / 4] >> (8 * (iqs % 4)));
- bool sign0 = (sign & 1) != 0;
- bool sign1 = (sign & 2) != 0;
- return db * vec2(
- grid.x * (sign0 ? -1.0 : 1.0),
- grid.y * (sign1 ? -1.0 : 1.0)
- );
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint ib32 = iqs / 32;
- const uint ib8 = (iqs / 8) % 4;
- const uint qs = data_a[a_offset + ib].qs[8 * ib32 + ib8];
- // Scales are stored as packed 7+7+7+7+4 bits (4 sign tuples and 1 int4 scale)
- const uint signs = pack32(u16vec2(data_a_packed16[a_offset + ib].qs[4 * ib32 + 2],
- data_a_packed16[a_offset + ib].qs[4 * ib32 + 3]));
- const float db = 0.25 * (0.5 + (signs >> 28));
- const uint sign7 = bitfieldExtract(signs, 7 * int(ib8), 7);
- // Add parity bit
- const uint sign8 = sign7 | (bitCount(sign7) << 7);
- const uint sign = sign8 >> (iqs % 8);
- const u8vec4 grid = unpack8(iq2xxs_grid[qs][(iqs % 8) / 4] >> (8 * (iqs % 4)));
- bool sign0 = (sign & 1) != 0;
- bool sign1 = (sign & 2) != 0;
- bool sign2 = (sign & 4) != 0;
- bool sign3 = (sign & 8) != 0;
- return db * vec4(
- grid.x * (sign0 ? -1.0 : 1.0),
- grid.y * (sign1 ? -1.0 : 1.0),
- grid.z * (sign2 ? -1.0 : 1.0),
- grid.w * (sign3 ? -1.0 : 1.0)
- );
-}
-#endif
-
-#if defined(DATA_A_IQ2_XS)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint scale = (data_a[a_offset + ib].scales[iqs / 32] >> (4 * ((iqs / 16) & 1))) & 0xf;
- const uint qs = data_a[a_offset + ib].qs[iqs / 8];
- const float db = 0.25 * (0.5 + scale);
- const uint sign7 = qs >> 9;
- // Add parity bit
- const uint sign8 = sign7 | (bitCount(sign7) << 7);
- const uint sign = sign8 >> (iqs % 8);
- const u8vec4 grid = unpack8(iq2xs_grid[qs & 511][(iqs % 8) / 4] >> (8 * (iqs % 4)));
- bool sign0 = (sign & 1) != 0;
- bool sign1 = (sign & 2) != 0;
- return db * vec2(
- grid.x * (sign0 ? -1.0 : 1.0),
- grid.y * (sign1 ? -1.0 : 1.0)
- );
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint scale = (data_a[a_offset + ib].scales[iqs / 32] >> (4 * ((iqs / 16) & 1))) & 0xf;
- const uint qs = data_a[a_offset + ib].qs[iqs / 8];
- const float db = 0.25 * (0.5 + scale);
- const uint sign7 = qs >> 9;
- // Add parity bit
- const uint sign8 = sign7 | (bitCount(sign7) << 7);
- const uint sign = sign8 >> (iqs % 8);
- const u8vec4 grid = unpack8(iq2xs_grid[qs & 511][(iqs % 8) / 4] >> (8 * (iqs % 4)));
- bool sign0 = (sign & 1) != 0;
- bool sign1 = (sign & 2) != 0;
- bool sign2 = (sign & 4) != 0;
- bool sign3 = (sign & 8) != 0;
- return db * vec4(
- grid.x * (sign0 ? -1.0 : 1.0),
- grid.y * (sign1 ? -1.0 : 1.0),
- grid.z * (sign2 ? -1.0 : 1.0),
- grid.w * (sign3 ? -1.0 : 1.0)
- );
-}
-#endif
-
-#if defined(DATA_A_IQ2_S)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint ib32 = iqs / 32;
- const uint ib8 = iqs / 8;
-
- const uint scale = (data_a[a_offset + ib].scales[ib32] >> (4 * ((iqs / 16) & 1))) & 0xf;
- const uint qs = data_a[a_offset + ib].qs[ib8];
- const uint qh = data_a[a_offset + ib].qh[ib32];
- const uint qhshift = 2 * (ib8 % 4);
- const uint sign = data_a[a_offset + ib].qs[QUANT_K / 8 + ib8] >> (iqs % 8);
-
- const float db = 0.25 * (0.5 + scale);
- const u8vec4 grid = unpack8(iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)][(iqs % 8) / 4]);
- bool sign0 = (sign & 1) != 0;
- bool sign1 = (sign & 2) != 0;
- return db * vec2(
- grid[iqs % 4] * (sign0 ? -1.0 : 1.0),
- grid[(iqs % 4) + 1] * (sign1 ? -1.0 : 1.0)
- );
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint ib32 = iqs / 32;
- const uint ib8 = iqs / 8;
-
- const uint scale = (data_a[a_offset + ib].scales[ib32] >> (4 * ((iqs / 16) & 1))) & 0xf;
- const uint qs = data_a[a_offset + ib].qs[ib8];
- const uint qh = data_a[a_offset + ib].qh[ib32];
- const uint qhshift = 2 * (ib8 % 4);
- const uint sign = data_a[a_offset + ib].qs[QUANT_K / 8 + ib8] >> (iqs % 8);
-
- const float db = 0.25 * (0.5 + scale);
- const u8vec4 grid = unpack8(iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)][(iqs % 8) / 4]);
- bool sign0 = (sign & 1) != 0;
- bool sign1 = (sign & 2) != 0;
- bool sign2 = (sign & 4) != 0;
- bool sign3 = (sign & 8) != 0;
- return db * vec4(
- grid.x * (sign0 ? -1.0 : 1.0),
- grid.y * (sign1 ? -1.0 : 1.0),
- grid.z * (sign2 ? -1.0 : 1.0),
- grid.w * (sign3 ? -1.0 : 1.0)
- );
-}
-#endif
-
-#if defined(DATA_A_IQ3_XXS)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint ib4 = iqs / 4;
- const uint ib32 = iqs / 32;
- const uint is = QUANT_K / 4 + 4 * ib32;
- const uint qs = data_a[a_offset + ib].qs[ib4];
- // Scales are stored as packed 7+7+7+7+4 bits (4 sign tuples and 1 int4 scale)
- const uint signs = pack32(u16vec2(data_a_packed16[a_offset + ib].qs[is / 2],
- data_a_packed16[a_offset + ib].qs[is / 2 + 1]));
- const float db = 0.5 * (0.5 + (signs >> 28));
- const uint sign7 = bitfieldExtract(signs, 7 * (int(ib4 / 2) % 4), 7);
- // Add parity bit
- const uint sign8 = sign7 | (bitCount(sign7) << 7);
- const uint sign = sign8 >> (iqs % 8);
- const u8vec4 grid = unpack8(iq3xxs_grid[qs] >> (8 * (iqs % 4)));
- bool sign0 = (sign & 1) != 0;
- bool sign1 = (sign & 2) != 0;
- return db * vec2(
- grid.x * (sign0 ? -1.0 : 1.0),
- grid.y * (sign1 ? -1.0 : 1.0)
- );
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint ib4 = iqs / 4;
- const uint ib32 = iqs / 32;
- const uint is = QUANT_K / 4 + 4 * ib32;
- const uint qs = data_a[a_offset + ib].qs[ib4];
- const uint signs = pack32(u16vec2(data_a_packed16[a_offset + ib].qs[is / 2],
- data_a_packed16[a_offset + ib].qs[is / 2 + 1]));
- const float db = 0.5 * (0.5 + (signs >> 28));
- const uint sign7 = bitfieldExtract(signs, 7 * (int(ib4 / 2) % 4), 7);
- // Add parity bit
- const uint sign8 = sign7 | (bitCount(sign7) << 7);
- const uint sign = sign8 >> (iqs % 8);
- const u8vec4 grid = unpack8(iq3xxs_grid[qs]);
- bool sign0 = (sign & 1) != 0;
- bool sign1 = (sign & 2) != 0;
- bool sign2 = (sign & 4) != 0;
- bool sign3 = (sign & 8) != 0;
- return db * vec4(
- grid.x * (sign0 ? -1.0 : 1.0),
- grid.y * (sign1 ? -1.0 : 1.0),
- grid.z * (sign2 ? -1.0 : 1.0),
- grid.w * (sign3 ? -1.0 : 1.0)
- );
-}
-#endif
-
-#if defined(DATA_A_IQ3_S)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint qs = data_a[a_offset + ib].qs[iqs / 4];
- const uint qh = data_a[a_offset + ib].qh[iqs / 32];
- const uint sign = data_a[a_offset + ib].signs[iqs / 8] >> (iqs % 8);
- const uint scale = data_a[a_offset + ib].scales[iqs / 64];
- bool sign0 = (sign & 1) != 0;
- bool sign1 = (sign & 2) != 0;
- const float db = 1 + 2 * ((scale >> (4 * ((iqs / 32) & 1))) & 0xf);
- const uint32_t grid = iq3s_grid[qs | ((qh << (8 - ((iqs / 4) % 8))) & 256)] >> (8 * (iqs % 4));
- return db * vec2(
- int(grid & 0xFF) * (sign0 ? -1.0 : 1.0),
- int((grid >> 8) & 0xFF) * (sign1 ? -1.0 : 1.0)
- );
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint ib4 = iqs / 4;
- const uint ib32 = iqs / 32;
- const uint qs = data_a[a_offset + ib].qs[ib4];
- const uint qh = data_a[a_offset + ib].qh[ib32];
- const uint sign = data_a[a_offset + ib].signs[iqs / 8] >> (iqs % 8);
- const uint scale = data_a[a_offset + ib].scales[ib32 / 2];
- bool sign0 = (sign & 1) != 0;
- bool sign1 = (sign & 2) != 0;
- bool sign2 = (sign & 4) != 0;
- bool sign3 = (sign & 8) != 0;
- const float db = 1 + 2 * ((scale >> (4 * (ib32 & 1))) & 0xf);
- const uint32_t grid = iq3s_grid[qs | ((qh << (8 - ib4 % 8)) & 256)] >> (8 * (iqs % 4));
- return db * vec4(
- int(grid & 0xFF) * (sign0 ? -1.0 : 1.0),
- int((grid >> 8) & 0xFF) * (sign1 ? -1.0 : 1.0),
- int((grid >> 16) & 0xFF) * (sign2 ? -1.0 : 1.0),
- int((grid >> 24) & 0xFF) * (sign3 ? -1.0 : 1.0)
- );
-}
-#endif
-
-#if defined(DATA_A_IQ4_XS)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint ib32 = iqs / 32;
- const uint iq = 16 * ib32 + (iqs % 16);
-
- const uint sl = (data_a[a_offset + ib].scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF;
- const uint sh = (data_a[a_offset + ib].scales_h >> (2 * ib32)) & 3;
- const uint qshift = (iqs & 16) >> 2;
- u8vec2 qs = u8vec2(data_a[a_offset + ib].qs[iq], data_a[a_offset + ib].qs[iq + 1]);
- qs = (qs >> qshift) & uint8_t(0xF);
-
- const float dl = float(int(sl | (sh << 4)) - 32);
- return dl * vec2(kvalues_iq4nl[qs.x], kvalues_iq4nl[qs.y]);
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint ib32 = iqs / 32;
- const uint iq = 16 * ib32 + (iqs % 16);
-
- const uint sl = (data_a[a_offset + ib].scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF;
- const uint sh = (data_a[a_offset + ib].scales_h >> (2 * ib32)) & 3;
- const uint qshift = (iqs & 16) >> 2;
- u8vec4 qs = u8vec4(
- data_a[a_offset + ib].qs[iq + 0],
- data_a[a_offset + ib].qs[iq + 1],
- data_a[a_offset + ib].qs[iq + 2],
- data_a[a_offset + ib].qs[iq + 3]
- );
- qs = (qs >> qshift) & uint8_t(0xF);
-
- const float dl = float(int(sl | (sh << 4)) - 32);
- return dl * vec4(
- kvalues_iq4nl[qs.x], kvalues_iq4nl[qs.y],
- kvalues_iq4nl[qs.z], kvalues_iq4nl[qs.w]);
-}
-#endif
-
-#if defined(DATA_A_IQ4_NL)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
- return vec2(kvalues_iq4nl[vui & 0xF], kvalues_iq4nl[vui >> 4]);
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
- return vec4(kvalues_iq4nl[vui & 0xF], kvalues_iq4nl[(vui >> 4) & 0xF], kvalues_iq4nl[(vui >> 8) & 0xF], kvalues_iq4nl[vui >> 12]);
-}
-#endif
-
-#if defined(DATA_A_MXFP4)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
- return vec2(kvalues_mxfp4[vui & 0xF], kvalues_mxfp4[vui >> 4]);
-}
-vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
- vec2 v0 = dequantize(ib, iqs, a_offset);
- vec2 v1 = dequantize(ib, iqs + 1, a_offset);
- return vec4(v0.x, v0.y, v1.x, v1.y);
-}
-#endif
-
-#if defined(DATA_A_F32) || defined(DATA_A_F16) || defined(DATA_A_BF16)
-vec2 get_dm(uint ib, uint a_offset) {
- return vec2(0, 0);
-}
-#endif
-
-#if defined(DATA_A_IQ1_M)
-vec2 get_dm(uint ib, uint a_offset) {
- const uint16_t[4] scales = data_a[a_offset + ib].scales;
- const u16vec4 s = u16vec4(scales[0], scales[1], scales[2], scales[3]) >> 12;
- const float d = float(unpackHalf2x16(s.x | (s.y << 4) | (s.z << 8) | (s.w << 12)).x);
- return vec2(d, 0);
-}
-#endif
-
-#if defined(DATA_A_Q4_0) || defined(DATA_A_Q5_0) || defined(DATA_A_Q8_0) || defined(DATA_A_IQ1_S) || defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL)
-vec2 get_dm(uint ib, uint a_offset) {
- return vec2(float(data_a[a_offset + ib].d), 0);
-}
-#endif
-
-#if defined(DATA_A_MXFP4)
-vec2 get_dm(uint ib, uint a_offset) {
- return vec2(e8m0_to_fp32(data_a[a_offset + ib].e), 0);
-}
-#endif
-
-#if defined(DATA_A_Q4_1) || defined(DATA_A_Q5_1)
-vec2 get_dm(uint ib, uint a_offset) {
- return vec2(float(data_a[a_offset + ib].d), float(data_a[a_offset + ib].m));
-}
-#endif
-
-#if defined(DATA_A_Q2_K)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- iqs /= 2;
- const uint qsi = (iqs / 64) * 32 + (iqs % 16) * 2; // 0,2,4..30
- const uint scalesi = iqs / 8; // 0..15
- const uint qsshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
-
- const uvec2 qs = uvec2(data_a[a_offset + ib].qs[qsi], data_a[a_offset + ib].qs[qsi + 1]);
- const uint scales = data_a[a_offset + ib].scales[scalesi];
- const vec2 d = vec2(data_a[a_offset + ib].d);
-
- return d.x * float(scales & 0xF) * vec2((qs >> qsshift) & 3) - d.y * float(scales >> 4);
-}
-vec2 get_dm(uint ib, uint a_offset) {
- return vec2(1, 0);
-}
-#endif
-
-#if defined(DATA_A_Q3_K)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- iqs /= 2;
- const uint n = iqs / 64; // 0,1
- const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
- const uint hmi = (iqs % 16) * 2; // 0,2,4..30
- const uint j = (iqs % 64) / 4; // 0..3
- const uint is = iqs / 8; // 0..15
- const uint halfsplit = ((iqs % 64) / 16); // 0,1,2,3
- const uint qsshift = halfsplit * 2; // 0,2,4,6
- const uint m = 1 << (4 * n + halfsplit); // 1,2,4,8,16,32,64,128
-
- const int8_t us = int8_t(((data_a[a_offset + ib].scales[is % 8] >> (4 * int(is / 8))) & 0xF)
- | (((data_a[a_offset + ib].scales[8 + (is % 4)] >> (2 * int(is / 4))) & 3) << 4));
- const float dl = float(data_a[a_offset + ib].d) * float(us - 32);
-
- return vec2(dl * float(int8_t((data_a[a_offset + ib].qs[qsi ] >> qsshift) & 3) - (((data_a[a_offset + ib].hmask[hmi ] & m) != 0) ? 0 : 4)),
- dl * float(int8_t((data_a[a_offset + ib].qs[qsi + 1] >> qsshift) & 3) - (((data_a[a_offset + ib].hmask[hmi + 1] & m) != 0) ? 0 : 4)));
-}
-vec2 get_dm(uint ib, uint a_offset) {
- return vec2(1, 0);
-}
-#endif
-
-#if defined(DATA_A_Q4_K)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- iqs /= 2;
- const uint n = iqs / 32; // 0,1,2,3
- const uint b = (iqs % 32) / 16; // 0,1
- const uint is = 2 * n + b; // 0..7
- const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
-
- const vec2 loadd = vec2(data_a[a_offset + ib].d);
-
- const uint scidx0 = (is < 4) ? is : (is + 4);
- const uint scidx1 = (is < 4) ? is : (is - 4);
- const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
- const uint scidxshift1 = (is < 4) ? 0 : 2;
- const uint mbidx0 = is + 4;
- const uint mbidx1 = (is < 4) ? is + 4 : is;
- const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
- const uint mbidxshift0 = (is < 4) ? 0 : 4;
- const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
- const uint mbidxshift1 = (is < 4) ? 0 : 2;
-
- const uint8_t sc = uint8_t((data_a[a_offset + ib].scales[scidx0] & 0xF) | ((data_a[a_offset + ib].scales[scidx1] & scidxmask1) >> scidxshift1));
- const uint8_t mbyte = uint8_t((data_a[a_offset + ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[a_offset + ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
-
- const float d = loadd.x * sc;
- const float m = -loadd.y * mbyte;
-
- return vec2(fma(d, float((data_a[a_offset + ib].qs[qsi ] >> (b * 4)) & 0xF), m),
- fma(d, float((data_a[a_offset + ib].qs[qsi + 1] >> (b * 4)) & 0xF), m));
-}
-vec2 get_dm(uint ib, uint a_offset) {
- return vec2(1, 0);
-}
-#endif
-
-#if defined(DATA_A_Q5_K)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- iqs /= 2;
- const uint n = iqs / 32; // 0,1,2,3
- const uint b = (iqs % 32) / 16; // 0,1
- const uint is = 2 * n + b; // 0..7
- const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
- const uint qhi = (iqs % 16) * 2; // 0,2,4..30
-
- const uint8_t hm = uint8_t(1 << (iqs / 16));
-
- const vec2 loadd = vec2(data_a[a_offset + ib].d);
-
- const uint scidx0 = (is < 4) ? is : (is + 4);
- const uint scidx1 = (is < 4) ? is : (is - 4);
- const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
- const uint scidxshift1 = (is < 4) ? 0 : 2;
- const uint mbidx0 = is + 4;
- const uint mbidx1 = (is < 4) ? is + 4 : is;
- const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
- const uint mbidxshift0 = (is < 4) ? 0 : 4;
- const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
- const uint mbidxshift1 = (is < 4) ? 0 : 2;
-
- const uint8_t sc = uint8_t((data_a[a_offset + ib].scales[scidx0] & 0xF) | ((data_a[a_offset + ib].scales[scidx1] & scidxmask1) >> scidxshift1));
- const uint8_t mbyte = uint8_t(((data_a[a_offset + ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((data_a[a_offset + ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
-
- const float d = loadd.x * sc;
- const float m = -loadd.y * mbyte;
-
- return vec2(fma(d, float((data_a[a_offset + ib].qs[qsi ] >> (b * 4)) & 0xF) + float((data_a[a_offset + ib].qh[qhi ] & hm) != 0 ? 16 : 0), m),
- fma(d, float((data_a[a_offset + ib].qs[qsi + 1] >> (b * 4)) & 0xF) + float((data_a[a_offset + ib].qh[qhi + 1] & hm) != 0 ? 16 : 0), m));
-}
-vec2 get_dm(uint ib, uint a_offset) {
- return vec2(1, 0);
-}
-#endif
-
-#if defined(DATA_A_Q6_K)
-vec2 dequantize(uint ib, uint iqs, uint a_offset) {
- iqs /= 2;
- const uint n = iqs / 64; // 0,1
- const uint b = (iqs % 64) / 32; // 0,1
- const uint is_b = (iqs % 16) / 8; // 0,1
- const uint qhshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
- const uint is = 8 * n + qhshift + is_b; // 0..15
- const uint qsi = n * 64 + (iqs % 32) * 2; // 0,2,4..126
- const uint qhi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
-
- const float dscale = float(data_a[a_offset + ib].d) * float(data_a[a_offset + ib].scales[is]);
-
- return vec2(dscale * float(int8_t(((data_a[a_offset + ib].ql[qsi ] >> (b * 4)) & 0xF) | (((data_a[a_offset + ib].qh[qhi ] >> qhshift) & 3) << 4)) - 32),
- dscale * float(int8_t(((data_a[a_offset + ib].ql[qsi + 1] >> (b * 4)) & 0xF) | (((data_a[a_offset + ib].qh[qhi + 1] >> qhshift) & 3) << 4)) - 32));
-}
-vec2 get_dm(uint ib, uint a_offset) {
- return vec2(1, 0);
-}
-#endif
--- /dev/null
+#if !defined(DATA_A_F32) && !defined(DATA_A_F16)
+#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
+#endif
+
+#include "types.glsl"
+
+#if defined(A_TYPE_PACKED16)
+layout (binding = 0) readonly buffer A_PACKED16 {A_TYPE_PACKED16 data_a_packed16[];};
+#endif
+#if defined(A_TYPE_PACKED32)
+layout (binding = 0) readonly buffer A_PACKED32 {A_TYPE_PACKED32 data_a_packed32[];};
+#endif
+
+#if defined(DATA_A_F32)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ return vec2(data_a[a_offset + ib], data_a[a_offset + ib + 1]);
+}
+#endif
+
+#if defined(DATA_A_F16)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ return vec2(data_a[a_offset + ib], data_a[a_offset + ib + 1]);
+}
+#endif
+
+#if defined(DATA_A_BF16)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ return vec2(bf16_to_fp32(data_a[a_offset + ib]), bf16_to_fp32(data_a[a_offset + ib + 1]));
+}
+#endif
+
+#if defined(DATA_A_Q4_0)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
+ return (vec2(vui & 0xF, vui >> 4) - 8.0f);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
+ return (vec4(vui & 0xF, (vui >> 4) & 0xF, (vui >> 8) & 0xF, vui >> 12) - 8.0f);
+}
+#endif
+
+#if defined(DATA_A_Q4_1)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
+ return vec2(vui & 0xF, vui >> 4);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
+ return vec4(vui & 0xF, (vui >> 4) & 0xF, (vui >> 8) & 0xF, vui >> 12);
+}
+#endif
+
+#if defined(DATA_A_Q5_0)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint uint_qh = uint(data_a[a_offset + ib].qh[1]) << 16 | data_a[a_offset + ib].qh[0];
+ const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
+ const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
+ return (vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y) - 16.0f);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint uint_qh = uint(data_a_packed16[a_offset + ib].qh[1]) << 16 | data_a_packed16[a_offset + ib].qh[0];
+ const ivec2 qh0 = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
+ const ivec2 qh1 = ivec2(((uint_qh >> (iqs + 1)) << 4) & 0x10, (uint_qh >> (iqs + 13)) & 0x10);
+ const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
+ return (vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y) - 16.0f);
+}
+#endif
+
+#if defined(DATA_A_Q5_1)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint uint_qh = data_a[a_offset + ib].qh;
+ const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
+ const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
+ return vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint uint_qh = data_a_packed16[a_offset + ib].qh;
+ const ivec2 qh0 = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
+ const ivec2 qh1 = ivec2(((uint_qh >> (iqs + 1)) << 4) & 0x10, (uint_qh >> (iqs + 13)) & 0x10);
+ const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
+ return vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y);
+}
+#endif
+
+#if defined(DATA_A_Q8_0)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ return vec2(int(data_a[a_offset + ib].qs[iqs]), int(data_a[a_offset + ib].qs[iqs + 1]));
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const i8vec2 v0 = unpack8(int32_t(data_a_packed16[a_offset + ib].qs[iqs/2])).xy; // vec4 used due to #12147
+ const i8vec2 v1 = unpack8(int32_t(data_a_packed16[a_offset + ib].qs[iqs/2 + 1])).xy;
+ return vec4(v0.x, v0.y, v1.x, v1.y);
+}
+#endif
+
+#if defined(DATA_A_IQ1_S)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint ib32 = iqs / 32;
+ const uint ib8 = iqs / 8;
+ const int i8 = int(iqs % 8);
+ const uint qh = data_a[a_offset + ib].qh[ib32];
+ const uint qs = data_a[a_offset + ib].qs[ib8];
+ const float dl = float(2 * bitfieldExtract(qh, 12, 3) + 1);
+ const float delta = ((qh & 0x8000) != 0) ? -IQ1S_DELTA : IQ1S_DELTA;
+ const uint idxhi = bitfieldExtract(qh, 3 * int(ib8 & 3), 3);
+ const int16_t grid = int16_t(iq1s_grid[qs | (idxhi << 8)]);
+ // Signed bitfield extract.
+ const ivec2 gvec = ivec2(
+ bitfieldExtract(grid, 2 * (i8), 2),
+ bitfieldExtract(grid, 2 * (i8 + 1), 2)
+ );
+ return dl * (vec2(gvec) + delta);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint ib32 = iqs / 32;
+ const uint ib8 = iqs / 8;
+ const int i8 = int(iqs % 8);
+ const uint qh = data_a[a_offset + ib].qh[ib32];
+ const uint qs = data_a[a_offset + ib].qs[ib8];
+ const float dl = 2 * bitfieldExtract(qh, 12, 3) + 1;
+ const float delta = ((qh & 0x8000) != 0) ? -IQ1S_DELTA : IQ1S_DELTA;
+ const int16_t grid = int16_t(iq1s_grid[qs | (bitfieldExtract(qh, 3 * int(ib8 & 3), 3) << 8)]);
+ // Signed bitfield extract.
+ const ivec4 gvec = ivec4(
+ bitfieldExtract(grid, 2 * (i8), 2),
+ bitfieldExtract(grid, 2 * (i8 + 1), 2),
+ bitfieldExtract(grid, 2 * (i8 + 2), 2),
+ bitfieldExtract(grid, 2 * (i8 + 3), 2)
+ );
+ return dl * (vec4(gvec) + delta);
+}
+#endif
+
+#if defined(DATA_A_IQ1_M)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint ib8 = iqs / 8;
+ const uint ib16 = iqs / 16;
+ const int i8 = int(iqs % 8);
+ const uint sc = data_a[a_offset + ib].scales[iqs / 64];
+ const uint qs = data_a[a_offset + ib].qs[ib8];
+ const uint qh = data_a[a_offset + ib].qh[ib16] >> (4 * (ib8 & 1));
+ const float dl = 2 * bitfieldExtract(sc, 3 * int(ib16 & 3), 3) + 1;
+ const float delta = ((qh & 8) != 0) ? -IQ1M_DELTA : IQ1M_DELTA;
+ const int16_t grid = int16_t(iq1s_grid[qs | ((qh & 7) << 8)]);
+ // Signed bitfield extract.
+ const ivec2 gvec = ivec2(
+ bitfieldExtract(grid, 2 * (i8), 2),
+ bitfieldExtract(grid, 2 * (i8 + 1), 2)
+ );
+ return dl * (vec2(gvec) + delta);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint ib8 = iqs / 8;
+ const uint ib16 = iqs / 16;
+ const int i8 = int(iqs % 8);
+ const uint sc = data_a[a_offset + ib].scales[iqs / 64];
+ const uint qs = data_a[a_offset + ib].qs[ib8];
+ const uint qh = data_a[a_offset + ib].qh[ib16] >> (4 * (ib8 & 1));
+ const float dl = 2 * bitfieldExtract(sc, 3 * int(ib16 & 3), 3) + 1;
+ const float delta = ((qh & 8) != 0) ? -IQ1M_DELTA : IQ1M_DELTA;
+ const int16_t grid = int16_t(iq1s_grid[qs | ((qh & 7) << 8)]);
+ // Signed bitfield extract.
+ const ivec4 gvec = ivec4(
+ bitfieldExtract(grid, 2 * (i8), 2),
+ bitfieldExtract(grid, 2 * (i8 + 1), 2),
+ bitfieldExtract(grid, 2 * (i8 + 2), 2),
+ bitfieldExtract(grid, 2 * (i8 + 3), 2)
+ );
+ return dl * (vec4(gvec) + delta);
+}
+#endif
+
+#if defined(DATA_A_IQ2_XXS)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint ib32 = iqs / 32;
+ const uint ib8 = (iqs / 8) % 4;
+ const uint qs = data_a[a_offset + ib].qs[8 * ib32 + ib8];
+ // Scales are stored as packed 7+7+7+7+4 bits (4 sign tuples and 1 int4 scale)
+ const uint signs = pack32(u16vec2(data_a_packed16[a_offset + ib].qs[4 * ib32 + 2],
+ data_a_packed16[a_offset + ib].qs[4 * ib32 + 3]));
+ const float db = 0.25 * (0.5 + (signs >> 28));
+ const uint sign7 = bitfieldExtract(signs, 7 * int(ib8), 7);
+ // Add parity bit
+ const uint sign8 = sign7 | (bitCount(sign7) << 7);
+ const uint sign = sign8 >> (iqs % 8);
+ const u8vec4 grid = unpack8(iq2xxs_grid[qs][(iqs % 8) / 4] >> (8 * (iqs % 4)));
+ bool sign0 = (sign & 1) != 0;
+ bool sign1 = (sign & 2) != 0;
+ return db * vec2(
+ grid.x * (sign0 ? -1.0 : 1.0),
+ grid.y * (sign1 ? -1.0 : 1.0)
+ );
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint ib32 = iqs / 32;
+ const uint ib8 = (iqs / 8) % 4;
+ const uint qs = data_a[a_offset + ib].qs[8 * ib32 + ib8];
+ // Scales are stored as packed 7+7+7+7+4 bits (4 sign tuples and 1 int4 scale)
+ const uint signs = pack32(u16vec2(data_a_packed16[a_offset + ib].qs[4 * ib32 + 2],
+ data_a_packed16[a_offset + ib].qs[4 * ib32 + 3]));
+ const float db = 0.25 * (0.5 + (signs >> 28));
+ const uint sign7 = bitfieldExtract(signs, 7 * int(ib8), 7);
+ // Add parity bit
+ const uint sign8 = sign7 | (bitCount(sign7) << 7);
+ const uint sign = sign8 >> (iqs % 8);
+ const u8vec4 grid = unpack8(iq2xxs_grid[qs][(iqs % 8) / 4] >> (8 * (iqs % 4)));
+ bool sign0 = (sign & 1) != 0;
+ bool sign1 = (sign & 2) != 0;
+ bool sign2 = (sign & 4) != 0;
+ bool sign3 = (sign & 8) != 0;
+ return db * vec4(
+ grid.x * (sign0 ? -1.0 : 1.0),
+ grid.y * (sign1 ? -1.0 : 1.0),
+ grid.z * (sign2 ? -1.0 : 1.0),
+ grid.w * (sign3 ? -1.0 : 1.0)
+ );
+}
+#endif
+
+#if defined(DATA_A_IQ2_XS)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint scale = (data_a[a_offset + ib].scales[iqs / 32] >> (4 * ((iqs / 16) & 1))) & 0xf;
+ const uint qs = data_a[a_offset + ib].qs[iqs / 8];
+ const float db = 0.25 * (0.5 + scale);
+ const uint sign7 = qs >> 9;
+ // Add parity bit
+ const uint sign8 = sign7 | (bitCount(sign7) << 7);
+ const uint sign = sign8 >> (iqs % 8);
+ const u8vec4 grid = unpack8(iq2xs_grid[qs & 511][(iqs % 8) / 4] >> (8 * (iqs % 4)));
+ bool sign0 = (sign & 1) != 0;
+ bool sign1 = (sign & 2) != 0;
+ return db * vec2(
+ grid.x * (sign0 ? -1.0 : 1.0),
+ grid.y * (sign1 ? -1.0 : 1.0)
+ );
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint scale = (data_a[a_offset + ib].scales[iqs / 32] >> (4 * ((iqs / 16) & 1))) & 0xf;
+ const uint qs = data_a[a_offset + ib].qs[iqs / 8];
+ const float db = 0.25 * (0.5 + scale);
+ const uint sign7 = qs >> 9;
+ // Add parity bit
+ const uint sign8 = sign7 | (bitCount(sign7) << 7);
+ const uint sign = sign8 >> (iqs % 8);
+ const u8vec4 grid = unpack8(iq2xs_grid[qs & 511][(iqs % 8) / 4] >> (8 * (iqs % 4)));
+ bool sign0 = (sign & 1) != 0;
+ bool sign1 = (sign & 2) != 0;
+ bool sign2 = (sign & 4) != 0;
+ bool sign3 = (sign & 8) != 0;
+ return db * vec4(
+ grid.x * (sign0 ? -1.0 : 1.0),
+ grid.y * (sign1 ? -1.0 : 1.0),
+ grid.z * (sign2 ? -1.0 : 1.0),
+ grid.w * (sign3 ? -1.0 : 1.0)
+ );
+}
+#endif
+
+#if defined(DATA_A_IQ2_S)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint ib32 = iqs / 32;
+ const uint ib8 = iqs / 8;
+
+ const uint scale = (data_a[a_offset + ib].scales[ib32] >> (4 * ((iqs / 16) & 1))) & 0xf;
+ const uint qs = data_a[a_offset + ib].qs[ib8];
+ const uint qh = data_a[a_offset + ib].qh[ib32];
+ const uint qhshift = 2 * (ib8 % 4);
+ const uint sign = data_a[a_offset + ib].qs[QUANT_K / 8 + ib8] >> (iqs % 8);
+
+ const float db = 0.25 * (0.5 + scale);
+ const u8vec4 grid = unpack8(iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)][(iqs % 8) / 4]);
+ bool sign0 = (sign & 1) != 0;
+ bool sign1 = (sign & 2) != 0;
+ return db * vec2(
+ grid[iqs % 4] * (sign0 ? -1.0 : 1.0),
+ grid[(iqs % 4) + 1] * (sign1 ? -1.0 : 1.0)
+ );
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint ib32 = iqs / 32;
+ const uint ib8 = iqs / 8;
+
+ const uint scale = (data_a[a_offset + ib].scales[ib32] >> (4 * ((iqs / 16) & 1))) & 0xf;
+ const uint qs = data_a[a_offset + ib].qs[ib8];
+ const uint qh = data_a[a_offset + ib].qh[ib32];
+ const uint qhshift = 2 * (ib8 % 4);
+ const uint sign = data_a[a_offset + ib].qs[QUANT_K / 8 + ib8] >> (iqs % 8);
+
+ const float db = 0.25 * (0.5 + scale);
+ const u8vec4 grid = unpack8(iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)][(iqs % 8) / 4]);
+ bool sign0 = (sign & 1) != 0;
+ bool sign1 = (sign & 2) != 0;
+ bool sign2 = (sign & 4) != 0;
+ bool sign3 = (sign & 8) != 0;
+ return db * vec4(
+ grid.x * (sign0 ? -1.0 : 1.0),
+ grid.y * (sign1 ? -1.0 : 1.0),
+ grid.z * (sign2 ? -1.0 : 1.0),
+ grid.w * (sign3 ? -1.0 : 1.0)
+ );
+}
+#endif
+
+#if defined(DATA_A_IQ3_XXS)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint ib4 = iqs / 4;
+ const uint ib32 = iqs / 32;
+ const uint is = QUANT_K / 4 + 4 * ib32;
+ const uint qs = data_a[a_offset + ib].qs[ib4];
+ // Scales are stored as packed 7+7+7+7+4 bits (4 sign tuples and 1 int4 scale)
+ const uint signs = pack32(u16vec2(data_a_packed16[a_offset + ib].qs[is / 2],
+ data_a_packed16[a_offset + ib].qs[is / 2 + 1]));
+ const float db = 0.5 * (0.5 + (signs >> 28));
+ const uint sign7 = bitfieldExtract(signs, 7 * (int(ib4 / 2) % 4), 7);
+ // Add parity bit
+ const uint sign8 = sign7 | (bitCount(sign7) << 7);
+ const uint sign = sign8 >> (iqs % 8);
+ const u8vec4 grid = unpack8(iq3xxs_grid[qs] >> (8 * (iqs % 4)));
+ bool sign0 = (sign & 1) != 0;
+ bool sign1 = (sign & 2) != 0;
+ return db * vec2(
+ grid.x * (sign0 ? -1.0 : 1.0),
+ grid.y * (sign1 ? -1.0 : 1.0)
+ );
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint ib4 = iqs / 4;
+ const uint ib32 = iqs / 32;
+ const uint is = QUANT_K / 4 + 4 * ib32;
+ const uint qs = data_a[a_offset + ib].qs[ib4];
+ const uint signs = pack32(u16vec2(data_a_packed16[a_offset + ib].qs[is / 2],
+ data_a_packed16[a_offset + ib].qs[is / 2 + 1]));
+ const float db = 0.5 * (0.5 + (signs >> 28));
+ const uint sign7 = bitfieldExtract(signs, 7 * (int(ib4 / 2) % 4), 7);
+ // Add parity bit
+ const uint sign8 = sign7 | (bitCount(sign7) << 7);
+ const uint sign = sign8 >> (iqs % 8);
+ const u8vec4 grid = unpack8(iq3xxs_grid[qs]);
+ bool sign0 = (sign & 1) != 0;
+ bool sign1 = (sign & 2) != 0;
+ bool sign2 = (sign & 4) != 0;
+ bool sign3 = (sign & 8) != 0;
+ return db * vec4(
+ grid.x * (sign0 ? -1.0 : 1.0),
+ grid.y * (sign1 ? -1.0 : 1.0),
+ grid.z * (sign2 ? -1.0 : 1.0),
+ grid.w * (sign3 ? -1.0 : 1.0)
+ );
+}
+#endif
+
+#if defined(DATA_A_IQ3_S)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint qs = data_a[a_offset + ib].qs[iqs / 4];
+ const uint qh = data_a[a_offset + ib].qh[iqs / 32];
+ const uint sign = data_a[a_offset + ib].signs[iqs / 8] >> (iqs % 8);
+ const uint scale = data_a[a_offset + ib].scales[iqs / 64];
+ bool sign0 = (sign & 1) != 0;
+ bool sign1 = (sign & 2) != 0;
+ const float db = 1 + 2 * ((scale >> (4 * ((iqs / 32) & 1))) & 0xf);
+ const uint32_t grid = iq3s_grid[qs | ((qh << (8 - ((iqs / 4) % 8))) & 256)] >> (8 * (iqs % 4));
+ return db * vec2(
+ int(grid & 0xFF) * (sign0 ? -1.0 : 1.0),
+ int((grid >> 8) & 0xFF) * (sign1 ? -1.0 : 1.0)
+ );
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint ib4 = iqs / 4;
+ const uint ib32 = iqs / 32;
+ const uint qs = data_a[a_offset + ib].qs[ib4];
+ const uint qh = data_a[a_offset + ib].qh[ib32];
+ const uint sign = data_a[a_offset + ib].signs[iqs / 8] >> (iqs % 8);
+ const uint scale = data_a[a_offset + ib].scales[ib32 / 2];
+ bool sign0 = (sign & 1) != 0;
+ bool sign1 = (sign & 2) != 0;
+ bool sign2 = (sign & 4) != 0;
+ bool sign3 = (sign & 8) != 0;
+ const float db = 1 + 2 * ((scale >> (4 * (ib32 & 1))) & 0xf);
+ const uint32_t grid = iq3s_grid[qs | ((qh << (8 - ib4 % 8)) & 256)] >> (8 * (iqs % 4));
+ return db * vec4(
+ int(grid & 0xFF) * (sign0 ? -1.0 : 1.0),
+ int((grid >> 8) & 0xFF) * (sign1 ? -1.0 : 1.0),
+ int((grid >> 16) & 0xFF) * (sign2 ? -1.0 : 1.0),
+ int((grid >> 24) & 0xFF) * (sign3 ? -1.0 : 1.0)
+ );
+}
+#endif
+
+#if defined(DATA_A_IQ4_XS)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint ib32 = iqs / 32;
+ const uint iq = 16 * ib32 + (iqs % 16);
+
+ const uint sl = (data_a[a_offset + ib].scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF;
+ const uint sh = (data_a[a_offset + ib].scales_h >> (2 * ib32)) & 3;
+ const uint qshift = (iqs & 16) >> 2;
+ u8vec2 qs = u8vec2(data_a[a_offset + ib].qs[iq], data_a[a_offset + ib].qs[iq + 1]);
+ qs = (qs >> qshift) & uint8_t(0xF);
+
+ const float dl = float(int(sl | (sh << 4)) - 32);
+ return dl * vec2(kvalues_iq4nl[qs.x], kvalues_iq4nl[qs.y]);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint ib32 = iqs / 32;
+ const uint iq = 16 * ib32 + (iqs % 16);
+
+ const uint sl = (data_a[a_offset + ib].scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF;
+ const uint sh = (data_a[a_offset + ib].scales_h >> (2 * ib32)) & 3;
+ const uint qshift = (iqs & 16) >> 2;
+ u8vec4 qs = u8vec4(
+ data_a[a_offset + ib].qs[iq + 0],
+ data_a[a_offset + ib].qs[iq + 1],
+ data_a[a_offset + ib].qs[iq + 2],
+ data_a[a_offset + ib].qs[iq + 3]
+ );
+ qs = (qs >> qshift) & uint8_t(0xF);
+
+ const float dl = float(int(sl | (sh << 4)) - 32);
+ return dl * vec4(
+ kvalues_iq4nl[qs.x], kvalues_iq4nl[qs.y],
+ kvalues_iq4nl[qs.z], kvalues_iq4nl[qs.w]);
+}
+#endif
+
+#if defined(DATA_A_IQ4_NL)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
+ return vec2(kvalues_iq4nl[vui & 0xF], kvalues_iq4nl[vui >> 4]);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
+ return vec4(kvalues_iq4nl[vui & 0xF], kvalues_iq4nl[(vui >> 4) & 0xF], kvalues_iq4nl[(vui >> 8) & 0xF], kvalues_iq4nl[vui >> 12]);
+}
+#endif
+
+#if defined(DATA_A_MXFP4)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
+ return vec2(kvalues_mxfp4[vui & 0xF], kvalues_mxfp4[vui >> 4]);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ vec2 v0 = dequantize(ib, iqs, a_offset);
+ vec2 v1 = dequantize(ib, iqs + 1, a_offset);
+ return vec4(v0.x, v0.y, v1.x, v1.y);
+}
+#endif
+
+#if defined(DATA_A_F32) || defined(DATA_A_F16) || defined(DATA_A_BF16)
+vec2 get_dm(uint ib, uint a_offset) {
+ return vec2(0, 0);
+}
+#endif
+
+#if defined(DATA_A_IQ1_M)
+vec2 get_dm(uint ib, uint a_offset) {
+ const uint16_t[4] scales = data_a[a_offset + ib].scales;
+ const u16vec4 s = u16vec4(scales[0], scales[1], scales[2], scales[3]) >> 12;
+ const float d = float(unpackHalf2x16(s.x | (s.y << 4) | (s.z << 8) | (s.w << 12)).x);
+ return vec2(d, 0);
+}
+#endif
+
+#if defined(DATA_A_Q4_0) || defined(DATA_A_Q5_0) || defined(DATA_A_Q8_0) || defined(DATA_A_IQ1_S) || defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL)
+vec2 get_dm(uint ib, uint a_offset) {
+ return vec2(float(data_a[a_offset + ib].d), 0);
+}
+#endif
+
+#if defined(DATA_A_MXFP4)
+vec2 get_dm(uint ib, uint a_offset) {
+ return vec2(e8m0_to_fp32(data_a[a_offset + ib].e), 0);
+}
+#endif
+
+#if defined(DATA_A_Q4_1) || defined(DATA_A_Q5_1)
+vec2 get_dm(uint ib, uint a_offset) {
+ return vec2(float(data_a[a_offset + ib].d), float(data_a[a_offset + ib].m));
+}
+#endif
+
+#if defined(DATA_A_Q2_K)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ iqs /= 2;
+ const uint qsi = (iqs / 64) * 32 + (iqs % 16) * 2; // 0,2,4..30
+ const uint scalesi = iqs / 8; // 0..15
+ const uint qsshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
+
+ const uvec2 qs = uvec2(data_a[a_offset + ib].qs[qsi], data_a[a_offset + ib].qs[qsi + 1]);
+ const uint scales = data_a[a_offset + ib].scales[scalesi];
+ const vec2 d = vec2(data_a[a_offset + ib].d);
+
+ return d.x * float(scales & 0xF) * vec2((qs >> qsshift) & 3) - d.y * float(scales >> 4);
+}
+vec2 get_dm(uint ib, uint a_offset) {
+ return vec2(1, 0);
+}
+#endif
+
+#if defined(DATA_A_Q3_K)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ iqs /= 2;
+ const uint n = iqs / 64; // 0,1
+ const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
+ const uint hmi = (iqs % 16) * 2; // 0,2,4..30
+ const uint j = (iqs % 64) / 4; // 0..3
+ const uint is = iqs / 8; // 0..15
+ const uint halfsplit = ((iqs % 64) / 16); // 0,1,2,3
+ const uint qsshift = halfsplit * 2; // 0,2,4,6
+ const uint m = 1 << (4 * n + halfsplit); // 1,2,4,8,16,32,64,128
+
+ const int8_t us = int8_t(((data_a[a_offset + ib].scales[is % 8] >> (4 * int(is / 8))) & 0xF)
+ | (((data_a[a_offset + ib].scales[8 + (is % 4)] >> (2 * int(is / 4))) & 3) << 4));
+ const float dl = float(data_a[a_offset + ib].d) * float(us - 32);
+
+ return vec2(dl * float(int8_t((data_a[a_offset + ib].qs[qsi ] >> qsshift) & 3) - (((data_a[a_offset + ib].hmask[hmi ] & m) != 0) ? 0 : 4)),
+ dl * float(int8_t((data_a[a_offset + ib].qs[qsi + 1] >> qsshift) & 3) - (((data_a[a_offset + ib].hmask[hmi + 1] & m) != 0) ? 0 : 4)));
+}
+vec2 get_dm(uint ib, uint a_offset) {
+ return vec2(1, 0);
+}
+#endif
+
+#if defined(DATA_A_Q4_K)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ iqs /= 2;
+ const uint n = iqs / 32; // 0,1,2,3
+ const uint b = (iqs % 32) / 16; // 0,1
+ const uint is = 2 * n + b; // 0..7
+ const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
+
+ const vec2 loadd = vec2(data_a[a_offset + ib].d);
+
+ const uint scidx0 = (is < 4) ? is : (is + 4);
+ const uint scidx1 = (is < 4) ? is : (is - 4);
+ const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint scidxshift1 = (is < 4) ? 0 : 2;
+ const uint mbidx0 = is + 4;
+ const uint mbidx1 = (is < 4) ? is + 4 : is;
+ const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ const uint mbidxshift0 = (is < 4) ? 0 : 4;
+ const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint mbidxshift1 = (is < 4) ? 0 : 2;
+
+ const uint8_t sc = uint8_t((data_a[a_offset + ib].scales[scidx0] & 0xF) | ((data_a[a_offset + ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+ const uint8_t mbyte = uint8_t((data_a[a_offset + ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[a_offset + ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const float d = loadd.x * sc;
+ const float m = -loadd.y * mbyte;
+
+ return vec2(fma(d, float((data_a[a_offset + ib].qs[qsi ] >> (b * 4)) & 0xF), m),
+ fma(d, float((data_a[a_offset + ib].qs[qsi + 1] >> (b * 4)) & 0xF), m));
+}
+vec2 get_dm(uint ib, uint a_offset) {
+ return vec2(1, 0);
+}
+#endif
+
+#if defined(DATA_A_Q5_K)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ iqs /= 2;
+ const uint n = iqs / 32; // 0,1,2,3
+ const uint b = (iqs % 32) / 16; // 0,1
+ const uint is = 2 * n + b; // 0..7
+ const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
+ const uint qhi = (iqs % 16) * 2; // 0,2,4..30
+
+ const uint8_t hm = uint8_t(1 << (iqs / 16));
+
+ const vec2 loadd = vec2(data_a[a_offset + ib].d);
+
+ const uint scidx0 = (is < 4) ? is : (is + 4);
+ const uint scidx1 = (is < 4) ? is : (is - 4);
+ const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint scidxshift1 = (is < 4) ? 0 : 2;
+ const uint mbidx0 = is + 4;
+ const uint mbidx1 = (is < 4) ? is + 4 : is;
+ const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ const uint mbidxshift0 = (is < 4) ? 0 : 4;
+ const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint mbidxshift1 = (is < 4) ? 0 : 2;
+
+ const uint8_t sc = uint8_t((data_a[a_offset + ib].scales[scidx0] & 0xF) | ((data_a[a_offset + ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+ const uint8_t mbyte = uint8_t(((data_a[a_offset + ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((data_a[a_offset + ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const float d = loadd.x * sc;
+ const float m = -loadd.y * mbyte;
+
+ return vec2(fma(d, float((data_a[a_offset + ib].qs[qsi ] >> (b * 4)) & 0xF) + float((data_a[a_offset + ib].qh[qhi ] & hm) != 0 ? 16 : 0), m),
+ fma(d, float((data_a[a_offset + ib].qs[qsi + 1] >> (b * 4)) & 0xF) + float((data_a[a_offset + ib].qh[qhi + 1] & hm) != 0 ? 16 : 0), m));
+}
+vec2 get_dm(uint ib, uint a_offset) {
+ return vec2(1, 0);
+}
+#endif
+
+#if defined(DATA_A_Q6_K)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ iqs /= 2;
+ const uint n = iqs / 64; // 0,1
+ const uint b = (iqs % 64) / 32; // 0,1
+ const uint is_b = (iqs % 16) / 8; // 0,1
+ const uint qhshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
+ const uint is = 8 * n + qhshift + is_b; // 0..15
+ const uint qsi = n * 64 + (iqs % 32) * 2; // 0,2,4..126
+ const uint qhi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
+
+ const float dscale = float(data_a[a_offset + ib].d) * float(data_a[a_offset + ib].scales[is]);
+
+ return vec2(dscale * float(int8_t(((data_a[a_offset + ib].ql[qsi ] >> (b * 4)) & 0xF) | (((data_a[a_offset + ib].qh[qhi ] >> qhshift) & 3) << 4)) - 32),
+ dscale * float(int8_t(((data_a[a_offset + ib].ql[qsi + 1] >> (b * 4)) & 0xF) | (((data_a[a_offset + ib].qh[qhi + 1] >> qhshift) & 3) << 4)) - 32));
+}
+vec2 get_dm(uint ib, uint a_offset) {
+ return vec2(1, 0);
+}
+#endif
+++ /dev/null
-
-#include "types.comp"
-
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ4_0 {
- block_q4_0_packed16 block;
-};
-
-float16_t dequantFuncQ4_0(const in decodeBufQ4_0 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- const float16_t d = bl.block.d;
- const uint idx = coordInBlock[1];
- const uint shift = (idx & 0x10) >> 2;
- uint32_t qs = uint32_t(bl.block.qs[(idx & 0xE) >> 1]);
- qs >>= shift;
- qs &= 0x0F0F;
- qs = unpack8(qs)[idx & 1];
- float16_t ret = (float16_t(qs) - float16_t(8)) * d;
- return ret;
-}
-
-layout(buffer_reference, std430, buffer_reference_align = 4) buffer decodeBufQ4_1 {
- block_q4_1 block;
-};
-
-float16_t dequantFuncQ4_1(const in decodeBufQ4_1 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- const float16_t d = bl.block.d;
- const float16_t m = bl.block.m;
- const uint idx = coordInBlock[1];
- const uint iqs = idx & 0xF;
- const uint shift = (idx & 0x10) >> 2;
- uint32_t qs = bl.block.qs[iqs];
- qs >>= shift;
- qs &= 0xF;
- float16_t ret = float16_t(qs) * d + m;
- return ret;
-}
-
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ5_0 {
- block_q5_0 block;
-};
-
-float16_t dequantFuncQ5_0(const in decodeBufQ5_0 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- const float16_t d = bl.block.d;
- const uint idx = coordInBlock[1];
- const uint iqs = idx & 0xF;
-
- const uint uint_qh = uint(bl.block.qh[1]) << 16 | bl.block.qh[0];
- const uint qh = ((uint_qh >> idx) << 4) & 0x10;
-
- const uint shift = (idx & 0x10) >> 2;
- uint32_t qs = bl.block.qs[iqs];
- qs >>= shift;
- qs &= 0xF;
-
- float16_t ret = (float16_t(qs | qh) - float16_t(16)) * d;
- return ret;
-}
-
-layout(buffer_reference, std430, buffer_reference_align = 8) buffer decodeBufQ5_1 {
- block_q5_1 block;
-};
-
-float16_t dequantFuncQ5_1(const in decodeBufQ5_1 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- const float16_t d = bl.block.d;
- const float16_t m = bl.block.m;
- const uint idx = coordInBlock[1];
- const uint iqs = idx & 0xF;
-
- const uint uint_qh = bl.block.qh;
- const uint qh = ((uint_qh >> idx) << 4) & 0x10;
-
- const uint shift = (idx & 0x10) >> 2;
- uint32_t qs = bl.block.qs[iqs];
- qs >>= shift;
- qs &= 0xF;
-
- float16_t ret = float16_t(qs | qh) * d + m;
- return ret;
-}
-
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ8_0 {
- block_q8_0_packed16 block;
-};
-
-float16_t dequantFuncQ8_0(const in decodeBufQ8_0 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- const float16_t d = bl.block.d;
- const uint idx = coordInBlock[1];
- const uint iqs = idx;
-
- // Load 16b and select the byte for this element
- int32_t qs = unpack8(bl.block.qs[(iqs & 0x1E) >> 1])[iqs & 1];
- float16_t ret = float16_t(qs) * d;
- return ret;
-}
-
-layout(buffer_reference, std430, buffer_reference_align = 4) buffer decodeBufQ2_K {
- block_q2_K block;
-};
-
-layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ2_K_packed16 {
- block_q2_K_packed16 block;
-};
-
-float16_t dequantFuncQ2_K(const in decodeBufQ2_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- decodeBufQ2_K_packed16 bl16 = decodeBufQ2_K_packed16(bl);
- const f16vec2 d = bl.block.d;
- const uint idx = coordInBlock[1];
-
- const uint scalesi = (idx & 0xF0) >> 4; // 0..15
- const uint qsshift = (idx & 0x60) >> 4; // 0,2,4,6
-
- uint qs = uint32_t(bl16.block.qs[((idx & 0x80) >> 3) + ((idx & 0x1E) >> 1)]);
- qs = (qs >> qsshift) & 0x0303;
- qs = unpack8(qs)[idx & 1];
-
- const uint scales = bl.block.scales[scalesi];
- float16_t ret = d.x * float16_t(scales & 0xF) * float16_t(qs) - d.y * float16_t(scales >> 4);
- return ret;
-}
-
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ3_K {
- block_q3_K block;
-};
-
-float16_t dequantFuncQ3_K(const in decodeBufQ3_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- const uint idx = coordInBlock[1];
- const uint iqs = idx;
-
- const uint n = iqs / 128; // 0,1
- const uint qsi = n * 32 + (iqs % 32); // 0..63
- const uint hmi = (iqs % 32); // 0..31
- const uint j = (iqs % 128) / 8; // 0..15
- const uint is = iqs / 16; // 0..15
- const uint halfsplit = ((iqs % 128) / 32); // 0,1,2,3
- const uint qsshift = halfsplit * 2; // 0,2,4,6
- const uint m = 1 << (4 * n + halfsplit); // 1,2,4,8,16,32,64,128
-
- uint32_t scaleidx0 = (is < 8) ? is : (is-8);
- uint32_t scaleidx0shift = (is < 8) ? 0 : 4;
- uint32_t scaleidx1 = is + 8 - (is/4)*4;
- uint32_t scaleidx1shift = (is/4)*2;
-
- const int8_t us = int8_t(((bl.block.scales[scaleidx0] >> scaleidx0shift) & 0xF) | (((bl.block.scales[scaleidx1] >> scaleidx1shift) & 3) << 4));
-
- const float16_t dl = bl.block.d * float16_t(us - 32);
-
- float16_t ret = dl * float16_t(int8_t((bl.block.qs[qsi ] >> qsshift) & 3) - (((bl.block.hmask[hmi ] & m) != 0) ? 0 : 4));
-
- return ret;
-}
-
-layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ4_K {
- block_q4_K block;
-};
-
-layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ4_K_packed16 {
- block_q4_K_packed16 block;
-};
-
-layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ4_K_packed128 {
- block_q4_K_packed128 block;
-};
-
-#if defined(IS_MUL_MM2)
-
-// For Q4_K and Q5_K in the mat-mul shader, we decode a tile's worth of scales
-// into shared memory and then process the whole tile using those scales.
-// There is a fetch function that loads into private variables and then a store
-// function that stores into shared memory.
-// Q4_K and Q5_K have the same encoding of scales, so everything is shared except
-// the part that fetches from the structure (which has a different block layout).
-#if defined(DATA_A_Q4_K) || defined(DATA_A_Q5_K)
-const uint shAscales_stride = (BM + 2);
-// 1 scale per 32 elements -> 8 scales per block, per row
-shared vec2 shAscales[8 * shAscales_stride];
-uvec4 row_v;
-#endif
-
-#if defined(DATA_A_Q4_K)
-layout (binding = 0) readonly buffer A_Q4_K_128 {block_q4_K_packed128 data_a_q4_k_packed128[];};
-
-void fetch_scalesQ4_K(uint ir_BM, uint pos_a, uint stride_a, uint block_k, uint tid, bool in_bounds)
-{
- uint tids_per_row = BLOCK_SIZE / BM;
- uint is_per_tid = 8 / tids_per_row;
- uint is_start = is_per_tid * (tid % tids_per_row);
- uint tid_row = tid / tids_per_row;
-
- uint row = ir_BM + tid_row;
- uint block_index = pos_a + row * stride_a + (block_k / QUANT_K);
- if (in_bounds || row < p.M) {
- row_v = data_a_q4_k_packed128[block_index].q4k[0];
- }
-}
-#endif
-#if defined(DATA_A_Q5_K)
-layout (binding = 0) readonly buffer A_Q5_K_128 {block_q5_K_packed128 data_a_q5_k_packed128[];};
-
-void fetch_scalesQ5_K(uint ir_BM, uint pos_a, uint stride_a, uint block_k, uint tid, bool in_bounds)
-{
- uint tids_per_row = BLOCK_SIZE / BM;
- uint is_per_tid = 8 / tids_per_row;
- uint is_start = is_per_tid * (tid % tids_per_row);
- uint tid_row = tid / tids_per_row;
-
- uint row = ir_BM + tid_row;
- uint block_index = pos_a + row * stride_a + (block_k / QUANT_K);
- if (in_bounds || row < p.M) {
- row_v = data_a_q5_k_packed128[block_index].q5k[0];
- }
-}
-#endif
-
-#if defined(DATA_A_Q4_K) || defined(DATA_A_Q5_K)
-void store_scalesQ4_K(uint tid)
-{
- barrier();
-
- uint tids_per_row = BLOCK_SIZE / BM;
- uint is_per_tid = 8 / tids_per_row;
- uint is_start = is_per_tid * (tid % tids_per_row);
- uint tid_row = tid / tids_per_row;
-
- [[unroll]] for (uint idx = 0; idx < is_per_tid; ++idx) {
- uint is = idx + is_start;
- uvec4 v = row_v;
- const vec2 loadd = vec2(unpackFloat2x16(v.x));
-
- uint32_t sc;
- uint32_t mbyte;
-
- uint32_t scale0 = v.y;
- uint32_t scale4 = v.z;
- uint32_t scale8 = v.w;
-
- uint32_t sc_lo = scale0;
- uint32_t mb_lo = scale4;
- uint32_t sc_hi = (scale8 & 0x0F0F0F0F) | ((scale0 & 0xC0C0C0C0) >> 2);
- uint32_t mb_hi = ((scale8 & 0xF0F0F0F0) >> 4) | ((scale4 & 0xC0C0C0C0) >> 2);
-
- sc = is < 4 ? sc_lo : sc_hi;
- mbyte = is < 4 ? mb_lo : mb_hi;
- sc = sc >> (8 * (is & 3));
- mbyte = mbyte >> (8 * (is & 3));
- sc &= 0x3F;
- mbyte &= 0x3F;
-
- const float d = loadd.x * float(sc);
- const float m = loadd.y * float(mbyte);
- shAscales[is * shAscales_stride + tid_row] = vec2(d,m);
- }
-
- barrier();
-}
-#endif
-
-#endif
-
-float16_t dequantFuncQ4_K(const in decodeBufQ4_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- decodeBufQ4_K_packed16 bl16 = decodeBufQ4_K_packed16(bl);
- decodeBufQ4_K_packed128 bl128 = decodeBufQ4_K_packed128(bl);
- const uint idx = coordInBlock[1];
-
- const uint b = (idx & 0x20) >> 5; // 0,1
- const uint is = (idx & 0xE0) >> 5; // 0..7
-
-#if defined(IS_MUL_MM2) && defined(DATA_A_Q4_K)
- vec2 v = shAscales[is * shAscales_stride + (blockCoords[0] % BM)];
- float d = v.x;
- float m = v.y;
-#else
- uvec4 v = bl128.block.q4k[0];
- const vec2 loadd = vec2(unpackFloat2x16(v.x));
-
- uint32_t sc;
- uint32_t mbyte;
-
- uint32_t scale0 = v.y;
- uint32_t scale4 = v.z;
- uint32_t scale8 = v.w;
-
- uint32_t sc_lo = scale0;
- uint32_t mb_lo = scale4;
- uint32_t sc_hi = (scale8 & 0x0F0F0F0F) | ((scale0 & 0xC0C0C0C0) >> 2);
- uint32_t mb_hi = ((scale8 & 0xF0F0F0F0) >> 4) | ((scale4 & 0xC0C0C0C0) >> 2);
-
- sc = is < 4 ? sc_lo : sc_hi;
- mbyte = is < 4 ? mb_lo : mb_hi;
- sc = sc >> (8 * (is & 3));
- mbyte = mbyte >> (8 * (is & 3));
- sc &= 0x3F;
- mbyte &= 0x3F;
-
- const float d = loadd.x * float(sc);
- const float m = loadd.y * float(mbyte);
-#endif
-
- uint qs = uint32_t(bl16.block.qs[((idx & 0xC0) >> 2) + ((idx & 0x1E) >> 1)]);
- qs = (qs >> (b * 4 + 8 * (idx & 1))) & 0xF;
-
- float ret = d * float(qs) - m;
-
- return float16_t(ret);
-}
-
-layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ5_K {
- block_q5_K block;
-};
-
-layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ5_K_packed16 {
- block_q5_K_packed16 block;
-};
-
-layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ5_K_packed128 {
- block_q5_K_packed128 block;
-};
-
-float16_t dequantFuncQ5_K(const in decodeBufQ5_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- decodeBufQ5_K_packed16 bl16 = decodeBufQ5_K_packed16(bl);
- decodeBufQ5_K_packed128 bl128 = decodeBufQ5_K_packed128(bl);
- const uint idx = coordInBlock[1];
-
- const uint b = (idx & 0x20) >> 5; // 0,1
- const uint is = (idx & 0xE0) >> 5; // 0..7
-
-#if defined(IS_MUL_MM2) && defined(DATA_A_Q5_K)
- vec2 v = shAscales[is * shAscales_stride + (blockCoords[0] % BM)];
- float d = v.x;
- float m = v.y;
-#else
- uvec4 v = bl128.block.q5k[0];
-
- const f16vec2 loadd = unpackFloat2x16(v.x);
-
- uint32_t sc;
- uint32_t mbyte;
-
- uint32_t scale0 = v.y;
- uint32_t scale4 = v.z;
- uint32_t scale8 = v.w;
-
- uint32_t sc_lo = scale0;
- uint32_t mb_lo = scale4;
- uint32_t sc_hi = (scale8 & 0x0F0F0F0F) | ((scale0 & 0xC0C0C0C0) >> 2);
- uint32_t mb_hi = ((scale8 & 0xF0F0F0F0) >> 4) | ((scale4 & 0xC0C0C0C0) >> 2);
-
- sc = is < 4 ? sc_lo : sc_hi;
- mbyte = is < 4 ? mb_lo : mb_hi;
- sc = sc >> (8 * (is & 3));
- mbyte = mbyte >> (8 * (is & 3));
- sc &= 0x3F;
- mbyte &= 0x3F;
-
- const float16_t d = loadd.x * float16_t(sc);
- const float16_t m = loadd.y * float16_t(mbyte);
-#endif
-
- uint qh = uint32_t(bl16.block.qh[(idx & 0x1E) >> 1]);
- qh = ((qh >> is) & 0x101) << 4;
-
- uint qs = uint32_t(bl16.block.qs[((idx & 0xC0) >> 2) + ((idx & 0x1E) >> 1)]);
- qs = (qs >> (b * 4)) & 0x0F0F;
- qs = unpack8(qs | qh)[idx & 1];
-
- float ret = d * float(qs) - m;
-
- return float16_t(ret);
-}
-
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ6_K {
- block_q6_K block;
-};
-
-layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ6_K_packed16 {
- block_q6_K_packed16 block;
-};
-
-float16_t dequantFuncQ6_K(const in decodeBufQ6_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- decodeBufQ6_K_packed16 bl16 = decodeBufQ6_K_packed16(bl);
- const uint idx = coordInBlock[1];
-
- const uint b = (idx & 0x40) >> 6; // 0,1
- const uint qhshift = (idx & 0x60) >> 4; // 0,2,4,6
- const uint is = (idx & 0xF0) >> 4; // 0..15
-
- const float16_t dscale = bl.block.d * float16_t(bl.block.scales[is]);
-
- uint ql = uint32_t(bl16.block.ql[((idx & 0x80) >> 2) + ((idx & 0x3E) >> 1)]);
- ql = (ql >> (b * 4)) & 0x0F0F;
-
- uint qh = uint32_t(bl16.block.qh[((idx & 0x80) >> 3) + ((idx & 0x1E) >> 1)]);
- qh = ((qh >> qhshift) & 0x0303) << 4;
-
- int q = unpack8(ql | qh)[idx & 1];
-
- float16_t ret = dscale * float16_t(q - 32);
-
- return ret;
-}
-
-#if defined(DATA_A_IQ1_S)
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ1_S {
- block_iq1_s block;
-};
-
-float16_t dequantFuncIQ1_S(const in decodeBufIQ1_S bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- const float16_t d = bl.block.d;
- const uint idx = coordInBlock[1];
-
- const uint ib32 = (idx & 0xE0) >> 5;
- const uint ib8 = (idx & 0xF8) >> 3;
-
- const uint qh = bl.block.qh[ib32];
- const uint qs = bl.block.qs[ib8];
- const float dl = d * float(2 * bitfieldExtract(qh, 12, 3) + 1);
- const float delta = ((qh & 0x8000) != 0) ? -IQ1S_DELTA : IQ1S_DELTA;
- const uint grid = iq1s_grid[qs | (bitfieldExtract(qh, 3 * int(ib8 & 3), 3) << 8)];
-
- float16_t ret = float16_t(dl) * (float16_t(bitfieldExtract(int(grid), 2 * int(idx % 8), 2)) + float16_t(delta));
- return ret;
-}
-#endif
-
-#if defined(DATA_A_IQ1_M)
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ1_M {
- block_iq1_m block;
-};
-
-layout(buffer_reference, std430, buffer_reference_align = 8) buffer decodeBufIQ1_M_packed64 {
- block_iq1_m_packed64 block;
-};
-
-float16_t dequantFuncIQ1_M(const in decodeBufIQ1_M bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- decodeBufIQ1_M_packed64 bl64 = decodeBufIQ1_M_packed64(bl);
- const uint idx = coordInBlock[1];
-
- uvec2 scales = unpack32(bl64.block.scales);
- const float16_t d = uint16BitsToHalf(uint16_t(((scales.x & 0xF000) >> 12) | ((scales.x & 0xF0000000) >> 24) | ((scales.y & 0xF000) >> 4) | ((scales.y & 0xF0000000) >> 16)));
-
- const uint ib8 = (idx & 0xF8) >> 3;
- const uint ib16 = (idx & 0xF0) >> 4;
- const int i8 = int(idx % 8);
- const uint sc = bl.block.scales[ib8 / 8];
- const uint qs = bl.block.qs[ib8];
- const uint qh = bl.block.qh[ib16] >> (4 * (ib8 & 1));
- const float dl = 2 * bitfieldExtract(sc, 3 * int(ib16 & 3), 3) + 1;
- const float delta = ((qh & 8) != 0) ? -IQ1S_DELTA : IQ1S_DELTA;
- const uint grid = iq1s_grid[qs | ((qh & 7) << 8)];
-
- float16_t ret = d * float16_t(dl) * (float16_t(bitfieldExtract(int(grid), 2 * i8, 2)) + float16_t(delta));
- return ret;
-}
-#endif
-
-#if defined(DATA_A_IQ2_XXS)
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2_XXS {
- block_iq2_xxs block;
-};
-
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2_XXS_packed16 {
- block_iq2_xxs_packed16 block;
-};
-
-float16_t dequantFuncIQ2_XXS(const in decodeBufIQ2_XXS bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- decodeBufIQ2_XXS_packed16 bl16 = decodeBufIQ2_XXS_packed16(bl);
- const float16_t d = bl.block.d;
- const uint idx = coordInBlock[1];
-
- const uint ib32 = (idx & 0xE0) >> 5; // 0..7
- const uint ib8 = (idx & 0x18) >> 3; // 0..3
- const uint iqs = 8 * ib32 + ib8;
-
- const uint qs = bl.block.qs[iqs];
- const uint signscale = pack32(u16vec2(bl16.block.qs[4*ib32+2], bl16.block.qs[4*ib32+3]));
-
- const float dscale = float(bl.block.d) * 0.25 * (0.5 + float(signscale >> 28));
- uint sign = bitfieldExtract(signscale, 7 * int(ib8), 7);
- sign |= bitCount(sign) << 7;
-
- uint g2 = iq2xxs_grid[qs][(idx & 4) >> 2];
- g2 >>= (idx & 2) * 8;
- const vec2 g = vec2(unpack8(g2));
-
- vec2 ret = dscale * g * ((sign & (1 << (idx & 7))) != 0 ? -1.0hf : 1.0hf);
- return float16_t(ret[idx & 1]);
-}
-#endif
-
-#if defined(DATA_A_IQ2_XS)
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2_XS {
- block_iq2_xs block;
-};
-
-float16_t dequantFuncIQ2_XS(const in decodeBufIQ2_XS bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- const float16_t d = bl.block.d;
- const uint idx = coordInBlock[1];
-
- const uint is = (idx & 0xE0) >> 5; // 0..8
- const uint sshift = (idx & 0x10) >> 2; // 0,4
- const uint iqs = (idx & 0xF8) >> 3; // 0..63
-
- const uint16_t qs = bl.block.qs[iqs];
- const float dscale = float(bl.block.d) * 0.25 * (0.5 + float((bl.block.scales[is] >> sshift) & 0xF));
-
- uint sign = uint(qs >> 9);
- sign |= bitCount(sign) << 7;
- uint g2 = iq2xs_grid[qs & 0x1FF][(idx & 4) >> 2];
- g2 >>= (idx & 2) * 8;
- const vec2 g = vec2(unpack8(g2));
-
- vec2 ret = dscale * g * ((sign & (1 << (idx & 7))) != 0 ? -1.0hf : 1.0hf);
- return float16_t(ret[idx & 1]);
-}
-#endif
-
-#if defined(DATA_A_IQ2_S)
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2_S {
- block_iq2_s block;
-};
-
-float16_t dequantFuncIQ2_S(const in decodeBufIQ2_S bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- uint idx = coordInBlock[1];
-
- const uint ib32 = (idx & 0xE0) >> 5; // 0..7
- const uint ib8 = (idx & 0xF8) >> 3; // 0..31
- const uint qhshift = 2 * (ib8 % 4);
-
- const uint scale = (bl.block.scales[ib32] >> ((idx & 0x10) >> 2)) & 0xf;
- const uint qs = bl.block.qs[ib8];
- const uint qh = bl.block.qh[ib32];
- const uint sign = bl.block.qs[QUANT_K / 8 + ib8] >> (idx & 0x6);
-
- const float d = float(bl.block.d);
- const float db = d * 0.25 * (0.5 + scale);
- const ivec2 sign01 = 1 - (2 & ivec2(sign << 1, sign));
- uint g2 = iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)][(idx & 4) >> 2];
- g2 >>= (idx & 2) * 8;
- const vec2 v = db * vec2(sign01) * vec2(unpack8(g2));
- return float16_t(v[idx & 1]);
-}
-#endif
-
-#if defined(DATA_A_IQ3_XXS)
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ3_XXS {
- block_iq3_xxs block;
-};
-
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ3_XXS_packed16 {
- block_iq3_xxs_packed16 block;
-};
-
-float16_t dequantFuncIQ3_XXS(const in decodeBufIQ3_XXS bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- decodeBufIQ3_XXS_packed16 bl16 = decodeBufIQ3_XXS_packed16(bl);
- uint idx = coordInBlock[1];
-
- const uint iqs = (idx & 0xFC) >> 2; // 0..63
- const uint is = QUANT_K / 4 + ((idx & 0xE0) >> 3);// 8 values
-
- const float d = float(bl.block.d);
- const uint qs = bl.block.qs[iqs];
- const uint signs = pack32(u16vec2(
- bl16.block.qs[is/2+0],
- bl16.block.qs[is/2+1]
- ));
- const float db = d * 0.5 * (0.5 + (signs >> 28));
- const uint32_t sign7 = bitfieldExtract(signs, 7 * (int(iqs / 2) % 4), 7);
- const uint sign = (sign7 | (bitCount(sign7) << 7)) >> (idx & 0x6);
- const ivec2 sign01 = ivec2(1 - (2 & ivec2(sign << 1, sign)));
- const uint grid = iq3xxs_grid[qs] >> (16 * ((idx & 2) >> 1));
- const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy);
- return float16_t(v[idx & 1]);
-}
-#endif
-
-#if defined(DATA_A_IQ3_S)
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ3_S {
- block_iq3_s block;
-};
-
-float16_t dequantFuncIQ3_S(const in decodeBufIQ3_S bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- uint idx = coordInBlock[1];
-
- const uint iqs = (idx & 0xFC) >> 2; // 0..63
- const uint iqh = (idx & 0xE0) >> 5;
-
- const float d = float(bl.block.d);
- const uint qs = bl.block.qs[iqs];
- const uint qh = bl.block.qh[iqh];
- const int8_t sign = int8_t(bl.block.signs[iqs / 2] >> (idx & 0x6));
- const uint scale = bl.block.scales[iqs / 16];
- const ivec2 sign01 = ivec2(1 - (2 & ivec2(sign << 1, sign)));
- const float db = d * (1 + 2 * ((scale >> (4 * (iqh & 1))) & 0xf));
- const uint32_t grid = iq3s_grid[qs | ((qh << (8 - (iqs % 8))) & 256)] >> ((idx & 2) << 3);
- const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy);
-
- return float16_t(v[idx & 1]);
-}
-#endif
-
-#if defined(DATA_A_IQ4_XS)
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ4_XS {
- block_iq4_xs block;
-};
-
-float16_t dequantFuncIQ4_XS(const in decodeBufIQ4_XS bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- const float16_t d = bl.block.d;
- const uint idx = coordInBlock[1];
-
- const uint ib32 = (idx & 0xE0) >> 5; // 0..7
-
- const uint sl = (bl.block.scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF;
- const uint sh = ((bl.block.scales_h) >> (2 * ib32)) & 3;
- const uint qshift = (idx & 16) >> 2;
- const uint q = (bl.block.qs[16 * ib32 + (idx % 16)] >> qshift) & 0xF;
-
- float16_t ret = d * float16_t(int(sl | (sh << 4)) - 32) * float16_t(kvalues_iq4nl[q]);
- return ret;
-}
-#endif
-
-#if defined(DATA_A_IQ4_NL)
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ4_NL {
- block_iq4_nl block;
-};
-
-float16_t dequantFuncIQ4_NL(const in decodeBufIQ4_NL bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- const float16_t d = bl.block.d;
- const uint idx = coordInBlock[1];
- const uint iqs = idx & 0xF;
- const uint shift = (idx & 0x10) >> 2;
- uint32_t qs = bl.block.qs[iqs];
- qs >>= shift;
- qs &= 0xF;
- float16_t ret = float16_t(kvalues_iq4nl[qs]) * d;
- return ret;
-}
-#endif
-
-#if defined(DATA_A_MXFP4)
-layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufMXFP4 {
- block_mxfp4 block;
-};
-
-float16_t dequantFuncMXFP4(const in decodeBufMXFP4 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
-{
- const float d = e8m0_to_fp32(bl.block.e);
- const uint idx = coordInBlock[1];
- const uint iqs = idx & 0xF;
- const uint shift = (idx & 0x10) >> 2;
- uint32_t qs = bl.block.qs[iqs];
- qs >>= shift;
- qs &= 0xF;
- float16_t ret = float16_t(kvalues_mxfp4[qs] * d);
- return ret;
-}
-#endif
-
-#if defined(DATA_A_Q4_0)
-#define dequantFuncA dequantFuncQ4_0
-#elif defined(DATA_A_Q4_1)
-#define dequantFuncA dequantFuncQ4_1
-#elif defined(DATA_A_Q5_0)
-#define dequantFuncA dequantFuncQ5_0
-#elif defined(DATA_A_Q5_1)
-#define dequantFuncA dequantFuncQ5_1
-#elif defined(DATA_A_Q8_0)
-#define dequantFuncA dequantFuncQ8_0
-#elif defined(DATA_A_Q2_K)
-#define dequantFuncA dequantFuncQ2_K
-#elif defined(DATA_A_Q3_K)
-#define dequantFuncA dequantFuncQ3_K
-#elif defined(DATA_A_Q4_K)
-#define dequantFuncA dequantFuncQ4_K
-#define fetch_scales fetch_scalesQ4_K
-#define store_scales store_scalesQ4_K
-#elif defined(DATA_A_Q5_K)
-#define dequantFuncA dequantFuncQ5_K
-#define fetch_scales fetch_scalesQ5_K
-#define store_scales store_scalesQ4_K
-#elif defined(DATA_A_Q6_K)
-#define dequantFuncA dequantFuncQ6_K
-#elif defined(DATA_A_IQ1_S)
-#define dequantFuncA dequantFuncIQ1_S
-#elif defined(DATA_A_IQ1_M)
-#define dequantFuncA dequantFuncIQ1_M
-#elif defined(DATA_A_IQ2_XXS)
-#define dequantFuncA dequantFuncIQ2_XXS
-#elif defined(DATA_A_IQ2_XS)
-#define dequantFuncA dequantFuncIQ2_XS
-#elif defined(DATA_A_IQ2_S)
-#define dequantFuncA dequantFuncIQ2_S
-#elif defined(DATA_A_IQ3_XXS)
-#define dequantFuncA dequantFuncIQ3_XXS
-#elif defined(DATA_A_IQ3_S)
-#define dequantFuncA dequantFuncIQ3_S
-#elif defined(DATA_A_IQ4_XS)
-#define dequantFuncA dequantFuncIQ4_XS
-#elif defined(DATA_A_IQ4_NL)
-#define dequantFuncA dequantFuncIQ4_NL
-#elif defined(DATA_A_MXFP4)
-#define dequantFuncA dequantFuncMXFP4
-#endif
--- /dev/null
+
+#include "types.glsl"
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ4_0 {
+ block_q4_0_packed16 block;
+};
+
+float16_t dequantFuncQ4_0(const in decodeBufQ4_0 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const uint idx = coordInBlock[1];
+ const uint shift = (idx & 0x10) >> 2;
+ uint32_t qs = uint32_t(bl.block.qs[(idx & 0xE) >> 1]);
+ qs >>= shift;
+ qs &= 0x0F0F;
+ qs = unpack8(qs)[idx & 1];
+ float16_t ret = (float16_t(qs) - float16_t(8)) * d;
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 4) buffer decodeBufQ4_1 {
+ block_q4_1 block;
+};
+
+float16_t dequantFuncQ4_1(const in decodeBufQ4_1 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const float16_t m = bl.block.m;
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx & 0xF;
+ const uint shift = (idx & 0x10) >> 2;
+ uint32_t qs = bl.block.qs[iqs];
+ qs >>= shift;
+ qs &= 0xF;
+ float16_t ret = float16_t(qs) * d + m;
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ5_0 {
+ block_q5_0 block;
+};
+
+float16_t dequantFuncQ5_0(const in decodeBufQ5_0 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx & 0xF;
+
+ const uint uint_qh = uint(bl.block.qh[1]) << 16 | bl.block.qh[0];
+ const uint qh = ((uint_qh >> idx) << 4) & 0x10;
+
+ const uint shift = (idx & 0x10) >> 2;
+ uint32_t qs = bl.block.qs[iqs];
+ qs >>= shift;
+ qs &= 0xF;
+
+ float16_t ret = (float16_t(qs | qh) - float16_t(16)) * d;
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 8) buffer decodeBufQ5_1 {
+ block_q5_1 block;
+};
+
+float16_t dequantFuncQ5_1(const in decodeBufQ5_1 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const float16_t m = bl.block.m;
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx & 0xF;
+
+ const uint uint_qh = bl.block.qh;
+ const uint qh = ((uint_qh >> idx) << 4) & 0x10;
+
+ const uint shift = (idx & 0x10) >> 2;
+ uint32_t qs = bl.block.qs[iqs];
+ qs >>= shift;
+ qs &= 0xF;
+
+ float16_t ret = float16_t(qs | qh) * d + m;
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ8_0 {
+ block_q8_0_packed16 block;
+};
+
+float16_t dequantFuncQ8_0(const in decodeBufQ8_0 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx;
+
+ // Load 16b and select the byte for this element
+ int32_t qs = unpack8(bl.block.qs[(iqs & 0x1E) >> 1])[iqs & 1];
+ float16_t ret = float16_t(qs) * d;
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 4) buffer decodeBufQ2_K {
+ block_q2_K block;
+};
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ2_K_packed16 {
+ block_q2_K_packed16 block;
+};
+
+float16_t dequantFuncQ2_K(const in decodeBufQ2_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ decodeBufQ2_K_packed16 bl16 = decodeBufQ2_K_packed16(bl);
+ const f16vec2 d = bl.block.d;
+ const uint idx = coordInBlock[1];
+
+ const uint scalesi = (idx & 0xF0) >> 4; // 0..15
+ const uint qsshift = (idx & 0x60) >> 4; // 0,2,4,6
+
+ uint qs = uint32_t(bl16.block.qs[((idx & 0x80) >> 3) + ((idx & 0x1E) >> 1)]);
+ qs = (qs >> qsshift) & 0x0303;
+ qs = unpack8(qs)[idx & 1];
+
+ const uint scales = bl.block.scales[scalesi];
+ float16_t ret = d.x * float16_t(scales & 0xF) * float16_t(qs) - d.y * float16_t(scales >> 4);
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ3_K {
+ block_q3_K block;
+};
+
+float16_t dequantFuncQ3_K(const in decodeBufQ3_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx;
+
+ const uint n = iqs / 128; // 0,1
+ const uint qsi = n * 32 + (iqs % 32); // 0..63
+ const uint hmi = (iqs % 32); // 0..31
+ const uint j = (iqs % 128) / 8; // 0..15
+ const uint is = iqs / 16; // 0..15
+ const uint halfsplit = ((iqs % 128) / 32); // 0,1,2,3
+ const uint qsshift = halfsplit * 2; // 0,2,4,6
+ const uint m = 1 << (4 * n + halfsplit); // 1,2,4,8,16,32,64,128
+
+ uint32_t scaleidx0 = (is < 8) ? is : (is-8);
+ uint32_t scaleidx0shift = (is < 8) ? 0 : 4;
+ uint32_t scaleidx1 = is + 8 - (is/4)*4;
+ uint32_t scaleidx1shift = (is/4)*2;
+
+ const int8_t us = int8_t(((bl.block.scales[scaleidx0] >> scaleidx0shift) & 0xF) | (((bl.block.scales[scaleidx1] >> scaleidx1shift) & 3) << 4));
+
+ const float16_t dl = bl.block.d * float16_t(us - 32);
+
+ float16_t ret = dl * float16_t(int8_t((bl.block.qs[qsi ] >> qsshift) & 3) - (((bl.block.hmask[hmi ] & m) != 0) ? 0 : 4));
+
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ4_K {
+ block_q4_K block;
+};
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ4_K_packed16 {
+ block_q4_K_packed16 block;
+};
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ4_K_packed128 {
+ block_q4_K_packed128 block;
+};
+
+#if defined(IS_MUL_MM2)
+
+// For Q4_K and Q5_K in the mat-mul shader, we decode a tile's worth of scales
+// into shared memory and then process the whole tile using those scales.
+// There is a fetch function that loads into private variables and then a store
+// function that stores into shared memory.
+// Q4_K and Q5_K have the same encoding of scales, so everything is shared except
+// the part that fetches from the structure (which has a different block layout).
+#if defined(DATA_A_Q4_K) || defined(DATA_A_Q5_K)
+const uint shAscales_stride = (BM + 2);
+// 1 scale per 32 elements -> 8 scales per block, per row
+shared vec2 shAscales[8 * shAscales_stride];
+uvec4 row_v;
+#endif
+
+#if defined(DATA_A_Q4_K)
+layout (binding = 0) readonly buffer A_Q4_K_128 {block_q4_K_packed128 data_a_q4_k_packed128[];};
+
+void fetch_scalesQ4_K(uint ir_BM, uint pos_a, uint stride_a, uint block_k, uint tid, bool in_bounds)
+{
+ uint tids_per_row = BLOCK_SIZE / BM;
+ uint is_per_tid = 8 / tids_per_row;
+ uint is_start = is_per_tid * (tid % tids_per_row);
+ uint tid_row = tid / tids_per_row;
+
+ uint row = ir_BM + tid_row;
+ uint block_index = pos_a + row * stride_a + (block_k / QUANT_K);
+ if (in_bounds || row < p.M) {
+ row_v = data_a_q4_k_packed128[block_index].q4k[0];
+ }
+}
+#endif
+#if defined(DATA_A_Q5_K)
+layout (binding = 0) readonly buffer A_Q5_K_128 {block_q5_K_packed128 data_a_q5_k_packed128[];};
+
+void fetch_scalesQ5_K(uint ir_BM, uint pos_a, uint stride_a, uint block_k, uint tid, bool in_bounds)
+{
+ uint tids_per_row = BLOCK_SIZE / BM;
+ uint is_per_tid = 8 / tids_per_row;
+ uint is_start = is_per_tid * (tid % tids_per_row);
+ uint tid_row = tid / tids_per_row;
+
+ uint row = ir_BM + tid_row;
+ uint block_index = pos_a + row * stride_a + (block_k / QUANT_K);
+ if (in_bounds || row < p.M) {
+ row_v = data_a_q5_k_packed128[block_index].q5k[0];
+ }
+}
+#endif
+
+#if defined(DATA_A_Q4_K) || defined(DATA_A_Q5_K)
+void store_scalesQ4_K(uint tid)
+{
+ barrier();
+
+ uint tids_per_row = BLOCK_SIZE / BM;
+ uint is_per_tid = 8 / tids_per_row;
+ uint is_start = is_per_tid * (tid % tids_per_row);
+ uint tid_row = tid / tids_per_row;
+
+ [[unroll]] for (uint idx = 0; idx < is_per_tid; ++idx) {
+ uint is = idx + is_start;
+ uvec4 v = row_v;
+ const vec2 loadd = vec2(unpackFloat2x16(v.x));
+
+ uint32_t sc;
+ uint32_t mbyte;
+
+ uint32_t scale0 = v.y;
+ uint32_t scale4 = v.z;
+ uint32_t scale8 = v.w;
+
+ uint32_t sc_lo = scale0;
+ uint32_t mb_lo = scale4;
+ uint32_t sc_hi = (scale8 & 0x0F0F0F0F) | ((scale0 & 0xC0C0C0C0) >> 2);
+ uint32_t mb_hi = ((scale8 & 0xF0F0F0F0) >> 4) | ((scale4 & 0xC0C0C0C0) >> 2);
+
+ sc = is < 4 ? sc_lo : sc_hi;
+ mbyte = is < 4 ? mb_lo : mb_hi;
+ sc = sc >> (8 * (is & 3));
+ mbyte = mbyte >> (8 * (is & 3));
+ sc &= 0x3F;
+ mbyte &= 0x3F;
+
+ const float d = loadd.x * float(sc);
+ const float m = loadd.y * float(mbyte);
+ shAscales[is * shAscales_stride + tid_row] = vec2(d,m);
+ }
+
+ barrier();
+}
+#endif
+
+#endif
+
+float16_t dequantFuncQ4_K(const in decodeBufQ4_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ decodeBufQ4_K_packed16 bl16 = decodeBufQ4_K_packed16(bl);
+ decodeBufQ4_K_packed128 bl128 = decodeBufQ4_K_packed128(bl);
+ const uint idx = coordInBlock[1];
+
+ const uint b = (idx & 0x20) >> 5; // 0,1
+ const uint is = (idx & 0xE0) >> 5; // 0..7
+
+#if defined(IS_MUL_MM2) && defined(DATA_A_Q4_K)
+ vec2 v = shAscales[is * shAscales_stride + (blockCoords[0] % BM)];
+ float d = v.x;
+ float m = v.y;
+#else
+ uvec4 v = bl128.block.q4k[0];
+ const vec2 loadd = vec2(unpackFloat2x16(v.x));
+
+ uint32_t sc;
+ uint32_t mbyte;
+
+ uint32_t scale0 = v.y;
+ uint32_t scale4 = v.z;
+ uint32_t scale8 = v.w;
+
+ uint32_t sc_lo = scale0;
+ uint32_t mb_lo = scale4;
+ uint32_t sc_hi = (scale8 & 0x0F0F0F0F) | ((scale0 & 0xC0C0C0C0) >> 2);
+ uint32_t mb_hi = ((scale8 & 0xF0F0F0F0) >> 4) | ((scale4 & 0xC0C0C0C0) >> 2);
+
+ sc = is < 4 ? sc_lo : sc_hi;
+ mbyte = is < 4 ? mb_lo : mb_hi;
+ sc = sc >> (8 * (is & 3));
+ mbyte = mbyte >> (8 * (is & 3));
+ sc &= 0x3F;
+ mbyte &= 0x3F;
+
+ const float d = loadd.x * float(sc);
+ const float m = loadd.y * float(mbyte);
+#endif
+
+ uint qs = uint32_t(bl16.block.qs[((idx & 0xC0) >> 2) + ((idx & 0x1E) >> 1)]);
+ qs = (qs >> (b * 4 + 8 * (idx & 1))) & 0xF;
+
+ float ret = d * float(qs) - m;
+
+ return float16_t(ret);
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ5_K {
+ block_q5_K block;
+};
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ5_K_packed16 {
+ block_q5_K_packed16 block;
+};
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ5_K_packed128 {
+ block_q5_K_packed128 block;
+};
+
+float16_t dequantFuncQ5_K(const in decodeBufQ5_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ decodeBufQ5_K_packed16 bl16 = decodeBufQ5_K_packed16(bl);
+ decodeBufQ5_K_packed128 bl128 = decodeBufQ5_K_packed128(bl);
+ const uint idx = coordInBlock[1];
+
+ const uint b = (idx & 0x20) >> 5; // 0,1
+ const uint is = (idx & 0xE0) >> 5; // 0..7
+
+#if defined(IS_MUL_MM2) && defined(DATA_A_Q5_K)
+ vec2 v = shAscales[is * shAscales_stride + (blockCoords[0] % BM)];
+ float d = v.x;
+ float m = v.y;
+#else
+ uvec4 v = bl128.block.q5k[0];
+
+ const f16vec2 loadd = unpackFloat2x16(v.x);
+
+ uint32_t sc;
+ uint32_t mbyte;
+
+ uint32_t scale0 = v.y;
+ uint32_t scale4 = v.z;
+ uint32_t scale8 = v.w;
+
+ uint32_t sc_lo = scale0;
+ uint32_t mb_lo = scale4;
+ uint32_t sc_hi = (scale8 & 0x0F0F0F0F) | ((scale0 & 0xC0C0C0C0) >> 2);
+ uint32_t mb_hi = ((scale8 & 0xF0F0F0F0) >> 4) | ((scale4 & 0xC0C0C0C0) >> 2);
+
+ sc = is < 4 ? sc_lo : sc_hi;
+ mbyte = is < 4 ? mb_lo : mb_hi;
+ sc = sc >> (8 * (is & 3));
+ mbyte = mbyte >> (8 * (is & 3));
+ sc &= 0x3F;
+ mbyte &= 0x3F;
+
+ const float16_t d = loadd.x * float16_t(sc);
+ const float16_t m = loadd.y * float16_t(mbyte);
+#endif
+
+ uint qh = uint32_t(bl16.block.qh[(idx & 0x1E) >> 1]);
+ qh = ((qh >> is) & 0x101) << 4;
+
+ uint qs = uint32_t(bl16.block.qs[((idx & 0xC0) >> 2) + ((idx & 0x1E) >> 1)]);
+ qs = (qs >> (b * 4)) & 0x0F0F;
+ qs = unpack8(qs | qh)[idx & 1];
+
+ float ret = d * float(qs) - m;
+
+ return float16_t(ret);
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ6_K {
+ block_q6_K block;
+};
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ6_K_packed16 {
+ block_q6_K_packed16 block;
+};
+
+float16_t dequantFuncQ6_K(const in decodeBufQ6_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ decodeBufQ6_K_packed16 bl16 = decodeBufQ6_K_packed16(bl);
+ const uint idx = coordInBlock[1];
+
+ const uint b = (idx & 0x40) >> 6; // 0,1
+ const uint qhshift = (idx & 0x60) >> 4; // 0,2,4,6
+ const uint is = (idx & 0xF0) >> 4; // 0..15
+
+ const float16_t dscale = bl.block.d * float16_t(bl.block.scales[is]);
+
+ uint ql = uint32_t(bl16.block.ql[((idx & 0x80) >> 2) + ((idx & 0x3E) >> 1)]);
+ ql = (ql >> (b * 4)) & 0x0F0F;
+
+ uint qh = uint32_t(bl16.block.qh[((idx & 0x80) >> 3) + ((idx & 0x1E) >> 1)]);
+ qh = ((qh >> qhshift) & 0x0303) << 4;
+
+ int q = unpack8(ql | qh)[idx & 1];
+
+ float16_t ret = dscale * float16_t(q - 32);
+
+ return ret;
+}
+
+#if defined(DATA_A_IQ1_S)
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ1_S {
+ block_iq1_s block;
+};
+
+float16_t dequantFuncIQ1_S(const in decodeBufIQ1_S bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const uint idx = coordInBlock[1];
+
+ const uint ib32 = (idx & 0xE0) >> 5;
+ const uint ib8 = (idx & 0xF8) >> 3;
+
+ const uint qh = bl.block.qh[ib32];
+ const uint qs = bl.block.qs[ib8];
+ const float dl = d * float(2 * bitfieldExtract(qh, 12, 3) + 1);
+ const float delta = ((qh & 0x8000) != 0) ? -IQ1S_DELTA : IQ1S_DELTA;
+ const uint grid = iq1s_grid[qs | (bitfieldExtract(qh, 3 * int(ib8 & 3), 3) << 8)];
+
+ float16_t ret = float16_t(dl) * (float16_t(bitfieldExtract(int(grid), 2 * int(idx % 8), 2)) + float16_t(delta));
+ return ret;
+}
+#endif
+
+#if defined(DATA_A_IQ1_M)
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ1_M {
+ block_iq1_m block;
+};
+
+layout(buffer_reference, std430, buffer_reference_align = 8) buffer decodeBufIQ1_M_packed64 {
+ block_iq1_m_packed64 block;
+};
+
+float16_t dequantFuncIQ1_M(const in decodeBufIQ1_M bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ decodeBufIQ1_M_packed64 bl64 = decodeBufIQ1_M_packed64(bl);
+ const uint idx = coordInBlock[1];
+
+ uvec2 scales = unpack32(bl64.block.scales);
+ const float16_t d = uint16BitsToHalf(uint16_t(((scales.x & 0xF000) >> 12) | ((scales.x & 0xF0000000) >> 24) | ((scales.y & 0xF000) >> 4) | ((scales.y & 0xF0000000) >> 16)));
+
+ const uint ib8 = (idx & 0xF8) >> 3;
+ const uint ib16 = (idx & 0xF0) >> 4;
+ const int i8 = int(idx % 8);
+ const uint sc = bl.block.scales[ib8 / 8];
+ const uint qs = bl.block.qs[ib8];
+ const uint qh = bl.block.qh[ib16] >> (4 * (ib8 & 1));
+ const float dl = 2 * bitfieldExtract(sc, 3 * int(ib16 & 3), 3) + 1;
+ const float delta = ((qh & 8) != 0) ? -IQ1S_DELTA : IQ1S_DELTA;
+ const uint grid = iq1s_grid[qs | ((qh & 7) << 8)];
+
+ float16_t ret = d * float16_t(dl) * (float16_t(bitfieldExtract(int(grid), 2 * i8, 2)) + float16_t(delta));
+ return ret;
+}
+#endif
+
+#if defined(DATA_A_IQ2_XXS)
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2_XXS {
+ block_iq2_xxs block;
+};
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2_XXS_packed16 {
+ block_iq2_xxs_packed16 block;
+};
+
+float16_t dequantFuncIQ2_XXS(const in decodeBufIQ2_XXS bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ decodeBufIQ2_XXS_packed16 bl16 = decodeBufIQ2_XXS_packed16(bl);
+ const float16_t d = bl.block.d;
+ const uint idx = coordInBlock[1];
+
+ const uint ib32 = (idx & 0xE0) >> 5; // 0..7
+ const uint ib8 = (idx & 0x18) >> 3; // 0..3
+ const uint iqs = 8 * ib32 + ib8;
+
+ const uint qs = bl.block.qs[iqs];
+ const uint signscale = pack32(u16vec2(bl16.block.qs[4*ib32+2], bl16.block.qs[4*ib32+3]));
+
+ const float dscale = float(bl.block.d) * 0.25 * (0.5 + float(signscale >> 28));
+ uint sign = bitfieldExtract(signscale, 7 * int(ib8), 7);
+ sign |= bitCount(sign) << 7;
+
+ uint g2 = iq2xxs_grid[qs][(idx & 4) >> 2];
+ g2 >>= (idx & 2) * 8;
+ const vec2 g = vec2(unpack8(g2));
+
+ vec2 ret = dscale * g * ((sign & (1 << (idx & 7))) != 0 ? -1.0hf : 1.0hf);
+ return float16_t(ret[idx & 1]);
+}
+#endif
+
+#if defined(DATA_A_IQ2_XS)
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2_XS {
+ block_iq2_xs block;
+};
+
+float16_t dequantFuncIQ2_XS(const in decodeBufIQ2_XS bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const uint idx = coordInBlock[1];
+
+ const uint is = (idx & 0xE0) >> 5; // 0..8
+ const uint sshift = (idx & 0x10) >> 2; // 0,4
+ const uint iqs = (idx & 0xF8) >> 3; // 0..63
+
+ const uint16_t qs = bl.block.qs[iqs];
+ const float dscale = float(bl.block.d) * 0.25 * (0.5 + float((bl.block.scales[is] >> sshift) & 0xF));
+
+ uint sign = uint(qs >> 9);
+ sign |= bitCount(sign) << 7;
+ uint g2 = iq2xs_grid[qs & 0x1FF][(idx & 4) >> 2];
+ g2 >>= (idx & 2) * 8;
+ const vec2 g = vec2(unpack8(g2));
+
+ vec2 ret = dscale * g * ((sign & (1 << (idx & 7))) != 0 ? -1.0hf : 1.0hf);
+ return float16_t(ret[idx & 1]);
+}
+#endif
+
+#if defined(DATA_A_IQ2_S)
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2_S {
+ block_iq2_s block;
+};
+
+float16_t dequantFuncIQ2_S(const in decodeBufIQ2_S bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ uint idx = coordInBlock[1];
+
+ const uint ib32 = (idx & 0xE0) >> 5; // 0..7
+ const uint ib8 = (idx & 0xF8) >> 3; // 0..31
+ const uint qhshift = 2 * (ib8 % 4);
+
+ const uint scale = (bl.block.scales[ib32] >> ((idx & 0x10) >> 2)) & 0xf;
+ const uint qs = bl.block.qs[ib8];
+ const uint qh = bl.block.qh[ib32];
+ const uint sign = bl.block.qs[QUANT_K / 8 + ib8] >> (idx & 0x6);
+
+ const float d = float(bl.block.d);
+ const float db = d * 0.25 * (0.5 + scale);
+ const ivec2 sign01 = 1 - (2 & ivec2(sign << 1, sign));
+ uint g2 = iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)][(idx & 4) >> 2];
+ g2 >>= (idx & 2) * 8;
+ const vec2 v = db * vec2(sign01) * vec2(unpack8(g2));
+ return float16_t(v[idx & 1]);
+}
+#endif
+
+#if defined(DATA_A_IQ3_XXS)
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ3_XXS {
+ block_iq3_xxs block;
+};
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ3_XXS_packed16 {
+ block_iq3_xxs_packed16 block;
+};
+
+float16_t dequantFuncIQ3_XXS(const in decodeBufIQ3_XXS bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ decodeBufIQ3_XXS_packed16 bl16 = decodeBufIQ3_XXS_packed16(bl);
+ uint idx = coordInBlock[1];
+
+ const uint iqs = (idx & 0xFC) >> 2; // 0..63
+ const uint is = QUANT_K / 4 + ((idx & 0xE0) >> 3);// 8 values
+
+ const float d = float(bl.block.d);
+ const uint qs = bl.block.qs[iqs];
+ const uint signs = pack32(u16vec2(
+ bl16.block.qs[is/2+0],
+ bl16.block.qs[is/2+1]
+ ));
+ const float db = d * 0.5 * (0.5 + (signs >> 28));
+ const uint32_t sign7 = bitfieldExtract(signs, 7 * (int(iqs / 2) % 4), 7);
+ const uint sign = (sign7 | (bitCount(sign7) << 7)) >> (idx & 0x6);
+ const ivec2 sign01 = ivec2(1 - (2 & ivec2(sign << 1, sign)));
+ const uint grid = iq3xxs_grid[qs] >> (16 * ((idx & 2) >> 1));
+ const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy);
+ return float16_t(v[idx & 1]);
+}
+#endif
+
+#if defined(DATA_A_IQ3_S)
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ3_S {
+ block_iq3_s block;
+};
+
+float16_t dequantFuncIQ3_S(const in decodeBufIQ3_S bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ uint idx = coordInBlock[1];
+
+ const uint iqs = (idx & 0xFC) >> 2; // 0..63
+ const uint iqh = (idx & 0xE0) >> 5;
+
+ const float d = float(bl.block.d);
+ const uint qs = bl.block.qs[iqs];
+ const uint qh = bl.block.qh[iqh];
+ const int8_t sign = int8_t(bl.block.signs[iqs / 2] >> (idx & 0x6));
+ const uint scale = bl.block.scales[iqs / 16];
+ const ivec2 sign01 = ivec2(1 - (2 & ivec2(sign << 1, sign)));
+ const float db = d * (1 + 2 * ((scale >> (4 * (iqh & 1))) & 0xf));
+ const uint32_t grid = iq3s_grid[qs | ((qh << (8 - (iqs % 8))) & 256)] >> ((idx & 2) << 3);
+ const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy);
+
+ return float16_t(v[idx & 1]);
+}
+#endif
+
+#if defined(DATA_A_IQ4_XS)
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ4_XS {
+ block_iq4_xs block;
+};
+
+float16_t dequantFuncIQ4_XS(const in decodeBufIQ4_XS bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const uint idx = coordInBlock[1];
+
+ const uint ib32 = (idx & 0xE0) >> 5; // 0..7
+
+ const uint sl = (bl.block.scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF;
+ const uint sh = ((bl.block.scales_h) >> (2 * ib32)) & 3;
+ const uint qshift = (idx & 16) >> 2;
+ const uint q = (bl.block.qs[16 * ib32 + (idx % 16)] >> qshift) & 0xF;
+
+ float16_t ret = d * float16_t(int(sl | (sh << 4)) - 32) * float16_t(kvalues_iq4nl[q]);
+ return ret;
+}
+#endif
+
+#if defined(DATA_A_IQ4_NL)
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ4_NL {
+ block_iq4_nl block;
+};
+
+float16_t dequantFuncIQ4_NL(const in decodeBufIQ4_NL bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx & 0xF;
+ const uint shift = (idx & 0x10) >> 2;
+ uint32_t qs = bl.block.qs[iqs];
+ qs >>= shift;
+ qs &= 0xF;
+ float16_t ret = float16_t(kvalues_iq4nl[qs]) * d;
+ return ret;
+}
+#endif
+
+#if defined(DATA_A_MXFP4)
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufMXFP4 {
+ block_mxfp4 block;
+};
+
+float16_t dequantFuncMXFP4(const in decodeBufMXFP4 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float d = e8m0_to_fp32(bl.block.e);
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx & 0xF;
+ const uint shift = (idx & 0x10) >> 2;
+ uint32_t qs = bl.block.qs[iqs];
+ qs >>= shift;
+ qs &= 0xF;
+ float16_t ret = float16_t(kvalues_mxfp4[qs] * d);
+ return ret;
+}
+#endif
+
+#if defined(DATA_A_Q4_0)
+#define dequantFuncA dequantFuncQ4_0
+#elif defined(DATA_A_Q4_1)
+#define dequantFuncA dequantFuncQ4_1
+#elif defined(DATA_A_Q5_0)
+#define dequantFuncA dequantFuncQ5_0
+#elif defined(DATA_A_Q5_1)
+#define dequantFuncA dequantFuncQ5_1
+#elif defined(DATA_A_Q8_0)
+#define dequantFuncA dequantFuncQ8_0
+#elif defined(DATA_A_Q2_K)
+#define dequantFuncA dequantFuncQ2_K
+#elif defined(DATA_A_Q3_K)
+#define dequantFuncA dequantFuncQ3_K
+#elif defined(DATA_A_Q4_K)
+#define dequantFuncA dequantFuncQ4_K
+#define fetch_scales fetch_scalesQ4_K
+#define store_scales store_scalesQ4_K
+#elif defined(DATA_A_Q5_K)
+#define dequantFuncA dequantFuncQ5_K
+#define fetch_scales fetch_scalesQ5_K
+#define store_scales store_scalesQ4_K
+#elif defined(DATA_A_Q6_K)
+#define dequantFuncA dequantFuncQ6_K
+#elif defined(DATA_A_IQ1_S)
+#define dequantFuncA dequantFuncIQ1_S
+#elif defined(DATA_A_IQ1_M)
+#define dequantFuncA dequantFuncIQ1_M
+#elif defined(DATA_A_IQ2_XXS)
+#define dequantFuncA dequantFuncIQ2_XXS
+#elif defined(DATA_A_IQ2_XS)
+#define dequantFuncA dequantFuncIQ2_XS
+#elif defined(DATA_A_IQ2_S)
+#define dequantFuncA dequantFuncIQ2_S
+#elif defined(DATA_A_IQ3_XXS)
+#define dequantFuncA dequantFuncIQ3_XXS
+#elif defined(DATA_A_IQ3_S)
+#define dequantFuncA dequantFuncIQ3_S
+#elif defined(DATA_A_IQ4_XS)
+#define dequantFuncA dequantFuncIQ4_XS
+#elif defined(DATA_A_IQ4_NL)
+#define dequantFuncA dequantFuncIQ4_NL
+#elif defined(DATA_A_MXFP4)
+#define dequantFuncA dequantFuncMXFP4
+#endif
+++ /dev/null
-#extension GL_EXT_control_flow_attributes : require
-#extension GL_EXT_shader_16bit_storage : require
-
-layout (push_constant) uniform parameter
-{
- uint M;
- uint K;
- uint stride_a;
- uint stride_b;
- uint nel;
-} p;
-
-#include "types.comp"
--- /dev/null
+#extension GL_EXT_control_flow_attributes : require
+#extension GL_EXT_shader_16bit_storage : require
+
+layout (push_constant) uniform parameter
+{
+ uint M;
+ uint K;
+ uint stride_a;
+ uint stride_b;
+ uint nel;
+} p;
+
+#include "types.glsl"
#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "dequant_head.comp"
+#include "dequant_head.glsl"
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
uint n_past;
} p;
-#include "types.comp"
+#include "types.glsl"
layout(local_size_x = 1, local_size_y = 512, local_size_z = 1) in;
#version 450
-#include "types.comp"
-#include "generic_binary_head.comp"
+#include "types.glsl"
+#include "generic_binary_head.glsl"
const uint num_threads = 256;
#version 450
-#include "rte.comp"
-#include "generic_head.comp"
-#include "types.comp"
+#include "rte.glsl"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
--- /dev/null
+#version 460
+
+#extension GL_EXT_bfloat16 : require
+
+void main()
+{
+}
--- /dev/null
+#version 460
+
+#extension GL_KHR_cooperative_matrix : require
+
+void main()
+{
+}
--- /dev/null
+#version 460
+
+#extension GL_NV_cooperative_matrix2 : require
+
+void main()
+{
+}
--- /dev/null
+#version 460
+
+#extension GL_EXT_integer_dot_product : require
+
+void main()
+{
+}
#extension GL_KHR_shader_subgroup_shuffle : enable
-#include "types.comp"
-#include "flash_attn_base.comp"
+#include "types.glsl"
+#include "flash_attn_base.glsl"
const uint32_t HSK_per_thread = HSK / D_split;
const uint32_t HSV_per_thread = HSV / D_split;
+++ /dev/null
-
-layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
-
-layout (constant_id = 0) const uint32_t WorkGroupSize = 128;
-layout (constant_id = 1) const uint32_t Br = 1;
-layout (constant_id = 2) const uint32_t Bc = 32;
-layout (constant_id = 3) const uint32_t HSK = 32;
-layout (constant_id = 4) const uint32_t HSV = 32;
-layout (constant_id = 5) const uint32_t Clamp = 0;
-layout (constant_id = 6) const uint32_t D_split = 16;
-
-// Round up head sizes to a multiple of 16, for coopmat1/coopmat2 paths
-const uint32_t HSK_pad = (HSK + 15) & ~15;
-const uint32_t HSV_pad = (HSV + 15) & ~15;
-
-const bool KV_bounds_check = Clamp != 0;
-
-layout (push_constant) uniform parameter {
- uint32_t N;
- uint32_t KV;
-
- uint32_t ne1;
- uint32_t ne2;
- uint32_t ne3;
-
- uint32_t neq2;
- uint32_t neq3;
- uint32_t nek2;
- uint32_t nek3;
- uint32_t nev2;
- uint32_t nev3;
- uint32_t nem1;
- uint32_t nem2;
- uint32_t nem3;
-
- uint32_t nb01;
- uint32_t nb02;
- uint32_t nb03;
- uint32_t nb11;
- uint32_t nb12;
- uint32_t nb13;
- uint32_t nb21;
- uint32_t nb22;
- uint32_t nb23;
-
- float scale;
- float max_bias;
- float logit_softcap;
-
- uint32_t mask_n_head_log2;
- float m0;
- float m1;
-
- uint32_t gqa_ratio;
- uint32_t split_kv;
- uint32_t k_num;
-} p;
-
-#define SINK_ENABLE_BIT (1<<24)
-#define MASK_ENABLE_BIT (1<<16)
-#define N_LOG2_MASK 0xFFFF
-
-layout (binding = 4) readonly buffer S {float data_s[];};
-
-layout (binding = 5) writeonly buffer O {D_TYPE data_o[];};
-
-#if defined(A_TYPE_PACKED16)
-#define BINDING_IDX_K 0
-#define BINDING_IDX_V 1
-layout (binding = 1) readonly buffer K_PACKED16 {A_TYPE_PACKED16 k_data_packed16[];} k_packed;
-layout (binding = 2) readonly buffer V_PACKED16 {A_TYPE_PACKED16 v_data_packed16[];} v_packed;
-#endif
-
-#if defined(DATA_A_Q4_0)
-#define BLOCK_BYTE_SIZE 18
-
-vec4 dequantize4(uint ib, uint iqs, uint a_offset, uint binding_idx) {
- if (binding_idx == BINDING_IDX_K) {
- uint vui_lo = uint(k_packed.k_data_packed16[a_offset + ib].qs[(iqs & 0xF) / 2 + 0]);
- uint vui_hi = uint(k_packed.k_data_packed16[a_offset + ib].qs[(iqs & 0xF) / 2 + 1]);
- uint shift = (iqs & 0x10) >> 2;
- vui_lo >>= shift;
- vui_hi >>= shift;
-
- return float(k_packed.k_data_packed16[a_offset + ib].d) * (vec4(vui_lo & 0xF, (vui_lo >> 8) & 0xF, vui_hi & 0xF, (vui_hi >> 8) & 0xF) - 8.0f);
- } else {
- uint vui_lo = uint(v_packed.v_data_packed16[a_offset + ib].qs[(iqs & 0xF) / 2 + 0]);
- uint vui_hi = uint(v_packed.v_data_packed16[a_offset + ib].qs[(iqs & 0xF) / 2 + 1]);
- uint shift = (iqs & 0x10) >> 2;
- vui_lo >>= shift;
- vui_hi >>= shift;
-
- return float(v_packed.v_data_packed16[a_offset + ib].d) * (vec4(vui_lo & 0xF, (vui_lo >> 8) & 0xF, vui_hi & 0xF, (vui_hi >> 8) & 0xF) - 8.0f);
- }
-}
-#endif
-
-#if defined(DATA_A_Q8_0)
-#define BLOCK_BYTE_SIZE 34
-vec4 dequantize4(uint ib, uint iqs, uint a_offset, uint binding_idx) {
- if (binding_idx == BINDING_IDX_K) {
- const i8vec2 v0 = unpack8(int32_t(k_packed.k_data_packed16[a_offset + ib].qs[iqs / 2])).xy; // vec4 used due to #12147
- const i8vec2 v1 = unpack8(int32_t(k_packed.k_data_packed16[a_offset + ib].qs[iqs / 2 + 1])).xy;
-
- return float(k_packed.k_data_packed16[a_offset + ib].d) * vec4(v0.x, v0.y, v1.x, v1.y);
- } else {
- const i8vec2 v0 = unpack8(int32_t(v_packed.v_data_packed16[a_offset + ib].qs[iqs / 2])).xy; // vec4 used due to #12147
- const i8vec2 v1 = unpack8(int32_t(v_packed.v_data_packed16[a_offset + ib].qs[iqs / 2 + 1])).xy;
-
- return float(v_packed.v_data_packed16[a_offset + ib].d) * vec4(v0.x, v0.y, v1.x, v1.y);
- }
-}
-#endif
-
-#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
-
-
-// Store column zero. This is used to save per-row m and L values for split_k.
-ACC_TYPE perElemOpStoreCol0(const in uint32_t r, const in uint32_t c, const in ACC_TYPE elem, const in uint32_t o_offset, const in uint32_t iq2, const in uint32_t N)
-{
- if (r < N && c == 0) {
- uint32_t offset = iq2 + r;
- data_o[o_offset + offset] = D_TYPE(elem);
- }
- return elem;
-}
-
-// Load the slope matrix, indexed by Q's dimension 2.
-ACC_TYPE perElemOpComputeSlope(const in uint32_t r, const in uint32_t c, const in ACC_TYPE elem, const in uint32_t iq2)
-{
- const uint32_t h = iq2 + (r % p.gqa_ratio);
-
- uint32_t n_head_log2 = p.mask_n_head_log2 & N_LOG2_MASK;
-
- const ACC_TYPE base = ACC_TYPE(h < n_head_log2 ? p.m0 : p.m1);
- const int exph = int(h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1);
-
- return ACC_TYPE(pow(base, ACC_TYPE(exph)));
-}
-
-// Load the sink value, indexed by Q's dimension 2.
-ACC_TYPE perElemOpGetSink(const in uint32_t r, const in uint32_t c, const in ACC_TYPE elem, const in uint32_t iq2)
-{
- const uint32_t h = iq2 + (r % p.gqa_ratio);
-
- return ACC_TYPE(data_s[h]);
-}
-
-uint32_t i, N, KV, split_k_index, Tr, start_j, end_j,
- iq2, iq3, rk2, rk3, rv2, rv3, ik2, ik3, iv2, iv3,
- q_stride, k_stride, v_stride, m_stride;
-
-void init_indices()
-{
- N = p.N;
- KV = p.KV;
-
- i = gl_WorkGroupID.x;
- split_k_index = 0;
-
- if (p.k_num > 1) {
- i = 0;
- split_k_index = gl_WorkGroupID.x;
- }
-
- Tr = CEIL_DIV(N, Br);
-
- start_j = split_k_index * p.split_kv / Bc;
- end_j = CEIL_DIV(min(KV, (split_k_index + 1) * p.split_kv), Bc);
-
- // When not using grouped query attention, all rows share the same iq2, equal to gl_WorkGroupID.y.
- // When using grouped query attention, each workgroup does gqa_ratio consecutive values of iq2.
- iq2 = gl_WorkGroupID.y * p.gqa_ratio;
- iq3 = gl_WorkGroupID.z;
-
- // broadcast factors
- rk2 = p.neq2/p.nek2;
- rk3 = p.neq3/p.nek3;
-
- rv2 = p.neq2/p.nev2;
- rv3 = p.neq3/p.nev3;
-
- // k indices
- ik3 = iq3 / rk3;
- ik2 = iq2 / rk2;
-
- // v indices
- iv3 = iq3 / rv3;
- iv2 = iq2 / rv2;
-
- // nb?1 are already divided by the type size and are in units of elements.
- // When using grouped query attention, Q is indexed by iq2, so the stride
- // should be nb02 (which is in bytes).
- q_stride = p.gqa_ratio > 1 ? (p.nb02 / 4) : p.nb01;
- k_stride = p.nb11;
- v_stride = p.nb21;
- // When using grouped query attention, all rows use the same mask (stride 0).
- // "p.gqa_ratio >> 16" is just a roundabout way of writing zero
- // that prevents the compiler from folding the "&" through the select
- // and breaking the alignment detection.
- m_stride = (p.gqa_ratio > 1) ? (p.gqa_ratio >> 16) : KV;
-}
--- /dev/null
+
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+layout (constant_id = 0) const uint32_t WorkGroupSize = 128;
+layout (constant_id = 1) const uint32_t Br = 1;
+layout (constant_id = 2) const uint32_t Bc = 32;
+layout (constant_id = 3) const uint32_t HSK = 32;
+layout (constant_id = 4) const uint32_t HSV = 32;
+layout (constant_id = 5) const uint32_t Clamp = 0;
+layout (constant_id = 6) const uint32_t D_split = 16;
+
+// Round up head sizes to a multiple of 16, for coopmat1/coopmat2 paths
+const uint32_t HSK_pad = (HSK + 15) & ~15;
+const uint32_t HSV_pad = (HSV + 15) & ~15;
+
+const bool KV_bounds_check = Clamp != 0;
+
+layout (push_constant) uniform parameter {
+ uint32_t N;
+ uint32_t KV;
+
+ uint32_t ne1;
+ uint32_t ne2;
+ uint32_t ne3;
+
+ uint32_t neq2;
+ uint32_t neq3;
+ uint32_t nek2;
+ uint32_t nek3;
+ uint32_t nev2;
+ uint32_t nev3;
+ uint32_t nem1;
+ uint32_t nem2;
+ uint32_t nem3;
+
+ uint32_t nb01;
+ uint32_t nb02;
+ uint32_t nb03;
+ uint32_t nb11;
+ uint32_t nb12;
+ uint32_t nb13;
+ uint32_t nb21;
+ uint32_t nb22;
+ uint32_t nb23;
+
+ float scale;
+ float max_bias;
+ float logit_softcap;
+
+ uint32_t mask_n_head_log2;
+ float m0;
+ float m1;
+
+ uint32_t gqa_ratio;
+ uint32_t split_kv;
+ uint32_t k_num;
+} p;
+
+#define SINK_ENABLE_BIT (1<<24)
+#define MASK_ENABLE_BIT (1<<16)
+#define N_LOG2_MASK 0xFFFF
+
+layout (binding = 4) readonly buffer S {float data_s[];};
+
+layout (binding = 5) writeonly buffer O {D_TYPE data_o[];};
+
+#if defined(A_TYPE_PACKED16)
+#define BINDING_IDX_K 0
+#define BINDING_IDX_V 1
+layout (binding = 1) readonly buffer K_PACKED16 {A_TYPE_PACKED16 k_data_packed16[];} k_packed;
+layout (binding = 2) readonly buffer V_PACKED16 {A_TYPE_PACKED16 v_data_packed16[];} v_packed;
+#endif
+
+#if defined(DATA_A_Q4_0)
+#define BLOCK_BYTE_SIZE 18
+
+vec4 dequantize4(uint ib, uint iqs, uint a_offset, uint binding_idx) {
+ if (binding_idx == BINDING_IDX_K) {
+ uint vui_lo = uint(k_packed.k_data_packed16[a_offset + ib].qs[(iqs & 0xF) / 2 + 0]);
+ uint vui_hi = uint(k_packed.k_data_packed16[a_offset + ib].qs[(iqs & 0xF) / 2 + 1]);
+ uint shift = (iqs & 0x10) >> 2;
+ vui_lo >>= shift;
+ vui_hi >>= shift;
+
+ return float(k_packed.k_data_packed16[a_offset + ib].d) * (vec4(vui_lo & 0xF, (vui_lo >> 8) & 0xF, vui_hi & 0xF, (vui_hi >> 8) & 0xF) - 8.0f);
+ } else {
+ uint vui_lo = uint(v_packed.v_data_packed16[a_offset + ib].qs[(iqs & 0xF) / 2 + 0]);
+ uint vui_hi = uint(v_packed.v_data_packed16[a_offset + ib].qs[(iqs & 0xF) / 2 + 1]);
+ uint shift = (iqs & 0x10) >> 2;
+ vui_lo >>= shift;
+ vui_hi >>= shift;
+
+ return float(v_packed.v_data_packed16[a_offset + ib].d) * (vec4(vui_lo & 0xF, (vui_lo >> 8) & 0xF, vui_hi & 0xF, (vui_hi >> 8) & 0xF) - 8.0f);
+ }
+}
+#endif
+
+#if defined(DATA_A_Q8_0)
+#define BLOCK_BYTE_SIZE 34
+vec4 dequantize4(uint ib, uint iqs, uint a_offset, uint binding_idx) {
+ if (binding_idx == BINDING_IDX_K) {
+ const i8vec2 v0 = unpack8(int32_t(k_packed.k_data_packed16[a_offset + ib].qs[iqs / 2])).xy; // vec4 used due to #12147
+ const i8vec2 v1 = unpack8(int32_t(k_packed.k_data_packed16[a_offset + ib].qs[iqs / 2 + 1])).xy;
+
+ return float(k_packed.k_data_packed16[a_offset + ib].d) * vec4(v0.x, v0.y, v1.x, v1.y);
+ } else {
+ const i8vec2 v0 = unpack8(int32_t(v_packed.v_data_packed16[a_offset + ib].qs[iqs / 2])).xy; // vec4 used due to #12147
+ const i8vec2 v1 = unpack8(int32_t(v_packed.v_data_packed16[a_offset + ib].qs[iqs / 2 + 1])).xy;
+
+ return float(v_packed.v_data_packed16[a_offset + ib].d) * vec4(v0.x, v0.y, v1.x, v1.y);
+ }
+}
+#endif
+
+#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
+
+
+// Store column zero. This is used to save per-row m and L values for split_k.
+ACC_TYPE perElemOpStoreCol0(const in uint32_t r, const in uint32_t c, const in ACC_TYPE elem, const in uint32_t o_offset, const in uint32_t iq2, const in uint32_t N)
+{
+ if (r < N && c == 0) {
+ uint32_t offset = iq2 + r;
+ data_o[o_offset + offset] = D_TYPE(elem);
+ }
+ return elem;
+}
+
+// Load the slope matrix, indexed by Q's dimension 2.
+ACC_TYPE perElemOpComputeSlope(const in uint32_t r, const in uint32_t c, const in ACC_TYPE elem, const in uint32_t iq2)
+{
+ const uint32_t h = iq2 + (r % p.gqa_ratio);
+
+ uint32_t n_head_log2 = p.mask_n_head_log2 & N_LOG2_MASK;
+
+ const ACC_TYPE base = ACC_TYPE(h < n_head_log2 ? p.m0 : p.m1);
+ const int exph = int(h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1);
+
+ return ACC_TYPE(pow(base, ACC_TYPE(exph)));
+}
+
+// Load the sink value, indexed by Q's dimension 2.
+ACC_TYPE perElemOpGetSink(const in uint32_t r, const in uint32_t c, const in ACC_TYPE elem, const in uint32_t iq2)
+{
+ const uint32_t h = iq2 + (r % p.gqa_ratio);
+
+ return ACC_TYPE(data_s[h]);
+}
+
+uint32_t i, N, KV, split_k_index, Tr, start_j, end_j,
+ iq2, iq3, rk2, rk3, rv2, rv3, ik2, ik3, iv2, iv3,
+ q_stride, k_stride, v_stride, m_stride;
+
+void init_indices()
+{
+ N = p.N;
+ KV = p.KV;
+
+ i = gl_WorkGroupID.x;
+ split_k_index = 0;
+
+ if (p.k_num > 1) {
+ i = 0;
+ split_k_index = gl_WorkGroupID.x;
+ }
+
+ Tr = CEIL_DIV(N, Br);
+
+ start_j = split_k_index * p.split_kv / Bc;
+ end_j = CEIL_DIV(min(KV, (split_k_index + 1) * p.split_kv), Bc);
+
+ // When not using grouped query attention, all rows share the same iq2, equal to gl_WorkGroupID.y.
+ // When using grouped query attention, each workgroup does gqa_ratio consecutive values of iq2.
+ iq2 = gl_WorkGroupID.y * p.gqa_ratio;
+ iq3 = gl_WorkGroupID.z;
+
+ // broadcast factors
+ rk2 = p.neq2/p.nek2;
+ rk3 = p.neq3/p.nek3;
+
+ rv2 = p.neq2/p.nev2;
+ rv3 = p.neq3/p.nev3;
+
+ // k indices
+ ik3 = iq3 / rk3;
+ ik2 = iq2 / rk2;
+
+ // v indices
+ iv3 = iq3 / rv3;
+ iv2 = iq2 / rv2;
+
+ // nb?1 are already divided by the type size and are in units of elements.
+ // When using grouped query attention, Q is indexed by iq2, so the stride
+ // should be nb02 (which is in bytes).
+ q_stride = p.gqa_ratio > 1 ? (p.nb02 / 4) : p.nb01;
+ k_stride = p.nb11;
+ v_stride = p.nb21;
+ // When using grouped query attention, all rows use the same mask (stride 0).
+ // "p.gqa_ratio >> 16" is just a roundabout way of writing zero
+ // that prevents the compiler from folding the "&" through the select
+ // and breaking the alignment detection.
+ m_stride = (p.gqa_ratio > 1) ? (p.gqa_ratio >> 16) : KV;
+}
#extension GL_KHR_memory_scope_semantics : enable
#extension GL_KHR_cooperative_matrix : enable
-#include "types.comp"
-#include "flash_attn_base.comp"
+#include "types.glsl"
+#include "flash_attn_base.glsl"
const uint32_t HSK_per_thread = HSK / D_split;
const uint32_t HSV_per_thread = HSV / D_split;
#extension GL_KHR_shader_subgroup_vote : enable
#extension GL_EXT_null_initializer : enable
-#include "types.comp"
-#include "dequant_funcs_cm2.comp"
-#include "flash_attn_base.comp"
+#include "types.glsl"
+#include "dequant_funcs_cm2.glsl"
+#include "flash_attn_base.glsl"
layout (binding = 0) readonly buffer Q {uint8_t data_q[];};
layout (binding = 1) readonly buffer K {uint8_t data_k[];};
#version 450
-#include "glu_head.comp"
+#include "glu_head.glsl"
const float GELU_COEF_A = 0.044715f;
const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
return 0.5f*a*(2.0f - 2.0f / (exp(2 * val) + 1)) * b;
}
-#include "glu_main.comp"
+#include "glu_main.glsl"
#version 450
-#include "glu_head.comp"
+#include "glu_head.glsl"
// based on Abramowitz and Stegun formula 7.1.26 or similar Hastings' approximation
// ref: https://www.johndcook.com/blog/python_erf/
return 0.5f * a * (1.0f + erf_approx) * b;
}
-#include "glu_main.comp"
+#include "glu_main.glsl"
#version 450
-#include "glu_head.comp"
+#include "glu_head.glsl"
const float GELU_QUICK_COEF = -1.702f;
return a * (1.0f / (1.0f + exp(GELU_QUICK_COEF * a))) * b;
}
-#include "glu_main.comp"
+#include "glu_main.glsl"
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
+++ /dev/null
-#extension GL_EXT_shader_16bit_storage : require
-#extension GL_EXT_control_flow_attributes : require
-
-#include "rte.comp"
-#include "utils.comp"
-
-layout (push_constant) uniform parameter
-{
- uint ne;
- uint ne00; uint ne01; uint ne02; uint ne03; uint nb00; uint nb01; uint nb02; uint nb03;
- uint ne10; uint ne11; uint ne12; uint ne13; uint nb10; uint nb11; uint nb12; uint nb13;
- uint ne20; uint ne21; uint ne22; uint ne23; uint nb20; uint nb21; uint nb22; uint nb23;
- uint misalign_offsets;
- float param1; float param2; int param3;
-} p;
-
-layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
-layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
-layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};
-
-// true if src0/src1 are the same shape and the indices can be reused without additional modulus
-layout(constant_id = 0) const bool norepeat = false;
-
-uint get_idx() {
- return gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
-}
-
-uint get_aoffset() { return p.misalign_offsets >> 16; }
-uint get_boffset() { return (p.misalign_offsets >> 8) & 0xFF; }
-uint get_doffset() { return p.misalign_offsets & 0xFF; }
-
-
-void get_indices(uint idx, out uint i00, out uint i01, out uint i02, out uint i03) {
- get_indices(idx, i00, i01, i02, i03, p.ne00, p.ne01, p.ne02, p.ne03);
-}
-
-uint src0_idx(uint i00, uint i01, uint i02, uint i03) {
- return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + i00*p.nb00;
-}
-
-uint src1_idx(uint i00, uint i01, uint i02, uint i03) {
- if (norepeat) {
- return i03*p.nb13 + i02*p.nb12 + i01*p.nb11 + i00*p.nb10;
- } else {
- return fastmod(i03, p.ne13)*p.nb13 + fastmod(i02, p.ne12)*p.nb12 + fastmod(i01, p.ne11)*p.nb11 + fastmod(i00, p.ne10)*p.nb10;
- }
-}
-
-uint dst_idx(uint i00, uint i01, uint i02, uint i03) {
- return i03*p.nb23 + i02*p.nb22 + i01*p.nb21 + i00*p.nb20;
-}
--- /dev/null
+#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_control_flow_attributes : require
+
+#include "rte.glsl"
+#include "utils.glsl"
+
+layout (push_constant) uniform parameter
+{
+ uint ne;
+ uint ne00; uint ne01; uint ne02; uint ne03; uint nb00; uint nb01; uint nb02; uint nb03;
+ uint ne10; uint ne11; uint ne12; uint ne13; uint nb10; uint nb11; uint nb12; uint nb13;
+ uint ne20; uint ne21; uint ne22; uint ne23; uint nb20; uint nb21; uint nb22; uint nb23;
+ uint misalign_offsets;
+ float param1; float param2; int param3;
+} p;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
+layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};
+
+// true if src0/src1 are the same shape and the indices can be reused without additional modulus
+layout(constant_id = 0) const bool norepeat = false;
+
+uint get_idx() {
+ return gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+}
+
+uint get_aoffset() { return p.misalign_offsets >> 16; }
+uint get_boffset() { return (p.misalign_offsets >> 8) & 0xFF; }
+uint get_doffset() { return p.misalign_offsets & 0xFF; }
+
+
+void get_indices(uint idx, out uint i00, out uint i01, out uint i02, out uint i03) {
+ get_indices(idx, i00, i01, i02, i03, p.ne00, p.ne01, p.ne02, p.ne03);
+}
+
+uint src0_idx(uint i00, uint i01, uint i02, uint i03) {
+ return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + i00*p.nb00;
+}
+
+uint src1_idx(uint i00, uint i01, uint i02, uint i03) {
+ if (norepeat) {
+ return i03*p.nb13 + i02*p.nb12 + i01*p.nb11 + i00*p.nb10;
+ } else {
+ return fastmod(i03, p.ne13)*p.nb13 + fastmod(i02, p.ne12)*p.nb12 + fastmod(i01, p.ne11)*p.nb11 + fastmod(i00, p.ne10)*p.nb10;
+ }
+}
+
+uint dst_idx(uint i00, uint i01, uint i02, uint i03) {
+ return i03*p.nb23 + i02*p.nb22 + i01*p.nb21 + i00*p.nb20;
+}
+++ /dev/null
-#extension GL_EXT_shader_16bit_storage : require
-
-layout (push_constant) uniform parameter
-{
- uint KX;
- uint KY;
- float param1;
- float param2;
-} p;
--- /dev/null
+#extension GL_EXT_shader_16bit_storage : require
+
+layout (push_constant) uniform parameter
+{
+ uint KX;
+ uint KY;
+ float param1;
+ float param2;
+} p;
+++ /dev/null
-#extension GL_EXT_shader_16bit_storage : require
-#extension GL_EXT_control_flow_attributes : require
-
-layout (push_constant) uniform parameter
-{
- uint ne;
- uint ne00; uint ne01; uint ne02; uint ne03; uint nb00; uint nb01; uint nb02; uint nb03;
- uint ne10; uint ne11; uint ne12; uint ne13; uint nb10; uint nb11; uint nb12; uint nb13;
- uint misalign_offsets;
- float param1; float param2;
-
- uint ne0_012mp; uint ne0_012L;
- uint ne0_01mp; uint ne0_01L;
- uint ne0_0mp; uint ne0_0L;
- uint ne1_012mp; uint ne1_012L;
- uint ne1_01mp; uint ne1_01L;
- uint ne1_0mp; uint ne1_0L;
-} p;
-
-layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
-layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
-
-uint get_idx() {
- return gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
-}
-
-uint get_aoffset() { return p.misalign_offsets >> 16; }
-uint get_doffset() { return p.misalign_offsets & 0xFFFF; }
-
-// see init_fastdiv_values in ggml-vulkan.cpp
-uint fastdiv(uint n, uint mp, uint L) {
- uint msbs, lsbs;
- // msbs = mulhi(n, mp)
- umulExtended(n, mp, msbs, lsbs);
- return (msbs + n) >> L;
-}
-
-uint src0_idx(uint idx) {
- const uint i03 = fastdiv(idx, p.ne0_012mp, p.ne0_012L);
- const uint i03_offset = i03 * p.ne02*p.ne01*p.ne00;
- const uint i02 = fastdiv(idx - i03_offset, p.ne0_01mp, p.ne0_01L);
- const uint i02_offset = i02*p.ne01*p.ne00;
- const uint i01 = fastdiv(idx - i03_offset - i02_offset, p.ne0_0mp, p.ne0_0L);
- const uint i00 = idx - i03_offset - i02_offset - i01*p.ne00;
- return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + i00*p.nb00;
-}
-
-uint dst_idx(uint idx) {
- const uint i13 = fastdiv(idx, p.ne1_012mp, p.ne1_012L);
- const uint i13_offset = i13 * p.ne12*p.ne11*p.ne10;
- const uint i12 = fastdiv(idx - i13_offset, p.ne1_01mp, p.ne1_01L);
- const uint i12_offset = i12*p.ne11*p.ne10;
- const uint i11 = fastdiv(idx - i13_offset - i12_offset, p.ne1_0mp, p.ne1_0L);
- const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10;
- return i13*p.nb13 + i12*p.nb12 + i11*p.nb11 + i10*p.nb10;
-}
-
-uint src0_idx_quant(uint idx, uint qk) {
- const uint i03 = fastdiv(idx, p.ne0_012mp, p.ne0_012L);
- const uint i03_offset = i03 * p.ne02*p.ne01*p.ne00;
- const uint i02 = fastdiv(idx - i03_offset, p.ne0_01mp, p.ne0_01L);
- const uint i02_offset = i02*p.ne01*p.ne00;
- const uint i01 = fastdiv(idx - i03_offset - i02_offset, p.ne0_0mp, p.ne0_0L);
- const uint i00 = idx - i03_offset - i02_offset - i01*p.ne00;
- return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + (i00/qk)*p.nb00;
-}
-
-uint dst_idx_quant(uint idx, uint qk) {
- const uint i13 = fastdiv(idx, p.ne1_012mp, p.ne1_012L);
- const uint i13_offset = i13 * p.ne12*p.ne11*p.ne10;
- const uint i12 = fastdiv(idx - i13_offset, p.ne1_01mp, p.ne1_01L);
- const uint i12_offset = i12*p.ne11*p.ne10;
- const uint i11 = fastdiv(idx - i13_offset - i12_offset, p.ne1_0mp, p.ne1_0L);
- const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10;
- return i13*p.nb13 + i12*p.nb12 + i11*p.nb11 + (i10/qk)*p.nb10;
-}
--- /dev/null
+#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_control_flow_attributes : require
+
+layout (push_constant) uniform parameter
+{
+ uint ne;
+ uint ne00; uint ne01; uint ne02; uint ne03; uint nb00; uint nb01; uint nb02; uint nb03;
+ uint ne10; uint ne11; uint ne12; uint ne13; uint nb10; uint nb11; uint nb12; uint nb13;
+ uint misalign_offsets;
+ float param1; float param2;
+
+ uint ne0_012mp; uint ne0_012L;
+ uint ne0_01mp; uint ne0_01L;
+ uint ne0_0mp; uint ne0_0L;
+ uint ne1_012mp; uint ne1_012L;
+ uint ne1_01mp; uint ne1_01L;
+ uint ne1_0mp; uint ne1_0L;
+} p;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+uint get_idx() {
+ return gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+}
+
+uint get_aoffset() { return p.misalign_offsets >> 16; }
+uint get_doffset() { return p.misalign_offsets & 0xFFFF; }
+
+// see init_fastdiv_values in ggml-vulkan.cpp
+uint fastdiv(uint n, uint mp, uint L) {
+ uint msbs, lsbs;
+ // msbs = mulhi(n, mp)
+ umulExtended(n, mp, msbs, lsbs);
+ return (msbs + n) >> L;
+}
+
+uint src0_idx(uint idx) {
+ const uint i03 = fastdiv(idx, p.ne0_012mp, p.ne0_012L);
+ const uint i03_offset = i03 * p.ne02*p.ne01*p.ne00;
+ const uint i02 = fastdiv(idx - i03_offset, p.ne0_01mp, p.ne0_01L);
+ const uint i02_offset = i02*p.ne01*p.ne00;
+ const uint i01 = fastdiv(idx - i03_offset - i02_offset, p.ne0_0mp, p.ne0_0L);
+ const uint i00 = idx - i03_offset - i02_offset - i01*p.ne00;
+ return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + i00*p.nb00;
+}
+
+uint dst_idx(uint idx) {
+ const uint i13 = fastdiv(idx, p.ne1_012mp, p.ne1_012L);
+ const uint i13_offset = i13 * p.ne12*p.ne11*p.ne10;
+ const uint i12 = fastdiv(idx - i13_offset, p.ne1_01mp, p.ne1_01L);
+ const uint i12_offset = i12*p.ne11*p.ne10;
+ const uint i11 = fastdiv(idx - i13_offset - i12_offset, p.ne1_0mp, p.ne1_0L);
+ const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10;
+ return i13*p.nb13 + i12*p.nb12 + i11*p.nb11 + i10*p.nb10;
+}
+
+uint src0_idx_quant(uint idx, uint qk) {
+ const uint i03 = fastdiv(idx, p.ne0_012mp, p.ne0_012L);
+ const uint i03_offset = i03 * p.ne02*p.ne01*p.ne00;
+ const uint i02 = fastdiv(idx - i03_offset, p.ne0_01mp, p.ne0_01L);
+ const uint i02_offset = i02*p.ne01*p.ne00;
+ const uint i01 = fastdiv(idx - i03_offset - i02_offset, p.ne0_0mp, p.ne0_0L);
+ const uint i00 = idx - i03_offset - i02_offset - i01*p.ne00;
+ return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + (i00/qk)*p.nb00;
+}
+
+uint dst_idx_quant(uint idx, uint qk) {
+ const uint i13 = fastdiv(idx, p.ne1_012mp, p.ne1_012L);
+ const uint i13_offset = i13 * p.ne12*p.ne11*p.ne10;
+ const uint i12 = fastdiv(idx - i13_offset, p.ne1_01mp, p.ne1_01L);
+ const uint i12_offset = i12*p.ne11*p.ne10;
+ const uint i11 = fastdiv(idx - i13_offset - i12_offset, p.ne1_0mp, p.ne1_0L);
+ const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10;
+ return i13*p.nb13 + i12*p.nb12 + i11*p.nb11 + (i10/qk)*p.nb10;
+}
#version 450
-#include "types.comp"
-#include "generic_binary_head.comp"
+#include "types.glsl"
+#include "generic_binary_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
#extension GL_EXT_control_flow_attributes : enable
-#include "types.comp"
-#include "generic_binary_head.comp"
-#include "dequant_funcs.comp"
+#include "types.glsl"
+#include "generic_binary_head.glsl"
+#include "dequant_funcs.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+++ /dev/null
-#extension GL_EXT_shader_16bit_storage : require
-
-#include "rte.comp"
-
-layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
-
-layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
-layout (binding = 1) readonly buffer B {A_TYPE data_b[];};
-layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};
-
-layout (push_constant) uniform parameter
-{
- uint N;
- uint ne00;
- uint ne20;
- uint mode;
- float alpha;
- float limit;
-} p;
--- /dev/null
+#extension GL_EXT_shader_16bit_storage : require
+
+#include "rte.glsl"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) readonly buffer B {A_TYPE data_b[];};
+layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};
+
+layout (push_constant) uniform parameter
+{
+ uint N;
+ uint ne00;
+ uint ne20;
+ uint mode;
+ float alpha;
+ float limit;
+} p;
+++ /dev/null
-void main() {
- const uint i = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
-
- if (i >= p.N) {
- return;
- }
-
- const uint row = i / p.ne20;
- const uint col = i - row * p.ne20;
-
- if (p.mode == 0) {
- // Default
- const uint offset = p.ne00 / 2;
- const uint idx = row * p.ne00 + col;
-
- data_d[row * offset + col] = D_TYPE(op(float(data_a[idx]), float(data_a[idx + offset])));
- } else if (p.mode == 1) {
- // Swapped
- const uint offset = p.ne00 / 2;
- const uint idx = row * p.ne00 + col;
-
- data_d[row * offset + col] = D_TYPE(op(float(data_a[idx + offset]), float(data_a[idx])));
- } else {
- // Split
- const uint idx = row * p.ne00 + col;
-
- data_d[idx] = D_TYPE(op(float(data_a[idx]), float(data_b[idx])));
- }
-}
--- /dev/null
+void main() {
+ const uint i = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+
+ if (i >= p.N) {
+ return;
+ }
+
+ const uint row = i / p.ne20;
+ const uint col = i - row * p.ne20;
+
+ if (p.mode == 0) {
+ // Default
+ const uint offset = p.ne00 / 2;
+ const uint idx = row * p.ne00 + col;
+
+ data_d[row * offset + col] = D_TYPE(op(float(data_a[idx]), float(data_a[idx + offset])));
+ } else if (p.mode == 1) {
+ // Swapped
+ const uint offset = p.ne00 / 2;
+ const uint idx = row * p.ne00 + col;
+
+ data_d[row * offset + col] = D_TYPE(op(float(data_a[idx + offset]), float(data_a[idx])));
+ } else {
+ // Split
+ const uint idx = row * p.ne00 + col;
+
+ data_d[idx] = D_TYPE(op(float(data_a[idx]), float(data_b[idx])));
+ }
+}
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#define BLOCK_SIZE 512
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#extension GL_EXT_shader_16bit_storage : require
#extension GL_EXT_control_flow_attributes : require
-#include "rte.comp"
-
-#include "types.comp"
+#include "rte.glsl"
+#include "types.glsl"
layout (push_constant) uniform parameter
{
#extension GL_EXT_control_flow_attributes : require
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "rte.comp"
-
-#include "types.comp"
+#include "rte.glsl"
+#include "types.glsl"
layout (push_constant) uniform parameter
{
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#define BLOCK_SIZE 512
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#version 450
-#include "types.comp"
-#include "generic_binary_head.comp"
+#include "types.glsl"
+#include "generic_binary_head.glsl"
const uint num_threads = 256;
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+++ /dev/null
-#extension GL_EXT_control_flow_attributes : enable
-#extension GL_EXT_shader_16bit_storage : require
-#extension GL_EXT_shader_8bit_storage : require
-
-#if USE_SUBGROUP_ADD || USE_SUBGROUP_ADD_NO_SHMEM
-#extension GL_KHR_shader_subgroup_basic : require
-#extension GL_KHR_shader_subgroup_arithmetic : require
-#endif
-
-#ifdef MUL_MAT_ID
-#define EXPERT_COUNT 8
-#endif
-
-#include "types.comp"
-
-#ifndef MMQ
-layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
-#else
-layout (binding = 0) readonly buffer A {A_TYPE_PACKED16 data_a[];};
-#endif
-
-layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
-#ifdef B_TYPE_VEC2
-layout (binding = 1) readonly buffer BV2 {B_TYPE_VEC2 data_b_v2[];};
-#endif
-#ifdef B_TYPE_VEC4
-layout (binding = 1) readonly buffer BV4 {B_TYPE_VEC4 data_b_v4[];};
-#endif
-
-layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};
-#ifdef MUL_MAT_ID
-layout (binding = 3) readonly buffer IDS {int data_ids[];};
-#endif
-
-#include "dequant_funcs.comp"
-
-layout (push_constant) uniform parameter
-{
- uint ncols;
- uint stride_a;
- uint stride_b;
- uint stride_d;
-
- uint batch_stride_a;
- uint batch_stride_b;
- uint batch_stride_d;
-
-#ifdef MUL_MAT_ID
- uint nei0;
- uint ne11;
-#else
- uint ne02;
- uint ne12;
- uint broadcast2;
- uint broadcast3;
-#endif
-} p;
-
-void get_offsets(out uint a_offset, out uint b_offset, out uint d_offset) {
-#ifdef MUL_MAT_ID
- const uint expert_idx = gl_GlobalInvocationID.y;
-#else
- const uint batch_idx = gl_GlobalInvocationID.y;
-#endif
-
-#ifndef MUL_MAT_ID
- uint batch_idx_a = 0;
- if (batch_idx != 0) {
- const uint i13 = batch_idx / p.ne12;
- const uint i12 = batch_idx % p.ne12;
-
- const uint i03 = i13 / p.broadcast3;
- const uint i02 = i12 / p.broadcast2;
-
- batch_idx_a = i03 * p.ne02 + i02;
- }
-#else
- const uint expert_id = data_ids[expert_idx];
-#endif
-
- a_offset =
-#ifdef MUL_MAT_ID
- expert_id * p.batch_stride_a;
-#else
- batch_idx_a * p.batch_stride_a;
-#endif
- b_offset =
-#ifdef MUL_MAT_ID
- (expert_idx % p.ne11) * p.stride_b;
-#else
- batch_idx * p.batch_stride_b;
-#endif
- d_offset =
-#ifdef MUL_MAT_ID
- expert_idx * p.stride_d;
-#else
- batch_idx * p.batch_stride_d;
-#endif
-}
-
-layout (constant_id = 0) const uint BLOCK_SIZE = 32;
-layout (constant_id = 1) const uint NUM_ROWS = 1;
-layout (constant_id = 2) const uint NUM_COLS = 1;
-
-#ifdef USE_SUBGROUP_ADD_NO_SHMEM
-void reduce_result(inout FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const in uint32_t d_offset, const in uint32_t first_row, const in uint32_t num_rows, const in uint32_t tid) {
- [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
- [[unroll]] for (uint n = 0; n < num_rows; ++n) {
- temp[j][n] = subgroupAdd(temp[j][n]);
- }
- }
-
- if (tid == 0) {
- [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
- [[unroll]] for (uint n = 0; n < num_rows; ++n) {
- data_d[j*p.batch_stride_d + d_offset + first_row + n] = D_TYPE(temp[j][n]);
- }
- }
- }
-}
-#else
-shared FLOAT_TYPE tmpsh[NUM_COLS][NUM_ROWS][BLOCK_SIZE];
-
-void reduce_result(FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const in uint32_t d_offset, const in uint32_t first_row, const in uint32_t num_rows, const in uint32_t tid) {
- // subgroupAdd is probably faster on devices that support it,
- // particularly when the workgroup has more than one subgroup
-#if USE_SUBGROUP_ADD
- // sum up partial sums within a subgroup
- [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
- [[unroll]] for (uint n = 0; n < num_rows; ++n) {
- temp[j][n] = subgroupAdd(temp[j][n]);
- }
- }
-
- // Go through shared memory to sum partials across subgroups
- if (gl_SubgroupInvocationID == 0) {
- [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
- [[unroll]] for (uint n = 0; n < num_rows; ++n) {
- tmpsh[j][n][gl_SubgroupID] = temp[j][n];
- }
- }
- }
- barrier();
- if (tid == 0) {
- [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
- [[unroll]] for (uint n = 0; n < num_rows; ++n) {
- temp[j][n] = FLOAT_TYPE(0);
- [[unroll]] for (uint s = 0; s < gl_NumSubgroups; ++s) {
- temp[j][n] += tmpsh[j][n][s];
- }
- data_d[j*p.batch_stride_d + d_offset + first_row + n] = D_TYPE(temp[j][n]);
- }
- }
- }
-#else
- // sum up partial sums and write back result
- [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
- [[unroll]] for (uint n = 0; n < num_rows; ++n) {
- tmpsh[j][n][tid] = temp[j][n];
- }
- }
- barrier();
- [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) {
- if (tid < s) {
- [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
- [[unroll]] for (uint n = 0; n < num_rows; ++n) {
- tmpsh[j][n][tid] += tmpsh[j][n][tid + s];
- }
- }
- }
- barrier();
- }
- if (tid == 0) {
- [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
- [[unroll]] for (uint n = 0; n < num_rows; ++n) {
- data_d[j*p.batch_stride_d + d_offset + first_row + n] = D_TYPE(tmpsh[j][n][0]);
- }
- }
- }
-#endif
-}
-#endif
--- /dev/null
+#extension GL_EXT_control_flow_attributes : enable
+#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_shader_8bit_storage : require
+
+#if USE_SUBGROUP_ADD || USE_SUBGROUP_ADD_NO_SHMEM
+#extension GL_KHR_shader_subgroup_basic : require
+#extension GL_KHR_shader_subgroup_arithmetic : require
+#endif
+
+#ifdef MUL_MAT_ID
+#define EXPERT_COUNT 8
+#endif
+
+#include "types.glsl"
+
+#ifndef MMQ
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+#else
+layout (binding = 0) readonly buffer A {A_TYPE_PACKED16 data_a[];};
+#endif
+
+layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
+#ifdef B_TYPE_VEC2
+layout (binding = 1) readonly buffer BV2 {B_TYPE_VEC2 data_b_v2[];};
+#endif
+#ifdef B_TYPE_VEC4
+layout (binding = 1) readonly buffer BV4 {B_TYPE_VEC4 data_b_v4[];};
+#endif
+
+layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};
+#ifdef MUL_MAT_ID
+layout (binding = 3) readonly buffer IDS {int data_ids[];};
+#endif
+
+#include "dequant_funcs.glsl"
+
+layout (push_constant) uniform parameter
+{
+ uint ncols;
+ uint stride_a;
+ uint stride_b;
+ uint stride_d;
+
+ uint batch_stride_a;
+ uint batch_stride_b;
+ uint batch_stride_d;
+
+#ifdef MUL_MAT_ID
+ uint nei0;
+ uint ne11;
+#else
+ uint ne02;
+ uint ne12;
+ uint broadcast2;
+ uint broadcast3;
+#endif
+} p;
+
+void get_offsets(out uint a_offset, out uint b_offset, out uint d_offset) {
+#ifdef MUL_MAT_ID
+ const uint expert_idx = gl_GlobalInvocationID.y;
+#else
+ const uint batch_idx = gl_GlobalInvocationID.y;
+#endif
+
+#ifndef MUL_MAT_ID
+ uint batch_idx_a = 0;
+ if (batch_idx != 0) {
+ const uint i13 = batch_idx / p.ne12;
+ const uint i12 = batch_idx % p.ne12;
+
+ const uint i03 = i13 / p.broadcast3;
+ const uint i02 = i12 / p.broadcast2;
+
+ batch_idx_a = i03 * p.ne02 + i02;
+ }
+#else
+ const uint expert_id = data_ids[expert_idx];
+#endif
+
+ a_offset =
+#ifdef MUL_MAT_ID
+ expert_id * p.batch_stride_a;
+#else
+ batch_idx_a * p.batch_stride_a;
+#endif
+ b_offset =
+#ifdef MUL_MAT_ID
+ (expert_idx % p.ne11) * p.stride_b;
+#else
+ batch_idx * p.batch_stride_b;
+#endif
+ d_offset =
+#ifdef MUL_MAT_ID
+ expert_idx * p.stride_d;
+#else
+ batch_idx * p.batch_stride_d;
+#endif
+}
+
+layout (constant_id = 0) const uint BLOCK_SIZE = 32;
+layout (constant_id = 1) const uint NUM_ROWS = 1;
+layout (constant_id = 2) const uint NUM_COLS = 1;
+
+#ifdef USE_SUBGROUP_ADD_NO_SHMEM
+void reduce_result(inout FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const in uint32_t d_offset, const in uint32_t first_row, const in uint32_t num_rows, const in uint32_t tid) {
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ temp[j][n] = subgroupAdd(temp[j][n]);
+ }
+ }
+
+ if (tid == 0) {
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ data_d[j*p.batch_stride_d + d_offset + first_row + n] = D_TYPE(temp[j][n]);
+ }
+ }
+ }
+}
+#else
+shared FLOAT_TYPE tmpsh[NUM_COLS][NUM_ROWS][BLOCK_SIZE];
+
+void reduce_result(FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const in uint32_t d_offset, const in uint32_t first_row, const in uint32_t num_rows, const in uint32_t tid) {
+ // subgroupAdd is probably faster on devices that support it,
+ // particularly when the workgroup has more than one subgroup
+#if USE_SUBGROUP_ADD
+ // sum up partial sums within a subgroup
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ temp[j][n] = subgroupAdd(temp[j][n]);
+ }
+ }
+
+ // Go through shared memory to sum partials across subgroups
+ if (gl_SubgroupInvocationID == 0) {
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ tmpsh[j][n][gl_SubgroupID] = temp[j][n];
+ }
+ }
+ }
+ barrier();
+ if (tid == 0) {
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ temp[j][n] = FLOAT_TYPE(0);
+ [[unroll]] for (uint s = 0; s < gl_NumSubgroups; ++s) {
+ temp[j][n] += tmpsh[j][n][s];
+ }
+ data_d[j*p.batch_stride_d + d_offset + first_row + n] = D_TYPE(temp[j][n]);
+ }
+ }
+ }
+#else
+ // sum up partial sums and write back result
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ tmpsh[j][n][tid] = temp[j][n];
+ }
+ }
+ barrier();
+ [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) {
+ if (tid < s) {
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ tmpsh[j][n][tid] += tmpsh[j][n][tid + s];
+ }
+ }
+ }
+ barrier();
+ }
+ if (tid == 0) {
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ data_d[j*p.batch_stride_d + d_offset + first_row + n] = D_TYPE(tmpsh[j][n][0]);
+ }
+ }
+ }
+#endif
+}
+#endif
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#define MMQ
#define B_TYPE block_q8_1_x4
-#include "mul_mat_vec_base.comp"
+#include "mul_mat_vec_base.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#define K_PER_ITER 8
-#include "mul_mmq_funcs.comp"
+#include "mul_mmq_funcs.glsl"
uint a_offset, b_offset, d_offset;
#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
#endif
-#include "types.comp"
+#include "types.glsl"
#ifndef LOAD_VEC_A
#define LOAD_VEC_A 1
shared ACC_TYPE coopmat_stage[TM * TN * NUM_WARPS];
#endif
-#include "mul_mm_funcs.comp"
+#include "mul_mm_funcs.glsl"
void main() {
#ifdef NEEDS_INIT_IQ_SHMEM
#extension GL_EXT_bfloat16 : enable
#endif
-#include "types.comp"
-#include "utils.comp"
+#include "types.glsl"
+#include "utils.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#if QUANT_K > 1
#define DECODEFUNCA , dequantFuncA
-#include "dequant_funcs_cm2.comp"
+#include "dequant_funcs_cm2.glsl"
#else
#define DECODEFUNCA
+++ /dev/null
-void load_a_to_shmem(const uint pos_a, const uint row, const uint col, const uint idx_m, const uint block, const uint end_k) {
-#if defined(DATA_A_F32) || defined(DATA_A_F16)
-#if LOAD_VEC_A == 8
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
- FLOAT_TYPE_VEC8 aa = FLOAT_TYPE_VEC8(data_a[idx]);
- buf_a[buf_idx ] = aa[0].xy;
- buf_a[buf_idx + 1] = aa[0].zw;
- buf_a[buf_idx + 2] = aa[1].xy;
- buf_a[buf_idx + 3] = aa[1].zw;
-#elif LOAD_VEC_A == 4
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
- FLOAT_TYPE_VEC4 aa = FLOAT_TYPE_VEC4(data_a[idx]);
- buf_a[buf_idx ] = aa.xy;
- buf_a[buf_idx + 1] = aa.zw;
-#else // LOAD_VEC_BATCH_A == 2
- const uint idx = pos_a + col * p.stride_a + row * 2;
- const uint buf_idx = col * SHMEM_STRIDE + row;
- if (idx_m < p.M && block + row * 2 + 1 < end_k) {
- buf_a[buf_idx] = FLOAT_TYPE_VEC2(data_a[idx],
- data_a[idx + 1]);
- } else if (idx_m < p.M && block + row * 2 < end_k) {
- buf_a[buf_idx] = FLOAT_TYPE_VEC2(data_a[idx], 0.0f);
- } else {
- buf_a[buf_idx] = FLOAT_TYPE_VEC2(0.0f);
- }
-#endif
-#elif defined(DATA_A_BF16)
-#if LOAD_VEC_A == 4
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
- FLOAT_TYPE_VEC4 aa = FLOAT_TYPE_VEC4(TO_FLOAT_TYPE(data_a[idx]));
- buf_a[buf_idx ] = aa.xy;
- buf_a[buf_idx + 1] = aa.zw;
-#else // LOAD_VEC_BATCH_A == 2
- const uint idx = pos_a + col * p.stride_a + row * 2;
- const uint buf_idx = col * SHMEM_STRIDE + row;
- if (idx_m < p.M && block + row * 2 + 1 < end_k) {
- buf_a[buf_idx] = FLOAT_TYPE_VEC2(TO_FLOAT_TYPE(data_a[idx]),
- TO_FLOAT_TYPE(data_a[idx + 1]));
- } else if (idx_m < p.M && block + row * 2 < end_k) {
- buf_a[buf_idx] = FLOAT_TYPE_VEC2(TO_FLOAT_TYPE(data_a[idx]), 0.0f);
- } else {
- buf_a[buf_idx] = FLOAT_TYPE_VEC2(0.0f);
- }
-#endif
-#elif defined(DATA_A_Q4_0)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + 2 * row;
-
- const uint ib = idx / 4;
- const uint iqs = idx & 0x03;
-
- const float d = float(data_a_packed16[ib].d);
- const uint vui = uint(data_a_packed16[ib].qs[2*iqs]) | (uint(data_a_packed16[ib].qs[2*iqs + 1]) << 16);
- const vec4 v0 = (vec4(unpack8(vui & 0x0F0F0F0F)) - 8.0f) * d;
- const vec4 v1 = (vec4(unpack8((vui >> 4) & 0x0F0F0F0F)) - 8.0f) * d;
-
- buf_a[buf_idx ] = FLOAT_TYPE_VEC2(v0.xy);
- buf_a[buf_idx + 1] = FLOAT_TYPE_VEC2(v0.zw);
- buf_a[buf_idx + 8] = FLOAT_TYPE_VEC2(v1.xy);
- buf_a[buf_idx + 9] = FLOAT_TYPE_VEC2(v1.zw);
-#elif defined(DATA_A_Q4_1)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + 2 * row;
-
- const uint ib = idx / 4;
- const uint iqs = idx & 0x03;
-
- const float d = float(data_a_packed16[ib].d);
- const float m = float(data_a_packed16[ib].m);
- const uint vui = uint(data_a_packed16[ib].qs[2*iqs]) | (uint(data_a_packed16[ib].qs[2*iqs + 1]) << 16);
- const vec4 v0 = vec4(unpack8(vui & 0x0F0F0F0F)) * d + m;
- const vec4 v1 = vec4(unpack8((vui >> 4) & 0x0F0F0F0F)) * d + m;
-
- buf_a[buf_idx ] = FLOAT_TYPE_VEC2(v0.xy);
- buf_a[buf_idx + 1 ] = FLOAT_TYPE_VEC2(v0.zw);
- buf_a[buf_idx + 8 ] = FLOAT_TYPE_VEC2(v1.xy);
- buf_a[buf_idx + 9 ] = FLOAT_TYPE_VEC2(v1.zw);
-#elif defined(DATA_A_Q5_0)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row;
-
- const uint ib = idx / 8;
- const uint iqs = idx & 0x07;
-
- const float d = float(data_a_packed16[ib].d);
- const uint uint_qh = uint(data_a_packed16[ib].qh[1]) << 16 | uint(data_a_packed16[ib].qh[0]);
- const ivec2 qh0 = ivec2(((uint_qh >> 2*iqs) << 4) & 0x10, (uint_qh >> (2*iqs + 12)) & 0x10);
- const ivec2 qh1 = ivec2(((uint_qh >> (2*iqs + 1)) << 4) & 0x10, (uint_qh >> (2*iqs + 13)) & 0x10);
-
- const uint vui = uint(data_a_packed16[ib].qs[iqs]);
- const vec4 v = (vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y) - 16.0f) * d;
-
- buf_a[buf_idx ] = FLOAT_TYPE_VEC2(v.xz);
- buf_a[buf_idx + 8] = FLOAT_TYPE_VEC2(v.yw);
-#elif defined(DATA_A_Q5_1)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row;
-
- const uint ib = idx / 8;
- const uint iqs = idx & 0x07;
-
- const float d = float(data_a_packed16[ib].d);
- const float m = float(data_a_packed16[ib].m);
- const uint uint_qh = data_a_packed16[ib].qh;
- const ivec2 qh0 = ivec2(((uint_qh >> 2*iqs) << 4) & 0x10, (uint_qh >> (2*iqs + 12)) & 0x10);
- const ivec2 qh1 = ivec2(((uint_qh >> (2*iqs + 1)) << 4) & 0x10, (uint_qh >> (2*iqs + 13)) & 0x10);
-
- const uint vui = uint(data_a_packed16[ib].qs[iqs]);
- const vec4 v = vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y) * d + m;
-
- buf_a[buf_idx ] = FLOAT_TYPE_VEC2(v.xz);
- buf_a[buf_idx + 8] = FLOAT_TYPE_VEC2(v.yw);
-#elif defined(DATA_A_Q8_0)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 8;
- const uint iqs = idx & 0x07;
-
- const float d = float(data_a_packed16[ib].d);
- const i8vec2 v0 = unpack8(int32_t(data_a_packed16[ib].qs[2*iqs])).xy; // vec4 used due to #12147
- const i8vec2 v1 = unpack8(int32_t(data_a_packed16[ib].qs[2*iqs + 1])).xy;
- const vec4 v = vec4(v0.x, v0.y, v1.x, v1.y) * d;
-
- buf_a[buf_idx ] = FLOAT_TYPE_VEC2(v.xy);
- buf_a[buf_idx + 1] = FLOAT_TYPE_VEC2(v.zw);
-#elif defined(DATA_A_Q2_K)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 128; // 2 values per idx
- const uint iqs = idx % 128; // 0..127
-
- const uint qsi = (iqs / 64) * 32 + (iqs % 16) * 2; // 0,2,4..30
- const uint scalesi = iqs / 8; // 0..15
- const uint qsshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
-
- const uvec2 qs = uvec2(data_a[ib].qs[qsi], data_a[ib].qs[qsi + 1]);
- const uint scales = data_a[ib].scales[scalesi];
- const vec2 d = vec2(data_a[ib].d);
-
- const vec2 v = d.x * float(scales & 0xF) * vec2((qs >> qsshift) & 3) - d.y * float(scales >> 4);
-
- buf_a[buf_idx] = FLOAT_TYPE_VEC2(v.xy);
-#elif defined(DATA_A_Q3_K)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 128; // 2 values per idx
- const uint iqs = idx % 128; // 0..127
-
- const uint n = iqs / 64; // 0,1
- const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
- const uint hmi = (iqs % 16) * 2; // 0,2,4..30
- const uint j = (iqs % 64) / 4; // 0..3
- const uint is = iqs / 8; // 0..15
- const uint halfsplit = ((iqs % 64) / 16); // 0,1,2,3
- const uint qsshift = halfsplit * 2; // 0,2,4,6
- const uint m = 1 << (4 * n + halfsplit); // 1,2,4,8,16,32,64,128
-
- const int8_t us = int8_t(((data_a[ib].scales[is % 8] >> (4 * int(is / 8))) & 0xF)
- | (((data_a[ib].scales[8 + (is % 4)] >> (2 * int(is / 4))) & 3) << 4));
- const float dl = float(data_a[ib].d) * float(us - 32);
-
- buf_a[buf_idx] = FLOAT_TYPE_VEC2(dl * float(int8_t((data_a[ib].qs[qsi ] >> qsshift) & 3) - (((data_a[ib].hmask[hmi ] & m) != 0) ? 0 : 4)),
- dl * float(int8_t((data_a[ib].qs[qsi + 1] >> qsshift) & 3) - (((data_a[ib].hmask[hmi + 1] & m) != 0) ? 0 : 4)));
-#elif defined(DATA_A_Q4_K)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 128; // 2 values per idx
- const uint iqs = idx % 128; // 0..127
-
- const uint n = iqs / 32; // 0,1,2,3
- const uint b = (iqs % 32) / 16; // 0,1
- const uint is = 2 * n + b; // 0..7
- const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
-
- const vec2 loadd = vec2(data_a[ib].d);
-
- const uint scidx0 = (is < 4) ? is : (is + 4);
- const uint scidx1 = (is < 4) ? is : (is - 4);
- const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
- const uint scidxshift1 = (is < 4) ? 0 : 2;
- const uint mbidx0 = is + 4;
- const uint mbidx1 = (is < 4) ? is + 4 : is;
- const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
- const uint mbidxshift0 = (is < 4) ? 0 : 4;
- const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
- const uint mbidxshift1 = (is < 4) ? 0 : 2;
-
- const uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
- const uint8_t mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
-
- const float d = loadd.x * sc;
- const float m = -loadd.y * mbyte;
-
- buf_a[buf_idx] = FLOAT_TYPE_VEC2(fma(d, float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF), m),
- fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF), m));
-#elif defined(DATA_A_Q5_K)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 128; // 2 values per idx
- const uint iqs = idx % 128; // 0..127
-
- const uint n = iqs / 32; // 0,1,2,3
- const uint b = (iqs % 32) / 16; // 0,1
- const uint is = 2 * n + b; // 0..7
- const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
- const uint qhi = (iqs % 16) * 2; // 0,2,4..30
-
- const uint8_t hm = uint8_t(1 << (iqs / 16));
-
- const vec2 loadd = vec2(data_a[ib].d);
-
- const uint scidx0 = (is < 4) ? is : (is + 4);
- const uint scidx1 = (is < 4) ? is : (is - 4);
- const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
- const uint scidxshift1 = (is < 4) ? 0 : 2;
- const uint mbidx0 = is + 4;
- const uint mbidx1 = (is < 4) ? is + 4 : is;
- const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
- const uint mbidxshift0 = (is < 4) ? 0 : 4;
- const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
- const uint mbidxshift1 = (is < 4) ? 0 : 2;
-
- const uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
- const uint8_t mbyte = uint8_t(((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
-
- const float d = loadd.x * sc;
- const float m = -loadd.y * mbyte;
-
- buf_a[buf_idx] = FLOAT_TYPE_VEC2(fma(d, float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi ] & hm) != 0 ? 16 : 0), m),
- fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi + 1] & hm) != 0 ? 16 : 0), m));
-#elif defined(DATA_A_Q6_K)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 128; // 2 values per idx
- const uint iqs = idx % 128; // 0..127
-
- const uint n = iqs / 64; // 0,1
- const uint b = (iqs % 64) / 32; // 0,1
- const uint is_b = (iqs % 16) / 8; // 0,1
- const uint qhshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
- const uint is = 8 * n + qhshift + is_b; // 0..15
- const uint qsi = n * 64 + (iqs % 32) * 2; // 0,2,4..126
- const uint qhi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
-
- const float dscale = float(data_a[ib].d) * float(data_a[ib].scales[is]);
-
- buf_a[buf_idx] = FLOAT_TYPE_VEC2(dscale * float(int8_t(((data_a[ib].ql[qsi ] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi ] >> qhshift) & 3) << 4)) - 32),
- dscale * float(int8_t(((data_a[ib].ql[qsi + 1] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi + 1] >> qhshift) & 3) << 4)) - 32));
-#elif defined(DATA_A_IQ1_S)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 32; // 8 values per idx
- const uint ib32 = (idx % 32) / 4; // 0..7
- const uint ib8 = idx % 32;
-
- const float d = float(data_a[ib].d);
- const uint qh = data_a[ib].qh[ib32];
- const uint qs = data_a[ib].qs[ib8];
- const float dl = d * (2 * bitfieldExtract(qh, 12, 3) + 1);
- const float delta = ((qh & 0x8000) != 0) ? -IQ1S_DELTA : IQ1S_DELTA;
- const int16_t grid = int16_t(iq1s_grid[qs | (bitfieldExtract(qh, 3 * int(ib8 & 3), 3) << 8)]);
-
- [[unroll]] for (int k = 0; k < 4; ++k) {
- buf_a[buf_idx + k] = FLOAT_TYPE_VEC2(dl * (bitfieldExtract(grid, 4 * k , 2) + delta),
- dl * (bitfieldExtract(grid, 4 * k + 2, 2) + delta));
- }
-#elif defined(DATA_A_IQ1_M)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 32; // 8 values per idx
- const uint ib8 = idx % 32;
- const uint ib16 = ib8 / 2;
-
- const uint16_t[4] scales = data_a[ib].scales;
- const u16vec4 s = u16vec4(scales[0], scales[1], scales[2], scales[3]) >> 12;
- const float d = float(unpackHalf2x16(s.x | (s.y << 4) | (s.z << 8) | (s.w << 12)).x);
- const uint sc = scales[ib8 / 8];
- const uint qs = data_a[ib].qs[ib8];
- const uint qh = data_a[ib].qh[ib16] >> (4 * (ib8 & 1));
- const float dl = d * (2 * bitfieldExtract(sc, 3 * int(ib16 & 3), 3) + 1);
- const float delta = ((qh & 8) != 0) ? -IQ1M_DELTA : IQ1M_DELTA;
- const int16_t grid = int16_t(iq1s_grid[qs | ((qh & 7) << 8)]);
-
- [[unroll]] for (int k = 0; k < 4; ++k) {
- buf_a[buf_idx + k] = FLOAT_TYPE_VEC2(dl * (bitfieldExtract(grid, 4 * k , 2) + delta),
- dl * (bitfieldExtract(grid, 4 * k + 2, 2) + delta));
- }
-#elif defined(DATA_A_IQ2_XXS)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 32; // 8 values per idx
- const uint ib32 = (idx % 32) / 4; // 0..7
- const uint ib8 = idx % 4;
-
- const float d = float(data_a[ib].d);
- const uint qs = data_a[ib].qs[8 * ib32 + ib8];
- const uint signs = pack32(u8vec4(
- data_a[ib].qs[8*ib32 + 4],
- data_a[ib].qs[8*ib32 + 5],
- data_a[ib].qs[8*ib32 + 6],
- data_a[ib].qs[8*ib32 + 7]
- ));
- const FLOAT_TYPE db = FLOAT_TYPE(d * 0.25 * (0.5 + (signs >> 28)));
- const uint32_t sign7 = bitfieldExtract(signs, 7 * int(ib8), 7);
- const uint sign = sign7 | (bitCount(sign7) << 7);
- const uvec2 grid = iq2xxs_grid[qs];
- const vec4 grid0 = vec4(unpack8(grid.x));
- const vec4 grid1 = vec4(unpack8(grid.y));
-
- buf_a[buf_idx ] = db * FLOAT_TYPE_VEC2((sign & 1) != 0 ? -grid0.x : grid0.x,
- (sign & 2) != 0 ? -grid0.y : grid0.y);
- buf_a[buf_idx + 1] = db * FLOAT_TYPE_VEC2((sign & 4) != 0 ? -grid0.z : grid0.z,
- (sign & 8) != 0 ? -grid0.w : grid0.w);
- buf_a[buf_idx + 2] = db * FLOAT_TYPE_VEC2((sign & 16) != 0 ? -grid1.x : grid1.x,
- (sign & 32) != 0 ? -grid1.y : grid1.y);
- buf_a[buf_idx + 3] = db * FLOAT_TYPE_VEC2((sign & 64) != 0 ? -grid1.z : grid1.z,
- (sign & 128) != 0 ? -grid1.w : grid1.w);
-#elif defined(DATA_A_IQ2_XS)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 32; // 8 values per idx
- const uint ib32 = (idx % 32) / 4; // 0..7
- const uint ib8 = idx % 4; // 0..3
-
- const float d = float(data_a[ib].d);
- const uint scale = (data_a[ib].scales[ib32] >> (2 * (ib8 & 2))) & 0xf;
- const FLOAT_TYPE db = FLOAT_TYPE(d * 0.25 * (0.5 + scale));
- const uint qs = data_a[ib].qs[4 * ib32 + ib8];
- const uint sign7 = qs >> 9;
- const uint sign = sign7 | (bitCount(sign7) << 7);
- const uvec2 grid = iq2xs_grid[qs & 511];
- const vec4 grid0 = vec4(unpack8(grid.x));
- const vec4 grid1 = vec4(unpack8(grid.y));
-
- buf_a[buf_idx ] = db * FLOAT_TYPE_VEC2((sign & 1) != 0 ? -grid0.x : grid0.x,
- (sign & 2) != 0 ? -grid0.y : grid0.y);
- buf_a[buf_idx + 1] = db * FLOAT_TYPE_VEC2((sign & 4) != 0 ? -grid0.z : grid0.z,
- (sign & 8) != 0 ? -grid0.w : grid0.w);
- buf_a[buf_idx + 2] = db * FLOAT_TYPE_VEC2((sign & 16) != 0 ? -grid1.x : grid1.x,
- (sign & 32) != 0 ? -grid1.y : grid1.y);
- buf_a[buf_idx + 3] = db * FLOAT_TYPE_VEC2((sign & 64) != 0 ? -grid1.z : grid1.z,
- (sign & 128) != 0 ? -grid1.w : grid1.w);
-#elif defined(DATA_A_IQ2_S)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 32; // 8 values per idx
- const uint ib8 = idx % 32; // 0..31
- const uint ib32 = ib8 / 4; // 0..7
-
- const uint scale = (data_a[ib].scales[ib32] >> (2 * (ib8 & 2))) & 0xf;
- const uint qs = data_a[ib].qs[ib8];
- const uint qh = data_a[ib].qh[ib32];
- const uint qhshift = 2 * (ib8 % 4);
- const uint sign = data_a[ib].qs[QUANT_K / 8 + ib8];
-
- const float d = float(data_a[ib].d);
- const FLOAT_TYPE db = FLOAT_TYPE(d * 0.25 * (0.5 + scale));
- const uvec2 grid = iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)];
- const vec4 grid0 = vec4(unpack8(grid.x));
- const vec4 grid1 = vec4(unpack8(grid.y));
-
- buf_a[buf_idx ] = db * FLOAT_TYPE_VEC2((sign & 1) != 0 ? -grid0.x : grid0.x,
- (sign & 2) != 0 ? -grid0.y : grid0.y);
- buf_a[buf_idx + 1] = db * FLOAT_TYPE_VEC2((sign & 4) != 0 ? -grid0.z : grid0.z,
- (sign & 8) != 0 ? -grid0.w : grid0.w);
- buf_a[buf_idx + 2] = db * FLOAT_TYPE_VEC2((sign & 16) != 0 ? -grid1.x : grid1.x,
- (sign & 32) != 0 ? -grid1.y : grid1.y);
- buf_a[buf_idx + 3] = db * FLOAT_TYPE_VEC2((sign & 64) != 0 ? -grid1.z : grid1.z,
- (sign & 128) != 0 ? -grid1.w : grid1.w);
-#elif defined(DATA_A_IQ3_XXS)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 64; // 4 values per idx
- const uint iqs = idx % 64; // 0..63
- const uint is = QUANT_K / 4 + 4 * (iqs / 8); // 8 values
-
- const float d = float(data_a[ib].d);
- const uint qs = data_a[ib].qs[iqs];
- const uint signs = pack32(u8vec4(
- data_a[ib].qs[is+0],
- data_a[ib].qs[is+1],
- data_a[ib].qs[is+2],
- data_a[ib].qs[is+3]
- ));
- const float db = d * 0.5 * (0.5 + (signs >> 28));
- const uint32_t sign7 = bitfieldExtract(signs, 7 * (int(iqs / 2) % 4), 7);
- const uint sign = (sign7 | (bitCount(sign7) << 7)) >> (4 * (idx % 2));
- const uint grid = iq3xxs_grid[qs];
- const vec4 v = db * vec4(unpack8(grid));
-
- buf_a[buf_idx ] = FLOAT_TYPE_VEC2((sign & 1) != 0 ? -v.x : v.x,
- (sign & 2) != 0 ? -v.y : v.y);
- buf_a[buf_idx + 1] = FLOAT_TYPE_VEC2((sign & 4) != 0 ? -v.z : v.z,
- (sign & 8) != 0 ? -v.w : v.w);
-#elif defined(DATA_A_IQ3_S)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 64; // 4 values per idx
- const uint iqs = idx % 64; // 0..63
- const uint iqh = iqs / 8;
-
- const float d = float(data_a[ib].d);
- const uint qs = data_a[ib].qs[iqs];
- const uint qh = data_a[ib].qh[iqh];
- const int8_t sign = int8_t(data_a[ib].signs[iqs / 2] >> (4 * (idx % 2)));
- const uint scale = data_a[ib].scales[iqs / 16];
- const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(sign << 1, sign)));
- const float db = d * (1 + 2 * ((scale >> (4 * (iqh & 1))) & 0xf));
- const uint32_t grid = iq3s_grid[qs | ((qh << (8 - (iqs % 8))) & 256)];
- const vec4 v = db * vec4(unpack8(grid));
-
- buf_a[buf_idx ] = FLOAT_TYPE_VEC2((sign & 1) != 0 ? -v.x : v.x,
- (sign & 2) != 0 ? -v.y : v.y);
- buf_a[buf_idx + 1] = FLOAT_TYPE_VEC2((sign & 4) != 0 ? -v.z : v.z,
- (sign & 8) != 0 ? -v.w : v.w);
-#elif defined(DATA_A_IQ4_XS)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
-
- const uint ib = idx / 128; // 2 values per idx
- const uint ib32 = (idx % 128) / 16; // 0..7
- const uint iq = 16 * ib32 + 2 * (idx % 8);
-
- const uint sl = (data_a[ib].scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF;
- const uint sh = ((data_a[ib].scales_h) >> (2 * ib32)) & 3;
- const uint qshift = (idx & 8) >> 1;
- u8vec2 qs = u8vec2(data_a[ib].qs[iq], data_a[ib].qs[iq + 1]);
- qs = (qs >> qshift) & uint8_t(0xF);
-
- const float d = float(data_a[ib].d);
- const vec2 v = d * float(int(sl | (sh << 4)) - 32) * vec2(kvalues_iq4nl[qs.x], kvalues_iq4nl[qs.y]);
-
- buf_a[buf_idx ] = FLOAT_TYPE_VEC2(v.xy);
-#elif defined(DATA_A_IQ4_NL)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row;
-
- const uint ib = idx / 8;
- const uint iqs = idx & 0x07;
-
- const FLOAT_TYPE d = FLOAT_TYPE(data_a_packed16[ib].d);
- const uint vui = uint(data_a_packed16[ib].qs[iqs]);
-
- buf_a[buf_idx ] = d * FLOAT_TYPE_VEC2(kvalues_iq4nl[vui & 0xF],
- kvalues_iq4nl[bitfieldExtract(vui, 8, 4)]);
- buf_a[buf_idx + 8] = d * FLOAT_TYPE_VEC2(kvalues_iq4nl[bitfieldExtract(vui, 4, 4)],
- kvalues_iq4nl[vui >> 12]);
-#elif defined(DATA_A_MXFP4)
- const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
- const uint buf_idx = col * SHMEM_STRIDE + row;
-
- const uint ib = idx / 8;
- const uint iqs = (idx & 0x07) * 2;
-
- const float d = e8m0_to_fp32(data_a[ib].e);
- const uint vui = uint(data_a[ib].qs[iqs]);
- const uint vui2 = uint(data_a[ib].qs[iqs+1]);
-
- buf_a[buf_idx ] = FLOAT_TYPE_VEC2(kvalues_mxfp4[vui & 0xF] * d,
- kvalues_mxfp4[vui2 & 0xF] * d);
- buf_a[buf_idx + 8] = FLOAT_TYPE_VEC2(kvalues_mxfp4[vui >> 4] * d,
- kvalues_mxfp4[vui2 >> 4] * d);
-#endif
-}
-
-#if !defined(MUL_MAT_ID)
-void load_b_to_shmem(const uint pos_b, const uint row, const uint col, const uint idx_n, const uint block, const uint end_k) {
-#if LOAD_VEC_B == 8
- // Not supported for b_type bf16 because bf16mat2x4 does not exist
- const uint idx = pos_b + col * p.stride_b / LOAD_VEC_B + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_B / 2;
- FLOAT_TYPE_VEC8 bb = FLOAT_TYPE_VEC8(data_b[idx]);
- buf_b[buf_idx + 0] = bb[0].xy;
- buf_b[buf_idx + 1] = bb[0].zw;
- buf_b[buf_idx + 2] = bb[1].xy;
- buf_b[buf_idx + 3] = bb[1].zw;
-#elif LOAD_VEC_B == 4
- const uint idx = pos_b + col * p.stride_b / LOAD_VEC_B + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_B / 2;
-#if defined(DATA_B_BF16)
- FLOAT_TYPE_VEC4 bb = FLOAT_TYPE_VEC4(TO_FLOAT_TYPE(data_b[idx]));
-#else
- FLOAT_TYPE_VEC4 bb = FLOAT_TYPE_VEC4(data_b[idx]);
-#endif
- buf_b[buf_idx + 0] = bb.xy;
- buf_b[buf_idx + 1] = bb.zw;
-#else // LOAD_VEC_BATCH_B == 2
- const uint idx = pos_b + col * p.stride_b + row * 2;
- const uint buf_idx = col * SHMEM_STRIDE + row;
- if (idx_n < p.N && block + row * 2 + 1 < end_k) {
- buf_b[buf_idx] = FLOAT_TYPE_VEC2(TO_FLOAT_TYPE(data_b[idx]),
- TO_FLOAT_TYPE(data_b[idx + 1]));
- } else if (idx_n < p.N && block + row * 2 < end_k) {
- buf_b[buf_idx] = FLOAT_TYPE_VEC2(TO_FLOAT_TYPE(data_b[idx]), 0.0f);
- } else {
- buf_b[buf_idx] = FLOAT_TYPE_VEC2(0.0f);
- }
-#endif
-}
-#else
-void load_b_to_shmem(const uint pos_b, const uint row, const uint col, const uint ic, const uint _ne1, const uint block, const uint end_k) {
-#if LOAD_VEC_B == 8
- // Not supported for b_type bf16 because bf16mat2x4 does not exist
- const u16vec2 row_idx = row_ids[col];
- const uint idx = pos_b + row_idx.y * p.batch_stride_b / LOAD_VEC_B + (row_idx.x % p.ne11) * p.stride_b / LOAD_VEC_B + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_B / 2;
- FLOAT_TYPE_VEC8 bb = FLOAT_TYPE_VEC8(data_b[idx]);
- buf_b[buf_idx + 0] = bb[0].xy;
- buf_b[buf_idx + 1] = bb[0].zw;
- buf_b[buf_idx + 2] = bb[1].xy;
- buf_b[buf_idx + 3] = bb[1].zw;
-#elif LOAD_VEC_B == 4
- const u16vec2 row_idx = row_ids[col];
- const uint idx = pos_b + row_idx.y * p.batch_stride_b / LOAD_VEC_B + (row_idx.x % p.ne11) * p.stride_b / LOAD_VEC_B + row;
- const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_B / 2;
-#if defined(DATA_B_BF16)
- FLOAT_TYPE_VEC4 bb = FLOAT_TYPE_VEC4(TO_FLOAT_TYPE(data_b[idx]));
-#else
- FLOAT_TYPE_VEC4 bb = FLOAT_TYPE_VEC4(data_b[idx]);
-#endif
- buf_b[buf_idx + 0] = bb.xy;
- buf_b[buf_idx + 1] = bb.zw;
-#else // LOAD_VEC_BATCH_B == 2
- const uint row_i = ic * BN + col;
- const uint buf_idx = col * SHMEM_STRIDE + row;
- if (row_i < _ne1 && block + row * 2 + 1 < end_k) {
- const u16vec2 row_idx = row_ids[col];
- const uint idx = pos_b + row_idx.y * p.batch_stride_b + (row_idx.x % p.ne11) * p.stride_b + row * 2;
- buf_b[buf_idx] = FLOAT_TYPE_VEC2(TO_FLOAT_TYPE(data_b[idx]),
- TO_FLOAT_TYPE(data_b[idx + 1]));
- } else if (row_i < _ne1 && block + row * 2 < end_k) {
- const u16vec2 row_idx = row_ids[col];
- const uint idx = pos_b + row_idx.y * p.batch_stride_b + (row_idx.x % p.ne11) * p.stride_b + row * 2;
- buf_b[buf_idx] = FLOAT_TYPE_VEC2(TO_FLOAT_TYPE(data_b[idx]), 0.0f);
- } else {
- buf_b[buf_idx] = FLOAT_TYPE_VEC2(0.0f);
- }
-#endif
-}
-#endif
--- /dev/null
+void load_a_to_shmem(const uint pos_a, const uint row, const uint col, const uint idx_m, const uint block, const uint end_k) {
+#if defined(DATA_A_F32) || defined(DATA_A_F16)
+#if LOAD_VEC_A == 8
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+ FLOAT_TYPE_VEC8 aa = FLOAT_TYPE_VEC8(data_a[idx]);
+ buf_a[buf_idx ] = aa[0].xy;
+ buf_a[buf_idx + 1] = aa[0].zw;
+ buf_a[buf_idx + 2] = aa[1].xy;
+ buf_a[buf_idx + 3] = aa[1].zw;
+#elif LOAD_VEC_A == 4
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+ FLOAT_TYPE_VEC4 aa = FLOAT_TYPE_VEC4(data_a[idx]);
+ buf_a[buf_idx ] = aa.xy;
+ buf_a[buf_idx + 1] = aa.zw;
+#else // LOAD_VEC_BATCH_A == 2
+ const uint idx = pos_a + col * p.stride_a + row * 2;
+ const uint buf_idx = col * SHMEM_STRIDE + row;
+ if (idx_m < p.M && block + row * 2 + 1 < end_k) {
+ buf_a[buf_idx] = FLOAT_TYPE_VEC2(data_a[idx],
+ data_a[idx + 1]);
+ } else if (idx_m < p.M && block + row * 2 < end_k) {
+ buf_a[buf_idx] = FLOAT_TYPE_VEC2(data_a[idx], 0.0f);
+ } else {
+ buf_a[buf_idx] = FLOAT_TYPE_VEC2(0.0f);
+ }
+#endif
+#elif defined(DATA_A_BF16)
+#if LOAD_VEC_A == 4
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+ FLOAT_TYPE_VEC4 aa = FLOAT_TYPE_VEC4(TO_FLOAT_TYPE(data_a[idx]));
+ buf_a[buf_idx ] = aa.xy;
+ buf_a[buf_idx + 1] = aa.zw;
+#else // LOAD_VEC_BATCH_A == 2
+ const uint idx = pos_a + col * p.stride_a + row * 2;
+ const uint buf_idx = col * SHMEM_STRIDE + row;
+ if (idx_m < p.M && block + row * 2 + 1 < end_k) {
+ buf_a[buf_idx] = FLOAT_TYPE_VEC2(TO_FLOAT_TYPE(data_a[idx]),
+ TO_FLOAT_TYPE(data_a[idx + 1]));
+ } else if (idx_m < p.M && block + row * 2 < end_k) {
+ buf_a[buf_idx] = FLOAT_TYPE_VEC2(TO_FLOAT_TYPE(data_a[idx]), 0.0f);
+ } else {
+ buf_a[buf_idx] = FLOAT_TYPE_VEC2(0.0f);
+ }
+#endif
+#elif defined(DATA_A_Q4_0)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + 2 * row;
+
+ const uint ib = idx / 4;
+ const uint iqs = idx & 0x03;
+
+ const float d = float(data_a_packed16[ib].d);
+ const uint vui = uint(data_a_packed16[ib].qs[2*iqs]) | (uint(data_a_packed16[ib].qs[2*iqs + 1]) << 16);
+ const vec4 v0 = (vec4(unpack8(vui & 0x0F0F0F0F)) - 8.0f) * d;
+ const vec4 v1 = (vec4(unpack8((vui >> 4) & 0x0F0F0F0F)) - 8.0f) * d;
+
+ buf_a[buf_idx ] = FLOAT_TYPE_VEC2(v0.xy);
+ buf_a[buf_idx + 1] = FLOAT_TYPE_VEC2(v0.zw);
+ buf_a[buf_idx + 8] = FLOAT_TYPE_VEC2(v1.xy);
+ buf_a[buf_idx + 9] = FLOAT_TYPE_VEC2(v1.zw);
+#elif defined(DATA_A_Q4_1)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + 2 * row;
+
+ const uint ib = idx / 4;
+ const uint iqs = idx & 0x03;
+
+ const float d = float(data_a_packed16[ib].d);
+ const float m = float(data_a_packed16[ib].m);
+ const uint vui = uint(data_a_packed16[ib].qs[2*iqs]) | (uint(data_a_packed16[ib].qs[2*iqs + 1]) << 16);
+ const vec4 v0 = vec4(unpack8(vui & 0x0F0F0F0F)) * d + m;
+ const vec4 v1 = vec4(unpack8((vui >> 4) & 0x0F0F0F0F)) * d + m;
+
+ buf_a[buf_idx ] = FLOAT_TYPE_VEC2(v0.xy);
+ buf_a[buf_idx + 1 ] = FLOAT_TYPE_VEC2(v0.zw);
+ buf_a[buf_idx + 8 ] = FLOAT_TYPE_VEC2(v1.xy);
+ buf_a[buf_idx + 9 ] = FLOAT_TYPE_VEC2(v1.zw);
+#elif defined(DATA_A_Q5_0)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row;
+
+ const uint ib = idx / 8;
+ const uint iqs = idx & 0x07;
+
+ const float d = float(data_a_packed16[ib].d);
+ const uint uint_qh = uint(data_a_packed16[ib].qh[1]) << 16 | uint(data_a_packed16[ib].qh[0]);
+ const ivec2 qh0 = ivec2(((uint_qh >> 2*iqs) << 4) & 0x10, (uint_qh >> (2*iqs + 12)) & 0x10);
+ const ivec2 qh1 = ivec2(((uint_qh >> (2*iqs + 1)) << 4) & 0x10, (uint_qh >> (2*iqs + 13)) & 0x10);
+
+ const uint vui = uint(data_a_packed16[ib].qs[iqs]);
+ const vec4 v = (vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y) - 16.0f) * d;
+
+ buf_a[buf_idx ] = FLOAT_TYPE_VEC2(v.xz);
+ buf_a[buf_idx + 8] = FLOAT_TYPE_VEC2(v.yw);
+#elif defined(DATA_A_Q5_1)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row;
+
+ const uint ib = idx / 8;
+ const uint iqs = idx & 0x07;
+
+ const float d = float(data_a_packed16[ib].d);
+ const float m = float(data_a_packed16[ib].m);
+ const uint uint_qh = data_a_packed16[ib].qh;
+ const ivec2 qh0 = ivec2(((uint_qh >> 2*iqs) << 4) & 0x10, (uint_qh >> (2*iqs + 12)) & 0x10);
+ const ivec2 qh1 = ivec2(((uint_qh >> (2*iqs + 1)) << 4) & 0x10, (uint_qh >> (2*iqs + 13)) & 0x10);
+
+ const uint vui = uint(data_a_packed16[ib].qs[iqs]);
+ const vec4 v = vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y) * d + m;
+
+ buf_a[buf_idx ] = FLOAT_TYPE_VEC2(v.xz);
+ buf_a[buf_idx + 8] = FLOAT_TYPE_VEC2(v.yw);
+#elif defined(DATA_A_Q8_0)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 8;
+ const uint iqs = idx & 0x07;
+
+ const float d = float(data_a_packed16[ib].d);
+ const i8vec2 v0 = unpack8(int32_t(data_a_packed16[ib].qs[2*iqs])).xy; // vec4 used due to #12147
+ const i8vec2 v1 = unpack8(int32_t(data_a_packed16[ib].qs[2*iqs + 1])).xy;
+ const vec4 v = vec4(v0.x, v0.y, v1.x, v1.y) * d;
+
+ buf_a[buf_idx ] = FLOAT_TYPE_VEC2(v.xy);
+ buf_a[buf_idx + 1] = FLOAT_TYPE_VEC2(v.zw);
+#elif defined(DATA_A_Q2_K)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint qsi = (iqs / 64) * 32 + (iqs % 16) * 2; // 0,2,4..30
+ const uint scalesi = iqs / 8; // 0..15
+ const uint qsshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
+
+ const uvec2 qs = uvec2(data_a[ib].qs[qsi], data_a[ib].qs[qsi + 1]);
+ const uint scales = data_a[ib].scales[scalesi];
+ const vec2 d = vec2(data_a[ib].d);
+
+ const vec2 v = d.x * float(scales & 0xF) * vec2((qs >> qsshift) & 3) - d.y * float(scales >> 4);
+
+ buf_a[buf_idx] = FLOAT_TYPE_VEC2(v.xy);
+#elif defined(DATA_A_Q3_K)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint n = iqs / 64; // 0,1
+ const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
+ const uint hmi = (iqs % 16) * 2; // 0,2,4..30
+ const uint j = (iqs % 64) / 4; // 0..3
+ const uint is = iqs / 8; // 0..15
+ const uint halfsplit = ((iqs % 64) / 16); // 0,1,2,3
+ const uint qsshift = halfsplit * 2; // 0,2,4,6
+ const uint m = 1 << (4 * n + halfsplit); // 1,2,4,8,16,32,64,128
+
+ const int8_t us = int8_t(((data_a[ib].scales[is % 8] >> (4 * int(is / 8))) & 0xF)
+ | (((data_a[ib].scales[8 + (is % 4)] >> (2 * int(is / 4))) & 3) << 4));
+ const float dl = float(data_a[ib].d) * float(us - 32);
+
+ buf_a[buf_idx] = FLOAT_TYPE_VEC2(dl * float(int8_t((data_a[ib].qs[qsi ] >> qsshift) & 3) - (((data_a[ib].hmask[hmi ] & m) != 0) ? 0 : 4)),
+ dl * float(int8_t((data_a[ib].qs[qsi + 1] >> qsshift) & 3) - (((data_a[ib].hmask[hmi + 1] & m) != 0) ? 0 : 4)));
+#elif defined(DATA_A_Q4_K)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint n = iqs / 32; // 0,1,2,3
+ const uint b = (iqs % 32) / 16; // 0,1
+ const uint is = 2 * n + b; // 0..7
+ const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
+
+ const vec2 loadd = vec2(data_a[ib].d);
+
+ const uint scidx0 = (is < 4) ? is : (is + 4);
+ const uint scidx1 = (is < 4) ? is : (is - 4);
+ const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint scidxshift1 = (is < 4) ? 0 : 2;
+ const uint mbidx0 = is + 4;
+ const uint mbidx1 = (is < 4) ? is + 4 : is;
+ const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ const uint mbidxshift0 = (is < 4) ? 0 : 4;
+ const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint mbidxshift1 = (is < 4) ? 0 : 2;
+
+ const uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+ const uint8_t mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const float d = loadd.x * sc;
+ const float m = -loadd.y * mbyte;
+
+ buf_a[buf_idx] = FLOAT_TYPE_VEC2(fma(d, float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF), m),
+ fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF), m));
+#elif defined(DATA_A_Q5_K)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint n = iqs / 32; // 0,1,2,3
+ const uint b = (iqs % 32) / 16; // 0,1
+ const uint is = 2 * n + b; // 0..7
+ const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
+ const uint qhi = (iqs % 16) * 2; // 0,2,4..30
+
+ const uint8_t hm = uint8_t(1 << (iqs / 16));
+
+ const vec2 loadd = vec2(data_a[ib].d);
+
+ const uint scidx0 = (is < 4) ? is : (is + 4);
+ const uint scidx1 = (is < 4) ? is : (is - 4);
+ const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint scidxshift1 = (is < 4) ? 0 : 2;
+ const uint mbidx0 = is + 4;
+ const uint mbidx1 = (is < 4) ? is + 4 : is;
+ const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ const uint mbidxshift0 = (is < 4) ? 0 : 4;
+ const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint mbidxshift1 = (is < 4) ? 0 : 2;
+
+ const uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+ const uint8_t mbyte = uint8_t(((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const float d = loadd.x * sc;
+ const float m = -loadd.y * mbyte;
+
+ buf_a[buf_idx] = FLOAT_TYPE_VEC2(fma(d, float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi ] & hm) != 0 ? 16 : 0), m),
+ fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi + 1] & hm) != 0 ? 16 : 0), m));
+#elif defined(DATA_A_Q6_K)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint n = iqs / 64; // 0,1
+ const uint b = (iqs % 64) / 32; // 0,1
+ const uint is_b = (iqs % 16) / 8; // 0,1
+ const uint qhshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
+ const uint is = 8 * n + qhshift + is_b; // 0..15
+ const uint qsi = n * 64 + (iqs % 32) * 2; // 0,2,4..126
+ const uint qhi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
+
+ const float dscale = float(data_a[ib].d) * float(data_a[ib].scales[is]);
+
+ buf_a[buf_idx] = FLOAT_TYPE_VEC2(dscale * float(int8_t(((data_a[ib].ql[qsi ] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi ] >> qhshift) & 3) << 4)) - 32),
+ dscale * float(int8_t(((data_a[ib].ql[qsi + 1] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi + 1] >> qhshift) & 3) << 4)) - 32));
+#elif defined(DATA_A_IQ1_S)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 32; // 8 values per idx
+ const uint ib32 = (idx % 32) / 4; // 0..7
+ const uint ib8 = idx % 32;
+
+ const float d = float(data_a[ib].d);
+ const uint qh = data_a[ib].qh[ib32];
+ const uint qs = data_a[ib].qs[ib8];
+ const float dl = d * (2 * bitfieldExtract(qh, 12, 3) + 1);
+ const float delta = ((qh & 0x8000) != 0) ? -IQ1S_DELTA : IQ1S_DELTA;
+ const int16_t grid = int16_t(iq1s_grid[qs | (bitfieldExtract(qh, 3 * int(ib8 & 3), 3) << 8)]);
+
+ [[unroll]] for (int k = 0; k < 4; ++k) {
+ buf_a[buf_idx + k] = FLOAT_TYPE_VEC2(dl * (bitfieldExtract(grid, 4 * k , 2) + delta),
+ dl * (bitfieldExtract(grid, 4 * k + 2, 2) + delta));
+ }
+#elif defined(DATA_A_IQ1_M)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 32; // 8 values per idx
+ const uint ib8 = idx % 32;
+ const uint ib16 = ib8 / 2;
+
+ const uint16_t[4] scales = data_a[ib].scales;
+ const u16vec4 s = u16vec4(scales[0], scales[1], scales[2], scales[3]) >> 12;
+ const float d = float(unpackHalf2x16(s.x | (s.y << 4) | (s.z << 8) | (s.w << 12)).x);
+ const uint sc = scales[ib8 / 8];
+ const uint qs = data_a[ib].qs[ib8];
+ const uint qh = data_a[ib].qh[ib16] >> (4 * (ib8 & 1));
+ const float dl = d * (2 * bitfieldExtract(sc, 3 * int(ib16 & 3), 3) + 1);
+ const float delta = ((qh & 8) != 0) ? -IQ1M_DELTA : IQ1M_DELTA;
+ const int16_t grid = int16_t(iq1s_grid[qs | ((qh & 7) << 8)]);
+
+ [[unroll]] for (int k = 0; k < 4; ++k) {
+ buf_a[buf_idx + k] = FLOAT_TYPE_VEC2(dl * (bitfieldExtract(grid, 4 * k , 2) + delta),
+ dl * (bitfieldExtract(grid, 4 * k + 2, 2) + delta));
+ }
+#elif defined(DATA_A_IQ2_XXS)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 32; // 8 values per idx
+ const uint ib32 = (idx % 32) / 4; // 0..7
+ const uint ib8 = idx % 4;
+
+ const float d = float(data_a[ib].d);
+ const uint qs = data_a[ib].qs[8 * ib32 + ib8];
+ const uint signs = pack32(u8vec4(
+ data_a[ib].qs[8*ib32 + 4],
+ data_a[ib].qs[8*ib32 + 5],
+ data_a[ib].qs[8*ib32 + 6],
+ data_a[ib].qs[8*ib32 + 7]
+ ));
+ const FLOAT_TYPE db = FLOAT_TYPE(d * 0.25 * (0.5 + (signs >> 28)));
+ const uint32_t sign7 = bitfieldExtract(signs, 7 * int(ib8), 7);
+ const uint sign = sign7 | (bitCount(sign7) << 7);
+ const uvec2 grid = iq2xxs_grid[qs];
+ const vec4 grid0 = vec4(unpack8(grid.x));
+ const vec4 grid1 = vec4(unpack8(grid.y));
+
+ buf_a[buf_idx ] = db * FLOAT_TYPE_VEC2((sign & 1) != 0 ? -grid0.x : grid0.x,
+ (sign & 2) != 0 ? -grid0.y : grid0.y);
+ buf_a[buf_idx + 1] = db * FLOAT_TYPE_VEC2((sign & 4) != 0 ? -grid0.z : grid0.z,
+ (sign & 8) != 0 ? -grid0.w : grid0.w);
+ buf_a[buf_idx + 2] = db * FLOAT_TYPE_VEC2((sign & 16) != 0 ? -grid1.x : grid1.x,
+ (sign & 32) != 0 ? -grid1.y : grid1.y);
+ buf_a[buf_idx + 3] = db * FLOAT_TYPE_VEC2((sign & 64) != 0 ? -grid1.z : grid1.z,
+ (sign & 128) != 0 ? -grid1.w : grid1.w);
+#elif defined(DATA_A_IQ2_XS)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 32; // 8 values per idx
+ const uint ib32 = (idx % 32) / 4; // 0..7
+ const uint ib8 = idx % 4; // 0..3
+
+ const float d = float(data_a[ib].d);
+ const uint scale = (data_a[ib].scales[ib32] >> (2 * (ib8 & 2))) & 0xf;
+ const FLOAT_TYPE db = FLOAT_TYPE(d * 0.25 * (0.5 + scale));
+ const uint qs = data_a[ib].qs[4 * ib32 + ib8];
+ const uint sign7 = qs >> 9;
+ const uint sign = sign7 | (bitCount(sign7) << 7);
+ const uvec2 grid = iq2xs_grid[qs & 511];
+ const vec4 grid0 = vec4(unpack8(grid.x));
+ const vec4 grid1 = vec4(unpack8(grid.y));
+
+ buf_a[buf_idx ] = db * FLOAT_TYPE_VEC2((sign & 1) != 0 ? -grid0.x : grid0.x,
+ (sign & 2) != 0 ? -grid0.y : grid0.y);
+ buf_a[buf_idx + 1] = db * FLOAT_TYPE_VEC2((sign & 4) != 0 ? -grid0.z : grid0.z,
+ (sign & 8) != 0 ? -grid0.w : grid0.w);
+ buf_a[buf_idx + 2] = db * FLOAT_TYPE_VEC2((sign & 16) != 0 ? -grid1.x : grid1.x,
+ (sign & 32) != 0 ? -grid1.y : grid1.y);
+ buf_a[buf_idx + 3] = db * FLOAT_TYPE_VEC2((sign & 64) != 0 ? -grid1.z : grid1.z,
+ (sign & 128) != 0 ? -grid1.w : grid1.w);
+#elif defined(DATA_A_IQ2_S)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 32; // 8 values per idx
+ const uint ib8 = idx % 32; // 0..31
+ const uint ib32 = ib8 / 4; // 0..7
+
+ const uint scale = (data_a[ib].scales[ib32] >> (2 * (ib8 & 2))) & 0xf;
+ const uint qs = data_a[ib].qs[ib8];
+ const uint qh = data_a[ib].qh[ib32];
+ const uint qhshift = 2 * (ib8 % 4);
+ const uint sign = data_a[ib].qs[QUANT_K / 8 + ib8];
+
+ const float d = float(data_a[ib].d);
+ const FLOAT_TYPE db = FLOAT_TYPE(d * 0.25 * (0.5 + scale));
+ const uvec2 grid = iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)];
+ const vec4 grid0 = vec4(unpack8(grid.x));
+ const vec4 grid1 = vec4(unpack8(grid.y));
+
+ buf_a[buf_idx ] = db * FLOAT_TYPE_VEC2((sign & 1) != 0 ? -grid0.x : grid0.x,
+ (sign & 2) != 0 ? -grid0.y : grid0.y);
+ buf_a[buf_idx + 1] = db * FLOAT_TYPE_VEC2((sign & 4) != 0 ? -grid0.z : grid0.z,
+ (sign & 8) != 0 ? -grid0.w : grid0.w);
+ buf_a[buf_idx + 2] = db * FLOAT_TYPE_VEC2((sign & 16) != 0 ? -grid1.x : grid1.x,
+ (sign & 32) != 0 ? -grid1.y : grid1.y);
+ buf_a[buf_idx + 3] = db * FLOAT_TYPE_VEC2((sign & 64) != 0 ? -grid1.z : grid1.z,
+ (sign & 128) != 0 ? -grid1.w : grid1.w);
+#elif defined(DATA_A_IQ3_XXS)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 64; // 4 values per idx
+ const uint iqs = idx % 64; // 0..63
+ const uint is = QUANT_K / 4 + 4 * (iqs / 8); // 8 values
+
+ const float d = float(data_a[ib].d);
+ const uint qs = data_a[ib].qs[iqs];
+ const uint signs = pack32(u8vec4(
+ data_a[ib].qs[is+0],
+ data_a[ib].qs[is+1],
+ data_a[ib].qs[is+2],
+ data_a[ib].qs[is+3]
+ ));
+ const float db = d * 0.5 * (0.5 + (signs >> 28));
+ const uint32_t sign7 = bitfieldExtract(signs, 7 * (int(iqs / 2) % 4), 7);
+ const uint sign = (sign7 | (bitCount(sign7) << 7)) >> (4 * (idx % 2));
+ const uint grid = iq3xxs_grid[qs];
+ const vec4 v = db * vec4(unpack8(grid));
+
+ buf_a[buf_idx ] = FLOAT_TYPE_VEC2((sign & 1) != 0 ? -v.x : v.x,
+ (sign & 2) != 0 ? -v.y : v.y);
+ buf_a[buf_idx + 1] = FLOAT_TYPE_VEC2((sign & 4) != 0 ? -v.z : v.z,
+ (sign & 8) != 0 ? -v.w : v.w);
+#elif defined(DATA_A_IQ3_S)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 64; // 4 values per idx
+ const uint iqs = idx % 64; // 0..63
+ const uint iqh = iqs / 8;
+
+ const float d = float(data_a[ib].d);
+ const uint qs = data_a[ib].qs[iqs];
+ const uint qh = data_a[ib].qh[iqh];
+ const int8_t sign = int8_t(data_a[ib].signs[iqs / 2] >> (4 * (idx % 2)));
+ const uint scale = data_a[ib].scales[iqs / 16];
+ const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(sign << 1, sign)));
+ const float db = d * (1 + 2 * ((scale >> (4 * (iqh & 1))) & 0xf));
+ const uint32_t grid = iq3s_grid[qs | ((qh << (8 - (iqs % 8))) & 256)];
+ const vec4 v = db * vec4(unpack8(grid));
+
+ buf_a[buf_idx ] = FLOAT_TYPE_VEC2((sign & 1) != 0 ? -v.x : v.x,
+ (sign & 2) != 0 ? -v.y : v.y);
+ buf_a[buf_idx + 1] = FLOAT_TYPE_VEC2((sign & 4) != 0 ? -v.z : v.z,
+ (sign & 8) != 0 ? -v.w : v.w);
+#elif defined(DATA_A_IQ4_XS)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A / 2;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint ib32 = (idx % 128) / 16; // 0..7
+ const uint iq = 16 * ib32 + 2 * (idx % 8);
+
+ const uint sl = (data_a[ib].scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF;
+ const uint sh = ((data_a[ib].scales_h) >> (2 * ib32)) & 3;
+ const uint qshift = (idx & 8) >> 1;
+ u8vec2 qs = u8vec2(data_a[ib].qs[iq], data_a[ib].qs[iq + 1]);
+ qs = (qs >> qshift) & uint8_t(0xF);
+
+ const float d = float(data_a[ib].d);
+ const vec2 v = d * float(int(sl | (sh << 4)) - 32) * vec2(kvalues_iq4nl[qs.x], kvalues_iq4nl[qs.y]);
+
+ buf_a[buf_idx ] = FLOAT_TYPE_VEC2(v.xy);
+#elif defined(DATA_A_IQ4_NL)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row;
+
+ const uint ib = idx / 8;
+ const uint iqs = idx & 0x07;
+
+ const FLOAT_TYPE d = FLOAT_TYPE(data_a_packed16[ib].d);
+ const uint vui = uint(data_a_packed16[ib].qs[iqs]);
+
+ buf_a[buf_idx ] = d * FLOAT_TYPE_VEC2(kvalues_iq4nl[vui & 0xF],
+ kvalues_iq4nl[bitfieldExtract(vui, 8, 4)]);
+ buf_a[buf_idx + 8] = d * FLOAT_TYPE_VEC2(kvalues_iq4nl[bitfieldExtract(vui, 4, 4)],
+ kvalues_iq4nl[vui >> 12]);
+#elif defined(DATA_A_MXFP4)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row;
+
+ const uint ib = idx / 8;
+ const uint iqs = (idx & 0x07) * 2;
+
+ const float d = e8m0_to_fp32(data_a[ib].e);
+ const uint vui = uint(data_a[ib].qs[iqs]);
+ const uint vui2 = uint(data_a[ib].qs[iqs+1]);
+
+ buf_a[buf_idx ] = FLOAT_TYPE_VEC2(kvalues_mxfp4[vui & 0xF] * d,
+ kvalues_mxfp4[vui2 & 0xF] * d);
+ buf_a[buf_idx + 8] = FLOAT_TYPE_VEC2(kvalues_mxfp4[vui >> 4] * d,
+ kvalues_mxfp4[vui2 >> 4] * d);
+#endif
+}
+
+#if !defined(MUL_MAT_ID)
+void load_b_to_shmem(const uint pos_b, const uint row, const uint col, const uint idx_n, const uint block, const uint end_k) {
+#if LOAD_VEC_B == 8
+ // Not supported for b_type bf16 because bf16mat2x4 does not exist
+ const uint idx = pos_b + col * p.stride_b / LOAD_VEC_B + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_B / 2;
+ FLOAT_TYPE_VEC8 bb = FLOAT_TYPE_VEC8(data_b[idx]);
+ buf_b[buf_idx + 0] = bb[0].xy;
+ buf_b[buf_idx + 1] = bb[0].zw;
+ buf_b[buf_idx + 2] = bb[1].xy;
+ buf_b[buf_idx + 3] = bb[1].zw;
+#elif LOAD_VEC_B == 4
+ const uint idx = pos_b + col * p.stride_b / LOAD_VEC_B + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_B / 2;
+#if defined(DATA_B_BF16)
+ FLOAT_TYPE_VEC4 bb = FLOAT_TYPE_VEC4(TO_FLOAT_TYPE(data_b[idx]));
+#else
+ FLOAT_TYPE_VEC4 bb = FLOAT_TYPE_VEC4(data_b[idx]);
+#endif
+ buf_b[buf_idx + 0] = bb.xy;
+ buf_b[buf_idx + 1] = bb.zw;
+#else // LOAD_VEC_BATCH_B == 2
+ const uint idx = pos_b + col * p.stride_b + row * 2;
+ const uint buf_idx = col * SHMEM_STRIDE + row;
+ if (idx_n < p.N && block + row * 2 + 1 < end_k) {
+ buf_b[buf_idx] = FLOAT_TYPE_VEC2(TO_FLOAT_TYPE(data_b[idx]),
+ TO_FLOAT_TYPE(data_b[idx + 1]));
+ } else if (idx_n < p.N && block + row * 2 < end_k) {
+ buf_b[buf_idx] = FLOAT_TYPE_VEC2(TO_FLOAT_TYPE(data_b[idx]), 0.0f);
+ } else {
+ buf_b[buf_idx] = FLOAT_TYPE_VEC2(0.0f);
+ }
+#endif
+}
+#else
+void load_b_to_shmem(const uint pos_b, const uint row, const uint col, const uint ic, const uint _ne1, const uint block, const uint end_k) {
+#if LOAD_VEC_B == 8
+ // Not supported for b_type bf16 because bf16mat2x4 does not exist
+ const u16vec2 row_idx = row_ids[col];
+ const uint idx = pos_b + row_idx.y * p.batch_stride_b / LOAD_VEC_B + (row_idx.x % p.ne11) * p.stride_b / LOAD_VEC_B + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_B / 2;
+ FLOAT_TYPE_VEC8 bb = FLOAT_TYPE_VEC8(data_b[idx]);
+ buf_b[buf_idx + 0] = bb[0].xy;
+ buf_b[buf_idx + 1] = bb[0].zw;
+ buf_b[buf_idx + 2] = bb[1].xy;
+ buf_b[buf_idx + 3] = bb[1].zw;
+#elif LOAD_VEC_B == 4
+ const u16vec2 row_idx = row_ids[col];
+ const uint idx = pos_b + row_idx.y * p.batch_stride_b / LOAD_VEC_B + (row_idx.x % p.ne11) * p.stride_b / LOAD_VEC_B + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_B / 2;
+#if defined(DATA_B_BF16)
+ FLOAT_TYPE_VEC4 bb = FLOAT_TYPE_VEC4(TO_FLOAT_TYPE(data_b[idx]));
+#else
+ FLOAT_TYPE_VEC4 bb = FLOAT_TYPE_VEC4(data_b[idx]);
+#endif
+ buf_b[buf_idx + 0] = bb.xy;
+ buf_b[buf_idx + 1] = bb.zw;
+#else // LOAD_VEC_BATCH_B == 2
+ const uint row_i = ic * BN + col;
+ const uint buf_idx = col * SHMEM_STRIDE + row;
+ if (row_i < _ne1 && block + row * 2 + 1 < end_k) {
+ const u16vec2 row_idx = row_ids[col];
+ const uint idx = pos_b + row_idx.y * p.batch_stride_b + (row_idx.x % p.ne11) * p.stride_b + row * 2;
+ buf_b[buf_idx] = FLOAT_TYPE_VEC2(TO_FLOAT_TYPE(data_b[idx]),
+ TO_FLOAT_TYPE(data_b[idx + 1]));
+ } else if (row_i < _ne1 && block + row * 2 < end_k) {
+ const u16vec2 row_idx = row_ids[col];
+ const uint idx = pos_b + row_idx.y * p.batch_stride_b + (row_idx.x % p.ne11) * p.stride_b + row * 2;
+ buf_b[buf_idx] = FLOAT_TYPE_VEC2(TO_FLOAT_TYPE(data_b[idx]), 0.0f);
+ } else {
+ buf_b[buf_idx] = FLOAT_TYPE_VEC2(0.0f);
+ }
+#endif
+}
+#endif
#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
#endif
-#include "types.comp"
+#include "types.glsl"
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
shared ACC_TYPE coopmat_stage[TM * TN * NUM_WARPS];
#endif
-#include "mul_mmq_funcs.comp"
+#include "mul_mmq_funcs.glsl"
void main() {
#ifdef NEEDS_INIT_IQ_SHMEM
+++ /dev/null
-#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
-#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
-
-#include "types.comp"
-
-// Each iqs value maps to a 32-bit integer
-
-#if defined(DATA_A_Q4_0)
-i32vec2 repack(uint ib, uint iqs) {
- // Use 2-byte loads since a q4_0 block (18 bytes) is not divisible by 4
- const u16vec2 quants = u16vec2(data_a[ib].qs[iqs * 2 ],
- data_a[ib].qs[iqs * 2 + 1]);
- const uint32_t vui = pack32(quants);
- return i32vec2( vui & 0x0F0F0F0F,
- (vui >> 4) & 0x0F0F0F0F);
-}
-
-ACC_TYPE mul_q8_1(const int32_t q_sum, const float da, const vec2 dsb, const int32_t sum_divisor) {
- return ACC_TYPE(da * (float(q_sum) * dsb.x - (8 / sum_divisor) * dsb.y));
-}
-#endif
-
-#if defined(DATA_A_Q4_1)
-i32vec2 repack(uint ib, uint iqs) {
- // Use 4-byte loads since a q4_1 block (20 bytes) is divisible by 4
- const uint32_t vui = data_a_packed32[ib].qs[iqs];
- return i32vec2( vui & 0x0F0F0F0F,
- (vui >> 4) & 0x0F0F0F0F);
-}
-
-ACC_TYPE mul_q8_1(const int32_t q_sum, const vec2 dma, const vec2 dsb, const int32_t sum_divisor) {
- return ACC_TYPE(float(q_sum) * dma.x * dsb.x + dma.y * dsb.y / sum_divisor);
-}
-#endif
-
-#if defined(DATA_A_Q5_0)
-i32vec2 repack(uint ib, uint iqs) {
- // Use 2-byte loads since a q5_0 block (22 bytes) is not divisible by 4
- const u16vec2 quants = u16vec2(data_a[ib].qs[iqs * 2 ],
- data_a[ib].qs[iqs * 2 + 1]);
- const uint32_t vui = pack32(quants);
- const int32_t qh = int32_t((uint32_t(data_a[ib].qh[1]) << 16 | data_a[ib].qh[0]) >> (4 * iqs));
- const int32_t v0 = int32_t(vui & 0x0F0F0F0F)
- | ((qh & 0xF) * 0x02040810) & 0x10101010; // (0,1,2,3) -> (4,12,20,28)
-
- const int32_t v1 = int32_t((vui >> 4) & 0x0F0F0F0F)
- | (((qh >> 16) & 0xF) * 0x02040810) & 0x10101010; // (16,17,18,19) -> (4,12,20,28)
-
- return i32vec2(v0, v1);
-}
-
-ACC_TYPE mul_q8_1(const int32_t q_sum, const float da, const vec2 dsb, const int32_t sum_divisor) {
- return ACC_TYPE(da * (float(q_sum) * dsb.x - (16 / sum_divisor) * dsb.y));
-}
-#endif
-
-#if defined(DATA_A_Q5_1)
-i32vec2 repack(uint ib, uint iqs) {
- // Use 4-byte loads since a q5_1 block (24 bytes) is divisible by 4
- const uint32_t vui = data_a_packed32[ib].qs[iqs];
- const int32_t qh = int32_t(data_a_packed32[ib].qh >> (4 * iqs));
- const int32_t v0 = int32_t(vui & 0x0F0F0F0F)
- | ((qh & 0xF) * 0x02040810) & 0x10101010; // (0,1,2,3) -> (4,12,20,28)
-
- const int32_t v1 = int32_t((vui >> 4) & 0x0F0F0F0F)
- | (((qh >> 16) & 0xF) * 0x02040810) & 0x10101010; // (16,17,18,19) -> (4,12,20,28)
-
- return i32vec2(v0, v1);
-}
-
-ACC_TYPE mul_q8_1(const int32_t q_sum, const vec2 dma, const vec2 dsb, const int32_t sum_divisor) {
- return ACC_TYPE(float(q_sum) * dma.x * dsb.x + dma.y * dsb.y / sum_divisor);
-}
-#endif
-
-#if defined(DATA_A_Q8_0)
-int32_t repack(uint ib, uint iqs) {
- // Use 2-byte loads since a q8_0 block (34 bytes) is not divisible by 4
- return pack32(i16vec2(data_a[ib].qs[iqs * 2 ],
- data_a[ib].qs[iqs * 2 + 1]));
-}
-
-ACC_TYPE mul_q8_1(const int32_t q_sum, const float da, const vec2 dsb, const int32_t sum_divisor) {
- return ACC_TYPE(float(q_sum) * da * dsb.x);
-}
-#endif
-
-#if defined(DATA_A_Q4_0) || defined(DATA_A_Q5_0) || defined(DATA_A_Q8_0) || defined(DATA_A_IQ1_S) || defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL)
-FLOAT_TYPE get_d(uint ib) {
- return FLOAT_TYPE(data_a[ib].d);
-}
-#endif
-
-#if defined(DATA_A_MXFP4)
-FLOAT_TYPE get_d(uint ib) {
- return FLOAT_TYPE(e8m0_to_fp32(data_a[ib].e));
-}
-#endif
-
-#if defined(DATA_A_Q4_1) || defined(DATA_A_Q5_1)
-FLOAT_TYPE_VEC2 get_dm(uint ib) {
- return FLOAT_TYPE_VEC2(data_a_packed32[ib].dm);
-}
-#endif
--- /dev/null
+#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
+
+#include "types.glsl"
+
+// Each iqs value maps to a 32-bit integer
+
+#if defined(DATA_A_Q4_0)
+i32vec2 repack(uint ib, uint iqs) {
+ // Use 2-byte loads since a q4_0 block (18 bytes) is not divisible by 4
+ const u16vec2 quants = u16vec2(data_a[ib].qs[iqs * 2 ],
+ data_a[ib].qs[iqs * 2 + 1]);
+ const uint32_t vui = pack32(quants);
+ return i32vec2( vui & 0x0F0F0F0F,
+ (vui >> 4) & 0x0F0F0F0F);
+}
+
+ACC_TYPE mul_q8_1(const int32_t q_sum, const float da, const vec2 dsb, const int32_t sum_divisor) {
+ return ACC_TYPE(da * (float(q_sum) * dsb.x - (8 / sum_divisor) * dsb.y));
+}
+#endif
+
+#if defined(DATA_A_Q4_1)
+i32vec2 repack(uint ib, uint iqs) {
+ // Use 4-byte loads since a q4_1 block (20 bytes) is divisible by 4
+ const uint32_t vui = data_a_packed32[ib].qs[iqs];
+ return i32vec2( vui & 0x0F0F0F0F,
+ (vui >> 4) & 0x0F0F0F0F);
+}
+
+ACC_TYPE mul_q8_1(const int32_t q_sum, const vec2 dma, const vec2 dsb, const int32_t sum_divisor) {
+ return ACC_TYPE(float(q_sum) * dma.x * dsb.x + dma.y * dsb.y / sum_divisor);
+}
+#endif
+
+#if defined(DATA_A_Q5_0)
+i32vec2 repack(uint ib, uint iqs) {
+ // Use 2-byte loads since a q5_0 block (22 bytes) is not divisible by 4
+ const u16vec2 quants = u16vec2(data_a[ib].qs[iqs * 2 ],
+ data_a[ib].qs[iqs * 2 + 1]);
+ const uint32_t vui = pack32(quants);
+ const int32_t qh = int32_t((uint32_t(data_a[ib].qh[1]) << 16 | data_a[ib].qh[0]) >> (4 * iqs));
+ const int32_t v0 = int32_t(vui & 0x0F0F0F0F)
+ | ((qh & 0xF) * 0x02040810) & 0x10101010; // (0,1,2,3) -> (4,12,20,28)
+
+ const int32_t v1 = int32_t((vui >> 4) & 0x0F0F0F0F)
+ | (((qh >> 16) & 0xF) * 0x02040810) & 0x10101010; // (16,17,18,19) -> (4,12,20,28)
+
+ return i32vec2(v0, v1);
+}
+
+ACC_TYPE mul_q8_1(const int32_t q_sum, const float da, const vec2 dsb, const int32_t sum_divisor) {
+ return ACC_TYPE(da * (float(q_sum) * dsb.x - (16 / sum_divisor) * dsb.y));
+}
+#endif
+
+#if defined(DATA_A_Q5_1)
+i32vec2 repack(uint ib, uint iqs) {
+ // Use 4-byte loads since a q5_1 block (24 bytes) is divisible by 4
+ const uint32_t vui = data_a_packed32[ib].qs[iqs];
+ const int32_t qh = int32_t(data_a_packed32[ib].qh >> (4 * iqs));
+ const int32_t v0 = int32_t(vui & 0x0F0F0F0F)
+ | ((qh & 0xF) * 0x02040810) & 0x10101010; // (0,1,2,3) -> (4,12,20,28)
+
+ const int32_t v1 = int32_t((vui >> 4) & 0x0F0F0F0F)
+ | (((qh >> 16) & 0xF) * 0x02040810) & 0x10101010; // (16,17,18,19) -> (4,12,20,28)
+
+ return i32vec2(v0, v1);
+}
+
+ACC_TYPE mul_q8_1(const int32_t q_sum, const vec2 dma, const vec2 dsb, const int32_t sum_divisor) {
+ return ACC_TYPE(float(q_sum) * dma.x * dsb.x + dma.y * dsb.y / sum_divisor);
+}
+#endif
+
+#if defined(DATA_A_Q8_0)
+int32_t repack(uint ib, uint iqs) {
+ // Use 2-byte loads since a q8_0 block (34 bytes) is not divisible by 4
+ return pack32(i16vec2(data_a[ib].qs[iqs * 2 ],
+ data_a[ib].qs[iqs * 2 + 1]));
+}
+
+ACC_TYPE mul_q8_1(const int32_t q_sum, const float da, const vec2 dsb, const int32_t sum_divisor) {
+ return ACC_TYPE(float(q_sum) * da * dsb.x);
+}
+#endif
+
+#if defined(DATA_A_Q4_0) || defined(DATA_A_Q5_0) || defined(DATA_A_Q8_0) || defined(DATA_A_IQ1_S) || defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL)
+FLOAT_TYPE get_d(uint ib) {
+ return FLOAT_TYPE(data_a[ib].d);
+}
+#endif
+
+#if defined(DATA_A_MXFP4)
+FLOAT_TYPE get_d(uint ib) {
+ return FLOAT_TYPE(e8m0_to_fp32(data_a[ib].e));
+}
+#endif
+
+#if defined(DATA_A_Q4_1) || defined(DATA_A_Q5_1)
+FLOAT_TYPE_VEC2 get_dm(uint ib) {
+ return FLOAT_TYPE_VEC2(data_a_packed32[ib].dm);
+}
+#endif
#extension GL_KHR_shader_subgroup_basic : enable
#endif
-#include "rte.comp"
-#include "types.comp"
-#include "utils.comp"
+#include "rte.glsl"
+#include "types.glsl"
+#include "utils.glsl"
layout (push_constant) uniform parameter2
{
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#define BLOCK_SIZE 512
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#version 450
-#include "generic_head.comp"
+#include "generic_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "types.comp"
+#include "types.glsl"
layout (push_constant) uniform parameter
{
#version 450
-#include "types.comp"
+#include "types.glsl"
#extension GL_EXT_shader_16bit_storage : require
uint ne;
} p;
-#include "types.comp"
+#include "types.glsl"
layout(constant_id = 0) const uint GROUP_SIZE = 32;
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "glu_head.comp"
+#include "glu_head.glsl"
float op(float a, float b) {
return max(a, 0.0f) * b;
}
-#include "glu_main.comp"
+#include "glu_main.glsl"
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#version 450
-#include "types.comp"
-#include "generic_unary_head.comp"
+#include "types.glsl"
+#include "generic_unary_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "types.comp"
-#include "generic_unary_head.comp"
+#include "types.glsl"
+#include "generic_unary_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "generic_binary_head.comp"
-#include "types.comp"
+#include "generic_binary_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#define BLOCK_SIZE 512
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#define BLOCK_SIZE 512
#version 450
-#include "generic_binary_head.comp"
-#include "types.comp"
+#include "generic_binary_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#extension GL_KHR_shader_subgroup_arithmetic : enable
#version 450
-#include "types.comp"
-#include "generic_unary_head.comp"
+#include "types.glsl"
+#include "generic_unary_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+++ /dev/null
-#include "types.comp"
-
-#extension GL_EXT_shader_16bit_storage : require
-
-#include "rte.comp"
-
-layout(local_size_x = 1, local_size_y = 256, local_size_z = 1) in;
-
-layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
-layout (binding = 1) readonly buffer Y {int data_pos[];};
-layout (binding = 2) readonly buffer Z {float data_ff[];};
-layout (binding = 3) writeonly buffer D {D_TYPE data_d[];};
-
-layout (push_constant) uniform parameter {
- uint ncols;
- uint n_dims;
- float freq_scale;
- uint p_delta_rows;
- float freq_base;
- float ext_factor;
- float attn_factor;
- float corr_dims[2];
- float theta_scale;
- uint has_ff;
- uint ne02;
- uint s1;
- uint s2;
- int sections[4];
- uint is_back;
-} p;
-
-float rope_yarn_ramp(const float low, const float high, const uint i0) {
- const float y = (i0 / 2 - low) / max(0.001f, high - low);
- return 1.0f - min(1.0f, max(0.0f, y));
-}
-
-void rope_yarn(const float theta_extrap, const uint i0, out float cos_theta, out float sin_theta) {
- float mscale = p.attn_factor;
- // Get n-d rotational scaling corrected for extrapolation
- float theta_interp = p.freq_scale * theta_extrap;
- float theta = theta_interp;
- if (p.ext_factor != 0.0f) {
- float ramp_mix = rope_yarn_ramp(p.corr_dims[0], p.corr_dims[1], i0) * p.ext_factor;
- theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;
-
- // Get n-d magnitude scaling corrected for interpolation
- mscale *= 1.0f + 0.1f * log(1.0f / p.freq_scale);
- }
- // Backprogagation uses inverted rotation
- if (p.is_back != 0) {
- theta = -theta;
- }
- cos_theta = cos(theta) * mscale;
- sin_theta = sin(theta) * mscale;
-}
--- /dev/null
+#include "types.glsl"
+
+#extension GL_EXT_shader_16bit_storage : require
+
+#include "rte.glsl"
+
+layout(local_size_x = 1, local_size_y = 256, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) readonly buffer Y {int data_pos[];};
+layout (binding = 2) readonly buffer Z {float data_ff[];};
+layout (binding = 3) writeonly buffer D {D_TYPE data_d[];};
+
+layout (push_constant) uniform parameter {
+ uint ncols;
+ uint n_dims;
+ float freq_scale;
+ uint p_delta_rows;
+ float freq_base;
+ float ext_factor;
+ float attn_factor;
+ float corr_dims[2];
+ float theta_scale;
+ uint has_ff;
+ uint ne02;
+ uint s1;
+ uint s2;
+ int sections[4];
+ uint is_back;
+} p;
+
+float rope_yarn_ramp(const float low, const float high, const uint i0) {
+ const float y = (i0 / 2 - low) / max(0.001f, high - low);
+ return 1.0f - min(1.0f, max(0.0f, y));
+}
+
+void rope_yarn(const float theta_extrap, const uint i0, out float cos_theta, out float sin_theta) {
+ float mscale = p.attn_factor;
+ // Get n-d rotational scaling corrected for extrapolation
+ float theta_interp = p.freq_scale * theta_extrap;
+ float theta = theta_interp;
+ if (p.ext_factor != 0.0f) {
+ float ramp_mix = rope_yarn_ramp(p.corr_dims[0], p.corr_dims[1], i0) * p.ext_factor;
+ theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;
+
+ // Get n-d magnitude scaling corrected for interpolation
+ mscale *= 1.0f + 0.1f * log(1.0f / p.freq_scale);
+ }
+ // Backprogagation uses inverted rotation
+ if (p.is_back != 0) {
+ theta = -theta;
+ }
+ cos_theta = cos(theta) * mscale;
+ sin_theta = sin(theta) * mscale;
+}
#version 450
-#include "rope_head.comp"
+#include "rope_head.glsl"
void main() {
const uint i0 = 2*gl_GlobalInvocationID.y;
#version 450
-#include "rope_head.comp"
+#include "rope_head.glsl"
void main() {
const uint i0 = 2*gl_GlobalInvocationID.y;
#version 450
-#include "rope_head.comp"
+#include "rope_head.glsl"
void main() {
const uint i0 = 2*gl_GlobalInvocationID.y;
#version 450
-#include "rope_head.comp"
+#include "rope_head.glsl"
void main() {
const uint i0 = 2*gl_GlobalInvocationID.y;
+++ /dev/null
-
-#if RTE16
-#extension GL_EXT_spirv_intrinsics : enable
-spirv_execution_mode(capabilities = [4467], 4462, 16); // RoundingModeRTE, 16 bits
-#endif // RTE16
--- /dev/null
+
+#if RTE16
+#extension GL_EXT_spirv_intrinsics : enable
+spirv_execution_mode(capabilities = [4467], 4462, 16); // RoundingModeRTE, 16 bits
+#endif // RTE16
#version 450
-#include "types.comp"
-#include "generic_unary_head.comp"
+#include "types.glsl"
+#include "generic_unary_head.glsl"
const uint num_threads = 128;
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#version 450
-#include "types.comp"
-#include "generic_unary_head.comp"
+#include "types.glsl"
+#include "generic_unary_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
uint has_sinks;
} p;
-#include "types.comp"
+#include "types.glsl"
layout(constant_id = 0) const uint BLOCK_SIZE = 32;
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#extension GL_EXT_control_flow_attributes : enable
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
layout(constant_id = 0) const uint BLOCK_SIZE = 32;
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "types.comp"
-#include "generic_unary_head.comp"
+#include "types.glsl"
+#include "generic_unary_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
#version 450
-#include "types.comp"
-#include "generic_unary_head.comp"
+#include "types.glsl"
+#include "generic_unary_head.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
#extension GL_EXT_shader_16bit_storage : require
-#include "types.comp"
-#include "generic_binary_head.comp"
+#include "types.glsl"
+#include "generic_binary_head.glsl"
const uint num_threads = 256;
#version 450
-#include "types.comp"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#version 450
-#include "glu_head.comp"
+#include "glu_head.glsl"
float op(float a, float b) {
return a / (1.0f + exp(-a)) * b;
}
-#include "glu_main.comp"
+#include "glu_main.glsl"
#version 450
-#include "glu_head.comp"
+#include "glu_head.glsl"
float op(float a, float b) {
float xi = min(a, p.limit);
return out_glu;
}
-#include "glu_main.comp"
+#include "glu_main.glsl"
#version 450
-#include "generic_head.comp"
-#include "types.comp"
+#include "generic_head.glsl"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
+++ /dev/null
-#version 460
-
-#extension GL_EXT_bfloat16 : require
-
-void main()
-{
-}
+++ /dev/null
-#version 460
-
-#extension GL_NV_cooperative_matrix2 : require
-
-void main()
-{
-}
+++ /dev/null
-#version 460
-
-#extension GL_KHR_cooperative_matrix : require
-
-void main()
-{
-}
+++ /dev/null
-#version 460
-
-#extension GL_EXT_integer_dot_product : require
-
-void main()
-{
-}
uint max_period;
} p;
-#include "types.comp"
+#include "types.glsl"
#extension GL_EXT_control_flow_attributes : enable
#define BLOCK_SIZE 256
+++ /dev/null
-#if !defined(GGML_TYPES_COMP)
-#define GGML_TYPES_COMP
-
-#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
-#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
-#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
-#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
-#extension GL_EXT_shader_16bit_storage : require
-
-#if defined(DATA_A_F32)
-#define QUANT_K 1
-#define QUANT_R 1
-
-#if LOAD_VEC_A == 4
-#define A_TYPE vec4
-#elif LOAD_VEC_A == 8
-#define A_TYPE mat2x4
-#else
-#define A_TYPE float
-#endif
-#endif
-
-#if defined(DATA_A_F16)
-#define QUANT_K 1
-#define QUANT_R 1
-
-#if LOAD_VEC_A == 4
-#define A_TYPE f16vec4
-#elif LOAD_VEC_A == 8
-#define A_TYPE f16mat2x4
-#else
-#define A_TYPE float16_t
-#endif
-#endif
-
-#if defined(DATA_A_BF16)
-#define QUANT_K 1
-#define QUANT_R 1
-
-#if LOAD_VEC_A == 4
-#define A_TYPE u16vec4
-#elif LOAD_VEC_A == 8
-#error unsupported
-#else
-#define A_TYPE uint16_t
-#endif
-#endif
-
-#define QUANT_K_Q4_0 32
-#define QUANT_R_Q4_0 2
-
-struct block_q4_0
-{
- float16_t d;
- uint8_t qs[16];
-};
-struct block_q4_0_packed16
-{
- float16_t d;
- uint16_t qs[16/2];
-};
-
-#if defined(DATA_A_Q4_0)
-#define QUANT_K QUANT_K_Q4_0
-#define QUANT_R QUANT_R_Q4_0
-#define QUANT_AUXF 1
-#define A_TYPE block_q4_0
-#define A_TYPE_PACKED16 block_q4_0_packed16
-#endif
-
-#define QUANT_K_Q4_1 32
-#define QUANT_R_Q4_1 2
-
-struct block_q4_1
-{
- float16_t d;
- float16_t m;
- uint8_t qs[16];
-};
-
-struct block_q4_1_packed16
-{
- float16_t d;
- float16_t m;
- uint16_t qs[16/2];
-};
-
-struct block_q4_1_packed32
-{
- f16vec2 dm;
- uint32_t qs[16/4];
-};
-
-#if defined(DATA_A_Q4_1)
-#define QUANT_K QUANT_K_Q4_1
-#define QUANT_R QUANT_R_Q4_1
-#define QUANT_AUXF 2
-#define A_TYPE block_q4_1
-#define A_TYPE_PACKED16 block_q4_1_packed16
-#define A_TYPE_PACKED32 block_q4_1_packed32
-#endif
-
-#define QUANT_K_Q5_0 32
-#define QUANT_R_Q5_0 2
-
-struct block_q5_0
-{
- float16_t d;
- uint16_t qh[2];
- uint8_t qs[16];
-};
-
-struct block_q5_0_packed16
-{
- float16_t d;
- uint16_t qh[2];
- uint16_t qs[16/2];
-};
-
-#if defined(DATA_A_Q5_0)
-#define QUANT_K QUANT_K_Q5_0
-#define QUANT_R QUANT_R_Q5_0
-#define QUANT_AUXF 1
-#define A_TYPE block_q5_0
-#define A_TYPE_PACKED16 block_q5_0_packed16
-#endif
-
-#define QUANT_K_Q5_1 32
-#define QUANT_R_Q5_1 2
-
-struct block_q5_1
-{
- float16_t d;
- float16_t m;
- uint qh;
- uint8_t qs[16];
-};
-
-struct block_q5_1_packed16
-{
- float16_t d;
- float16_t m;
- uint qh;
- uint16_t qs[16/2];
-};
-
-struct block_q5_1_packed32
-{
- f16vec2 dm;
- uint qh;
- uint32_t qs[16/4];
-};
-
-#if defined(DATA_A_Q5_1)
-#define QUANT_K QUANT_K_Q5_1
-#define QUANT_R QUANT_R_Q5_1
-#define QUANT_AUXF 2
-#define A_TYPE block_q5_1
-#define A_TYPE_PACKED16 block_q5_1_packed16
-#define A_TYPE_PACKED32 block_q5_1_packed32
-#endif
-
-#define QUANT_K_Q8_0 32
-#define QUANT_R_Q8_0 1
-
-struct block_q8_0
-{
- float16_t d;
- int8_t qs[32];
-};
-struct block_q8_0_packed16
-{
- float16_t d;
- int16_t qs[32/2];
-};
-struct block_q8_0_packed32
-{
- float16_t d;
- int32_t qs[32/4];
-};
-
-#if defined(DATA_A_Q8_0)
-#define QUANT_K QUANT_K_Q8_0
-#define QUANT_R QUANT_R_Q8_0
-#define QUANT_AUXF 1
-#define A_TYPE block_q8_0
-#define A_TYPE_PACKED16 block_q8_0_packed16
-#define A_TYPE_PACKED32 block_q8_0_packed32
-#endif
-
-#define QUANT_K_Q8_1 32
-#define QUANT_R_Q8_1 1
-
-struct block_q8_1
-{
- f16vec2 ds;
- int8_t qs[32];
-};
-struct block_q8_1_packed16
-{
- f16vec2 ds;
- int16_t qs[16];
-};
-struct block_q8_1_packed32
-{
- f16vec2 ds;
- int32_t qs[8];
-};
-
-// 4 blocks in one to allow 16-byte/128-bit alignment and loads
-struct block_q8_1_x4
-{
- f16vec2 ds[4];
- int32_t qs[32];
-};
-struct block_q8_1_x4_packed128
-{
- f16vec2 ds[4];
- ivec4 qs[8];
-};
-
-// K-quants
-#define QUANT_K_Q2_K 256
-
-struct block_q2_K
-{
- uint8_t scales[QUANT_K_Q2_K/16];
- uint8_t qs[QUANT_K_Q2_K/4];
- f16vec2 d;
-};
-
-struct block_q2_K_packed16
-{
- uint16_t scales[QUANT_K_Q2_K/16/2];
- uint16_t qs[QUANT_K_Q2_K/4/2];
- f16vec2 d;
-};
-
-struct block_q2_K_packed32
-{
- uint32_t scales[QUANT_K_Q2_K/16/4];
- uint32_t qs[QUANT_K_Q2_K/4/4];
- f16vec2 d;
-};
-
-#if defined(DATA_A_Q2_K)
-#define QUANT_K QUANT_K_Q2_K
-#define QUANT_R 1
-#define A_TYPE block_q2_K
-#define A_TYPE_PACKED16 block_q2_K_packed16
-#define A_TYPE_PACKED32 block_q2_K_packed32
-#endif
-
-#define QUANT_K_Q3_K 256
-
-struct block_q3_K
-{
- uint8_t hmask[QUANT_K_Q3_K/8];
- uint8_t qs[QUANT_K_Q3_K/4];
- uint8_t scales[12];
- float16_t d;
-};
-
-struct block_q3_K_packed16
-{
- uint16_t hmask[QUANT_K_Q3_K/8/2];
- uint16_t qs[QUANT_K_Q3_K/4/2];
- uint16_t scales[12/2];
- float16_t d;
-};
-
-#if defined(DATA_A_Q3_K)
-#define QUANT_K QUANT_K_Q3_K
-#define QUANT_R 1
-#define A_TYPE block_q3_K
-#define A_TYPE_PACKED16 block_q3_K_packed16
-#endif
-
-#define QUANT_K_Q4_K 256
-
-struct block_q4_K
-{
- f16vec2 d;
- uint8_t scales[3*QUANT_K_Q4_K/64];
- uint8_t qs[QUANT_K_Q4_K/2];
-};
-
-struct block_q4_K_packed16
-{
- f16vec2 d;
- uint16_t scales[3*QUANT_K_Q4_K/64/2];
- uint16_t qs[QUANT_K_Q4_K/2/2];
-};
-
-struct block_q4_K_packed32
-{
- f16vec2 d;
- uint32_t scales[3*QUANT_K_Q4_K/64/4];
- uint32_t qs[QUANT_K_Q4_K/2/4];
-};
-
-struct block_q4_K_packed128
-{
- uvec4 q4k[9];
-};
-
-#if defined(DATA_A_Q4_K)
-#define QUANT_K QUANT_K_Q4_K
-#define QUANT_R 1
-#define A_TYPE block_q4_K
-#define A_TYPE_PACKED16 block_q4_K_packed16
-#define A_TYPE_PACKED32 block_q4_K_packed32
-#endif
-
-#define QUANT_K_Q5_K 256
-
-struct block_q5_K
-{
- f16vec2 d;
- uint8_t scales[12];
- uint8_t qh[QUANT_K_Q5_K/8];
- uint8_t qs[QUANT_K_Q5_K/2];
-};
-
-struct block_q5_K_packed16
-{
- f16vec2 d;
- uint16_t scales[12/2];
- uint16_t qh[QUANT_K_Q5_K/8/2];
- uint16_t qs[QUANT_K_Q5_K/2/2];
-};
-
-struct block_q5_K_packed128
-{
- uvec4 q5k[11];
-};
-
-#if defined(DATA_A_Q5_K)
-#define QUANT_K QUANT_K_Q5_K
-#define QUANT_R 1
-#define A_TYPE block_q5_K
-#define A_TYPE_PACKED16 block_q5_K_packed16
-#endif
-
-#define QUANT_K_Q6_K 256
-
-struct block_q6_K
-{
- uint8_t ql[QUANT_K_Q6_K/2];
- uint8_t qh[QUANT_K_Q6_K/4];
- int8_t scales[QUANT_K_Q6_K/16];
- float16_t d;
-};
-
-struct block_q6_K_packed16
-{
- uint16_t ql[QUANT_K_Q6_K/2/2];
- uint16_t qh[QUANT_K_Q6_K/4/2];
- int8_t scales[QUANT_K_Q6_K/16];
- float16_t d;
-};
-
-#if defined(DATA_A_Q6_K)
-#define QUANT_K QUANT_K_Q6_K
-#define QUANT_R 1
-#define A_TYPE block_q6_K
-#define A_TYPE_PACKED16 block_q6_K_packed16
-#endif
-
-// IQuants
-
-#define QUANT_K_IQ1_S 256
-#define QUANT_R_IQ1_S 1
-
-struct block_iq1_s {
- float16_t d;
- uint8_t qs[QUANT_K_IQ1_S/8];
- uint16_t qh[QUANT_K_IQ1_S/32];
-};
-
-#define QUANT_K_IQ1_M 256
-#define QUANT_R_IQ1_M 1
-
-struct block_iq1_m {
- uint8_t qs[QUANT_K_IQ1_M/8];
- uint8_t qh[QUANT_K_IQ1_M/16];
- uint16_t scales[QUANT_K_IQ1_M/64];
-};
-
-struct block_iq1_m_packed64 {
- uint64_t qs[QUANT_K_IQ1_M/8/8];
- uint64_t qh[QUANT_K_IQ1_M/16/8];
- uint64_t scales;
-};
-
-#if defined(DATA_A_IQ1_S)
-#define QUANT_K QUANT_K_IQ1_S
-#define QUANT_R QUANT_R_IQ1_S
-#define A_TYPE block_iq1_s
-#endif
-
-#if defined(DATA_A_IQ1_M)
-#define QUANT_K QUANT_K_IQ1_M
-#define QUANT_R QUANT_R_IQ1_M
-#define A_TYPE block_iq1_m
-#endif
-
-#if defined(DATA_A_IQ1_S) || defined(DATA_A_IQ1_M)
-#define IQ1S_DELTA 0.125f
-#define IQ1M_DELTA 0.125f
-
-// Packed IQ1S grid where every 2 vec8 are encoded on 32 bits (2 bits per coordinate).
-const uint[1024] iq1s_grid_const = {
- 0xfffdffff, 0xfff7fff0, 0xffccfff5, 0xffdfffc0, 0xffd7ffdd, 0xff30ffd5, 0xff03ff0c, 0xff10ff01,
- 0xff7dff7f, 0xff75ff77, 0xff5fff40, 0xff57ff5d, 0xfcf3ff55, 0xfcccfcf0, 0xfcc1fcc3, 0xfcc5fcc4,
- 0xfc3cfcd0, 0xfc34fc31, 0xfc00fc0d, 0xfc1cfc05, 0xfc11fc13, 0xfc70fc17, 0xfc43fc4c, 0xfc50fc41,
- 0xfdfdfdff, 0xfdf5fdf7, 0xfddffdc0, 0xfdd7fddd, 0xfd30fdd5, 0xfd04fd0c, 0xfd14fd13, 0xfd7dfd7f,
- 0xfd75fd77, 0xfd40fd4c, 0xfd5ffd44, 0xfd57fd5d, 0xf3ccfd55, 0xf3c1f3c3, 0xf33cf3d0, 0xf300f334,
- 0xf313f305, 0xf34cf310, 0xf350f344, 0xf0f3f0fc, 0xf0f1f0f0, 0xf0c7f0c0, 0xf0d4f0c5, 0xf030f03f,
- 0xf00ff035, 0xf003f00c, 0xf001f000, 0xf01ff004, 0xf010f01d, 0xf015f017, 0xf04cf07c, 0xf047f040,
- 0xf05cf045, 0xf050f053, 0xf054f051, 0xf1c4f1c3, 0xf133f13c, 0xf10df10f, 0xf107f100, 0xf11cf11f,
- 0xf114f111, 0xf14cf170, 0xf144f143, 0xf7fdf7ff, 0xf7f5f7f7, 0xf7dff7c0, 0xf7d7f7dd, 0xf730f7d5,
- 0xf701f70c, 0xf77ff710, 0xf777f77d, 0xf740f775, 0xf75df75f, 0xf755f757, 0xf4ccf4f0, 0xf4c4f4c3,
- 0xf4d0f4d3, 0xf40ff43c, 0xf400f40c, 0xf413f41c, 0xf44cf414, 0xf441f443, 0xf450f444, 0xf5fdf5ff,
- 0xf5f5f5f7, 0xf5dff5c0, 0xf5d7f5dd, 0xf530f5d5, 0xf504f50c, 0xf510f51c, 0xf57df57f, 0xf577f570,
- 0xf540f575, 0xf55df55f, 0xf555f557, 0xcfcccfcf, 0xcfc4cfc3, 0xcfd0cfd3, 0xcf33cf3c, 0xcf00cf0f,
- 0xcf1ccf07, 0xcf10cf13, 0xcf4ccf14, 0xcf41cf43, 0xcf50cf5c, 0xccf3ccfc, 0xccf4ccf1, 0xcccdcccf,
- 0xccc7ccc0, 0xccd3ccdc, 0xcc30ccd4, 0xcc0fcc35, 0xcc0dcc0c, 0xcc00cc03, 0xcc04cc01, 0xcc10cc1f,
- 0xcc4dcc73, 0xcc5ccc40, 0xcdcccc53, 0xcdc1cdc3, 0xcd3fcdd0, 0xcd34cd31, 0xcd00cd0d, 0xcd05cd07,
- 0xcd11cd13, 0xcd4ccd70, 0xcd41cd43, 0xc3fccd50, 0xc3f4c3f1, 0xc3c0c3c3, 0xc3c4c3c7, 0xc3d1c3dc,
- 0xc330c33c, 0xc337c331, 0xc30cc335, 0xc300c303, 0xc304c301, 0xc310c31d, 0xc373c317, 0xc34fc374,
- 0xc340c343, 0xc344c347, 0xc35cc345, 0xc350c353, 0xc0fdc354, 0xc0f5c0f0, 0xc0c3c0cc, 0xc0c1c0c0,
- 0xc0dfc0c4, 0xc0d0c0dd, 0xc0d5c0d7, 0xc033c03c, 0xc031c030, 0xc00dc00c, 0xc000c003, 0xc004c001,
- 0xc01cc005, 0xc010c013, 0xc014c011, 0xc07dc07f, 0xc070c073, 0xc075c077, 0xc04cc04f, 0xc040c043,
- 0xc044c041, 0xc05fc045, 0xc050c05d, 0xc1f3c1fc, 0xc1f1c1f0, 0xc1c1c1c0, 0xc1c5c1c7, 0xc1d1c1dc,
- 0xc13dc13f, 0xc130c133, 0xc135c137, 0xc100c10c, 0xc107c101, 0xc11cc104, 0xc110c113, 0xc114c117,
- 0xc171c115, 0xc14dc175, 0xc153c140, 0xc7ccc154, 0xc7d0c7c1, 0xc733c73c, 0xc734c731, 0xc700c70f,
- 0xc705c707, 0xc71cc71f, 0xc711c713, 0xc770c714, 0xc743c74c, 0xc4cfc750, 0xc4c0c4cd, 0xc4dcc4c5,
- 0xc43dc4d0, 0xc430c433, 0xc40cc437, 0xc400c403, 0xc404c401, 0xc41fc405, 0xc415c410, 0xc44cc474,
- 0xc440c44d, 0xc45cc447, 0xc454c451, 0xc5c1c5f4, 0xc5d1c5d3, 0xc531c533, 0xc50fc534, 0xc500c50d,
- 0xc51cc507, 0xc514c511, 0xc54cc570, 0xc545c541, 0xdffddfff, 0xdff5dff7, 0xdfdfdfc0, 0xdfd0dfdd,
- 0xdfd5dfd7, 0xdf0cdf30, 0xdf1cdf04, 0xdf7fdf10, 0xdf77df7d, 0xdf40df75, 0xdf5ddf5f, 0xdf57df50,
- 0xdcf0df55, 0xdcc3dccc, 0xdcd0dcc4, 0xdc33dc3d, 0xdc00dc34, 0xdc05dc07, 0xdc13dc1c, 0xdc11dc10,
- 0xdc4fdc70, 0xdc44dc41, 0xddfcdc50, 0xddf5ddf7, 0xddc0ddcc, 0xdddddddf, 0xddd5ddd7, 0xdd0cdd30,
- 0xdd04dd01, 0xdd7cdd10, 0xdd75dd77, 0xdd40dd4c, 0xdd5ddd5f, 0xdd55dd57, 0xd3c3d3f0, 0xd3c4d3c1,
- 0xd333d3d0, 0xd331d330, 0xd30dd334, 0xd307d300, 0xd311d305, 0xd34cd370, 0xd344d343, 0xd350d35c,
- 0xd0c0d0f4, 0xd0d4d0dc, 0xd030d03f, 0xd00cd037, 0xd000d003, 0xd01dd004, 0xd017d010, 0xd04fd074,
- 0xd040d043, 0xd045d047, 0xd053d05c, 0xd054d051, 0xd1cfd1f0, 0xd1c4d1cd, 0xd13cd1d0, 0xd100d134,
- 0xd11cd11f, 0xd173d114, 0xd14fd171, 0xd7ffd145, 0xd7f7d7fd, 0xd7c0d7f5, 0xd7ddd7df, 0xd7d5d7d7,
- 0xd70cd730, 0xd710d703, 0xd77dd77f, 0xd775d777, 0xd75dd75f, 0xd755d757, 0xd4ccd4f4, 0xd4c4d4c3,
- 0xd431d4d0, 0xd40dd434, 0xd41cd400, 0xd411d413, 0xd470d414, 0xd441d44f, 0xd453d444, 0xd5ffd450,
- 0xd5f7d5fd, 0xd5dfd5f5, 0xd5d7d5dd, 0xd530d5d5, 0xd501d50c, 0xd510d504, 0xd57dd57f, 0xd575d577,
- 0xd55fd540, 0xd557d55d, 0x3ff0d555, 0x3fc13fcc, 0x3f343fd0, 0x3f003f0d, 0x3f053f07, 0x3f133f1c,
- 0x3f433f11, 0x3f5c3f44, 0x3cff3f51, 0x3cf33cfc, 0x3cf43cf1, 0x3cc03ccd, 0x3cc73cc1, 0x3cdc3cc5,
- 0x3cd43cd1, 0x3c373c30, 0x3c0c3c35, 0x3c003c03, 0x3c043c01, 0x3c103c05, 0x3c153c17, 0x3c733c7c,
- 0x3c4f3c71, 0x3c403c4d, 0x3c5c3c5f, 0x3df03c5d, 0x3dc33dcc, 0x3dd03dc1, 0x3d0d3d3c, 0x3d053d00,
- 0x3d143d13, 0x3d433d74, 0x33fc3d50, 0x33c433c0, 0x333033d4, 0x33353337, 0x3303330c, 0x33013300,
- 0x331d331c, 0x33173310, 0x337c3315, 0x33743371, 0x334d334f, 0x335f3340, 0x3354335c, 0x30fd30fc,
- 0x30f530f0, 0x30c330cc, 0x30c130c0, 0x30df30c4, 0x30d530d0, 0x3033303c, 0x30313030, 0x300f3034,
- 0x3003300c, 0x30013000, 0x30043007, 0x3013301c, 0x30113010, 0x307d3014, 0x30703073, 0x304c3077,
- 0x30403043, 0x30443041, 0x30503045, 0x30553057, 0x31f031fc, 0x31c331f4, 0x31c731c0, 0x31dc31c5,
- 0x31d431d3, 0x313d313f, 0x31373130, 0x310c310f, 0x3100310d, 0x31043101, 0x3110311d, 0x317c3117,
- 0x31753170, 0x31403143, 0x3153315c, 0x37f03151, 0x37c037cc, 0x37d037c5, 0x3734373d, 0x3700370f,
- 0x371c3707, 0x37113713, 0x37703714, 0x3743374c, 0x37443741, 0x34fc3750, 0x34f134f0, 0x34cf34f5,
- 0x34c034c3, 0x34dc34c7, 0x34d134d3, 0x3430343f, 0x340c3435, 0x3403340d, 0x34013400, 0x341f3404,
- 0x3410341d, 0x34153411, 0x34743471, 0x3440344d, 0x34473441, 0x3453345c, 0x34543451, 0x353335c1,
- 0x35343531, 0x35073500, 0x35133505, 0x35433514, 0x0ffc3550, 0x0ff00ff3, 0x0ff40ff1, 0x0fc00fcd,
- 0x0fdc0fc5, 0x0fd40fd3, 0x0f300f3f, 0x0f0c0f37, 0x0f000f03, 0x0f040f01, 0x0f170f10, 0x0f740f71,
- 0x0f470f40, 0x0f5c0f5f, 0x0f540f51, 0x0cf70cf0, 0x0cf50cf4, 0x0cc30ccc, 0x0cc10cc0, 0x0cc40cc7,
- 0x0cd00cdf, 0x0cd70cd1, 0x0c3c0cd5, 0x0c300c33, 0x0c340c31, 0x0c0c0c0f, 0x0c030c0d, 0x0c010c00,
- 0x0c040c07, 0x0c1c0c05, 0x0c100c13, 0x0c140c11, 0x0c700c7d, 0x0c430c4c, 0x0c410c40, 0x0c5f0c44,
- 0x0c550c50, 0x0df10dfc, 0x0dc00dcd, 0x0ddc0dc5, 0x0d3d0dd3, 0x0d350d30, 0x0d030d0c, 0x0d010d00,
- 0x0d1d0d04, 0x0d700d10, 0x0d4d0d4f, 0x0d440d40, 0x0d530d45, 0x03f003f3, 0x03c303cc, 0x03c103c0,
- 0x03c403c7, 0x03d003dc, 0x03d503d7, 0x0333033c, 0x03310330, 0x03350334, 0x030c030f, 0x03000303,
- 0x03070301, 0x03050304, 0x031d031c, 0x03100313, 0x03140311, 0x0377037f, 0x034c0375, 0x03400343,
- 0x03440341, 0x0353035c, 0x03550350, 0x00fd00fc, 0x00f000f3, 0x00f400f1, 0x00cc00cf, 0x00c300cd,
- 0x00c100c0, 0x00c500c4, 0x00d300dc, 0x00d100d0, 0x003f00d4, 0x003d003c, 0x00300033, 0x00370031,
- 0x000f0034, 0x000d000c, 0x00000003, 0x00070001, 0x00050004, 0x001c001f, 0x00100013, 0x00170011,
- 0x00150014, 0x0073007c, 0x00740070, 0x004f0075, 0x0043004c, 0x00410040, 0x00440047, 0x0053005c,
- 0x00510050, 0x01ff0054, 0x01fd01fc, 0x01f101f3, 0x01f401f7, 0x01c301cc, 0x01c701c0, 0x01df01c4,
- 0x01dd01dc, 0x01d001d3, 0x01d701d1, 0x013c01d4, 0x01310130, 0x01340137, 0x010f0135, 0x010d010c,
- 0x01000103, 0x01070101, 0x01050104, 0x0113011c, 0x01140110, 0x0170017d, 0x01770171, 0x01750174,
- 0x0140014c, 0x015d0145, 0x01510150, 0x01540157, 0x07f007f3, 0x07f407f1, 0x07c007cf, 0x07dc07c7,
- 0x073007d5, 0x07350737, 0x0703070c, 0x07010700, 0x07040707, 0x071d071f, 0x07100713, 0x0774077d,
- 0x074d074f, 0x07470740, 0x0754075c, 0x04fd04fc, 0x04f504f0, 0x04c304cc, 0x04c104c0, 0x04d004c4,
- 0x0433043c, 0x04310430, 0x040f0434, 0x040d040c, 0x04000403, 0x04070401, 0x04050404, 0x0413041c,
- 0x04110410, 0x047c0414, 0x04740470, 0x0443044c, 0x04410440, 0x04440447, 0x05f30450, 0x05c005f7,
- 0x05df05c5, 0x05d105d0, 0x053005d4, 0x05340537, 0x0500050c, 0x05070501, 0x051d0504, 0x05170510,
- 0x057c0515, 0x054d0575, 0x05410540, 0x05450547, 0x1ff0055c, 0x1fc11fc3, 0x1fd01fc4, 0x1f0f1f33,
- 0x1f011f00, 0x1f051f07, 0x1f131f1c, 0x1f141f11, 0x1f411f7c, 0x1cfc1f50, 0x1cf11cf3, 0x1ccd1cf4,
- 0x1cdc1cc0, 0x1cd11cdd, 0x1c301cd4, 0x1c0c1c34, 0x1c011c00, 0x1c101c04, 0x1c151c11, 0x1c751c73,
- 0x1c401c4d, 0x1c511c5c, 0x1dcc1c54, 0x1dc41dc1, 0x1d3c1d3f, 0x1d001d31, 0x1d071d01, 0x1d701d1f,
- 0x1d411d4c, 0x13cc1d50, 0x13c013cd, 0x13c513c1, 0x13d113dc, 0x133f13d4, 0x1330133d, 0x13351337,
- 0x1303130c, 0x13011300, 0x13051304, 0x131d131f, 0x13731310, 0x13741370, 0x134d134f, 0x13401343,
- 0x13471341, 0x135c1345, 0x13541353, 0x10f710f0, 0x10cc10f5, 0x10c110c0, 0x103310c4, 0x10311030,
- 0x100f1034, 0x1003100c, 0x10011000, 0x101c1004, 0x10101013, 0x10141011, 0x10741071, 0x104c1075,
- 0x10411040, 0x10451044, 0x1050105d, 0x10571051, 0x11f411fd, 0x11df11c0, 0x11d711d1, 0x113f11d4,
- 0x11371130, 0x110c1135, 0x11001103, 0x11071101, 0x111f1105, 0x11171110, 0x117d117f, 0x11751170,
- 0x11411143, 0x11441147, 0x1153115f, 0x11551151, 0x17c417c1, 0x173c17d0, 0x1700170d, 0x171c1705,
- 0x17701714, 0x1747174c, 0x14fc1751, 0x14cf14f3, 0x14dc14c0, 0x14d114d3, 0x143f14d4, 0x1430143c,
- 0x14371431, 0x1403140c, 0x14011400, 0x141f1404, 0x14151410, 0x1473147d, 0x14401475, 0x1453145c,
- 0x14541450, 0x15c115cc, 0x153c15c7, 0x15341533, 0x1500150f, 0x15051507, 0x15101513, 0x15711514,
- 0x15471543, 0x15511545, 0x7ffd7fff, 0x7ff57ff7, 0x7fdd7fdf, 0x7fd57fd7, 0x7f0f7f30, 0x7f037f0c,
- 0x7f047f01, 0x7f7f7f10, 0x7f777f7d, 0x7f407f75, 0x7f5d7f5f, 0x7f557f57, 0x7ccc7cf0, 0x7cc17cc3,
- 0x7cd07cc4, 0x7c337c3c, 0x7c0f7c34, 0x7c007c0d, 0x7c077c01, 0x7c137c04, 0x7c147c11, 0x7c747c70,
- 0x7c417c43, 0x7c507c44, 0x7dfd7dff, 0x7df57df7, 0x7ddf7dc0, 0x7dd77ddd, 0x7d0c7dd5, 0x7d047d03,
- 0x7d7f7d10, 0x7d777d7d, 0x7d407d75, 0x7d5d7d5f, 0x7d557d57, 0x73c473c3, 0x7333733c, 0x7300730c,
- 0x731c7305, 0x73147313, 0x73447343, 0x70f470fc, 0x70c070cd, 0x70d170c5, 0x703f70d4, 0x7030703c,
- 0x700c7037, 0x70007003, 0x70047001, 0x70107005, 0x70177011, 0x707c7015, 0x70717073, 0x704f7074,
- 0x7040704d, 0x70517047, 0x71c171cc, 0x71d071c4, 0x7133713c, 0x71357134, 0x7100710f, 0x71057104,
- 0x7111711c, 0x71707115, 0x7145714c, 0x77ff7153, 0x77f777fd, 0x77c077f5, 0x77dd77df, 0x77d577d7,
- 0x7730773c, 0x7703770c, 0x77107704, 0x777f7714, 0x7777777d, 0x77407775, 0x775d775f, 0x77557757,
- 0x74f174f0, 0x74c374cc, 0x74d074c1, 0x7433743c, 0x74347431, 0x740d740f, 0x74057400, 0x7413741c,
- 0x74417470, 0x74507444, 0x75fd75ff, 0x75f575f7, 0x75df75c0, 0x75d775dd, 0x753075d5, 0x7503750c,
- 0x757f7501, 0x7577757d, 0x75407575, 0x755d755f, 0x75557557, 0x4fcc4ff0, 0x4fc74fc1, 0x4fd04fc4,
- 0x4f314f3c, 0x4f004f34, 0x4f054f07, 0x4f154f14, 0x4f4c4f70, 0x4f414f43, 0x4f504f44, 0x4cf34cfc,
- 0x4cf44cf1, 0x4cc04ccf, 0x4cc54cc7, 0x4cd34cdc, 0x4cd44cd1, 0x4c304c3f, 0x4c0c4c0f, 0x4c004c03,
- 0x4c044c01, 0x4c104c1d, 0x4c714c73, 0x4c404c4d, 0x4c5c4c47, 0x4c514c53, 0x4df04c54, 0x4dc34dcc,
- 0x4dd04dc4, 0x4d314d33, 0x4d0f4d34, 0x4d004d0d, 0x4d114d07, 0x4d704d14, 0x4d414d43, 0x43fc4d54,
- 0x43f143f3, 0x43c043cf, 0x43d143c7, 0x4335433f, 0x4303430c, 0x43014300, 0x43044307, 0x431c431f,
- 0x4310431d, 0x43714373, 0x4343434d, 0x43474340, 0x4354435c, 0x40f040ff, 0x40f540f7, 0x40cc40cf,
- 0x40c040c3, 0x40c440c1, 0x40d040dc, 0x40d540d4, 0x4033403c, 0x40314030, 0x400f4034, 0x400d400c,
- 0x40004003, 0x40074001, 0x40054004, 0x4013401c, 0x40114010, 0x407c4014, 0x40774070, 0x404d404c,
- 0x40404043, 0x40444041, 0x405f4045, 0x4050405d, 0x40554057, 0x41f341fc, 0x41c041cf, 0x41df41c4,
- 0x41d441d1, 0x41374130, 0x410c4134, 0x4100410d, 0x41044101, 0x41174110, 0x4173417d, 0x41754174,
- 0x4143414d, 0x41534140, 0x41544151, 0x47c147f0, 0x47d047c4, 0x4731473c, 0x470d470f, 0x47014700,
- 0x47134705, 0x47704710, 0x4741474c, 0x47504744, 0x44f144f3, 0x44cf44f4, 0x44c044cd, 0x44c544c7,
- 0x44dc44df, 0x44d144d3, 0x443d443f, 0x44374430, 0x440c4435, 0x44004403, 0x44044401, 0x4410441d,
- 0x44154411, 0x4473447c, 0x444d444f, 0x44454440, 0x4451445c, 0x45c045f0, 0x453345d0, 0x45344531,
- 0x4500450f, 0x451c4507, 0x454c4570, 0x45404543, 0x5fff4541, 0x5ff75ffd, 0x5fc05ff5, 0x5fdd5fdf,
- 0x5fd55fd7, 0x5f0c5f30, 0x5f015f03, 0x5f7f5f04, 0x5f775f7d, 0x5f405f75, 0x5f5d5f5f, 0x5f555f57,
- 0x5cf45cf0, 0x5cc35ccc, 0x5cc45cc1, 0x5c315cc5, 0x5c0c5c34, 0x5c075c00, 0x5c1c5c05, 0x5c705c13,
- 0x5c4d5c4f, 0x5c445c41, 0x5df75dfd, 0x5dcf5df5, 0x5ddd5dc4, 0x5dd55dd7, 0x5d0c5d30, 0x5d045d01,
- 0x5d7f5d10, 0x5d775d7d, 0x5d405d75, 0x5d5d5d5f, 0x5d555d57, 0x53d053c4, 0x5333533c, 0x5303530f,
- 0x53075300, 0x531c5305, 0x53115310, 0x53145317, 0x50f15370, 0x50cf50f4, 0x50c050cd, 0x50d150c7,
- 0x503d50d4, 0x500c5030, 0x50005003, 0x50045001, 0x50155010, 0x5073507c, 0x50715070, 0x504d5074,
- 0x50475040, 0x51cc51f0, 0x51c551c1, 0x51d051dc, 0x51315133, 0x510d5135, 0x51015100, 0x511f5107,
- 0x5171511d, 0x5140514f, 0x51445141, 0x5153515c, 0x57ff5151, 0x57f757fd, 0x57df57f5, 0x57d757dd,
- 0x570c57d5, 0x57015703, 0x577f5704, 0x5777577d, 0x57405775, 0x575d575f, 0x57555757, 0x54c354f0,
- 0x54dc54c4, 0x543c54d0, 0x5400540f, 0x541c5405, 0x54145411, 0x5441544f, 0x55fd55ff, 0x55f555f7,
- 0x55dd55df, 0x55d555d7, 0x5503550c, 0x557f5501, 0x5577557d, 0x55405575, 0x555d555f, 0x55555557
-};
-
-shared uint16_t iq1s_grid[2048];
-
-#define NEEDS_INIT_IQ_SHMEM
-void init_iq_shmem(uvec3 wgsize)
-{
- // copy the table into shared memory and sync
- [[unroll]] for (uint i = 0; i < iq1s_grid_const.length(); i += wgsize.x) {
- uint idx = i + gl_LocalInvocationIndex.x;
- if (iq1s_grid_const.length() % wgsize.x == 0 || idx < iq1s_grid_const.length()) {
- u16vec2 g = unpack16(iq1s_grid_const[idx]);
- iq1s_grid[2*idx+0] = g.x;
- iq1s_grid[2*idx+1] = g.y;
- }
- }
- barrier();
-}
-#endif
-
-#define QUANT_K_IQ2_XXS 256
-#define QUANT_R_IQ2_XXS 1
-
-struct block_iq2_xxs
-{
- float16_t d;
- uint8_t qs[QUANT_K_IQ2_XXS/4];
-};
-
-struct block_iq2_xxs_packed16
-{
- float16_t d;
- uint16_t qs[QUANT_K_IQ2_XXS/8];
-};
-
-#if defined(DATA_A_IQ2_XXS)
-
-const uvec2[256] iq2xxs_grid_const = {
- uvec2(0x08080808, 0x08080808), uvec2(0x0808082b, 0x08080808), uvec2(0x08081919, 0x08080808), uvec2(0x08082b08, 0x08080808),
- uvec2(0x08082b2b, 0x08080808), uvec2(0x08190819, 0x08080808), uvec2(0x08191908, 0x08080808), uvec2(0x082b0808, 0x08080808),
- uvec2(0x082b082b, 0x08080808), uvec2(0x082b2b08, 0x08080808), uvec2(0x082b2b2b, 0x08080808), uvec2(0x19080819, 0x08080808),
- uvec2(0x19081908, 0x08080808), uvec2(0x19190808, 0x08080808), uvec2(0x19192b08, 0x08080808), uvec2(0x192b0819, 0x08080808),
- uvec2(0x192b1908, 0x08080808), uvec2(0x2b080808, 0x08080808), uvec2(0x2b08082b, 0x08080808), uvec2(0x2b082b2b, 0x08080808),
- uvec2(0x2b2b082b, 0x08080808), uvec2(0x08080819, 0x08080819), uvec2(0x08081908, 0x08080819), uvec2(0x08190808, 0x08080819),
- uvec2(0x08191919, 0x08080819), uvec2(0x19080808, 0x08080819), uvec2(0x2b081908, 0x08080819), uvec2(0x2b192b08, 0x08080819),
- uvec2(0x08080808, 0x0808082b), uvec2(0x0808082b, 0x0808082b), uvec2(0x082b082b, 0x0808082b), uvec2(0x2b08082b, 0x0808082b),
- uvec2(0x08080819, 0x08081908), uvec2(0x08081908, 0x08081908), uvec2(0x08190808, 0x08081908), uvec2(0x082b0819, 0x08081908),
- uvec2(0x082b1908, 0x08081908), uvec2(0x19080808, 0x08081908), uvec2(0x1908082b, 0x08081908), uvec2(0x19082b08, 0x08081908),
- uvec2(0x192b0808, 0x08081908), uvec2(0x2b080819, 0x08081908), uvec2(0x2b081908, 0x08081908), uvec2(0x2b190808, 0x08081908),
- uvec2(0x2b2b1908, 0x08081908), uvec2(0x08080808, 0x08081919), uvec2(0x0808082b, 0x08081919), uvec2(0x08082b08, 0x08081919),
- uvec2(0x082b0808, 0x08081919), uvec2(0x1908192b, 0x08081919), uvec2(0x192b2b19, 0x08081919), uvec2(0x2b080808, 0x08081919),
- uvec2(0x2b190819, 0x08081919), uvec2(0x08082b19, 0x0808192b), uvec2(0x08190808, 0x0808192b), uvec2(0x19080808, 0x0808192b),
- uvec2(0x2b081908, 0x0808192b), uvec2(0x2b2b1908, 0x0808192b), uvec2(0x08080808, 0x08082b08), uvec2(0x08081919, 0x08082b08),
- uvec2(0x08082b08, 0x08082b08), uvec2(0x08191908, 0x08082b08), uvec2(0x082b2b08, 0x08082b08), uvec2(0x19080819, 0x08082b08),
- uvec2(0x19081908, 0x08082b08), uvec2(0x19190808, 0x08082b08), uvec2(0x1919082b, 0x08082b08), uvec2(0x2b082b08, 0x08082b08),
- uvec2(0x08081908, 0x08082b19), uvec2(0x19080808, 0x08082b19), uvec2(0x0808082b, 0x08082b2b), uvec2(0x08191908, 0x08082b2b),
- uvec2(0x08080819, 0x08190808), uvec2(0x08081908, 0x08190808), uvec2(0x08190808, 0x08190808), uvec2(0x082b0819, 0x08190808),
- uvec2(0x19080808, 0x08190808), uvec2(0x192b0808, 0x08190808), uvec2(0x2b081908, 0x08190808), uvec2(0x2b190808, 0x08190808),
- uvec2(0x2b191919, 0x08190808), uvec2(0x08080808, 0x08190819), uvec2(0x08082b08, 0x08190819), uvec2(0x082b0808, 0x08190819),
- uvec2(0x19190808, 0x08190819), uvec2(0x19192b2b, 0x08190819), uvec2(0x2b080808, 0x08190819), uvec2(0x082b1908, 0x0819082b),
- uvec2(0x19081919, 0x0819082b), uvec2(0x08080808, 0x08191908), uvec2(0x08082b08, 0x08191908), uvec2(0x082b0808, 0x08191908),
- uvec2(0x082b1919, 0x08191908), uvec2(0x19082b19, 0x08191908), uvec2(0x2b080808, 0x08191908), uvec2(0x08192b08, 0x08191919),
- uvec2(0x192b082b, 0x08191919), uvec2(0x08080808, 0x0819192b), uvec2(0x0819192b, 0x0819192b), uvec2(0x08080819, 0x08192b08),
- uvec2(0x08081908, 0x08192b08), uvec2(0x08190808, 0x08192b08), uvec2(0x19080808, 0x08192b08), uvec2(0x2b080819, 0x08192b08),
- uvec2(0x08080808, 0x08192b19), uvec2(0x08081919, 0x08192b19), uvec2(0x2b2b0808, 0x08192b19), uvec2(0x19190819, 0x08192b2b),
- uvec2(0x08080808, 0x082b0808), uvec2(0x0808082b, 0x082b0808), uvec2(0x08082b2b, 0x082b0808), uvec2(0x19081908, 0x082b0808),
- uvec2(0x192b0819, 0x082b0808), uvec2(0x2b080808, 0x082b0808), uvec2(0x2b08082b, 0x082b0808), uvec2(0x082b2b19, 0x082b0819),
- uvec2(0x19082b08, 0x082b0819), uvec2(0x08080808, 0x082b082b), uvec2(0x0808082b, 0x082b082b), uvec2(0x08080819, 0x082b1908),
- uvec2(0x08081908, 0x082b1908), uvec2(0x08190808, 0x082b1908), uvec2(0x19080808, 0x082b1908), uvec2(0x1919192b, 0x082b1908),
- uvec2(0x08080808, 0x082b1919), uvec2(0x19080819, 0x082b1919), uvec2(0x192b1908, 0x082b1919), uvec2(0x2b190808, 0x082b192b),
- uvec2(0x08082b08, 0x082b2b08), uvec2(0x082b0808, 0x082b2b08), uvec2(0x2b191908, 0x082b2b08), uvec2(0x19081908, 0x082b2b2b),
- uvec2(0x08080819, 0x19080808), uvec2(0x08081908, 0x19080808), uvec2(0x08190808, 0x19080808), uvec2(0x08192b08, 0x19080808),
- uvec2(0x082b0819, 0x19080808), uvec2(0x082b1908, 0x19080808), uvec2(0x19080808, 0x19080808), uvec2(0x19082b08, 0x19080808),
- uvec2(0x1919192b, 0x19080808), uvec2(0x192b0808, 0x19080808), uvec2(0x2b080819, 0x19080808), uvec2(0x2b081908, 0x19080808),
- uvec2(0x2b190808, 0x19080808), uvec2(0x08080808, 0x19080819), uvec2(0x082b0808, 0x19080819), uvec2(0x192b0819, 0x19080819),
- uvec2(0x2b080808, 0x19080819), uvec2(0x2b081919, 0x19080819), uvec2(0x08080819, 0x1908082b), uvec2(0x08190808, 0x1908082b),
- uvec2(0x19082b08, 0x1908082b), uvec2(0x1919192b, 0x1908082b), uvec2(0x192b2b08, 0x1908082b), uvec2(0x08080808, 0x19081908),
- uvec2(0x08082b08, 0x19081908), uvec2(0x082b0808, 0x19081908), uvec2(0x2b080808, 0x19081908), uvec2(0x2b192b19, 0x19081908),
- uvec2(0x0819082b, 0x19081919), uvec2(0x082b1908, 0x19081919), uvec2(0x08080808, 0x1908192b), uvec2(0x08080819, 0x19082b08),
- uvec2(0x08081908, 0x19082b08), uvec2(0x08190808, 0x19082b08), uvec2(0x19080808, 0x19082b08), uvec2(0x19081919, 0x19082b08),
- uvec2(0x08080808, 0x19082b19), uvec2(0x19192b08, 0x19082b19), uvec2(0x192b0819, 0x19082b19), uvec2(0x2b08082b, 0x19082b19),
- uvec2(0x19081919, 0x19082b2b), uvec2(0x2b190808, 0x19082b2b), uvec2(0x08080808, 0x19190808), uvec2(0x08082b08, 0x19190808),
- uvec2(0x08190819, 0x19190808), uvec2(0x08192b19, 0x19190808), uvec2(0x082b0808, 0x19190808), uvec2(0x2b080808, 0x19190808),
- uvec2(0x2b082b08, 0x19190808), uvec2(0x08081908, 0x19190819), uvec2(0x1908082b, 0x19190819), uvec2(0x2b2b1908, 0x19190819),
- uvec2(0x2b190819, 0x1919082b), uvec2(0x2b190808, 0x19191908), uvec2(0x2b19082b, 0x19191908), uvec2(0x08082b2b, 0x19191919),
- uvec2(0x08080819, 0x1919192b), uvec2(0x19191908, 0x1919192b), uvec2(0x08080808, 0x19192b08), uvec2(0x08190819, 0x19192b08),
- uvec2(0x08192b19, 0x19192b08), uvec2(0x192b1908, 0x19192b08), uvec2(0x19080808, 0x19192b19), uvec2(0x08082b08, 0x19192b2b),
- uvec2(0x08081908, 0x192b0808), uvec2(0x08190808, 0x192b0808), uvec2(0x19080808, 0x192b0808), uvec2(0x192b2b08, 0x192b0808),
- uvec2(0x08080808, 0x192b0819), uvec2(0x19191919, 0x192b0819), uvec2(0x08192b08, 0x192b082b), uvec2(0x192b0808, 0x192b082b),
- uvec2(0x08080808, 0x192b1908), uvec2(0x08081919, 0x192b1908), uvec2(0x08190808, 0x192b1919), uvec2(0x0819082b, 0x192b1919),
- uvec2(0x2b081908, 0x192b1919), uvec2(0x1908082b, 0x192b2b08), uvec2(0x08080808, 0x2b080808), uvec2(0x0808082b, 0x2b080808),
- uvec2(0x08082b2b, 0x2b080808), uvec2(0x19080819, 0x2b080808), uvec2(0x2b08082b, 0x2b080808), uvec2(0x08081908, 0x2b080819),
- uvec2(0x08192b08, 0x2b080819), uvec2(0x19080808, 0x2b080819), uvec2(0x08190819, 0x2b08082b), uvec2(0x08080819, 0x2b081908),
- uvec2(0x08081908, 0x2b081908), uvec2(0x08190808, 0x2b081908), uvec2(0x08191919, 0x2b081908), uvec2(0x19080808, 0x2b081908),
- uvec2(0x192b0808, 0x2b081908), uvec2(0x08080808, 0x2b081919), uvec2(0x1908192b, 0x2b081919), uvec2(0x2b191908, 0x2b081919),
- uvec2(0x08082b19, 0x2b08192b), uvec2(0x19080808, 0x2b08192b), uvec2(0x192b0808, 0x2b08192b), uvec2(0x0808082b, 0x2b082b08),
- uvec2(0x08081908, 0x2b082b19), uvec2(0x08190819, 0x2b082b2b), uvec2(0x08081908, 0x2b190808), uvec2(0x08190808, 0x2b190808),
- uvec2(0x082b1908, 0x2b190808), uvec2(0x19080808, 0x2b190808), uvec2(0x2b2b0819, 0x2b190808), uvec2(0x0819192b, 0x2b190819),
- uvec2(0x2b080808, 0x2b190819), uvec2(0x19081919, 0x2b19082b), uvec2(0x08080808, 0x2b191908), uvec2(0x082b082b, 0x2b191908),
- uvec2(0x19081908, 0x2b191908), uvec2(0x19190819, 0x2b191919), uvec2(0x2b080819, 0x2b192b08), uvec2(0x082b0808, 0x2b192b19),
- uvec2(0x0808082b, 0x2b2b0808), uvec2(0x19190808, 0x2b2b0808), uvec2(0x2b081919, 0x2b2b0808), uvec2(0x08082b19, 0x2b2b0819),
- uvec2(0x08080808, 0x2b2b082b), uvec2(0x08192b08, 0x2b2b1908), uvec2(0x19190808, 0x2b2b2b08), uvec2(0x08081908, 0x2b2b2b19)
-};
-
-shared uvec2 iq2xxs_grid[256];
-
-#define NEEDS_INIT_IQ_SHMEM
-void init_iq_shmem(uvec3 wgsize)
-{
- // copy the table into shared memory and sync
- [[unroll]] for (uint i = 0; i < iq2xxs_grid.length(); i += wgsize.x) {
- if (iq2xxs_grid_const.length() % wgsize.x == 0 || i + gl_LocalInvocationIndex.x < iq2xxs_grid_const.length()) {
- iq2xxs_grid[i + gl_LocalInvocationIndex.x] = iq2xxs_grid_const[i + gl_LocalInvocationIndex.x];
- }
- }
- barrier();
-}
-
-#define QUANT_K QUANT_K_IQ2_XXS
-#define QUANT_R QUANT_R_IQ2_XXS
-#define A_TYPE block_iq2_xxs
-#define A_TYPE_PACKED16 block_iq2_xxs_packed16
-#endif
-
-#define QUANT_K_IQ2_XS 256
-#define QUANT_R_IQ2_XS 1
-
-struct block_iq2_xs
-{
- float16_t d;
- uint16_t qs[QUANT_K_IQ2_XS/8];
- uint8_t scales[QUANT_K_IQ2_XS/32];
-};
-
-struct block_iq2_xs_packed16
-{
- float16_t d;
- uint16_t qs[QUANT_K_IQ2_XS/8];
- uint16_t scales[QUANT_K_IQ2_XS/64];
-};
-
-#if defined(DATA_A_IQ2_XS)
-
-const uvec2 iq2xs_grid_const[512] = {
- uvec2(0x08080808, 0x08080808), uvec2(0x0808082b, 0x08080808), uvec2(0x08081919, 0x08080808), uvec2(0x08082b08, 0x08080808),
- uvec2(0x08082b2b, 0x08080808), uvec2(0x08190819, 0x08080808), uvec2(0x08191908, 0x08080808), uvec2(0x0819192b, 0x08080808),
- uvec2(0x08192b19, 0x08080808), uvec2(0x082b0808, 0x08080808), uvec2(0x082b082b, 0x08080808), uvec2(0x082b1919, 0x08080808),
- uvec2(0x082b2b08, 0x08080808), uvec2(0x19080819, 0x08080808), uvec2(0x19081908, 0x08080808), uvec2(0x1908192b, 0x08080808),
- uvec2(0x19082b19, 0x08080808), uvec2(0x19190808, 0x08080808), uvec2(0x1919082b, 0x08080808), uvec2(0x19191919, 0x08080808),
- uvec2(0x19192b08, 0x08080808), uvec2(0x192b0819, 0x08080808), uvec2(0x192b1908, 0x08080808), uvec2(0x2b080808, 0x08080808),
- uvec2(0x2b08082b, 0x08080808), uvec2(0x2b081919, 0x08080808), uvec2(0x2b082b08, 0x08080808), uvec2(0x2b190819, 0x08080808),
- uvec2(0x2b191908, 0x08080808), uvec2(0x2b192b19, 0x08080808), uvec2(0x2b2b0808, 0x08080808), uvec2(0x08080819, 0x08080819),
- uvec2(0x08081908, 0x08080819), uvec2(0x0808192b, 0x08080819), uvec2(0x08082b19, 0x08080819), uvec2(0x08190808, 0x08080819),
- uvec2(0x0819082b, 0x08080819), uvec2(0x08191919, 0x08080819), uvec2(0x08192b08, 0x08080819), uvec2(0x08192b2b, 0x08080819),
- uvec2(0x082b0819, 0x08080819), uvec2(0x082b1908, 0x08080819), uvec2(0x19080808, 0x08080819), uvec2(0x1908082b, 0x08080819),
- uvec2(0x19081919, 0x08080819), uvec2(0x19082b08, 0x08080819), uvec2(0x19190819, 0x08080819), uvec2(0x19191908, 0x08080819),
- uvec2(0x192b0808, 0x08080819), uvec2(0x192b2b08, 0x08080819), uvec2(0x2b080819, 0x08080819), uvec2(0x2b081908, 0x08080819),
- uvec2(0x2b190808, 0x08080819), uvec2(0x08080808, 0x0808082b), uvec2(0x0808082b, 0x0808082b), uvec2(0x08081919, 0x0808082b),
- uvec2(0x08082b08, 0x0808082b), uvec2(0x08190819, 0x0808082b), uvec2(0x08191908, 0x0808082b), uvec2(0x082b0808, 0x0808082b),
- uvec2(0x19080819, 0x0808082b), uvec2(0x19081908, 0x0808082b), uvec2(0x19190808, 0x0808082b), uvec2(0x19191919, 0x0808082b),
- uvec2(0x2b080808, 0x0808082b), uvec2(0x2b082b2b, 0x0808082b), uvec2(0x08080819, 0x08081908), uvec2(0x08081908, 0x08081908),
- uvec2(0x0808192b, 0x08081908), uvec2(0x08082b19, 0x08081908), uvec2(0x08190808, 0x08081908), uvec2(0x0819082b, 0x08081908),
- uvec2(0x08191919, 0x08081908), uvec2(0x08192b08, 0x08081908), uvec2(0x082b0819, 0x08081908), uvec2(0x082b1908, 0x08081908),
- uvec2(0x19080808, 0x08081908), uvec2(0x1908082b, 0x08081908), uvec2(0x19081919, 0x08081908), uvec2(0x19082b08, 0x08081908),
- uvec2(0x19190819, 0x08081908), uvec2(0x19191908, 0x08081908), uvec2(0x1919192b, 0x08081908), uvec2(0x192b0808, 0x08081908),
- uvec2(0x2b080819, 0x08081908), uvec2(0x2b081908, 0x08081908), uvec2(0x2b190808, 0x08081908), uvec2(0x08080808, 0x08081919),
- uvec2(0x0808082b, 0x08081919), uvec2(0x08081919, 0x08081919), uvec2(0x08082b08, 0x08081919), uvec2(0x08190819, 0x08081919),
- uvec2(0x08191908, 0x08081919), uvec2(0x082b0808, 0x08081919), uvec2(0x19080819, 0x08081919), uvec2(0x19081908, 0x08081919),
- uvec2(0x19190808, 0x08081919), uvec2(0x192b0819, 0x08081919), uvec2(0x2b080808, 0x08081919), uvec2(0x08080819, 0x0808192b),
- uvec2(0x08081908, 0x0808192b), uvec2(0x08190808, 0x0808192b), uvec2(0x082b192b, 0x0808192b), uvec2(0x19080808, 0x0808192b),
- uvec2(0x1908082b, 0x0808192b), uvec2(0x2b081908, 0x0808192b), uvec2(0x08080808, 0x08082b08), uvec2(0x0808082b, 0x08082b08),
- uvec2(0x08081919, 0x08082b08), uvec2(0x08082b08, 0x08082b08), uvec2(0x08082b2b, 0x08082b08), uvec2(0x08190819, 0x08082b08),
- uvec2(0x08191908, 0x08082b08), uvec2(0x082b0808, 0x08082b08), uvec2(0x082b1919, 0x08082b08), uvec2(0x19080819, 0x08082b08),
- uvec2(0x19081908, 0x08082b08), uvec2(0x19190808, 0x08082b08), uvec2(0x19192b08, 0x08082b08), uvec2(0x2b080808, 0x08082b08),
- uvec2(0x2b2b0808, 0x08082b08), uvec2(0x2b2b2b2b, 0x08082b08), uvec2(0x08080819, 0x08082b19), uvec2(0x08081908, 0x08082b19),
- uvec2(0x08190808, 0x08082b19), uvec2(0x19080808, 0x08082b19), uvec2(0x2b080819, 0x08082b19), uvec2(0x2b082b19, 0x08082b19),
- uvec2(0x08080808, 0x08082b2b), uvec2(0x082b0808, 0x08082b2b), uvec2(0x082b2b08, 0x08082b2b), uvec2(0x2b19192b, 0x08082b2b),
- uvec2(0x2b2b0808, 0x08082b2b), uvec2(0x08080819, 0x08190808), uvec2(0x08081908, 0x08190808), uvec2(0x0808192b, 0x08190808),
- uvec2(0x08082b19, 0x08190808), uvec2(0x08190808, 0x08190808), uvec2(0x0819082b, 0x08190808), uvec2(0x08191919, 0x08190808),
- uvec2(0x08192b08, 0x08190808), uvec2(0x082b0819, 0x08190808), uvec2(0x082b1908, 0x08190808), uvec2(0x19080808, 0x08190808),
- uvec2(0x1908082b, 0x08190808), uvec2(0x19081919, 0x08190808), uvec2(0x19082b08, 0x08190808), uvec2(0x19190819, 0x08190808),
- uvec2(0x19191908, 0x08190808), uvec2(0x192b0808, 0x08190808), uvec2(0x192b2b2b, 0x08190808), uvec2(0x2b080819, 0x08190808),
- uvec2(0x2b081908, 0x08190808), uvec2(0x2b190808, 0x08190808), uvec2(0x08080808, 0x08190819), uvec2(0x0808082b, 0x08190819),
- uvec2(0x08081919, 0x08190819), uvec2(0x08082b08, 0x08190819), uvec2(0x08190819, 0x08190819), uvec2(0x08191908, 0x08190819),
- uvec2(0x082b0808, 0x08190819), uvec2(0x19080819, 0x08190819), uvec2(0x19081908, 0x08190819), uvec2(0x19190808, 0x08190819),
- uvec2(0x2b080808, 0x08190819), uvec2(0x2b191908, 0x08190819), uvec2(0x2b19192b, 0x08190819), uvec2(0x08080819, 0x0819082b),
- uvec2(0x08081908, 0x0819082b), uvec2(0x0808192b, 0x0819082b), uvec2(0x08190808, 0x0819082b), uvec2(0x19080808, 0x0819082b),
- uvec2(0x192b0808, 0x0819082b), uvec2(0x08080808, 0x08191908), uvec2(0x0808082b, 0x08191908), uvec2(0x08081919, 0x08191908),
- uvec2(0x08082b08, 0x08191908), uvec2(0x08190819, 0x08191908), uvec2(0x08191908, 0x08191908), uvec2(0x082b0808, 0x08191908),
- uvec2(0x19080819, 0x08191908), uvec2(0x19081908, 0x08191908), uvec2(0x19082b19, 0x08191908), uvec2(0x19190808, 0x08191908),
- uvec2(0x192b1908, 0x08191908), uvec2(0x2b080808, 0x08191908), uvec2(0x08080819, 0x08191919), uvec2(0x08081908, 0x08191919),
- uvec2(0x08190808, 0x08191919), uvec2(0x19080808, 0x08191919), uvec2(0x08080808, 0x0819192b), uvec2(0x08191908, 0x0819192b),
- uvec2(0x19082b19, 0x0819192b), uvec2(0x08080819, 0x08192b08), uvec2(0x08081908, 0x08192b08), uvec2(0x08190808, 0x08192b08),
- uvec2(0x0819082b, 0x08192b08), uvec2(0x19080808, 0x08192b08), uvec2(0x19191908, 0x08192b08), uvec2(0x2b08192b, 0x08192b08),
- uvec2(0x08080808, 0x08192b19), uvec2(0x08081919, 0x08192b19), uvec2(0x192b192b, 0x08192b19), uvec2(0x19190819, 0x08192b2b),
- uvec2(0x2b2b2b19, 0x08192b2b), uvec2(0x08080808, 0x082b0808), uvec2(0x0808082b, 0x082b0808), uvec2(0x08081919, 0x082b0808),
- uvec2(0x08082b08, 0x082b0808), uvec2(0x08082b2b, 0x082b0808), uvec2(0x08190819, 0x082b0808), uvec2(0x08191908, 0x082b0808),
- uvec2(0x082b0808, 0x082b0808), uvec2(0x19080819, 0x082b0808), uvec2(0x19081908, 0x082b0808), uvec2(0x19190808, 0x082b0808),
- uvec2(0x2b080808, 0x082b0808), uvec2(0x2b2b0808, 0x082b0808), uvec2(0x08080819, 0x082b0819), uvec2(0x08081908, 0x082b0819),
- uvec2(0x08190808, 0x082b0819), uvec2(0x19080808, 0x082b0819), uvec2(0x19082b08, 0x082b0819), uvec2(0x192b1919, 0x082b0819),
- uvec2(0x08080808, 0x082b082b), uvec2(0x082b082b, 0x082b082b), uvec2(0x2b080808, 0x082b082b), uvec2(0x2b2b2b08, 0x082b082b),
- uvec2(0x08080819, 0x082b1908), uvec2(0x08081908, 0x082b1908), uvec2(0x08190808, 0x082b1908), uvec2(0x082b2b19, 0x082b1908),
- uvec2(0x19080808, 0x082b1908), uvec2(0x08080808, 0x082b1919), uvec2(0x19080819, 0x082b1919), uvec2(0x1919082b, 0x082b1919),
- uvec2(0x2b192b19, 0x082b1919), uvec2(0x08080819, 0x082b192b), uvec2(0x08192b2b, 0x082b192b), uvec2(0x2b2b192b, 0x082b192b),
- uvec2(0x08080808, 0x082b2b08), uvec2(0x08082b08, 0x082b2b08), uvec2(0x08082b2b, 0x082b2b08), uvec2(0x082b0808, 0x082b2b08),
- uvec2(0x19191919, 0x082b2b08), uvec2(0x2b082b08, 0x082b2b08), uvec2(0x2b2b082b, 0x082b2b08), uvec2(0x192b2b08, 0x082b2b19),
- uvec2(0x2b190808, 0x082b2b19), uvec2(0x08082b08, 0x082b2b2b), uvec2(0x082b0808, 0x082b2b2b), uvec2(0x2b08082b, 0x082b2b2b),
- uvec2(0x2b082b08, 0x082b2b2b), uvec2(0x2b082b2b, 0x082b2b2b), uvec2(0x08080819, 0x19080808), uvec2(0x08081908, 0x19080808),
- uvec2(0x0808192b, 0x19080808), uvec2(0x08082b19, 0x19080808), uvec2(0x08190808, 0x19080808), uvec2(0x0819082b, 0x19080808),
- uvec2(0x08191919, 0x19080808), uvec2(0x08192b08, 0x19080808), uvec2(0x082b0819, 0x19080808), uvec2(0x082b1908, 0x19080808),
- uvec2(0x19080808, 0x19080808), uvec2(0x1908082b, 0x19080808), uvec2(0x19081919, 0x19080808), uvec2(0x19082b08, 0x19080808),
- uvec2(0x19082b2b, 0x19080808), uvec2(0x19190819, 0x19080808), uvec2(0x19191908, 0x19080808), uvec2(0x192b0808, 0x19080808),
- uvec2(0x192b1919, 0x19080808), uvec2(0x2b080819, 0x19080808), uvec2(0x2b081908, 0x19080808), uvec2(0x2b190808, 0x19080808),
- uvec2(0x08080808, 0x19080819), uvec2(0x0808082b, 0x19080819), uvec2(0x08081919, 0x19080819), uvec2(0x08082b08, 0x19080819),
- uvec2(0x08190819, 0x19080819), uvec2(0x08191908, 0x19080819), uvec2(0x082b0808, 0x19080819), uvec2(0x19080819, 0x19080819),
- uvec2(0x19081908, 0x19080819), uvec2(0x19190808, 0x19080819), uvec2(0x2b080808, 0x19080819), uvec2(0x2b081919, 0x19080819),
- uvec2(0x2b2b082b, 0x19080819), uvec2(0x08080819, 0x1908082b), uvec2(0x08081908, 0x1908082b), uvec2(0x08190808, 0x1908082b),
- uvec2(0x0819082b, 0x1908082b), uvec2(0x082b2b19, 0x1908082b), uvec2(0x19080808, 0x1908082b), uvec2(0x08080808, 0x19081908),
- uvec2(0x0808082b, 0x19081908), uvec2(0x08081919, 0x19081908), uvec2(0x08082b08, 0x19081908), uvec2(0x08190819, 0x19081908),
- uvec2(0x08191908, 0x19081908), uvec2(0x08192b19, 0x19081908), uvec2(0x082b0808, 0x19081908), uvec2(0x19080819, 0x19081908),
- uvec2(0x19081908, 0x19081908), uvec2(0x19190808, 0x19081908), uvec2(0x2b080808, 0x19081908), uvec2(0x2b191908, 0x19081908),
- uvec2(0x08080819, 0x19081919), uvec2(0x08081908, 0x19081919), uvec2(0x08190808, 0x19081919), uvec2(0x082b1908, 0x19081919),
- uvec2(0x19080808, 0x19081919), uvec2(0x2b192b2b, 0x19081919), uvec2(0x08080808, 0x1908192b), uvec2(0x08082b2b, 0x1908192b),
- uvec2(0x19081908, 0x1908192b), uvec2(0x19190808, 0x1908192b), uvec2(0x08080819, 0x19082b08), uvec2(0x08081908, 0x19082b08),
- uvec2(0x08190808, 0x19082b08), uvec2(0x19080808, 0x19082b08), uvec2(0x19081919, 0x19082b08), uvec2(0x19191908, 0x19082b08),
- uvec2(0x192b082b, 0x19082b08), uvec2(0x08080808, 0x19082b19), uvec2(0x08190819, 0x19082b19), uvec2(0x19081908, 0x19082b19),
- uvec2(0x19190808, 0x19082b19), uvec2(0x192b2b19, 0x19082b19), uvec2(0x08081908, 0x19082b2b), uvec2(0x08080808, 0x19190808),
- uvec2(0x0808082b, 0x19190808), uvec2(0x08081919, 0x19190808), uvec2(0x08082b08, 0x19190808), uvec2(0x08190819, 0x19190808),
- uvec2(0x08191908, 0x19190808), uvec2(0x082b0808, 0x19190808), uvec2(0x082b2b08, 0x19190808), uvec2(0x19080819, 0x19190808),
- uvec2(0x19081908, 0x19190808), uvec2(0x19190808, 0x19190808), uvec2(0x2b080808, 0x19190808), uvec2(0x08080819, 0x19190819),
- uvec2(0x08081908, 0x19190819), uvec2(0x08190808, 0x19190819), uvec2(0x08191919, 0x19190819), uvec2(0x19080808, 0x19190819),
- uvec2(0x1908082b, 0x19190819), uvec2(0x08080808, 0x1919082b), uvec2(0x19081908, 0x1919082b), uvec2(0x2b2b2b2b, 0x1919082b),
- uvec2(0x08080819, 0x19191908), uvec2(0x08081908, 0x19191908), uvec2(0x08190808, 0x19191908), uvec2(0x082b0819, 0x19191908),
- uvec2(0x19080808, 0x19191908), uvec2(0x192b0808, 0x19191908), uvec2(0x2b080819, 0x19191908), uvec2(0x2b2b0819, 0x19191908),
- uvec2(0x08080808, 0x19191919), uvec2(0x08082b08, 0x19191919), uvec2(0x2b080808, 0x19191919), uvec2(0x2b082b08, 0x19191919),
- uvec2(0x082b0819, 0x1919192b), uvec2(0x192b2b08, 0x1919192b), uvec2(0x2b2b0819, 0x1919192b), uvec2(0x08080808, 0x19192b08),
- uvec2(0x08191908, 0x19192b08), uvec2(0x19080819, 0x19192b08), uvec2(0x19190808, 0x19192b08), uvec2(0x2b192b19, 0x19192b08),
- uvec2(0x08192b2b, 0x19192b19), uvec2(0x19080808, 0x19192b19), uvec2(0x1908082b, 0x19192b19), uvec2(0x2b081919, 0x19192b2b),
- uvec2(0x08080819, 0x192b0808), uvec2(0x08081908, 0x192b0808), uvec2(0x08190808, 0x192b0808), uvec2(0x19080808, 0x192b0808),
- uvec2(0x19191908, 0x192b0808), uvec2(0x192b082b, 0x192b0808), uvec2(0x2b08192b, 0x192b0808), uvec2(0x2b2b2b19, 0x192b0808),
- uvec2(0x08080808, 0x192b0819), uvec2(0x082b1908, 0x192b082b), uvec2(0x19082b2b, 0x192b082b), uvec2(0x2b19082b, 0x192b082b),
- uvec2(0x08080808, 0x192b1908), uvec2(0x0819192b, 0x192b1908), uvec2(0x08190808, 0x192b1919), uvec2(0x19080808, 0x192b1919),
- uvec2(0x19081919, 0x192b1919), uvec2(0x2b2b1908, 0x192b1919), uvec2(0x08080819, 0x192b2b08), uvec2(0x192b2b2b, 0x192b2b08),
- uvec2(0x082b1919, 0x192b2b19), uvec2(0x0808192b, 0x192b2b2b), uvec2(0x19191908, 0x192b2b2b), uvec2(0x192b082b, 0x192b2b2b),
- uvec2(0x08080808, 0x2b080808), uvec2(0x0808082b, 0x2b080808), uvec2(0x08081919, 0x2b080808), uvec2(0x08082b08, 0x2b080808),
- uvec2(0x08190819, 0x2b080808), uvec2(0x08191908, 0x2b080808), uvec2(0x082b0808, 0x2b080808), uvec2(0x082b2b2b, 0x2b080808),
- uvec2(0x19080819, 0x2b080808), uvec2(0x19081908, 0x2b080808), uvec2(0x19190808, 0x2b080808), uvec2(0x2b080808, 0x2b080808),
- uvec2(0x2b08082b, 0x2b080808), uvec2(0x2b2b2b08, 0x2b080808), uvec2(0x2b2b2b2b, 0x2b080808), uvec2(0x08080819, 0x2b080819),
- uvec2(0x08081908, 0x2b080819), uvec2(0x0808192b, 0x2b080819), uvec2(0x08190808, 0x2b080819), uvec2(0x19080808, 0x2b080819),
- uvec2(0x19190819, 0x2b080819), uvec2(0x19192b19, 0x2b080819), uvec2(0x08080808, 0x2b08082b), uvec2(0x082b0808, 0x2b08082b),
- uvec2(0x2b080808, 0x2b08082b), uvec2(0x2b08082b, 0x2b08082b), uvec2(0x2b2b0808, 0x2b08082b), uvec2(0x2b2b2b08, 0x2b08082b),
- uvec2(0x08080819, 0x2b081908), uvec2(0x08081908, 0x2b081908), uvec2(0x08190808, 0x2b081908), uvec2(0x0819082b, 0x2b081908),
- uvec2(0x08191919, 0x2b081908), uvec2(0x19080808, 0x2b081908), uvec2(0x192b0808, 0x2b081908), uvec2(0x2b082b19, 0x2b081908),
- uvec2(0x08080808, 0x2b081919), uvec2(0x19081908, 0x2b081919), uvec2(0x2b2b1919, 0x2b081919), uvec2(0x08192b08, 0x2b08192b),
- uvec2(0x192b2b2b, 0x2b08192b), uvec2(0x08080808, 0x2b082b08), uvec2(0x08082b08, 0x2b082b08), uvec2(0x082b1919, 0x2b082b08),
- uvec2(0x19192b2b, 0x2b082b08), uvec2(0x2b080808, 0x2b082b08), uvec2(0x2b08082b, 0x2b082b08), uvec2(0x2b2b2b08, 0x2b082b08),
- uvec2(0x0808192b, 0x2b082b19), uvec2(0x082b082b, 0x2b082b2b), uvec2(0x2b080808, 0x2b082b2b), uvec2(0x2b082b08, 0x2b082b2b),
- uvec2(0x2b19192b, 0x2b082b2b), uvec2(0x2b2b2b08, 0x2b082b2b), uvec2(0x08080819, 0x2b190808), uvec2(0x08081908, 0x2b190808),
- uvec2(0x08190808, 0x2b190808), uvec2(0x19080808, 0x2b190808), uvec2(0x1919192b, 0x2b190808), uvec2(0x2b081908, 0x2b190808),
- uvec2(0x08080808, 0x2b190819), uvec2(0x082b082b, 0x2b190819), uvec2(0x192b1908, 0x2b190819), uvec2(0x1919192b, 0x2b19082b),
- uvec2(0x2b082b19, 0x2b19082b), uvec2(0x08080808, 0x2b191908), uvec2(0x08081919, 0x2b191908), uvec2(0x19081908, 0x2b191908),
- uvec2(0x19190808, 0x2b191908), uvec2(0x19192b08, 0x2b191908), uvec2(0x082b2b19, 0x2b191919), uvec2(0x2b190808, 0x2b191919),
- uvec2(0x2b19082b, 0x2b191919), uvec2(0x19080819, 0x2b19192b), uvec2(0x19190819, 0x2b192b08), uvec2(0x2b2b192b, 0x2b192b08),
- uvec2(0x19082b19, 0x2b192b19), uvec2(0x08191919, 0x2b192b2b), uvec2(0x192b0808, 0x2b192b2b), uvec2(0x08080808, 0x2b2b0808),
- uvec2(0x0808082b, 0x2b2b0808), uvec2(0x08082b08, 0x2b2b0808), uvec2(0x08082b2b, 0x2b2b0808), uvec2(0x082b0808, 0x2b2b0808),
- uvec2(0x082b2b2b, 0x2b2b0808), uvec2(0x2b2b0808, 0x2b2b0808), uvec2(0x19190819, 0x2b2b0819), uvec2(0x19192b19, 0x2b2b0819),
- uvec2(0x2b2b192b, 0x2b2b0819), uvec2(0x08080808, 0x2b2b082b), uvec2(0x0808082b, 0x2b2b082b), uvec2(0x08082b08, 0x2b2b082b),
- uvec2(0x082b2b2b, 0x2b2b082b), uvec2(0x2b080808, 0x2b2b082b), uvec2(0x2b2b0808, 0x2b2b082b), uvec2(0x19080808, 0x2b2b1908),
- uvec2(0x2b191919, 0x2b2b1908), uvec2(0x192b1919, 0x2b2b192b), uvec2(0x2b192b08, 0x2b2b192b), uvec2(0x08082b2b, 0x2b2b2b08),
- uvec2(0x082b0808, 0x2b2b2b08), uvec2(0x082b082b, 0x2b2b2b08), uvec2(0x082b2b08, 0x2b2b2b08), uvec2(0x2b2b0808, 0x2b2b2b08),
- uvec2(0x2b2b2b08, 0x2b2b2b08), uvec2(0x08081908, 0x2b2b2b19), uvec2(0x2b081908, 0x2b2b2b19), uvec2(0x2b08192b, 0x2b2b2b19),
- uvec2(0x082b2b08, 0x2b2b2b2b), uvec2(0x082b2b2b, 0x2b2b2b2b), uvec2(0x2b190819, 0x2b2b2b2b), uvec2(0x2b2b2b2b, 0x2b2b2b2b),
-};
-
-shared uvec2 iq2xs_grid[512];
-
-#define NEEDS_INIT_IQ_SHMEM
-void init_iq_shmem(uvec3 wgsize)
-{
- // copy the table into shared memory and sync
- [[unroll]] for (uint i = 0; i < iq2xs_grid.length(); i += wgsize.x) {
- if (iq2xs_grid.length() % wgsize.x == 0 || i + gl_LocalInvocationIndex.x < iq2xs_grid_const.length()) {
- iq2xs_grid[i + gl_LocalInvocationIndex.x] = iq2xs_grid_const[i + gl_LocalInvocationIndex.x];
- }
- }
- barrier();
-}
-
-#define QUANT_K QUANT_K_IQ2_XS
-#define QUANT_R QUANT_R_IQ2_XS
-#define A_TYPE block_iq2_xs
-#define A_TYPE_PACKED16 block_iq2_xs_packed16
-#endif
-
-#define QUANT_K_IQ2_S 256
-#define QUANT_R_IQ2_S 1
-
-struct block_iq2_s
-{
- float16_t d;
- uint8_t qs[QUANT_K_IQ2_S/4];
- uint8_t qh[QUANT_K_IQ2_S/32];
- uint8_t scales[QUANT_K_IQ2_S/32];
-};
-
-struct block_iq2_s_packed16
-{
- float16_t d;
- uint16_t qs[QUANT_K_IQ2_S/8];
- uint16_t qh[QUANT_K_IQ2_S/64];
- uint16_t scales[QUANT_K_IQ2_S/64];
-};
-
-#if defined(DATA_A_IQ2_S)
-
-const uvec2 iq2s_grid_const[1024] = {
- uvec2(0x08080808, 0x08080808), uvec2(0x0808082b, 0x08080808), uvec2(0x08081919, 0x08080808), uvec2(0x08082b08, 0x08080808),
- uvec2(0x08082b2b, 0x08080808), uvec2(0x08190819, 0x08080808), uvec2(0x08191908, 0x08080808), uvec2(0x0819192b, 0x08080808),
- uvec2(0x08192b19, 0x08080808), uvec2(0x082b0808, 0x08080808), uvec2(0x082b082b, 0x08080808), uvec2(0x082b1919, 0x08080808),
- uvec2(0x082b2b08, 0x08080808), uvec2(0x19080819, 0x08080808), uvec2(0x19081908, 0x08080808), uvec2(0x1908192b, 0x08080808),
- uvec2(0x19082b19, 0x08080808), uvec2(0x19190808, 0x08080808), uvec2(0x1919082b, 0x08080808), uvec2(0x19191919, 0x08080808),
- uvec2(0x19192b08, 0x08080808), uvec2(0x192b0819, 0x08080808), uvec2(0x192b1908, 0x08080808), uvec2(0x192b192b, 0x08080808),
- uvec2(0x192b2b19, 0x08080808), uvec2(0x2b080808, 0x08080808), uvec2(0x2b08082b, 0x08080808), uvec2(0x2b081919, 0x08080808),
- uvec2(0x2b082b08, 0x08080808), uvec2(0x2b190819, 0x08080808), uvec2(0x2b191908, 0x08080808), uvec2(0x2b2b0808, 0x08080808),
- uvec2(0x2b2b1919, 0x08080808), uvec2(0x2b2b2b2b, 0x08080808), uvec2(0x08080819, 0x08080819), uvec2(0x08081908, 0x08080819),
- uvec2(0x0808192b, 0x08080819), uvec2(0x08082b19, 0x08080819), uvec2(0x08190808, 0x08080819), uvec2(0x0819082b, 0x08080819),
- uvec2(0x08191919, 0x08080819), uvec2(0x08192b08, 0x08080819), uvec2(0x082b0819, 0x08080819), uvec2(0x082b1908, 0x08080819),
- uvec2(0x19080808, 0x08080819), uvec2(0x1908082b, 0x08080819), uvec2(0x19081919, 0x08080819), uvec2(0x19082b08, 0x08080819),
- uvec2(0x19190819, 0x08080819), uvec2(0x19191908, 0x08080819), uvec2(0x1919192b, 0x08080819), uvec2(0x19192b19, 0x08080819),
- uvec2(0x192b0808, 0x08080819), uvec2(0x192b1919, 0x08080819), uvec2(0x192b2b08, 0x08080819), uvec2(0x2b080819, 0x08080819),
- uvec2(0x2b081908, 0x08080819), uvec2(0x2b190808, 0x08080819), uvec2(0x2b19082b, 0x08080819), uvec2(0x2b191919, 0x08080819),
- uvec2(0x2b2b0819, 0x08080819), uvec2(0x2b2b1908, 0x08080819), uvec2(0x08080808, 0x0808082b), uvec2(0x0808082b, 0x0808082b),
- uvec2(0x08081919, 0x0808082b), uvec2(0x08082b08, 0x0808082b), uvec2(0x08190819, 0x0808082b), uvec2(0x08191908, 0x0808082b),
- uvec2(0x082b0808, 0x0808082b), uvec2(0x082b2b2b, 0x0808082b), uvec2(0x19080819, 0x0808082b), uvec2(0x19081908, 0x0808082b),
- uvec2(0x1908192b, 0x0808082b), uvec2(0x19082b19, 0x0808082b), uvec2(0x19190808, 0x0808082b), uvec2(0x19191919, 0x0808082b),
- uvec2(0x2b080808, 0x0808082b), uvec2(0x2b081919, 0x0808082b), uvec2(0x2b082b2b, 0x0808082b), uvec2(0x2b191908, 0x0808082b),
- uvec2(0x2b2b082b, 0x0808082b), uvec2(0x08080819, 0x08081908), uvec2(0x08081908, 0x08081908), uvec2(0x0808192b, 0x08081908),
- uvec2(0x08082b19, 0x08081908), uvec2(0x08190808, 0x08081908), uvec2(0x0819082b, 0x08081908), uvec2(0x08191919, 0x08081908),
- uvec2(0x08192b08, 0x08081908), uvec2(0x082b0819, 0x08081908), uvec2(0x082b1908, 0x08081908), uvec2(0x082b192b, 0x08081908),
- uvec2(0x082b2b19, 0x08081908), uvec2(0x19080808, 0x08081908), uvec2(0x1908082b, 0x08081908), uvec2(0x19081919, 0x08081908),
- uvec2(0x19082b08, 0x08081908), uvec2(0x19082b2b, 0x08081908), uvec2(0x19190819, 0x08081908), uvec2(0x19191908, 0x08081908),
- uvec2(0x1919192b, 0x08081908), uvec2(0x19192b19, 0x08081908), uvec2(0x192b0808, 0x08081908), uvec2(0x192b082b, 0x08081908),
- uvec2(0x192b1919, 0x08081908), uvec2(0x2b080819, 0x08081908), uvec2(0x2b081908, 0x08081908), uvec2(0x2b08192b, 0x08081908),
- uvec2(0x2b082b19, 0x08081908), uvec2(0x2b190808, 0x08081908), uvec2(0x2b191919, 0x08081908), uvec2(0x2b192b08, 0x08081908),
- uvec2(0x2b2b0819, 0x08081908), uvec2(0x2b2b1908, 0x08081908), uvec2(0x08080808, 0x08081919), uvec2(0x0808082b, 0x08081919),
- uvec2(0x08081919, 0x08081919), uvec2(0x08082b08, 0x08081919), uvec2(0x08082b2b, 0x08081919), uvec2(0x08190819, 0x08081919),
- uvec2(0x08191908, 0x08081919), uvec2(0x0819192b, 0x08081919), uvec2(0x08192b19, 0x08081919), uvec2(0x082b0808, 0x08081919),
- uvec2(0x082b1919, 0x08081919), uvec2(0x082b2b08, 0x08081919), uvec2(0x19080819, 0x08081919), uvec2(0x19081908, 0x08081919),
- uvec2(0x1908192b, 0x08081919), uvec2(0x19082b19, 0x08081919), uvec2(0x19190808, 0x08081919), uvec2(0x1919082b, 0x08081919),
- uvec2(0x19191919, 0x08081919), uvec2(0x19192b08, 0x08081919), uvec2(0x192b0819, 0x08081919), uvec2(0x192b1908, 0x08081919),
- uvec2(0x2b080808, 0x08081919), uvec2(0x2b08082b, 0x08081919), uvec2(0x2b081919, 0x08081919), uvec2(0x2b082b08, 0x08081919),
- uvec2(0x2b190819, 0x08081919), uvec2(0x2b191908, 0x08081919), uvec2(0x2b2b0808, 0x08081919), uvec2(0x08080819, 0x0808192b),
- uvec2(0x08081908, 0x0808192b), uvec2(0x0808192b, 0x0808192b), uvec2(0x08082b19, 0x0808192b), uvec2(0x08190808, 0x0808192b),
- uvec2(0x08191919, 0x0808192b), uvec2(0x19080808, 0x0808192b), uvec2(0x19081919, 0x0808192b), uvec2(0x19082b08, 0x0808192b),
- uvec2(0x19190819, 0x0808192b), uvec2(0x19191908, 0x0808192b), uvec2(0x192b0808, 0x0808192b), uvec2(0x2b080819, 0x0808192b),
- uvec2(0x2b081908, 0x0808192b), uvec2(0x2b190808, 0x0808192b), uvec2(0x08080808, 0x08082b08), uvec2(0x0808082b, 0x08082b08),
- uvec2(0x08081919, 0x08082b08), uvec2(0x08082b08, 0x08082b08), uvec2(0x08190819, 0x08082b08), uvec2(0x08191908, 0x08082b08),
- uvec2(0x0819192b, 0x08082b08), uvec2(0x08192b19, 0x08082b08), uvec2(0x082b0808, 0x08082b08), uvec2(0x082b1919, 0x08082b08),
- uvec2(0x082b2b2b, 0x08082b08), uvec2(0x19080819, 0x08082b08), uvec2(0x19081908, 0x08082b08), uvec2(0x1908192b, 0x08082b08),
- uvec2(0x19082b19, 0x08082b08), uvec2(0x19190808, 0x08082b08), uvec2(0x1919082b, 0x08082b08), uvec2(0x19191919, 0x08082b08),
- uvec2(0x19192b08, 0x08082b08), uvec2(0x192b0819, 0x08082b08), uvec2(0x192b1908, 0x08082b08), uvec2(0x2b080808, 0x08082b08),
- uvec2(0x2b081919, 0x08082b08), uvec2(0x2b191908, 0x08082b08), uvec2(0x2b2b2b2b, 0x08082b08), uvec2(0x08080819, 0x08082b19),
- uvec2(0x08081908, 0x08082b19), uvec2(0x08190808, 0x08082b19), uvec2(0x0819082b, 0x08082b19), uvec2(0x08191919, 0x08082b19),
- uvec2(0x08192b08, 0x08082b19), uvec2(0x082b0819, 0x08082b19), uvec2(0x19080808, 0x08082b19), uvec2(0x19081919, 0x08082b19),
- uvec2(0x19082b08, 0x08082b19), uvec2(0x19190819, 0x08082b19), uvec2(0x19191908, 0x08082b19), uvec2(0x192b0808, 0x08082b19),
- uvec2(0x2b080819, 0x08082b19), uvec2(0x2b190808, 0x08082b19), uvec2(0x08080808, 0x08082b2b), uvec2(0x08190819, 0x08082b2b),
- uvec2(0x08191908, 0x08082b2b), uvec2(0x082b082b, 0x08082b2b), uvec2(0x082b2b08, 0x08082b2b), uvec2(0x082b2b2b, 0x08082b2b),
- uvec2(0x19190808, 0x08082b2b), uvec2(0x2b192b19, 0x08082b2b), uvec2(0x08080819, 0x08190808), uvec2(0x08081908, 0x08190808),
- uvec2(0x0808192b, 0x08190808), uvec2(0x08082b19, 0x08190808), uvec2(0x08190808, 0x08190808), uvec2(0x0819082b, 0x08190808),
- uvec2(0x08191919, 0x08190808), uvec2(0x08192b08, 0x08190808), uvec2(0x082b0819, 0x08190808), uvec2(0x082b1908, 0x08190808),
- uvec2(0x082b192b, 0x08190808), uvec2(0x19080808, 0x08190808), uvec2(0x1908082b, 0x08190808), uvec2(0x19081919, 0x08190808),
- uvec2(0x19082b08, 0x08190808), uvec2(0x19190819, 0x08190808), uvec2(0x19191908, 0x08190808), uvec2(0x1919192b, 0x08190808),
- uvec2(0x19192b19, 0x08190808), uvec2(0x192b0808, 0x08190808), uvec2(0x192b082b, 0x08190808), uvec2(0x192b1919, 0x08190808),
- uvec2(0x192b2b08, 0x08190808), uvec2(0x2b080819, 0x08190808), uvec2(0x2b081908, 0x08190808), uvec2(0x2b08192b, 0x08190808),
- uvec2(0x2b190808, 0x08190808), uvec2(0x2b191919, 0x08190808), uvec2(0x2b192b08, 0x08190808), uvec2(0x2b2b0819, 0x08190808),
- uvec2(0x2b2b1908, 0x08190808), uvec2(0x08080808, 0x08190819), uvec2(0x0808082b, 0x08190819), uvec2(0x08081919, 0x08190819),
- uvec2(0x08082b08, 0x08190819), uvec2(0x08082b2b, 0x08190819), uvec2(0x08190819, 0x08190819), uvec2(0x08191908, 0x08190819),
- uvec2(0x0819192b, 0x08190819), uvec2(0x08192b19, 0x08190819), uvec2(0x082b0808, 0x08190819), uvec2(0x082b082b, 0x08190819),
- uvec2(0x082b1919, 0x08190819), uvec2(0x082b2b08, 0x08190819), uvec2(0x19080819, 0x08190819), uvec2(0x19081908, 0x08190819),
- uvec2(0x1908192b, 0x08190819), uvec2(0x19082b19, 0x08190819), uvec2(0x19190808, 0x08190819), uvec2(0x1919082b, 0x08190819),
- uvec2(0x19191919, 0x08190819), uvec2(0x19192b08, 0x08190819), uvec2(0x192b0819, 0x08190819), uvec2(0x192b1908, 0x08190819),
- uvec2(0x2b080808, 0x08190819), uvec2(0x2b08082b, 0x08190819), uvec2(0x2b081919, 0x08190819), uvec2(0x2b082b08, 0x08190819),
- uvec2(0x2b190819, 0x08190819), uvec2(0x2b191908, 0x08190819), uvec2(0x08080819, 0x0819082b), uvec2(0x08081908, 0x0819082b),
- uvec2(0x08082b19, 0x0819082b), uvec2(0x08190808, 0x0819082b), uvec2(0x08191919, 0x0819082b), uvec2(0x082b0819, 0x0819082b),
- uvec2(0x082b1908, 0x0819082b), uvec2(0x19080808, 0x0819082b), uvec2(0x19081919, 0x0819082b), uvec2(0x19190819, 0x0819082b),
- uvec2(0x19191908, 0x0819082b), uvec2(0x2b080819, 0x0819082b), uvec2(0x2b081908, 0x0819082b), uvec2(0x2b190808, 0x0819082b),
- uvec2(0x08080808, 0x08191908), uvec2(0x0808082b, 0x08191908), uvec2(0x08081919, 0x08191908), uvec2(0x08082b08, 0x08191908),
- uvec2(0x08190819, 0x08191908), uvec2(0x08191908, 0x08191908), uvec2(0x0819192b, 0x08191908), uvec2(0x08192b19, 0x08191908),
- uvec2(0x082b0808, 0x08191908), uvec2(0x082b1919, 0x08191908), uvec2(0x082b2b08, 0x08191908), uvec2(0x19080819, 0x08191908),
- uvec2(0x19081908, 0x08191908), uvec2(0x1908192b, 0x08191908), uvec2(0x19082b19, 0x08191908), uvec2(0x19190808, 0x08191908),
- uvec2(0x1919082b, 0x08191908), uvec2(0x19191919, 0x08191908), uvec2(0x19192b08, 0x08191908), uvec2(0x192b0819, 0x08191908),
- uvec2(0x192b1908, 0x08191908), uvec2(0x2b080808, 0x08191908), uvec2(0x2b08082b, 0x08191908), uvec2(0x2b081919, 0x08191908),
- uvec2(0x2b082b08, 0x08191908), uvec2(0x2b190819, 0x08191908), uvec2(0x2b191908, 0x08191908), uvec2(0x2b2b0808, 0x08191908),
- uvec2(0x08080819, 0x08191919), uvec2(0x08081908, 0x08191919), uvec2(0x0808192b, 0x08191919), uvec2(0x08082b19, 0x08191919),
- uvec2(0x08190808, 0x08191919), uvec2(0x0819082b, 0x08191919), uvec2(0x08191919, 0x08191919), uvec2(0x08192b08, 0x08191919),
- uvec2(0x082b0819, 0x08191919), uvec2(0x082b1908, 0x08191919), uvec2(0x19080808, 0x08191919), uvec2(0x1908082b, 0x08191919),
- uvec2(0x19081919, 0x08191919), uvec2(0x19082b08, 0x08191919), uvec2(0x19190819, 0x08191919), uvec2(0x19191908, 0x08191919),
- uvec2(0x192b0808, 0x08191919), uvec2(0x2b080819, 0x08191919), uvec2(0x2b081908, 0x08191919), uvec2(0x2b190808, 0x08191919),
- uvec2(0x08080808, 0x0819192b), uvec2(0x08081919, 0x0819192b), uvec2(0x08082b08, 0x0819192b), uvec2(0x08190819, 0x0819192b),
- uvec2(0x08191908, 0x0819192b), uvec2(0x082b0808, 0x0819192b), uvec2(0x19080819, 0x0819192b), uvec2(0x19081908, 0x0819192b),
- uvec2(0x19190808, 0x0819192b), uvec2(0x2b080808, 0x0819192b), uvec2(0x2b2b2b2b, 0x0819192b), uvec2(0x08080819, 0x08192b08),
- uvec2(0x08081908, 0x08192b08), uvec2(0x0808192b, 0x08192b08), uvec2(0x08082b19, 0x08192b08), uvec2(0x08190808, 0x08192b08),
- uvec2(0x08191919, 0x08192b08), uvec2(0x08192b08, 0x08192b08), uvec2(0x082b0819, 0x08192b08), uvec2(0x19080808, 0x08192b08),
- uvec2(0x1908082b, 0x08192b08), uvec2(0x19081919, 0x08192b08), uvec2(0x19082b08, 0x08192b08), uvec2(0x19190819, 0x08192b08),
- uvec2(0x19191908, 0x08192b08), uvec2(0x192b0808, 0x08192b08), uvec2(0x2b080819, 0x08192b08), uvec2(0x2b081908, 0x08192b08),
- uvec2(0x08080808, 0x08192b19), uvec2(0x0808082b, 0x08192b19), uvec2(0x08081919, 0x08192b19), uvec2(0x08082b08, 0x08192b19),
- uvec2(0x08190819, 0x08192b19), uvec2(0x08191908, 0x08192b19), uvec2(0x082b0808, 0x08192b19), uvec2(0x19080819, 0x08192b19),
- uvec2(0x19081908, 0x08192b19), uvec2(0x19190808, 0x08192b19), uvec2(0x192b2b19, 0x08192b19), uvec2(0x2b2b082b, 0x08192b19),
- uvec2(0x08081908, 0x08192b2b), uvec2(0x08190808, 0x08192b2b), uvec2(0x19080808, 0x08192b2b), uvec2(0x1919192b, 0x08192b2b),
- uvec2(0x08080808, 0x082b0808), uvec2(0x0808082b, 0x082b0808), uvec2(0x08081919, 0x082b0808), uvec2(0x08082b08, 0x082b0808),
- uvec2(0x08190819, 0x082b0808), uvec2(0x08191908, 0x082b0808), uvec2(0x0819192b, 0x082b0808), uvec2(0x08192b19, 0x082b0808),
- uvec2(0x082b0808, 0x082b0808), uvec2(0x082b1919, 0x082b0808), uvec2(0x082b2b2b, 0x082b0808), uvec2(0x19080819, 0x082b0808),
- uvec2(0x19081908, 0x082b0808), uvec2(0x19190808, 0x082b0808), uvec2(0x1919082b, 0x082b0808), uvec2(0x19191919, 0x082b0808),
- uvec2(0x192b1908, 0x082b0808), uvec2(0x2b080808, 0x082b0808), uvec2(0x2b082b2b, 0x082b0808), uvec2(0x2b191908, 0x082b0808),
- uvec2(0x2b2b2b2b, 0x082b0808), uvec2(0x08080819, 0x082b0819), uvec2(0x08081908, 0x082b0819), uvec2(0x08190808, 0x082b0819),
- uvec2(0x0819082b, 0x082b0819), uvec2(0x08191919, 0x082b0819), uvec2(0x082b0819, 0x082b0819), uvec2(0x19080808, 0x082b0819),
- uvec2(0x1908082b, 0x082b0819), uvec2(0x19081919, 0x082b0819), uvec2(0x19190819, 0x082b0819), uvec2(0x19191908, 0x082b0819),
- uvec2(0x192b0808, 0x082b0819), uvec2(0x2b080819, 0x082b0819), uvec2(0x2b081908, 0x082b0819), uvec2(0x2b190808, 0x082b0819),
- uvec2(0x08080808, 0x082b082b), uvec2(0x08082b2b, 0x082b082b), uvec2(0x082b082b, 0x082b082b), uvec2(0x082b2b08, 0x082b082b),
- uvec2(0x082b2b2b, 0x082b082b), uvec2(0x19081908, 0x082b082b), uvec2(0x19190808, 0x082b082b), uvec2(0x2b082b08, 0x082b082b),
- uvec2(0x2b082b2b, 0x082b082b), uvec2(0x2b2b2b08, 0x082b082b), uvec2(0x08080819, 0x082b1908), uvec2(0x08081908, 0x082b1908),
- uvec2(0x0808192b, 0x082b1908), uvec2(0x08082b19, 0x082b1908), uvec2(0x08190808, 0x082b1908), uvec2(0x08191919, 0x082b1908),
- uvec2(0x08192b08, 0x082b1908), uvec2(0x082b0819, 0x082b1908), uvec2(0x082b1908, 0x082b1908), uvec2(0x19080808, 0x082b1908),
- uvec2(0x1908082b, 0x082b1908), uvec2(0x19081919, 0x082b1908), uvec2(0x19082b08, 0x082b1908), uvec2(0x19190819, 0x082b1908),
- uvec2(0x19191908, 0x082b1908), uvec2(0x192b0808, 0x082b1908), uvec2(0x2b080819, 0x082b1908), uvec2(0x2b081908, 0x082b1908),
- uvec2(0x2b190808, 0x082b1908), uvec2(0x08080808, 0x082b1919), uvec2(0x08081919, 0x082b1919), uvec2(0x08082b08, 0x082b1919),
- uvec2(0x08190819, 0x082b1919), uvec2(0x08191908, 0x082b1919), uvec2(0x082b0808, 0x082b1919), uvec2(0x19080819, 0x082b1919),
- uvec2(0x19081908, 0x082b1919), uvec2(0x19190808, 0x082b1919), uvec2(0x192b192b, 0x082b1919), uvec2(0x2b080808, 0x082b1919),
- uvec2(0x08080819, 0x082b192b), uvec2(0x08081908, 0x082b192b), uvec2(0x08190808, 0x082b192b), uvec2(0x19080808, 0x082b192b),
- uvec2(0x19192b19, 0x082b192b), uvec2(0x08080808, 0x082b2b08), uvec2(0x08081919, 0x082b2b08), uvec2(0x08190819, 0x082b2b08),
- uvec2(0x08191908, 0x082b2b08), uvec2(0x19080819, 0x082b2b08), uvec2(0x19081908, 0x082b2b08), uvec2(0x19190808, 0x082b2b08),
- uvec2(0x2b082b2b, 0x082b2b08), uvec2(0x2b2b2b2b, 0x082b2b08), uvec2(0x08080819, 0x082b2b19), uvec2(0x08081908, 0x082b2b19),
- uvec2(0x08190808, 0x082b2b19), uvec2(0x2b191919, 0x082b2b19), uvec2(0x08082b2b, 0x082b2b2b), uvec2(0x082b082b, 0x082b2b2b),
- uvec2(0x192b1908, 0x082b2b2b), uvec2(0x2b082b08, 0x082b2b2b), uvec2(0x2b082b2b, 0x082b2b2b), uvec2(0x08080819, 0x19080808),
- uvec2(0x08081908, 0x19080808), uvec2(0x0808192b, 0x19080808), uvec2(0x08082b19, 0x19080808), uvec2(0x08190808, 0x19080808),
- uvec2(0x0819082b, 0x19080808), uvec2(0x08191919, 0x19080808), uvec2(0x08192b08, 0x19080808), uvec2(0x08192b2b, 0x19080808),
- uvec2(0x082b0819, 0x19080808), uvec2(0x082b1908, 0x19080808), uvec2(0x082b192b, 0x19080808), uvec2(0x19080808, 0x19080808),
- uvec2(0x1908082b, 0x19080808), uvec2(0x19081919, 0x19080808), uvec2(0x19082b08, 0x19080808), uvec2(0x19082b2b, 0x19080808),
- uvec2(0x19190819, 0x19080808), uvec2(0x19191908, 0x19080808), uvec2(0x1919192b, 0x19080808), uvec2(0x19192b19, 0x19080808),
- uvec2(0x192b0808, 0x19080808), uvec2(0x192b082b, 0x19080808), uvec2(0x192b1919, 0x19080808), uvec2(0x2b080819, 0x19080808),
- uvec2(0x2b081908, 0x19080808), uvec2(0x2b190808, 0x19080808), uvec2(0x2b191919, 0x19080808), uvec2(0x2b192b08, 0x19080808),
- uvec2(0x2b2b0819, 0x19080808), uvec2(0x2b2b1908, 0x19080808), uvec2(0x08080808, 0x19080819), uvec2(0x0808082b, 0x19080819),
- uvec2(0x08081919, 0x19080819), uvec2(0x08082b08, 0x19080819), uvec2(0x08190819, 0x19080819), uvec2(0x08191908, 0x19080819),
- uvec2(0x0819192b, 0x19080819), uvec2(0x08192b19, 0x19080819), uvec2(0x082b0808, 0x19080819), uvec2(0x082b082b, 0x19080819),
- uvec2(0x082b1919, 0x19080819), uvec2(0x19080819, 0x19080819), uvec2(0x19081908, 0x19080819), uvec2(0x1908192b, 0x19080819),
- uvec2(0x19082b19, 0x19080819), uvec2(0x19190808, 0x19080819), uvec2(0x1919082b, 0x19080819), uvec2(0x19191919, 0x19080819),
- uvec2(0x19192b08, 0x19080819), uvec2(0x192b0819, 0x19080819), uvec2(0x192b1908, 0x19080819), uvec2(0x2b080808, 0x19080819),
- uvec2(0x2b08082b, 0x19080819), uvec2(0x2b081919, 0x19080819), uvec2(0x2b082b08, 0x19080819), uvec2(0x2b190819, 0x19080819),
- uvec2(0x2b191908, 0x19080819), uvec2(0x2b2b0808, 0x19080819), uvec2(0x08080819, 0x1908082b), uvec2(0x08081908, 0x1908082b),
- uvec2(0x08190808, 0x1908082b), uvec2(0x0819082b, 0x1908082b), uvec2(0x08191919, 0x1908082b), uvec2(0x08192b08, 0x1908082b),
- uvec2(0x082b1908, 0x1908082b), uvec2(0x19080808, 0x1908082b), uvec2(0x19081919, 0x1908082b), uvec2(0x19082b08, 0x1908082b),
- uvec2(0x19190819, 0x1908082b), uvec2(0x19191908, 0x1908082b), uvec2(0x192b0808, 0x1908082b), uvec2(0x2b080819, 0x1908082b),
- uvec2(0x2b081908, 0x1908082b), uvec2(0x08080808, 0x19081908), uvec2(0x0808082b, 0x19081908), uvec2(0x08081919, 0x19081908),
- uvec2(0x08082b08, 0x19081908), uvec2(0x08082b2b, 0x19081908), uvec2(0x08190819, 0x19081908), uvec2(0x08191908, 0x19081908),
- uvec2(0x0819192b, 0x19081908), uvec2(0x08192b19, 0x19081908), uvec2(0x082b0808, 0x19081908), uvec2(0x082b082b, 0x19081908),
- uvec2(0x082b1919, 0x19081908), uvec2(0x082b2b08, 0x19081908), uvec2(0x19080819, 0x19081908), uvec2(0x19081908, 0x19081908),
- uvec2(0x1908192b, 0x19081908), uvec2(0x19082b19, 0x19081908), uvec2(0x19190808, 0x19081908), uvec2(0x1919082b, 0x19081908),
- uvec2(0x19191919, 0x19081908), uvec2(0x19192b08, 0x19081908), uvec2(0x192b0819, 0x19081908), uvec2(0x192b1908, 0x19081908),
- uvec2(0x2b080808, 0x19081908), uvec2(0x2b08082b, 0x19081908), uvec2(0x2b081919, 0x19081908), uvec2(0x2b082b08, 0x19081908),
- uvec2(0x2b190819, 0x19081908), uvec2(0x2b191908, 0x19081908), uvec2(0x2b2b0808, 0x19081908), uvec2(0x08080819, 0x19081919),
- uvec2(0x08081908, 0x19081919), uvec2(0x0808192b, 0x19081919), uvec2(0x08082b19, 0x19081919), uvec2(0x08190808, 0x19081919),
- uvec2(0x0819082b, 0x19081919), uvec2(0x08191919, 0x19081919), uvec2(0x08192b08, 0x19081919), uvec2(0x082b0819, 0x19081919),
- uvec2(0x082b1908, 0x19081919), uvec2(0x19080808, 0x19081919), uvec2(0x1908082b, 0x19081919), uvec2(0x19081919, 0x19081919),
- uvec2(0x19082b08, 0x19081919), uvec2(0x19190819, 0x19081919), uvec2(0x19191908, 0x19081919), uvec2(0x192b0808, 0x19081919),
- uvec2(0x192b2b2b, 0x19081919), uvec2(0x2b080819, 0x19081919), uvec2(0x2b081908, 0x19081919), uvec2(0x2b190808, 0x19081919),
- uvec2(0x08080808, 0x1908192b), uvec2(0x0808082b, 0x1908192b), uvec2(0x08081919, 0x1908192b), uvec2(0x08082b08, 0x1908192b),
- uvec2(0x08190819, 0x1908192b), uvec2(0x08191908, 0x1908192b), uvec2(0x082b0808, 0x1908192b), uvec2(0x19080819, 0x1908192b),
- uvec2(0x19081908, 0x1908192b), uvec2(0x19190808, 0x1908192b), uvec2(0x2b080808, 0x1908192b), uvec2(0x2b2b1919, 0x1908192b),
- uvec2(0x08080819, 0x19082b08), uvec2(0x08081908, 0x19082b08), uvec2(0x08082b19, 0x19082b08), uvec2(0x08190808, 0x19082b08),
- uvec2(0x0819082b, 0x19082b08), uvec2(0x08191919, 0x19082b08), uvec2(0x08192b08, 0x19082b08), uvec2(0x082b0819, 0x19082b08),
- uvec2(0x082b1908, 0x19082b08), uvec2(0x19080808, 0x19082b08), uvec2(0x1908082b, 0x19082b08), uvec2(0x19081919, 0x19082b08),
- uvec2(0x19082b08, 0x19082b08), uvec2(0x19190819, 0x19082b08), uvec2(0x19191908, 0x19082b08), uvec2(0x192b0808, 0x19082b08),
- uvec2(0x2b081908, 0x19082b08), uvec2(0x2b190808, 0x19082b08), uvec2(0x08080808, 0x19082b19), uvec2(0x0808082b, 0x19082b19),
- uvec2(0x08081919, 0x19082b19), uvec2(0x08082b08, 0x19082b19), uvec2(0x08190819, 0x19082b19), uvec2(0x08191908, 0x19082b19),
- uvec2(0x082b0808, 0x19082b19), uvec2(0x19080819, 0x19082b19), uvec2(0x19081908, 0x19082b19), uvec2(0x19190808, 0x19082b19),
- uvec2(0x2b080808, 0x19082b19), uvec2(0x2b19192b, 0x19082b19), uvec2(0x08080819, 0x19082b2b), uvec2(0x08081908, 0x19082b2b),
- uvec2(0x08190808, 0x19082b2b), uvec2(0x19080808, 0x19082b2b), uvec2(0x08080808, 0x19190808), uvec2(0x0808082b, 0x19190808),
- uvec2(0x08081919, 0x19190808), uvec2(0x08082b08, 0x19190808), uvec2(0x08190819, 0x19190808), uvec2(0x08191908, 0x19190808),
- uvec2(0x0819192b, 0x19190808), uvec2(0x08192b19, 0x19190808), uvec2(0x082b0808, 0x19190808), uvec2(0x082b082b, 0x19190808),
- uvec2(0x082b1919, 0x19190808), uvec2(0x082b2b08, 0x19190808), uvec2(0x19080819, 0x19190808), uvec2(0x19081908, 0x19190808),
- uvec2(0x1908192b, 0x19190808), uvec2(0x19082b19, 0x19190808), uvec2(0x19190808, 0x19190808), uvec2(0x1919082b, 0x19190808),
- uvec2(0x19191919, 0x19190808), uvec2(0x19192b08, 0x19190808), uvec2(0x192b0819, 0x19190808), uvec2(0x192b1908, 0x19190808),
- uvec2(0x2b080808, 0x19190808), uvec2(0x2b08082b, 0x19190808), uvec2(0x2b081919, 0x19190808), uvec2(0x2b082b08, 0x19190808),
- uvec2(0x2b190819, 0x19190808), uvec2(0x2b191908, 0x19190808), uvec2(0x08080819, 0x19190819), uvec2(0x08081908, 0x19190819),
- uvec2(0x0808192b, 0x19190819), uvec2(0x08082b19, 0x19190819), uvec2(0x08190808, 0x19190819), uvec2(0x0819082b, 0x19190819),
- uvec2(0x08191919, 0x19190819), uvec2(0x08192b08, 0x19190819), uvec2(0x082b0819, 0x19190819), uvec2(0x082b1908, 0x19190819),
- uvec2(0x19080808, 0x19190819), uvec2(0x1908082b, 0x19190819), uvec2(0x19081919, 0x19190819), uvec2(0x19082b08, 0x19190819),
- uvec2(0x19190819, 0x19190819), uvec2(0x19191908, 0x19190819), uvec2(0x192b0808, 0x19190819), uvec2(0x2b080819, 0x19190819),
- uvec2(0x2b081908, 0x19190819), uvec2(0x2b190808, 0x19190819), uvec2(0x08080808, 0x1919082b), uvec2(0x08081919, 0x1919082b),
- uvec2(0x08082b08, 0x1919082b), uvec2(0x08190819, 0x1919082b), uvec2(0x08191908, 0x1919082b), uvec2(0x082b0808, 0x1919082b),
- uvec2(0x19080819, 0x1919082b), uvec2(0x19081908, 0x1919082b), uvec2(0x19190808, 0x1919082b), uvec2(0x192b2b19, 0x1919082b),
- uvec2(0x2b080808, 0x1919082b), uvec2(0x08080819, 0x19191908), uvec2(0x08081908, 0x19191908), uvec2(0x0808192b, 0x19191908),
- uvec2(0x08082b19, 0x19191908), uvec2(0x08190808, 0x19191908), uvec2(0x0819082b, 0x19191908), uvec2(0x08191919, 0x19191908),
- uvec2(0x08192b08, 0x19191908), uvec2(0x082b0819, 0x19191908), uvec2(0x082b1908, 0x19191908), uvec2(0x19080808, 0x19191908),
- uvec2(0x1908082b, 0x19191908), uvec2(0x19081919, 0x19191908), uvec2(0x19082b08, 0x19191908), uvec2(0x19190819, 0x19191908),
- uvec2(0x19191908, 0x19191908), uvec2(0x192b0808, 0x19191908), uvec2(0x2b080819, 0x19191908), uvec2(0x2b081908, 0x19191908),
- uvec2(0x2b190808, 0x19191908), uvec2(0x08080808, 0x19191919), uvec2(0x0808082b, 0x19191919), uvec2(0x08081919, 0x19191919),
- uvec2(0x08082b08, 0x19191919), uvec2(0x08190819, 0x19191919), uvec2(0x08191908, 0x19191919), uvec2(0x082b0808, 0x19191919),
- uvec2(0x19080819, 0x19191919), uvec2(0x19081908, 0x19191919), uvec2(0x19190808, 0x19191919), uvec2(0x2b080808, 0x19191919),
- uvec2(0x08080819, 0x1919192b), uvec2(0x08081908, 0x1919192b), uvec2(0x08190808, 0x1919192b), uvec2(0x082b192b, 0x1919192b),
- uvec2(0x19080808, 0x1919192b), uvec2(0x08080808, 0x19192b08), uvec2(0x0808082b, 0x19192b08), uvec2(0x08081919, 0x19192b08),
- uvec2(0x08082b08, 0x19192b08), uvec2(0x08190819, 0x19192b08), uvec2(0x08191908, 0x19192b08), uvec2(0x082b0808, 0x19192b08),
- uvec2(0x19080819, 0x19192b08), uvec2(0x19081908, 0x19192b08), uvec2(0x19190808, 0x19192b08), uvec2(0x19192b2b, 0x19192b08),
- uvec2(0x2b080808, 0x19192b08), uvec2(0x08080819, 0x19192b19), uvec2(0x08081908, 0x19192b19), uvec2(0x08190808, 0x19192b19),
- uvec2(0x19080808, 0x19192b19), uvec2(0x08080808, 0x19192b2b), uvec2(0x08192b19, 0x19192b2b), uvec2(0x2b081919, 0x19192b2b),
- uvec2(0x2b2b2b08, 0x19192b2b), uvec2(0x08080819, 0x192b0808), uvec2(0x08081908, 0x192b0808), uvec2(0x0808192b, 0x192b0808),
- uvec2(0x08190808, 0x192b0808), uvec2(0x0819082b, 0x192b0808), uvec2(0x08191919, 0x192b0808), uvec2(0x08192b08, 0x192b0808),
- uvec2(0x082b0819, 0x192b0808), uvec2(0x082b1908, 0x192b0808), uvec2(0x19080808, 0x192b0808), uvec2(0x19081919, 0x192b0808),
- uvec2(0x19082b08, 0x192b0808), uvec2(0x19190819, 0x192b0808), uvec2(0x19191908, 0x192b0808), uvec2(0x192b0808, 0x192b0808),
- uvec2(0x2b081908, 0x192b0808), uvec2(0x2b190808, 0x192b0808), uvec2(0x08080808, 0x192b0819), uvec2(0x0808082b, 0x192b0819),
- uvec2(0x08081919, 0x192b0819), uvec2(0x08082b08, 0x192b0819), uvec2(0x08190819, 0x192b0819), uvec2(0x08191908, 0x192b0819),
- uvec2(0x082b0808, 0x192b0819), uvec2(0x19080819, 0x192b0819), uvec2(0x19081908, 0x192b0819), uvec2(0x19190808, 0x192b0819),
- uvec2(0x2b080808, 0x192b0819), uvec2(0x2b192b19, 0x192b0819), uvec2(0x08081908, 0x192b082b), uvec2(0x08190808, 0x192b082b),
- uvec2(0x19080808, 0x192b082b), uvec2(0x1919192b, 0x192b082b), uvec2(0x2b2b0819, 0x192b082b), uvec2(0x08080808, 0x192b1908),
- uvec2(0x08081919, 0x192b1908), uvec2(0x08082b08, 0x192b1908), uvec2(0x08190819, 0x192b1908), uvec2(0x08191908, 0x192b1908),
- uvec2(0x082b0808, 0x192b1908), uvec2(0x19080819, 0x192b1908), uvec2(0x19081908, 0x192b1908), uvec2(0x19190808, 0x192b1908),
- uvec2(0x2b080808, 0x192b1908), uvec2(0x08080819, 0x192b1919), uvec2(0x08081908, 0x192b1919), uvec2(0x08190808, 0x192b1919),
- uvec2(0x19080808, 0x192b1919), uvec2(0x19082b2b, 0x192b1919), uvec2(0x192b2b08, 0x192b1919), uvec2(0x2b19082b, 0x192b1919),
- uvec2(0x08080808, 0x192b192b), uvec2(0x2b191908, 0x192b192b), uvec2(0x08080819, 0x192b2b08), uvec2(0x08081908, 0x192b2b08),
- uvec2(0x08190808, 0x192b2b08), uvec2(0x192b1919, 0x192b2b08), uvec2(0x2b192b08, 0x192b2b08), uvec2(0x08080808, 0x192b2b19),
- uvec2(0x082b2b2b, 0x192b2b19), uvec2(0x1908082b, 0x192b2b2b), uvec2(0x2b2b0819, 0x192b2b2b), uvec2(0x08080808, 0x2b080808),
- uvec2(0x0808082b, 0x2b080808), uvec2(0x08081919, 0x2b080808), uvec2(0x08082b08, 0x2b080808), uvec2(0x08190819, 0x2b080808),
- uvec2(0x08191908, 0x2b080808), uvec2(0x08192b19, 0x2b080808), uvec2(0x082b0808, 0x2b080808), uvec2(0x082b1919, 0x2b080808),
- uvec2(0x19080819, 0x2b080808), uvec2(0x19081908, 0x2b080808), uvec2(0x19190808, 0x2b080808), uvec2(0x1919082b, 0x2b080808),
- uvec2(0x19191919, 0x2b080808), uvec2(0x19192b08, 0x2b080808), uvec2(0x192b0819, 0x2b080808), uvec2(0x2b080808, 0x2b080808),
- uvec2(0x2b081919, 0x2b080808), uvec2(0x2b190819, 0x2b080808), uvec2(0x2b191908, 0x2b080808), uvec2(0x08080819, 0x2b080819),
- uvec2(0x08081908, 0x2b080819), uvec2(0x08082b19, 0x2b080819), uvec2(0x08190808, 0x2b080819), uvec2(0x0819082b, 0x2b080819),
- uvec2(0x08191919, 0x2b080819), uvec2(0x08192b08, 0x2b080819), uvec2(0x082b0819, 0x2b080819), uvec2(0x082b1908, 0x2b080819),
- uvec2(0x19080808, 0x2b080819), uvec2(0x1908082b, 0x2b080819), uvec2(0x19081919, 0x2b080819), uvec2(0x19082b08, 0x2b080819),
- uvec2(0x19190819, 0x2b080819), uvec2(0x19191908, 0x2b080819), uvec2(0x2b080819, 0x2b080819), uvec2(0x2b081908, 0x2b080819),
- uvec2(0x2b190808, 0x2b080819), uvec2(0x2b2b2b19, 0x2b080819), uvec2(0x08080808, 0x2b08082b), uvec2(0x08081919, 0x2b08082b),
- uvec2(0x08082b2b, 0x2b08082b), uvec2(0x08190819, 0x2b08082b), uvec2(0x08191908, 0x2b08082b), uvec2(0x19080819, 0x2b08082b),
- uvec2(0x19081908, 0x2b08082b), uvec2(0x19190808, 0x2b08082b), uvec2(0x08080819, 0x2b081908), uvec2(0x08081908, 0x2b081908),
- uvec2(0x0808192b, 0x2b081908), uvec2(0x08082b19, 0x2b081908), uvec2(0x08190808, 0x2b081908), uvec2(0x0819082b, 0x2b081908),
- uvec2(0x08191919, 0x2b081908), uvec2(0x08192b08, 0x2b081908), uvec2(0x082b0819, 0x2b081908), uvec2(0x19080808, 0x2b081908),
- uvec2(0x1908082b, 0x2b081908), uvec2(0x19081919, 0x2b081908), uvec2(0x19082b08, 0x2b081908), uvec2(0x19190819, 0x2b081908),
- uvec2(0x19191908, 0x2b081908), uvec2(0x192b0808, 0x2b081908), uvec2(0x2b080819, 0x2b081908), uvec2(0x2b081908, 0x2b081908),
- uvec2(0x2b190808, 0x2b081908), uvec2(0x08080808, 0x2b081919), uvec2(0x0808082b, 0x2b081919), uvec2(0x08081919, 0x2b081919),
- uvec2(0x08082b08, 0x2b081919), uvec2(0x08190819, 0x2b081919), uvec2(0x08191908, 0x2b081919), uvec2(0x082b0808, 0x2b081919),
- uvec2(0x19080819, 0x2b081919), uvec2(0x19081908, 0x2b081919), uvec2(0x19190808, 0x2b081919), uvec2(0x2b080808, 0x2b081919),
- uvec2(0x2b082b2b, 0x2b081919), uvec2(0x08080819, 0x2b08192b), uvec2(0x08081908, 0x2b08192b), uvec2(0x08190808, 0x2b08192b),
- uvec2(0x082b2b19, 0x2b08192b), uvec2(0x19080808, 0x2b08192b), uvec2(0x08080808, 0x2b082b08), uvec2(0x08081919, 0x2b082b08),
- uvec2(0x08190819, 0x2b082b08), uvec2(0x08191908, 0x2b082b08), uvec2(0x19080819, 0x2b082b08), uvec2(0x19081908, 0x2b082b08),
- uvec2(0x19190808, 0x2b082b08), uvec2(0x2b2b082b, 0x2b082b08), uvec2(0x08080819, 0x2b082b19), uvec2(0x08081908, 0x2b082b19),
- uvec2(0x19080808, 0x2b082b19), uvec2(0x192b1919, 0x2b082b19), uvec2(0x082b082b, 0x2b082b2b), uvec2(0x19192b08, 0x2b082b2b),
- uvec2(0x19192b2b, 0x2b082b2b), uvec2(0x2b08082b, 0x2b082b2b), uvec2(0x2b2b082b, 0x2b082b2b), uvec2(0x08080819, 0x2b190808),
- uvec2(0x08081908, 0x2b190808), uvec2(0x08082b19, 0x2b190808), uvec2(0x08190808, 0x2b190808), uvec2(0x0819082b, 0x2b190808),
- uvec2(0x08191919, 0x2b190808), uvec2(0x08192b08, 0x2b190808), uvec2(0x082b1908, 0x2b190808), uvec2(0x19080808, 0x2b190808),
- uvec2(0x1908082b, 0x2b190808), uvec2(0x19081919, 0x2b190808), uvec2(0x19082b08, 0x2b190808), uvec2(0x19190819, 0x2b190808),
- uvec2(0x19191908, 0x2b190808), uvec2(0x192b0808, 0x2b190808), uvec2(0x2b080819, 0x2b190808), uvec2(0x2b081908, 0x2b190808),
- uvec2(0x2b190808, 0x2b190808), uvec2(0x08080808, 0x2b190819), uvec2(0x08081919, 0x2b190819), uvec2(0x08190819, 0x2b190819),
- uvec2(0x08191908, 0x2b190819), uvec2(0x19080819, 0x2b190819), uvec2(0x19081908, 0x2b190819), uvec2(0x19190808, 0x2b190819),
- uvec2(0x19192b2b, 0x2b190819), uvec2(0x08080819, 0x2b19082b), uvec2(0x08081908, 0x2b19082b), uvec2(0x08190808, 0x2b19082b),
- uvec2(0x19080808, 0x2b19082b), uvec2(0x2b2b192b, 0x2b19082b), uvec2(0x08080808, 0x2b191908), uvec2(0x0808082b, 0x2b191908),
- uvec2(0x08081919, 0x2b191908), uvec2(0x08082b08, 0x2b191908), uvec2(0x08190819, 0x2b191908), uvec2(0x08191908, 0x2b191908),
- uvec2(0x082b0808, 0x2b191908), uvec2(0x19080819, 0x2b191908), uvec2(0x19081908, 0x2b191908), uvec2(0x19190808, 0x2b191908),
- uvec2(0x2b080808, 0x2b191908), uvec2(0x2b19192b, 0x2b191908), uvec2(0x08080819, 0x2b191919), uvec2(0x08081908, 0x2b191919),
- uvec2(0x08190808, 0x2b191919), uvec2(0x19080808, 0x2b191919), uvec2(0x2b192b08, 0x2b191919), uvec2(0x2b2b0819, 0x2b191919),
- uvec2(0x08080808, 0x2b19192b), uvec2(0x1908192b, 0x2b19192b), uvec2(0x192b1908, 0x2b19192b), uvec2(0x08080819, 0x2b192b08),
- uvec2(0x08081908, 0x2b192b08), uvec2(0x08190808, 0x2b192b08), uvec2(0x082b192b, 0x2b192b08), uvec2(0x19080808, 0x2b192b08),
- uvec2(0x2b2b2b19, 0x2b192b08), uvec2(0x08080808, 0x2b192b19), uvec2(0x19082b19, 0x2b192b19), uvec2(0x1919082b, 0x2b192b19),
- uvec2(0x2b190808, 0x2b192b2b), uvec2(0x08080808, 0x2b2b0808), uvec2(0x08081919, 0x2b2b0808), uvec2(0x08082b2b, 0x2b2b0808),
- uvec2(0x08191908, 0x2b2b0808), uvec2(0x082b082b, 0x2b2b0808), uvec2(0x082b2b2b, 0x2b2b0808), uvec2(0x19080819, 0x2b2b0808),
- uvec2(0x19081908, 0x2b2b0808), uvec2(0x19190808, 0x2b2b0808), uvec2(0x2b2b082b, 0x2b2b0808), uvec2(0x2b2b2b2b, 0x2b2b0808),
- uvec2(0x19080808, 0x2b2b0819), uvec2(0x192b1919, 0x2b2b0819), uvec2(0x0808082b, 0x2b2b082b), uvec2(0x08082b2b, 0x2b2b082b),
- uvec2(0x082b082b, 0x2b2b082b), uvec2(0x082b2b08, 0x2b2b082b), uvec2(0x082b2b2b, 0x2b2b082b), uvec2(0x2b08082b, 0x2b2b082b),
- uvec2(0x2b082b08, 0x2b2b082b), uvec2(0x2b082b2b, 0x2b2b082b), uvec2(0x2b2b2b08, 0x2b2b082b), uvec2(0x08080819, 0x2b2b1908),
- uvec2(0x08081908, 0x2b2b1908), uvec2(0x08190808, 0x2b2b1908), uvec2(0x19080808, 0x2b2b1908), uvec2(0x2b082b19, 0x2b2b1908),
- uvec2(0x2b2b1908, 0x2b2b1908), uvec2(0x08080808, 0x2b2b1919), uvec2(0x08192b19, 0x2b2b1919), uvec2(0x19190819, 0x2b2b192b),
- uvec2(0x08082b2b, 0x2b2b2b08), uvec2(0x082b2b08, 0x2b2b2b08), uvec2(0x2b2b082b, 0x2b2b2b08), uvec2(0x19191908, 0x2b2b2b19),
- uvec2(0x2b08192b, 0x2b2b2b19), uvec2(0x08082b08, 0x2b2b2b2b), uvec2(0x08082b2b, 0x2b2b2b2b), uvec2(0x082b0808, 0x2b2b2b2b),
- uvec2(0x082b082b, 0x2b2b2b2b), uvec2(0x082b2b08, 0x2b2b2b2b), uvec2(0x2b082b08, 0x2b2b2b2b), uvec2(0x2b2b2b2b, 0x2b2b2b2b)
-};
-
-shared uvec2 iq2s_grid[1024];
-
-#define NEEDS_INIT_IQ_SHMEM
-void init_iq_shmem(uvec3 wgsize)
-{
- // copy the table into shared memory and sync
- [[unroll]] for (uint i = 0; i < iq2s_grid.length(); i += wgsize.x) {
- if (iq2s_grid.length() % wgsize.x == 0 || i + gl_LocalInvocationIndex.x < iq2s_grid_const.length()) {
- iq2s_grid[i + gl_LocalInvocationIndex.x] = iq2s_grid_const[i + gl_LocalInvocationIndex.x];
- }
- }
- barrier();
-}
-
-#define QUANT_K QUANT_K_IQ2_S
-#define QUANT_R QUANT_R_IQ2_S
-#define A_TYPE block_iq2_s
-#define A_TYPE_PACKED16 block_iq2_s_packed16
-#endif
-
-#define QUANT_K_IQ3_XXS 256
-#define QUANT_R_IQ3_XXS 1
-
-struct block_iq3_xxs
-{
- float16_t d;
- uint8_t qs[QUANT_K_IQ3_XXS/4 + QUANT_K_IQ3_XXS/8];
-};
-
-struct block_iq3_xxs_packed16
-{
- float16_t d;
- uint16_t qs[QUANT_K_IQ3_XXS/8 + QUANT_K_IQ3_XXS/16];
-};
-
-#if defined(DATA_A_IQ3_XXS)
-
-const uint32_t iq3xxs_grid_const[256] = {
- 0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414,
- 0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14,
- 0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404,
- 0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e,
- 0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c,
- 0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c,
- 0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34,
- 0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c,
- 0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c,
- 0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04,
- 0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c,
- 0x0c24042c, 0x0c242c04, 0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414,
- 0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434,
- 0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c,
- 0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e,
- 0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24,
- 0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24,
- 0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c,
- 0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c,
- 0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14,
- 0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414,
- 0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e,
- 0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404,
- 0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c,
- 0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c,
- 0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14,
- 0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c,
- 0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c,
- 0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14,
- 0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14,
- 0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c,
- 0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 0x3e2c1424, 0x3e341c04,
-};
-
-shared uint32_t iq3xxs_grid[256];
-
-#define NEEDS_INIT_IQ_SHMEM
-void init_iq_shmem(uvec3 wgsize)
-{
- // copy the table into shared memory and sync
- [[unroll]] for (uint i = 0; i < iq3xxs_grid.length(); i += wgsize.x) {
- if (iq3xxs_grid.length() % wgsize.x == 0 || i + gl_LocalInvocationIndex.x < iq3xxs_grid.length()) {
- iq3xxs_grid[i + gl_LocalInvocationIndex.x] = iq3xxs_grid_const[i + gl_LocalInvocationIndex.x];
- }
- }
- barrier();
-}
-
-#define QUANT_K QUANT_K_IQ3_XXS
-#define QUANT_R QUANT_R_IQ3_XXS
-#define A_TYPE block_iq3_xxs
-#define A_TYPE_PACKED16 block_iq3_xxs_packed16
-#endif
-
-#define QUANT_K_IQ3_S 256
-#define QUANT_R_IQ3_S 1
-
-struct block_iq3_s
-{
- float16_t d;
- uint8_t qs[QUANT_K_IQ3_S/4];
- uint8_t qh[QUANT_K_IQ3_S/32];
- uint8_t signs[QUANT_K_IQ3_S/8];
- uint8_t scales[QUANT_K_IQ3_S/64];
-};
-
-struct block_iq3_s_packed16
-{
- float16_t d;
- uint16_t qs[QUANT_K_IQ3_S/4/2];
- uint16_t qh[QUANT_K_IQ3_S/32/2];
- uint16_t signs[QUANT_K_IQ3_S/8/2];
- uint16_t scales[QUANT_K_IQ3_S/64/2];
-};
-
-#if defined(DATA_A_IQ3_S)
-
-const uint32_t iq3s_grid_const[512] = {
- 0x01010101, 0x01010103, 0x01010105, 0x0101010b, 0x0101010f, 0x01010301, 0x01010303, 0x01010305,
- 0x01010309, 0x0101030d, 0x01010501, 0x01010503, 0x0101050b, 0x01010707, 0x01010901, 0x01010905,
- 0x0101090b, 0x0101090f, 0x01010b03, 0x01010b07, 0x01010d01, 0x01010d05, 0x01010f03, 0x01010f09,
- 0x01010f0f, 0x01030101, 0x01030103, 0x01030105, 0x01030109, 0x01030301, 0x01030303, 0x0103030b,
- 0x01030501, 0x01030507, 0x0103050f, 0x01030703, 0x0103070b, 0x01030909, 0x01030d03, 0x01030d0b,
- 0x01030f05, 0x01050101, 0x01050103, 0x0105010b, 0x0105010f, 0x01050301, 0x01050307, 0x0105030d,
- 0x01050503, 0x0105050b, 0x01050701, 0x01050709, 0x01050905, 0x0105090b, 0x0105090f, 0x01050b03,
- 0x01050b07, 0x01050f01, 0x01050f07, 0x01070107, 0x01070303, 0x0107030b, 0x01070501, 0x01070505,
- 0x01070703, 0x01070707, 0x0107070d, 0x01070909, 0x01070b01, 0x01070b05, 0x01070d0f, 0x01070f03,
- 0x01070f0b, 0x01090101, 0x01090307, 0x0109030f, 0x01090503, 0x01090509, 0x01090705, 0x01090901,
- 0x01090907, 0x01090b03, 0x01090f01, 0x010b0105, 0x010b0109, 0x010b0501, 0x010b0505, 0x010b050d,
- 0x010b0707, 0x010b0903, 0x010b090b, 0x010b090f, 0x010b0d0d, 0x010b0f07, 0x010d010d, 0x010d0303,
- 0x010d0307, 0x010d0703, 0x010d0b05, 0x010d0f03, 0x010f0101, 0x010f0105, 0x010f0109, 0x010f0501,
- 0x010f0505, 0x010f050d, 0x010f0707, 0x010f0b01, 0x010f0b09, 0x03010101, 0x03010103, 0x03010105,
- 0x03010109, 0x03010301, 0x03010303, 0x03010307, 0x0301030b, 0x0301030f, 0x03010501, 0x03010505,
- 0x03010703, 0x03010709, 0x0301070d, 0x03010b09, 0x03010b0d, 0x03010d03, 0x03010f05, 0x03030101,
- 0x03030103, 0x03030107, 0x0303010d, 0x03030301, 0x03030309, 0x03030503, 0x03030701, 0x03030707,
- 0x03030903, 0x03030b01, 0x03030b05, 0x03030f01, 0x03030f0d, 0x03050101, 0x03050305, 0x0305030b,
- 0x0305030f, 0x03050501, 0x03050509, 0x03050705, 0x03050901, 0x03050907, 0x03050b0b, 0x03050d01,
- 0x03050f05, 0x03070103, 0x03070109, 0x0307010f, 0x03070301, 0x03070307, 0x03070503, 0x0307050f,
- 0x03070701, 0x03070709, 0x03070903, 0x03070d05, 0x03070f01, 0x03090107, 0x0309010b, 0x03090305,
- 0x03090309, 0x03090703, 0x03090707, 0x03090905, 0x0309090d, 0x03090b01, 0x03090b09, 0x030b0103,
- 0x030b0301, 0x030b0307, 0x030b0503, 0x030b0701, 0x030b0705, 0x030b0b03, 0x030d0501, 0x030d0509,
- 0x030d050f, 0x030d0909, 0x030d090d, 0x030f0103, 0x030f0107, 0x030f0301, 0x030f0305, 0x030f0503,
- 0x030f070b, 0x030f0903, 0x030f0d05, 0x030f0f01, 0x05010101, 0x05010103, 0x05010107, 0x0501010b,
- 0x0501010f, 0x05010301, 0x05010305, 0x05010309, 0x0501030d, 0x05010503, 0x05010507, 0x0501050f,
- 0x05010701, 0x05010705, 0x05010903, 0x05010907, 0x0501090b, 0x05010b01, 0x05010b05, 0x05010d0f,
- 0x05010f01, 0x05010f07, 0x05010f0b, 0x05030101, 0x05030105, 0x05030301, 0x05030307, 0x0503030f,
- 0x05030505, 0x0503050b, 0x05030703, 0x05030709, 0x05030905, 0x05030b03, 0x05050103, 0x05050109,
- 0x0505010f, 0x05050503, 0x05050507, 0x05050701, 0x0505070f, 0x05050903, 0x05050b07, 0x05050b0f,
- 0x05050f03, 0x05050f09, 0x05070101, 0x05070105, 0x0507010b, 0x05070303, 0x05070505, 0x05070509,
- 0x05070703, 0x05070707, 0x05070905, 0x05070b01, 0x05070d0d, 0x05090103, 0x0509010f, 0x05090501,
- 0x05090507, 0x05090705, 0x0509070b, 0x05090903, 0x05090f05, 0x05090f0b, 0x050b0109, 0x050b0303,
- 0x050b0505, 0x050b070f, 0x050b0901, 0x050b0b07, 0x050b0f01, 0x050d0101, 0x050d0105, 0x050d010f,
- 0x050d0503, 0x050d0b0b, 0x050d0d03, 0x050f010b, 0x050f0303, 0x050f050d, 0x050f0701, 0x050f0907,
- 0x050f0b01, 0x07010105, 0x07010303, 0x07010307, 0x0701030b, 0x0701030f, 0x07010505, 0x07010703,
- 0x07010707, 0x0701070b, 0x07010905, 0x07010909, 0x0701090f, 0x07010b03, 0x07010d07, 0x07010f03,
- 0x07030103, 0x07030107, 0x0703010b, 0x07030309, 0x07030503, 0x07030507, 0x07030901, 0x07030d01,
- 0x07030f05, 0x07030f0d, 0x07050101, 0x07050305, 0x07050501, 0x07050705, 0x07050709, 0x07050b01,
- 0x07070103, 0x07070301, 0x07070309, 0x07070503, 0x07070507, 0x0707050f, 0x07070701, 0x07070903,
- 0x07070907, 0x0707090f, 0x07070b0b, 0x07070f07, 0x07090107, 0x07090303, 0x0709030d, 0x07090505,
- 0x07090703, 0x07090b05, 0x07090d01, 0x07090d09, 0x070b0103, 0x070b0301, 0x070b0305, 0x070b050b,
- 0x070b0705, 0x070b0909, 0x070b0b0d, 0x070b0f07, 0x070d030d, 0x070d0903, 0x070f0103, 0x070f0107,
- 0x070f0501, 0x070f0505, 0x070f070b, 0x09010101, 0x09010109, 0x09010305, 0x09010501, 0x09010509,
- 0x0901050f, 0x09010705, 0x09010903, 0x09010b01, 0x09010f01, 0x09030105, 0x0903010f, 0x09030303,
- 0x09030307, 0x09030505, 0x09030701, 0x0903070b, 0x09030907, 0x09030b03, 0x09030b0b, 0x09050103,
- 0x09050107, 0x09050301, 0x0905030b, 0x09050503, 0x09050707, 0x09050901, 0x09050b0f, 0x09050d05,
- 0x09050f01, 0x09070109, 0x09070303, 0x09070307, 0x09070501, 0x09070505, 0x09070703, 0x0907070b,
- 0x09090101, 0x09090105, 0x09090509, 0x0909070f, 0x09090901, 0x09090f03, 0x090b010b, 0x090b010f,
- 0x090b0503, 0x090b0d05, 0x090d0307, 0x090d0709, 0x090d0d01, 0x090f0301, 0x090f030b, 0x090f0701,
- 0x090f0907, 0x090f0b03, 0x0b010105, 0x0b010301, 0x0b010309, 0x0b010505, 0x0b010901, 0x0b010909,
- 0x0b01090f, 0x0b010b05, 0x0b010d0d, 0x0b010f09, 0x0b030103, 0x0b030107, 0x0b03010b, 0x0b030305,
- 0x0b030503, 0x0b030705, 0x0b030f05, 0x0b050101, 0x0b050303, 0x0b050507, 0x0b050701, 0x0b05070d,
- 0x0b050b07, 0x0b070105, 0x0b07010f, 0x0b070301, 0x0b07050f, 0x0b070909, 0x0b070b03, 0x0b070d0b,
- 0x0b070f07, 0x0b090103, 0x0b090109, 0x0b090501, 0x0b090705, 0x0b09090d, 0x0b0b0305, 0x0b0b050d,
- 0x0b0b0b03, 0x0b0b0b07, 0x0b0d0905, 0x0b0f0105, 0x0b0f0109, 0x0b0f0505, 0x0d010303, 0x0d010307,
- 0x0d01030b, 0x0d010703, 0x0d010707, 0x0d010d01, 0x0d030101, 0x0d030501, 0x0d03050f, 0x0d030d09,
- 0x0d050305, 0x0d050709, 0x0d050905, 0x0d050b0b, 0x0d050d05, 0x0d050f01, 0x0d070101, 0x0d070309,
- 0x0d070503, 0x0d070901, 0x0d09050b, 0x0d090907, 0x0d090d05, 0x0d0b0101, 0x0d0b0107, 0x0d0b0709,
- 0x0d0b0d01, 0x0d0d010b, 0x0d0d0901, 0x0d0f0303, 0x0d0f0307, 0x0f010101, 0x0f010109, 0x0f01010f,
- 0x0f010501, 0x0f010505, 0x0f01070d, 0x0f010901, 0x0f010b09, 0x0f010d05, 0x0f030105, 0x0f030303,
- 0x0f030509, 0x0f030907, 0x0f03090b, 0x0f050103, 0x0f050109, 0x0f050301, 0x0f05030d, 0x0f050503,
- 0x0f050701, 0x0f050b03, 0x0f070105, 0x0f070705, 0x0f07070b, 0x0f070b07, 0x0f090103, 0x0f09010b,
- 0x0f090307, 0x0f090501, 0x0f090b01, 0x0f0b0505, 0x0f0b0905, 0x0f0d0105, 0x0f0d0703, 0x0f0f0101,
-};
-
-shared uint32_t iq3s_grid[512];
-
-#define NEEDS_INIT_IQ_SHMEM
-void init_iq_shmem(uvec3 wgsize)
-{
- // copy the table into shared memory and sync
- [[unroll]] for (uint i = 0; i < iq3s_grid.length(); i += wgsize.x) {
- if (iq3s_grid.length() % wgsize.x == 0 || i + gl_LocalInvocationIndex.x < iq3s_grid.length()) {
- iq3s_grid[i + gl_LocalInvocationIndex.x] = iq3s_grid_const[i + gl_LocalInvocationIndex.x];
- }
- }
- barrier();
-}
-
-#define QUANT_K QUANT_K_IQ3_S
-#define QUANT_R QUANT_R_IQ3_S
-#define A_TYPE block_iq3_s
-#define A_TYPE_PACKED16 block_iq3_s_packed16
-#endif
-
-#define QUANT_K_IQ4_XS 256
-#define QUANT_R_IQ4_XS 1
-
-struct block_iq4_xs
-{
- float16_t d;
- uint16_t scales_h;
- uint8_t scales_l[QUANT_K_IQ4_XS/64];
- uint8_t qs[QUANT_K_IQ4_XS/2];
-};
-
-#if defined(DATA_A_IQ4_XS)
-#define QUANT_K QUANT_K_IQ4_XS
-#define QUANT_R QUANT_R_IQ4_XS
-#define A_TYPE block_iq4_xs
-#endif
-
-#define QUANT_K_IQ4_NL 32
-#define QUANT_R_IQ4_NL 2
-
-struct block_iq4_nl
-{
- float16_t d;
- uint8_t qs[QUANT_K_IQ4_NL/2];
-};
-
-struct block_iq4_nl_packed16
-{
- float16_t d;
- uint16_t qs[QUANT_K_IQ4_NL/2/2];
-};
-
-#if defined(DATA_A_IQ4_NL)
-#define QUANT_K QUANT_K_IQ4_NL
-#define QUANT_R QUANT_R_IQ4_NL
-#define A_TYPE block_iq4_nl
-#define A_TYPE_PACKED16 block_iq4_nl_packed16
-#endif
-
-#define QUANT_K_MXFP4 32
-#define QUANT_R_MXFP4 2
-
-struct block_mxfp4
-{
- uint8_t e;
- uint8_t qs[QUANT_K_MXFP4/2];
-};
-
-//struct block_mxfp4_packed16
-//{
-// uint8_t e;
-// uint16_t qs[QUANT_K_MXFP4/2/2];
-//};
-
-#if defined(DATA_A_MXFP4)
-#define QUANT_K QUANT_K_MXFP4
-#define QUANT_R QUANT_R_MXFP4
-#define QUANT_AUXF 1
-#define A_TYPE block_mxfp4
-//#define A_TYPE_PACKED16 block_mxfp4_packed16
-#endif
-
-#if defined(DATA_A_IQ4_NL) || defined(DATA_A_IQ4_XS)
-const int8_t kvalues_iq4nl_const[16] = {
- int8_t(-127), int8_t(-104), int8_t(-83), int8_t(-65), int8_t(-49), int8_t(-35), int8_t(-22), int8_t(-10),
- int8_t(1), int8_t(13), int8_t(25), int8_t(38), int8_t(53), int8_t(69), int8_t(89), int8_t(113)
-};
-
-shared FLOAT_TYPE kvalues_iq4nl[16];
-
-#define NEEDS_INIT_IQ_SHMEM
-void init_iq_shmem(uvec3 wgsize)
-{
- // copy the table into shared memory and sync
- for (uint i = gl_LocalInvocationIndex.x; i < kvalues_iq4nl.length(); i += wgsize.x) {
- kvalues_iq4nl[i] = FLOAT_TYPE(kvalues_iq4nl_const[i]);
- }
- barrier();
-}
-#endif
-
-#if defined(DATA_A_MXFP4)
-const FLOAT_TYPE kvalues_mxfp4_const[16] = {
- FLOAT_TYPE(0.0f), FLOAT_TYPE(0.5f), FLOAT_TYPE(1.0f), FLOAT_TYPE(1.5f), FLOAT_TYPE(2.0f), FLOAT_TYPE(3.0f), FLOAT_TYPE(4.0f), FLOAT_TYPE(6.0f),
- FLOAT_TYPE(-0.0f), FLOAT_TYPE(-0.5f), FLOAT_TYPE(-1.0f), FLOAT_TYPE(-1.5f), FLOAT_TYPE(-2.0f), FLOAT_TYPE(-3.0f), FLOAT_TYPE(-4.0f), FLOAT_TYPE(-6.0f)
-};
-
-shared FLOAT_TYPE kvalues_mxfp4[16];
-
-#define NEEDS_INIT_IQ_SHMEM
-void init_iq_shmem(uvec3 wgsize)
-{
- // copy the table into shared memory and sync
- for (uint i = gl_LocalInvocationIndex.x; i < kvalues_mxfp4.length(); i += wgsize.x) {
- kvalues_mxfp4[i] = kvalues_mxfp4_const[i];
- }
- barrier();
-}
-#endif
-
-// returns the bfloat value in the low 16b.
-// See ggml_compute_fp32_to_bf16
-uint32_t fp32_to_bf16(float f)
-{
- uint32_t u = floatBitsToUint(f);
- u = (u + (0x7fff + ((u >> 16) & 1))) >> 16;
- return u;
-}
-
-float bf16_to_fp32(uint32_t u)
-{
- return uintBitsToFloat(u << 16);
-}
-
-vec4 bf16_to_fp32(uvec4 u)
-{
- return vec4(bf16_to_fp32(u.x), bf16_to_fp32(u.y), bf16_to_fp32(u.z), bf16_to_fp32(u.w));
-}
-
-float e8m0_to_fp32(uint8_t x) {
- uint32_t bits;
-
- if (x == 0) {
- bits = 0x00400000;
- } else {
- bits = x;
- bits = bits << 23;
- }
-
- return uintBitsToFloat(bits);
-}
-
-#if BDA
-
-#extension GL_EXT_buffer_reference : enable
-#extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable
-
-#define BDA_STORAGE_T uint64_t
-#define BDA_OFFSET_T uint64_t
-
-#else
-
-#define BDA_STORAGE_T uvec2
-#define BDA_OFFSET_T uint
-
-#endif
-
-#endif // !defined(GGML_TYPES_COMP)
--- /dev/null
+#if !defined(GGML_TYPES_COMP)
+#define GGML_TYPES_COMP
+
+#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
+#extension GL_EXT_shader_16bit_storage : require
+
+#if defined(DATA_A_F32)
+#define QUANT_K 1
+#define QUANT_R 1
+
+#if LOAD_VEC_A == 4
+#define A_TYPE vec4
+#elif LOAD_VEC_A == 8
+#define A_TYPE mat2x4
+#else
+#define A_TYPE float
+#endif
+#endif
+
+#if defined(DATA_A_F16)
+#define QUANT_K 1
+#define QUANT_R 1
+
+#if LOAD_VEC_A == 4
+#define A_TYPE f16vec4
+#elif LOAD_VEC_A == 8
+#define A_TYPE f16mat2x4
+#else
+#define A_TYPE float16_t
+#endif
+#endif
+
+#if defined(DATA_A_BF16)
+#define QUANT_K 1
+#define QUANT_R 1
+
+#if LOAD_VEC_A == 4
+#define A_TYPE u16vec4
+#elif LOAD_VEC_A == 8
+#error unsupported
+#else
+#define A_TYPE uint16_t
+#endif
+#endif
+
+#define QUANT_K_Q4_0 32
+#define QUANT_R_Q4_0 2
+
+struct block_q4_0
+{
+ float16_t d;
+ uint8_t qs[16];
+};
+struct block_q4_0_packed16
+{
+ float16_t d;
+ uint16_t qs[16/2];
+};
+
+#if defined(DATA_A_Q4_0)
+#define QUANT_K QUANT_K_Q4_0
+#define QUANT_R QUANT_R_Q4_0
+#define QUANT_AUXF 1
+#define A_TYPE block_q4_0
+#define A_TYPE_PACKED16 block_q4_0_packed16
+#endif
+
+#define QUANT_K_Q4_1 32
+#define QUANT_R_Q4_1 2
+
+struct block_q4_1
+{
+ float16_t d;
+ float16_t m;
+ uint8_t qs[16];
+};
+
+struct block_q4_1_packed16
+{
+ float16_t d;
+ float16_t m;
+ uint16_t qs[16/2];
+};
+
+struct block_q4_1_packed32
+{
+ f16vec2 dm;
+ uint32_t qs[16/4];
+};
+
+#if defined(DATA_A_Q4_1)
+#define QUANT_K QUANT_K_Q4_1
+#define QUANT_R QUANT_R_Q4_1
+#define QUANT_AUXF 2
+#define A_TYPE block_q4_1
+#define A_TYPE_PACKED16 block_q4_1_packed16
+#define A_TYPE_PACKED32 block_q4_1_packed32
+#endif
+
+#define QUANT_K_Q5_0 32
+#define QUANT_R_Q5_0 2
+
+struct block_q5_0
+{
+ float16_t d;
+ uint16_t qh[2];
+ uint8_t qs[16];
+};
+
+struct block_q5_0_packed16
+{
+ float16_t d;
+ uint16_t qh[2];
+ uint16_t qs[16/2];
+};
+
+#if defined(DATA_A_Q5_0)
+#define QUANT_K QUANT_K_Q5_0
+#define QUANT_R QUANT_R_Q5_0
+#define QUANT_AUXF 1
+#define A_TYPE block_q5_0
+#define A_TYPE_PACKED16 block_q5_0_packed16
+#endif
+
+#define QUANT_K_Q5_1 32
+#define QUANT_R_Q5_1 2
+
+struct block_q5_1
+{
+ float16_t d;
+ float16_t m;
+ uint qh;
+ uint8_t qs[16];
+};
+
+struct block_q5_1_packed16
+{
+ float16_t d;
+ float16_t m;
+ uint qh;
+ uint16_t qs[16/2];
+};
+
+struct block_q5_1_packed32
+{
+ f16vec2 dm;
+ uint qh;
+ uint32_t qs[16/4];
+};
+
+#if defined(DATA_A_Q5_1)
+#define QUANT_K QUANT_K_Q5_1
+#define QUANT_R QUANT_R_Q5_1
+#define QUANT_AUXF 2
+#define A_TYPE block_q5_1
+#define A_TYPE_PACKED16 block_q5_1_packed16
+#define A_TYPE_PACKED32 block_q5_1_packed32
+#endif
+
+#define QUANT_K_Q8_0 32
+#define QUANT_R_Q8_0 1
+
+struct block_q8_0
+{
+ float16_t d;
+ int8_t qs[32];
+};
+struct block_q8_0_packed16
+{
+ float16_t d;
+ int16_t qs[32/2];
+};
+struct block_q8_0_packed32
+{
+ float16_t d;
+ int32_t qs[32/4];
+};
+
+#if defined(DATA_A_Q8_0)
+#define QUANT_K QUANT_K_Q8_0
+#define QUANT_R QUANT_R_Q8_0
+#define QUANT_AUXF 1
+#define A_TYPE block_q8_0
+#define A_TYPE_PACKED16 block_q8_0_packed16
+#define A_TYPE_PACKED32 block_q8_0_packed32
+#endif
+
+#define QUANT_K_Q8_1 32
+#define QUANT_R_Q8_1 1
+
+struct block_q8_1
+{
+ f16vec2 ds;
+ int8_t qs[32];
+};
+struct block_q8_1_packed16
+{
+ f16vec2 ds;
+ int16_t qs[16];
+};
+struct block_q8_1_packed32
+{
+ f16vec2 ds;
+ int32_t qs[8];
+};
+
+// 4 blocks in one to allow 16-byte/128-bit alignment and loads
+struct block_q8_1_x4
+{
+ f16vec2 ds[4];
+ int32_t qs[32];
+};
+struct block_q8_1_x4_packed128
+{
+ f16vec2 ds[4];
+ ivec4 qs[8];
+};
+
+// K-quants
+#define QUANT_K_Q2_K 256
+
+struct block_q2_K
+{
+ uint8_t scales[QUANT_K_Q2_K/16];
+ uint8_t qs[QUANT_K_Q2_K/4];
+ f16vec2 d;
+};
+
+struct block_q2_K_packed16
+{
+ uint16_t scales[QUANT_K_Q2_K/16/2];
+ uint16_t qs[QUANT_K_Q2_K/4/2];
+ f16vec2 d;
+};
+
+struct block_q2_K_packed32
+{
+ uint32_t scales[QUANT_K_Q2_K/16/4];
+ uint32_t qs[QUANT_K_Q2_K/4/4];
+ f16vec2 d;
+};
+
+#if defined(DATA_A_Q2_K)
+#define QUANT_K QUANT_K_Q2_K
+#define QUANT_R 1
+#define A_TYPE block_q2_K
+#define A_TYPE_PACKED16 block_q2_K_packed16
+#define A_TYPE_PACKED32 block_q2_K_packed32
+#endif
+
+#define QUANT_K_Q3_K 256
+
+struct block_q3_K
+{
+ uint8_t hmask[QUANT_K_Q3_K/8];
+ uint8_t qs[QUANT_K_Q3_K/4];
+ uint8_t scales[12];
+ float16_t d;
+};
+
+struct block_q3_K_packed16
+{
+ uint16_t hmask[QUANT_K_Q3_K/8/2];
+ uint16_t qs[QUANT_K_Q3_K/4/2];
+ uint16_t scales[12/2];
+ float16_t d;
+};
+
+#if defined(DATA_A_Q3_K)
+#define QUANT_K QUANT_K_Q3_K
+#define QUANT_R 1
+#define A_TYPE block_q3_K
+#define A_TYPE_PACKED16 block_q3_K_packed16
+#endif
+
+#define QUANT_K_Q4_K 256
+
+struct block_q4_K
+{
+ f16vec2 d;
+ uint8_t scales[3*QUANT_K_Q4_K/64];
+ uint8_t qs[QUANT_K_Q4_K/2];
+};
+
+struct block_q4_K_packed16
+{
+ f16vec2 d;
+ uint16_t scales[3*QUANT_K_Q4_K/64/2];
+ uint16_t qs[QUANT_K_Q4_K/2/2];
+};
+
+struct block_q4_K_packed32
+{
+ f16vec2 d;
+ uint32_t scales[3*QUANT_K_Q4_K/64/4];
+ uint32_t qs[QUANT_K_Q4_K/2/4];
+};
+
+struct block_q4_K_packed128
+{
+ uvec4 q4k[9];
+};
+
+#if defined(DATA_A_Q4_K)
+#define QUANT_K QUANT_K_Q4_K
+#define QUANT_R 1
+#define A_TYPE block_q4_K
+#define A_TYPE_PACKED16 block_q4_K_packed16
+#define A_TYPE_PACKED32 block_q4_K_packed32
+#endif
+
+#define QUANT_K_Q5_K 256
+
+struct block_q5_K
+{
+ f16vec2 d;
+ uint8_t scales[12];
+ uint8_t qh[QUANT_K_Q5_K/8];
+ uint8_t qs[QUANT_K_Q5_K/2];
+};
+
+struct block_q5_K_packed16
+{
+ f16vec2 d;
+ uint16_t scales[12/2];
+ uint16_t qh[QUANT_K_Q5_K/8/2];
+ uint16_t qs[QUANT_K_Q5_K/2/2];
+};
+
+struct block_q5_K_packed128
+{
+ uvec4 q5k[11];
+};
+
+#if defined(DATA_A_Q5_K)
+#define QUANT_K QUANT_K_Q5_K
+#define QUANT_R 1
+#define A_TYPE block_q5_K
+#define A_TYPE_PACKED16 block_q5_K_packed16
+#endif
+
+#define QUANT_K_Q6_K 256
+
+struct block_q6_K
+{
+ uint8_t ql[QUANT_K_Q6_K/2];
+ uint8_t qh[QUANT_K_Q6_K/4];
+ int8_t scales[QUANT_K_Q6_K/16];
+ float16_t d;
+};
+
+struct block_q6_K_packed16
+{
+ uint16_t ql[QUANT_K_Q6_K/2/2];
+ uint16_t qh[QUANT_K_Q6_K/4/2];
+ int8_t scales[QUANT_K_Q6_K/16];
+ float16_t d;
+};
+
+#if defined(DATA_A_Q6_K)
+#define QUANT_K QUANT_K_Q6_K
+#define QUANT_R 1
+#define A_TYPE block_q6_K
+#define A_TYPE_PACKED16 block_q6_K_packed16
+#endif
+
+// IQuants
+
+#define QUANT_K_IQ1_S 256
+#define QUANT_R_IQ1_S 1
+
+struct block_iq1_s {
+ float16_t d;
+ uint8_t qs[QUANT_K_IQ1_S/8];
+ uint16_t qh[QUANT_K_IQ1_S/32];
+};
+
+#define QUANT_K_IQ1_M 256
+#define QUANT_R_IQ1_M 1
+
+struct block_iq1_m {
+ uint8_t qs[QUANT_K_IQ1_M/8];
+ uint8_t qh[QUANT_K_IQ1_M/16];
+ uint16_t scales[QUANT_K_IQ1_M/64];
+};
+
+struct block_iq1_m_packed64 {
+ uint64_t qs[QUANT_K_IQ1_M/8/8];
+ uint64_t qh[QUANT_K_IQ1_M/16/8];
+ uint64_t scales;
+};
+
+#if defined(DATA_A_IQ1_S)
+#define QUANT_K QUANT_K_IQ1_S
+#define QUANT_R QUANT_R_IQ1_S
+#define A_TYPE block_iq1_s
+#endif
+
+#if defined(DATA_A_IQ1_M)
+#define QUANT_K QUANT_K_IQ1_M
+#define QUANT_R QUANT_R_IQ1_M
+#define A_TYPE block_iq1_m
+#endif
+
+#if defined(DATA_A_IQ1_S) || defined(DATA_A_IQ1_M)
+#define IQ1S_DELTA 0.125f
+#define IQ1M_DELTA 0.125f
+
+// Packed IQ1S grid where every 2 vec8 are encoded on 32 bits (2 bits per coordinate).
+const uint[1024] iq1s_grid_const = {
+ 0xfffdffff, 0xfff7fff0, 0xffccfff5, 0xffdfffc0, 0xffd7ffdd, 0xff30ffd5, 0xff03ff0c, 0xff10ff01,
+ 0xff7dff7f, 0xff75ff77, 0xff5fff40, 0xff57ff5d, 0xfcf3ff55, 0xfcccfcf0, 0xfcc1fcc3, 0xfcc5fcc4,
+ 0xfc3cfcd0, 0xfc34fc31, 0xfc00fc0d, 0xfc1cfc05, 0xfc11fc13, 0xfc70fc17, 0xfc43fc4c, 0xfc50fc41,
+ 0xfdfdfdff, 0xfdf5fdf7, 0xfddffdc0, 0xfdd7fddd, 0xfd30fdd5, 0xfd04fd0c, 0xfd14fd13, 0xfd7dfd7f,
+ 0xfd75fd77, 0xfd40fd4c, 0xfd5ffd44, 0xfd57fd5d, 0xf3ccfd55, 0xf3c1f3c3, 0xf33cf3d0, 0xf300f334,
+ 0xf313f305, 0xf34cf310, 0xf350f344, 0xf0f3f0fc, 0xf0f1f0f0, 0xf0c7f0c0, 0xf0d4f0c5, 0xf030f03f,
+ 0xf00ff035, 0xf003f00c, 0xf001f000, 0xf01ff004, 0xf010f01d, 0xf015f017, 0xf04cf07c, 0xf047f040,
+ 0xf05cf045, 0xf050f053, 0xf054f051, 0xf1c4f1c3, 0xf133f13c, 0xf10df10f, 0xf107f100, 0xf11cf11f,
+ 0xf114f111, 0xf14cf170, 0xf144f143, 0xf7fdf7ff, 0xf7f5f7f7, 0xf7dff7c0, 0xf7d7f7dd, 0xf730f7d5,
+ 0xf701f70c, 0xf77ff710, 0xf777f77d, 0xf740f775, 0xf75df75f, 0xf755f757, 0xf4ccf4f0, 0xf4c4f4c3,
+ 0xf4d0f4d3, 0xf40ff43c, 0xf400f40c, 0xf413f41c, 0xf44cf414, 0xf441f443, 0xf450f444, 0xf5fdf5ff,
+ 0xf5f5f5f7, 0xf5dff5c0, 0xf5d7f5dd, 0xf530f5d5, 0xf504f50c, 0xf510f51c, 0xf57df57f, 0xf577f570,
+ 0xf540f575, 0xf55df55f, 0xf555f557, 0xcfcccfcf, 0xcfc4cfc3, 0xcfd0cfd3, 0xcf33cf3c, 0xcf00cf0f,
+ 0xcf1ccf07, 0xcf10cf13, 0xcf4ccf14, 0xcf41cf43, 0xcf50cf5c, 0xccf3ccfc, 0xccf4ccf1, 0xcccdcccf,
+ 0xccc7ccc0, 0xccd3ccdc, 0xcc30ccd4, 0xcc0fcc35, 0xcc0dcc0c, 0xcc00cc03, 0xcc04cc01, 0xcc10cc1f,
+ 0xcc4dcc73, 0xcc5ccc40, 0xcdcccc53, 0xcdc1cdc3, 0xcd3fcdd0, 0xcd34cd31, 0xcd00cd0d, 0xcd05cd07,
+ 0xcd11cd13, 0xcd4ccd70, 0xcd41cd43, 0xc3fccd50, 0xc3f4c3f1, 0xc3c0c3c3, 0xc3c4c3c7, 0xc3d1c3dc,
+ 0xc330c33c, 0xc337c331, 0xc30cc335, 0xc300c303, 0xc304c301, 0xc310c31d, 0xc373c317, 0xc34fc374,
+ 0xc340c343, 0xc344c347, 0xc35cc345, 0xc350c353, 0xc0fdc354, 0xc0f5c0f0, 0xc0c3c0cc, 0xc0c1c0c0,
+ 0xc0dfc0c4, 0xc0d0c0dd, 0xc0d5c0d7, 0xc033c03c, 0xc031c030, 0xc00dc00c, 0xc000c003, 0xc004c001,
+ 0xc01cc005, 0xc010c013, 0xc014c011, 0xc07dc07f, 0xc070c073, 0xc075c077, 0xc04cc04f, 0xc040c043,
+ 0xc044c041, 0xc05fc045, 0xc050c05d, 0xc1f3c1fc, 0xc1f1c1f0, 0xc1c1c1c0, 0xc1c5c1c7, 0xc1d1c1dc,
+ 0xc13dc13f, 0xc130c133, 0xc135c137, 0xc100c10c, 0xc107c101, 0xc11cc104, 0xc110c113, 0xc114c117,
+ 0xc171c115, 0xc14dc175, 0xc153c140, 0xc7ccc154, 0xc7d0c7c1, 0xc733c73c, 0xc734c731, 0xc700c70f,
+ 0xc705c707, 0xc71cc71f, 0xc711c713, 0xc770c714, 0xc743c74c, 0xc4cfc750, 0xc4c0c4cd, 0xc4dcc4c5,
+ 0xc43dc4d0, 0xc430c433, 0xc40cc437, 0xc400c403, 0xc404c401, 0xc41fc405, 0xc415c410, 0xc44cc474,
+ 0xc440c44d, 0xc45cc447, 0xc454c451, 0xc5c1c5f4, 0xc5d1c5d3, 0xc531c533, 0xc50fc534, 0xc500c50d,
+ 0xc51cc507, 0xc514c511, 0xc54cc570, 0xc545c541, 0xdffddfff, 0xdff5dff7, 0xdfdfdfc0, 0xdfd0dfdd,
+ 0xdfd5dfd7, 0xdf0cdf30, 0xdf1cdf04, 0xdf7fdf10, 0xdf77df7d, 0xdf40df75, 0xdf5ddf5f, 0xdf57df50,
+ 0xdcf0df55, 0xdcc3dccc, 0xdcd0dcc4, 0xdc33dc3d, 0xdc00dc34, 0xdc05dc07, 0xdc13dc1c, 0xdc11dc10,
+ 0xdc4fdc70, 0xdc44dc41, 0xddfcdc50, 0xddf5ddf7, 0xddc0ddcc, 0xdddddddf, 0xddd5ddd7, 0xdd0cdd30,
+ 0xdd04dd01, 0xdd7cdd10, 0xdd75dd77, 0xdd40dd4c, 0xdd5ddd5f, 0xdd55dd57, 0xd3c3d3f0, 0xd3c4d3c1,
+ 0xd333d3d0, 0xd331d330, 0xd30dd334, 0xd307d300, 0xd311d305, 0xd34cd370, 0xd344d343, 0xd350d35c,
+ 0xd0c0d0f4, 0xd0d4d0dc, 0xd030d03f, 0xd00cd037, 0xd000d003, 0xd01dd004, 0xd017d010, 0xd04fd074,
+ 0xd040d043, 0xd045d047, 0xd053d05c, 0xd054d051, 0xd1cfd1f0, 0xd1c4d1cd, 0xd13cd1d0, 0xd100d134,
+ 0xd11cd11f, 0xd173d114, 0xd14fd171, 0xd7ffd145, 0xd7f7d7fd, 0xd7c0d7f5, 0xd7ddd7df, 0xd7d5d7d7,
+ 0xd70cd730, 0xd710d703, 0xd77dd77f, 0xd775d777, 0xd75dd75f, 0xd755d757, 0xd4ccd4f4, 0xd4c4d4c3,
+ 0xd431d4d0, 0xd40dd434, 0xd41cd400, 0xd411d413, 0xd470d414, 0xd441d44f, 0xd453d444, 0xd5ffd450,
+ 0xd5f7d5fd, 0xd5dfd5f5, 0xd5d7d5dd, 0xd530d5d5, 0xd501d50c, 0xd510d504, 0xd57dd57f, 0xd575d577,
+ 0xd55fd540, 0xd557d55d, 0x3ff0d555, 0x3fc13fcc, 0x3f343fd0, 0x3f003f0d, 0x3f053f07, 0x3f133f1c,
+ 0x3f433f11, 0x3f5c3f44, 0x3cff3f51, 0x3cf33cfc, 0x3cf43cf1, 0x3cc03ccd, 0x3cc73cc1, 0x3cdc3cc5,
+ 0x3cd43cd1, 0x3c373c30, 0x3c0c3c35, 0x3c003c03, 0x3c043c01, 0x3c103c05, 0x3c153c17, 0x3c733c7c,
+ 0x3c4f3c71, 0x3c403c4d, 0x3c5c3c5f, 0x3df03c5d, 0x3dc33dcc, 0x3dd03dc1, 0x3d0d3d3c, 0x3d053d00,
+ 0x3d143d13, 0x3d433d74, 0x33fc3d50, 0x33c433c0, 0x333033d4, 0x33353337, 0x3303330c, 0x33013300,
+ 0x331d331c, 0x33173310, 0x337c3315, 0x33743371, 0x334d334f, 0x335f3340, 0x3354335c, 0x30fd30fc,
+ 0x30f530f0, 0x30c330cc, 0x30c130c0, 0x30df30c4, 0x30d530d0, 0x3033303c, 0x30313030, 0x300f3034,
+ 0x3003300c, 0x30013000, 0x30043007, 0x3013301c, 0x30113010, 0x307d3014, 0x30703073, 0x304c3077,
+ 0x30403043, 0x30443041, 0x30503045, 0x30553057, 0x31f031fc, 0x31c331f4, 0x31c731c0, 0x31dc31c5,
+ 0x31d431d3, 0x313d313f, 0x31373130, 0x310c310f, 0x3100310d, 0x31043101, 0x3110311d, 0x317c3117,
+ 0x31753170, 0x31403143, 0x3153315c, 0x37f03151, 0x37c037cc, 0x37d037c5, 0x3734373d, 0x3700370f,
+ 0x371c3707, 0x37113713, 0x37703714, 0x3743374c, 0x37443741, 0x34fc3750, 0x34f134f0, 0x34cf34f5,
+ 0x34c034c3, 0x34dc34c7, 0x34d134d3, 0x3430343f, 0x340c3435, 0x3403340d, 0x34013400, 0x341f3404,
+ 0x3410341d, 0x34153411, 0x34743471, 0x3440344d, 0x34473441, 0x3453345c, 0x34543451, 0x353335c1,
+ 0x35343531, 0x35073500, 0x35133505, 0x35433514, 0x0ffc3550, 0x0ff00ff3, 0x0ff40ff1, 0x0fc00fcd,
+ 0x0fdc0fc5, 0x0fd40fd3, 0x0f300f3f, 0x0f0c0f37, 0x0f000f03, 0x0f040f01, 0x0f170f10, 0x0f740f71,
+ 0x0f470f40, 0x0f5c0f5f, 0x0f540f51, 0x0cf70cf0, 0x0cf50cf4, 0x0cc30ccc, 0x0cc10cc0, 0x0cc40cc7,
+ 0x0cd00cdf, 0x0cd70cd1, 0x0c3c0cd5, 0x0c300c33, 0x0c340c31, 0x0c0c0c0f, 0x0c030c0d, 0x0c010c00,
+ 0x0c040c07, 0x0c1c0c05, 0x0c100c13, 0x0c140c11, 0x0c700c7d, 0x0c430c4c, 0x0c410c40, 0x0c5f0c44,
+ 0x0c550c50, 0x0df10dfc, 0x0dc00dcd, 0x0ddc0dc5, 0x0d3d0dd3, 0x0d350d30, 0x0d030d0c, 0x0d010d00,
+ 0x0d1d0d04, 0x0d700d10, 0x0d4d0d4f, 0x0d440d40, 0x0d530d45, 0x03f003f3, 0x03c303cc, 0x03c103c0,
+ 0x03c403c7, 0x03d003dc, 0x03d503d7, 0x0333033c, 0x03310330, 0x03350334, 0x030c030f, 0x03000303,
+ 0x03070301, 0x03050304, 0x031d031c, 0x03100313, 0x03140311, 0x0377037f, 0x034c0375, 0x03400343,
+ 0x03440341, 0x0353035c, 0x03550350, 0x00fd00fc, 0x00f000f3, 0x00f400f1, 0x00cc00cf, 0x00c300cd,
+ 0x00c100c0, 0x00c500c4, 0x00d300dc, 0x00d100d0, 0x003f00d4, 0x003d003c, 0x00300033, 0x00370031,
+ 0x000f0034, 0x000d000c, 0x00000003, 0x00070001, 0x00050004, 0x001c001f, 0x00100013, 0x00170011,
+ 0x00150014, 0x0073007c, 0x00740070, 0x004f0075, 0x0043004c, 0x00410040, 0x00440047, 0x0053005c,
+ 0x00510050, 0x01ff0054, 0x01fd01fc, 0x01f101f3, 0x01f401f7, 0x01c301cc, 0x01c701c0, 0x01df01c4,
+ 0x01dd01dc, 0x01d001d3, 0x01d701d1, 0x013c01d4, 0x01310130, 0x01340137, 0x010f0135, 0x010d010c,
+ 0x01000103, 0x01070101, 0x01050104, 0x0113011c, 0x01140110, 0x0170017d, 0x01770171, 0x01750174,
+ 0x0140014c, 0x015d0145, 0x01510150, 0x01540157, 0x07f007f3, 0x07f407f1, 0x07c007cf, 0x07dc07c7,
+ 0x073007d5, 0x07350737, 0x0703070c, 0x07010700, 0x07040707, 0x071d071f, 0x07100713, 0x0774077d,
+ 0x074d074f, 0x07470740, 0x0754075c, 0x04fd04fc, 0x04f504f0, 0x04c304cc, 0x04c104c0, 0x04d004c4,
+ 0x0433043c, 0x04310430, 0x040f0434, 0x040d040c, 0x04000403, 0x04070401, 0x04050404, 0x0413041c,
+ 0x04110410, 0x047c0414, 0x04740470, 0x0443044c, 0x04410440, 0x04440447, 0x05f30450, 0x05c005f7,
+ 0x05df05c5, 0x05d105d0, 0x053005d4, 0x05340537, 0x0500050c, 0x05070501, 0x051d0504, 0x05170510,
+ 0x057c0515, 0x054d0575, 0x05410540, 0x05450547, 0x1ff0055c, 0x1fc11fc3, 0x1fd01fc4, 0x1f0f1f33,
+ 0x1f011f00, 0x1f051f07, 0x1f131f1c, 0x1f141f11, 0x1f411f7c, 0x1cfc1f50, 0x1cf11cf3, 0x1ccd1cf4,
+ 0x1cdc1cc0, 0x1cd11cdd, 0x1c301cd4, 0x1c0c1c34, 0x1c011c00, 0x1c101c04, 0x1c151c11, 0x1c751c73,
+ 0x1c401c4d, 0x1c511c5c, 0x1dcc1c54, 0x1dc41dc1, 0x1d3c1d3f, 0x1d001d31, 0x1d071d01, 0x1d701d1f,
+ 0x1d411d4c, 0x13cc1d50, 0x13c013cd, 0x13c513c1, 0x13d113dc, 0x133f13d4, 0x1330133d, 0x13351337,
+ 0x1303130c, 0x13011300, 0x13051304, 0x131d131f, 0x13731310, 0x13741370, 0x134d134f, 0x13401343,
+ 0x13471341, 0x135c1345, 0x13541353, 0x10f710f0, 0x10cc10f5, 0x10c110c0, 0x103310c4, 0x10311030,
+ 0x100f1034, 0x1003100c, 0x10011000, 0x101c1004, 0x10101013, 0x10141011, 0x10741071, 0x104c1075,
+ 0x10411040, 0x10451044, 0x1050105d, 0x10571051, 0x11f411fd, 0x11df11c0, 0x11d711d1, 0x113f11d4,
+ 0x11371130, 0x110c1135, 0x11001103, 0x11071101, 0x111f1105, 0x11171110, 0x117d117f, 0x11751170,
+ 0x11411143, 0x11441147, 0x1153115f, 0x11551151, 0x17c417c1, 0x173c17d0, 0x1700170d, 0x171c1705,
+ 0x17701714, 0x1747174c, 0x14fc1751, 0x14cf14f3, 0x14dc14c0, 0x14d114d3, 0x143f14d4, 0x1430143c,
+ 0x14371431, 0x1403140c, 0x14011400, 0x141f1404, 0x14151410, 0x1473147d, 0x14401475, 0x1453145c,
+ 0x14541450, 0x15c115cc, 0x153c15c7, 0x15341533, 0x1500150f, 0x15051507, 0x15101513, 0x15711514,
+ 0x15471543, 0x15511545, 0x7ffd7fff, 0x7ff57ff7, 0x7fdd7fdf, 0x7fd57fd7, 0x7f0f7f30, 0x7f037f0c,
+ 0x7f047f01, 0x7f7f7f10, 0x7f777f7d, 0x7f407f75, 0x7f5d7f5f, 0x7f557f57, 0x7ccc7cf0, 0x7cc17cc3,
+ 0x7cd07cc4, 0x7c337c3c, 0x7c0f7c34, 0x7c007c0d, 0x7c077c01, 0x7c137c04, 0x7c147c11, 0x7c747c70,
+ 0x7c417c43, 0x7c507c44, 0x7dfd7dff, 0x7df57df7, 0x7ddf7dc0, 0x7dd77ddd, 0x7d0c7dd5, 0x7d047d03,
+ 0x7d7f7d10, 0x7d777d7d, 0x7d407d75, 0x7d5d7d5f, 0x7d557d57, 0x73c473c3, 0x7333733c, 0x7300730c,
+ 0x731c7305, 0x73147313, 0x73447343, 0x70f470fc, 0x70c070cd, 0x70d170c5, 0x703f70d4, 0x7030703c,
+ 0x700c7037, 0x70007003, 0x70047001, 0x70107005, 0x70177011, 0x707c7015, 0x70717073, 0x704f7074,
+ 0x7040704d, 0x70517047, 0x71c171cc, 0x71d071c4, 0x7133713c, 0x71357134, 0x7100710f, 0x71057104,
+ 0x7111711c, 0x71707115, 0x7145714c, 0x77ff7153, 0x77f777fd, 0x77c077f5, 0x77dd77df, 0x77d577d7,
+ 0x7730773c, 0x7703770c, 0x77107704, 0x777f7714, 0x7777777d, 0x77407775, 0x775d775f, 0x77557757,
+ 0x74f174f0, 0x74c374cc, 0x74d074c1, 0x7433743c, 0x74347431, 0x740d740f, 0x74057400, 0x7413741c,
+ 0x74417470, 0x74507444, 0x75fd75ff, 0x75f575f7, 0x75df75c0, 0x75d775dd, 0x753075d5, 0x7503750c,
+ 0x757f7501, 0x7577757d, 0x75407575, 0x755d755f, 0x75557557, 0x4fcc4ff0, 0x4fc74fc1, 0x4fd04fc4,
+ 0x4f314f3c, 0x4f004f34, 0x4f054f07, 0x4f154f14, 0x4f4c4f70, 0x4f414f43, 0x4f504f44, 0x4cf34cfc,
+ 0x4cf44cf1, 0x4cc04ccf, 0x4cc54cc7, 0x4cd34cdc, 0x4cd44cd1, 0x4c304c3f, 0x4c0c4c0f, 0x4c004c03,
+ 0x4c044c01, 0x4c104c1d, 0x4c714c73, 0x4c404c4d, 0x4c5c4c47, 0x4c514c53, 0x4df04c54, 0x4dc34dcc,
+ 0x4dd04dc4, 0x4d314d33, 0x4d0f4d34, 0x4d004d0d, 0x4d114d07, 0x4d704d14, 0x4d414d43, 0x43fc4d54,
+ 0x43f143f3, 0x43c043cf, 0x43d143c7, 0x4335433f, 0x4303430c, 0x43014300, 0x43044307, 0x431c431f,
+ 0x4310431d, 0x43714373, 0x4343434d, 0x43474340, 0x4354435c, 0x40f040ff, 0x40f540f7, 0x40cc40cf,
+ 0x40c040c3, 0x40c440c1, 0x40d040dc, 0x40d540d4, 0x4033403c, 0x40314030, 0x400f4034, 0x400d400c,
+ 0x40004003, 0x40074001, 0x40054004, 0x4013401c, 0x40114010, 0x407c4014, 0x40774070, 0x404d404c,
+ 0x40404043, 0x40444041, 0x405f4045, 0x4050405d, 0x40554057, 0x41f341fc, 0x41c041cf, 0x41df41c4,
+ 0x41d441d1, 0x41374130, 0x410c4134, 0x4100410d, 0x41044101, 0x41174110, 0x4173417d, 0x41754174,
+ 0x4143414d, 0x41534140, 0x41544151, 0x47c147f0, 0x47d047c4, 0x4731473c, 0x470d470f, 0x47014700,
+ 0x47134705, 0x47704710, 0x4741474c, 0x47504744, 0x44f144f3, 0x44cf44f4, 0x44c044cd, 0x44c544c7,
+ 0x44dc44df, 0x44d144d3, 0x443d443f, 0x44374430, 0x440c4435, 0x44004403, 0x44044401, 0x4410441d,
+ 0x44154411, 0x4473447c, 0x444d444f, 0x44454440, 0x4451445c, 0x45c045f0, 0x453345d0, 0x45344531,
+ 0x4500450f, 0x451c4507, 0x454c4570, 0x45404543, 0x5fff4541, 0x5ff75ffd, 0x5fc05ff5, 0x5fdd5fdf,
+ 0x5fd55fd7, 0x5f0c5f30, 0x5f015f03, 0x5f7f5f04, 0x5f775f7d, 0x5f405f75, 0x5f5d5f5f, 0x5f555f57,
+ 0x5cf45cf0, 0x5cc35ccc, 0x5cc45cc1, 0x5c315cc5, 0x5c0c5c34, 0x5c075c00, 0x5c1c5c05, 0x5c705c13,
+ 0x5c4d5c4f, 0x5c445c41, 0x5df75dfd, 0x5dcf5df5, 0x5ddd5dc4, 0x5dd55dd7, 0x5d0c5d30, 0x5d045d01,
+ 0x5d7f5d10, 0x5d775d7d, 0x5d405d75, 0x5d5d5d5f, 0x5d555d57, 0x53d053c4, 0x5333533c, 0x5303530f,
+ 0x53075300, 0x531c5305, 0x53115310, 0x53145317, 0x50f15370, 0x50cf50f4, 0x50c050cd, 0x50d150c7,
+ 0x503d50d4, 0x500c5030, 0x50005003, 0x50045001, 0x50155010, 0x5073507c, 0x50715070, 0x504d5074,
+ 0x50475040, 0x51cc51f0, 0x51c551c1, 0x51d051dc, 0x51315133, 0x510d5135, 0x51015100, 0x511f5107,
+ 0x5171511d, 0x5140514f, 0x51445141, 0x5153515c, 0x57ff5151, 0x57f757fd, 0x57df57f5, 0x57d757dd,
+ 0x570c57d5, 0x57015703, 0x577f5704, 0x5777577d, 0x57405775, 0x575d575f, 0x57555757, 0x54c354f0,
+ 0x54dc54c4, 0x543c54d0, 0x5400540f, 0x541c5405, 0x54145411, 0x5441544f, 0x55fd55ff, 0x55f555f7,
+ 0x55dd55df, 0x55d555d7, 0x5503550c, 0x557f5501, 0x5577557d, 0x55405575, 0x555d555f, 0x55555557
+};
+
+// Unpacked copy of iq1s_grid_const in workgroup shared memory: each packed
+// 32-bit entry expands into two 16-bit values, so 1024 uints -> 2048 uint16s.
+shared uint16_t iq1s_grid[2048];
+
+#define NEEDS_INIT_IQ_SHMEM
+// Cooperatively unpack the IQ1S grid table into shared memory.
+// wgsize: the workgroup dimensions; only wgsize.x lanes participate per step.
+// Every invocation in the workgroup must call this before reading iq1s_grid.
+void init_iq_shmem(uvec3 wgsize)
+{
+    // copy the table into shared memory and sync
+    [[unroll]] for (uint i = 0; i < iq1s_grid_const.length(); i += wgsize.x) {
+        uint idx = i + gl_LocalInvocationIndex.x;
+        // The first operand is a compile-time constant check: when the table
+        // size is a multiple of the workgroup width the per-lane bounds test
+        // can be folded away entirely.
+        if (iq1s_grid_const.length() % wgsize.x == 0 || idx < iq1s_grid_const.length()) {
+            u16vec2 g = unpack16(iq1s_grid_const[idx]);
+            iq1s_grid[2*idx+0] = g.x;
+            iq1s_grid[2*idx+1] = g.y;
+        }
+    }
+    barrier();
+}
+#endif
+
+// IQ2_XXS blocks cover 256 weights each.
+#define QUANT_K_IQ2_XXS 256
+#define QUANT_R_IQ2_XXS 1
+
+struct block_iq2_xxs
+{
+    float16_t d;                     // per-block scale (half precision)
+    uint8_t qs[QUANT_K_IQ2_XXS/4];   // packed quantized data, byte view (64 bytes)
+};
+
+// Same block layout as block_iq2_xxs but with qs viewed as 16-bit words,
+// allowing wider loads in shaders that prefer 16-bit access.
+struct block_iq2_xxs_packed16
+{
+    float16_t d;
+    uint16_t qs[QUANT_K_IQ2_XXS/8];
+};
+
+#if defined(DATA_A_IQ2_XXS)
+
+const uvec2[256] iq2xxs_grid_const = {
+ uvec2(0x08080808, 0x08080808), uvec2(0x0808082b, 0x08080808), uvec2(0x08081919, 0x08080808), uvec2(0x08082b08, 0x08080808),
+ uvec2(0x08082b2b, 0x08080808), uvec2(0x08190819, 0x08080808), uvec2(0x08191908, 0x08080808), uvec2(0x082b0808, 0x08080808),
+ uvec2(0x082b082b, 0x08080808), uvec2(0x082b2b08, 0x08080808), uvec2(0x082b2b2b, 0x08080808), uvec2(0x19080819, 0x08080808),
+ uvec2(0x19081908, 0x08080808), uvec2(0x19190808, 0x08080808), uvec2(0x19192b08, 0x08080808), uvec2(0x192b0819, 0x08080808),
+ uvec2(0x192b1908, 0x08080808), uvec2(0x2b080808, 0x08080808), uvec2(0x2b08082b, 0x08080808), uvec2(0x2b082b2b, 0x08080808),
+ uvec2(0x2b2b082b, 0x08080808), uvec2(0x08080819, 0x08080819), uvec2(0x08081908, 0x08080819), uvec2(0x08190808, 0x08080819),
+ uvec2(0x08191919, 0x08080819), uvec2(0x19080808, 0x08080819), uvec2(0x2b081908, 0x08080819), uvec2(0x2b192b08, 0x08080819),
+ uvec2(0x08080808, 0x0808082b), uvec2(0x0808082b, 0x0808082b), uvec2(0x082b082b, 0x0808082b), uvec2(0x2b08082b, 0x0808082b),
+ uvec2(0x08080819, 0x08081908), uvec2(0x08081908, 0x08081908), uvec2(0x08190808, 0x08081908), uvec2(0x082b0819, 0x08081908),
+ uvec2(0x082b1908, 0x08081908), uvec2(0x19080808, 0x08081908), uvec2(0x1908082b, 0x08081908), uvec2(0x19082b08, 0x08081908),
+ uvec2(0x192b0808, 0x08081908), uvec2(0x2b080819, 0x08081908), uvec2(0x2b081908, 0x08081908), uvec2(0x2b190808, 0x08081908),
+ uvec2(0x2b2b1908, 0x08081908), uvec2(0x08080808, 0x08081919), uvec2(0x0808082b, 0x08081919), uvec2(0x08082b08, 0x08081919),
+ uvec2(0x082b0808, 0x08081919), uvec2(0x1908192b, 0x08081919), uvec2(0x192b2b19, 0x08081919), uvec2(0x2b080808, 0x08081919),
+ uvec2(0x2b190819, 0x08081919), uvec2(0x08082b19, 0x0808192b), uvec2(0x08190808, 0x0808192b), uvec2(0x19080808, 0x0808192b),
+ uvec2(0x2b081908, 0x0808192b), uvec2(0x2b2b1908, 0x0808192b), uvec2(0x08080808, 0x08082b08), uvec2(0x08081919, 0x08082b08),
+ uvec2(0x08082b08, 0x08082b08), uvec2(0x08191908, 0x08082b08), uvec2(0x082b2b08, 0x08082b08), uvec2(0x19080819, 0x08082b08),
+ uvec2(0x19081908, 0x08082b08), uvec2(0x19190808, 0x08082b08), uvec2(0x1919082b, 0x08082b08), uvec2(0x2b082b08, 0x08082b08),
+ uvec2(0x08081908, 0x08082b19), uvec2(0x19080808, 0x08082b19), uvec2(0x0808082b, 0x08082b2b), uvec2(0x08191908, 0x08082b2b),
+ uvec2(0x08080819, 0x08190808), uvec2(0x08081908, 0x08190808), uvec2(0x08190808, 0x08190808), uvec2(0x082b0819, 0x08190808),
+ uvec2(0x19080808, 0x08190808), uvec2(0x192b0808, 0x08190808), uvec2(0x2b081908, 0x08190808), uvec2(0x2b190808, 0x08190808),
+ uvec2(0x2b191919, 0x08190808), uvec2(0x08080808, 0x08190819), uvec2(0x08082b08, 0x08190819), uvec2(0x082b0808, 0x08190819),
+ uvec2(0x19190808, 0x08190819), uvec2(0x19192b2b, 0x08190819), uvec2(0x2b080808, 0x08190819), uvec2(0x082b1908, 0x0819082b),
+ uvec2(0x19081919, 0x0819082b), uvec2(0x08080808, 0x08191908), uvec2(0x08082b08, 0x08191908), uvec2(0x082b0808, 0x08191908),
+ uvec2(0x082b1919, 0x08191908), uvec2(0x19082b19, 0x08191908), uvec2(0x2b080808, 0x08191908), uvec2(0x08192b08, 0x08191919),
+ uvec2(0x192b082b, 0x08191919), uvec2(0x08080808, 0x0819192b), uvec2(0x0819192b, 0x0819192b), uvec2(0x08080819, 0x08192b08),
+ uvec2(0x08081908, 0x08192b08), uvec2(0x08190808, 0x08192b08), uvec2(0x19080808, 0x08192b08), uvec2(0x2b080819, 0x08192b08),
+ uvec2(0x08080808, 0x08192b19), uvec2(0x08081919, 0x08192b19), uvec2(0x2b2b0808, 0x08192b19), uvec2(0x19190819, 0x08192b2b),
+ uvec2(0x08080808, 0x082b0808), uvec2(0x0808082b, 0x082b0808), uvec2(0x08082b2b, 0x082b0808), uvec2(0x19081908, 0x082b0808),
+ uvec2(0x192b0819, 0x082b0808), uvec2(0x2b080808, 0x082b0808), uvec2(0x2b08082b, 0x082b0808), uvec2(0x082b2b19, 0x082b0819),
+ uvec2(0x19082b08, 0x082b0819), uvec2(0x08080808, 0x082b082b), uvec2(0x0808082b, 0x082b082b), uvec2(0x08080819, 0x082b1908),
+ uvec2(0x08081908, 0x082b1908), uvec2(0x08190808, 0x082b1908), uvec2(0x19080808, 0x082b1908), uvec2(0x1919192b, 0x082b1908),
+ uvec2(0x08080808, 0x082b1919), uvec2(0x19080819, 0x082b1919), uvec2(0x192b1908, 0x082b1919), uvec2(0x2b190808, 0x082b192b),
+ uvec2(0x08082b08, 0x082b2b08), uvec2(0x082b0808, 0x082b2b08), uvec2(0x2b191908, 0x082b2b08), uvec2(0x19081908, 0x082b2b2b),
+ uvec2(0x08080819, 0x19080808), uvec2(0x08081908, 0x19080808), uvec2(0x08190808, 0x19080808), uvec2(0x08192b08, 0x19080808),
+ uvec2(0x082b0819, 0x19080808), uvec2(0x082b1908, 0x19080808), uvec2(0x19080808, 0x19080808), uvec2(0x19082b08, 0x19080808),
+ uvec2(0x1919192b, 0x19080808), uvec2(0x192b0808, 0x19080808), uvec2(0x2b080819, 0x19080808), uvec2(0x2b081908, 0x19080808),
+ uvec2(0x2b190808, 0x19080808), uvec2(0x08080808, 0x19080819), uvec2(0x082b0808, 0x19080819), uvec2(0x192b0819, 0x19080819),
+ uvec2(0x2b080808, 0x19080819), uvec2(0x2b081919, 0x19080819), uvec2(0x08080819, 0x1908082b), uvec2(0x08190808, 0x1908082b),
+ uvec2(0x19082b08, 0x1908082b), uvec2(0x1919192b, 0x1908082b), uvec2(0x192b2b08, 0x1908082b), uvec2(0x08080808, 0x19081908),
+ uvec2(0x08082b08, 0x19081908), uvec2(0x082b0808, 0x19081908), uvec2(0x2b080808, 0x19081908), uvec2(0x2b192b19, 0x19081908),
+ uvec2(0x0819082b, 0x19081919), uvec2(0x082b1908, 0x19081919), uvec2(0x08080808, 0x1908192b), uvec2(0x08080819, 0x19082b08),
+ uvec2(0x08081908, 0x19082b08), uvec2(0x08190808, 0x19082b08), uvec2(0x19080808, 0x19082b08), uvec2(0x19081919, 0x19082b08),
+ uvec2(0x08080808, 0x19082b19), uvec2(0x19192b08, 0x19082b19), uvec2(0x192b0819, 0x19082b19), uvec2(0x2b08082b, 0x19082b19),
+ uvec2(0x19081919, 0x19082b2b), uvec2(0x2b190808, 0x19082b2b), uvec2(0x08080808, 0x19190808), uvec2(0x08082b08, 0x19190808),
+ uvec2(0x08190819, 0x19190808), uvec2(0x08192b19, 0x19190808), uvec2(0x082b0808, 0x19190808), uvec2(0x2b080808, 0x19190808),
+ uvec2(0x2b082b08, 0x19190808), uvec2(0x08081908, 0x19190819), uvec2(0x1908082b, 0x19190819), uvec2(0x2b2b1908, 0x19190819),
+ uvec2(0x2b190819, 0x1919082b), uvec2(0x2b190808, 0x19191908), uvec2(0x2b19082b, 0x19191908), uvec2(0x08082b2b, 0x19191919),
+ uvec2(0x08080819, 0x1919192b), uvec2(0x19191908, 0x1919192b), uvec2(0x08080808, 0x19192b08), uvec2(0x08190819, 0x19192b08),
+ uvec2(0x08192b19, 0x19192b08), uvec2(0x192b1908, 0x19192b08), uvec2(0x19080808, 0x19192b19), uvec2(0x08082b08, 0x19192b2b),
+ uvec2(0x08081908, 0x192b0808), uvec2(0x08190808, 0x192b0808), uvec2(0x19080808, 0x192b0808), uvec2(0x192b2b08, 0x192b0808),
+ uvec2(0x08080808, 0x192b0819), uvec2(0x19191919, 0x192b0819), uvec2(0x08192b08, 0x192b082b), uvec2(0x192b0808, 0x192b082b),
+ uvec2(0x08080808, 0x192b1908), uvec2(0x08081919, 0x192b1908), uvec2(0x08190808, 0x192b1919), uvec2(0x0819082b, 0x192b1919),
+ uvec2(0x2b081908, 0x192b1919), uvec2(0x1908082b, 0x192b2b08), uvec2(0x08080808, 0x2b080808), uvec2(0x0808082b, 0x2b080808),
+ uvec2(0x08082b2b, 0x2b080808), uvec2(0x19080819, 0x2b080808), uvec2(0x2b08082b, 0x2b080808), uvec2(0x08081908, 0x2b080819),
+ uvec2(0x08192b08, 0x2b080819), uvec2(0x19080808, 0x2b080819), uvec2(0x08190819, 0x2b08082b), uvec2(0x08080819, 0x2b081908),
+ uvec2(0x08081908, 0x2b081908), uvec2(0x08190808, 0x2b081908), uvec2(0x08191919, 0x2b081908), uvec2(0x19080808, 0x2b081908),
+ uvec2(0x192b0808, 0x2b081908), uvec2(0x08080808, 0x2b081919), uvec2(0x1908192b, 0x2b081919), uvec2(0x2b191908, 0x2b081919),
+ uvec2(0x08082b19, 0x2b08192b), uvec2(0x19080808, 0x2b08192b), uvec2(0x192b0808, 0x2b08192b), uvec2(0x0808082b, 0x2b082b08),
+ uvec2(0x08081908, 0x2b082b19), uvec2(0x08190819, 0x2b082b2b), uvec2(0x08081908, 0x2b190808), uvec2(0x08190808, 0x2b190808),
+ uvec2(0x082b1908, 0x2b190808), uvec2(0x19080808, 0x2b190808), uvec2(0x2b2b0819, 0x2b190808), uvec2(0x0819192b, 0x2b190819),
+ uvec2(0x2b080808, 0x2b190819), uvec2(0x19081919, 0x2b19082b), uvec2(0x08080808, 0x2b191908), uvec2(0x082b082b, 0x2b191908),
+ uvec2(0x19081908, 0x2b191908), uvec2(0x19190819, 0x2b191919), uvec2(0x2b080819, 0x2b192b08), uvec2(0x082b0808, 0x2b192b19),
+ uvec2(0x0808082b, 0x2b2b0808), uvec2(0x19190808, 0x2b2b0808), uvec2(0x2b081919, 0x2b2b0808), uvec2(0x08082b19, 0x2b2b0819),
+ uvec2(0x08080808, 0x2b2b082b), uvec2(0x08192b08, 0x2b2b1908), uvec2(0x19190808, 0x2b2b2b08), uvec2(0x08081908, 0x2b2b2b19)
+};
+
+// Workgroup shared-memory copy of iq2xxs_grid_const, filled by init_iq_shmem().
+shared uvec2 iq2xxs_grid[256];
+
+#define NEEDS_INIT_IQ_SHMEM
+// Cooperatively copy iq2xxs_grid_const into the shared-memory mirror and
+// synchronize the workgroup. Consistency fix: the loop now iterates over the
+// *source* table length like the IQ1 variant (previously it used the
+// destination iq2xxs_grid.length() — identical today since both are 256, but
+// fragile if the sizes ever diverge), and the per-lane index is computed once.
+// The modulo test is a compile-time constant: when the table size divides
+// evenly by the workgroup width, the bounds check folds away.
+void init_iq_shmem(uvec3 wgsize)
+{
+    // copy the table into shared memory and sync
+    [[unroll]] for (uint i = 0; i < iq2xxs_grid_const.length(); i += wgsize.x) {
+        uint idx = i + gl_LocalInvocationIndex.x;
+        if (iq2xxs_grid_const.length() % wgsize.x == 0 || idx < iq2xxs_grid_const.length()) {
+            iq2xxs_grid[idx] = iq2xxs_grid_const[idx];
+        }
+    }
+    barrier();
+}
+
+// Map the generic macros used by the compute shaders onto the IQ2_XXS
+// constants and block structs (closes the DATA_A_IQ2_XXS section).
+#define QUANT_K QUANT_K_IQ2_XXS
+#define QUANT_R QUANT_R_IQ2_XXS
+#define A_TYPE block_iq2_xxs
+#define A_TYPE_PACKED16 block_iq2_xxs_packed16
+#endif
+
+// IQ2_XS blocks cover 256 weights each.
+#define QUANT_K_IQ2_XS 256
+#define QUANT_R_IQ2_XS 1
+
+struct block_iq2_xs
+{
+    float16_t d;                       // per-block scale (half precision)
+    uint16_t qs[QUANT_K_IQ2_XS/8];     // packed quantized data (32 x 16-bit words)
+    uint8_t scales[QUANT_K_IQ2_XS/32]; // per-group scales, byte view (8 bytes)
+};
+
+// Same block layout as block_iq2_xs but with scales viewed as 16-bit words
+// for shaders that prefer 16-bit access.
+struct block_iq2_xs_packed16
+{
+    float16_t d;
+    uint16_t qs[QUANT_K_IQ2_XS/8];
+    uint16_t scales[QUANT_K_IQ2_XS/64];
+};
+
+#if defined(DATA_A_IQ2_XS)
+
+const uvec2 iq2xs_grid_const[512] = {
+ uvec2(0x08080808, 0x08080808), uvec2(0x0808082b, 0x08080808), uvec2(0x08081919, 0x08080808), uvec2(0x08082b08, 0x08080808),
+ uvec2(0x08082b2b, 0x08080808), uvec2(0x08190819, 0x08080808), uvec2(0x08191908, 0x08080808), uvec2(0x0819192b, 0x08080808),
+ uvec2(0x08192b19, 0x08080808), uvec2(0x082b0808, 0x08080808), uvec2(0x082b082b, 0x08080808), uvec2(0x082b1919, 0x08080808),
+ uvec2(0x082b2b08, 0x08080808), uvec2(0x19080819, 0x08080808), uvec2(0x19081908, 0x08080808), uvec2(0x1908192b, 0x08080808),
+ uvec2(0x19082b19, 0x08080808), uvec2(0x19190808, 0x08080808), uvec2(0x1919082b, 0x08080808), uvec2(0x19191919, 0x08080808),
+ uvec2(0x19192b08, 0x08080808), uvec2(0x192b0819, 0x08080808), uvec2(0x192b1908, 0x08080808), uvec2(0x2b080808, 0x08080808),
+ uvec2(0x2b08082b, 0x08080808), uvec2(0x2b081919, 0x08080808), uvec2(0x2b082b08, 0x08080808), uvec2(0x2b190819, 0x08080808),
+ uvec2(0x2b191908, 0x08080808), uvec2(0x2b192b19, 0x08080808), uvec2(0x2b2b0808, 0x08080808), uvec2(0x08080819, 0x08080819),
+ uvec2(0x08081908, 0x08080819), uvec2(0x0808192b, 0x08080819), uvec2(0x08082b19, 0x08080819), uvec2(0x08190808, 0x08080819),
+ uvec2(0x0819082b, 0x08080819), uvec2(0x08191919, 0x08080819), uvec2(0x08192b08, 0x08080819), uvec2(0x08192b2b, 0x08080819),
+ uvec2(0x082b0819, 0x08080819), uvec2(0x082b1908, 0x08080819), uvec2(0x19080808, 0x08080819), uvec2(0x1908082b, 0x08080819),
+ uvec2(0x19081919, 0x08080819), uvec2(0x19082b08, 0x08080819), uvec2(0x19190819, 0x08080819), uvec2(0x19191908, 0x08080819),
+ uvec2(0x192b0808, 0x08080819), uvec2(0x192b2b08, 0x08080819), uvec2(0x2b080819, 0x08080819), uvec2(0x2b081908, 0x08080819),
+ uvec2(0x2b190808, 0x08080819), uvec2(0x08080808, 0x0808082b), uvec2(0x0808082b, 0x0808082b), uvec2(0x08081919, 0x0808082b),
+ uvec2(0x08082b08, 0x0808082b), uvec2(0x08190819, 0x0808082b), uvec2(0x08191908, 0x0808082b), uvec2(0x082b0808, 0x0808082b),
+ uvec2(0x19080819, 0x0808082b), uvec2(0x19081908, 0x0808082b), uvec2(0x19190808, 0x0808082b), uvec2(0x19191919, 0x0808082b),
+ uvec2(0x2b080808, 0x0808082b), uvec2(0x2b082b2b, 0x0808082b), uvec2(0x08080819, 0x08081908), uvec2(0x08081908, 0x08081908),
+ uvec2(0x0808192b, 0x08081908), uvec2(0x08082b19, 0x08081908), uvec2(0x08190808, 0x08081908), uvec2(0x0819082b, 0x08081908),
+ uvec2(0x08191919, 0x08081908), uvec2(0x08192b08, 0x08081908), uvec2(0x082b0819, 0x08081908), uvec2(0x082b1908, 0x08081908),
+ uvec2(0x19080808, 0x08081908), uvec2(0x1908082b, 0x08081908), uvec2(0x19081919, 0x08081908), uvec2(0x19082b08, 0x08081908),
+ uvec2(0x19190819, 0x08081908), uvec2(0x19191908, 0x08081908), uvec2(0x1919192b, 0x08081908), uvec2(0x192b0808, 0x08081908),
+ uvec2(0x2b080819, 0x08081908), uvec2(0x2b081908, 0x08081908), uvec2(0x2b190808, 0x08081908), uvec2(0x08080808, 0x08081919),
+ uvec2(0x0808082b, 0x08081919), uvec2(0x08081919, 0x08081919), uvec2(0x08082b08, 0x08081919), uvec2(0x08190819, 0x08081919),
+ uvec2(0x08191908, 0x08081919), uvec2(0x082b0808, 0x08081919), uvec2(0x19080819, 0x08081919), uvec2(0x19081908, 0x08081919),
+ uvec2(0x19190808, 0x08081919), uvec2(0x192b0819, 0x08081919), uvec2(0x2b080808, 0x08081919), uvec2(0x08080819, 0x0808192b),
+ uvec2(0x08081908, 0x0808192b), uvec2(0x08190808, 0x0808192b), uvec2(0x082b192b, 0x0808192b), uvec2(0x19080808, 0x0808192b),
+ uvec2(0x1908082b, 0x0808192b), uvec2(0x2b081908, 0x0808192b), uvec2(0x08080808, 0x08082b08), uvec2(0x0808082b, 0x08082b08),
+ uvec2(0x08081919, 0x08082b08), uvec2(0x08082b08, 0x08082b08), uvec2(0x08082b2b, 0x08082b08), uvec2(0x08190819, 0x08082b08),
+ uvec2(0x08191908, 0x08082b08), uvec2(0x082b0808, 0x08082b08), uvec2(0x082b1919, 0x08082b08), uvec2(0x19080819, 0x08082b08),
+ uvec2(0x19081908, 0x08082b08), uvec2(0x19190808, 0x08082b08), uvec2(0x19192b08, 0x08082b08), uvec2(0x2b080808, 0x08082b08),
+ uvec2(0x2b2b0808, 0x08082b08), uvec2(0x2b2b2b2b, 0x08082b08), uvec2(0x08080819, 0x08082b19), uvec2(0x08081908, 0x08082b19),
+ uvec2(0x08190808, 0x08082b19), uvec2(0x19080808, 0x08082b19), uvec2(0x2b080819, 0x08082b19), uvec2(0x2b082b19, 0x08082b19),
+ uvec2(0x08080808, 0x08082b2b), uvec2(0x082b0808, 0x08082b2b), uvec2(0x082b2b08, 0x08082b2b), uvec2(0x2b19192b, 0x08082b2b),
+ uvec2(0x2b2b0808, 0x08082b2b), uvec2(0x08080819, 0x08190808), uvec2(0x08081908, 0x08190808), uvec2(0x0808192b, 0x08190808),
+ uvec2(0x08082b19, 0x08190808), uvec2(0x08190808, 0x08190808), uvec2(0x0819082b, 0x08190808), uvec2(0x08191919, 0x08190808),
+ uvec2(0x08192b08, 0x08190808), uvec2(0x082b0819, 0x08190808), uvec2(0x082b1908, 0x08190808), uvec2(0x19080808, 0x08190808),
+ uvec2(0x1908082b, 0x08190808), uvec2(0x19081919, 0x08190808), uvec2(0x19082b08, 0x08190808), uvec2(0x19190819, 0x08190808),
+ uvec2(0x19191908, 0x08190808), uvec2(0x192b0808, 0x08190808), uvec2(0x192b2b2b, 0x08190808), uvec2(0x2b080819, 0x08190808),
+ uvec2(0x2b081908, 0x08190808), uvec2(0x2b190808, 0x08190808), uvec2(0x08080808, 0x08190819), uvec2(0x0808082b, 0x08190819),
+ uvec2(0x08081919, 0x08190819), uvec2(0x08082b08, 0x08190819), uvec2(0x08190819, 0x08190819), uvec2(0x08191908, 0x08190819),
+ uvec2(0x082b0808, 0x08190819), uvec2(0x19080819, 0x08190819), uvec2(0x19081908, 0x08190819), uvec2(0x19190808, 0x08190819),
+ uvec2(0x2b080808, 0x08190819), uvec2(0x2b191908, 0x08190819), uvec2(0x2b19192b, 0x08190819), uvec2(0x08080819, 0x0819082b),
+ uvec2(0x08081908, 0x0819082b), uvec2(0x0808192b, 0x0819082b), uvec2(0x08190808, 0x0819082b), uvec2(0x19080808, 0x0819082b),
+ uvec2(0x192b0808, 0x0819082b), uvec2(0x08080808, 0x08191908), uvec2(0x0808082b, 0x08191908), uvec2(0x08081919, 0x08191908),
+ uvec2(0x08082b08, 0x08191908), uvec2(0x08190819, 0x08191908), uvec2(0x08191908, 0x08191908), uvec2(0x082b0808, 0x08191908),
+ uvec2(0x19080819, 0x08191908), uvec2(0x19081908, 0x08191908), uvec2(0x19082b19, 0x08191908), uvec2(0x19190808, 0x08191908),
+ uvec2(0x192b1908, 0x08191908), uvec2(0x2b080808, 0x08191908), uvec2(0x08080819, 0x08191919), uvec2(0x08081908, 0x08191919),
+ uvec2(0x08190808, 0x08191919), uvec2(0x19080808, 0x08191919), uvec2(0x08080808, 0x0819192b), uvec2(0x08191908, 0x0819192b),
+ uvec2(0x19082b19, 0x0819192b), uvec2(0x08080819, 0x08192b08), uvec2(0x08081908, 0x08192b08), uvec2(0x08190808, 0x08192b08),
+ uvec2(0x0819082b, 0x08192b08), uvec2(0x19080808, 0x08192b08), uvec2(0x19191908, 0x08192b08), uvec2(0x2b08192b, 0x08192b08),
+ uvec2(0x08080808, 0x08192b19), uvec2(0x08081919, 0x08192b19), uvec2(0x192b192b, 0x08192b19), uvec2(0x19190819, 0x08192b2b),
+ uvec2(0x2b2b2b19, 0x08192b2b), uvec2(0x08080808, 0x082b0808), uvec2(0x0808082b, 0x082b0808), uvec2(0x08081919, 0x082b0808),
+ uvec2(0x08082b08, 0x082b0808), uvec2(0x08082b2b, 0x082b0808), uvec2(0x08190819, 0x082b0808), uvec2(0x08191908, 0x082b0808),
+ uvec2(0x082b0808, 0x082b0808), uvec2(0x19080819, 0x082b0808), uvec2(0x19081908, 0x082b0808), uvec2(0x19190808, 0x082b0808),
+ uvec2(0x2b080808, 0x082b0808), uvec2(0x2b2b0808, 0x082b0808), uvec2(0x08080819, 0x082b0819), uvec2(0x08081908, 0x082b0819),
+ uvec2(0x08190808, 0x082b0819), uvec2(0x19080808, 0x082b0819), uvec2(0x19082b08, 0x082b0819), uvec2(0x192b1919, 0x082b0819),
+ uvec2(0x08080808, 0x082b082b), uvec2(0x082b082b, 0x082b082b), uvec2(0x2b080808, 0x082b082b), uvec2(0x2b2b2b08, 0x082b082b),
+ uvec2(0x08080819, 0x082b1908), uvec2(0x08081908, 0x082b1908), uvec2(0x08190808, 0x082b1908), uvec2(0x082b2b19, 0x082b1908),
+ uvec2(0x19080808, 0x082b1908), uvec2(0x08080808, 0x082b1919), uvec2(0x19080819, 0x082b1919), uvec2(0x1919082b, 0x082b1919),
+ uvec2(0x2b192b19, 0x082b1919), uvec2(0x08080819, 0x082b192b), uvec2(0x08192b2b, 0x082b192b), uvec2(0x2b2b192b, 0x082b192b),
+ uvec2(0x08080808, 0x082b2b08), uvec2(0x08082b08, 0x082b2b08), uvec2(0x08082b2b, 0x082b2b08), uvec2(0x082b0808, 0x082b2b08),
+ uvec2(0x19191919, 0x082b2b08), uvec2(0x2b082b08, 0x082b2b08), uvec2(0x2b2b082b, 0x082b2b08), uvec2(0x192b2b08, 0x082b2b19),
+ uvec2(0x2b190808, 0x082b2b19), uvec2(0x08082b08, 0x082b2b2b), uvec2(0x082b0808, 0x082b2b2b), uvec2(0x2b08082b, 0x082b2b2b),
+ uvec2(0x2b082b08, 0x082b2b2b), uvec2(0x2b082b2b, 0x082b2b2b), uvec2(0x08080819, 0x19080808), uvec2(0x08081908, 0x19080808),
+ uvec2(0x0808192b, 0x19080808), uvec2(0x08082b19, 0x19080808), uvec2(0x08190808, 0x19080808), uvec2(0x0819082b, 0x19080808),
+ uvec2(0x08191919, 0x19080808), uvec2(0x08192b08, 0x19080808), uvec2(0x082b0819, 0x19080808), uvec2(0x082b1908, 0x19080808),
+ uvec2(0x19080808, 0x19080808), uvec2(0x1908082b, 0x19080808), uvec2(0x19081919, 0x19080808), uvec2(0x19082b08, 0x19080808),
+ uvec2(0x19082b2b, 0x19080808), uvec2(0x19190819, 0x19080808), uvec2(0x19191908, 0x19080808), uvec2(0x192b0808, 0x19080808),
+ uvec2(0x192b1919, 0x19080808), uvec2(0x2b080819, 0x19080808), uvec2(0x2b081908, 0x19080808), uvec2(0x2b190808, 0x19080808),
+ uvec2(0x08080808, 0x19080819), uvec2(0x0808082b, 0x19080819), uvec2(0x08081919, 0x19080819), uvec2(0x08082b08, 0x19080819),
+ uvec2(0x08190819, 0x19080819), uvec2(0x08191908, 0x19080819), uvec2(0x082b0808, 0x19080819), uvec2(0x19080819, 0x19080819),
+ uvec2(0x19081908, 0x19080819), uvec2(0x19190808, 0x19080819), uvec2(0x2b080808, 0x19080819), uvec2(0x2b081919, 0x19080819),
+ uvec2(0x2b2b082b, 0x19080819), uvec2(0x08080819, 0x1908082b), uvec2(0x08081908, 0x1908082b), uvec2(0x08190808, 0x1908082b),
+ uvec2(0x0819082b, 0x1908082b), uvec2(0x082b2b19, 0x1908082b), uvec2(0x19080808, 0x1908082b), uvec2(0x08080808, 0x19081908),
+ uvec2(0x0808082b, 0x19081908), uvec2(0x08081919, 0x19081908), uvec2(0x08082b08, 0x19081908), uvec2(0x08190819, 0x19081908),
+ uvec2(0x08191908, 0x19081908), uvec2(0x08192b19, 0x19081908), uvec2(0x082b0808, 0x19081908), uvec2(0x19080819, 0x19081908),
+ uvec2(0x19081908, 0x19081908), uvec2(0x19190808, 0x19081908), uvec2(0x2b080808, 0x19081908), uvec2(0x2b191908, 0x19081908),
+ uvec2(0x08080819, 0x19081919), uvec2(0x08081908, 0x19081919), uvec2(0x08190808, 0x19081919), uvec2(0x082b1908, 0x19081919),
+ uvec2(0x19080808, 0x19081919), uvec2(0x2b192b2b, 0x19081919), uvec2(0x08080808, 0x1908192b), uvec2(0x08082b2b, 0x1908192b),
+ uvec2(0x19081908, 0x1908192b), uvec2(0x19190808, 0x1908192b), uvec2(0x08080819, 0x19082b08), uvec2(0x08081908, 0x19082b08),
+ uvec2(0x08190808, 0x19082b08), uvec2(0x19080808, 0x19082b08), uvec2(0x19081919, 0x19082b08), uvec2(0x19191908, 0x19082b08),
+ uvec2(0x192b082b, 0x19082b08), uvec2(0x08080808, 0x19082b19), uvec2(0x08190819, 0x19082b19), uvec2(0x19081908, 0x19082b19),
+ uvec2(0x19190808, 0x19082b19), uvec2(0x192b2b19, 0x19082b19), uvec2(0x08081908, 0x19082b2b), uvec2(0x08080808, 0x19190808),
+ uvec2(0x0808082b, 0x19190808), uvec2(0x08081919, 0x19190808), uvec2(0x08082b08, 0x19190808), uvec2(0x08190819, 0x19190808),
+ uvec2(0x08191908, 0x19190808), uvec2(0x082b0808, 0x19190808), uvec2(0x082b2b08, 0x19190808), uvec2(0x19080819, 0x19190808),
+ uvec2(0x19081908, 0x19190808), uvec2(0x19190808, 0x19190808), uvec2(0x2b080808, 0x19190808), uvec2(0x08080819, 0x19190819),
+ uvec2(0x08081908, 0x19190819), uvec2(0x08190808, 0x19190819), uvec2(0x08191919, 0x19190819), uvec2(0x19080808, 0x19190819),
+ uvec2(0x1908082b, 0x19190819), uvec2(0x08080808, 0x1919082b), uvec2(0x19081908, 0x1919082b), uvec2(0x2b2b2b2b, 0x1919082b),
+ uvec2(0x08080819, 0x19191908), uvec2(0x08081908, 0x19191908), uvec2(0x08190808, 0x19191908), uvec2(0x082b0819, 0x19191908),
+ uvec2(0x19080808, 0x19191908), uvec2(0x192b0808, 0x19191908), uvec2(0x2b080819, 0x19191908), uvec2(0x2b2b0819, 0x19191908),
+ uvec2(0x08080808, 0x19191919), uvec2(0x08082b08, 0x19191919), uvec2(0x2b080808, 0x19191919), uvec2(0x2b082b08, 0x19191919),
+ uvec2(0x082b0819, 0x1919192b), uvec2(0x192b2b08, 0x1919192b), uvec2(0x2b2b0819, 0x1919192b), uvec2(0x08080808, 0x19192b08),
+ uvec2(0x08191908, 0x19192b08), uvec2(0x19080819, 0x19192b08), uvec2(0x19190808, 0x19192b08), uvec2(0x2b192b19, 0x19192b08),
+ uvec2(0x08192b2b, 0x19192b19), uvec2(0x19080808, 0x19192b19), uvec2(0x1908082b, 0x19192b19), uvec2(0x2b081919, 0x19192b2b),
+ uvec2(0x08080819, 0x192b0808), uvec2(0x08081908, 0x192b0808), uvec2(0x08190808, 0x192b0808), uvec2(0x19080808, 0x192b0808),
+ uvec2(0x19191908, 0x192b0808), uvec2(0x192b082b, 0x192b0808), uvec2(0x2b08192b, 0x192b0808), uvec2(0x2b2b2b19, 0x192b0808),
+ uvec2(0x08080808, 0x192b0819), uvec2(0x082b1908, 0x192b082b), uvec2(0x19082b2b, 0x192b082b), uvec2(0x2b19082b, 0x192b082b),
+ uvec2(0x08080808, 0x192b1908), uvec2(0x0819192b, 0x192b1908), uvec2(0x08190808, 0x192b1919), uvec2(0x19080808, 0x192b1919),
+ uvec2(0x19081919, 0x192b1919), uvec2(0x2b2b1908, 0x192b1919), uvec2(0x08080819, 0x192b2b08), uvec2(0x192b2b2b, 0x192b2b08),
+ uvec2(0x082b1919, 0x192b2b19), uvec2(0x0808192b, 0x192b2b2b), uvec2(0x19191908, 0x192b2b2b), uvec2(0x192b082b, 0x192b2b2b),
+ uvec2(0x08080808, 0x2b080808), uvec2(0x0808082b, 0x2b080808), uvec2(0x08081919, 0x2b080808), uvec2(0x08082b08, 0x2b080808),
+ uvec2(0x08190819, 0x2b080808), uvec2(0x08191908, 0x2b080808), uvec2(0x082b0808, 0x2b080808), uvec2(0x082b2b2b, 0x2b080808),
+ uvec2(0x19080819, 0x2b080808), uvec2(0x19081908, 0x2b080808), uvec2(0x19190808, 0x2b080808), uvec2(0x2b080808, 0x2b080808),
+ uvec2(0x2b08082b, 0x2b080808), uvec2(0x2b2b2b08, 0x2b080808), uvec2(0x2b2b2b2b, 0x2b080808), uvec2(0x08080819, 0x2b080819),
+ uvec2(0x08081908, 0x2b080819), uvec2(0x0808192b, 0x2b080819), uvec2(0x08190808, 0x2b080819), uvec2(0x19080808, 0x2b080819),
+ uvec2(0x19190819, 0x2b080819), uvec2(0x19192b19, 0x2b080819), uvec2(0x08080808, 0x2b08082b), uvec2(0x082b0808, 0x2b08082b),
+ uvec2(0x2b080808, 0x2b08082b), uvec2(0x2b08082b, 0x2b08082b), uvec2(0x2b2b0808, 0x2b08082b), uvec2(0x2b2b2b08, 0x2b08082b),
+ uvec2(0x08080819, 0x2b081908), uvec2(0x08081908, 0x2b081908), uvec2(0x08190808, 0x2b081908), uvec2(0x0819082b, 0x2b081908),
+ uvec2(0x08191919, 0x2b081908), uvec2(0x19080808, 0x2b081908), uvec2(0x192b0808, 0x2b081908), uvec2(0x2b082b19, 0x2b081908),
+ uvec2(0x08080808, 0x2b081919), uvec2(0x19081908, 0x2b081919), uvec2(0x2b2b1919, 0x2b081919), uvec2(0x08192b08, 0x2b08192b),
+ uvec2(0x192b2b2b, 0x2b08192b), uvec2(0x08080808, 0x2b082b08), uvec2(0x08082b08, 0x2b082b08), uvec2(0x082b1919, 0x2b082b08),
+ uvec2(0x19192b2b, 0x2b082b08), uvec2(0x2b080808, 0x2b082b08), uvec2(0x2b08082b, 0x2b082b08), uvec2(0x2b2b2b08, 0x2b082b08),
+ uvec2(0x0808192b, 0x2b082b19), uvec2(0x082b082b, 0x2b082b2b), uvec2(0x2b080808, 0x2b082b2b), uvec2(0x2b082b08, 0x2b082b2b),
+ uvec2(0x2b19192b, 0x2b082b2b), uvec2(0x2b2b2b08, 0x2b082b2b), uvec2(0x08080819, 0x2b190808), uvec2(0x08081908, 0x2b190808),
+ uvec2(0x08190808, 0x2b190808), uvec2(0x19080808, 0x2b190808), uvec2(0x1919192b, 0x2b190808), uvec2(0x2b081908, 0x2b190808),
+ uvec2(0x08080808, 0x2b190819), uvec2(0x082b082b, 0x2b190819), uvec2(0x192b1908, 0x2b190819), uvec2(0x1919192b, 0x2b19082b),
+ uvec2(0x2b082b19, 0x2b19082b), uvec2(0x08080808, 0x2b191908), uvec2(0x08081919, 0x2b191908), uvec2(0x19081908, 0x2b191908),
+ uvec2(0x19190808, 0x2b191908), uvec2(0x19192b08, 0x2b191908), uvec2(0x082b2b19, 0x2b191919), uvec2(0x2b190808, 0x2b191919),
+ uvec2(0x2b19082b, 0x2b191919), uvec2(0x19080819, 0x2b19192b), uvec2(0x19190819, 0x2b192b08), uvec2(0x2b2b192b, 0x2b192b08),
+ uvec2(0x19082b19, 0x2b192b19), uvec2(0x08191919, 0x2b192b2b), uvec2(0x192b0808, 0x2b192b2b), uvec2(0x08080808, 0x2b2b0808),
+ uvec2(0x0808082b, 0x2b2b0808), uvec2(0x08082b08, 0x2b2b0808), uvec2(0x08082b2b, 0x2b2b0808), uvec2(0x082b0808, 0x2b2b0808),
+ uvec2(0x082b2b2b, 0x2b2b0808), uvec2(0x2b2b0808, 0x2b2b0808), uvec2(0x19190819, 0x2b2b0819), uvec2(0x19192b19, 0x2b2b0819),
+ uvec2(0x2b2b192b, 0x2b2b0819), uvec2(0x08080808, 0x2b2b082b), uvec2(0x0808082b, 0x2b2b082b), uvec2(0x08082b08, 0x2b2b082b),
+ uvec2(0x082b2b2b, 0x2b2b082b), uvec2(0x2b080808, 0x2b2b082b), uvec2(0x2b2b0808, 0x2b2b082b), uvec2(0x19080808, 0x2b2b1908),
+ uvec2(0x2b191919, 0x2b2b1908), uvec2(0x192b1919, 0x2b2b192b), uvec2(0x2b192b08, 0x2b2b192b), uvec2(0x08082b2b, 0x2b2b2b08),
+ uvec2(0x082b0808, 0x2b2b2b08), uvec2(0x082b082b, 0x2b2b2b08), uvec2(0x082b2b08, 0x2b2b2b08), uvec2(0x2b2b0808, 0x2b2b2b08),
+ uvec2(0x2b2b2b08, 0x2b2b2b08), uvec2(0x08081908, 0x2b2b2b19), uvec2(0x2b081908, 0x2b2b2b19), uvec2(0x2b08192b, 0x2b2b2b19),
+ uvec2(0x082b2b08, 0x2b2b2b2b), uvec2(0x082b2b2b, 0x2b2b2b2b), uvec2(0x2b190819, 0x2b2b2b2b), uvec2(0x2b2b2b2b, 0x2b2b2b2b),
+};
+
+shared uvec2 iq2xs_grid[512]; // workgroup-shared copy of iq2xs_grid_const, filled by init_iq_shmem()
+
+#define NEEDS_INIT_IQ_SHMEM // tells including shaders they must call init_iq_shmem() before reading the table
+void init_iq_shmem(uvec3 wgsize)
+{
+    // Cooperatively copy the constant lookup table into shared memory, then sync the workgroup.
+    [[unroll]] for (uint i = 0; i < iq2xs_grid.length(); i += wgsize.x) {
+        if (iq2xs_grid.length() % wgsize.x == 0 || i + gl_LocalInvocationIndex.x < iq2xs_grid.length()) { // guard folds away when the table size divides the workgroup width; bound matches the loop's (was iq2xs_grid_const.length(), same value but inconsistent)
+            iq2xs_grid[i + gl_LocalInvocationIndex.x] = iq2xs_grid_const[i + gl_LocalInvocationIndex.x];
+        }
+    }
+    barrier();
+}
+
+#define QUANT_K QUANT_K_IQ2_XS // active quant-type aliases for the shared kernel code
+#define QUANT_R QUANT_R_IQ2_XS
+#define A_TYPE block_iq2_xs
+#define A_TYPE_PACKED16 block_iq2_xs_packed16
+#endif // presumably closes #if defined(DATA_A_IQ2_XS) -- opener is above this hunk
+
+#define QUANT_K_IQ2_S 256 // elements per IQ2_S block (sizes the struct arrays below)
+#define QUANT_R_IQ2_S 1
+
+struct block_iq2_s // one IQ2_S quantization block covering QUANT_K_IQ2_S (256) elements
+{
+    float16_t d; // block scale -- assumed to follow ggml's CPU-side block_iq2_s layout; verify against ggml-common
+    uint8_t qs[QUANT_K_IQ2_S/4]; // 64 bytes of packed quant data
+    uint8_t qh[QUANT_K_IQ2_S/32]; // 8 bytes (presumably high bits of the grid indices -- confirm)
+    uint8_t scales[QUANT_K_IQ2_S/32]; // 8 bytes of per-sub-block scales
+};
+
+struct block_iq2_s_packed16 // same bytes as block_iq2_s, viewed with 16-bit elements for wider loads
+{
+    float16_t d; // same scale field as block_iq2_s
+    uint16_t qs[QUANT_K_IQ2_S/8]; // 32 x u16 = 64 bytes, matches qs above
+    uint16_t qh[QUANT_K_IQ2_S/64]; // 4 x u16 = 8 bytes, matches qh above
+    uint16_t scales[QUANT_K_IQ2_S/64]; // 4 x u16 = 8 bytes, matches scales above
+};
+
+#if defined(DATA_A_IQ2_S)
+
+const uvec2 iq2s_grid_const[1024] = {
+ uvec2(0x08080808, 0x08080808), uvec2(0x0808082b, 0x08080808), uvec2(0x08081919, 0x08080808), uvec2(0x08082b08, 0x08080808),
+ uvec2(0x08082b2b, 0x08080808), uvec2(0x08190819, 0x08080808), uvec2(0x08191908, 0x08080808), uvec2(0x0819192b, 0x08080808),
+ uvec2(0x08192b19, 0x08080808), uvec2(0x082b0808, 0x08080808), uvec2(0x082b082b, 0x08080808), uvec2(0x082b1919, 0x08080808),
+ uvec2(0x082b2b08, 0x08080808), uvec2(0x19080819, 0x08080808), uvec2(0x19081908, 0x08080808), uvec2(0x1908192b, 0x08080808),
+ uvec2(0x19082b19, 0x08080808), uvec2(0x19190808, 0x08080808), uvec2(0x1919082b, 0x08080808), uvec2(0x19191919, 0x08080808),
+ uvec2(0x19192b08, 0x08080808), uvec2(0x192b0819, 0x08080808), uvec2(0x192b1908, 0x08080808), uvec2(0x192b192b, 0x08080808),
+ uvec2(0x192b2b19, 0x08080808), uvec2(0x2b080808, 0x08080808), uvec2(0x2b08082b, 0x08080808), uvec2(0x2b081919, 0x08080808),
+ uvec2(0x2b082b08, 0x08080808), uvec2(0x2b190819, 0x08080808), uvec2(0x2b191908, 0x08080808), uvec2(0x2b2b0808, 0x08080808),
+ uvec2(0x2b2b1919, 0x08080808), uvec2(0x2b2b2b2b, 0x08080808), uvec2(0x08080819, 0x08080819), uvec2(0x08081908, 0x08080819),
+ uvec2(0x0808192b, 0x08080819), uvec2(0x08082b19, 0x08080819), uvec2(0x08190808, 0x08080819), uvec2(0x0819082b, 0x08080819),
+ uvec2(0x08191919, 0x08080819), uvec2(0x08192b08, 0x08080819), uvec2(0x082b0819, 0x08080819), uvec2(0x082b1908, 0x08080819),
+ uvec2(0x19080808, 0x08080819), uvec2(0x1908082b, 0x08080819), uvec2(0x19081919, 0x08080819), uvec2(0x19082b08, 0x08080819),
+ uvec2(0x19190819, 0x08080819), uvec2(0x19191908, 0x08080819), uvec2(0x1919192b, 0x08080819), uvec2(0x19192b19, 0x08080819),
+ uvec2(0x192b0808, 0x08080819), uvec2(0x192b1919, 0x08080819), uvec2(0x192b2b08, 0x08080819), uvec2(0x2b080819, 0x08080819),
+ uvec2(0x2b081908, 0x08080819), uvec2(0x2b190808, 0x08080819), uvec2(0x2b19082b, 0x08080819), uvec2(0x2b191919, 0x08080819),
+ uvec2(0x2b2b0819, 0x08080819), uvec2(0x2b2b1908, 0x08080819), uvec2(0x08080808, 0x0808082b), uvec2(0x0808082b, 0x0808082b),
+ uvec2(0x08081919, 0x0808082b), uvec2(0x08082b08, 0x0808082b), uvec2(0x08190819, 0x0808082b), uvec2(0x08191908, 0x0808082b),
+ uvec2(0x082b0808, 0x0808082b), uvec2(0x082b2b2b, 0x0808082b), uvec2(0x19080819, 0x0808082b), uvec2(0x19081908, 0x0808082b),
+ uvec2(0x1908192b, 0x0808082b), uvec2(0x19082b19, 0x0808082b), uvec2(0x19190808, 0x0808082b), uvec2(0x19191919, 0x0808082b),
+ uvec2(0x2b080808, 0x0808082b), uvec2(0x2b081919, 0x0808082b), uvec2(0x2b082b2b, 0x0808082b), uvec2(0x2b191908, 0x0808082b),
+ uvec2(0x2b2b082b, 0x0808082b), uvec2(0x08080819, 0x08081908), uvec2(0x08081908, 0x08081908), uvec2(0x0808192b, 0x08081908),
+ uvec2(0x08082b19, 0x08081908), uvec2(0x08190808, 0x08081908), uvec2(0x0819082b, 0x08081908), uvec2(0x08191919, 0x08081908),
+ uvec2(0x08192b08, 0x08081908), uvec2(0x082b0819, 0x08081908), uvec2(0x082b1908, 0x08081908), uvec2(0x082b192b, 0x08081908),
+ uvec2(0x082b2b19, 0x08081908), uvec2(0x19080808, 0x08081908), uvec2(0x1908082b, 0x08081908), uvec2(0x19081919, 0x08081908),
+ uvec2(0x19082b08, 0x08081908), uvec2(0x19082b2b, 0x08081908), uvec2(0x19190819, 0x08081908), uvec2(0x19191908, 0x08081908),
+ uvec2(0x1919192b, 0x08081908), uvec2(0x19192b19, 0x08081908), uvec2(0x192b0808, 0x08081908), uvec2(0x192b082b, 0x08081908),
+ uvec2(0x192b1919, 0x08081908), uvec2(0x2b080819, 0x08081908), uvec2(0x2b081908, 0x08081908), uvec2(0x2b08192b, 0x08081908),
+ uvec2(0x2b082b19, 0x08081908), uvec2(0x2b190808, 0x08081908), uvec2(0x2b191919, 0x08081908), uvec2(0x2b192b08, 0x08081908),
+ uvec2(0x2b2b0819, 0x08081908), uvec2(0x2b2b1908, 0x08081908), uvec2(0x08080808, 0x08081919), uvec2(0x0808082b, 0x08081919),
+ uvec2(0x08081919, 0x08081919), uvec2(0x08082b08, 0x08081919), uvec2(0x08082b2b, 0x08081919), uvec2(0x08190819, 0x08081919),
+ uvec2(0x08191908, 0x08081919), uvec2(0x0819192b, 0x08081919), uvec2(0x08192b19, 0x08081919), uvec2(0x082b0808, 0x08081919),
+ uvec2(0x082b1919, 0x08081919), uvec2(0x082b2b08, 0x08081919), uvec2(0x19080819, 0x08081919), uvec2(0x19081908, 0x08081919),
+ uvec2(0x1908192b, 0x08081919), uvec2(0x19082b19, 0x08081919), uvec2(0x19190808, 0x08081919), uvec2(0x1919082b, 0x08081919),
+ uvec2(0x19191919, 0x08081919), uvec2(0x19192b08, 0x08081919), uvec2(0x192b0819, 0x08081919), uvec2(0x192b1908, 0x08081919),
+ uvec2(0x2b080808, 0x08081919), uvec2(0x2b08082b, 0x08081919), uvec2(0x2b081919, 0x08081919), uvec2(0x2b082b08, 0x08081919),
+ uvec2(0x2b190819, 0x08081919), uvec2(0x2b191908, 0x08081919), uvec2(0x2b2b0808, 0x08081919), uvec2(0x08080819, 0x0808192b),
+ uvec2(0x08081908, 0x0808192b), uvec2(0x0808192b, 0x0808192b), uvec2(0x08082b19, 0x0808192b), uvec2(0x08190808, 0x0808192b),
+ uvec2(0x08191919, 0x0808192b), uvec2(0x19080808, 0x0808192b), uvec2(0x19081919, 0x0808192b), uvec2(0x19082b08, 0x0808192b),
+ uvec2(0x19190819, 0x0808192b), uvec2(0x19191908, 0x0808192b), uvec2(0x192b0808, 0x0808192b), uvec2(0x2b080819, 0x0808192b),
+ uvec2(0x2b081908, 0x0808192b), uvec2(0x2b190808, 0x0808192b), uvec2(0x08080808, 0x08082b08), uvec2(0x0808082b, 0x08082b08),
+ uvec2(0x08081919, 0x08082b08), uvec2(0x08082b08, 0x08082b08), uvec2(0x08190819, 0x08082b08), uvec2(0x08191908, 0x08082b08),
+ uvec2(0x0819192b, 0x08082b08), uvec2(0x08192b19, 0x08082b08), uvec2(0x082b0808, 0x08082b08), uvec2(0x082b1919, 0x08082b08),
+ uvec2(0x082b2b2b, 0x08082b08), uvec2(0x19080819, 0x08082b08), uvec2(0x19081908, 0x08082b08), uvec2(0x1908192b, 0x08082b08),
+ uvec2(0x19082b19, 0x08082b08), uvec2(0x19190808, 0x08082b08), uvec2(0x1919082b, 0x08082b08), uvec2(0x19191919, 0x08082b08),
+ uvec2(0x19192b08, 0x08082b08), uvec2(0x192b0819, 0x08082b08), uvec2(0x192b1908, 0x08082b08), uvec2(0x2b080808, 0x08082b08),
+ uvec2(0x2b081919, 0x08082b08), uvec2(0x2b191908, 0x08082b08), uvec2(0x2b2b2b2b, 0x08082b08), uvec2(0x08080819, 0x08082b19),
+ uvec2(0x08081908, 0x08082b19), uvec2(0x08190808, 0x08082b19), uvec2(0x0819082b, 0x08082b19), uvec2(0x08191919, 0x08082b19),
+ uvec2(0x08192b08, 0x08082b19), uvec2(0x082b0819, 0x08082b19), uvec2(0x19080808, 0x08082b19), uvec2(0x19081919, 0x08082b19),
+ uvec2(0x19082b08, 0x08082b19), uvec2(0x19190819, 0x08082b19), uvec2(0x19191908, 0x08082b19), uvec2(0x192b0808, 0x08082b19),
+ uvec2(0x2b080819, 0x08082b19), uvec2(0x2b190808, 0x08082b19), uvec2(0x08080808, 0x08082b2b), uvec2(0x08190819, 0x08082b2b),
+ uvec2(0x08191908, 0x08082b2b), uvec2(0x082b082b, 0x08082b2b), uvec2(0x082b2b08, 0x08082b2b), uvec2(0x082b2b2b, 0x08082b2b),
+ uvec2(0x19190808, 0x08082b2b), uvec2(0x2b192b19, 0x08082b2b), uvec2(0x08080819, 0x08190808), uvec2(0x08081908, 0x08190808),
+ uvec2(0x0808192b, 0x08190808), uvec2(0x08082b19, 0x08190808), uvec2(0x08190808, 0x08190808), uvec2(0x0819082b, 0x08190808),
+ uvec2(0x08191919, 0x08190808), uvec2(0x08192b08, 0x08190808), uvec2(0x082b0819, 0x08190808), uvec2(0x082b1908, 0x08190808),
+ uvec2(0x082b192b, 0x08190808), uvec2(0x19080808, 0x08190808), uvec2(0x1908082b, 0x08190808), uvec2(0x19081919, 0x08190808),
+ uvec2(0x19082b08, 0x08190808), uvec2(0x19190819, 0x08190808), uvec2(0x19191908, 0x08190808), uvec2(0x1919192b, 0x08190808),
+ uvec2(0x19192b19, 0x08190808), uvec2(0x192b0808, 0x08190808), uvec2(0x192b082b, 0x08190808), uvec2(0x192b1919, 0x08190808),
+ uvec2(0x192b2b08, 0x08190808), uvec2(0x2b080819, 0x08190808), uvec2(0x2b081908, 0x08190808), uvec2(0x2b08192b, 0x08190808),
+ uvec2(0x2b190808, 0x08190808), uvec2(0x2b191919, 0x08190808), uvec2(0x2b192b08, 0x08190808), uvec2(0x2b2b0819, 0x08190808),
+ uvec2(0x2b2b1908, 0x08190808), uvec2(0x08080808, 0x08190819), uvec2(0x0808082b, 0x08190819), uvec2(0x08081919, 0x08190819),
+ uvec2(0x08082b08, 0x08190819), uvec2(0x08082b2b, 0x08190819), uvec2(0x08190819, 0x08190819), uvec2(0x08191908, 0x08190819),
+ uvec2(0x0819192b, 0x08190819), uvec2(0x08192b19, 0x08190819), uvec2(0x082b0808, 0x08190819), uvec2(0x082b082b, 0x08190819),
+ uvec2(0x082b1919, 0x08190819), uvec2(0x082b2b08, 0x08190819), uvec2(0x19080819, 0x08190819), uvec2(0x19081908, 0x08190819),
+ uvec2(0x1908192b, 0x08190819), uvec2(0x19082b19, 0x08190819), uvec2(0x19190808, 0x08190819), uvec2(0x1919082b, 0x08190819),
+ uvec2(0x19191919, 0x08190819), uvec2(0x19192b08, 0x08190819), uvec2(0x192b0819, 0x08190819), uvec2(0x192b1908, 0x08190819),
+ uvec2(0x2b080808, 0x08190819), uvec2(0x2b08082b, 0x08190819), uvec2(0x2b081919, 0x08190819), uvec2(0x2b082b08, 0x08190819),
+ uvec2(0x2b190819, 0x08190819), uvec2(0x2b191908, 0x08190819), uvec2(0x08080819, 0x0819082b), uvec2(0x08081908, 0x0819082b),
+ uvec2(0x08082b19, 0x0819082b), uvec2(0x08190808, 0x0819082b), uvec2(0x08191919, 0x0819082b), uvec2(0x082b0819, 0x0819082b),
+ uvec2(0x082b1908, 0x0819082b), uvec2(0x19080808, 0x0819082b), uvec2(0x19081919, 0x0819082b), uvec2(0x19190819, 0x0819082b),
+ uvec2(0x19191908, 0x0819082b), uvec2(0x2b080819, 0x0819082b), uvec2(0x2b081908, 0x0819082b), uvec2(0x2b190808, 0x0819082b),
+ uvec2(0x08080808, 0x08191908), uvec2(0x0808082b, 0x08191908), uvec2(0x08081919, 0x08191908), uvec2(0x08082b08, 0x08191908),
+ uvec2(0x08190819, 0x08191908), uvec2(0x08191908, 0x08191908), uvec2(0x0819192b, 0x08191908), uvec2(0x08192b19, 0x08191908),
+ uvec2(0x082b0808, 0x08191908), uvec2(0x082b1919, 0x08191908), uvec2(0x082b2b08, 0x08191908), uvec2(0x19080819, 0x08191908),
+ uvec2(0x19081908, 0x08191908), uvec2(0x1908192b, 0x08191908), uvec2(0x19082b19, 0x08191908), uvec2(0x19190808, 0x08191908),
+ uvec2(0x1919082b, 0x08191908), uvec2(0x19191919, 0x08191908), uvec2(0x19192b08, 0x08191908), uvec2(0x192b0819, 0x08191908),
+ uvec2(0x192b1908, 0x08191908), uvec2(0x2b080808, 0x08191908), uvec2(0x2b08082b, 0x08191908), uvec2(0x2b081919, 0x08191908),
+ uvec2(0x2b082b08, 0x08191908), uvec2(0x2b190819, 0x08191908), uvec2(0x2b191908, 0x08191908), uvec2(0x2b2b0808, 0x08191908),
+ uvec2(0x08080819, 0x08191919), uvec2(0x08081908, 0x08191919), uvec2(0x0808192b, 0x08191919), uvec2(0x08082b19, 0x08191919),
+ uvec2(0x08190808, 0x08191919), uvec2(0x0819082b, 0x08191919), uvec2(0x08191919, 0x08191919), uvec2(0x08192b08, 0x08191919),
+ uvec2(0x082b0819, 0x08191919), uvec2(0x082b1908, 0x08191919), uvec2(0x19080808, 0x08191919), uvec2(0x1908082b, 0x08191919),
+ uvec2(0x19081919, 0x08191919), uvec2(0x19082b08, 0x08191919), uvec2(0x19190819, 0x08191919), uvec2(0x19191908, 0x08191919),
+ uvec2(0x192b0808, 0x08191919), uvec2(0x2b080819, 0x08191919), uvec2(0x2b081908, 0x08191919), uvec2(0x2b190808, 0x08191919),
+ uvec2(0x08080808, 0x0819192b), uvec2(0x08081919, 0x0819192b), uvec2(0x08082b08, 0x0819192b), uvec2(0x08190819, 0x0819192b),
+ uvec2(0x08191908, 0x0819192b), uvec2(0x082b0808, 0x0819192b), uvec2(0x19080819, 0x0819192b), uvec2(0x19081908, 0x0819192b),
+ uvec2(0x19190808, 0x0819192b), uvec2(0x2b080808, 0x0819192b), uvec2(0x2b2b2b2b, 0x0819192b), uvec2(0x08080819, 0x08192b08),
+ uvec2(0x08081908, 0x08192b08), uvec2(0x0808192b, 0x08192b08), uvec2(0x08082b19, 0x08192b08), uvec2(0x08190808, 0x08192b08),
+ uvec2(0x08191919, 0x08192b08), uvec2(0x08192b08, 0x08192b08), uvec2(0x082b0819, 0x08192b08), uvec2(0x19080808, 0x08192b08),
+ uvec2(0x1908082b, 0x08192b08), uvec2(0x19081919, 0x08192b08), uvec2(0x19082b08, 0x08192b08), uvec2(0x19190819, 0x08192b08),
+ uvec2(0x19191908, 0x08192b08), uvec2(0x192b0808, 0x08192b08), uvec2(0x2b080819, 0x08192b08), uvec2(0x2b081908, 0x08192b08),
+ uvec2(0x08080808, 0x08192b19), uvec2(0x0808082b, 0x08192b19), uvec2(0x08081919, 0x08192b19), uvec2(0x08082b08, 0x08192b19),
+ uvec2(0x08190819, 0x08192b19), uvec2(0x08191908, 0x08192b19), uvec2(0x082b0808, 0x08192b19), uvec2(0x19080819, 0x08192b19),
+ uvec2(0x19081908, 0x08192b19), uvec2(0x19190808, 0x08192b19), uvec2(0x192b2b19, 0x08192b19), uvec2(0x2b2b082b, 0x08192b19),
+ uvec2(0x08081908, 0x08192b2b), uvec2(0x08190808, 0x08192b2b), uvec2(0x19080808, 0x08192b2b), uvec2(0x1919192b, 0x08192b2b),
+ uvec2(0x08080808, 0x082b0808), uvec2(0x0808082b, 0x082b0808), uvec2(0x08081919, 0x082b0808), uvec2(0x08082b08, 0x082b0808),
+ uvec2(0x08190819, 0x082b0808), uvec2(0x08191908, 0x082b0808), uvec2(0x0819192b, 0x082b0808), uvec2(0x08192b19, 0x082b0808),
+ uvec2(0x082b0808, 0x082b0808), uvec2(0x082b1919, 0x082b0808), uvec2(0x082b2b2b, 0x082b0808), uvec2(0x19080819, 0x082b0808),
+ uvec2(0x19081908, 0x082b0808), uvec2(0x19190808, 0x082b0808), uvec2(0x1919082b, 0x082b0808), uvec2(0x19191919, 0x082b0808),
+ uvec2(0x192b1908, 0x082b0808), uvec2(0x2b080808, 0x082b0808), uvec2(0x2b082b2b, 0x082b0808), uvec2(0x2b191908, 0x082b0808),
+ uvec2(0x2b2b2b2b, 0x082b0808), uvec2(0x08080819, 0x082b0819), uvec2(0x08081908, 0x082b0819), uvec2(0x08190808, 0x082b0819),
+ uvec2(0x0819082b, 0x082b0819), uvec2(0x08191919, 0x082b0819), uvec2(0x082b0819, 0x082b0819), uvec2(0x19080808, 0x082b0819),
+ uvec2(0x1908082b, 0x082b0819), uvec2(0x19081919, 0x082b0819), uvec2(0x19190819, 0x082b0819), uvec2(0x19191908, 0x082b0819),
+ uvec2(0x192b0808, 0x082b0819), uvec2(0x2b080819, 0x082b0819), uvec2(0x2b081908, 0x082b0819), uvec2(0x2b190808, 0x082b0819),
+ uvec2(0x08080808, 0x082b082b), uvec2(0x08082b2b, 0x082b082b), uvec2(0x082b082b, 0x082b082b), uvec2(0x082b2b08, 0x082b082b),
+ uvec2(0x082b2b2b, 0x082b082b), uvec2(0x19081908, 0x082b082b), uvec2(0x19190808, 0x082b082b), uvec2(0x2b082b08, 0x082b082b),
+ uvec2(0x2b082b2b, 0x082b082b), uvec2(0x2b2b2b08, 0x082b082b), uvec2(0x08080819, 0x082b1908), uvec2(0x08081908, 0x082b1908),
+ uvec2(0x0808192b, 0x082b1908), uvec2(0x08082b19, 0x082b1908), uvec2(0x08190808, 0x082b1908), uvec2(0x08191919, 0x082b1908),
+ uvec2(0x08192b08, 0x082b1908), uvec2(0x082b0819, 0x082b1908), uvec2(0x082b1908, 0x082b1908), uvec2(0x19080808, 0x082b1908),
+ uvec2(0x1908082b, 0x082b1908), uvec2(0x19081919, 0x082b1908), uvec2(0x19082b08, 0x082b1908), uvec2(0x19190819, 0x082b1908),
+ uvec2(0x19191908, 0x082b1908), uvec2(0x192b0808, 0x082b1908), uvec2(0x2b080819, 0x082b1908), uvec2(0x2b081908, 0x082b1908),
+ uvec2(0x2b190808, 0x082b1908), uvec2(0x08080808, 0x082b1919), uvec2(0x08081919, 0x082b1919), uvec2(0x08082b08, 0x082b1919),
+ uvec2(0x08190819, 0x082b1919), uvec2(0x08191908, 0x082b1919), uvec2(0x082b0808, 0x082b1919), uvec2(0x19080819, 0x082b1919),
+ uvec2(0x19081908, 0x082b1919), uvec2(0x19190808, 0x082b1919), uvec2(0x192b192b, 0x082b1919), uvec2(0x2b080808, 0x082b1919),
+ uvec2(0x08080819, 0x082b192b), uvec2(0x08081908, 0x082b192b), uvec2(0x08190808, 0x082b192b), uvec2(0x19080808, 0x082b192b),
+ uvec2(0x19192b19, 0x082b192b), uvec2(0x08080808, 0x082b2b08), uvec2(0x08081919, 0x082b2b08), uvec2(0x08190819, 0x082b2b08),
+ uvec2(0x08191908, 0x082b2b08), uvec2(0x19080819, 0x082b2b08), uvec2(0x19081908, 0x082b2b08), uvec2(0x19190808, 0x082b2b08),
+ uvec2(0x2b082b2b, 0x082b2b08), uvec2(0x2b2b2b2b, 0x082b2b08), uvec2(0x08080819, 0x082b2b19), uvec2(0x08081908, 0x082b2b19),
+ uvec2(0x08190808, 0x082b2b19), uvec2(0x2b191919, 0x082b2b19), uvec2(0x08082b2b, 0x082b2b2b), uvec2(0x082b082b, 0x082b2b2b),
+ uvec2(0x192b1908, 0x082b2b2b), uvec2(0x2b082b08, 0x082b2b2b), uvec2(0x2b082b2b, 0x082b2b2b), uvec2(0x08080819, 0x19080808),
+ uvec2(0x08081908, 0x19080808), uvec2(0x0808192b, 0x19080808), uvec2(0x08082b19, 0x19080808), uvec2(0x08190808, 0x19080808),
+ uvec2(0x0819082b, 0x19080808), uvec2(0x08191919, 0x19080808), uvec2(0x08192b08, 0x19080808), uvec2(0x08192b2b, 0x19080808),
+ uvec2(0x082b0819, 0x19080808), uvec2(0x082b1908, 0x19080808), uvec2(0x082b192b, 0x19080808), uvec2(0x19080808, 0x19080808),
+ uvec2(0x1908082b, 0x19080808), uvec2(0x19081919, 0x19080808), uvec2(0x19082b08, 0x19080808), uvec2(0x19082b2b, 0x19080808),
+ uvec2(0x19190819, 0x19080808), uvec2(0x19191908, 0x19080808), uvec2(0x1919192b, 0x19080808), uvec2(0x19192b19, 0x19080808),
+ uvec2(0x192b0808, 0x19080808), uvec2(0x192b082b, 0x19080808), uvec2(0x192b1919, 0x19080808), uvec2(0x2b080819, 0x19080808),
+ uvec2(0x2b081908, 0x19080808), uvec2(0x2b190808, 0x19080808), uvec2(0x2b191919, 0x19080808), uvec2(0x2b192b08, 0x19080808),
+ uvec2(0x2b2b0819, 0x19080808), uvec2(0x2b2b1908, 0x19080808), uvec2(0x08080808, 0x19080819), uvec2(0x0808082b, 0x19080819),
+ uvec2(0x08081919, 0x19080819), uvec2(0x08082b08, 0x19080819), uvec2(0x08190819, 0x19080819), uvec2(0x08191908, 0x19080819),
+ uvec2(0x0819192b, 0x19080819), uvec2(0x08192b19, 0x19080819), uvec2(0x082b0808, 0x19080819), uvec2(0x082b082b, 0x19080819),
+ uvec2(0x082b1919, 0x19080819), uvec2(0x19080819, 0x19080819), uvec2(0x19081908, 0x19080819), uvec2(0x1908192b, 0x19080819),
+ uvec2(0x19082b19, 0x19080819), uvec2(0x19190808, 0x19080819), uvec2(0x1919082b, 0x19080819), uvec2(0x19191919, 0x19080819),
+ uvec2(0x19192b08, 0x19080819), uvec2(0x192b0819, 0x19080819), uvec2(0x192b1908, 0x19080819), uvec2(0x2b080808, 0x19080819),
+ uvec2(0x2b08082b, 0x19080819), uvec2(0x2b081919, 0x19080819), uvec2(0x2b082b08, 0x19080819), uvec2(0x2b190819, 0x19080819),
+ uvec2(0x2b191908, 0x19080819), uvec2(0x2b2b0808, 0x19080819), uvec2(0x08080819, 0x1908082b), uvec2(0x08081908, 0x1908082b),
+ uvec2(0x08190808, 0x1908082b), uvec2(0x0819082b, 0x1908082b), uvec2(0x08191919, 0x1908082b), uvec2(0x08192b08, 0x1908082b),
+ uvec2(0x082b1908, 0x1908082b), uvec2(0x19080808, 0x1908082b), uvec2(0x19081919, 0x1908082b), uvec2(0x19082b08, 0x1908082b),
+ uvec2(0x19190819, 0x1908082b), uvec2(0x19191908, 0x1908082b), uvec2(0x192b0808, 0x1908082b), uvec2(0x2b080819, 0x1908082b),
+ uvec2(0x2b081908, 0x1908082b), uvec2(0x08080808, 0x19081908), uvec2(0x0808082b, 0x19081908), uvec2(0x08081919, 0x19081908),
+ uvec2(0x08082b08, 0x19081908), uvec2(0x08082b2b, 0x19081908), uvec2(0x08190819, 0x19081908), uvec2(0x08191908, 0x19081908),
+ uvec2(0x0819192b, 0x19081908), uvec2(0x08192b19, 0x19081908), uvec2(0x082b0808, 0x19081908), uvec2(0x082b082b, 0x19081908),
+ uvec2(0x082b1919, 0x19081908), uvec2(0x082b2b08, 0x19081908), uvec2(0x19080819, 0x19081908), uvec2(0x19081908, 0x19081908),
+ uvec2(0x1908192b, 0x19081908), uvec2(0x19082b19, 0x19081908), uvec2(0x19190808, 0x19081908), uvec2(0x1919082b, 0x19081908),
+ uvec2(0x19191919, 0x19081908), uvec2(0x19192b08, 0x19081908), uvec2(0x192b0819, 0x19081908), uvec2(0x192b1908, 0x19081908),
+ uvec2(0x2b080808, 0x19081908), uvec2(0x2b08082b, 0x19081908), uvec2(0x2b081919, 0x19081908), uvec2(0x2b082b08, 0x19081908),
+ uvec2(0x2b190819, 0x19081908), uvec2(0x2b191908, 0x19081908), uvec2(0x2b2b0808, 0x19081908), uvec2(0x08080819, 0x19081919),
+ uvec2(0x08081908, 0x19081919), uvec2(0x0808192b, 0x19081919), uvec2(0x08082b19, 0x19081919), uvec2(0x08190808, 0x19081919),
+ uvec2(0x0819082b, 0x19081919), uvec2(0x08191919, 0x19081919), uvec2(0x08192b08, 0x19081919), uvec2(0x082b0819, 0x19081919),
+ uvec2(0x082b1908, 0x19081919), uvec2(0x19080808, 0x19081919), uvec2(0x1908082b, 0x19081919), uvec2(0x19081919, 0x19081919),
+ uvec2(0x19082b08, 0x19081919), uvec2(0x19190819, 0x19081919), uvec2(0x19191908, 0x19081919), uvec2(0x192b0808, 0x19081919),
+ uvec2(0x192b2b2b, 0x19081919), uvec2(0x2b080819, 0x19081919), uvec2(0x2b081908, 0x19081919), uvec2(0x2b190808, 0x19081919),
+ uvec2(0x08080808, 0x1908192b), uvec2(0x0808082b, 0x1908192b), uvec2(0x08081919, 0x1908192b), uvec2(0x08082b08, 0x1908192b),
+ uvec2(0x08190819, 0x1908192b), uvec2(0x08191908, 0x1908192b), uvec2(0x082b0808, 0x1908192b), uvec2(0x19080819, 0x1908192b),
+ uvec2(0x19081908, 0x1908192b), uvec2(0x19190808, 0x1908192b), uvec2(0x2b080808, 0x1908192b), uvec2(0x2b2b1919, 0x1908192b),
+ uvec2(0x08080819, 0x19082b08), uvec2(0x08081908, 0x19082b08), uvec2(0x08082b19, 0x19082b08), uvec2(0x08190808, 0x19082b08),
+ uvec2(0x0819082b, 0x19082b08), uvec2(0x08191919, 0x19082b08), uvec2(0x08192b08, 0x19082b08), uvec2(0x082b0819, 0x19082b08),
+ uvec2(0x082b1908, 0x19082b08), uvec2(0x19080808, 0x19082b08), uvec2(0x1908082b, 0x19082b08), uvec2(0x19081919, 0x19082b08),
+ uvec2(0x19082b08, 0x19082b08), uvec2(0x19190819, 0x19082b08), uvec2(0x19191908, 0x19082b08), uvec2(0x192b0808, 0x19082b08),
+ uvec2(0x2b081908, 0x19082b08), uvec2(0x2b190808, 0x19082b08), uvec2(0x08080808, 0x19082b19), uvec2(0x0808082b, 0x19082b19),
+ uvec2(0x08081919, 0x19082b19), uvec2(0x08082b08, 0x19082b19), uvec2(0x08190819, 0x19082b19), uvec2(0x08191908, 0x19082b19),
+ uvec2(0x082b0808, 0x19082b19), uvec2(0x19080819, 0x19082b19), uvec2(0x19081908, 0x19082b19), uvec2(0x19190808, 0x19082b19),
+ uvec2(0x2b080808, 0x19082b19), uvec2(0x2b19192b, 0x19082b19), uvec2(0x08080819, 0x19082b2b), uvec2(0x08081908, 0x19082b2b),
+ uvec2(0x08190808, 0x19082b2b), uvec2(0x19080808, 0x19082b2b), uvec2(0x08080808, 0x19190808), uvec2(0x0808082b, 0x19190808),
+ uvec2(0x08081919, 0x19190808), uvec2(0x08082b08, 0x19190808), uvec2(0x08190819, 0x19190808), uvec2(0x08191908, 0x19190808),
+ uvec2(0x0819192b, 0x19190808), uvec2(0x08192b19, 0x19190808), uvec2(0x082b0808, 0x19190808), uvec2(0x082b082b, 0x19190808),
+ uvec2(0x082b1919, 0x19190808), uvec2(0x082b2b08, 0x19190808), uvec2(0x19080819, 0x19190808), uvec2(0x19081908, 0x19190808),
+ uvec2(0x1908192b, 0x19190808), uvec2(0x19082b19, 0x19190808), uvec2(0x19190808, 0x19190808), uvec2(0x1919082b, 0x19190808),
+ uvec2(0x19191919, 0x19190808), uvec2(0x19192b08, 0x19190808), uvec2(0x192b0819, 0x19190808), uvec2(0x192b1908, 0x19190808),
+ uvec2(0x2b080808, 0x19190808), uvec2(0x2b08082b, 0x19190808), uvec2(0x2b081919, 0x19190808), uvec2(0x2b082b08, 0x19190808),
+ uvec2(0x2b190819, 0x19190808), uvec2(0x2b191908, 0x19190808), uvec2(0x08080819, 0x19190819), uvec2(0x08081908, 0x19190819),
+ uvec2(0x0808192b, 0x19190819), uvec2(0x08082b19, 0x19190819), uvec2(0x08190808, 0x19190819), uvec2(0x0819082b, 0x19190819),
+ uvec2(0x08191919, 0x19190819), uvec2(0x08192b08, 0x19190819), uvec2(0x082b0819, 0x19190819), uvec2(0x082b1908, 0x19190819),
+ uvec2(0x19080808, 0x19190819), uvec2(0x1908082b, 0x19190819), uvec2(0x19081919, 0x19190819), uvec2(0x19082b08, 0x19190819),
+ uvec2(0x19190819, 0x19190819), uvec2(0x19191908, 0x19190819), uvec2(0x192b0808, 0x19190819), uvec2(0x2b080819, 0x19190819),
+ uvec2(0x2b081908, 0x19190819), uvec2(0x2b190808, 0x19190819), uvec2(0x08080808, 0x1919082b), uvec2(0x08081919, 0x1919082b),
+ uvec2(0x08082b08, 0x1919082b), uvec2(0x08190819, 0x1919082b), uvec2(0x08191908, 0x1919082b), uvec2(0x082b0808, 0x1919082b),
+ uvec2(0x19080819, 0x1919082b), uvec2(0x19081908, 0x1919082b), uvec2(0x19190808, 0x1919082b), uvec2(0x192b2b19, 0x1919082b),
+ uvec2(0x2b080808, 0x1919082b), uvec2(0x08080819, 0x19191908), uvec2(0x08081908, 0x19191908), uvec2(0x0808192b, 0x19191908),
+ uvec2(0x08082b19, 0x19191908), uvec2(0x08190808, 0x19191908), uvec2(0x0819082b, 0x19191908), uvec2(0x08191919, 0x19191908),
+ uvec2(0x08192b08, 0x19191908), uvec2(0x082b0819, 0x19191908), uvec2(0x082b1908, 0x19191908), uvec2(0x19080808, 0x19191908),
+ uvec2(0x1908082b, 0x19191908), uvec2(0x19081919, 0x19191908), uvec2(0x19082b08, 0x19191908), uvec2(0x19190819, 0x19191908),
+ uvec2(0x19191908, 0x19191908), uvec2(0x192b0808, 0x19191908), uvec2(0x2b080819, 0x19191908), uvec2(0x2b081908, 0x19191908),
+ uvec2(0x2b190808, 0x19191908), uvec2(0x08080808, 0x19191919), uvec2(0x0808082b, 0x19191919), uvec2(0x08081919, 0x19191919),
+ uvec2(0x08082b08, 0x19191919), uvec2(0x08190819, 0x19191919), uvec2(0x08191908, 0x19191919), uvec2(0x082b0808, 0x19191919),
+ uvec2(0x19080819, 0x19191919), uvec2(0x19081908, 0x19191919), uvec2(0x19190808, 0x19191919), uvec2(0x2b080808, 0x19191919),
+ uvec2(0x08080819, 0x1919192b), uvec2(0x08081908, 0x1919192b), uvec2(0x08190808, 0x1919192b), uvec2(0x082b192b, 0x1919192b),
+ uvec2(0x19080808, 0x1919192b), uvec2(0x08080808, 0x19192b08), uvec2(0x0808082b, 0x19192b08), uvec2(0x08081919, 0x19192b08),
+ uvec2(0x08082b08, 0x19192b08), uvec2(0x08190819, 0x19192b08), uvec2(0x08191908, 0x19192b08), uvec2(0x082b0808, 0x19192b08),
+ uvec2(0x19080819, 0x19192b08), uvec2(0x19081908, 0x19192b08), uvec2(0x19190808, 0x19192b08), uvec2(0x19192b2b, 0x19192b08),
+ uvec2(0x2b080808, 0x19192b08), uvec2(0x08080819, 0x19192b19), uvec2(0x08081908, 0x19192b19), uvec2(0x08190808, 0x19192b19),
+ uvec2(0x19080808, 0x19192b19), uvec2(0x08080808, 0x19192b2b), uvec2(0x08192b19, 0x19192b2b), uvec2(0x2b081919, 0x19192b2b),
+ uvec2(0x2b2b2b08, 0x19192b2b), uvec2(0x08080819, 0x192b0808), uvec2(0x08081908, 0x192b0808), uvec2(0x0808192b, 0x192b0808),
+ uvec2(0x08190808, 0x192b0808), uvec2(0x0819082b, 0x192b0808), uvec2(0x08191919, 0x192b0808), uvec2(0x08192b08, 0x192b0808),
+ uvec2(0x082b0819, 0x192b0808), uvec2(0x082b1908, 0x192b0808), uvec2(0x19080808, 0x192b0808), uvec2(0x19081919, 0x192b0808),
+ uvec2(0x19082b08, 0x192b0808), uvec2(0x19190819, 0x192b0808), uvec2(0x19191908, 0x192b0808), uvec2(0x192b0808, 0x192b0808),
+ uvec2(0x2b081908, 0x192b0808), uvec2(0x2b190808, 0x192b0808), uvec2(0x08080808, 0x192b0819), uvec2(0x0808082b, 0x192b0819),
+ uvec2(0x08081919, 0x192b0819), uvec2(0x08082b08, 0x192b0819), uvec2(0x08190819, 0x192b0819), uvec2(0x08191908, 0x192b0819),
+ uvec2(0x082b0808, 0x192b0819), uvec2(0x19080819, 0x192b0819), uvec2(0x19081908, 0x192b0819), uvec2(0x19190808, 0x192b0819),
+ uvec2(0x2b080808, 0x192b0819), uvec2(0x2b192b19, 0x192b0819), uvec2(0x08081908, 0x192b082b), uvec2(0x08190808, 0x192b082b),
+ uvec2(0x19080808, 0x192b082b), uvec2(0x1919192b, 0x192b082b), uvec2(0x2b2b0819, 0x192b082b), uvec2(0x08080808, 0x192b1908),
+ uvec2(0x08081919, 0x192b1908), uvec2(0x08082b08, 0x192b1908), uvec2(0x08190819, 0x192b1908), uvec2(0x08191908, 0x192b1908),
+ uvec2(0x082b0808, 0x192b1908), uvec2(0x19080819, 0x192b1908), uvec2(0x19081908, 0x192b1908), uvec2(0x19190808, 0x192b1908),
+ uvec2(0x2b080808, 0x192b1908), uvec2(0x08080819, 0x192b1919), uvec2(0x08081908, 0x192b1919), uvec2(0x08190808, 0x192b1919),
+ uvec2(0x19080808, 0x192b1919), uvec2(0x19082b2b, 0x192b1919), uvec2(0x192b2b08, 0x192b1919), uvec2(0x2b19082b, 0x192b1919),
+ uvec2(0x08080808, 0x192b192b), uvec2(0x2b191908, 0x192b192b), uvec2(0x08080819, 0x192b2b08), uvec2(0x08081908, 0x192b2b08),
+ uvec2(0x08190808, 0x192b2b08), uvec2(0x192b1919, 0x192b2b08), uvec2(0x2b192b08, 0x192b2b08), uvec2(0x08080808, 0x192b2b19),
+ uvec2(0x082b2b2b, 0x192b2b19), uvec2(0x1908082b, 0x192b2b2b), uvec2(0x2b2b0819, 0x192b2b2b), uvec2(0x08080808, 0x2b080808),
+ uvec2(0x0808082b, 0x2b080808), uvec2(0x08081919, 0x2b080808), uvec2(0x08082b08, 0x2b080808), uvec2(0x08190819, 0x2b080808),
+ uvec2(0x08191908, 0x2b080808), uvec2(0x08192b19, 0x2b080808), uvec2(0x082b0808, 0x2b080808), uvec2(0x082b1919, 0x2b080808),
+ uvec2(0x19080819, 0x2b080808), uvec2(0x19081908, 0x2b080808), uvec2(0x19190808, 0x2b080808), uvec2(0x1919082b, 0x2b080808),
+ uvec2(0x19191919, 0x2b080808), uvec2(0x19192b08, 0x2b080808), uvec2(0x192b0819, 0x2b080808), uvec2(0x2b080808, 0x2b080808),
+ uvec2(0x2b081919, 0x2b080808), uvec2(0x2b190819, 0x2b080808), uvec2(0x2b191908, 0x2b080808), uvec2(0x08080819, 0x2b080819),
+ uvec2(0x08081908, 0x2b080819), uvec2(0x08082b19, 0x2b080819), uvec2(0x08190808, 0x2b080819), uvec2(0x0819082b, 0x2b080819),
+ uvec2(0x08191919, 0x2b080819), uvec2(0x08192b08, 0x2b080819), uvec2(0x082b0819, 0x2b080819), uvec2(0x082b1908, 0x2b080819),
+ uvec2(0x19080808, 0x2b080819), uvec2(0x1908082b, 0x2b080819), uvec2(0x19081919, 0x2b080819), uvec2(0x19082b08, 0x2b080819),
+ uvec2(0x19190819, 0x2b080819), uvec2(0x19191908, 0x2b080819), uvec2(0x2b080819, 0x2b080819), uvec2(0x2b081908, 0x2b080819),
+ uvec2(0x2b190808, 0x2b080819), uvec2(0x2b2b2b19, 0x2b080819), uvec2(0x08080808, 0x2b08082b), uvec2(0x08081919, 0x2b08082b),
+ uvec2(0x08082b2b, 0x2b08082b), uvec2(0x08190819, 0x2b08082b), uvec2(0x08191908, 0x2b08082b), uvec2(0x19080819, 0x2b08082b),
+ uvec2(0x19081908, 0x2b08082b), uvec2(0x19190808, 0x2b08082b), uvec2(0x08080819, 0x2b081908), uvec2(0x08081908, 0x2b081908),
+ uvec2(0x0808192b, 0x2b081908), uvec2(0x08082b19, 0x2b081908), uvec2(0x08190808, 0x2b081908), uvec2(0x0819082b, 0x2b081908),
+ uvec2(0x08191919, 0x2b081908), uvec2(0x08192b08, 0x2b081908), uvec2(0x082b0819, 0x2b081908), uvec2(0x19080808, 0x2b081908),
+ uvec2(0x1908082b, 0x2b081908), uvec2(0x19081919, 0x2b081908), uvec2(0x19082b08, 0x2b081908), uvec2(0x19190819, 0x2b081908),
+ uvec2(0x19191908, 0x2b081908), uvec2(0x192b0808, 0x2b081908), uvec2(0x2b080819, 0x2b081908), uvec2(0x2b081908, 0x2b081908),
+ uvec2(0x2b190808, 0x2b081908), uvec2(0x08080808, 0x2b081919), uvec2(0x0808082b, 0x2b081919), uvec2(0x08081919, 0x2b081919),
+ uvec2(0x08082b08, 0x2b081919), uvec2(0x08190819, 0x2b081919), uvec2(0x08191908, 0x2b081919), uvec2(0x082b0808, 0x2b081919),
+ uvec2(0x19080819, 0x2b081919), uvec2(0x19081908, 0x2b081919), uvec2(0x19190808, 0x2b081919), uvec2(0x2b080808, 0x2b081919),
+ uvec2(0x2b082b2b, 0x2b081919), uvec2(0x08080819, 0x2b08192b), uvec2(0x08081908, 0x2b08192b), uvec2(0x08190808, 0x2b08192b),
+ uvec2(0x082b2b19, 0x2b08192b), uvec2(0x19080808, 0x2b08192b), uvec2(0x08080808, 0x2b082b08), uvec2(0x08081919, 0x2b082b08),
+ uvec2(0x08190819, 0x2b082b08), uvec2(0x08191908, 0x2b082b08), uvec2(0x19080819, 0x2b082b08), uvec2(0x19081908, 0x2b082b08),
+ uvec2(0x19190808, 0x2b082b08), uvec2(0x2b2b082b, 0x2b082b08), uvec2(0x08080819, 0x2b082b19), uvec2(0x08081908, 0x2b082b19),
+ uvec2(0x19080808, 0x2b082b19), uvec2(0x192b1919, 0x2b082b19), uvec2(0x082b082b, 0x2b082b2b), uvec2(0x19192b08, 0x2b082b2b),
+ uvec2(0x19192b2b, 0x2b082b2b), uvec2(0x2b08082b, 0x2b082b2b), uvec2(0x2b2b082b, 0x2b082b2b), uvec2(0x08080819, 0x2b190808),
+ uvec2(0x08081908, 0x2b190808), uvec2(0x08082b19, 0x2b190808), uvec2(0x08190808, 0x2b190808), uvec2(0x0819082b, 0x2b190808),
+ uvec2(0x08191919, 0x2b190808), uvec2(0x08192b08, 0x2b190808), uvec2(0x082b1908, 0x2b190808), uvec2(0x19080808, 0x2b190808),
+ uvec2(0x1908082b, 0x2b190808), uvec2(0x19081919, 0x2b190808), uvec2(0x19082b08, 0x2b190808), uvec2(0x19190819, 0x2b190808),
+ uvec2(0x19191908, 0x2b190808), uvec2(0x192b0808, 0x2b190808), uvec2(0x2b080819, 0x2b190808), uvec2(0x2b081908, 0x2b190808),
+ uvec2(0x2b190808, 0x2b190808), uvec2(0x08080808, 0x2b190819), uvec2(0x08081919, 0x2b190819), uvec2(0x08190819, 0x2b190819),
+ uvec2(0x08191908, 0x2b190819), uvec2(0x19080819, 0x2b190819), uvec2(0x19081908, 0x2b190819), uvec2(0x19190808, 0x2b190819),
+ uvec2(0x19192b2b, 0x2b190819), uvec2(0x08080819, 0x2b19082b), uvec2(0x08081908, 0x2b19082b), uvec2(0x08190808, 0x2b19082b),
+ uvec2(0x19080808, 0x2b19082b), uvec2(0x2b2b192b, 0x2b19082b), uvec2(0x08080808, 0x2b191908), uvec2(0x0808082b, 0x2b191908),
+ uvec2(0x08081919, 0x2b191908), uvec2(0x08082b08, 0x2b191908), uvec2(0x08190819, 0x2b191908), uvec2(0x08191908, 0x2b191908),
+ uvec2(0x082b0808, 0x2b191908), uvec2(0x19080819, 0x2b191908), uvec2(0x19081908, 0x2b191908), uvec2(0x19190808, 0x2b191908),
+ uvec2(0x2b080808, 0x2b191908), uvec2(0x2b19192b, 0x2b191908), uvec2(0x08080819, 0x2b191919), uvec2(0x08081908, 0x2b191919),
+ uvec2(0x08190808, 0x2b191919), uvec2(0x19080808, 0x2b191919), uvec2(0x2b192b08, 0x2b191919), uvec2(0x2b2b0819, 0x2b191919),
+ uvec2(0x08080808, 0x2b19192b), uvec2(0x1908192b, 0x2b19192b), uvec2(0x192b1908, 0x2b19192b), uvec2(0x08080819, 0x2b192b08),
+ uvec2(0x08081908, 0x2b192b08), uvec2(0x08190808, 0x2b192b08), uvec2(0x082b192b, 0x2b192b08), uvec2(0x19080808, 0x2b192b08),
+ uvec2(0x2b2b2b19, 0x2b192b08), uvec2(0x08080808, 0x2b192b19), uvec2(0x19082b19, 0x2b192b19), uvec2(0x1919082b, 0x2b192b19),
+ uvec2(0x2b190808, 0x2b192b2b), uvec2(0x08080808, 0x2b2b0808), uvec2(0x08081919, 0x2b2b0808), uvec2(0x08082b2b, 0x2b2b0808),
+ uvec2(0x08191908, 0x2b2b0808), uvec2(0x082b082b, 0x2b2b0808), uvec2(0x082b2b2b, 0x2b2b0808), uvec2(0x19080819, 0x2b2b0808),
+ uvec2(0x19081908, 0x2b2b0808), uvec2(0x19190808, 0x2b2b0808), uvec2(0x2b2b082b, 0x2b2b0808), uvec2(0x2b2b2b2b, 0x2b2b0808),
+ uvec2(0x19080808, 0x2b2b0819), uvec2(0x192b1919, 0x2b2b0819), uvec2(0x0808082b, 0x2b2b082b), uvec2(0x08082b2b, 0x2b2b082b),
+ uvec2(0x082b082b, 0x2b2b082b), uvec2(0x082b2b08, 0x2b2b082b), uvec2(0x082b2b2b, 0x2b2b082b), uvec2(0x2b08082b, 0x2b2b082b),
+ uvec2(0x2b082b08, 0x2b2b082b), uvec2(0x2b082b2b, 0x2b2b082b), uvec2(0x2b2b2b08, 0x2b2b082b), uvec2(0x08080819, 0x2b2b1908),
+ uvec2(0x08081908, 0x2b2b1908), uvec2(0x08190808, 0x2b2b1908), uvec2(0x19080808, 0x2b2b1908), uvec2(0x2b082b19, 0x2b2b1908),
+ uvec2(0x2b2b1908, 0x2b2b1908), uvec2(0x08080808, 0x2b2b1919), uvec2(0x08192b19, 0x2b2b1919), uvec2(0x19190819, 0x2b2b192b),
+ uvec2(0x08082b2b, 0x2b2b2b08), uvec2(0x082b2b08, 0x2b2b2b08), uvec2(0x2b2b082b, 0x2b2b2b08), uvec2(0x19191908, 0x2b2b2b19),
+ uvec2(0x2b08192b, 0x2b2b2b19), uvec2(0x08082b08, 0x2b2b2b2b), uvec2(0x08082b2b, 0x2b2b2b2b), uvec2(0x082b0808, 0x2b2b2b2b),
+ uvec2(0x082b082b, 0x2b2b2b2b), uvec2(0x082b2b08, 0x2b2b2b2b), uvec2(0x2b082b08, 0x2b2b2b2b), uvec2(0x2b2b2b2b, 0x2b2b2b2b)
+};
+
+shared uvec2 iq2s_grid[1024];
+
+#define NEEDS_INIT_IQ_SHMEM
+void init_iq_shmem(uvec3 wgsize)
+{
+ // copy the table into shared memory and sync
+ [[unroll]] for (uint i = 0; i < iq2s_grid.length(); i += wgsize.x) {
+ if (iq2s_grid.length() % wgsize.x == 0 || i + gl_LocalInvocationIndex.x < iq2s_grid_const.length()) {
+ iq2s_grid[i + gl_LocalInvocationIndex.x] = iq2s_grid_const[i + gl_LocalInvocationIndex.x];
+ }
+ }
+ barrier();
+}
+
+#define QUANT_K QUANT_K_IQ2_S
+#define QUANT_R QUANT_R_IQ2_S
+#define A_TYPE block_iq2_s
+#define A_TYPE_PACKED16 block_iq2_s_packed16
+#endif
+
+#define QUANT_K_IQ3_XXS 256
+#define QUANT_R_IQ3_XXS 1
+
+struct block_iq3_xxs
+{
+ float16_t d;
+ uint8_t qs[QUANT_K_IQ3_XXS/4 + QUANT_K_IQ3_XXS/8];
+};
+
+struct block_iq3_xxs_packed16
+{
+ float16_t d;
+ uint16_t qs[QUANT_K_IQ3_XXS/8 + QUANT_K_IQ3_XXS/16];
+};
+
+#if defined(DATA_A_IQ3_XXS)
+
+const uint32_t iq3xxs_grid_const[256] = {
+ 0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414,
+ 0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14,
+ 0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404,
+ 0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e,
+ 0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c,
+ 0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c,
+ 0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34,
+ 0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c,
+ 0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c,
+ 0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04,
+ 0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c,
+ 0x0c24042c, 0x0c242c04, 0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414,
+ 0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434,
+ 0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c,
+ 0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e,
+ 0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24,
+ 0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24,
+ 0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c,
+ 0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c,
+ 0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14,
+ 0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414,
+ 0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e,
+ 0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404,
+ 0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c,
+ 0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c,
+ 0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14,
+ 0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c,
+ 0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c,
+ 0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14,
+ 0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14,
+ 0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c,
+ 0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 0x3e2c1424, 0x3e341c04,
+};
+
+shared uint32_t iq3xxs_grid[256];
+
+#define NEEDS_INIT_IQ_SHMEM
+void init_iq_shmem(uvec3 wgsize)
+{
+ // copy the table into shared memory and sync
+ [[unroll]] for (uint i = 0; i < iq3xxs_grid.length(); i += wgsize.x) {
+ if (iq3xxs_grid.length() % wgsize.x == 0 || i + gl_LocalInvocationIndex.x < iq3xxs_grid.length()) {
+ iq3xxs_grid[i + gl_LocalInvocationIndex.x] = iq3xxs_grid_const[i + gl_LocalInvocationIndex.x];
+ }
+ }
+ barrier();
+}
+
+#define QUANT_K QUANT_K_IQ3_XXS
+#define QUANT_R QUANT_R_IQ3_XXS
+#define A_TYPE block_iq3_xxs
+#define A_TYPE_PACKED16 block_iq3_xxs_packed16
+#endif
+
+#define QUANT_K_IQ3_S 256
+#define QUANT_R_IQ3_S 1
+
+struct block_iq3_s
+{
+ float16_t d;
+ uint8_t qs[QUANT_K_IQ3_S/4];
+ uint8_t qh[QUANT_K_IQ3_S/32];
+ uint8_t signs[QUANT_K_IQ3_S/8];
+ uint8_t scales[QUANT_K_IQ3_S/64];
+};
+
+struct block_iq3_s_packed16
+{
+ float16_t d;
+ uint16_t qs[QUANT_K_IQ3_S/4/2];
+ uint16_t qh[QUANT_K_IQ3_S/32/2];
+ uint16_t signs[QUANT_K_IQ3_S/8/2];
+ uint16_t scales[QUANT_K_IQ3_S/64/2];
+};
+
+#if defined(DATA_A_IQ3_S)
+
+const uint32_t iq3s_grid_const[512] = {
+ 0x01010101, 0x01010103, 0x01010105, 0x0101010b, 0x0101010f, 0x01010301, 0x01010303, 0x01010305,
+ 0x01010309, 0x0101030d, 0x01010501, 0x01010503, 0x0101050b, 0x01010707, 0x01010901, 0x01010905,
+ 0x0101090b, 0x0101090f, 0x01010b03, 0x01010b07, 0x01010d01, 0x01010d05, 0x01010f03, 0x01010f09,
+ 0x01010f0f, 0x01030101, 0x01030103, 0x01030105, 0x01030109, 0x01030301, 0x01030303, 0x0103030b,
+ 0x01030501, 0x01030507, 0x0103050f, 0x01030703, 0x0103070b, 0x01030909, 0x01030d03, 0x01030d0b,
+ 0x01030f05, 0x01050101, 0x01050103, 0x0105010b, 0x0105010f, 0x01050301, 0x01050307, 0x0105030d,
+ 0x01050503, 0x0105050b, 0x01050701, 0x01050709, 0x01050905, 0x0105090b, 0x0105090f, 0x01050b03,
+ 0x01050b07, 0x01050f01, 0x01050f07, 0x01070107, 0x01070303, 0x0107030b, 0x01070501, 0x01070505,
+ 0x01070703, 0x01070707, 0x0107070d, 0x01070909, 0x01070b01, 0x01070b05, 0x01070d0f, 0x01070f03,
+ 0x01070f0b, 0x01090101, 0x01090307, 0x0109030f, 0x01090503, 0x01090509, 0x01090705, 0x01090901,
+ 0x01090907, 0x01090b03, 0x01090f01, 0x010b0105, 0x010b0109, 0x010b0501, 0x010b0505, 0x010b050d,
+ 0x010b0707, 0x010b0903, 0x010b090b, 0x010b090f, 0x010b0d0d, 0x010b0f07, 0x010d010d, 0x010d0303,
+ 0x010d0307, 0x010d0703, 0x010d0b05, 0x010d0f03, 0x010f0101, 0x010f0105, 0x010f0109, 0x010f0501,
+ 0x010f0505, 0x010f050d, 0x010f0707, 0x010f0b01, 0x010f0b09, 0x03010101, 0x03010103, 0x03010105,
+ 0x03010109, 0x03010301, 0x03010303, 0x03010307, 0x0301030b, 0x0301030f, 0x03010501, 0x03010505,
+ 0x03010703, 0x03010709, 0x0301070d, 0x03010b09, 0x03010b0d, 0x03010d03, 0x03010f05, 0x03030101,
+ 0x03030103, 0x03030107, 0x0303010d, 0x03030301, 0x03030309, 0x03030503, 0x03030701, 0x03030707,
+ 0x03030903, 0x03030b01, 0x03030b05, 0x03030f01, 0x03030f0d, 0x03050101, 0x03050305, 0x0305030b,
+ 0x0305030f, 0x03050501, 0x03050509, 0x03050705, 0x03050901, 0x03050907, 0x03050b0b, 0x03050d01,
+ 0x03050f05, 0x03070103, 0x03070109, 0x0307010f, 0x03070301, 0x03070307, 0x03070503, 0x0307050f,
+ 0x03070701, 0x03070709, 0x03070903, 0x03070d05, 0x03070f01, 0x03090107, 0x0309010b, 0x03090305,
+ 0x03090309, 0x03090703, 0x03090707, 0x03090905, 0x0309090d, 0x03090b01, 0x03090b09, 0x030b0103,
+ 0x030b0301, 0x030b0307, 0x030b0503, 0x030b0701, 0x030b0705, 0x030b0b03, 0x030d0501, 0x030d0509,
+ 0x030d050f, 0x030d0909, 0x030d090d, 0x030f0103, 0x030f0107, 0x030f0301, 0x030f0305, 0x030f0503,
+ 0x030f070b, 0x030f0903, 0x030f0d05, 0x030f0f01, 0x05010101, 0x05010103, 0x05010107, 0x0501010b,
+ 0x0501010f, 0x05010301, 0x05010305, 0x05010309, 0x0501030d, 0x05010503, 0x05010507, 0x0501050f,
+ 0x05010701, 0x05010705, 0x05010903, 0x05010907, 0x0501090b, 0x05010b01, 0x05010b05, 0x05010d0f,
+ 0x05010f01, 0x05010f07, 0x05010f0b, 0x05030101, 0x05030105, 0x05030301, 0x05030307, 0x0503030f,
+ 0x05030505, 0x0503050b, 0x05030703, 0x05030709, 0x05030905, 0x05030b03, 0x05050103, 0x05050109,
+ 0x0505010f, 0x05050503, 0x05050507, 0x05050701, 0x0505070f, 0x05050903, 0x05050b07, 0x05050b0f,
+ 0x05050f03, 0x05050f09, 0x05070101, 0x05070105, 0x0507010b, 0x05070303, 0x05070505, 0x05070509,
+ 0x05070703, 0x05070707, 0x05070905, 0x05070b01, 0x05070d0d, 0x05090103, 0x0509010f, 0x05090501,
+ 0x05090507, 0x05090705, 0x0509070b, 0x05090903, 0x05090f05, 0x05090f0b, 0x050b0109, 0x050b0303,
+ 0x050b0505, 0x050b070f, 0x050b0901, 0x050b0b07, 0x050b0f01, 0x050d0101, 0x050d0105, 0x050d010f,
+ 0x050d0503, 0x050d0b0b, 0x050d0d03, 0x050f010b, 0x050f0303, 0x050f050d, 0x050f0701, 0x050f0907,
+ 0x050f0b01, 0x07010105, 0x07010303, 0x07010307, 0x0701030b, 0x0701030f, 0x07010505, 0x07010703,
+ 0x07010707, 0x0701070b, 0x07010905, 0x07010909, 0x0701090f, 0x07010b03, 0x07010d07, 0x07010f03,
+ 0x07030103, 0x07030107, 0x0703010b, 0x07030309, 0x07030503, 0x07030507, 0x07030901, 0x07030d01,
+ 0x07030f05, 0x07030f0d, 0x07050101, 0x07050305, 0x07050501, 0x07050705, 0x07050709, 0x07050b01,
+ 0x07070103, 0x07070301, 0x07070309, 0x07070503, 0x07070507, 0x0707050f, 0x07070701, 0x07070903,
+ 0x07070907, 0x0707090f, 0x07070b0b, 0x07070f07, 0x07090107, 0x07090303, 0x0709030d, 0x07090505,
+ 0x07090703, 0x07090b05, 0x07090d01, 0x07090d09, 0x070b0103, 0x070b0301, 0x070b0305, 0x070b050b,
+ 0x070b0705, 0x070b0909, 0x070b0b0d, 0x070b0f07, 0x070d030d, 0x070d0903, 0x070f0103, 0x070f0107,
+ 0x070f0501, 0x070f0505, 0x070f070b, 0x09010101, 0x09010109, 0x09010305, 0x09010501, 0x09010509,
+ 0x0901050f, 0x09010705, 0x09010903, 0x09010b01, 0x09010f01, 0x09030105, 0x0903010f, 0x09030303,
+ 0x09030307, 0x09030505, 0x09030701, 0x0903070b, 0x09030907, 0x09030b03, 0x09030b0b, 0x09050103,
+ 0x09050107, 0x09050301, 0x0905030b, 0x09050503, 0x09050707, 0x09050901, 0x09050b0f, 0x09050d05,
+ 0x09050f01, 0x09070109, 0x09070303, 0x09070307, 0x09070501, 0x09070505, 0x09070703, 0x0907070b,
+ 0x09090101, 0x09090105, 0x09090509, 0x0909070f, 0x09090901, 0x09090f03, 0x090b010b, 0x090b010f,
+ 0x090b0503, 0x090b0d05, 0x090d0307, 0x090d0709, 0x090d0d01, 0x090f0301, 0x090f030b, 0x090f0701,
+ 0x090f0907, 0x090f0b03, 0x0b010105, 0x0b010301, 0x0b010309, 0x0b010505, 0x0b010901, 0x0b010909,
+ 0x0b01090f, 0x0b010b05, 0x0b010d0d, 0x0b010f09, 0x0b030103, 0x0b030107, 0x0b03010b, 0x0b030305,
+ 0x0b030503, 0x0b030705, 0x0b030f05, 0x0b050101, 0x0b050303, 0x0b050507, 0x0b050701, 0x0b05070d,
+ 0x0b050b07, 0x0b070105, 0x0b07010f, 0x0b070301, 0x0b07050f, 0x0b070909, 0x0b070b03, 0x0b070d0b,
+ 0x0b070f07, 0x0b090103, 0x0b090109, 0x0b090501, 0x0b090705, 0x0b09090d, 0x0b0b0305, 0x0b0b050d,
+ 0x0b0b0b03, 0x0b0b0b07, 0x0b0d0905, 0x0b0f0105, 0x0b0f0109, 0x0b0f0505, 0x0d010303, 0x0d010307,
+ 0x0d01030b, 0x0d010703, 0x0d010707, 0x0d010d01, 0x0d030101, 0x0d030501, 0x0d03050f, 0x0d030d09,
+ 0x0d050305, 0x0d050709, 0x0d050905, 0x0d050b0b, 0x0d050d05, 0x0d050f01, 0x0d070101, 0x0d070309,
+ 0x0d070503, 0x0d070901, 0x0d09050b, 0x0d090907, 0x0d090d05, 0x0d0b0101, 0x0d0b0107, 0x0d0b0709,
+ 0x0d0b0d01, 0x0d0d010b, 0x0d0d0901, 0x0d0f0303, 0x0d0f0307, 0x0f010101, 0x0f010109, 0x0f01010f,
+ 0x0f010501, 0x0f010505, 0x0f01070d, 0x0f010901, 0x0f010b09, 0x0f010d05, 0x0f030105, 0x0f030303,
+ 0x0f030509, 0x0f030907, 0x0f03090b, 0x0f050103, 0x0f050109, 0x0f050301, 0x0f05030d, 0x0f050503,
+ 0x0f050701, 0x0f050b03, 0x0f070105, 0x0f070705, 0x0f07070b, 0x0f070b07, 0x0f090103, 0x0f09010b,
+ 0x0f090307, 0x0f090501, 0x0f090b01, 0x0f0b0505, 0x0f0b0905, 0x0f0d0105, 0x0f0d0703, 0x0f0f0101,
+};
+
+shared uint32_t iq3s_grid[512];
+
+#define NEEDS_INIT_IQ_SHMEM
+void init_iq_shmem(uvec3 wgsize)
+{
+ // copy the table into shared memory and sync
+ [[unroll]] for (uint i = 0; i < iq3s_grid.length(); i += wgsize.x) {
+ if (iq3s_grid.length() % wgsize.x == 0 || i + gl_LocalInvocationIndex.x < iq3s_grid.length()) {
+ iq3s_grid[i + gl_LocalInvocationIndex.x] = iq3s_grid_const[i + gl_LocalInvocationIndex.x];
+ }
+ }
+ barrier();
+}
+
+#define QUANT_K QUANT_K_IQ3_S
+#define QUANT_R QUANT_R_IQ3_S
+#define A_TYPE block_iq3_s
+#define A_TYPE_PACKED16 block_iq3_s_packed16
+#endif
+
+#define QUANT_K_IQ4_XS 256
+#define QUANT_R_IQ4_XS 1
+
+struct block_iq4_xs
+{
+ float16_t d;
+ uint16_t scales_h;
+ uint8_t scales_l[QUANT_K_IQ4_XS/64];
+ uint8_t qs[QUANT_K_IQ4_XS/2];
+};
+
+#if defined(DATA_A_IQ4_XS)
+#define QUANT_K QUANT_K_IQ4_XS
+#define QUANT_R QUANT_R_IQ4_XS
+#define A_TYPE block_iq4_xs
+#endif
+
+#define QUANT_K_IQ4_NL 32
+#define QUANT_R_IQ4_NL 2
+
+struct block_iq4_nl
+{
+ float16_t d;
+ uint8_t qs[QUANT_K_IQ4_NL/2];
+};
+
+struct block_iq4_nl_packed16
+{
+ float16_t d;
+ uint16_t qs[QUANT_K_IQ4_NL/2/2];
+};
+
+#if defined(DATA_A_IQ4_NL)
+#define QUANT_K QUANT_K_IQ4_NL
+#define QUANT_R QUANT_R_IQ4_NL
+#define A_TYPE block_iq4_nl
+#define A_TYPE_PACKED16 block_iq4_nl_packed16
+#endif
+
+#define QUANT_K_MXFP4 32
+#define QUANT_R_MXFP4 2
+
+struct block_mxfp4
+{
+ uint8_t e;
+ uint8_t qs[QUANT_K_MXFP4/2];
+};
+
+//struct block_mxfp4_packed16
+//{
+// uint8_t e;
+// uint16_t qs[QUANT_K_MXFP4/2/2];
+//};
+
+#if defined(DATA_A_MXFP4)
+#define QUANT_K QUANT_K_MXFP4
+#define QUANT_R QUANT_R_MXFP4
+#define QUANT_AUXF 1
+#define A_TYPE block_mxfp4
+//#define A_TYPE_PACKED16 block_mxfp4_packed16
+#endif
+
+#if defined(DATA_A_IQ4_NL) || defined(DATA_A_IQ4_XS)
+const int8_t kvalues_iq4nl_const[16] = {
+ int8_t(-127), int8_t(-104), int8_t(-83), int8_t(-65), int8_t(-49), int8_t(-35), int8_t(-22), int8_t(-10),
+ int8_t(1), int8_t(13), int8_t(25), int8_t(38), int8_t(53), int8_t(69), int8_t(89), int8_t(113)
+};
+
+shared FLOAT_TYPE kvalues_iq4nl[16];
+
+#define NEEDS_INIT_IQ_SHMEM
+void init_iq_shmem(uvec3 wgsize)
+{
+ // copy the table into shared memory and sync
+ for (uint i = gl_LocalInvocationIndex.x; i < kvalues_iq4nl.length(); i += wgsize.x) {
+ kvalues_iq4nl[i] = FLOAT_TYPE(kvalues_iq4nl_const[i]);
+ }
+ barrier();
+}
+#endif
+
+#if defined(DATA_A_MXFP4)
+const FLOAT_TYPE kvalues_mxfp4_const[16] = {
+ FLOAT_TYPE(0.0f), FLOAT_TYPE(0.5f), FLOAT_TYPE(1.0f), FLOAT_TYPE(1.5f), FLOAT_TYPE(2.0f), FLOAT_TYPE(3.0f), FLOAT_TYPE(4.0f), FLOAT_TYPE(6.0f),
+ FLOAT_TYPE(-0.0f), FLOAT_TYPE(-0.5f), FLOAT_TYPE(-1.0f), FLOAT_TYPE(-1.5f), FLOAT_TYPE(-2.0f), FLOAT_TYPE(-3.0f), FLOAT_TYPE(-4.0f), FLOAT_TYPE(-6.0f)
+};
+
+shared FLOAT_TYPE kvalues_mxfp4[16];
+
+#define NEEDS_INIT_IQ_SHMEM
+void init_iq_shmem(uvec3 wgsize)
+{
+ // copy the table into shared memory and sync
+ for (uint i = gl_LocalInvocationIndex.x; i < kvalues_mxfp4.length(); i += wgsize.x) {
+ kvalues_mxfp4[i] = kvalues_mxfp4_const[i];
+ }
+ barrier();
+}
+#endif
+
+// returns the bfloat value in the low 16b.
+// See ggml_compute_fp32_to_bf16
+uint32_t fp32_to_bf16(float f)
+{
+ uint32_t u = floatBitsToUint(f);
+ u = (u + (0x7fff + ((u >> 16) & 1))) >> 16;
+ return u;
+}
+
+float bf16_to_fp32(uint32_t u)
+{
+ return uintBitsToFloat(u << 16);
+}
+
+vec4 bf16_to_fp32(uvec4 u)
+{
+ return vec4(bf16_to_fp32(u.x), bf16_to_fp32(u.y), bf16_to_fp32(u.z), bf16_to_fp32(u.w));
+}
+
+float e8m0_to_fp32(uint8_t x) {
+ uint32_t bits;
+
+ if (x == 0) {
+ bits = 0x00400000;
+ } else {
+ bits = x;
+ bits = bits << 23;
+ }
+
+ return uintBitsToFloat(bits);
+}
+
+#if BDA
+
+#extension GL_EXT_buffer_reference : enable
+#extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable
+
+#define BDA_STORAGE_T uint64_t
+#define BDA_OFFSET_T uint64_t
+
+#else
+
+#define BDA_STORAGE_T uvec2
+#define BDA_OFFSET_T uint
+
+#endif
+
+#endif // !defined(GGML_TYPES_COMP)
float sf0; float sf1; float sf2; float sf3;
} p;
-#include "types.comp"
+#include "types.glsl"
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+++ /dev/null
-#ifndef UTILS_COMP
-#define UTILS_COMP
-
-// mod and div are expensive and coordinates/dimensions are often power of 2 or equal to 1
-uint fastmod(uint a, uint b) {
- if ((b & (b-1)) == 0) {
- return a & (b-1);
- }
- return a % b;
-}
-
-uint fastdiv(uint a, uint b) {
- return (a < b) ? 0 : (a / b);
-}
-
-void get_indices(uint idx, out uint i00, out uint i01, out uint i02, out uint i03, uint ne00, uint ne01, uint ne02, uint ne03) {
- i03 = fastdiv(idx, (ne02*ne01*ne00));
- const uint i03_offset = i03 * ne02*ne01*ne00;
- i02 = fastdiv((idx - i03_offset), (ne01*ne00));
- const uint i02_offset = i02*ne01*ne00;
- i01 = (idx - i03_offset - i02_offset) / ne00;
- i00 = idx - i03_offset - i02_offset - i01*ne00;
-}
-
-#endif // UTILS_COMP
--- /dev/null
+#ifndef UTILS_COMP
+#define UTILS_COMP
+
+// mod and div are expensive and coordinates/dimensions are often power of 2 or equal to 1
+uint fastmod(uint a, uint b) {
+ if ((b & (b-1)) == 0) {
+ return a & (b-1);
+ }
+ return a % b;
+}
+
+uint fastdiv(uint a, uint b) {
+ return (a < b) ? 0 : (a / b);
+}
+
+void get_indices(uint idx, out uint i00, out uint i01, out uint i02, out uint i03, uint ne00, uint ne01, uint ne02, uint ne03) {
+ i03 = fastdiv(idx, (ne02*ne01*ne00));
+ const uint i03_offset = i03 * ne02*ne01*ne00;
+ i02 = fastdiv((idx - i03_offset), (ne01*ne00));
+ const uint i02_offset = i02*ne01*ne00;
+ i01 = (idx - i03_offset - i02_offset) / ne00;
+ i00 = idx - i03_offset - i02_offset - i01*ne00;
+}
+
+#endif // UTILS_COMP
std::mutex lock;
std::vector<std::pair<std::string, std::string>> shader_fnames;
+std::locale c_locale("C");
std::string GLSLC = "glslc";
-std::string input_dir = "vulkan-shaders";
+std::string input_filepath = "";
std::string output_dir = "/tmp";
-std::string target_hpp = "ggml-vulkan-shaders.hpp";
-std::string target_cpp = "ggml-vulkan-shaders.cpp";
-bool no_clean = false;
+std::string target_hpp = "";
+std::string target_cpp = "";
const std::vector<std::string> type_names = {
"f32",
};
namespace {
+
void execute_command(const std::string& command, std::string& stdout_str, std::string& stderr_str) {
#ifdef _WIN32
HANDLE stdout_read, stdout_write;
return path.substr(path.find_last_of("/\\") + 1);
}
+std::stringstream make_generic_stringstream() {
+ std::stringstream ss;
+ ss.imbue(c_locale);
+ return ss;
+}
+
+std::string read_binary_file(const std::string& path, bool may_not_exist = false) {
+ FILE* f = fopen(path.c_str(), "rb");
+ if (!f) {
+ if (!may_not_exist) {
+ std::cerr << "Error opening file: " << path << " (" << strerror(errno) << ")\n";
+ }
+ return {};
+ }
+
+ fseek(f, 0, SEEK_END);
+ size_t size = ftell(f);
+ fseek(f, 0, SEEK_SET);
+
+ std::string data(size, '\0');
+ size_t read_size = fread(data.data(), 1, size, f);
+ fclose(f);
+ if (read_size != size) {
+ std::cerr << "Error reading file: " << path << " (" << strerror(errno) << ")\n";
+ return {};
+ }
+
+ return data;
+}
+
+void write_binary_file(const std::string& path, const std::string& content) {
+ FILE* f = fopen(path.c_str(), "wb");
+ if (!f) {
+ std::cerr << "Error opening file for writing: " << path << " (" << strerror(errno) << ")\n";
+ return;
+ }
+
+ size_t write_size = fwrite(content.data(), 1, content.size(), f);
+ fclose(f);
+ if (write_size != content.size()) {
+ std::cerr << "Error writing file: " << path << " (" << strerror(errno) << ")\n";
+ return;
+ }
+}
+
+void write_file_if_changed(const std::string& path, const std::string& content) {
+ std::string existing = read_binary_file(path, true);
+ if (existing != content) {
+ write_binary_file(path, content);
+ }
+}
+
+
// variables to track number of compiles in progress
static uint32_t compile_count = 0;
static std::mutex compile_count_mutex;
static std::condition_variable compile_count_cond;
+static bool generate_dep_file = true;
+
+void decrement_compile_count(uint32_t * count) {
+ if (count) {
+ std::lock_guard<std::mutex> guard(compile_count_mutex);
+ assert(compile_count > 0);
+ compile_count--;
+ compile_count_cond.notify_all();
+ }
+}
-void string_to_spv_func(const std::string& _name, const std::string& in_fname, const std::map<std::string, std::string>& defines, bool fp16 = true, bool coopmat = false, bool coopmat2 = false, bool f16acc = false) {
- std::string name = _name + (f16acc ? "_f16acc" : "") + (coopmat ? "_cm1" : "") + (coopmat2 ? "_cm2" : (fp16 ? "" : "_fp32"));
- std::string out_fname = join_paths(output_dir, name + ".spv");
- std::string in_path = join_paths(input_dir, in_fname);
+using compile_count_guard = std::unique_ptr<uint32_t, decltype(&decrement_compile_count)>;
+
+compile_count_guard acquire_compile_slot() {
+ // wait until fewer than N compiles are in progress.
+ // 16 is an arbitrary limit, the goal is to avoid "failed to create pipe" errors.
+ uint32_t N = 16;
+ std::unique_lock<std::mutex> guard(compile_count_mutex);
+ compile_count_cond.wait(guard, [N] { return compile_count < N; });
+ compile_count++;
+ return compile_count_guard(&compile_count, &decrement_compile_count);
+}
+void string_to_spv_func(std::string name, std::string in_path, std::string out_path, std::map<std::string, std::string> defines, bool coopmat, bool dep_file, compile_count_guard slot) {
std::string target_env = (name.find("_cm2") != std::string::npos) ? "--target-env=vulkan1.3" : "--target-env=vulkan1.2";
// disable spirv-opt for coopmat shaders for https://github.com/ggerganov/llama.cpp/issues/10734
std::string opt_level = (coopmat || name.find("bf16") != std::string::npos) ? "" : "-O";
#ifdef _WIN32
- std::vector<std::string> cmd = {GLSLC, "-fshader-stage=compute", target_env, opt_level, "\"" + in_path + "\"", "-o", "\"" + out_fname + "\""};
+ std::vector<std::string> cmd = {GLSLC, "-fshader-stage=compute", target_env, opt_level, "\"" + in_path + "\"", "-o", "\"" + out_path + "\""};
#else
- std::vector<std::string> cmd = {GLSLC, "-fshader-stage=compute", target_env, opt_level, in_path, "-o", out_fname};
+ std::vector<std::string> cmd = {GLSLC, "-fshader-stage=compute", target_env, opt_level, in_path, "-o", out_path};
#endif
+ if (dep_file) {
+ cmd.push_back("-MD");
+ cmd.push_back("-MF");
+ cmd.push_back("\"" + target_cpp + ".d\"");
+ }
+
#ifdef GGML_VULKAN_SHADER_DEBUG_INFO
cmd.push_back("-g");
#endif
return;
}
+ if (dep_file) {
+ // replace .spv output path with the embed .cpp path which is used as output in CMakeLists.txt
+ std::string dep = read_binary_file(target_cpp + ".d", true);
+ if (!dep.empty()) {
+ size_t pos = dep.find(out_path);
+ if (pos != std::string::npos) {
+ dep.replace(pos, out_path.length(), target_cpp);
+ }
+ write_binary_file(target_cpp + ".d", dep);
+ }
+ }
+
std::lock_guard<std::mutex> guard(lock);
- shader_fnames.push_back(std::make_pair(name, out_fname));
+ shader_fnames.push_back(std::make_pair(name, out_path));
} catch (const std::exception& e) {
std::cerr << "Error executing command for " << name << ": " << e.what() << std::endl;
}
- {
- std::lock_guard<std::mutex> guard(compile_count_mutex);
- assert(compile_count > 0);
- compile_count--;
- }
- compile_count_cond.notify_all();
}
std::map<std::string, std::string> merge_maps(const std::map<std::string, std::string>& a, const std::map<std::string, std::string>& b) {
}
static std::vector<std::future<void>> compiles;
-void string_to_spv(const std::string& _name, const std::string& in_fname, const std::map<std::string, std::string>& defines, bool fp16 = true, bool coopmat = false, bool coopmat2 = false, bool f16acc = false) {
- {
- // wait until fewer than N compiles are in progress.
- // 16 is an arbitrary limit, the goal is to avoid "failed to create pipe" errors.
- uint32_t N = 16;
- std::unique_lock<std::mutex> guard(compile_count_mutex);
- while (compile_count >= N) {
- compile_count_cond.wait(guard);
- }
- compile_count++;
+void string_to_spv(std::string name, const std::string& source, const std::map<std::string, std::string>& defines, bool fp16 = true, bool coopmat = false, bool coopmat2 = false, bool f16acc = false) {
+ name = name + (f16acc ? "_f16acc" : "") + (coopmat ? "_cm1" : "") + (coopmat2 ? "_cm2" : (fp16 ? "" : "_fp32"));
+ std::string out_path = join_paths(output_dir, name + ".spv");
+
+ if (input_filepath == "") {
+ // No input source to compile, only generate header for all shaders
+ shader_fnames.push_back(std::pair(name, out_path));
+ return;
+ } else if (basename(input_filepath) != source) {
+ // Only compile shader variants matching the input filename
+ return;
}
- compiles.push_back(std::async(string_to_spv_func, _name, in_fname, defines, fp16, coopmat, coopmat2, f16acc));
+
+ compile_count_guard slot = acquire_compile_slot();
+ compiles.push_back(std::async(
+ string_to_spv_func, name, input_filepath, out_path, defines, coopmat, generate_dep_file, std::move(slot)));
+ // Don't write the same dep file from multiple processes
+ generate_dep_file = false;
}
void matmul_shaders(bool fp16, MatMulIdType matmul_id_type, bool coopmat, bool coopmat2, bool f16acc) {
}
void process_shaders() {
- std::cout << "ggml_vulkan: Generating and compiling shaders to SPIR-V" << std::endl;
std::map<std::string, std::string> base_dict = {{"FLOAT_TYPE", "float"}};
// matmul
}
void write_output_files() {
- FILE* hdr = fopen(target_hpp.c_str(), "w");
- FILE* src = fopen(target_cpp.c_str(), "w");
+ std::stringstream hdr = make_generic_stringstream();
+ std::stringstream src = make_generic_stringstream();
- fprintf(hdr, "#include <cstdint>\n\n");
- fprintf(src, "#include \"%s\"\n\n", basename(target_hpp).c_str());
+ hdr << "#include <cstdint>\n\n";
+ src << "#include \"" << basename(target_hpp) << "\"\n\n";
std::sort(shader_fnames.begin(), shader_fnames.end());
for (const auto& pair : shader_fnames) {
const std::string& path = pair.second;
#endif
- FILE* spv = fopen(path.c_str(), "rb");
- if (!spv) {
- std::cerr << "Error opening SPIR-V file: " << path << " (" << strerror(errno) << ")\n";
- continue;
- }
-
- fseek(spv, 0, SEEK_END);
- size_t size = ftell(spv);
- fseek(spv, 0, SEEK_SET);
-
- std::vector<unsigned char> data(size);
- size_t read_size = fread(data.data(), 1, size, spv);
- fclose(spv);
- if (read_size != size) {
- std::cerr << "Error reading SPIR-V file: " << path << " (" << strerror(errno) << ")\n";
- continue;
- }
-
- fprintf(hdr, "extern unsigned char %s_data[%zu];\n", name.c_str(), size);
- fprintf(hdr, "const uint64_t %s_len = %zu;\n\n", name.c_str(), size);
+ hdr << "extern const uint64_t " << name << "_len;\n";
+ hdr << "extern const unsigned char " << name << "_data[];\n\n";
- fprintf(src, "unsigned char %s_data[%zu] = {\n", name.c_str(), size);
- for (size_t i = 0; i < size; ++i) {
- fprintf(src, "0x%02x,", data[i]);
- if ((i + 1) % 12 == 0) fprintf(src, "\n");
- }
- fprintf(src, "\n};\n\n");
+ if (input_filepath != "") {
+ std::string data = read_binary_file(path);
+ if (data.empty()) {
+ continue;
+ }
- if (!no_clean) {
- std::remove(path.c_str());
+ src << "const uint64_t " << name << "_len = " << data.size() << ";\n";
+ src << "const unsigned char " << name << "_data[" << data.size() << "] = {\n" << std::hex;
+ auto bytes = reinterpret_cast<const uint8_t*>(data.data());
+ for (size_t i = 0; i < data.size(); ++i) {
+ src << "0x" << static_cast<int>(bytes[i]) << ",";
+ if ((i + 1) % 12 == 0) src << "\n";
+ }
+ src << std::dec << "\n};\n\n";
}
}
std::string suffixes[2] = {"_f32", "_f16"};
- for (const char *op : {"add", "sub", "mul", "div", "add_rms"}) {
- fprintf(hdr, "extern unsigned char *%s_data[2][2][2][2];\n", op);
- fprintf(hdr, "extern uint64_t %s_len[2][2][2][2];\n", op);
- std::string data = "unsigned char *" + std::string(op) + "_data[2][2][2][2] = ";
- std::string len = "uint64_t " + std::string(op) + "_len[2][2][2][2] = ";
+ for (auto op : {"add", "sub", "mul", "div", "add_rms"}) {
+ hdr << "extern const void * " << op << "_data[2][2][2][2];\n";
+ hdr << "extern const uint64_t " << op << "_len[2][2][2][2];\n";
+
+ std::string op_file = op == "add_rms" ? "add.comp" : std::string(op) + ".comp";
+ if (basename(input_filepath) != op_file) {
+ continue;
+ }
+ std::stringstream data = make_generic_stringstream();
+ std::stringstream len = make_generic_stringstream();
+ data << "const void * " << op << "_data[2][2][2][2] = ";
+ len << "const uint64_t " << op << "_len[2][2][2][2] = ";
for (uint32_t t0 = 0; t0 < 2; ++t0) {
if (t0 == 0) {
- data += "{";
- len += "{";
+ data << "{";
+ len << "{";
}
for (uint32_t t1 = 0; t1 < 2; ++t1) {
if (t1 == 0) {
- data += "{";
- len += "{";
+ data << "{";
+ len << "{";
}
for (uint32_t t2 = 0; t2 < 2; ++t2) {
if (t2 == 0) {
- data += "{";
- len += "{";
+ data << "{";
+ len << "{";
}
for (uint32_t rte = 0; rte < 2; ++rte) {
if (rte == 0) {
- data += "{";
- len += "{";
+ data << "{";
+ len << "{";
}
- data += op + suffixes[t0] + suffixes[t1] + suffixes[t2] + ((rte != 0) ? "_rte" : "");
- len += op + suffixes[t0] + suffixes[t1] + suffixes[t2] + ((rte != 0) ? "_rte" : "");
- data += "_data,";
- len += "_len,";
+ data << op << suffixes[t0] << suffixes[t1] << suffixes[t2] << ((rte != 0) ? "_rte" : "");
+ len << op << suffixes[t0] << suffixes[t1] << suffixes[t2] << ((rte != 0) ? "_rte" : "");
+ data << "_data,";
+ len << "_len,";
if (rte == 1) {
- data += "}, ";
- len += "}, ";
+ data << "}, ";
+ len << "}, ";
}
}
if (t2 == 1) {
- data += "}, ";
- len += "}, ";
+ data << "}, ";
+ len << "}, ";
}
}
if (t1 == 1) {
- data += "}, ";
- len += "}, ";
+ data << "}, ";
+ len << "}, ";
}
}
if (t0 == 1) {
- data += "};\n";
- len += "};\n";
+ data << "};\n";
+ len << "};\n";
}
}
- fputs(data.c_str(), src);
- fputs(len.c_str(), src);
+ src << data.str();
+ src << len.str();
}
std::vector<std::string> btypes = {"f16", "f32"};
if (btype == "q8_1" && !is_legacy_quant(tname)) {
continue;
}
- fprintf(hdr, "extern unsigned char *arr_dmmv_%s_%s_f32_data[3];\n", tname.c_str(), btype.c_str());
- fprintf(hdr, "extern uint64_t arr_dmmv_%s_%s_f32_len[3];\n", tname.c_str(), btype.c_str());
- std::string data = "unsigned char *arr_dmmv_" + tname + "_" + btype + "_f32_data[3] = {mul_mat_vec_" + tname + "_" + btype + "_f32_data, mul_mat_vec_" + tname + "_" + btype + "_f32_subgroup_data, mul_mat_vec_" + tname + "_" + btype + "_f32_subgroup_no_shmem_data};\n";
- std::string len = "uint64_t arr_dmmv_" + tname + "_" + btype + "_f32_len[3] = {mul_mat_vec_" + tname + "_" + btype + "_f32_len, mul_mat_vec_" + tname + "_" + btype + "_f32_subgroup_len, mul_mat_vec_" + tname + "_" + btype + "_f32_subgroup_no_shmem_len};\n";
- fputs(data.c_str(), src);
- fputs(len.c_str(), src);
+ hdr << "extern const void * arr_dmmv_" << tname << "_" << btype << "_f32_data[3];\n";
+ hdr << "extern const uint64_t arr_dmmv_" << tname << "_" << btype << "_f32_len[3];\n";
+ if (basename(input_filepath) == "mul_mat_vec.comp") {
+ src << "const void * arr_dmmv_" << tname << "_" << btype << "_f32_data[3] = {mul_mat_vec_" << tname << "_" << btype << "_f32_data, mul_mat_vec_" << tname << "_" << btype << "_f32_subgroup_data, mul_mat_vec_" << tname << "_" << btype << "_f32_subgroup_no_shmem_data};\n";
+ src << "const uint64_t arr_dmmv_" << tname << "_" << btype << "_f32_len[3] = {mul_mat_vec_" << tname << "_" << btype << "_f32_len, mul_mat_vec_" << tname << "_" << btype << "_f32_subgroup_len, mul_mat_vec_" << tname << "_" << btype << "_f32_subgroup_no_shmem_len};\n";
+ }
}
}
- fclose(hdr);
- fclose(src);
-}
+ if (input_filepath == "") {
+ write_file_if_changed(target_hpp, hdr.str());
+ }
+ if (target_cpp != "") {
+ write_binary_file(target_cpp, src.str());
+ }
}
+} // namespace
+
int main(int argc, char** argv) {
std::map<std::string, std::string> args;
for (int i = 1; i < argc; ++i) {
if (args.find("--glslc") != args.end()) {
GLSLC = args["--glslc"]; // Path to glslc
}
- if (args.find("--input-dir") != args.end()) {
- input_dir = args["--input-dir"]; // Directory containing shader sources
+ if (args.find("--source") != args.end()) {
+ input_filepath = args["--source"]; // The shader source file to compile
}
if (args.find("--output-dir") != args.end()) {
output_dir = args["--output-dir"]; // Directory for containing SPIR-V output
if (args.find("--target-cpp") != args.end()) {
target_cpp = args["--target-cpp"]; // Path to generated cpp file
}
- if (args.find("--no-clean") != args.end()) {
- no_clean = true; // Keep temporary SPIR-V files in output-dir after build
- }
-
- if (!directory_exists(input_dir)) {
- std::cerr << "\"" << input_dir << "\" must be a valid directory containing shader sources" << std::endl;
- return EXIT_FAILURE;
- }
if (!directory_exists(output_dir)) {
if (!create_directory(output_dir)) {