return oss.str();
}
-static std::mutex log_mutex;
-
class vk_memory_logger {
public:
vk_memory_logger(): total_device(0), total_host(0) {}
};
#ifdef GGML_VULKAN_MEMORY_DEBUG
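+// Serializes vk_memory_logger output; only needed when memory debugging is enabled.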
+static std::mutex log_mutex;
+
void vk_memory_logger::log_allocation(vk_buffer_ref buf_ref, size_t size) {
std::lock_guard<std::mutex> guard(log_mutex);
vk_buffer buf = buf_ref.lock();
} else if (tensor->op == GGML_OP_IM2COL_3D) {
const int32_t s0 = tensor->op_params[0];
const int32_t s1 = tensor->op_params[1];
- const int32_t s1 = tensor->op_params[2];
+ const int32_t s2 = tensor->op_params[2];
const int32_t p0 = tensor->op_params[3];
const int32_t p1 = tensor->op_params[4];
- const int32_t p1 = tensor->op_params[5];
+ const int32_t p2 = tensor->op_params[5];
const int32_t d0 = tensor->op_params[6];
const int32_t d1 = tensor->op_params[7];
- const int32_t d1 = tensor->op_params[8];
+ const int32_t d2 = tensor->op_params[8];
const int32_t IC = tensor->op_params[9];
- tensor_clone = ggml_im2col(ggml_ctx, src_clone[0], src_clone[1], IC, s0, s1, s2, p0, p1, p2, d0, d1, d2, tensor->type);
+ tensor_clone = ggml_im2col_3d(ggml_ctx, src_clone[0], src_clone[1], IC, s0, s1, s2, p0, p1, p2, d0, d1, d2, tensor->type);
} else if (tensor->op == GGML_OP_TIMESTEP_EMBEDDING) {
const int32_t dim = tensor->op_params[0];
const int32_t max_period = tensor->op_params[1];
shared ACC_TYPE coopmat_stage[TM * TN * NUM_WARPS];
#endif
+#include "mul_mm_funcs.comp"
+
void main() {
#ifdef NEEDS_INIT_IQ_SHMEM
init_iq_shmem(gl_WorkGroupSize);
for (uint block = start_k; block < end_k; block += BK) {
[[unroll]] for (uint l = 0; l < BM; l += loadstride_a) {
-
-#if defined(DATA_A_F32) || defined(DATA_A_F16)
-#if LOAD_VEC_A == 8
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
- A_TYPE32 aa = A_TYPE32(data_a[idx]);
- buf_a[buf_idx ] = FLOAT_TYPE(aa[0].x);
- buf_a[buf_idx + 1] = FLOAT_TYPE(aa[0].y);
- buf_a[buf_idx + 2] = FLOAT_TYPE(aa[0].z);
- buf_a[buf_idx + 3] = FLOAT_TYPE(aa[0].w);
- buf_a[buf_idx + 4] = FLOAT_TYPE(aa[1].x);
- buf_a[buf_idx + 5] = FLOAT_TYPE(aa[1].y);
- buf_a[buf_idx + 6] = FLOAT_TYPE(aa[1].z);
- buf_a[buf_idx + 7] = FLOAT_TYPE(aa[1].w);
-#elif LOAD_VEC_A == 4
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
- A_TYPE32 aa = A_TYPE32(data_a[idx]);
- buf_a[buf_idx ] = FLOAT_TYPE(aa.x);
- buf_a[buf_idx + 1] = FLOAT_TYPE(aa.y);
- buf_a[buf_idx + 2] = FLOAT_TYPE(aa.z);
- buf_a[buf_idx + 3] = FLOAT_TYPE(aa.w);
-#else
- if (ir * BM + loadc_a + l < p.M && block + loadr_a < end_k) {
- buf_a[(loadc_a + l) * SHMEM_STRIDE + loadr_a] = FLOAT_TYPE(data_a[pos_a + (loadc_a + l) * p.stride_a + loadr_a]);
- } else {
- buf_a[(loadc_a + l) * SHMEM_STRIDE + loadr_a] = FLOAT_TYPE(0.0f);
- }
-#endif
-#elif defined(DATA_A_BF16)
-#if LOAD_VEC_A == 4
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
- buf_a[buf_idx ] = TO_FLOAT_TYPE(data_a[idx].x);
- buf_a[buf_idx + 1] = TO_FLOAT_TYPE(data_a[idx].y);
- buf_a[buf_idx + 2] = TO_FLOAT_TYPE(data_a[idx].z);
- buf_a[buf_idx + 3] = TO_FLOAT_TYPE(data_a[idx].w);
-#else
- if (ir * BM + loadc_a + l < p.M && block + loadr_a < end_k) {
- buf_a[(loadc_a + l) * SHMEM_STRIDE + loadr_a] = TO_FLOAT_TYPE(data_a[pos_a + (loadc_a + l) * p.stride_a + loadr_a]);
- } else {
- buf_a[(loadc_a + l) * SHMEM_STRIDE + loadr_a] = TO_FLOAT_TYPE(uint16_t(0));
- }
-#endif
-#elif defined(DATA_A_Q4_0)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + 4 * loadr_a;
-
- const uint ib = idx / 4;
- const uint iqs = idx & 0x03;
-
- const float d = float(data_a_packed16[ib].d);
- const uint vui = uint(data_a_packed16[ib].qs[2*iqs]) | (uint(data_a_packed16[ib].qs[2*iqs + 1]) << 16);
- const vec4 v0 = (vec4(unpack8(vui & 0x0F0F0F0F)) - 8.0f) * d;
- const vec4 v1 = (vec4(unpack8((vui >> 4) & 0x0F0F0F0F)) - 8.0f) * d;
-
- buf_a[buf_idx ] = FLOAT_TYPE(v0.x);
- buf_a[buf_idx + 1 ] = FLOAT_TYPE(v0.y);
- buf_a[buf_idx + 2 ] = FLOAT_TYPE(v0.z);
- buf_a[buf_idx + 3 ] = FLOAT_TYPE(v0.w);
- buf_a[buf_idx + 16] = FLOAT_TYPE(v1.x);
- buf_a[buf_idx + 17] = FLOAT_TYPE(v1.y);
- buf_a[buf_idx + 18] = FLOAT_TYPE(v1.z);
- buf_a[buf_idx + 19] = FLOAT_TYPE(v1.w);
-#elif defined(DATA_A_Q4_1)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + 4 * loadr_a;
-
- const uint ib = idx / 4;
- const uint iqs = idx & 0x03;
-
- const float d = float(data_a_packed16[ib].d);
- const float m = float(data_a_packed16[ib].m);
- const uint vui = uint(data_a_packed16[ib].qs[2*iqs]) | (uint(data_a_packed16[ib].qs[2*iqs + 1]) << 16);
- const vec4 v0 = vec4(unpack8(vui & 0x0F0F0F0F)) * d + m;
- const vec4 v1 = vec4(unpack8((vui >> 4) & 0x0F0F0F0F)) * d + m;
-
- buf_a[buf_idx ] = FLOAT_TYPE(v0.x);
- buf_a[buf_idx + 1 ] = FLOAT_TYPE(v0.y);
- buf_a[buf_idx + 2 ] = FLOAT_TYPE(v0.z);
- buf_a[buf_idx + 3 ] = FLOAT_TYPE(v0.w);
- buf_a[buf_idx + 16] = FLOAT_TYPE(v1.x);
- buf_a[buf_idx + 17] = FLOAT_TYPE(v1.y);
- buf_a[buf_idx + 18] = FLOAT_TYPE(v1.z);
- buf_a[buf_idx + 19] = FLOAT_TYPE(v1.w);
-#elif defined(DATA_A_Q5_0)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + 2 * loadr_a;
-
- const uint ib = idx / 8;
- const uint iqs = idx & 0x07;
-
- const float d = float(data_a_packed16[ib].d);
- const uint uint_qh = uint(data_a_packed16[ib].qh[1]) << 16 | uint(data_a_packed16[ib].qh[0]);
- const ivec2 qh0 = ivec2(((uint_qh >> 2*iqs) << 4) & 0x10, (uint_qh >> (2*iqs + 12)) & 0x10);
- const ivec2 qh1 = ivec2(((uint_qh >> (2*iqs + 1)) << 4) & 0x10, (uint_qh >> (2*iqs + 13)) & 0x10);
-
- const uint vui = uint(data_a_packed16[ib].qs[iqs]);
- const vec4 v = (vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y) - 16.0f) * d;
-
- buf_a[buf_idx ] = FLOAT_TYPE(v.x);
- buf_a[buf_idx + 1 ] = FLOAT_TYPE(v.z);
- buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
- buf_a[buf_idx + 17] = FLOAT_TYPE(v.w);
-#elif defined(DATA_A_Q5_1)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + 2 * loadr_a;
-
- const uint ib = idx / 8;
- const uint iqs = idx & 0x07;
-
- const float d = float(data_a_packed16[ib].d);
- const float m = float(data_a_packed16[ib].m);
- const uint uint_qh = data_a_packed16[ib].qh;
- const ivec2 qh0 = ivec2(((uint_qh >> 2*iqs) << 4) & 0x10, (uint_qh >> (2*iqs + 12)) & 0x10);
- const ivec2 qh1 = ivec2(((uint_qh >> (2*iqs + 1)) << 4) & 0x10, (uint_qh >> (2*iqs + 13)) & 0x10);
-
- const uint vui = uint(data_a_packed16[ib].qs[iqs]);
- const vec4 v = vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y) * d + m;
-
- buf_a[buf_idx ] = FLOAT_TYPE(v.x);
- buf_a[buf_idx + 1 ] = FLOAT_TYPE(v.z);
- buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
- buf_a[buf_idx + 17] = FLOAT_TYPE(v.w);
-#elif defined(DATA_A_Q8_0)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 8;
- const uint iqs = idx & 0x07;
-
- const float d = float(data_a_packed16[ib].d);
- const i8vec2 v0 = unpack8(int32_t(data_a_packed16[ib].qs[2*iqs])).xy; // vec4 used due to #12147
- const i8vec2 v1 = unpack8(int32_t(data_a_packed16[ib].qs[2*iqs + 1])).xy;
- const vec4 v = vec4(v0.x, v0.y, v1.x, v1.y) * d;
-
- buf_a[buf_idx ] = FLOAT_TYPE(v.x);
- buf_a[buf_idx + 1] = FLOAT_TYPE(v.y);
- buf_a[buf_idx + 2] = FLOAT_TYPE(v.z);
- buf_a[buf_idx + 3] = FLOAT_TYPE(v.w);
-#elif defined(DATA_A_Q2_K)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 128; // 2 values per idx
- const uint iqs = idx % 128; // 0..127
-
- const uint qsi = (iqs / 64) * 32 + (iqs % 16) * 2; // 0,2,4..30
- const uint scalesi = iqs / 8; // 0..15
- const uint qsshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
-
- const uvec2 qs = uvec2(data_a[ib].qs[qsi], data_a[ib].qs[qsi + 1]);
- const uint scales = data_a[ib].scales[scalesi];
- const vec2 d = vec2(data_a[ib].d);
-
- const vec2 v = d.x * float(scales & 0xF) * vec2((qs >> qsshift) & 3) - d.y * float(scales >> 4);
-
- buf_a[buf_idx ] = FLOAT_TYPE(v.x);
- buf_a[buf_idx + 1] = FLOAT_TYPE(v.y);
-#elif defined(DATA_A_Q3_K)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 128; // 2 values per idx
- const uint iqs = idx % 128; // 0..127
-
- const uint n = iqs / 64; // 0,1
- const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
- const uint hmi = (iqs % 16) * 2; // 0,2,4..30
- const uint j = (iqs % 64) / 4; // 0..3
- const uint is = iqs / 8; // 0..15
- const uint halfsplit = ((iqs % 64) / 16); // 0,1,2,3
- const uint qsshift = halfsplit * 2; // 0,2,4,6
- const uint m = 1 << (4 * n + halfsplit); // 1,2,4,8,16,32,64,128
-
- const int8_t us = int8_t(((data_a[ib].scales[is % 8] >> (4 * int(is / 8))) & 0xF)
- | (((data_a[ib].scales[8 + (is % 4)] >> (2 * int(is / 4))) & 3) << 4));
- const float dl = float(data_a[ib].d) * float(us - 32);
-
- buf_a[buf_idx ] = FLOAT_TYPE(dl * float(int8_t((data_a[ib].qs[qsi ] >> qsshift) & 3) - (((data_a[ib].hmask[hmi ] & m) != 0) ? 0 : 4)));
- buf_a[buf_idx + 1] = FLOAT_TYPE(dl * float(int8_t((data_a[ib].qs[qsi + 1] >> qsshift) & 3) - (((data_a[ib].hmask[hmi + 1] & m) != 0) ? 0 : 4)));
-#elif defined(DATA_A_Q4_K)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 128; // 2 values per idx
- const uint iqs = idx % 128; // 0..127
-
- const uint n = iqs / 32; // 0,1,2,3
- const uint b = (iqs % 32) / 16; // 0,1
- const uint is = 2 * n + b; // 0..7
- const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
-
- const vec2 loadd = vec2(data_a[ib].d);
-
- const uint scidx0 = (is < 4) ? is : (is + 4);
- const uint scidx1 = (is < 4) ? is : (is - 4);
- const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
- const uint scidxshift1 = (is < 4) ? 0 : 2;
- const uint mbidx0 = is + 4;
- const uint mbidx1 = (is < 4) ? is + 4 : is;
- const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
- const uint mbidxshift0 = (is < 4) ? 0 : 4;
- const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
- const uint mbidxshift1 = (is < 4) ? 0 : 2;
-
- const uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
- const uint8_t mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
-
- const float d = loadd.x * sc;
- const float m = -loadd.y * mbyte;
-
- buf_a[buf_idx ] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF), m));
- buf_a[buf_idx + 1] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF), m));
-#elif defined(DATA_A_Q5_K)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 128; // 2 values per idx
- const uint iqs = idx % 128; // 0..127
-
- const uint n = iqs / 32; // 0,1,2,3
- const uint b = (iqs % 32) / 16; // 0,1
- const uint is = 2 * n + b; // 0..7
- const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
- const uint qhi = (iqs % 16) * 2; // 0,2,4..30
-
- const uint8_t hm = uint8_t(1 << (iqs / 16));
-
- const vec2 loadd = vec2(data_a[ib].d);
-
- const uint scidx0 = (is < 4) ? is : (is + 4);
- const uint scidx1 = (is < 4) ? is : (is - 4);
- const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
- const uint scidxshift1 = (is < 4) ? 0 : 2;
- const uint mbidx0 = is + 4;
- const uint mbidx1 = (is < 4) ? is + 4 : is;
- const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
- const uint mbidxshift0 = (is < 4) ? 0 : 4;
- const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
- const uint mbidxshift1 = (is < 4) ? 0 : 2;
-
- const uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
- const uint8_t mbyte = uint8_t(((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
-
- const float d = loadd.x * sc;
- const float m = -loadd.y * mbyte;
-
- buf_a[buf_idx ] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi ] & hm) != 0 ? 16 : 0), m));
- buf_a[buf_idx + 1] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi + 1] & hm) != 0 ? 16 : 0), m));
-#elif defined(DATA_A_Q6_K)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 128; // 2 values per idx
- const uint iqs = idx % 128; // 0..127
-
- const uint n = iqs / 64; // 0,1
- const uint b = (iqs % 64) / 32; // 0,1
- const uint is_b = (iqs % 16) / 8; // 0,1
- const uint qhshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
- const uint is = 8 * n + qhshift + is_b; // 0..15
- const uint qsi = n * 64 + (iqs % 32) * 2; // 0,2,4..126
- const uint qhi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
-
- const float dscale = float(data_a[ib].d) * float(data_a[ib].scales[is]);
-
- buf_a[buf_idx ] = FLOAT_TYPE(dscale * float(int8_t(((data_a[ib].ql[qsi ] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi ] >> qhshift) & 3) << 4)) - 32));
- buf_a[buf_idx + 1] = FLOAT_TYPE(dscale * float(int8_t(((data_a[ib].ql[qsi + 1] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi + 1] >> qhshift) & 3) << 4)) - 32));
-#elif defined(DATA_A_IQ1_S)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 32; // 8 values per idx
- const uint ib32 = (idx % 32) / 4; // 0..7
- const uint ib8 = idx % 32;
-
- const float d = float(data_a[ib].d);
- const uint qh = data_a[ib].qh[ib32];
- const uint qs = data_a[ib].qs[ib8];
- const float dl = d * (2 * bitfieldExtract(qh, 12, 3) + 1);
- const float delta = ((qh & 0x8000) != 0) ? -IQ1S_DELTA : IQ1S_DELTA;
- const int16_t grid = int16_t(iq1s_grid[qs | (bitfieldExtract(qh, 3 * int(ib8 & 3), 3) << 8)]);
-
- [[unroll]] for (int k = 0; k < 8; ++k) {
- buf_a[buf_idx + k] = FLOAT_TYPE(dl * (bitfieldExtract(grid, 2 * k, 2) + delta));
- }
-#elif defined(DATA_A_IQ1_M)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 32; // 8 values per idx
- const uint ib8 = idx % 32;
- const uint ib16 = ib8 / 2;
-
- const uint16_t[4] scales = data_a[ib].scales;
- const u16vec4 s = u16vec4(scales[0], scales[1], scales[2], scales[3]) >> 12;
- const float d = float(unpackHalf2x16(s.x | (s.y << 4) | (s.z << 8) | (s.w << 12)).x);
- const uint sc = scales[ib8 / 8];
- const uint qs = data_a[ib].qs[ib8];
- const uint qh = data_a[ib].qh[ib16] >> (4 * (ib8 & 1));
- const float dl = d * (2 * bitfieldExtract(sc, 3 * int(ib16 & 3), 3) + 1);
- const float delta = ((qh & 8) != 0) ? -IQ1M_DELTA : IQ1M_DELTA;
- const int16_t grid = int16_t(iq1s_grid[qs | ((qh & 7) << 8)]);
-
- [[unroll]] for (int k = 0; k < 8; ++k) {
- buf_a[buf_idx + k] = FLOAT_TYPE(dl * (bitfieldExtract(grid, 2 * k, 2) + delta));
- }
-#elif defined(DATA_A_IQ2_XXS)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 32; // 8 values per idx
- const uint ib32 = (idx % 32) / 4; // 0..7
- const uint ib8 = idx % 4;
-
- const float d = float(data_a[ib].d);
- const uint qs = data_a[ib].qs[8 * ib32 + ib8];
- const uint signs = pack32(u8vec4(
- data_a[ib].qs[8*ib32 + 4],
- data_a[ib].qs[8*ib32 + 5],
- data_a[ib].qs[8*ib32 + 6],
- data_a[ib].qs[8*ib32 + 7]
- ));
- const FLOAT_TYPE db = FLOAT_TYPE(d * 0.25 * (0.5 + (signs >> 28)));
- const uint32_t sign7 = bitfieldExtract(signs, 7 * int(ib8), 7);
- const uint sign = sign7 | (bitCount(sign7) << 7);
- const uvec2 grid = iq2xxs_grid[qs];
- const vec4 grid0 = vec4(unpack8(grid.x));
- const vec4 grid1 = vec4(unpack8(grid.y));
-
- buf_a[buf_idx ] = db * FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x);
- buf_a[buf_idx + 1] = db * FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y);
- buf_a[buf_idx + 2] = db * FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z);
- buf_a[buf_idx + 3] = db * FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w);
- buf_a[buf_idx + 4] = db * FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x);
- buf_a[buf_idx + 5] = db * FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y);
- buf_a[buf_idx + 6] = db * FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z);
- buf_a[buf_idx + 7] = db * FLOAT_TYPE((sign & 128) != 0 ? -grid1.w : grid1.w);
-#elif defined(DATA_A_IQ2_XS)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 32; // 8 values per idx
- const uint ib32 = (idx % 32) / 4; // 0..7
- const uint ib8 = idx % 4; // 0..3
-
- const float d = float(data_a[ib].d);
- const uint scale = (data_a[ib].scales[ib32] >> (2 * (ib8 & 2))) & 0xf;
- const FLOAT_TYPE db = FLOAT_TYPE(d * 0.25 * (0.5 + scale));
- const uint qs = data_a[ib].qs[4 * ib32 + ib8];
- const uint sign7 = qs >> 9;
- const uint sign = sign7 | (bitCount(sign7) << 7);
- const uvec2 grid = iq2xs_grid[qs & 511];
- const vec4 grid0 = vec4(unpack8(grid.x));
- const vec4 grid1 = vec4(unpack8(grid.y));
-
- buf_a[buf_idx ] = db * FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x);
- buf_a[buf_idx + 1] = db * FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y);
- buf_a[buf_idx + 2] = db * FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z);
- buf_a[buf_idx + 3] = db * FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w);
- buf_a[buf_idx + 4] = db * FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x);
- buf_a[buf_idx + 5] = db * FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y);
- buf_a[buf_idx + 6] = db * FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z);
- buf_a[buf_idx + 7] = db * FLOAT_TYPE((sign & 128) != 0 ? -grid1.w : grid1.w);
-#elif defined(DATA_A_IQ2_S)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 32; // 8 values per idx
- const uint ib8 = idx % 32; // 0..31
- const uint ib32 = ib8 / 4; // 0..7
-
- const uint scale = (data_a[ib].scales[ib32] >> (2 * (ib8 & 2))) & 0xf;
- const uint qs = data_a[ib].qs[ib8];
- const uint qh = data_a[ib].qh[ib32];
- const uint qhshift = 2 * (ib8 % 4);
- const uint sign = data_a[ib].qs[QUANT_K / 8 + ib8];
-
- const float d = float(data_a[ib].d);
- const FLOAT_TYPE db = FLOAT_TYPE(d * 0.25 * (0.5 + scale));
- const uvec2 grid = iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)];
- const vec4 grid0 = vec4(unpack8(grid.x));
- const vec4 grid1 = vec4(unpack8(grid.y));
-
- buf_a[buf_idx ] = db * FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x);
- buf_a[buf_idx + 1] = db * FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y);
- buf_a[buf_idx + 2] = db * FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z);
- buf_a[buf_idx + 3] = db * FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w);
- buf_a[buf_idx + 4] = db * FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x);
- buf_a[buf_idx + 5] = db * FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y);
- buf_a[buf_idx + 6] = db * FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z);
- buf_a[buf_idx + 7] = db * FLOAT_TYPE((sign & 128) != 0 ? -grid1.w : grid1.w);
-#elif defined(DATA_A_IQ3_XXS)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 64; // 4 values per idx
- const uint iqs = idx % 64; // 0..63
- const uint is = QUANT_K / 4 + 4 * (iqs / 8); // 8 values
-
- const float d = float(data_a[ib].d);
- const uint qs = data_a[ib].qs[iqs];
- const uint signs = pack32(u8vec4(
- data_a[ib].qs[is+0],
- data_a[ib].qs[is+1],
- data_a[ib].qs[is+2],
- data_a[ib].qs[is+3]
- ));
- const float db = d * 0.5 * (0.5 + (signs >> 28));
- const uint32_t sign7 = bitfieldExtract(signs, 7 * (int(iqs / 2) % 4), 7);
- const uint sign = (sign7 | (bitCount(sign7) << 7)) >> (4 * (idx % 2));
- const uint grid = iq3xxs_grid[qs];
- const vec4 v = db * vec4(unpack8(grid));
-
- buf_a[buf_idx ] = FLOAT_TYPE((sign & 1) != 0 ? -v.x : v.x);
- buf_a[buf_idx + 1] = FLOAT_TYPE((sign & 2) != 0 ? -v.y : v.y);
- buf_a[buf_idx + 2] = FLOAT_TYPE((sign & 4) != 0 ? -v.z : v.z);
- buf_a[buf_idx + 3] = FLOAT_TYPE((sign & 8) != 0 ? -v.w : v.w);
-#elif defined(DATA_A_IQ3_S)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 64; // 4 values per idx
- const uint iqs = idx % 64; // 0..63
- const uint iqh = iqs / 8;
-
- const float d = float(data_a[ib].d);
- const uint qs = data_a[ib].qs[iqs];
- const uint qh = data_a[ib].qh[iqh];
- const int8_t sign = int8_t(data_a[ib].signs[iqs / 2] >> (4 * (idx % 2)));
- const uint scale = data_a[ib].scales[iqs / 16];
- const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(sign << 1, sign)));
- const float db = d * (1 + 2 * ((scale >> (4 * (iqh & 1))) & 0xf));
- const uint32_t grid = iq3s_grid[qs | ((qh << (8 - (iqs % 8))) & 256)];
- const vec4 v = db * vec4(unpack8(grid));
-
- buf_a[buf_idx ] = FLOAT_TYPE((sign & 1) != 0 ? -v.x : v.x);
- buf_a[buf_idx + 1] = FLOAT_TYPE((sign & 2) != 0 ? -v.y : v.y);
- buf_a[buf_idx + 2] = FLOAT_TYPE((sign & 4) != 0 ? -v.z : v.z);
- buf_a[buf_idx + 3] = FLOAT_TYPE((sign & 8) != 0 ? -v.w : v.w);
-#elif defined(DATA_A_IQ4_XS)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
-
- const uint ib = idx / 128; // 2 values per idx
- const uint ib32 = (idx % 128) / 16; // 0..7
- const uint iq = 16 * ib32 + 2 * (idx % 8);
-
- const uint sl = (data_a[ib].scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF;
- const uint sh = ((data_a[ib].scales_h) >> (2 * ib32)) & 3;
- const uint qshift = (idx & 8) >> 1;
- u8vec2 qs = u8vec2(data_a[ib].qs[iq], data_a[ib].qs[iq + 1]);
- qs = (qs >> qshift) & uint8_t(0xF);
-
- const float d = float(data_a[ib].d);
- const vec2 v = d * float(int(sl | (sh << 4)) - 32) * vec2(kvalues_iq4nl[qs.x], kvalues_iq4nl[qs.y]);
-
- buf_a[buf_idx ] = FLOAT_TYPE(v.x);
- buf_a[buf_idx + 1] = FLOAT_TYPE(v.y);
-#elif defined(DATA_A_IQ4_NL)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + 2 * loadr_a;
-
- const uint ib = idx / 8;
- const uint iqs = idx & 0x07;
-
- const FLOAT_TYPE d = FLOAT_TYPE(data_a_packed16[ib].d);
- const uint vui = uint(data_a_packed16[ib].qs[iqs]);
-
- buf_a[buf_idx ] = FLOAT_TYPE(kvalues_iq4nl[vui & 0xF]) * d;
- buf_a[buf_idx + 1 ] = FLOAT_TYPE(kvalues_iq4nl[bitfieldExtract(vui, 8, 4)]) * d;
- buf_a[buf_idx + 16] = FLOAT_TYPE(kvalues_iq4nl[bitfieldExtract(vui, 4, 4)]) * d;
- buf_a[buf_idx + 17] = FLOAT_TYPE(kvalues_iq4nl[vui >> 12]) * d;
-#elif defined(DATA_A_MXFP4)
- const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
- const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + 2 * loadr_a;
-
- const uint ib = idx / 8;
- const uint iqs = (idx & 0x07) * 2;
-
- const float d = e8m0_to_fp32(data_a[ib].e);
- const uint vui = uint(data_a[ib].qs[iqs]);
- const uint vui2 = uint(data_a[ib].qs[iqs+1]);
-
- buf_a[buf_idx ] = FLOAT_TYPE(kvalues_mxfp4[vui & 0xF] * d);
- buf_a[buf_idx + 16] = FLOAT_TYPE(kvalues_mxfp4[vui >> 4] * d);
- buf_a[buf_idx + 1] = FLOAT_TYPE(kvalues_mxfp4[vui2 & 0xF] * d);
- buf_a[buf_idx + 17] = FLOAT_TYPE(kvalues_mxfp4[vui2 >> 4] * d);
-#endif
+ load_a_to_shmem(pos_a, loadr_a, loadc_a + l, ir * BM + loadc_a + l, block + loadr_a, end_k);
}
[[unroll]] for (uint l = 0; l < BN; l += loadstride_b) {
-#if LOAD_VEC_B == 8
-#ifdef MUL_MAT_ID
- const u16vec2 row_idx = row_ids[loadc_b + l];
- const uint idx = pos_b + row_idx.y * p.batch_stride_b / LOAD_VEC_B + (row_idx.x % p.ne11) * p.stride_b / LOAD_VEC_B + loadr_b;
+#if !defined(MUL_MAT_ID)
+ load_b_to_shmem(pos_b, loadr_b, loadc_b + l, ic * BN + loadc_b + l, block + loadr_b, end_k);
#else
- const uint idx = pos_b + (loadc_b + l) * p.stride_b / LOAD_VEC_B + loadr_b;
-#endif
- const uint buf_idx = (loadc_b + l) * SHMEM_STRIDE + loadr_b * LOAD_VEC_B;
-#if defined(DATA_B_BF16)
- B_TYPE32 bb = TO_FLOAT_TYPE(data_b[idx]);
-#else
- B_TYPE32 bb = B_TYPE32(data_b[idx]);
-#endif
- buf_b[buf_idx + 0] = FLOAT_TYPE(bb[0].x);
- buf_b[buf_idx + 1] = FLOAT_TYPE(bb[0].y);
- buf_b[buf_idx + 2] = FLOAT_TYPE(bb[0].z);
- buf_b[buf_idx + 3] = FLOAT_TYPE(bb[0].w);
- buf_b[buf_idx + 4] = FLOAT_TYPE(bb[1].x);
- buf_b[buf_idx + 5] = FLOAT_TYPE(bb[1].y);
- buf_b[buf_idx + 6] = FLOAT_TYPE(bb[1].z);
- buf_b[buf_idx + 7] = FLOAT_TYPE(bb[1].w);
-#elif LOAD_VEC_B == 4
-#ifdef MUL_MAT_ID
- const u16vec2 row_idx = row_ids[loadc_b + l];
- const uint idx = pos_b + row_idx.y * p.batch_stride_b / LOAD_VEC_B + (row_idx.x % p.ne11) * p.stride_b / LOAD_VEC_B + loadr_b;
-#else
- const uint idx = pos_b + (loadc_b + l) * p.stride_b / LOAD_VEC_B + loadr_b;
-#endif
- const uint buf_idx = (loadc_b + l) * SHMEM_STRIDE + loadr_b * LOAD_VEC_B;
-#if defined(DATA_B_BF16)
- B_TYPE32 bb = TO_FLOAT_TYPE(data_b[idx]);
-#else
- B_TYPE32 bb = B_TYPE32(data_b[idx]);
-#endif
- buf_b[buf_idx + 0] = FLOAT_TYPE(bb.x);
- buf_b[buf_idx + 1] = FLOAT_TYPE(bb.y);
- buf_b[buf_idx + 2] = FLOAT_TYPE(bb.z);
- buf_b[buf_idx + 3] = FLOAT_TYPE(bb.w);
-#elif !MUL_MAT_ID
- if (ic * BN + loadc_b + l < p.N && block + loadr_b < end_k) {
- buf_b[(loadc_b + l) * SHMEM_STRIDE + loadr_b] = TO_FLOAT_TYPE(data_b[pos_b + (loadc_b + l) * p.stride_b + loadr_b]);
- } else {
- buf_b[(loadc_b + l) * SHMEM_STRIDE + loadr_b] = FLOAT_TYPE(0.0f);
- }
-#else
- const uint row_i = ic * BN + loadc_b + l;
- if (row_i < _ne1 && block + loadr_b < end_k) {
- const u16vec2 row_idx = row_ids[loadc_b + l];
- buf_b[(loadc_b + l) * SHMEM_STRIDE + loadr_b] = TO_FLOAT_TYPE(data_b[pos_b + row_idx.y * p.batch_stride_b + (row_idx.x % p.ne11) * p.stride_b + loadr_b]);
- } else {
- buf_b[(loadc_b + l) * SHMEM_STRIDE + loadr_b] = FLOAT_TYPE(0.0f);
- }
+ load_b_to_shmem(pos_b, loadr_b, loadc_b + l, ic, _ne1, block + loadr_b, end_k);
#endif
}
--- /dev/null
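+// Tile-loading helpers shared via #include by the mul_mm shader: each call loads (and, for
+// quantized A types, dequantizes) one element group of A or B from global memory into the
+// shared-memory tiles buf_a / buf_b. row/col address the element within the current block tile.
+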
+void load_a_to_shmem(const uint pos_a, const uint row, const uint col, const uint idx_m, const uint idx_k, const uint end_k) {
+#if defined(DATA_A_F32) || defined(DATA_A_F16)
+#if LOAD_VEC_A == 8
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+ FLOAT_TYPE_VEC8 aa = FLOAT_TYPE_VEC8(data_a[idx]);
+ buf_a[buf_idx ] = aa[0].x;
+ buf_a[buf_idx + 1] = aa[0].y;
+ buf_a[buf_idx + 2] = aa[0].z;
+ buf_a[buf_idx + 3] = aa[0].w;
+ buf_a[buf_idx + 4] = aa[1].x;
+ buf_a[buf_idx + 5] = aa[1].y;
+ buf_a[buf_idx + 6] = aa[1].z;
+ buf_a[buf_idx + 7] = aa[1].w;
+#elif LOAD_VEC_A == 4
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+ FLOAT_TYPE_VEC4 aa = FLOAT_TYPE_VEC4(data_a[idx]);
+ buf_a[buf_idx ] = aa.x;
+ buf_a[buf_idx + 1] = aa.y;
+ buf_a[buf_idx + 2] = aa.z;
+ buf_a[buf_idx + 3] = aa.w;
+#else
+ if (idx_m < p.M && idx_k < end_k) {
+ buf_a[col * SHMEM_STRIDE + row] = FLOAT_TYPE(data_a[pos_a + col * p.stride_a + row]);
+ } else {
+ buf_a[col * SHMEM_STRIDE + row] = FLOAT_TYPE(0.0f);
+ }
+#endif
+#elif defined(DATA_A_BF16)
+#if LOAD_VEC_A == 4
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+ FLOAT_TYPE_VEC4 aa = FLOAT_TYPE_VEC4(TO_FLOAT_TYPE(data_a[idx]));
+ buf_a[buf_idx ] = aa.x;
+ buf_a[buf_idx + 1] = aa.y;
+ buf_a[buf_idx + 2] = aa.z;
+ buf_a[buf_idx + 3] = aa.w;
+#else
+ if (idx_m < p.M && idx_k < end_k) {
+ buf_a[col * SHMEM_STRIDE + row] = TO_FLOAT_TYPE(data_a[pos_a + col * p.stride_a + row]);
+ } else {
+ buf_a[col * SHMEM_STRIDE + row] = TO_FLOAT_TYPE(uint16_t(0));
+ }
+#endif
+#elif defined(DATA_A_Q4_0)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + 4 * row;
+
+ const uint ib = idx / 4;
+ const uint iqs = idx & 0x03;
+
+ const float d = float(data_a_packed16[ib].d);
+ const uint vui = uint(data_a_packed16[ib].qs[2*iqs]) | (uint(data_a_packed16[ib].qs[2*iqs + 1]) << 16);
+ const vec4 v0 = (vec4(unpack8(vui & 0x0F0F0F0F)) - 8.0f) * d;
+ const vec4 v1 = (vec4(unpack8((vui >> 4) & 0x0F0F0F0F)) - 8.0f) * d;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v0.x);
+ buf_a[buf_idx + 1 ] = FLOAT_TYPE(v0.y);
+ buf_a[buf_idx + 2 ] = FLOAT_TYPE(v0.z);
+ buf_a[buf_idx + 3 ] = FLOAT_TYPE(v0.w);
+ buf_a[buf_idx + 16] = FLOAT_TYPE(v1.x);
+ buf_a[buf_idx + 17] = FLOAT_TYPE(v1.y);
+ buf_a[buf_idx + 18] = FLOAT_TYPE(v1.z);
+ buf_a[buf_idx + 19] = FLOAT_TYPE(v1.w);
+#elif defined(DATA_A_Q4_1)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + 4 * row;
+
+ const uint ib = idx / 4;
+ const uint iqs = idx & 0x03;
+
+ const float d = float(data_a_packed16[ib].d);
+ const float m = float(data_a_packed16[ib].m);
+ const uint vui = uint(data_a_packed16[ib].qs[2*iqs]) | (uint(data_a_packed16[ib].qs[2*iqs + 1]) << 16);
+ const vec4 v0 = vec4(unpack8(vui & 0x0F0F0F0F)) * d + m;
+ const vec4 v1 = vec4(unpack8((vui >> 4) & 0x0F0F0F0F)) * d + m;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v0.x);
+ buf_a[buf_idx + 1 ] = FLOAT_TYPE(v0.y);
+ buf_a[buf_idx + 2 ] = FLOAT_TYPE(v0.z);
+ buf_a[buf_idx + 3 ] = FLOAT_TYPE(v0.w);
+ buf_a[buf_idx + 16] = FLOAT_TYPE(v1.x);
+ buf_a[buf_idx + 17] = FLOAT_TYPE(v1.y);
+ buf_a[buf_idx + 18] = FLOAT_TYPE(v1.z);
+ buf_a[buf_idx + 19] = FLOAT_TYPE(v1.w);
+#elif defined(DATA_A_Q5_0)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + 2 * row;
+
+ const uint ib = idx / 8;
+ const uint iqs = idx & 0x07;
+
+ const float d = float(data_a_packed16[ib].d);
+ const uint uint_qh = uint(data_a_packed16[ib].qh[1]) << 16 | uint(data_a_packed16[ib].qh[0]);
+ const ivec2 qh0 = ivec2(((uint_qh >> 2*iqs) << 4) & 0x10, (uint_qh >> (2*iqs + 12)) & 0x10);
+ const ivec2 qh1 = ivec2(((uint_qh >> (2*iqs + 1)) << 4) & 0x10, (uint_qh >> (2*iqs + 13)) & 0x10);
+
+ const uint vui = uint(data_a_packed16[ib].qs[iqs]);
+ const vec4 v = (vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y) - 16.0f) * d;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v.x);
+ buf_a[buf_idx + 1 ] = FLOAT_TYPE(v.z);
+ buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
+ buf_a[buf_idx + 17] = FLOAT_TYPE(v.w);
+#elif defined(DATA_A_Q5_1)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + 2 * row;
+
+ const uint ib = idx / 8;
+ const uint iqs = idx & 0x07;
+
+ const float d = float(data_a_packed16[ib].d);
+ const float m = float(data_a_packed16[ib].m);
+ const uint uint_qh = data_a_packed16[ib].qh;
+ const ivec2 qh0 = ivec2(((uint_qh >> 2*iqs) << 4) & 0x10, (uint_qh >> (2*iqs + 12)) & 0x10);
+ const ivec2 qh1 = ivec2(((uint_qh >> (2*iqs + 1)) << 4) & 0x10, (uint_qh >> (2*iqs + 13)) & 0x10);
+
+ const uint vui = uint(data_a_packed16[ib].qs[iqs]);
+ const vec4 v = vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y) * d + m;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v.x);
+ buf_a[buf_idx + 1 ] = FLOAT_TYPE(v.z);
+ buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
+ buf_a[buf_idx + 17] = FLOAT_TYPE(v.w);
+#elif defined(DATA_A_Q8_0)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 8;
+ const uint iqs = idx & 0x07;
+
+ const float d = float(data_a_packed16[ib].d);
+ const i8vec2 v0 = unpack8(int32_t(data_a_packed16[ib].qs[2*iqs])).xy; // vec4 used due to #12147
+ const i8vec2 v1 = unpack8(int32_t(data_a_packed16[ib].qs[2*iqs + 1])).xy;
+ const vec4 v = vec4(v0.x, v0.y, v1.x, v1.y) * d;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v.x);
+ buf_a[buf_idx + 1] = FLOAT_TYPE(v.y);
+ buf_a[buf_idx + 2] = FLOAT_TYPE(v.z);
+ buf_a[buf_idx + 3] = FLOAT_TYPE(v.w);
+#elif defined(DATA_A_Q2_K)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint qsi = (iqs / 64) * 32 + (iqs % 16) * 2; // 0,2,4..30
+ const uint scalesi = iqs / 8; // 0..15
+ const uint qsshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
+
+ const uvec2 qs = uvec2(data_a[ib].qs[qsi], data_a[ib].qs[qsi + 1]);
+ const uint scales = data_a[ib].scales[scalesi];
+ const vec2 d = vec2(data_a[ib].d);
+
+ const vec2 v = d.x * float(scales & 0xF) * vec2((qs >> qsshift) & 3) - d.y * float(scales >> 4);
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v.x);
+ buf_a[buf_idx + 1] = FLOAT_TYPE(v.y);
+#elif defined(DATA_A_Q3_K)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint n = iqs / 64; // 0,1
+ const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
+ const uint hmi = (iqs % 16) * 2; // 0,2,4..30
+ const uint j = (iqs % 64) / 4; // 0..3
+ const uint is = iqs / 8; // 0..15
+ const uint halfsplit = ((iqs % 64) / 16); // 0,1,2,3
+ const uint qsshift = halfsplit * 2; // 0,2,4,6
+ const uint m = 1 << (4 * n + halfsplit); // 1,2,4,8,16,32,64,128
+
+ const int8_t us = int8_t(((data_a[ib].scales[is % 8] >> (4 * int(is / 8))) & 0xF)
+ | (((data_a[ib].scales[8 + (is % 4)] >> (2 * int(is / 4))) & 3) << 4));
+ const float dl = float(data_a[ib].d) * float(us - 32);
+
+ buf_a[buf_idx ] = FLOAT_TYPE(dl * float(int8_t((data_a[ib].qs[qsi ] >> qsshift) & 3) - (((data_a[ib].hmask[hmi ] & m) != 0) ? 0 : 4)));
+ buf_a[buf_idx + 1] = FLOAT_TYPE(dl * float(int8_t((data_a[ib].qs[qsi + 1] >> qsshift) & 3) - (((data_a[ib].hmask[hmi + 1] & m) != 0) ? 0 : 4)));
+#elif defined(DATA_A_Q4_K)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint n = iqs / 32; // 0,1,2,3
+ const uint b = (iqs % 32) / 16; // 0,1
+ const uint is = 2 * n + b; // 0..7
+ const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
+
+ const vec2 loadd = vec2(data_a[ib].d);
+
+ const uint scidx0 = (is < 4) ? is : (is + 4);
+ const uint scidx1 = (is < 4) ? is : (is - 4);
+ const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint scidxshift1 = (is < 4) ? 0 : 2;
+ const uint mbidx0 = is + 4;
+ const uint mbidx1 = (is < 4) ? is + 4 : is;
+ const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ const uint mbidxshift0 = (is < 4) ? 0 : 4;
+ const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint mbidxshift1 = (is < 4) ? 0 : 2;
+
+ const uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+ const uint8_t mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const float d = loadd.x * sc;
+ const float m = -loadd.y * mbyte;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF), m));
+ buf_a[buf_idx + 1] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF), m));
+#elif defined(DATA_A_Q5_K)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint n = iqs / 32; // 0,1,2,3
+ const uint b = (iqs % 32) / 16; // 0,1
+ const uint is = 2 * n + b; // 0..7
+ const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
+ const uint qhi = (iqs % 16) * 2; // 0,2,4..30
+
+ const uint8_t hm = uint8_t(1 << (iqs / 16));
+
+ const vec2 loadd = vec2(data_a[ib].d);
+
+ const uint scidx0 = (is < 4) ? is : (is + 4);
+ const uint scidx1 = (is < 4) ? is : (is - 4);
+ const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint scidxshift1 = (is < 4) ? 0 : 2;
+ const uint mbidx0 = is + 4;
+ const uint mbidx1 = (is < 4) ? is + 4 : is;
+ const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ const uint mbidxshift0 = (is < 4) ? 0 : 4;
+ const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint mbidxshift1 = (is < 4) ? 0 : 2;
+
+ const uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+ const uint8_t mbyte = uint8_t(((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const float d = loadd.x * sc;
+ const float m = -loadd.y * mbyte;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi ] & hm) != 0 ? 16 : 0), m));
+ buf_a[buf_idx + 1] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi + 1] & hm) != 0 ? 16 : 0), m));
+#elif defined(DATA_A_Q6_K)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint n = iqs / 64; // 0,1
+ const uint b = (iqs % 64) / 32; // 0,1
+ const uint is_b = (iqs % 16) / 8; // 0,1
+ const uint qhshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
+ const uint is = 8 * n + qhshift + is_b; // 0..15
+ const uint qsi = n * 64 + (iqs % 32) * 2; // 0,2,4..126
+ const uint qhi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
+
+ const float dscale = float(data_a[ib].d) * float(data_a[ib].scales[is]);
+
+ buf_a[buf_idx ] = FLOAT_TYPE(dscale * float(int8_t(((data_a[ib].ql[qsi ] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi ] >> qhshift) & 3) << 4)) - 32));
+ buf_a[buf_idx + 1] = FLOAT_TYPE(dscale * float(int8_t(((data_a[ib].ql[qsi + 1] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi + 1] >> qhshift) & 3) << 4)) - 32));
+#elif defined(DATA_A_IQ1_S)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 32; // 8 values per idx
+ const uint ib32 = (idx % 32) / 4; // 0..7
+ const uint ib8 = idx % 32;
+
+ const float d = float(data_a[ib].d);
+ const uint qh = data_a[ib].qh[ib32];
+ const uint qs = data_a[ib].qs[ib8];
+ const float dl = d * (2 * bitfieldExtract(qh, 12, 3) + 1);
+ const float delta = ((qh & 0x8000) != 0) ? -IQ1S_DELTA : IQ1S_DELTA;
+ const int16_t grid = int16_t(iq1s_grid[qs | (bitfieldExtract(qh, 3 * int(ib8 & 3), 3) << 8)]);
+
+ [[unroll]] for (int k = 0; k < 8; ++k) {
+ buf_a[buf_idx + k] = FLOAT_TYPE(dl * (bitfieldExtract(grid, 2 * k, 2) + delta));
+ }
+#elif defined(DATA_A_IQ1_M)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 32; // 8 values per idx
+ const uint ib8 = idx % 32;
+ const uint ib16 = ib8 / 2;
+
+ const uint16_t[4] scales = data_a[ib].scales;
+ const u16vec4 s = u16vec4(scales[0], scales[1], scales[2], scales[3]) >> 12;
+ const float d = float(unpackHalf2x16(s.x | (s.y << 4) | (s.z << 8) | (s.w << 12)).x);
+ const uint sc = scales[ib8 / 8];
+ const uint qs = data_a[ib].qs[ib8];
+ const uint qh = data_a[ib].qh[ib16] >> (4 * (ib8 & 1));
+ const float dl = d * (2 * bitfieldExtract(sc, 3 * int(ib16 & 3), 3) + 1);
+ const float delta = ((qh & 8) != 0) ? -IQ1M_DELTA : IQ1M_DELTA;
+ const int16_t grid = int16_t(iq1s_grid[qs | ((qh & 7) << 8)]);
+
+ [[unroll]] for (int k = 0; k < 8; ++k) {
+ buf_a[buf_idx + k] = FLOAT_TYPE(dl * (bitfieldExtract(grid, 2 * k, 2) + delta));
+ }
+#elif defined(DATA_A_IQ2_XXS)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 32; // 8 values per idx
+ const uint ib32 = (idx % 32) / 4; // 0..7
+ const uint ib8 = idx % 4;
+
+ const float d = float(data_a[ib].d);
+ const uint qs = data_a[ib].qs[8 * ib32 + ib8];
+ const uint signs = pack32(u8vec4(
+ data_a[ib].qs[8*ib32 + 4],
+ data_a[ib].qs[8*ib32 + 5],
+ data_a[ib].qs[8*ib32 + 6],
+ data_a[ib].qs[8*ib32 + 7]
+ ));
+ const FLOAT_TYPE db = FLOAT_TYPE(d * 0.25 * (0.5 + (signs >> 28)));
+ const uint32_t sign7 = bitfieldExtract(signs, 7 * int(ib8), 7);
+ const uint sign = sign7 | (bitCount(sign7) << 7);
+ const uvec2 grid = iq2xxs_grid[qs];
+ const vec4 grid0 = vec4(unpack8(grid.x));
+ const vec4 grid1 = vec4(unpack8(grid.y));
+
+ buf_a[buf_idx ] = db * FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x);
+ buf_a[buf_idx + 1] = db * FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y);
+ buf_a[buf_idx + 2] = db * FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z);
+ buf_a[buf_idx + 3] = db * FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w);
+ buf_a[buf_idx + 4] = db * FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x);
+ buf_a[buf_idx + 5] = db * FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y);
+ buf_a[buf_idx + 6] = db * FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z);
+ buf_a[buf_idx + 7] = db * FLOAT_TYPE((sign & 128) != 0 ? -grid1.w : grid1.w);
+#elif defined(DATA_A_IQ2_XS)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 32; // 8 values per idx
+ const uint ib32 = (idx % 32) / 4; // 0..7
+ const uint ib8 = idx % 4; // 0..3
+
+ const float d = float(data_a[ib].d);
+ const uint scale = (data_a[ib].scales[ib32] >> (2 * (ib8 & 2))) & 0xf;
+ const FLOAT_TYPE db = FLOAT_TYPE(d * 0.25 * (0.5 + scale));
+ const uint qs = data_a[ib].qs[4 * ib32 + ib8];
+ const uint sign7 = qs >> 9;
+ const uint sign = sign7 | (bitCount(sign7) << 7);
+ const uvec2 grid = iq2xs_grid[qs & 511];
+ const vec4 grid0 = vec4(unpack8(grid.x));
+ const vec4 grid1 = vec4(unpack8(grid.y));
+
+ buf_a[buf_idx ] = db * FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x);
+ buf_a[buf_idx + 1] = db * FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y);
+ buf_a[buf_idx + 2] = db * FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z);
+ buf_a[buf_idx + 3] = db * FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w);
+ buf_a[buf_idx + 4] = db * FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x);
+ buf_a[buf_idx + 5] = db * FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y);
+ buf_a[buf_idx + 6] = db * FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z);
+ buf_a[buf_idx + 7] = db * FLOAT_TYPE((sign & 128) != 0 ? -grid1.w : grid1.w);
+#elif defined(DATA_A_IQ2_S)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 32; // 8 values per idx
+ const uint ib8 = idx % 32; // 0..31
+ const uint ib32 = ib8 / 4; // 0..7
+
+ const uint scale = (data_a[ib].scales[ib32] >> (2 * (ib8 & 2))) & 0xf;
+ const uint qs = data_a[ib].qs[ib8];
+ const uint qh = data_a[ib].qh[ib32];
+ const uint qhshift = 2 * (ib8 % 4);
+ const uint sign = data_a[ib].qs[QUANT_K / 8 + ib8];
+
+ const float d = float(data_a[ib].d);
+ const FLOAT_TYPE db = FLOAT_TYPE(d * 0.25 * (0.5 + scale));
+ const uvec2 grid = iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)];
+ const vec4 grid0 = vec4(unpack8(grid.x));
+ const vec4 grid1 = vec4(unpack8(grid.y));
+
+ buf_a[buf_idx ] = db * FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x);
+ buf_a[buf_idx + 1] = db * FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y);
+ buf_a[buf_idx + 2] = db * FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z);
+ buf_a[buf_idx + 3] = db * FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w);
+ buf_a[buf_idx + 4] = db * FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x);
+ buf_a[buf_idx + 5] = db * FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y);
+ buf_a[buf_idx + 6] = db * FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z);
+ buf_a[buf_idx + 7] = db * FLOAT_TYPE((sign & 128) != 0 ? -grid1.w : grid1.w);
+#elif defined(DATA_A_IQ3_XXS)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 64; // 4 values per idx
+ const uint iqs = idx % 64; // 0..63
+ const uint is = QUANT_K / 4 + 4 * (iqs / 8); // 8 values
+
+ const float d = float(data_a[ib].d);
+ const uint qs = data_a[ib].qs[iqs];
+ const uint signs = pack32(u8vec4(
+ data_a[ib].qs[is+0],
+ data_a[ib].qs[is+1],
+ data_a[ib].qs[is+2],
+ data_a[ib].qs[is+3]
+ ));
+ const float db = d * 0.5 * (0.5 + (signs >> 28));
+ const uint32_t sign7 = bitfieldExtract(signs, 7 * (int(iqs / 2) % 4), 7);
+ const uint sign = (sign7 | (bitCount(sign7) << 7)) >> (4 * (idx % 2));
+ const uint grid = iq3xxs_grid[qs];
+ const vec4 v = db * vec4(unpack8(grid));
+
+ buf_a[buf_idx ] = FLOAT_TYPE((sign & 1) != 0 ? -v.x : v.x);
+ buf_a[buf_idx + 1] = FLOAT_TYPE((sign & 2) != 0 ? -v.y : v.y);
+ buf_a[buf_idx + 2] = FLOAT_TYPE((sign & 4) != 0 ? -v.z : v.z);
+ buf_a[buf_idx + 3] = FLOAT_TYPE((sign & 8) != 0 ? -v.w : v.w);
+#elif defined(DATA_A_IQ3_S)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 64; // 4 values per idx
+ const uint iqs = idx % 64; // 0..63
+ const uint iqh = iqs / 8;
+
+ const float d = float(data_a[ib].d);
+ const uint qs = data_a[ib].qs[iqs];
+ const uint qh = data_a[ib].qh[iqh];
+ const int8_t sign = int8_t(data_a[ib].signs[iqs / 2] >> (4 * (idx % 2)));
+ const uint scale = data_a[ib].scales[iqs / 16];
+ const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(sign << 1, sign)));
+ const float db = d * (1 + 2 * ((scale >> (4 * (iqh & 1))) & 0xf));
+ const uint32_t grid = iq3s_grid[qs | ((qh << (8 - (iqs % 8))) & 256)];
+ const vec4 v = db * vec4(unpack8(grid));
+
+ buf_a[buf_idx ] = FLOAT_TYPE((sign & 1) != 0 ? -v.x : v.x);
+ buf_a[buf_idx + 1] = FLOAT_TYPE((sign & 2) != 0 ? -v.y : v.y);
+ buf_a[buf_idx + 2] = FLOAT_TYPE((sign & 4) != 0 ? -v.z : v.z);
+ buf_a[buf_idx + 3] = FLOAT_TYPE((sign & 8) != 0 ? -v.w : v.w);
+#elif defined(DATA_A_IQ4_XS)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_A;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint ib32 = (idx % 128) / 16; // 0..7
+ const uint iq = 16 * ib32 + 2 * (idx % 8);
+
+ const uint sl = (data_a[ib].scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF;
+ const uint sh = ((data_a[ib].scales_h) >> (2 * ib32)) & 3;
+ const uint qshift = (idx & 8) >> 1;
+ u8vec2 qs = u8vec2(data_a[ib].qs[iq], data_a[ib].qs[iq + 1]);
+ qs = (qs >> qshift) & uint8_t(0xF);
+
+ const float d = float(data_a[ib].d);
+ const vec2 v = d * float(int(sl | (sh << 4)) - 32) * vec2(kvalues_iq4nl[qs.x], kvalues_iq4nl[qs.y]);
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v.x);
+ buf_a[buf_idx + 1] = FLOAT_TYPE(v.y);
+#elif defined(DATA_A_IQ4_NL)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + 2 * row;
+
+ const uint ib = idx / 8;
+ const uint iqs = idx & 0x07;
+
+ const FLOAT_TYPE d = FLOAT_TYPE(data_a_packed16[ib].d);
+ const uint vui = uint(data_a_packed16[ib].qs[iqs]);
+
+ buf_a[buf_idx ] = FLOAT_TYPE(kvalues_iq4nl[vui & 0xF]) * d;
+ buf_a[buf_idx + 1 ] = FLOAT_TYPE(kvalues_iq4nl[bitfieldExtract(vui, 8, 4)]) * d;
+ buf_a[buf_idx + 16] = FLOAT_TYPE(kvalues_iq4nl[bitfieldExtract(vui, 4, 4)]) * d;
+ buf_a[buf_idx + 17] = FLOAT_TYPE(kvalues_iq4nl[vui >> 12]) * d;
+#elif defined(DATA_A_MXFP4)
+ const uint idx = pos_a + col * p.stride_a / LOAD_VEC_A + row;
+ const uint buf_idx = col * SHMEM_STRIDE + 2 * row;
+
+ const uint ib = idx / 8;
+ const uint iqs = (idx & 0x07) * 2;
+
+ const float d = e8m0_to_fp32(data_a[ib].e);
+ const uint vui = uint(data_a[ib].qs[iqs]);
+ const uint vui2 = uint(data_a[ib].qs[iqs+1]);
+
+ buf_a[buf_idx ] = FLOAT_TYPE(kvalues_mxfp4[vui & 0xF] * d);
+ buf_a[buf_idx + 16] = FLOAT_TYPE(kvalues_mxfp4[vui >> 4] * d);
+ buf_a[buf_idx + 1] = FLOAT_TYPE(kvalues_mxfp4[vui2 & 0xF] * d);
+ buf_a[buf_idx + 17] = FLOAT_TYPE(kvalues_mxfp4[vui2 >> 4] * d);
+#endif
+}
+
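+// B-tile loaders. Two variants are compiled: the plain one indexes B directly by tile column,
+// while the MUL_MAT_ID one resolves the source row through the row_ids indirection first.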
+#if !defined(MUL_MAT_ID)
+void load_b_to_shmem(const uint pos_b, const uint row, const uint col, const uint idx_n, const uint idx_k, const uint end_k) {
+#if LOAD_VEC_B == 8
+    // LOAD_VEC_B == 8 is not supported for a bf16 B_TYPE because bf16mat2x4 does not exist
+ const uint idx = pos_b + col * p.stride_b / LOAD_VEC_B + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_B;
+ FLOAT_TYPE_VEC8 bb = FLOAT_TYPE_VEC8(data_b[idx]);
+ buf_b[buf_idx + 0] = bb[0].x;
+ buf_b[buf_idx + 1] = bb[0].y;
+ buf_b[buf_idx + 2] = bb[0].z;
+ buf_b[buf_idx + 3] = bb[0].w;
+ buf_b[buf_idx + 4] = bb[1].x;
+ buf_b[buf_idx + 5] = bb[1].y;
+ buf_b[buf_idx + 6] = bb[1].z;
+ buf_b[buf_idx + 7] = bb[1].w;
+#elif LOAD_VEC_B == 4
+ const uint idx = pos_b + col * p.stride_b / LOAD_VEC_B + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_B;
+#if defined(DATA_B_BF16)
+ FLOAT_TYPE_VEC4 bb = FLOAT_TYPE_VEC4(TO_FLOAT_TYPE(data_b[idx]));
+#else
+ FLOAT_TYPE_VEC4 bb = FLOAT_TYPE_VEC4(data_b[idx]);
+#endif
+ buf_b[buf_idx + 0] = bb.x;
+ buf_b[buf_idx + 1] = bb.y;
+ buf_b[buf_idx + 2] = bb.z;
+ buf_b[buf_idx + 3] = bb.w;
+#else // LOAD_VEC_B == 1
+ if (idx_n < p.N && idx_k < end_k) {
+ buf_b[col * SHMEM_STRIDE + row] = TO_FLOAT_TYPE(data_b[pos_b + col * p.stride_b + row]);
+ } else {
+ buf_b[col * SHMEM_STRIDE + row] = FLOAT_TYPE(0.0f);
+ }
+#endif
+}
+#else
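+// MUL_MAT_ID variant: row_ids[col] supplies the batch index (.y) and source row (.x);
+// in the scalar path, rows at or beyond _ne1 (or k at or beyond end_k) are zero-filled.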
+void load_b_to_shmem(const uint pos_b, const uint row, const uint col, const uint ic, const uint _ne1, const uint idx_k, const uint end_k) {
+#if LOAD_VEC_B == 8
+    // LOAD_VEC_B == 8 is not supported for a bf16 B_TYPE because bf16mat2x4 does not exist
+ const u16vec2 row_idx = row_ids[col];
+ const uint idx = pos_b + row_idx.y * p.batch_stride_b / LOAD_VEC_B + (row_idx.x % p.ne11) * p.stride_b / LOAD_VEC_B + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_B;
+ FLOAT_TYPE_VEC8 bb = FLOAT_TYPE_VEC8(data_b[idx]);
+ buf_b[buf_idx + 0] = bb[0].x;
+ buf_b[buf_idx + 1] = bb[0].y;
+ buf_b[buf_idx + 2] = bb[0].z;
+ buf_b[buf_idx + 3] = bb[0].w;
+ buf_b[buf_idx + 4] = bb[1].x;
+ buf_b[buf_idx + 5] = bb[1].y;
+ buf_b[buf_idx + 6] = bb[1].z;
+ buf_b[buf_idx + 7] = bb[1].w;
+#elif LOAD_VEC_B == 4
+ const u16vec2 row_idx = row_ids[col];
+ const uint idx = pos_b + row_idx.y * p.batch_stride_b / LOAD_VEC_B + (row_idx.x % p.ne11) * p.stride_b / LOAD_VEC_B + row;
+ const uint buf_idx = col * SHMEM_STRIDE + row * LOAD_VEC_B;
+#if defined(DATA_B_BF16)
+ FLOAT_TYPE_VEC4 bb = FLOAT_TYPE_VEC4(TO_FLOAT_TYPE(data_b[idx]));
+#else
+ FLOAT_TYPE_VEC4 bb = FLOAT_TYPE_VEC4(data_b[idx]);
+#endif
+ buf_b[buf_idx + 0] = bb.x;
+ buf_b[buf_idx + 1] = bb.y;
+ buf_b[buf_idx + 2] = bb.z;
+ buf_b[buf_idx + 3] = bb.w;
+#else // LOAD_VEC_B == 1
+ const uint row_i = ic * BN + col;
+ if (row_i < _ne1 && idx_k < end_k) {
+ const u16vec2 row_idx = row_ids[col];
+ buf_b[col * SHMEM_STRIDE + row] = TO_FLOAT_TYPE(data_b[pos_b + row_idx.y * p.batch_stride_b + (row_idx.x % p.ne11) * p.stride_b + row]);
+ } else {
+ buf_b[col * SHMEM_STRIDE + row] = FLOAT_TYPE(0.0f);
+ }
+#endif
+}
+#endif
#if !defined(LOAD_VEC_A) || LOAD_VEC_A == 1
#define A_TYPE float
-#define A_TYPE32 float
#elif LOAD_VEC_A == 4
#define A_TYPE vec4
-#define A_TYPE32 vec4
#elif LOAD_VEC_A == 8
#define A_TYPE mat2x4
-#define A_TYPE32 mat2x4
#endif
#endif
#if !defined(LOAD_VEC_A) || LOAD_VEC_A == 1
#define A_TYPE float16_t
-#define A_TYPE32 float
#elif LOAD_VEC_A == 4
#define A_TYPE f16vec4
-#define A_TYPE32 vec4
#elif LOAD_VEC_A == 8
#define A_TYPE f16mat2x4
-#define A_TYPE32 mat2x4
#endif
#endif
std::string aligned_b_type_f32 = coopmat2 ? "float" : fp16 ? "mat2x4" : "vec4";
std::string aligned_b_type_f16 = coopmat2 ? "float16_t" : fp16 ? "f16mat2x4" : "f16vec4";
- std::map<std::string, std::string> base_dict = {
- {"FLOAT_TYPE_VEC2", (coopmat2 || fp16) ? "f16vec2" : "vec2"},
- };
+ std::map<std::string, std::string> base_dict;
std::string shader_name = "matmul";
if (matmul_id_type == MatMulIdType::DEFAULT) {
const std::string source_name = coopmat2 ? "mul_mm_cm2.comp" : "mul_mm.comp";
- auto const &FLOAT_TYPE = [&](const std::string &t) -> std::string {
- if (t == "bf16") {
- // scalar path promotes to float
- if (!coopmat && !coopmat2) {
- return "float";
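+    // Map a (vector width, element type) pair to the GLSL type name that backs the
+    // FLOAT_TYPE / FLOAT_TYPE_VEC2 / FLOAT_TYPE_VEC4 / FLOAT_TYPE_VEC8 shader macros.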
+ auto const &FLOAT_TYPE = [&](int vec, const std::string &t) -> std::string {
+ switch (vec) {
+ case 1:
+ if (t == "bf16") {
+ // scalar path promotes to float
+ if (!coopmat && !coopmat2) {
+ return "float";
+ }
+ return "bfloat16_t";
                }
- return "bfloat16_t";
- }
- if (coopmat2 || fp16) {
- return "float16_t";
+ if (coopmat2 || fp16) {
+ return "float16_t";
+ }
+ return "float";
+ case 2:
+ if (t == "bf16") {
+ // scalar path promotes to float
+ if (!coopmat && !coopmat2) {
+ return "vec2";
+ }
+ return "bf16vec2";
+ }
+ if (coopmat2 || fp16) {
+ return "f16vec2";
+ }
+ return "vec2";
+ case 4:
+ if (t == "bf16") {
+ // scalar path promotes to float
+ if (!coopmat && !coopmat2) {
+ return "vec4";
+ }
+ return "bf16vec4";
+ }
+ if (coopmat2 || fp16) {
+ return "f16vec4";
+ }
+ return "vec4";
+ case 8:
+ if (t == "bf16") {
+ // scalar path promotes to float
+ if (!coopmat && !coopmat2) {
+ return "mat2x4";
+ }
+ throw std::runtime_error("bf16 vec8 not supported");
+ }
+ if (coopmat2 || fp16) {
+ return "f16mat2x4";
+ }
+ return "mat2x4";
+ default:
+ throw std::runtime_error("invalid vector size");
}
- return "float";
+ };
+
+ const std::map<std::string, std::string> float_type_dict_f16 = {
+ {"FLOAT_TYPE", FLOAT_TYPE(1, "f16")},
+ {"FLOAT_TYPE_VEC2", FLOAT_TYPE(2, "f16")},
+ {"FLOAT_TYPE_VEC4", FLOAT_TYPE(4, "f16")},
+ {"FLOAT_TYPE_VEC8", FLOAT_TYPE(8, "f16")},
};
// Shaders with f16 B_TYPE
- string_to_spv(shader_name + "_f32_f16", source_name, merge_maps(base_dict, {{"FLOAT_TYPE", FLOAT_TYPE("f16")}, {"DATA_A_F32", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}, }), fp16, coopmat, coopmat2, f16acc);
- string_to_spv(shader_name + "_f32_f16_aligned", source_name, merge_maps(base_dict, {{"FLOAT_TYPE", FLOAT_TYPE("f16")}, {"DATA_A_F32", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"B_TYPE32", aligned_b_type_f32}, {"D_TYPE", "float"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_f32_f16", source_name, merge_maps(merge_maps(base_dict, float_type_dict_f16), {{"DATA_A_F32", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}, }), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_f32_f16_aligned", source_name, merge_maps(merge_maps(base_dict, float_type_dict_f16), {{"DATA_A_F32", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
- string_to_spv(shader_name + "_f16_aligned", source_name, merge_maps(base_dict, {{"FLOAT_TYPE", FLOAT_TYPE("f16")}, {"DATA_A_F16", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"B_TYPE32", aligned_b_type_f32}, {"D_TYPE", "float"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
- string_to_spv(shader_name + "_f16", source_name, merge_maps(base_dict, {{"FLOAT_TYPE", FLOAT_TYPE("f16")}, {"DATA_A_F16", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_f16", source_name, merge_maps(merge_maps(base_dict, float_type_dict_f16), {{"DATA_A_F16", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_f16_aligned", source_name, merge_maps(merge_maps(base_dict, float_type_dict_f16), {{"DATA_A_F16", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
// bf16
{
// scalar path promotes to float
std::string to_float_type = (coopmat || coopmat2) ? "uintBitsToBFloat16EXT" : "bf16_to_fp32";
+ const std::map<std::string, std::string> float_type_dict_bf16 = {
+ {"FLOAT_TYPE", FLOAT_TYPE(1, "bf16")},
+ {"FLOAT_TYPE_VEC2", FLOAT_TYPE(2, "bf16")},
+ {"FLOAT_TYPE_VEC4", FLOAT_TYPE(4, "bf16")},
+ };
+
// If bfloat16 is not supported, then only compile the scalar (promote to fp32) shader
#if !defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
if (!(coopmat || coopmat2))
#endif
{
- string_to_spv(shader_name + "_bf16_aligned", source_name, merge_maps(base_dict, {{"FLOAT_TYPE", FLOAT_TYPE("bf16")}, {"TO_FLOAT_TYPE", to_float_type}, {"DATA_A_BF16", "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", "4"}, {"B_TYPE", coopmat2 ? "bfloat16_t" : "u16vec4"}, {"B_TYPE32", "vec4"}, {"D_TYPE", "float"}, {"B_IS_FLOAT", "1"}, {"DATA_B_BF16", "1"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
- string_to_spv(shader_name + "_bf16", source_name, merge_maps(base_dict, {{"FLOAT_TYPE", FLOAT_TYPE("bf16")}, {"TO_FLOAT_TYPE", to_float_type}, {"DATA_A_BF16", "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", coopmat2 ? "bfloat16_t" : "uint16_t"}, {"D_TYPE", "float"}, {"B_IS_FLOAT", "1"}, {"DATA_B_BF16", "1"}}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_bf16_aligned", source_name, merge_maps(merge_maps(base_dict, float_type_dict_bf16), {{"TO_FLOAT_TYPE", to_float_type}, {"DATA_A_BF16", "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", "4"}, {"B_TYPE", coopmat2 ? "bfloat16_t" : "u16vec4"}, {"D_TYPE", "float"}, {"B_IS_FLOAT", "1"}, {"DATA_B_BF16", "1"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_bf16", source_name, merge_maps(merge_maps(base_dict, float_type_dict_bf16), {{"TO_FLOAT_TYPE", to_float_type}, {"DATA_A_BF16", "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", coopmat2 ? "bfloat16_t" : "uint16_t"}, {"D_TYPE", "float"}, {"B_IS_FLOAT", "1"}, {"DATA_B_BF16", "1"}}), fp16, coopmat, coopmat2, f16acc);
}
}
// For aligned matmul loads
std::string load_vec_a = (coopmat2 || tname == "f32" || tname == "f16" || tname == "bf16") ? load_vec : load_vec_quant;
+ const std::map<std::string, std::string> float_type_dict = {
+ {"FLOAT_TYPE", FLOAT_TYPE(1, tname)},
+ {"FLOAT_TYPE_VEC2", FLOAT_TYPE(2, tname)},
+ {"FLOAT_TYPE_VEC4", FLOAT_TYPE(4, tname)},
+ {"FLOAT_TYPE_VEC8", FLOAT_TYPE(8, tname)},
+ };
+
// don't generate f32 variants for coopmat2
if (!coopmat2) {
- string_to_spv(shader_name + "_" + tname + "_f32", source_name, merge_maps(base_dict, {{"FLOAT_TYPE", FLOAT_TYPE(tname)}, {data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}), fp16, coopmat, coopmat2, f16acc);
- string_to_spv(shader_name + "_" + tname + "_f32_aligned", source_name, merge_maps(base_dict, {{"FLOAT_TYPE", FLOAT_TYPE(tname)}, {data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f32}, {"B_TYPE32", aligned_b_type_f32}, {"D_TYPE", "float"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_" + tname + "_f32", source_name, merge_maps(merge_maps(base_dict, float_type_dict), {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_" + tname + "_f32_aligned", source_name, merge_maps(merge_maps(base_dict, float_type_dict), {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f32}, {"D_TYPE", "float"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
}
if (tname != "f16" && tname != "f32") {
- string_to_spv(shader_name + "_" + tname + "_f16", source_name, merge_maps(base_dict, {{"FLOAT_TYPE", FLOAT_TYPE(tname)}, {data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}), fp16, coopmat, coopmat2, f16acc);
- string_to_spv(shader_name + "_" + tname + "_f16_aligned", source_name, merge_maps(base_dict, {{"FLOAT_TYPE", FLOAT_TYPE(tname)}, {data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"B_TYPE32", aligned_b_type_f32}, {"D_TYPE", "float"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_" + tname + "_f16", source_name, merge_maps(merge_maps(base_dict, float_type_dict), {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_" + tname + "_f16_aligned", source_name, merge_maps(merge_maps(base_dict, float_type_dict), {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
}
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
if (!coopmat && !coopmat2 && matmul_id_type == MatMulIdType::NONE && is_legacy_quant(tname)) {
- string_to_spv(shader_name + "_" + tname + "_q8_1", "mul_mmq.comp", merge_maps(base_dict, {{"FLOAT_TYPE", FLOAT_TYPE(tname)}, {data_a_key, "1"}, {"D_TYPE", "float"},}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_" + tname + "_q8_1", "mul_mmq.comp", merge_maps(merge_maps(base_dict, float_type_dict), {{data_a_key, "1"}, {"D_TYPE", "float"},}), fp16, coopmat, coopmat2, f16acc);
}
#endif
}