gemv_noshuffle_q4_1_f32
gemm_noshuffle_q4_1_f32
gemv_noshuffle_general_q8_0_f32
+ gemv_noshuffle_q4_k_f32
+ gemm_noshuffle_q4_k_f32
gemv_noshuffle_q6_k_f32
gemm_noshuffle_q6_k_f32
mul
cl_kernel kernel_restore_block_q4_0_noshuffle;
cl_kernel kernel_convert_block_q4_1_noshuffle;
cl_kernel kernel_restore_block_q4_1_noshuffle;
+ cl_kernel kernel_convert_block_q4_K_noshuffle;
+ cl_kernel kernel_restore_block_q4_K_noshuffle;
cl_kernel kernel_convert_block_q4_K, kernel_restore_block_q4_K;
cl_kernel kernel_convert_block_q6_K, kernel_restore_block_q6_K;
cl_kernel kernel_mul_mat_q4_0_f32_1d_8x_flat, kernel_mul_mat_q4_0_f32_1d_16x_flat;
cl_kernel kernel_gemm_noshuffle_q4_1_f32;
cl_kernel kernel_mul_mm_q8_0_f32_8x4;
cl_kernel kernel_mul_mat_vec_q8_0_f32;
+ cl_kernel kernel_gemv_noshuffle_q4_k_f32;
+ cl_kernel kernel_gemm_noshuffle_q4_k_f32;
cl_kernel kernel_gemv_noshuffle_q6_K_f32;
cl_kernel kernel_gemm_noshuffle_q6_K_f32;
#endif // GGML_OPENCL_USE_ADRENO_KERNELS
CL_CHECK((backend_ctx->kernel_restore_block_q8_0_trans = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q8_0_trans", &err), err));
CL_CHECK((backend_ctx->kernel_convert_block_q4_K = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_K", &err), err));
CL_CHECK((backend_ctx->kernel_restore_block_q4_K = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q4_K", &err), err));
+ CL_CHECK((backend_ctx->kernel_convert_block_q4_K_noshuffle = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_K_noshuffle", &err), err));
+ CL_CHECK((backend_ctx->kernel_restore_block_q4_K_noshuffle = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q4_K_noshuffle", &err), err));
CL_CHECK((backend_ctx->kernel_convert_block_q6_K = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q6_K", &err), err));
CL_CHECK((backend_ctx->kernel_restore_block_q6_K = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q6_K", &err), err));
CL_CHECK((backend_ctx->kernel_convert_block_q6_K_noshuffle = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q6_K_noshuffle", &err), err));
GGML_LOG_CONT(".");
}
+ // gemm_noshuffle_q4_k_f32
+ {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+ const std::string kernel_src {
+ #include "gemm_noshuffle_q4_k_f32.cl.h"
+ };
+#else
+ const std::string kernel_src = read_file("gemm_noshuffle_q4_k_f32.cl");
+#endif
+ cl_program prog = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+ CL_CHECK((backend_ctx->kernel_gemm_noshuffle_q4_k_f32 = clCreateKernel(prog, "kernel_gemm_noshuffle_q4_k_f32", &err), err));
+ CL_CHECK(clReleaseProgram(prog));
+ GGML_LOG_CONT(".");
+ }
+
+ // gemv_noshuffle_q4_k_f32
+ {
+ std::string CL_gemv_compile_opts = std::string("-cl-std=") + opencl_c_std +
+ " -cl-mad-enable ";
+ if (backend_ctx->has_vector_subgroup_broadcast) {
+ CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAST ";
+ }
+
+#ifdef GGML_OPENCL_EMBED_KERNELS
+ const std::string kernel_src {
+ #include "gemv_noshuffle_q4_k_f32.cl.h"
+ };
+#else
+ const std::string kernel_src = read_file("gemv_noshuffle_q4_k_f32.cl");
+#endif
+
+ cl_program prog = build_program_from_source(
+ backend_ctx->context, backend_ctx->device, kernel_src.c_str(), CL_gemv_compile_opts);
+
+ CL_CHECK((backend_ctx->kernel_gemv_noshuffle_q4_k_f32 = clCreateKernel(prog, "kernel_gemv_noshuffle_q4_k_f32", &err), err));
+ CL_CHECK(clReleaseProgram(prog));
+ GGML_LOG_CONT(".");
+ }
+
std::string CL_moe_compile_opts = std::string("-cl-std=") + opencl_c_std +
" -cl-mad-enable "
" -cl-fast-relaxed-math";
CL_BUFFER_CREATE_TYPE_REGION, &region, &err);
CL_CHECK(err);
+ #ifdef GGML_OPENCL_USE_ADRENO_KERNELS
cl_kernel kernel = backend_ctx->kernel_convert_block_q4_K;
+ if (use_adreno_kernels(backend_ctx, tensor)) {
+ kernel = backend_ctx->kernel_convert_block_q4_K_noshuffle;
+ }
+ #else
+ cl_kernel kernel = backend_ctx->kernel_convert_block_q4_K;
+ #endif
+
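+ // nibble masks are passed to both convert kernel variants; the shuffled
+ // kernel ignores them (they only keep the signatures consistent)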
+ cl_uchar mask_0F = 0x0F;
+ cl_uchar mask_F0 = 0xF0;
+
CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device));
CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->q));
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->s));
CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra->d));
CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra->dm));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_uchar), &mask_0F));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_uchar), &mask_F0));
size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
size_t local_work_size[] = {64, 1, 1};
CL_CHECK(clReleaseMemObject(data_device));
tensor->extra = extra;
+#ifdef GGML_OPENCL_USE_ADRENO_KERNELS
+ if (use_adreno_kernels(backend_ctx, tensor)) {
+
+ int M = tensor->ne[1];
+ int K = tensor->ne[0];
+
+ GGML_ASSERT(K % 32 == 0);
+
+ // Transpose q, d, dm as 16-bit elements: q as a K/4 x M ushort matrix, d and dm as K/256 x M half matrices
+ transpose_2d_as_16b(backend_ctx, extra->q, extra->q, size_q, K/4, M);
+ transpose_2d_as_16b(backend_ctx, extra->d, extra->d, size_d, K/256, M);
+ transpose_2d_as_16b(backend_ctx, extra->dm, extra->dm, size_dm, K/256, M);
+ }
+#endif // GGML_OPENCL_USE_ADRENO_KERNELS
return;
}
if (tensor->type == GGML_TYPE_Q6_K) {
ggml_nbytes(tensor), NULL, &err);
CL_CHECK(err);
+ cl_uchar mask_0F = 0x0F;
+ cl_uchar mask_F0 = 0xF0;
+
+#ifdef GGML_OPENCL_USE_ADRENO_KERNELS
+ if (use_adreno_kernels(backend_ctx, tensor)) {
+ int M = tensor->ne[1];
+ int K = tensor->ne[0];
+
+ size_t size_q = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*ggml_blck_size(tensor->type)/2;
+ size_t size_d = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*sizeof(ggml_fp16_t);
+ size_t size_dm = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*sizeof(ggml_fp16_t);
+
+ static ggml_cl_buffer buf_trans_q;
+ static ggml_cl_buffer buf_trans_d;
+ static ggml_cl_buffer buf_trans_dm;
+
+ buf_trans_q.allocate(backend_ctx->context, size_q);
+ buf_trans_d.allocate(backend_ctx->context, size_d);
+ buf_trans_dm.allocate(backend_ctx->context, size_dm);
+
+ // Transpose q, d, dm back
+ transpose_2d_as_16b(backend_ctx, extra->q, buf_trans_q.buffer, size_q, M, K/4);
+ transpose_2d_as_16b(backend_ctx, extra->d, buf_trans_d.buffer, size_d, M, K/256);
+ transpose_2d_as_16b(backend_ctx, extra->dm, buf_trans_dm.buffer, size_dm, M, K/256);
+
+ cl_kernel kernel = backend_ctx->kernel_restore_block_q4_K_noshuffle;
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &buf_trans_q.buffer));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->s));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &buf_trans_d.buffer));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &buf_trans_dm.buffer));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &data_device));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_uchar), &mask_0F));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_uchar), &mask_F0));
+
+ size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
+ size_t local_work_size[] = {1, 1, 1};
+
+ CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL,
+ global_work_size, local_work_size, 0, NULL, NULL));
+ CL_CHECK(clEnqueueReadBuffer(queue, data_device, CL_TRUE, offset,
+ size, data, 0, NULL, NULL));
+ CL_CHECK(clReleaseMemObject(data_device));
+ return;
+ }
+#endif // GGML_OPENCL_USE_ADRENO_KERNELS
+
cl_kernel kernel = backend_ctx->kernel_restore_block_q4_K;
CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q));
CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->s));
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->d));
CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra->dm));
CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &data_device));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_uchar), &mask_0F));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_uchar), &mask_F0));
size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
size_t local_work_size[] = {1, 1, 1};
#endif
}
+static void ggml_cl_mul_mat_q4_k_f32_adreno(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+#ifdef GGML_OPENCL_USE_ADRENO_KERNELS
+ GGML_ASSERT(src0);
+ GGML_ASSERT(src0->extra);
+ GGML_ASSERT(src1);
+ GGML_ASSERT(src1->extra);
+ GGML_ASSERT(dst);
+ GGML_ASSERT(dst->extra);
+
+ ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+
+ ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra;
+ ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+ ggml_tensor_extra_cl_q4_K * extra0_q4_k = (ggml_tensor_extra_cl_q4_K *)src0->extra;
+
+ cl_ulong offset1 = extra1->offset + src1->view_offs;
+ cl_ulong offsetd = extrad->offset + dst->view_offs;
+
+ const int ne00 = src0->ne[0];
+ const int ne01 = src0->ne[1];
+
+ const int ne1 = dst->ne[1];
+
+ GGML_ASSERT(ne00 % ggml_blck_size(src0->type) == 0);
+
+ cl_context context = backend_ctx->context;
+ cl_kernel kernel;
+
+ cl_int err;
+ cl_image_format img_fmt;
+ cl_image_desc img_desc;
+ cl_buffer_region region;
+
+ int M = ne01;
+ int N = ne1;
+ int K = ne00;
+
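+ // bit masks for decoding the packed 6-bit scales/mins (see get_scale_min_k4 in the kernels)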
+ cl_uchar mask_d6 = 0x3F;
+ cl_uchar mask_d4 = 0x0F;
+ cl_uchar mask_hi2 = 0xC0;
+
+ if (ne1 == 1) {
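+ // N == 1 (single activation column): run the gemv kernel over 1d image buffers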
+ cl_mem q_img = nullptr;
+ cl_mem b_sub_buf = nullptr;
+ cl_mem b_img = nullptr;
+
+ // image for q
+ img_fmt = { CL_R, CL_UNSIGNED_INT32};
+ memset(&img_desc, 0, sizeof(img_desc));
+ img_desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+ img_desc.image_width = M * K / 2 / 4;
+ img_desc.buffer = extra0_q4_k->q;
+ CL_CHECK((q_img = clCreateImage(context, CL_MEM_READ_ONLY, &img_fmt, &img_desc, NULL, &err), err));
+
+ // subbuffer for activations
+ region.origin = offset1;
+ region.size = K * N * sizeof(float);
+ CL_CHECK((b_sub_buf = clCreateSubBuffer(extra1->data_device, 0, CL_BUFFER_CREATE_TYPE_REGION, &region, &err), err));
+
+ // image for activations
+ img_fmt = {CL_RGBA, CL_FLOAT};
+ memset(&img_desc, 0, sizeof(img_desc));
+ img_desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+ img_desc.image_width = K * N / 4;
+ img_desc.buffer = b_sub_buf;
+ CL_CHECK((b_img = clCreateImage(context, CL_MEM_READ_ONLY, &img_fmt, &img_desc, NULL, &err), err));
+
+ kernel = backend_ctx->kernel_gemv_noshuffle_q4_k_f32;
+
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &q_img));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_k->d));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra0_q4_k->dm));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra0_q4_k->s));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &b_img));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &offsetd));
+ CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_int), &ne00));
+ CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_int), &ne01));
+ CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_uchar), &mask_d6));
+ CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_uchar), &mask_d4));
+ CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_uchar), &mask_hi2));
+
+ size_t local_work_size[3] = {64, 4, 1};
+ size_t global_work_size[3] = {(size_t)CEIL_DIV(ne01/2, 64)*64, 4, 1};
+
+ backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+
+ CL_CHECK(clReleaseMemObject(q_img));
+ CL_CHECK(clReleaseMemObject(b_sub_buf));
+ CL_CHECK(clReleaseMemObject(b_img));
+ } else {
+
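+ // N > 1: transpose the activations to fp16 images and run the tiled gemm kernel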
+ cl_mem b_sub_buf = nullptr;
+ cl_mem b_sub_buf_trans = nullptr;
+ cl_mem b_img = nullptr;
+ cl_mem b_img_trans = nullptr;
+
+ // subbuffer for activations
+ region.origin = offset1;
+ region.size = K * N * sizeof(float);
+ CL_CHECK((b_sub_buf = clCreateSubBuffer(extra1->data_device, 0, CL_BUFFER_CREATE_TYPE_REGION, &region, &err), err));
+
+ // image for activations
+ img_fmt = {CL_RGBA, CL_FLOAT};
+ memset(&img_desc, 0, sizeof(img_desc));
+ img_desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+ img_desc.image_width = K * N / 4;
+ img_desc.buffer = b_sub_buf;
+ CL_CHECK((b_img = clCreateImage(context, CL_MEM_READ_ONLY, &img_fmt, &img_desc, NULL, &err), err));
+
+ // pad N to multiple of 8
+ int extra_elements = N % 8;
+ int padding = 0;
+ if (extra_elements > 0){
+ padding = 8 - extra_elements;
+ }
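+ // the gemm kernel writes 8 output columns per work-item, so the transposed
+ // activations are padded to a multiple of 8; stores beyond the real N (ne1)
+ // are skipped inside the kernel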
+
+ // subbuffer for transposed activations
+ region.origin = 0;
+ region.size = K * (N + padding) * sizeof(float)/2;
+ backend_ctx->prealloc_act_trans.allocate(context, region.size);
+ CL_CHECK((b_sub_buf_trans = clCreateSubBuffer(backend_ctx->prealloc_act_trans.buffer, 0, CL_BUFFER_CREATE_TYPE_REGION, &region, &err), err));
+
+ // image for transposed activations
+ img_fmt = {CL_RGBA, CL_HALF_FLOAT};
+ memset(&img_desc, 0, sizeof(img_desc));
+ img_desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+ img_desc.image_width = K * (N + padding) / 4;
+ img_desc.buffer = b_sub_buf_trans;
+ CL_CHECK((b_img_trans = clCreateImage(context, 0, &img_fmt, &img_desc, NULL, &err), err));
+
+ // transpose activations
+ int height_B = N/4;
+ if (height_B == 0) {
+ height_B = 1;
+ }
+ int width_B = K/4;
+ int padded_height_B = (N + padding)/4;
+
+ kernel = backend_ctx->kernel_transpose_32_16;
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &b_img));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &b_img_trans));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(int), &height_B));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(int), &width_B));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &padded_height_B));
+
+ size_t local_work_size_t[2] = { 1, 16 };
+ size_t global_work_size_t[2] = { (size_t)width_B, (size_t)padded_height_B };
+ backend_ctx->enqueue_ndrange_kernel(kernel, 2, global_work_size_t, local_work_size_t, dst);
+
+ // gemm
+ kernel = backend_ctx->kernel_gemm_noshuffle_q4_k_f32;
+ int padded_N = N + padding;
+
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q4_k->q));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_k->s));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra0_q4_k->d));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra0_q4_k->dm));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &b_img_trans));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &offsetd));
+ CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_int), &ne01));
+ CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_int), &padded_N));
+ CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_int), &ne00));
+ CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_int), &ne1));
+ CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_uchar), &mask_d6));
+ CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_uchar), &mask_d4));
+ CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_uchar), &mask_hi2));
+
+ size_t global_work_size[3] = {(size_t)CEIL_DIV(ne1, 8), (size_t)CEIL_DIV(ne01, 4), 1};
+ size_t local_work_size[3] = {1, 128, 1};
+
+ backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+ CL_CHECK(clReleaseMemObject(b_sub_buf));
+ CL_CHECK(clReleaseMemObject(b_sub_buf_trans));
+ CL_CHECK(clReleaseMemObject(b_img));
+ CL_CHECK(clReleaseMemObject(b_img_trans));
+ }
+#else
+ GGML_UNUSED(backend);
+ GGML_UNUSED(src0);
+ GGML_UNUSED(src1);
+ GGML_UNUSED(dst);
+#endif
+}
+
static void ggml_cl_mul_mat_q6_K_f32_adreno(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
#ifdef GGML_OPENCL_USE_ADRENO_KERNELS
GGML_ASSERT(src0);
return;
}
+ // q4_k x fp32
+ if (src0t == GGML_TYPE_Q4_K && src1t == GGML_TYPE_F32) {
+ ggml_cl_mul_mat_q4_k_f32_adreno(backend, src0, src1, dst);
+ return;
+ }
+
// q6_K x fp32
if (src0t == GGML_TYPE_Q6_K && src1t == GGML_TYPE_F32) {
ggml_cl_mul_mat_q6_K_f32_adreno(backend, src0, src1, dst);
// Convert the block_q4_K format to 4 separate arrays (AOS -> SOA).
// This kernel does not deshuffle the bits.
// Each thread processes a super block.
+// The mask arguments exist only to keep the signature consistent with the
+// no-shuffle variant; they are not used in this kernel.
//------------------------------------------------------------------------------
kernel void kernel_convert_block_q4_K(
global struct block_q4_K * src0,
global uchar * dst_q,
global uchar * dst_s,
global half * dst_d,
- global half * dst_dm
+ global half * dst_dm,
+ uchar mask_0F,
+ uchar mask_F0
) {
global struct block_q4_K * b = (global struct block_q4_K *) src0 + get_global_id(0);
global uchar * q = (global uchar *) dst_q + QK_K/2*get_global_id(0);
// Restore block_q4_K from flattened arrays.
// Each thread processes a super block.
+// The mask arguments only keep the signature consistent with the no-shuffle variant; they are not used here.
kernel void kernel_restore_block_q4_K(
global uchar * src_q,
global uchar * src_s,
global half * src_d,
global half * src_dm,
- global struct block_q4_K * dst
+ global struct block_q4_K * dst,
+ uchar mask_0F,
+ uchar mask_F0
) {
global struct block_q4_K * b = (global struct block_q4_K *) dst + get_global_id(0);
global uchar * q = (global uchar *) src_q + QK_K/2*get_global_id(0);
}
}
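+
+//------------------------------------------------------------------------------
+// kernel_convert_block_q4_K_noshuffle
+// Convert the block_q4_K format to 4 separate arrays (AOS -> SOA) and
+// deinterleave the 4-bit weights: within each 32-byte group, the low nibbles
+// of a source byte pair are packed into the first 16 bytes and the high
+// nibbles into the next 16, so the no-shuffle gemv/gemm kernels can read the
+// weights in order. Each thread processes a super block.
+//------------------------------------------------------------------------------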
+kernel void kernel_convert_block_q4_K_noshuffle(
+ global struct block_q4_K * src0,
+ global uchar * dst_q,
+ global uchar * dst_s,
+ global half * dst_d,
+ global half * dst_dm,
+ uchar mask_0F,
+ uchar mask_F0
+) {
+ global struct block_q4_K * b = (global struct block_q4_K *) src0 + get_global_id(0);
+ global uchar * q = (global uchar *) dst_q + QK_K/2 * get_global_id(0);
+ global uchar * s = (global uchar *) dst_s + K_SCALE_SIZE * get_global_id(0);
+ global half * d = (global half *) dst_d + get_global_id(0);
+ global half * dm = (global half *) dst_dm + get_global_id(0);
+
+ *d = b->d;
+ *dm = b->dm;
+
+ for (int i = 0; i < QK_K / 64; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ uchar x0 = b->q[i*32 + 2*j];
+ uchar x1 = b->q[i*32 + 2*j + 1];
+ q[i*32 + j] = convert_uchar(x0 & mask_0F) | convert_uchar((x1 & mask_0F) << 4);
+ q[i*32 + j + 16] = convert_uchar((x0 & mask_F0) >> 4) | convert_uchar(x1 & mask_F0);
+ }
+ }
+
+ for (int i = 0; i < K_SCALE_SIZE; ++i) {
+ s[i] = b->s[i];
+ }
+}
+
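+//------------------------------------------------------------------------------
+// kernel_restore_block_q4_K_noshuffle
+// Restore block_q4_K from the flattened arrays written by the convert kernel
+// above (inverse nibble packing). Each thread processes a super block.
+//------------------------------------------------------------------------------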
+kernel void kernel_restore_block_q4_K_noshuffle(
+ global uchar * src_q,
+ global uchar * src_s,
+ global half * src_d,
+ global half * src_dm,
+ global struct block_q4_K * dst,
+ uchar mask_0F,
+ uchar mask_F0
+) {
+ global struct block_q4_K * b = (global struct block_q4_K *) dst + get_global_id(0);
+ global uchar * q = (global uchar *) src_q + QK_K/2 * get_global_id(0);
+ global uchar * s = (global uchar *) src_s + K_SCALE_SIZE * get_global_id(0);
+ global half * d = (global half *) src_d + get_global_id(0);
+ global half * dm = (global half *) src_dm + get_global_id(0);
+
+ b->d = *d;
+ b->dm = *dm;
+
+ for (int i = 0; i < QK_K / 64; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ uchar lo = q[i*32 + j];
+ uchar hi = q[i*32 + j + 16];
+ b->q[i*32 + 2*j] = convert_uchar((lo & mask_0F) | ((hi & mask_0F) << 4));
+ b->q[i*32 + 2*j + 1] = convert_uchar(((lo & mask_F0) >> 4) | (hi & mask_F0));
+ }
+ }
+
+ for (int i = 0; i < K_SCALE_SIZE; ++i) {
+ b->s[i] = s[i];
+ }
+}
+
//------------------------------------------------------------------------------
// kernel_convert_block_q6_K
// Convert the block_q6_K format to 3 separate arrays (AOS -> SOA).
--- /dev/null
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+
+#ifdef cl_qcom_reqd_sub_group_size
+#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
+#define ADRENO_GPU 1
+#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
+#endif
+#define QK_K 256
+#define K_SCALE_SIZE 12
+
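+// Decode the 6-bit scale and min for sub-block j of a q4_K super block from
+// its 12-byte scale field: for j < 4 the low 6 bits of bytes 0-3 hold the
+// scales and bytes 4-7 the mins; for j >= 4 the low 4 bits come from the
+// nibbles of bytes 8-11 and the high 2 bits from the top bits of bytes 0-3
+// (scales) and 4-7 (mins).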
+inline void get_scale_min_k4(
+ int j,
+ global const uchar * q,
+ uchar * d,
+ uchar * m,
+ uchar mask_d6,
+ uchar mask_d4,
+ uchar mask_hi2
+) {
+ if (j < 4) {
+ *d = q[j] & mask_d6;
+ *m = q[j+4] & mask_d6;
+ } else {
+ *d = (q[j+4] & mask_d4) | ((q[j-4] & mask_hi2) >> 2);
+ *m = ((q[j+4] >> 4) & mask_d4) | ((q[j] & mask_hi2) >> 2);
+ }
+}
+
+#ifdef ADRENO_GPU
+REQD_SUBGROUP_SIZE_128
+#endif
+kernel void kernel_gemm_noshuffle_q4_k_f32(
+ global const ushort * src0_q,
+ global const uchar * src0_s,
+ global const half * src0_d,
+ global const half * src0_dm,
+ read_only image1d_buffer_t src1,
+ global float * dst,
+ ulong offsetd,
+ int m,
+ int n,
+ int k,
+ int n_no_padding,
+ uchar mask_d6,
+ uchar mask_d4,
+ uchar mask_hi2
+) {
+ dst = (global float *)((global char *)dst + offsetd);
+ int n_4 = n >> 2;
+ int gy = get_global_id(0);
+ int gx = get_global_id(1);
+ int gx_2 = gx << 2;
+
+ half8 c0 = 0, c1 = 0, c2 = 0, c3 = 0;
+ half8 B;
+ half4 dequantized_weights;
+
+ int num_blocks_K = k / QK_K;
+
+ global const ushort * weight_ptr = src0_q + gx_2;
+ global const half * d_ptr = src0_d + gx_2;
+ global const half * dm_ptr = src0_dm + gx_2;
+
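+ // each work-item accumulates a 4 (weight rows) x 8 (activation columns)
+ // output tile, walking K in steps of 32 (one q4_K sub-block per step)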
+ for (int i = 0; i < k; i += 32) {
+ int sb_idx = i / QK_K;
+ int sub_idx = (i / 32) % 8;
+
+ half4 d = vload4(0, d_ptr + sb_idx * m);
+ half4 dm = vload4(0, dm_ptr + sb_idx * m);
+
+ global const uchar * sc0 = src0_s + (gx_2+0) * num_blocks_K * K_SCALE_SIZE + sb_idx * K_SCALE_SIZE;
+ global const uchar * sc1 = src0_s + (gx_2+1) * num_blocks_K * K_SCALE_SIZE + sb_idx * K_SCALE_SIZE;
+ global const uchar * sc2 = src0_s + (gx_2+2) * num_blocks_K * K_SCALE_SIZE + sb_idx * K_SCALE_SIZE;
+ global const uchar * sc3 = src0_s + (gx_2+3) * num_blocks_K * K_SCALE_SIZE + sb_idx * K_SCALE_SIZE;
+
+ uchar sv0, mn0, sv1, mn1, sv2, mn2, sv3, mn3;
+ get_scale_min_k4(sub_idx, sc0, &sv0, &mn0, mask_d6, mask_d4, mask_hi2);
+ get_scale_min_k4(sub_idx, sc1, &sv1, &mn1, mask_d6, mask_d4, mask_hi2);
+ get_scale_min_k4(sub_idx, sc2, &sv2, &mn2, mask_d6, mask_d4, mask_hi2);
+ get_scale_min_k4(sub_idx, sc3, &sv3, &mn3, mask_d6, mask_d4, mask_hi2);
+
+ half4 scale = convert_half4(convert_float4(d) * convert_float4((uchar4)(sv0, sv1, sv2, sv3)));
+ half4 mval = convert_half4(convert_float4(dm) * convert_float4((uchar4)(mn0, mn1, mn2, mn3)));
+
+ for (int l = 0; l < 32; l += 4) {
+ int ki = i + l;
+ ushort4 bits4 = vload4(0, weight_ptr + (ki/4) * m);
+
+ // j=0
+ B.s0123 = read_imageh(src1, gy*2 + (ki+0) * n_4);
+ B.s4567 = read_imageh(src1, gy*2+1 + (ki+0) * n_4);
+ dequantized_weights.s0 = (bits4.s0 & 0x000F) * scale.s0 - mval.s0;
+ dequantized_weights.s1 = (bits4.s1 & 0x000F) * scale.s1 - mval.s1;
+ dequantized_weights.s2 = (bits4.s2 & 0x000F) * scale.s2 - mval.s2;
+ dequantized_weights.s3 = (bits4.s3 & 0x000F) * scale.s3 - mval.s3;
+ c0 += B * dequantized_weights.s0;
+ c1 += B * dequantized_weights.s1;
+ c2 += B * dequantized_weights.s2;
+ c3 += B * dequantized_weights.s3;
+
+ // j=1
+ B.s0123 = read_imageh(src1, gy*2 + (ki+1) * n_4);
+ B.s4567 = read_imageh(src1, gy*2+1 + (ki+1) * n_4);
+ dequantized_weights.s0 = ((bits4.s0 & 0x00F0) >> 4) * scale.s0 - mval.s0;
+ dequantized_weights.s1 = ((bits4.s1 & 0x00F0) >> 4) * scale.s1 - mval.s1;
+ dequantized_weights.s2 = ((bits4.s2 & 0x00F0) >> 4) * scale.s2 - mval.s2;
+ dequantized_weights.s3 = ((bits4.s3 & 0x00F0) >> 4) * scale.s3 - mval.s3;
+ c0 += B * dequantized_weights.s0;
+ c1 += B * dequantized_weights.s1;
+ c2 += B * dequantized_weights.s2;
+ c3 += B * dequantized_weights.s3;
+
+ // j=2
+ B.s0123 = read_imageh(src1, gy*2 + (ki+2) * n_4);
+ B.s4567 = read_imageh(src1, gy*2+1 + (ki+2) * n_4);
+ dequantized_weights.s0 = ((bits4.s0 & 0x0F00) >> 8) * scale.s0 - mval.s0;
+ dequantized_weights.s1 = ((bits4.s1 & 0x0F00) >> 8) * scale.s1 - mval.s1;
+ dequantized_weights.s2 = ((bits4.s2 & 0x0F00) >> 8) * scale.s2 - mval.s2;
+ dequantized_weights.s3 = ((bits4.s3 & 0x0F00) >> 8) * scale.s3 - mval.s3;
+ c0 += B * dequantized_weights.s0;
+ c1 += B * dequantized_weights.s1;
+ c2 += B * dequantized_weights.s2;
+ c3 += B * dequantized_weights.s3;
+
+ // j=3
+ B.s0123 = read_imageh(src1, gy*2 + (ki+3) * n_4);
+ B.s4567 = read_imageh(src1, gy*2+1 + (ki+3) * n_4);
+ dequantized_weights.s0 = ((bits4.s0 & 0xF000) >> 12) * scale.s0 - mval.s0;
+ dequantized_weights.s1 = ((bits4.s1 & 0xF000) >> 12) * scale.s1 - mval.s1;
+ dequantized_weights.s2 = ((bits4.s2 & 0xF000) >> 12) * scale.s2 - mval.s2;
+ dequantized_weights.s3 = ((bits4.s3 & 0xF000) >> 12) * scale.s3 - mval.s3;
+ c0 += B * dequantized_weights.s0;
+ c1 += B * dequantized_weights.s1;
+ c2 += B * dequantized_weights.s2;
+ c3 += B * dequantized_weights.s3;
+ }
+ }
+
+ int idx = (gy<<3)*m + (gx<<2);
+
+ if (idx+3 < m*n_no_padding) {
+ vstore4((float4)(c0.s0, c1.s0, c2.s0, c3.s0), 0, dst + idx);
+ idx += m;
+ }
+ if (idx+3 < m*n_no_padding) {
+ vstore4((float4)(c0.s1, c1.s1, c2.s1, c3.s1), 0, dst + idx);
+ idx += m;
+ }
+ if (idx+3 < m*n_no_padding) {
+ vstore4((float4)(c0.s2, c1.s2, c2.s2, c3.s2), 0, dst + idx);
+ idx += m;
+ }
+ if (idx+3 < m*n_no_padding) {
+ vstore4((float4)(c0.s3, c1.s3, c2.s3, c3.s3), 0, dst + idx);
+ idx += m;
+ }
+ if (idx+3 < m*n_no_padding) {
+ vstore4((float4)(c0.s4, c1.s4, c2.s4, c3.s4), 0, dst + idx);
+ idx += m;
+ }
+ if (idx+3 < m*n_no_padding) {
+ vstore4((float4)(c0.s5, c1.s5, c2.s5, c3.s5), 0, dst + idx);
+ idx += m;
+ }
+ if (idx+3 < m*n_no_padding) {
+ vstore4((float4)(c0.s6, c1.s6, c2.s6, c3.s6), 0, dst + idx);
+ idx += m;
+ }
+ if (idx+3 < m*n_no_padding) {
+ vstore4((float4)(c0.s7, c1.s7, c2.s7, c3.s7), 0, dst + idx);
+ }
+}
--- /dev/null
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+#pragma OPENCL EXTENSION cl_khr_subgroups : enable
+
+#ifdef cl_qcom_reqd_sub_group_size
+#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
+#define ADRENO_GPU 1
+#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half")))
+#endif
+
+#define QK_K 256
+#define NSUBGROUPS 4
+#define SUBGROUP_SIZE 64
+
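+// Decode the 6-bit scale and min for sub-block j from the 12-byte q4_K scale field.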
+inline void get_scale_min_k4(
+ int j,
+ global const uchar * q,
+ uchar * d,
+ uchar * m,
+ uchar mask_d6,
+ uchar mask_d4,
+ uchar mask_hi2
+) {
+ if (j < 4) {
+ *d = q[j] & mask_d6;
+ *m = q[j+4] & mask_d6;
+ } else {
+ *d = (q[j+4] & mask_d4) | ((q[j-4] & mask_hi2) >> 2);
+ *m = ((q[j+4] >> 4) & mask_d4) | ((q[j] & mask_hi2) >> 2);
+ }
+}
+
+#define dequantizeBlockAccum_ns_sgbroadcast_1_hi(total_sums, bits4, scale, minv, y) \
+ float shared_y; \
+ shared_y = sub_group_broadcast(y.s0, 0); \
+ total_sums.s0 += ((bits4.s0 & 0x000F) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += ((bits4.s1 & 0x000F) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s1, 0); \
+ total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s2, 0); \
+ total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s3, 0); \
+ total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s4, 0); \
+ total_sums.s0 += ((bits4.s2 & 0x000F) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += ((bits4.s3 & 0x000F) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s5, 0); \
+ total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s6, 0); \
+ total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s7, 0); \
+ total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s0, 1); \
+ total_sums.s0 += ((bits4.s4 & 0x000F) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += ((bits4.s5 & 0x000F) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s1, 1); \
+ total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s2, 1); \
+ total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s3, 1); \
+ total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s4, 1); \
+ total_sums.s0 += ((bits4.s6 & 0x000F) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += ((bits4.s7 & 0x000F) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s5, 1); \
+ total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s6, 1); \
+ total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s7, 1); \
+ total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y; \
+
+
+#define dequantizeBlockAccum_ns_sgbroadcast_1_lo(total_sums, bits4, scale, minv, y) \
+ shared_y = sub_group_broadcast(y.s0, 2); \
+ total_sums.s0 += ((bits4.s0 & 0x000F) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += ((bits4.s1 & 0x000F) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s1, 2); \
+ total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s2, 2); \
+ total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s3, 2); \
+ total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s4, 2); \
+ total_sums.s0 += ((bits4.s2 & 0x000F) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += ((bits4.s3 & 0x000F) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s5, 2); \
+ total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s6, 2); \
+ total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s7, 2); \
+ total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s0, 3); \
+ total_sums.s0 += ((bits4.s4 & 0x000F) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += ((bits4.s5 & 0x000F) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s1, 3); \
+ total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s2, 3); \
+ total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s3, 3); \
+ total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s4, 3); \
+ total_sums.s0 += ((bits4.s6 & 0x000F) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += ((bits4.s7 & 0x000F) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s5, 3); \
+ total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s6, 3); \
+ total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y; \
+ shared_y = sub_group_broadcast(y.s7, 3); \
+ total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y; \
+ total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y; \
+
+
+#define dequantizeBlockAccum_ns_sgbroadcast_8_hi(total_sums, bits4, scale, minv, y) \
+ float8 shared_y; \
+ shared_y = sub_group_broadcast(y, 0); \
+ total_sums.s0 += ((bits4.s0 & 0x000F) * scale.s0 - minv.s0) * shared_y.s0; \
+ total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y.s1; \
+ total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y.s2; \
+ total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y.s3; \
+ total_sums.s0 += ((bits4.s2 & 0x000F) * scale.s0 - minv.s0) * shared_y.s4; \
+ total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y.s5; \
+ total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y.s6; \
+ total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y.s7; \
+ total_sums.s1 += ((bits4.s1 & 0x000F) * scale.s1 - minv.s1) * shared_y.s0; \
+ total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y.s1; \
+ total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y.s2; \
+ total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y.s3; \
+ total_sums.s1 += ((bits4.s3 & 0x000F) * scale.s1 - minv.s1) * shared_y.s4; \
+ total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y.s5; \
+ total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y.s6; \
+ total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y.s7; \
+ shared_y = sub_group_broadcast(y, 1); \
+ total_sums.s0 += ((bits4.s4 & 0x000F) * scale.s0 - minv.s0) * shared_y.s0; \
+ total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y.s1; \
+ total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y.s2; \
+ total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y.s3; \
+ total_sums.s0 += ((bits4.s6 & 0x000F) * scale.s0 - minv.s0) * shared_y.s4; \
+ total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y.s5; \
+ total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y.s6; \
+ total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y.s7; \
+ total_sums.s1 += ((bits4.s5 & 0x000F) * scale.s1 - minv.s1) * shared_y.s0; \
+ total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y.s1; \
+ total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y.s2; \
+ total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y.s3; \
+ total_sums.s1 += ((bits4.s7 & 0x000F) * scale.s1 - minv.s1) * shared_y.s4; \
+ total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y.s5; \
+ total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y.s6; \
+ total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y.s7; \
+
+
+#define dequantizeBlockAccum_ns_sgbroadcast_8_lo(total_sums, bits4, scale, minv, y) \
+ shared_y = sub_group_broadcast(y, 2); \
+ total_sums.s0 += ((bits4.s0 & 0x000F) * scale.s0 - minv.s0) * shared_y.s0; \
+ total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y.s1; \
+ total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y.s2; \
+ total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y.s3; \
+ total_sums.s0 += ((bits4.s2 & 0x000F) * scale.s0 - minv.s0) * shared_y.s4; \
+ total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y.s5; \
+ total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y.s6; \
+ total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y.s7; \
+ total_sums.s1 += ((bits4.s1 & 0x000F) * scale.s1 - minv.s1) * shared_y.s0; \
+ total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y.s1; \
+ total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y.s2; \
+ total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y.s3; \
+ total_sums.s1 += ((bits4.s3 & 0x000F) * scale.s1 - minv.s1) * shared_y.s4; \
+ total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y.s5; \
+ total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y.s6; \
+ total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y.s7; \
+ shared_y = sub_group_broadcast(y, 3); \
+ total_sums.s0 += ((bits4.s4 & 0x000F) * scale.s0 - minv.s0) * shared_y.s0; \
+ total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y.s1; \
+ total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y.s2; \
+ total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y.s3; \
+ total_sums.s0 += ((bits4.s6 & 0x000F) * scale.s0 - minv.s0) * shared_y.s4; \
+ total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) * scale.s0 - minv.s0) * shared_y.s5; \
+ total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) * scale.s0 - minv.s0) * shared_y.s6; \
+ total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) * scale.s0 - minv.s0) * shared_y.s7; \
+ total_sums.s1 += ((bits4.s5 & 0x000F) * scale.s1 - minv.s1) * shared_y.s0; \
+ total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y.s1; \
+ total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y.s2; \
+ total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y.s3; \
+ total_sums.s1 += ((bits4.s7 & 0x000F) * scale.s1 - minv.s1) * shared_y.s4; \
+ total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) * scale.s1 - minv.s1) * shared_y.s5; \
+ total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) * scale.s1 - minv.s1) * shared_y.s6; \
+ total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) * scale.s1 - minv.s1) * shared_y.s7; \
+
+#ifdef ADRENO_GPU
+REQD_SUBGROUP_SIZE_64
+#endif
+kernel void kernel_gemv_noshuffle_q4_k_f32(
+ read_only image1d_buffer_t src0_q,
+ global half2 * src0_d,
+ global half2 * src0_m,
+ global uchar * src0_s,
+ read_only image1d_buffer_t src1,
+ global float * dst,
+ ulong offsetd,
+ int ne00,
+ int ne01,
+ uchar mask_d6,
+ uchar mask_d4,
+ uchar mask_hi2)
+{
+ uint groupId = get_local_id(1);
+ uint gid = get_global_id(0);
+ ushort slid = get_sub_group_local_id();
+
+ uint K = ne00;
+ uint M = ne01;
+
+ uint LINE_STRIDE_A = M / 2;
+ uint BLOCK_STRIDE_A = NSUBGROUPS * M;
+ uint scales_per_row = (K / QK_K) * 12;
+
+ private uint4 regA;
+ private half2 regS;
+ private half2 regM;
+ private float8 regB;
+
+ private float2 totalSum = (float2)(0.0f);
+
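+ // each fiber accumulates two consecutive output rows (2*gid and 2*gid+1);
+ // the 4 subgroups split the K dimension and are reduced in local memory below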
+ for (uint k = groupId; k < (K / 32); k += NSUBGROUPS) {
+ uint sb = k / 8;
+ uint j = k % 8;
+
+ half2 d = src0_d[gid + sb * LINE_STRIDE_A];
+ half2 dm = src0_m[gid + sb * LINE_STRIDE_A];
+
+ global const uchar * sc0 = src0_s + 2 * gid * scales_per_row + sb * 12;
+ global const uchar * sc1 = src0_s + (2 * gid + 1) * scales_per_row + sb * 12;
+
+ uchar sv0, mn0, sv1, mn1;
+ get_scale_min_k4(j, sc0, &sv0, &mn0, mask_d6, mask_d4, mask_hi2);
+ get_scale_min_k4(j, sc1, &sv1, &mn1, mask_d6, mask_d4, mask_hi2);
+
+ regS = convert_half2(convert_float2(d) * convert_float2((uchar2)(sv0, sv1)));
+ regM = convert_half2(convert_float2(dm) * convert_float2((uchar2)(mn0, mn1)));
+
+ if (slid < 4) {
+ regB.s0123 = read_imagef(src1, (slid * 2 + k * 8));
+ regB.s4567 = read_imagef(src1, (1 + slid * 2 + k * 8));
+ }
+
+ // load the first half of the weights for the two consecutive rows handled by this fiber
+ regA.s0 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 0)).x;
+ regA.s1 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 1)).x;
+ regA.s2 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 2)).x;
+ regA.s3 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 3)).x;
+#ifdef VECTOR_SUB_GROUP_BROADCAST
+ dequantizeBlockAccum_ns_sgbroadcast_8_hi(totalSum, as_ushort8(regA), regS, regM, regB);
+#else
+ dequantizeBlockAccum_ns_sgbroadcast_1_hi(totalSum, as_ushort8(regA), regS, regM, regB);
+#endif // VECTOR_SUB_GROUP_BROADCAST
+
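+ // load the second half of the weights for the same two rows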
+ regA.s0 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 4)).x;
+ regA.s1 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 5)).x;
+ regA.s2 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 6)).x;
+ regA.s3 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 7)).x;
+#ifdef VECTOR_SUB_GROUP_BROADCAST
+ dequantizeBlockAccum_ns_sgbroadcast_8_lo(totalSum, as_ushort8(regA), regS, regM, regB);
+#else
+ dequantizeBlockAccum_ns_sgbroadcast_1_lo(totalSum, as_ushort8(regA), regS, regM, regB);
+#endif // VECTOR_SUB_GROUP_BROADCAST
+ }
+
+ // reduction in local memory, assumes #wave=4
+ local float2 reduceLM[SUBGROUP_SIZE * 3];
+ if (groupId == 1) {
+ reduceLM[SUBGROUP_SIZE * 0 + slid] = totalSum;
+ }
+ if (groupId == 2) {
+ reduceLM[SUBGROUP_SIZE * 1 + slid] = totalSum;
+ }
+ if (groupId == 3) {
+ reduceLM[SUBGROUP_SIZE * 2 + slid] = totalSum;
+ }
+
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ if (groupId == 0) {
+ totalSum += reduceLM[SUBGROUP_SIZE * 0 + slid];
+ }
+ if (groupId == 0) {
+ totalSum += reduceLM[SUBGROUP_SIZE * 1 + slid];
+ }
+ if (groupId == 0) {
+ totalSum += reduceLM[SUBGROUP_SIZE * 2 + slid];
+ }
+
+ // 2 outputs per fiber in wave 0
+ if (groupId == 0) {
+ dst = (global float*)((global char*)dst + offsetd);
+ vstore2(totalSum, 0, &(dst[gid * 2]));
+ }
+
+}