cl_kernel kernel_convert_block_q4_1, kernel_restore_block_q4_1;
cl_kernel kernel_convert_block_mxfp4, kernel_convert_block_mxfp4_trans, kernel_restore_block_mxfp4, kernel_restore_block_mxfp4_trans;
cl_kernel kernel_convert_block_q8_0, kernel_restore_block_q8_0, kernel_restore_block_q8_0_trans;
+ cl_kernel kernel_convert_block_q6_K_noshuffle, kernel_restore_block_q6_K_noshuffle;
cl_kernel kernel_mul_mat_q4_0_f32_8x_flat;
cl_kernel kernel_convert_block_q4_0_noshuffle;
cl_kernel kernel_restore_block_q4_0_noshuffle;
cl_kernel kernel_gemm_noshuffle_q4_1_f32;
cl_kernel kernel_mul_mm_q8_0_f32_8x4;
cl_kernel kernel_mul_mat_vec_q8_0_f32;
+ cl_kernel kernel_gemv_noshuffle_q6_K_f32;
+ cl_kernel kernel_gemm_noshuffle_q6_K_f32;
#endif // GGML_OPENCL_USE_ADRENO_KERNELS
void free() {
CL_CHECK((backend_ctx->kernel_restore_block_q4_K = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q4_K", &err), err));
CL_CHECK((backend_ctx->kernel_convert_block_q6_K = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q6_K", &err), err));
CL_CHECK((backend_ctx->kernel_restore_block_q6_K = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q6_K", &err), err));
+ CL_CHECK((backend_ctx->kernel_convert_block_q6_K_noshuffle = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q6_K_noshuffle", &err), err));
+ CL_CHECK((backend_ctx->kernel_restore_block_q6_K_noshuffle = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q6_K_noshuffle", &err), err));
GGML_LOG_CONT(".");
}
CL_CHECK((backend_ctx->kernel_gemm_moe_mxfp4_f32 = clCreateKernel(backend_ctx->program_gemm_moe_mxfp4_f32, "kernel_gemm_moe_mxfp4_f32", &err), err));
GGML_LOG_CONT(".");
}
+
+ // gemv_noshuffle_q6_k_f32
+ {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+ const std::string kernel_src {
+ #include "gemv_noshuffle_q6_k_f32.cl.h"
+ };
+#else
+ const std::string kernel_src = read_file("gemv_noshuffle_q6_k_f32.cl");
+#endif
+
+ std::string CL_gemv_compile_opts = std::string("-cl-std=") + opencl_c_std +
+ " -cl-mad-enable ";
+ if (backend_ctx->has_vector_subgroup_broadcast) {
+ CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT ";
+ }
+
+ cl_program prog =
+ build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), CL_gemv_compile_opts);
+
+ CL_CHECK((backend_ctx->kernel_gemv_noshuffle_q6_K_f32 = clCreateKernel(prog, "kernel_gemv_noshuffle_q6_K_f32", &err), err));
+ GGML_LOG_CONT(".");
+ }
+
+ // gemm_noshuffle_q6_k_f32
+ {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+ const std::string kernel_src {
+ #include "gemm_noshuffle_q6_k_f32.cl.h"
+ };
+#else
+ const std::string kernel_src = read_file("gemm_noshuffle_q6_k_f32.cl");
+#endif
+        std::string CL_gemm_compile_opts = std::string("-cl-std=") + opencl_c_std +
+            " -cl-mad-enable ";
+
+        cl_program prog =
+            build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), CL_gemm_compile_opts);
+
+ CL_CHECK((backend_ctx->kernel_gemm_noshuffle_q6_K_f32 = clCreateKernel(prog, "kernel_gemm_noshuffle_q6_K_f32", &err), err));
+ GGML_LOG_CONT(".");
+ }
#endif // GGML_OPENCL_USE_ADRENO_KERNELS
GGML_LOG_CONT("\n");
}
"Incorrect tensor size");
cl_int err;
- cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE,
- ggml_nbytes(tensor), NULL, &err);
- CL_CHECK(err);
- CL_CHECK(clEnqueueWriteBuffer(
- queue, data_device, CL_TRUE, 0,
- ggml_nbytes(tensor), data, 0, NULL, NULL));
+ cl_mem data_device;
+ CL_CHECK((data_device = clCreateBuffer(context, CL_MEM_READ_WRITE, ggml_nbytes(tensor), NULL, &err), err));
+ CL_CHECK(clEnqueueWriteBuffer(queue, data_device, CL_TRUE, 0, ggml_nbytes(tensor), data, 0, NULL, NULL));
cl_buffer_region region;
// Subbuffer for ql
region.origin = align_to(extra_orig->offset + tensor->view_offs + offset, backend_ctx->alignment);
region.size = size_ql;
- extra->ql = clCreateSubBuffer(
- extra_orig->data_device, CL_MEM_READ_WRITE,
- CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err);
- CL_CHECK(err);
+ CL_CHECK((extra->ql = clCreateSubBuffer(extra_orig->data_device, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err), err));
auto previous_origin = region.origin;
// Subbuffer for qh
region.origin = align_to(previous_origin + size_ql, backend_ctx->alignment);
region.size = size_qh;
- extra->qh = clCreateSubBuffer(
- extra_orig->data_device, CL_MEM_READ_WRITE,
- CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err);
- CL_CHECK(err);
+ CL_CHECK((extra->qh = clCreateSubBuffer(extra_orig->data_device, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err), err));
previous_origin = region.origin;
// Subbuffer for scales
region.origin = align_to(previous_origin + size_qh, backend_ctx->alignment);
region.size = size_s;
- extra->s = clCreateSubBuffer(
- extra_orig->data_device, CL_MEM_READ_WRITE,
- CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err);
- CL_CHECK(err);
+ CL_CHECK((extra->s = clCreateSubBuffer(extra_orig->data_device, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err), err));
previous_origin = region.origin;
// Create subbuffer for d.
region.origin = align_to(previous_origin + size_s, backend_ctx->alignment);
region.size = size_d;
- extra->d = clCreateSubBuffer(
- extra_orig->data_device, CL_MEM_READ_WRITE,
- CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err);
- CL_CHECK(err);
+ CL_CHECK((extra->d = clCreateSubBuffer(extra_orig->data_device, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err), err));
previous_origin = region.origin;
// Flatten the weights
- cl_kernel kernel = backend_ctx->kernel_convert_block_q6_K;
-
- CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device));
- CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->ql));
- CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->qh));
- CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra->s));
- CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra->d));
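+    // pick the flattening kernel: the Adreno no-shuffle variant when the Adreno gemm/gemv kernels will consume this tensor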
+ cl_kernel kernel;
+#ifdef GGML_OPENCL_USE_ADRENO_KERNELS
+ kernel = backend_ctx->kernel_convert_block_q6_K;
+ if (use_adreno_kernels(backend_ctx, tensor)) {
+ kernel = backend_ctx->kernel_convert_block_q6_K_noshuffle;
+ }
+#else
+ kernel = backend_ctx->kernel_convert_block_q6_K;
+#endif // GGML_OPENCL_USE_ADRENO_KERNELS
- size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
+ cl_uchar mask = 0xff;
+ cl_ulong n_blk = ggml_nelements(tensor)/ggml_blck_size(tensor->type);
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->ql));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->qh));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra->s));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra->d));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_uchar), &mask));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &n_blk));
+
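+    // round the grid up to the 64-wide workgroup; n_blk is passed so out-of-range work-items can be skipped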
+ size_t global_work_size[] = {(size_t)CEIL_DIV(n_blk, 64)*64, 1, 1};
size_t local_work_size[] = {64, 1, 1};
cl_event evt;
extra->size_d = size_d;
tensor->extra = extra;
+
+#ifdef GGML_OPENCL_USE_ADRENO_KERNELS
+ if (use_adreno_kernels(backend_ctx, tensor)) {
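+        // the no-shuffle gemm/gemv kernels read each plane in transposed [K][M] order,
+        // so transpose the flattened ql/qh/s/d planes in place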
+ cl_int M = tensor->ne[1]; // ne01
+ cl_int K = tensor->ne[0]; // ne00
+
+ // Transpose ql as ushort
+ transpose_2d_as_16b(backend_ctx,
+ extra->ql, extra->ql, size_ql, K/4, M);
+
+ // Transpose qh as uchar
+ transpose_2d_as_8b(backend_ctx,
+ extra->qh, extra->qh, size_qh, K/4, M);
+
+ // Transpose s as ushort
+ transpose_2d_as_16b(backend_ctx,
+ extra->s, extra->s, size_s, K/16/2, M);
+
+ // Transpose d as ushort
+ transpose_2d_as_16b(backend_ctx,
+ extra->d, extra->d, size_d, K/256, M);
+ }
+#endif // GGML_OPENCL_USE_ADRENO_KERNELS
return;
}
#endif // GGML_OPENCL_SOA_Q
if (tensor->type == GGML_TYPE_Q6_K) {
ggml_tensor_extra_cl_q6_K * extra = (ggml_tensor_extra_cl_q6_K *)tensor->extra;
+#ifdef GGML_OPENCL_USE_ADRENO_KERNELS
+ if (use_adreno_kernels(backend_ctx, tensor)) {
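+        // the planes were transposed at set_tensor time; transpose them back, then re-interleave into standard q6_K blocks before copying out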
+ static ggml_cl_buffer buf_trans_ql;
+ static ggml_cl_buffer buf_trans_qh;
+ static ggml_cl_buffer buf_trans_s;
+ static ggml_cl_buffer buf_trans_d;
+ static ggml_cl_buffer buf_unpacked;
+
+ cl_int M = tensor->ne[1]; // ne01
+ cl_int K = tensor->ne[0]; // ne00
+
+ GGML_ASSERT(K % ggml_blck_size(tensor->type) == 0);
+
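+        // per q6_K block (QK_K = 256 weights): ql holds the low 4 bits (128 B), qh the high 2 bits (64 B),
+        // s one int8 scale per 16 weights (16 B) and d one fp16 super-block scale (2 B), hence the sizes below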
+ size_t size_ql = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*ggml_blck_size(tensor->type)/2;
+ size_t size_qh = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*ggml_blck_size(tensor->type)/4;
+ size_t size_s = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*ggml_blck_size(tensor->type)/16;
+ size_t size_d = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*sizeof(ggml_fp16_t);
+ GGML_ASSERT(size_ql + size_qh + size_s + size_d == ggml_nbytes(tensor) && "Incorrect tensor size");
+
+ buf_trans_ql.allocate(backend_ctx->context, size_ql);
+ buf_trans_qh.allocate(backend_ctx->context, size_qh);
+ buf_trans_s.allocate(backend_ctx->context, size_s);
+ buf_trans_d.allocate(backend_ctx->context, size_d);
+ buf_unpacked.allocate(backend_ctx->context, ggml_nbytes(tensor));
+
+ // transpose ql, qh, s and d back
+ transpose_2d_as_16b(backend_ctx, extra->ql, buf_trans_ql.buffer, size_ql, M, K/4);
+ transpose_2d_as_8b(backend_ctx, extra->qh, buf_trans_qh.buffer, size_qh, M, K/4);
+ transpose_2d_as_16b(backend_ctx, extra->s, buf_trans_s.buffer, size_s, M, K/16/2);
+ transpose_2d_as_16b(backend_ctx, extra->d, buf_trans_d.buffer, size_d, M, K/256);
+
+ // unpack
+ cl_uchar mask = 0xFF;
+ cl_ulong n_blk = ggml_nelements(tensor)/ggml_blck_size(tensor->type);
+ cl_kernel kernel = backend_ctx->kernel_restore_block_q6_K_noshuffle;
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &buf_trans_ql.buffer));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &buf_trans_qh.buffer));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &buf_trans_s.buffer));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &buf_trans_d.buffer));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &buf_unpacked.buffer));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_uchar), &mask));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &n_blk));
+
+ size_t global_work_size[] = {(size_t)n_blk, 1, 1};
+ size_t local_work_size[] = {1, 1, 1};
+
+ cl_event evt;
+ CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt));
+ CL_CHECK(clWaitForEvents(1, &evt));
+ CL_CHECK(clEnqueueReadBuffer(queue, buf_unpacked.buffer, CL_TRUE, offset, size, data, 0, NULL, NULL));
+
+ return;
+ }
+#endif // GGML_OPENCL_USE_ADRENO_KERNELS
+
cl_int err;
cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE,
ggml_nbytes(tensor), NULL, &err);
CL_CHECK(err);
+ cl_uchar mask = 0xFF;
+ cl_ulong n_blk = ggml_nelements(tensor)/ggml_blck_size(tensor->type);
cl_kernel kernel = backend_ctx->kernel_restore_block_q6_K;
- CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->ql));
- CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->qh));
- CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->s));
- CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra->d));
- CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &data_device));
-
- size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->ql));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->qh));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->s));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra->d));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &data_device));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_uchar), &mask));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &n_blk));
+
+ size_t global_work_size[] = {(size_t)n_blk, 1, 1};
size_t local_work_size[] = {1, 1, 1};
cl_event evt;
static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2,
"wrong q4_0 block size/padding");
+#define QK_MXFP4 32
+
#include <math.h>
#ifdef __cplusplus
#include "half.hpp"
buf_d = malloc(size_e);
CL_CHECK(clEnqueueReadBuffer(queue, extra->q, CL_TRUE, 0, size_q, buf_q, 0, NULL, NULL));
- CL_CHECK(clEnqueueReadBuffer(queue, extra->d, CL_TRUE, 0, size_e, buf_d, 0, NULL, NULL));
+ CL_CHECK(clEnqueueReadBuffer(queue, extra->e, CL_TRUE, 0, size_e, buf_d, 0, NULL, NULL));
CL_CHECK(clFinish(queue));
} else {
// Read out the tensor from GPU memory.
#endif
}
+static void ggml_cl_mul_mat_q6_K_f32_adreno(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+#ifdef GGML_OPENCL_USE_ADRENO_KERNELS
+ GGML_ASSERT(src0);
+ GGML_ASSERT(src0->extra);
+ GGML_ASSERT(src1);
+ GGML_ASSERT(src1->extra);
+ GGML_ASSERT(dst);
+ GGML_ASSERT(dst->extra);
+
+ ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+
+ ggml_tensor_extra_cl_q6_K * extra0_q6_K = (ggml_tensor_extra_cl_q6_K *)src0->extra;
+ ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra;
+ ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+
+ cl_ulong offset1 = extra1->offset + src1->view_offs;
+ cl_ulong offsetd = extrad->offset + dst->view_offs;
+
+ const int ne00 = src0->ne[0];
+ const int ne01 = src0->ne[1];
+
+ const int ne1 = dst->ne[1];
+
+ GGML_ASSERT(ne00 % ggml_blck_size(src0->type) == 0);
+
+ cl_context context = backend_ctx->context;
+ cl_kernel kernel;
+
+ cl_int err;
+ cl_buffer_region region;
+ cl_image_format img_fmt;
+ cl_image_desc img_desc;
+
+ // subbuffer and image for activation
+ if (ne1 == 1) {
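+        // ne1 == 1 is the mat-vec case: dispatch the gemv kernel; larger batches go through transpose + gemm below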
+ cl_mem ql_img = nullptr;
+ cl_mem qh_img = nullptr;
+ cl_mem b_sub_buffer = nullptr;
+ cl_mem b_img = nullptr;
+
+ // image for ql
+ img_fmt.image_channel_order = CL_R;
+        img_fmt.image_channel_data_type = CL_UNSIGNED_INT32;
+ memset(&img_desc, 0, sizeof(img_desc));
+ img_desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+ img_desc.image_width = ne01 * ne00 / 8;
+ img_desc.buffer = extra0_q6_K->ql;
+ CL_CHECK((ql_img = clCreateImage(context, CL_MEM_READ_ONLY, &img_fmt, &img_desc, NULL, &err), err));
+
+ // image for qh
+ img_fmt.image_channel_order = CL_R;
+ img_fmt.image_channel_data_type = CL_HALF_FLOAT;
+ memset(&img_desc, 0, sizeof(img_desc));
+ img_desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+ img_desc.image_width = ne01 * ne00 / 8;
+ img_desc.buffer = extra0_q6_K->qh;
+ CL_CHECK((qh_img = clCreateImage(context, CL_MEM_READ_ONLY, &img_fmt, &img_desc, NULL, &err), err));
+
+ region.origin = offset1;
+ region.size = ne00 * ne1 * sizeof(float);
+ CL_CHECK((b_sub_buffer = clCreateSubBuffer(extra1->data_device, 0, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err), err));
+
+ img_fmt.image_channel_order = CL_RGBA;
+ img_fmt.image_channel_data_type = CL_FLOAT;
+ memset(&img_desc, 0, sizeof(img_desc));
+ img_desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+ img_desc.image_width = ne00 * ne1 / 4;
+ img_desc.buffer = b_sub_buffer;
+ CL_CHECK((b_img = clCreateImage(context, CL_MEM_READ_ONLY, &img_fmt, &img_desc, NULL, &err), err));
+
+ kernel = backend_ctx->kernel_gemv_noshuffle_q6_K_f32;
+
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &ql_img));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &qh_img));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra0_q6_K->s));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra0_q6_K->d));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &b_img));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &offsetd));
+ CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_int), &ne00));
+ CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_int), &ne01));
+
+ size_t local_work_size[3] = {64, 4, 1};
+ size_t global_work_size[3] = {(size_t)CEIL_DIV(ne01/2, 64)*64, 4, 1};
+
+ backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+
+ CL_CHECK(clReleaseMemObject(ql_img));
+ CL_CHECK(clReleaseMemObject(qh_img));
+ CL_CHECK(clReleaseMemObject(b_sub_buffer));
+ CL_CHECK(clReleaseMemObject(b_img));
+ } else {
+ cl_mem b_sub_buf;
+ cl_mem b_buf_trans;
+ cl_mem b_img;
+ cl_mem b_img_trans;
+
+ // subbuffer for activation
+ region.origin = offset1;
+ region.size = ne00 * ne1 * sizeof(float);
+ CL_CHECK((b_sub_buf = clCreateSubBuffer(extra1->data_device, 0, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err), err));
+
+ // image for activation
+ img_fmt.image_channel_order = CL_RGBA;
+ img_fmt.image_channel_data_type = CL_FLOAT;
+ memset(&img_desc, 0, sizeof(img_desc));
+ img_desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+ img_desc.image_width = ne00 * ne1 / 4;
+ img_desc.buffer = b_sub_buf;
+ CL_CHECK((b_img = clCreateImage(context, CL_MEM_READ_ONLY, &img_fmt, &img_desc, NULL, &err), err));
+
+ // pad N to multiple of 8
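+        // (the gemm kernel writes an 8-wide tile along N per work-item)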
+ int extra_elements = ne1 % 8;
+ int padding = 0;
+ if (extra_elements > 0){
+ padding = 8 - extra_elements;
+ }
+
+ // subbuffer for transposed activation
+ region.origin = 0;
+ region.size = ne00 * (ne1 + padding) * sizeof(float)/2;
+ backend_ctx->prealloc_act_trans.allocate(context, region.size);
+ CL_CHECK((b_buf_trans = clCreateSubBuffer(backend_ctx->prealloc_act_trans.buffer, 0, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err), err));
+
+ // image for transposed activation
+ img_fmt.image_channel_order = CL_RGBA;
+ img_fmt.image_channel_data_type = CL_HALF_FLOAT;
+ memset(&img_desc, 0, sizeof(img_desc));
+ img_desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+ img_desc.image_width = ne00 * (ne1 + padding) / 4;
+ img_desc.buffer = b_buf_trans;
+ CL_CHECK((b_img_trans = clCreateImage(context, 0, &img_fmt, &img_desc, NULL, &err), err));
+
+ // transpose activation
+ int height_B = ne1/4;
+ if (height_B == 0) {
+ height_B = 1;
+ }
+ int width_B = ne00/4;
+ int padded_height_B = (ne1 + padding) / 4;
+
+ kernel = backend_ctx->kernel_transpose_32_16;
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &b_img));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &b_img_trans));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(int), &height_B));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(int), &width_B));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &padded_height_B));
+
+ size_t local_size_t[2] = { 1, 16 };
+ size_t global_size_t[2] = { (size_t)width_B, (size_t)padded_height_B };
+ backend_ctx->enqueue_ndrange_kernel(kernel, 2, global_size_t, local_size_t, dst);
+
+ // gemm
+ kernel = backend_ctx->kernel_gemm_noshuffle_q6_K_f32;
+ int padded_N = ne1 + padding;
+
+ cl_ushort mask_f000 = 0xF000;
+ cl_uchar mask_c0 = 0xC0;
+
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q6_K->ql));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q6_K->qh));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra0_q6_K->s));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra0_q6_K->d));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &b_img_trans));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &offsetd));
+ CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01));
+ CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &padded_N));
+ CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne00));
+ CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne1));
+ CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ushort),&mask_f000));
+ CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_uchar), &mask_c0));
+
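+        // one work-item per 8 (N) x 4 (M) output tile, hence the ne1/8 x ne01/4 grid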
+ size_t global_work_size[3] = {(size_t)CEIL_DIV(ne1, 8), (size_t)CEIL_DIV(ne01, 4), 1};
+ size_t local_work_size[3] = {2, 128, 1};
+ backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+
+ CL_CHECK(clReleaseMemObject(b_sub_buf));
+ CL_CHECK(clReleaseMemObject(b_img));
+ CL_CHECK(clReleaseMemObject(b_buf_trans));
+ CL_CHECK(clReleaseMemObject(b_img_trans));
+ }
+#else
+ GGML_UNUSED(backend);
+ GGML_UNUSED(src0);
+ GGML_UNUSED(src1);
+ GGML_UNUSED(dst);
+#endif
+}
+
static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0);
GGML_ASSERT(src0->extra);
return;
}
+ // q6_K x fp32
+ if (src0t == GGML_TYPE_Q6_K && src1t == GGML_TYPE_F32) {
+ ggml_cl_mul_mat_q6_K_f32_adreno(backend, src0, src1, dst);
+ return;
+ }
+
// q4_0 x fp32
if(src0t == GGML_TYPE_Q4_0 && src1t == GGML_TYPE_F32) {
// TODO: remove duplicate definitions of image description + format -- move to top
--- /dev/null
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+
+#ifdef cl_qcom_reqd_sub_group_size
+#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
+#define ADRENO_GPU 1
+#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
+#endif
+
+#ifdef ADRENO_GPU
+REQD_SUBGROUP_SIZE_128
+#endif
+kernel void kernel_gemm_noshuffle_q6_K_f32(
+ global const ushort * src0_ql,
+ global const uchar * src0_qh,
+ global const ushort * src0_s,
+ global const half * src0_d,
+ read_only image1d_buffer_t src1,
+ global float * dst,
+ ulong offsetd,
+ int m,
+ int n,
+ int k,
+ int n_no_padding,
+ ushort mask_f000,
+ uchar mask_c0
+) {
+ dst = (global float *)( (global char *)dst + offsetd );
+
+ int m_4 = m >> 2;
+ int n_4 = n >> 2;
+
+ int gy = get_global_id(0); // n
+ int gx = get_global_id(1); // m
+ int gx_2 = gx << 2;
+
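+    // c0..c3 accumulate 4 consecutive output rows (M), each across the 8 columns (N) of this tile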
+ half8 c0 = 0, c1 = 0, c2 = 0, c3 = 0;
+ half8 B;
+ half4 dequantized_weights;
+
+ global const ushort * ptr_ql = src0_ql + gx_2;
+ global const uchar * ptr_qh = src0_qh + gx_2;
+ global const ushort * ptr_s = src0_s + gx_2;
+ global const half * ptr_d = src0_d + gx_2;
+
+ for (int i = 0; i < k; i += 4) {
+ // load 4x elements (ushort) of ql on M, each ushort contains 4 weights
+        // the 4 ushorts correspond to 4 consecutive rows on M
+ ushort4 bits4 = vload4(0, ptr_ql + (i/4)*m); // ql packed in 4s in ushort
+ uchar4 bits2 = vload4(0, ptr_qh + (i/4)*m); // qh packed in 4s in uchar
+
+ // load 4 consecutive scales
+ char8 scale_s_8 = as_char8(vload4(0, ptr_s + (i/16/2)*m)); // 1 char scale every 16 elements, packed in 2s
+ char4 scale_s = ((i/16) % 2) == 0 ? scale_s_8.s0246 : scale_s_8.s1357; // transposed as ushort, 2 blocks
+ half4 scale_d = vload4(0, ptr_d + (i/256)*m); // 1 half scale every 256 elements
+
+ // j=0
+ // load 2x 4 elements of activations on N, corresponding to 8 rows on N
+ B.s0123 = read_imageh(src1, gy*2 + (i + 0)*n_4 + 0);
+ B.s4567 = read_imageh(src1, gy*2 + (i + 0)*n_4 + 1);
+ dequantized_weights.s0 = (convert_half((bits4.s0 & 0x000F) | ((bits2.s0 & 0x03) << 4)) - 32.f) * scale_s.s0 * scale_d.s0;
+ dequantized_weights.s1 = (convert_half((bits4.s1 & 0x000F) | ((bits2.s1 & 0x03) << 4)) - 32.f) * scale_s.s1 * scale_d.s1;
+ dequantized_weights.s2 = (convert_half((bits4.s2 & 0x000F) | ((bits2.s2 & 0x03) << 4)) - 32.f) * scale_s.s2 * scale_d.s2;
+ dequantized_weights.s3 = (convert_half((bits4.s3 & 0x000F) | ((bits2.s3 & 0x03) << 4)) - 32.f) * scale_s.s3 * scale_d.s3;
+ c0 += B * dequantized_weights.s0;
+ c1 += B * dequantized_weights.s1;
+ c2 += B * dequantized_weights.s2;
+ c3 += B * dequantized_weights.s3;
+
+ // j=1
+ B.s0123 = read_imageh(src1, gy*2 + (i + 1)*n_4 + 0);
+ B.s4567 = read_imageh(src1, gy*2 + (i + 1)*n_4 + 1);
+ dequantized_weights.s0 = (convert_half((((bits4.s0 & 0x00F0) >> 4) | ((bits2.s0 & 0x0C) << 2))) - 32.f) * scale_s.s0 * scale_d.s0;
+ dequantized_weights.s1 = (convert_half((((bits4.s1 & 0x00F0) >> 4) | ((bits2.s1 & 0x0C) << 2))) - 32.f) * scale_s.s1 * scale_d.s1;
+ dequantized_weights.s2 = (convert_half((((bits4.s2 & 0x00F0) >> 4) | ((bits2.s2 & 0x0C) << 2))) - 32.f) * scale_s.s2 * scale_d.s2;
+ dequantized_weights.s3 = (convert_half((((bits4.s3 & 0x00F0) >> 4) | ((bits2.s3 & 0x0C) << 2))) - 32.f) * scale_s.s3 * scale_d.s3;
+ c0 += B * dequantized_weights.s0;
+ c1 += B * dequantized_weights.s1;
+ c2 += B * dequantized_weights.s2;
+ c3 += B * dequantized_weights.s3;
+
+ // j=2
+ B.s0123 = read_imageh(src1, gy*2 + (i + 2)*n_4 + 0);
+ B.s4567 = read_imageh(src1, gy*2 + (i + 2)*n_4 + 1);
+ dequantized_weights.s0 = (convert_half((((bits4.s0 & 0x0F00) >> 8) | (bits2.s0 & 0x30))) - 32.f) * scale_s.s0 * scale_d.s0;
+ dequantized_weights.s1 = (convert_half((((bits4.s1 & 0x0F00) >> 8) | (bits2.s1 & 0x30))) - 32.f) * scale_s.s1 * scale_d.s1;
+ dequantized_weights.s2 = (convert_half((((bits4.s2 & 0x0F00) >> 8) | (bits2.s2 & 0x30))) - 32.f) * scale_s.s2 * scale_d.s2;
+ dequantized_weights.s3 = (convert_half((((bits4.s3 & 0x0F00) >> 8) | (bits2.s3 & 0x30))) - 32.f) * scale_s.s3 * scale_d.s3;
+ c0 += B * dequantized_weights.s0;
+ c1 += B * dequantized_weights.s1;
+ c2 += B * dequantized_weights.s2;
+ c3 += B * dequantized_weights.s3;
+
+ // j=3
+ B.s0123 = read_imageh(src1, gy*2 + (i + 3)*n_4 + 0);
+ B.s4567 = read_imageh(src1, gy*2 + (i + 3)*n_4 + 1);
+ dequantized_weights.s0 = (convert_half((((bits4.s0 & mask_f000) >> 12) | ((bits2.s0 & mask_c0) >> 2))) - 32.f) * scale_s.s0 * scale_d.s0;
+ dequantized_weights.s1 = (convert_half((((bits4.s1 & mask_f000) >> 12) | ((bits2.s1 & mask_c0) >> 2))) - 32.f) * scale_s.s1 * scale_d.s1;
+ dequantized_weights.s2 = (convert_half((((bits4.s2 & mask_f000) >> 12) | ((bits2.s2 & mask_c0) >> 2))) - 32.f) * scale_s.s2 * scale_d.s2;
+ dequantized_weights.s3 = (convert_half((((bits4.s3 & mask_f000) >> 12) | ((bits2.s3 & mask_c0) >> 2))) - 32.f) * scale_s.s3 * scale_d.s3;
+ c0 += B * dequantized_weights.s0;
+ c1 += B * dequantized_weights.s1;
+ c2 += B * dequantized_weights.s2;
+ c3 += B * dequantized_weights.s3;
+ }
+
+ int idx = (gy<<3)*m + (gx<<2);
+
+ if(idx+3 < m*n_no_padding){
+ vstore4((float4)(c0.s0, c1.s0, c2.s0, c3.s0), 0, dst + idx);
+ idx += m;
+ }
+ if(idx+3 < m*n_no_padding){
+ vstore4((float4)(c0.s1, c1.s1, c2.s1, c3.s1), 0, dst + idx);
+ idx += m;
+ }
+ if(idx+3 < m*n_no_padding){
+ vstore4((float4)(c0.s2, c1.s2, c2.s2, c3.s2), 0, dst + idx);
+ idx += m;
+ }
+ if(idx+3 < m*n_no_padding){
+ vstore4((float4)(c0.s3, c1.s3, c2.s3, c3.s3), 0, dst + idx);
+ idx += m;
+ }
+ if(idx+3 < m*n_no_padding){
+ vstore4((float4)(c0.s4, c1.s4, c2.s4, c3.s4), 0, dst + idx);
+ idx += m;
+ }
+ if(idx+3 < m*n_no_padding){
+ vstore4((float4)(c0.s5, c1.s5, c2.s5, c3.s5), 0, dst + idx);
+ idx += m;
+ }
+ if(idx+3 < m*n_no_padding){
+ vstore4((float4)(c0.s6, c1.s6, c2.s6, c3.s6), 0, dst + idx);
+ idx += m;
+ }
+ if(idx+3 < m*n_no_padding){
+ vstore4((float4)(c0.s7, c1.s7, c2.s7, c3.s7), 0, dst + idx);
+ }
+}
--- /dev/null
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+#pragma OPENCL EXTENSION cl_khr_subgroups : enable
+
+#ifdef cl_intel_required_subgroup_size
+#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
+#define INTEL_GPU 1
+#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
+#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
+#elif defined(cl_qcom_reqd_sub_group_size)
+#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
+#define ADRENO_GPU 1
+#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half")))
+#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
+#endif
+
+#define NSUBGROUPS 4
+#define SUBGROUP_SIZE 64
+
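+// Each *_hi/*_lo macro dequantizes 16 q6_K weights for each of 2 output rows (M) and accumulates them
+// against activations broadcast from other subgroup lanes: *_hi covers the first 16 weights of a
+// 32-weight chunk (scales .s0/.s2, activations from lanes 0-1), *_lo the second 16 (.s1/.s3, lanes 2-3).
+// The *_8 forms broadcast a whole float8 (VECTOR_SUB_GROUP_BROADCAT); the *_1 forms broadcast one float at a time.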
+#define dequantize_block_acc_bcast_8_hi(total_sum, bits4, bits2, scale_d, scale_s, y) \
+ float8 shared_y; \
+ shared_y = sub_group_broadcast(y, 0); \
+ total_sum.s0 += ((float)(((bits4.s0 & 0x000F) ) | ((bits2.s0 & 0x03) << 4)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s0; \
+ total_sum.s0 += ((float)(((bits4.s0 & 0x00F0) >> 4) | ((bits2.s0 & 0x0C) << 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s1; \
+ total_sum.s0 += ((float)(((bits4.s0 & 0x0F00) >> 8) | ((bits2.s0 & 0x30) )) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s2; \
+ total_sum.s0 += ((float)(((bits4.s0 & 0xF000) >> 12) | ((bits2.s0 & 0xC0) >> 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s3; \
+ total_sum.s0 += ((float)(((bits4.s2 & 0x000F) ) | ((bits2.s2 & 0x03) << 4)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s4; \
+ total_sum.s0 += ((float)(((bits4.s2 & 0x00F0) >> 4) | ((bits2.s2 & 0x0C) << 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s5; \
+ total_sum.s0 += ((float)(((bits4.s2 & 0x0F00) >> 8) | ((bits2.s2 & 0x30) )) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s6; \
+ total_sum.s0 += ((float)(((bits4.s2 & 0xF000) >> 12) | ((bits2.s2 & 0xC0) >> 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s7; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0x000F) ) | ((bits2.s1 & 0x03) << 4)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s0; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0x00F0) >> 4) | ((bits2.s1 & 0x0C) << 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s1; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0x0F00) >> 8) | ((bits2.s1 & 0x30) )) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s2; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0xF000) >> 12) | ((bits2.s1 & 0xC0) >> 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s3; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0x000F) ) | ((bits2.s3 & 0x03) << 4)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s4; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0x00F0) >> 4) | ((bits2.s3 & 0x0C) << 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s5; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0x0F00) >> 8) | ((bits2.s3 & 0x30) )) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s6; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0xF000) >> 12) | ((bits2.s3 & 0xC0) >> 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s7; \
+ shared_y = sub_group_broadcast(y, 1); \
+ total_sum.s0 += ((float)(((bits4.s4 & 0x000F) ) | ((bits2.s4 & 0x03) << 4)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s0; \
+ total_sum.s0 += ((float)(((bits4.s4 & 0x00F0) >> 4) | ((bits2.s4 & 0x0C) << 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s1; \
+ total_sum.s0 += ((float)(((bits4.s4 & 0x0F00) >> 8) | ((bits2.s4 & 0x30) )) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s2; \
+ total_sum.s0 += ((float)(((bits4.s4 & 0xF000) >> 12) | ((bits2.s4 & 0xC0) >> 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s3; \
+ total_sum.s0 += ((float)(((bits4.s6 & 0x000F) ) | ((bits2.s6 & 0x03) << 4)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s4; \
+ total_sum.s0 += ((float)(((bits4.s6 & 0x00F0) >> 4) | ((bits2.s6 & 0x0C) << 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s5; \
+ total_sum.s0 += ((float)(((bits4.s6 & 0x0F00) >> 8) | ((bits2.s6 & 0x30) )) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s6; \
+ total_sum.s0 += ((float)(((bits4.s6 & 0xF000) >> 12) | ((bits2.s6 & 0xC0) >> 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y.s7; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0x000F) ) | ((bits2.s5 & 0x03) << 4)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s0; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0x00F0) >> 4) | ((bits2.s5 & 0x0C) << 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s1; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0x0F00) >> 8) | ((bits2.s5 & 0x30) )) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s2; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0xF000) >> 12) | ((bits2.s5 & 0xC0) >> 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s3; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0x000F) ) | ((bits2.s7 & 0x03) << 4)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s4; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0x00F0) >> 4) | ((bits2.s7 & 0x0C) << 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s5; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0x0F00) >> 8) | ((bits2.s7 & 0x30) )) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s6; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0xF000) >> 12) | ((bits2.s7 & 0xC0) >> 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y.s7; \
+
+#define dequantize_block_acc_bcast_8_lo(total_sum, bits4, bits2, scale_d, scale_s, y) \
+ shared_y = sub_group_broadcast(y, 2); \
+ total_sum.s0 += ((float)(((bits4.s0 & 0x000F) ) | ((bits2.s0 & 0x03) << 4)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s0; \
+ total_sum.s0 += ((float)(((bits4.s0 & 0x00F0) >> 4) | ((bits2.s0 & 0x0C) << 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s1; \
+ total_sum.s0 += ((float)(((bits4.s0 & 0x0F00) >> 8) | ((bits2.s0 & 0x30) )) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s2; \
+ total_sum.s0 += ((float)(((bits4.s0 & 0xF000) >> 12) | ((bits2.s0 & 0xC0) >> 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s3; \
+ total_sum.s0 += ((float)(((bits4.s2 & 0x000F) ) | ((bits2.s2 & 0x03) << 4)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s4; \
+ total_sum.s0 += ((float)(((bits4.s2 & 0x00F0) >> 4) | ((bits2.s2 & 0x0C) << 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s5; \
+ total_sum.s0 += ((float)(((bits4.s2 & 0x0F00) >> 8) | ((bits2.s2 & 0x30) )) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s6; \
+ total_sum.s0 += ((float)(((bits4.s2 & 0xF000) >> 12) | ((bits2.s2 & 0xC0) >> 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s7; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0x000F) ) | ((bits2.s1 & 0x03) << 4)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s0; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0x00F0) >> 4) | ((bits2.s1 & 0x0C) << 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s1; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0x0F00) >> 8) | ((bits2.s1 & 0x30) )) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s2; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0xF000) >> 12) | ((bits2.s1 & 0xC0) >> 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s3; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0x000F) ) | ((bits2.s3 & 0x03) << 4)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s4; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0x00F0) >> 4) | ((bits2.s3 & 0x0C) << 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s5; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0x0F00) >> 8) | ((bits2.s3 & 0x30) )) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s6; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0xF000) >> 12) | ((bits2.s3 & 0xC0) >> 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s7; \
+ shared_y = sub_group_broadcast(y, 3); \
+ total_sum.s0 += ((float)(((bits4.s4 & 0x000F) ) | ((bits2.s4 & 0x03) << 4)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s0; \
+ total_sum.s0 += ((float)(((bits4.s4 & 0x00F0) >> 4) | ((bits2.s4 & 0x0C) << 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s1; \
+ total_sum.s0 += ((float)(((bits4.s4 & 0x0F00) >> 8) | ((bits2.s4 & 0x30) )) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s2; \
+ total_sum.s0 += ((float)(((bits4.s4 & 0xF000) >> 12) | ((bits2.s4 & 0xC0) >> 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s3; \
+ total_sum.s0 += ((float)(((bits4.s6 & 0x000F) ) | ((bits2.s6 & 0x03) << 4)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s4; \
+ total_sum.s0 += ((float)(((bits4.s6 & 0x00F0) >> 4) | ((bits2.s6 & 0x0C) << 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s5; \
+ total_sum.s0 += ((float)(((bits4.s6 & 0x0F00) >> 8) | ((bits2.s6 & 0x30) )) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s6; \
+ total_sum.s0 += ((float)(((bits4.s6 & 0xF000) >> 12) | ((bits2.s6 & 0xC0) >> 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y.s7; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0x000F) ) | ((bits2.s5 & 0x03) << 4)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s0; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0x00F0) >> 4) | ((bits2.s5 & 0x0C) << 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s1; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0x0F00) >> 8) | ((bits2.s5 & 0x30) )) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s2; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0xF000) >> 12) | ((bits2.s5 & 0xC0) >> 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s3; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0x000F) ) | ((bits2.s7 & 0x03) << 4)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s4; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0x00F0) >> 4) | ((bits2.s7 & 0x0C) << 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s5; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0x0F00) >> 8) | ((bits2.s7 & 0x30) )) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s6; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0xF000) >> 12) | ((bits2.s7 & 0xC0) >> 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y.s7; \
+
+#define dequantize_block_acc_bcast_1_hi(total_sum, bits4, bits2, scale_d, scale_s, y) \
+ float shared_y; \
+ shared_y = sub_group_broadcast(y.s0, 0); \
+ total_sum.s0 += ((float)(((bits4.s0 & 0x000F) ) | ((bits2.s0 & 0x03) << 4)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0x000F) ) | ((bits2.s1 & 0x03) << 4)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s1, 0); \
+ total_sum.s0 += ((float)(((bits4.s0 & 0x00F0) >> 4) | ((bits2.s0 & 0x0C) << 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0x00F0) >> 4) | ((bits2.s1 & 0x0C) << 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s2, 0); \
+ total_sum.s0 += ((float)(((bits4.s0 & 0x0F00) >> 8) | ((bits2.s0 & 0x30) )) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0x0F00) >> 8) | ((bits2.s1 & 0x30) )) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s3, 0); \
+ total_sum.s0 += ((float)(((bits4.s0 & 0xF000) >> 12) | ((bits2.s0 & 0xC0) >> 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0xF000) >> 12) | ((bits2.s1 & 0xC0) >> 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s4, 0); \
+ total_sum.s0 += ((float)(((bits4.s2 & 0x000F) ) | ((bits2.s2 & 0x03) << 4)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0x000F) ) | ((bits2.s3 & 0x03) << 4)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s5, 0); \
+ total_sum.s0 += ((float)(((bits4.s2 & 0x00F0) >> 4) | ((bits2.s2 & 0x0C) << 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0x00F0) >> 4) | ((bits2.s3 & 0x0C) << 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s6, 0); \
+ total_sum.s0 += ((float)(((bits4.s2 & 0x0F00) >> 8) | ((bits2.s2 & 0x30) )) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0x0F00) >> 8) | ((bits2.s3 & 0x30) )) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s7, 0); \
+ total_sum.s0 += ((float)(((bits4.s2 & 0xF000) >> 12) | ((bits2.s2 & 0xC0) >> 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0xF000) >> 12) | ((bits2.s3 & 0xC0) >> 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s0, 1); \
+ total_sum.s0 += ((float)(((bits4.s4 & 0x000F) ) | ((bits2.s4 & 0x03) << 4)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0x000F) ) | ((bits2.s5 & 0x03) << 4)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s1, 1); \
+ total_sum.s0 += ((float)(((bits4.s4 & 0x00F0) >> 4) | ((bits2.s4 & 0x0C) << 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0x00F0) >> 4) | ((bits2.s5 & 0x0C) << 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s2, 1); \
+ total_sum.s0 += ((float)(((bits4.s4 & 0x0F00) >> 8) | ((bits2.s4 & 0x30) )) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0x0F00) >> 8) | ((bits2.s5 & 0x30) )) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s3, 1); \
+ total_sum.s0 += ((float)(((bits4.s4 & 0xF000) >> 12) | ((bits2.s4 & 0xC0) >> 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0xF000) >> 12) | ((bits2.s5 & 0xC0) >> 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s4, 1); \
+ total_sum.s0 += ((float)(((bits4.s6 & 0x000F) ) | ((bits2.s6 & 0x03) << 4)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0x000F) ) | ((bits2.s7 & 0x03) << 4)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s5, 1); \
+ total_sum.s0 += ((float)(((bits4.s6 & 0x00F0) >> 4) | ((bits2.s6 & 0x0C) << 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0x00F0) >> 4) | ((bits2.s7 & 0x0C) << 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s6, 1); \
+ total_sum.s0 += ((float)(((bits4.s6 & 0x0F00) >> 8) | ((bits2.s6 & 0x30) )) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0x0F00) >> 8) | ((bits2.s7 & 0x30) )) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s7, 1); \
+ total_sum.s0 += ((float)(((bits4.s6 & 0xF000) >> 12) | ((bits2.s6 & 0xC0) >> 2)) - 32.f) * scale_s.s0 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0xF000) >> 12) | ((bits2.s7 & 0xC0) >> 2)) - 32.f) * scale_s.s2 * scale_d.s1 * shared_y; \
+
+#define dequantize_block_acc_bcast_1_lo(total_sum, bits4, bits2, scale_d, scale_s, y) \
+ shared_y = sub_group_broadcast(y.s0, 2); \
+ total_sum.s0 += ((float)(((bits4.s0 & 0x000F) ) | ((bits2.s0 & 0x03) << 4)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0x000F) ) | ((bits2.s1 & 0x03) << 4)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s1, 2); \
+ total_sum.s0 += ((float)(((bits4.s0 & 0x00F0) >> 4) | ((bits2.s0 & 0x0C) << 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0x00F0) >> 4) | ((bits2.s1 & 0x0C) << 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s2, 2); \
+ total_sum.s0 += ((float)(((bits4.s0 & 0x0F00) >> 8) | ((bits2.s0 & 0x30) )) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0x0F00) >> 8) | ((bits2.s1 & 0x30) )) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s3, 2); \
+ total_sum.s0 += ((float)(((bits4.s0 & 0xF000) >> 12) | ((bits2.s0 & 0xC0) >> 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s1 & 0xF000) >> 12) | ((bits2.s1 & 0xC0) >> 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s4, 2); \
+ total_sum.s0 += ((float)(((bits4.s2 & 0x000F) ) | ((bits2.s2 & 0x03) << 4)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0x000F) ) | ((bits2.s3 & 0x03) << 4)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s5, 2); \
+ total_sum.s0 += ((float)(((bits4.s2 & 0x00F0) >> 4) | ((bits2.s2 & 0x0C) << 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0x00F0) >> 4) | ((bits2.s3 & 0x0C) << 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s6, 2); \
+ total_sum.s0 += ((float)(((bits4.s2 & 0x0F00) >> 8) | ((bits2.s2 & 0x30) )) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0x0F00) >> 8) | ((bits2.s3 & 0x30) )) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s7, 2); \
+ total_sum.s0 += ((float)(((bits4.s2 & 0xF000) >> 12) | ((bits2.s2 & 0xC0) >> 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s3 & 0xF000) >> 12) | ((bits2.s3 & 0xC0) >> 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s0, 3); \
+ total_sum.s0 += ((float)(((bits4.s4 & 0x000F) ) | ((bits2.s4 & 0x03) << 4)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0x000F) ) | ((bits2.s5 & 0x03) << 4)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s1, 3); \
+ total_sum.s0 += ((float)(((bits4.s4 & 0x00F0) >> 4) | ((bits2.s4 & 0x0C) << 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0x00F0) >> 4) | ((bits2.s5 & 0x0C) << 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s2, 3); \
+ total_sum.s0 += ((float)(((bits4.s4 & 0x0F00) >> 8) | ((bits2.s4 & 0x30) )) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0x0F00) >> 8) | ((bits2.s5 & 0x30) )) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s3, 3); \
+ total_sum.s0 += ((float)(((bits4.s4 & 0xF000) >> 12) | ((bits2.s4 & 0xC0) >> 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s5 & 0xF000) >> 12) | ((bits2.s5 & 0xC0) >> 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s4, 3); \
+ total_sum.s0 += ((float)(((bits4.s6 & 0x000F) ) | ((bits2.s6 & 0x03) << 4)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0x000F) ) | ((bits2.s7 & 0x03) << 4)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s5, 3); \
+ total_sum.s0 += ((float)(((bits4.s6 & 0x00F0) >> 4) | ((bits2.s6 & 0x0C) << 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0x00F0) >> 4) | ((bits2.s7 & 0x0C) << 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s6, 3); \
+ total_sum.s0 += ((float)(((bits4.s6 & 0x0F00) >> 8) | ((bits2.s6 & 0x30) )) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0x0F00) >> 8) | ((bits2.s7 & 0x30) )) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+ shared_y = sub_group_broadcast(y.s7, 3); \
+ total_sum.s0 += ((float)(((bits4.s6 & 0xF000) >> 12) | ((bits2.s6 & 0xC0) >> 2)) - 32.f) * scale_s.s1 * scale_d.s0 * shared_y; \
+ total_sum.s1 += ((float)(((bits4.s7 & 0xF000) >> 12) | ((bits2.s7 & 0xC0) >> 2)) - 32.f) * scale_s.s3 * scale_d.s1 * shared_y; \
+
+#if defined(ADRENO_GPU)
+REQD_SUBGROUP_SIZE_64
+#endif
+kernel void kernel_gemv_noshuffle_q6_K_f32(
+ read_only image1d_buffer_t src0_ql,
+ read_only image1d_buffer_t src0_qh,
+ global half2 * src0_s,
+ global half2 * src0_d,
+ read_only image1d_buffer_t src1,
+ global float * dst,
+ ulong offsetd,
+ int ne00,
+ int ne01
+) {
+ int grp = get_local_id(1);
+ int gid = get_global_id(0);
+ ushort slid = get_sub_group_local_id();
+
+ int nb = ne00 / 32;
+
+ uint4 reg_a_l;
+ ushort4 reg_a_h;
+ half2 reg_d;
+ char4 reg_s;
+ float8 reg_b;
+
+ float2 total_sum = 0.0f;
+
+ int line_stride_a = ne01 / 2;
+ int block_stride_a = NSUBGROUPS * ne01;
+
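+    // the 4 subgroups of the workgroup split the 32-weight blocks along K between them;
+    // each work-item accumulates 2 output rows (M) in total_sum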
+ for (int k = grp; k < nb; k += NSUBGROUPS) {
+ reg_d = src0_d[gid + k/8 * line_stride_a];
+ reg_s = as_char4(src0_s[gid + k * line_stride_a]);
+
+ if (slid < 4) {
+ reg_b.s0123 = read_imagef(src1, 0 + slid*2 + k*8);
+ reg_b.s4567 = read_imagef(src1, 1 + slid*2 + k*8);
+ }
+
+ reg_a_l.s0 = read_imageui(src0_ql, gid + k*block_stride_a + line_stride_a*0).x;
+ reg_a_l.s1 = read_imageui(src0_ql, gid + k*block_stride_a + line_stride_a*1).x;
+ reg_a_l.s2 = read_imageui(src0_ql, gid + k*block_stride_a + line_stride_a*2).x;
+ reg_a_l.s3 = read_imageui(src0_ql, gid + k*block_stride_a + line_stride_a*3).x;
+
+ reg_a_h.s0 = as_ushort(read_imageh(src0_qh, gid + k*block_stride_a + line_stride_a*0).x);
+ reg_a_h.s1 = as_ushort(read_imageh(src0_qh, gid + k*block_stride_a + line_stride_a*1).x);
+ reg_a_h.s2 = as_ushort(read_imageh(src0_qh, gid + k*block_stride_a + line_stride_a*2).x);
+ reg_a_h.s3 = as_ushort(read_imageh(src0_qh, gid + k*block_stride_a + line_stride_a*3).x);
+
+#ifdef VECTOR_SUB_GROUP_BROADCAT
+ dequantize_block_acc_bcast_8_hi(total_sum, as_ushort8(reg_a_l), as_uchar8(reg_a_h), reg_d, reg_s, reg_b);
+#else
+ dequantize_block_acc_bcast_1_hi(total_sum, as_ushort8(reg_a_l), as_uchar8(reg_a_h), reg_d, reg_s, reg_b);
+#endif // VECTOR_SUB_GROUP_BROADCAT
+
+ reg_a_l.s0 = read_imageui(src0_ql, gid + k*block_stride_a + line_stride_a*4).x;
+ reg_a_l.s1 = read_imageui(src0_ql, gid + k*block_stride_a + line_stride_a*5).x;
+ reg_a_l.s2 = read_imageui(src0_ql, gid + k*block_stride_a + line_stride_a*6).x;
+ reg_a_l.s3 = read_imageui(src0_ql, gid + k*block_stride_a + line_stride_a*7).x;
+
+ reg_a_h.s0 = as_ushort(read_imageh(src0_qh, gid + k*block_stride_a + line_stride_a*4).x);
+ reg_a_h.s1 = as_ushort(read_imageh(src0_qh, gid + k*block_stride_a + line_stride_a*5).x);
+ reg_a_h.s2 = as_ushort(read_imageh(src0_qh, gid + k*block_stride_a + line_stride_a*6).x);
+ reg_a_h.s3 = as_ushort(read_imageh(src0_qh, gid + k*block_stride_a + line_stride_a*7).x);
+
+#ifdef VECTOR_SUB_GROUP_BROADCAT
+ dequantize_block_acc_bcast_8_lo(total_sum, as_ushort8(reg_a_l), as_uchar8(reg_a_h), reg_d, reg_s, reg_b);
+#else
+ dequantize_block_acc_bcast_1_lo(total_sum, as_ushort8(reg_a_l), as_uchar8(reg_a_h), reg_d, reg_s, reg_b);
+#endif // VECTOR_SUB_GROUP_BROADCAT
+ }
+
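+    // cross-subgroup reduction: subgroups 1-3 stage their partial sums in local memory,
+    // then subgroup 0 adds them in and writes the final 2 rows per work-item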
+ local float2 reduce_lm[SUBGROUP_SIZE * 3];
+ if (grp == 1) {
+ reduce_lm[SUBGROUP_SIZE*0 + slid] = total_sum;
+ }
+ if (grp == 2) {
+ reduce_lm[SUBGROUP_SIZE*1 + slid] = total_sum;
+ }
+ if (grp == 3) {
+ reduce_lm[SUBGROUP_SIZE*2 + slid] = total_sum;
+ }
+
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ if (grp == 0) {
+ total_sum += reduce_lm[SUBGROUP_SIZE*0 + slid];
+ }
+ if (grp == 0) {
+ total_sum += reduce_lm[SUBGROUP_SIZE*1 + slid];
+ }
+ if (grp == 0) {
+ total_sum += reduce_lm[SUBGROUP_SIZE*2 + slid];
+ }
+
+ if (grp == 0) {
+ dst = (global float*)((global char*)dst + offsetd);
+ vstore2(total_sum, 0, &(dst[gid * 2]));
+ }
+}