cl_kernel kernel_mul_mm_f16_f32_kq;
cl_kernel kernel_mul_mat_q4_0_f32, kernel_mul_mat_q4_0_f32_v;
cl_kernel kernel_convert_block_q4_0, kernel_restore_block_q4_0;
+ cl_kernel kernel_convert_block_q4_1, kernel_restore_block_q4_1;
cl_kernel kernel_convert_block_mxfp4, kernel_convert_block_mxfp4_trans, kernel_restore_block_mxfp4, kernel_restore_block_mxfp4_trans;
cl_kernel kernel_convert_block_q8_0, kernel_restore_block_q8_0, kernel_restore_block_q8_0_trans;
cl_kernel kernel_mul_mat_q4_0_f32_8x_flat;
cl_kernel kernel_restore_block_q4_0_noshuffle;
cl_kernel kernel_convert_block_q6_K, kernel_restore_block_q6_K;
cl_kernel kernel_mul_mat_q4_0_f32_1d_8x_flat, kernel_mul_mat_q4_0_f32_1d_16x_flat;
+ cl_kernel kernel_mul_mv_q4_1_f32;
+ cl_kernel kernel_mul_mv_q4_1_f32_flat;
cl_kernel kernel_mul_mv_q4_K_f32;
cl_kernel kernel_mul_mv_q6_K_f32;
cl_kernel kernel_mul_mv_q6_K_f32_flat;
cl_kernel kernel_mul_mv_id_mxfp4_f32_flat;
cl_kernel kernel_mul_mm_f32_f32_l4_lm;
cl_kernel kernel_mul_mm_f16_f32_l4_lm;
+ cl_kernel kernel_mul_mm_q4_0_f32_l4_lm;
+ cl_kernel kernel_mul_mm_q4_1_f32_l4_lm;
cl_kernel kernel_mul_mm_q8_0_f32_l4_lm;
cl_kernel kernel_mul_mm_q6_k_f32_l4_lm;
CL_CHECK((backend_ctx->kernel_restore_block_q4_0_noshuffle = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q4_0_noshuffle", &err), err));
CL_CHECK((backend_ctx->kernel_convert_block_q4_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_0", &err), err));
CL_CHECK((backend_ctx->kernel_restore_block_q4_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q4_0", &err), err));
+ CL_CHECK((backend_ctx->kernel_convert_block_q4_1 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_1", &err), err));
+ CL_CHECK((backend_ctx->kernel_restore_block_q4_1 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q4_1", &err), err));
CL_CHECK((backend_ctx->kernel_convert_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_mxfp4", &err), err));
CL_CHECK((backend_ctx->kernel_convert_block_mxfp4_trans = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_mxfp4_trans", &err), err));
CL_CHECK((backend_ctx->kernel_restore_block_mxfp4_trans = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_mxfp4_trans", &err), err));
GGML_LOG_CONT(".");
}
+ // mul_mv_q4_1_f32
+ {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+ const std::string kernel_src {
+ #include "mul_mv_q4_1_f32.cl.h"
+ };
+#else
+ const std::string kernel_src = read_file("mul_mv_q4_1_f32.cl");
+#endif
+ cl_program prog =
+ build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+ CL_CHECK((backend_ctx->kernel_mul_mv_q4_1_f32 = clCreateKernel(prog, "kernel_mul_mv_q4_1_f32", &err), err));
+ CL_CHECK(clReleaseProgram(prog));
+ GGML_LOG_CONT(".");
+ }
+
+ // mul_mv_q4_1_f32_flat
+ {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+ const std::string kernel_src {
+ #include "mul_mv_q4_1_f32_flat.cl.h"
+ };
+#else
+ const std::string kernel_src = read_file("mul_mv_q4_1_f32_flat.cl");
+#endif
+ cl_program prog =
+ build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+ CL_CHECK((backend_ctx->kernel_mul_mv_q4_1_f32_flat = clCreateKernel(prog, "kernel_mul_mv_q4_1_f32_flat", &err), err));
+ CL_CHECK(clReleaseProgram(prog));
+ GGML_LOG_CONT(".");
+ }
+
// mul_mv_q4_k_f32
{
#ifdef GGML_OPENCL_EMBED_KERNELS
GGML_LOG_CONT(".");
}
+ // mul_mm_q4_0_f32_l4_lm
+ {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+ const std::string kernel_src {
+ #include "mul_mm_q4_0_f32_l4_lm.cl.h"
+ };
+#else
+ const std::string kernel_src = read_file("mul_mm_q4_0_f32_l4_lm.cl");
+#endif
+ cl_program prog =
+ build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+ CL_CHECK((backend_ctx->kernel_mul_mm_q4_0_f32_l4_lm = clCreateKernel(prog, "kernel_mul_mm_q4_0_f32_l4_lm", &err), err));
+ CL_CHECK(clReleaseProgram(prog));
+ GGML_LOG_CONT(".");
+ }
+
+ // mul_mm_q4_1_f32_l4_lm
+ {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+ const std::string kernel_src {
+ #include "mul_mm_q4_1_f32_l4_lm.cl.h"
+ };
+#else
+ const std::string kernel_src = read_file("mul_mm_q4_1_f32_l4_lm.cl");
+#endif
+ cl_program prog =
+ build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+ CL_CHECK((backend_ctx->kernel_mul_mm_q4_1_f32_l4_lm = clCreateKernel(prog, "kernel_mul_mm_q4_1_f32_l4_lm", &err), err));
+ CL_CHECK(clReleaseProgram(prog));
+ GGML_LOG_CONT(".");
+ }
+
// mul_mm_q8_0_f32_l4_lm
{
#ifdef GGML_OPENCL_EMBED_KERNELS
}
};
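+// Extra tensor data for Q4_1 weights stored in flattened (struct-of-arrays) form:
+// quants (q), scales (d) and mins (m) live in separate sub-buffers so the
+// *_flat kernels can address them as plain arrays.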
+struct ggml_tensor_extra_cl_q4_1 {
+ // Quantized values.
+ cl_mem q = nullptr;
+ // Quantized values in image1d_buffer_t.
+ cl_mem q_img = nullptr;
+ // Scales.
+ cl_mem d = nullptr;
+ // Scales in image1d_buffer_t.
+ cl_mem d_img = nullptr;
+ // Mins.
+ cl_mem m = nullptr;
+ // Min in image1d_buffer_t.
+ cl_mem m_img = nullptr;
+ // Size of quantized values.
+ size_t size_q = 0;
+ // Size of scales.
+ size_t size_d = 0;
+ // Size of min values.
+ size_t size_m = 0;
+
+ ~ggml_tensor_extra_cl_q4_1() {
+ reset();
+ }
+
+ void reset() {
+ // q, d and m are subbuffers into the bigger buffer allocated in ggml_backend_buffer.
+ // They must be released so that the original buffer can be freed without
+ // leaking memory.
+ if (q != nullptr) {
+ CL_CHECK(clReleaseMemObject(q));
+ q = nullptr;
+ }
+ if (d != nullptr) {
+ CL_CHECK(clReleaseMemObject(d));
+ d = nullptr;
+ }
+ if (m != nullptr) {
+ CL_CHECK(clReleaseMemObject(m));
+ m = nullptr;
+ }
+ // Currently, q_img, d_img and m_img are only initialized when SMALL_ALLOC is
+ // enabled. They point to the images in ggml_backend_opencl_buffer_context.
+ // So, there is no need to release them here.
+ // TODO: initialize them for the non-SMALL_ALLOC path, or remove them.
+ q_img = nullptr;
+ d_img = nullptr;
+ m_img = nullptr;
+ size_q = 0;
+ size_d = 0;
+ size_m = 0;
+ }
+};
+
struct ggml_tensor_extra_cl_mxfp4 {
// Quantized values.
cl_mem q = nullptr;
return true;
} else if (op->src[0]->type == GGML_TYPE_F32) {
return op->src[1]->type == GGML_TYPE_F32;
- } else if (op->src[0]->type == GGML_TYPE_Q4_0 || op->src[0]->type == GGML_TYPE_MXFP4 ||
- op->src[0]->type == GGML_TYPE_Q4_K ||
+ } else if (op->src[0]->type == GGML_TYPE_Q4_0 || op->src[0]->type == GGML_TYPE_Q4_1 ||
+ op->src[0]->type == GGML_TYPE_MXFP4 ||
+ op->src[0]->type == GGML_TYPE_Q4_K ||
op->src[0]->type == GGML_TYPE_Q6_K) {
return op->src[1]->type == GGML_TYPE_F32 && ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]);
} else if (op->src[0]->type == GGML_TYPE_Q8_0) {
return extra;
}
+ ggml_tensor_extra_cl_q4_1 * ggml_opencl_alloc_temp_tensor_extra_q4_1() {
+ ggml_tensor_extra_cl_q4_1 * extra;
+ if (temp_tensor_extras_q4_1.empty()) {
+ extra = new ggml_tensor_extra_cl_q4_1();
+ } else {
+ extra = temp_tensor_extras_q4_1.back();
+ temp_tensor_extras_q4_1.pop_back();
+ }
+
+ temp_tensor_extras_q4_1_in_use.push_back(extra);
+
+ extra->reset();
+ return extra;
+ }
+
ggml_tensor_extra_cl_mxfp4 * ggml_opencl_alloc_temp_tensor_extra_mxfp4() {
ggml_tensor_extra_cl_mxfp4 * extra;
if (temp_tensor_extras_mxfp4.empty()) {
}
temp_tensor_extras_q4_0_in_use.clear();
+ for (ggml_tensor_extra_cl_q4_1 * e : temp_tensor_extras_q4_1_in_use) {
+ temp_tensor_extras_q4_1.push_back(e);
+ }
+ temp_tensor_extras_q4_1_in_use.clear();
+
for (ggml_tensor_extra_cl_mxfp4 * e : temp_tensor_extras_mxfp4_in_use) {
temp_tensor_extras_mxfp4.push_back(e);
}
std::vector<ggml_tensor_extra_cl *> temp_tensor_extras_in_use;
std::vector<ggml_tensor_extra_cl_q4_0 *> temp_tensor_extras_q4_0;
std::vector<ggml_tensor_extra_cl_q4_0 *> temp_tensor_extras_q4_0_in_use;
+ std::vector<ggml_tensor_extra_cl_q4_1 *> temp_tensor_extras_q4_1;
+ std::vector<ggml_tensor_extra_cl_q4_1 *> temp_tensor_extras_q4_1_in_use;
std::vector<ggml_tensor_extra_cl_mxfp4 *> temp_tensor_extras_mxfp4;
std::vector<ggml_tensor_extra_cl_mxfp4 *> temp_tensor_extras_mxfp4_in_use;
std::vector<ggml_tensor_extra_cl_q8_0 *> temp_tensor_extras_q8_0;
return;
}
+ if (tensor->type == GGML_TYPE_Q4_1) {
+ ggml_tensor_extra_cl * extra_orig = (ggml_tensor_extra_cl *)tensor->extra;
+ GGML_ASSERT(extra_orig && "Tesnors in OpenCL backend should have been allocated and initialized");
+
+ // Allocate the new extra and create aliases from the original.
+ ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+ ggml_tensor_extra_cl_q4_1 * extra = ctx->ggml_opencl_alloc_temp_tensor_extra_q4_1();
+
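+ // For Q4_1, each block of 32 weights stores one fp16 scale, one fp16 min and
+ // 16 bytes of packed 4-bit quants (2 + 2 + 16 = 20 bytes per block), so the
+ // three sizes computed below must add up to ggml_nbytes(tensor).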
+ size_t size_d = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*sizeof(ggml_fp16_t);
+ size_t size_m = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*sizeof(ggml_fp16_t);
+ size_t size_q = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*ggml_blck_size(tensor->type)/2;
+ GGML_ASSERT(size_d + size_m + size_q == ggml_nbytes(tensor) && "Incorrect tensor size");
+
+ cl_int err;
+ cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE,
+ ggml_nbytes(tensor), NULL, &err);
+ CL_CHECK(err);
+ CL_CHECK(clEnqueueWriteBuffer(
+ queue, data_device, CL_TRUE, 0,
+ ggml_nbytes(tensor), data, 0, NULL, NULL));
+
+ cl_buffer_region region;
+
+ // The original tensor memory is divided into scales, mins and quants, i.e.,
+ // we first store scales, then mins, then quants.
+ // Create subbuffer for scales.
+ region.origin = align_to(extra_orig->offset + tensor->view_offs + offset, backend_ctx->alignment);
+ region.size = size_d;
+ extra->d = clCreateSubBuffer(
+ extra_orig->data_device, CL_MEM_READ_WRITE,
+ CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err);
+ CL_CHECK(err);
+ auto previous_origin = region.origin;
+
+ // Create subbuffer for mins.
+ region.origin = align_to(previous_origin + size_d, backend_ctx->alignment);
+ region.size = size_m;
+ extra->m = clCreateSubBuffer(
+ extra_orig->data_device, CL_MEM_READ_WRITE,
+ CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err);
+ CL_CHECK(err);
+ previous_origin = region.origin;
+
+ // Create subbuffer for quants.
+ region.origin = align_to(previous_origin + size_m, backend_ctx->alignment);
+ region.size = size_q;
+ extra->q = clCreateSubBuffer(
+ extra_orig->data_device, CL_MEM_READ_WRITE,
+ CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err);
+ CL_CHECK(err);
+
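+ // kernel_convert_block_q4_1 de-interleaves the block_q4_1 data staged in
+ // data_device into the flat q/d/m sub-buffers created above.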
+ cl_kernel kernel = backend_ctx->kernel_convert_block_q4_1;
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->q));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->d));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra->m));
+
+ size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
+ size_t local_work_size[] = {64, 1, 1};
+
+ cl_event evt;
+ CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt));
+ CL_CHECK(clWaitForEvents(1, &evt));
+ CL_CHECK(clReleaseMemObject(data_device));
+
+ tensor->extra = extra;
+
+ return;
+ }
if (tensor->type == GGML_TYPE_MXFP4) {
ggml_tensor_extra_cl * extra_orig = (ggml_tensor_extra_cl *)tensor->extra;
GGML_ASSERT(extra_orig && "Tesnors in OpenCL backend should have been allocated and initialized");
size, data, 0, NULL, NULL));
CL_CHECK(clReleaseMemObject(data_device));
return;
- } else if (tensor->type == GGML_TYPE_MXFP4) {
+ }
+ if (tensor->type == GGML_TYPE_Q4_1) {
+ ggml_tensor_extra_cl_q4_1 * extra = (ggml_tensor_extra_cl_q4_1 *)tensor->extra;
+
+ cl_int err;
+ cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE,
+ ggml_nbytes(tensor), NULL, &err);
+ CL_CHECK(err);
+
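+ // kernel_restore_block_q4_1 re-interleaves the flat q/d/m buffers back into
+ // contiguous block_q4_1 layout so the data can be read back as a regular tensor.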
+ cl_kernel kernel = backend_ctx->kernel_restore_block_q4_1;
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->d));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->m));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &data_device));
+
+ size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
+ size_t local_work_size[] = {1, 1, 1};
+
+ cl_event evt;
+ CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL,
+ global_work_size, local_work_size, 0, NULL, &evt));
+ CL_CHECK(clWaitForEvents(1, &evt));
+ CL_CHECK(clEnqueueReadBuffer(
+ queue, data_device, CL_TRUE, offset,
+ size, data, 0, NULL, NULL));
+ CL_CHECK(clReleaseMemObject(data_device));
+ return;
+ }
+ if (tensor->type == GGML_TYPE_MXFP4) {
ggml_tensor_extra_cl_mxfp4 * extra = (ggml_tensor_extra_cl_mxfp4 *)tensor->extra;
cl_int err;
#ifdef GGML_OPENCL_SOA_Q
ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra;
+ ggml_tensor_extra_cl_q4_1 * extra0_q4_1 = (ggml_tensor_extra_cl_q4_1 *)src0->extra;
ggml_tensor_extra_cl_mxfp4 * extra0_mxfp4 = (ggml_tensor_extra_cl_mxfp4 *)src0->extra;
ggml_tensor_extra_cl_q8_0 * extra0_q8_0 = (ggml_tensor_extra_cl_q8_0 *)src0->extra;
ggml_tensor_extra_cl_q6_K * extra0_q6_K = (ggml_tensor_extra_cl_q6_K *)src0->extra;
backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
return;
}
+ case GGML_TYPE_Q4_0: {
+ if (ne11 < 32) {
+ break;
+ }
+ if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(src1)) {
+ break;
+ }
+
+ kernel = backend_ctx->kernel_mul_mm_q4_0_f32_l4_lm;
+ nth0 = 128; // calculated as (BM*BN)/(TM*TN)
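+ // With BM = BN = 64 (see the global work size below) this corresponds to
+ // TM*TN = 64*64/128 = 32 output values per thread.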
+
+ int batch_stride_a = ne00*ne01;
+ int batch_stride_b = ne10*ne11;
+ int batch_stride_d = ne0*ne1;
+
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q4_0->q));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_0->d));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00));
+ CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01));
+ CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02));
+ CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne11));
+ CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12));
+ CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne10)); // stride_a
+ CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne10)); // stride_b
+ CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne01)); // stride_d
+ CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &batch_stride_a));
+ CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &batch_stride_b));
+ CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &batch_stride_d));
+ CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r2));
+ CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &r3));
+
+ // 64 is block tile size BM and BN - change here when BM and BN in the kernel are changed.
+ size_t global_work_size[] = {(size_t)(CEIL_DIV(ne01, 64)*nth0), (size_t)(CEIL_DIV(ne11, 64)), (size_t)ne12*ne13};
+ size_t local_work_size[] = {(size_t)nth0, 1, 1};
+
+ backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+ return;
+ }
+ case GGML_TYPE_Q4_1: {
+ if (ne11 < 32) {
+ break;
+ }
+ if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(src1)) {
+ break;
+ }
+
+ kernel = backend_ctx->kernel_mul_mm_q4_1_f32_l4_lm;
+ nth0 = 128; // calculated as (BM*BN)/(TM*TN)
+
+ int batch_stride_a = ne00*ne01;
+ int batch_stride_b = ne10*ne11;
+ int batch_stride_d = ne0*ne1;
+
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q4_1->q));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_1->d));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra0_q4_1->m));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra1->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_ulong), &offset1));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &offsetd));
+ CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne00));
+ CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne01));
+ CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne02));
+ CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne11));
+ CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne12));
+ CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne10)); // stride_a
+ CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne10)); // stride_b
+ CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne01)); // stride_d
+ CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &batch_stride_a));
+ CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &batch_stride_b));
+ CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &batch_stride_d));
+ CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &r2));
+ CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &r3));
+
+ // 64 is block tile size BM and BN - change here when BM and BN in the kernel are changed.
+ size_t global_work_size[] = {(size_t)(CEIL_DIV(ne01, 64)*nth0), (size_t)(CEIL_DIV(ne11, 64)), (size_t)ne12*ne13};
+ size_t local_work_size[] = {(size_t)nth0, 1, 1};
+
+ backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+ return;
+ }
case GGML_TYPE_Q8_0: {
if (ne11 < 32) {
break;
CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r3));
#endif // GGML_OPENCL_SOA_Q
break;
- case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q4_1: {
+#ifdef GGML_OPENCL_SOA_Q
+ if (backend_ctx->gpu_family == INTEL) {
+ nth0 = 16;
+ nth1 = 1;
+ ndst = 4;
+ } else if (backend_ctx->gpu_family == ADRENO) {
+ nth0 = 64;
+ nth1 = 1;
+ ndst = 4;
+ } else {
+ GGML_ASSERT(false && "TODO: Unknown GPU");
+ }
+
+ kernel = backend_ctx->kernel_mul_mv_q4_1_f32_flat;
+
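+ // The flat variant takes the q/d/m sub-buffers directly; their base offsets were
+ // baked in when the sub-buffers were created, so no src0 offset is passed.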
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q4_1->q));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_1->d));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra0_q4_1->m));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra1->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_ulong), &offset1));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &offsetd));
+ CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne00));
+ CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne01));
+ CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne02));
+ CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne10));
+ CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne12));
+ CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne0));
+ CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne1));
+ CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r2));
+ CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &r3));
+#else
+ if (backend_ctx->gpu_family == INTEL) {
+ nth0 = 16;
+ nth1 = 1;
+ ndst = 4;
+ } else if (backend_ctx->gpu_family == ADRENO) {
+ nth0 = 64;
+ nth1 = 1;
+ ndst = 4;
+ } else {
+ GGML_ASSERT(false && "TODO: Unknown GPU");
+ }
+
+ kernel = backend_ctx->kernel_mul_mv_q4_1_f32;
+
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00));
+ CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01));
+ CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02));
+ CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne10));
+ CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12));
+ CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne0));
+ CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne1));
+ CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &r2));
+ CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r3));
+#endif // GGML_OPENCL_SOA_Q
+ break;
+ }
case GGML_TYPE_Q8_0: {
#ifdef GGML_OPENCL_SOA_Q
kernel = backend_ctx->kernel_mul_mv_q8_0_f32_flat;
--- /dev/null
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+
+#ifdef cl_intel_subgroups
+#pragma OPENCL EXTENSION cl_intel_subgroups : enable
+#else
+#pragma OPENCL EXTENSION cl_khr_subgroups : enable
+#endif
+
+#ifdef cl_intel_required_subgroup_size
+#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
+#define INTEL_GPU 1
+#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
+#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
+#elif defined(cl_qcom_reqd_sub_group_size)
+#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
+#define ADRENO_GPU 1
+#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half")))
+#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
+#endif
+
+#define QK4_1 32
+
+struct block_q4_1 {
+ half d; // delta
+ half m; // min
+ uchar qs[QK4_1 / 2]; // nibbles / quants
+};
+
+inline float block_q4_1_dot_y_flat(
+ global const uchar * x,
+ global const half * dh,
+ global const half * mh,
+ float sumy,
+ float16 yl,
+ int il
+) {
+ float d = *dh;
+ float m = *mh;
+ global const ushort * qs = ((global const ushort *) x + il/2);
+
+ float4 acc = (float4)(0.0f, 0.0f, 0.0f, 0.0f);
+
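+ // Each ushort in qs packs four 4-bit quants. Masking with 0x000F, 0x0F00, 0x00F0
+ // and 0xF000 yields a nibble scaled by 1, 256, 16 and 4096 respectively; the
+ // caller pre-divides the matching y values by those factors, so no shifts are needed.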
+ acc.s0 += yl.s0 * (qs[0] & 0x000F);
+ acc.s1 += yl.s1 * (qs[0] & 0x0F00);
+ acc.s2 += yl.s8 * (qs[0] & 0x00F0);
+ acc.s3 += yl.s9 * (qs[0] & 0xF000);
+
+ acc.s0 += yl.s2 * (qs[1] & 0x000F);
+ acc.s1 += yl.s3 * (qs[1] & 0x0F00);
+ acc.s2 += yl.sa * (qs[1] & 0x00F0);
+ acc.s3 += yl.sb * (qs[1] & 0xF000);
+
+ acc.s0 += yl.s4 * (qs[2] & 0x000F);
+ acc.s1 += yl.s5 * (qs[2] & 0x0F00);
+ acc.s2 += yl.sc * (qs[2] & 0x00F0);
+ acc.s3 += yl.sd * (qs[2] & 0xF000);
+
+ acc.s0 += yl.s6 * (qs[3] & 0x000F);
+ acc.s1 += yl.s7 * (qs[3] & 0x0F00);
+ acc.s2 += yl.se * (qs[3] & 0x00F0);
+ acc.s3 += yl.sf * (qs[3] & 0xF000);
+
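+ // Q4_1 dequantizes as q*d + m, so the dot product expands to
+ // d * sum(q_i*y_i) + m * sum(y_i), with sumy holding the unscaled sum of y.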
+ return d * (acc.s0 + acc.s1 + acc.s2 + acc.s3) + sumy * m;
+}
+
+#undef N_DST
+#undef N_SIMDGROUP
+#undef N_SIMDWIDTH
+
+#ifdef INTEL_GPU
+#define N_DST 4 // each subgroup works on 4 rows
+#define N_SIMDGROUP 1 // number of subgroups in a thread group
+#define N_SIMDWIDTH 16 // assuming subgroup size is 16
+#elif defined (ADRENO_GPU)
+#define N_DST 4
+#define N_SIMDGROUP 1
+#define N_SIMDWIDTH 64
+#endif
+
+inline void mul_vec_q_n_f32_flat(
+ global void * src0_q,
+ global void * src0_d,
+ global void * src0_m,
+ global float * src1,
+ global float * dst,
+ int ne00,
+ int ne01,
+ int ne02,
+ int ne10,
+ int ne12,
+ int ne0,
+ int ne1,
+ int r2,
+ int r3
+) {
+ const ulong nb = ne00/QK4_1;
+
+ int r0 = get_group_id(0);
+ int r1 = get_group_id(1);
+ int im = get_group_id(2);
+
+ int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST;
+
+ int i12 = im%ne12;
+ int i13 = im/ne12;
+
+
+ // The number of scales/mins is the same as the number of blocks.
+ ulong offset0_dm = (first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02));
+ // Each block contains QK4_1/2 uchars, hence offset for qs is as follows.
+ ulong offset0_q = (first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02)) * QK4_1/2;
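+ // For example, for the first batch (i12 = i13 = 0) with ne00 = 4096 (nb = 128)
+ // and first_row = 4: d/m start at element 4*128 = 512, the quants at byte 512*16 = 8192.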
+
+ global uchar * x = (global uchar *) src0_q + offset0_q;
+ global half * d = (global half *) src0_d + offset0_dm;
+ global half * m = (global half *) src0_m + offset0_dm;
+ global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1;
+
+ float16 yl;
+ float4 sumf = (float4)(0.f, 0.f, 0.f, 0.f);
+
+ int ix = get_sub_group_local_id()/2;
+ int il = 8*(get_sub_group_local_id()%2);
+
+ global float * yb = y + ix * QK4_1 + il;
+
+ for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) {
+ float sumy = 0;
+
+ sumy += yb[0];
+ sumy += yb[1];
+ sumy += yb[2];
+ sumy += yb[3];
+ sumy += yb[4];
+ sumy += yb[5];
+ sumy += yb[6];
+ sumy += yb[7];
+
+ sumy += yb[16];
+ sumy += yb[17];
+ sumy += yb[18];
+ sumy += yb[19];
+ sumy += yb[20];
+ sumy += yb[21];
+ sumy += yb[22];
+ sumy += yb[23];
+
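+ // y values are pre-scaled to cancel the nibble position factors used in
+ // block_q4_1_dot_y_flat (/256 for the 0x0F00 nibbles, /16 for 0x00F0, /4096 for 0xF000).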
+ yl.s0 = yb[0];
+ yl.s1 = yb[1]/256.f;
+
+ yl.s2 = yb[2];
+ yl.s3 = yb[3]/256.f;
+
+ yl.s4 = yb[4];
+ yl.s5 = yb[5]/256.f;
+
+ yl.s6 = yb[6];
+ yl.s7 = yb[7]/256.f;
+
+ yl.s8 = yb[16]/16.f;
+ yl.s9 = yb[17]/4096.f;
+
+ yl.sa = yb[18]/16.f;
+ yl.sb = yb[19]/4096.f;
+
+ yl.sc = yb[20]/16.f;
+ yl.sd = yb[21]/4096.f;
+
+ yl.se = yb[22]/16.f;
+ yl.sf = yb[23]/4096.f;
+
+ sumf.s0 += block_q4_1_dot_y_flat(x + ib*QK4_1/2 + 0*nb*QK4_1/2, d + ib + 0*nb, m + ib + 0*nb, sumy, yl, il);
+ sumf.s1 += block_q4_1_dot_y_flat(x + ib*QK4_1/2 + 1*nb*QK4_1/2, d + ib + 1*nb, m + ib + 1*nb, sumy, yl, il);
+ sumf.s2 += block_q4_1_dot_y_flat(x + ib*QK4_1/2 + 2*nb*QK4_1/2, d + ib + 2*nb, m + ib + 2*nb, sumy, yl, il);
+ sumf.s3 += block_q4_1_dot_y_flat(x + ib*QK4_1/2 + 3*nb*QK4_1/2, d + ib + 3*nb, m + ib + 3*nb, sumy, yl, il);
+
+ yb += QK4_1 * (N_SIMDWIDTH/2);
+ }
+
+ float4 tot = (float4)(
+ sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1),
+ sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3)
+ );
+
+ if (get_sub_group_local_id() == 0) {
+ if (first_row + 0 < ne01) {
+ dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0;
+ }
+ if (first_row + 1 < ne01) {
+ dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1;
+ }
+ if (first_row + 2 < ne01) {
+ dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2;
+ }
+ if (first_row + 3 < ne01) {
+ dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3;
+ }
+ }
+}
+
+#ifdef INTEL_GPU
+REQD_SUBGROUP_SIZE_16
+#elif defined (ADRENO_GPU)
+REQD_SUBGROUP_SIZE_64
+#endif
+kernel void kernel_mul_mv_q4_1_f32_flat(
+ global void * src0_q,
+ global void * src0_d,
+ global void * src0_m,
+ global float * src1,
+ ulong offset1,
+ global float * dst,
+ ulong offsetd,
+ int ne00,
+ int ne01,
+ int ne02,
+ int ne10,
+ int ne12,
+ int ne0,
+ int ne1,
+ int r2,
+ int r3
+) {
+ src1 = (global float*)((global char*)src1 + offset1);
+ dst = (global float*)((global char*)dst + offsetd);
+
+ mul_vec_q_n_f32_flat(src0_q, src0_d, src0_m, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3);
+}