cl_kernel kernel_rms_norm, kernel_rms_norm_mul;
cl_kernel kernel_group_norm, kernel_group_norm_mul_add;
cl_kernel kernel_diag_mask_inf, kernel_diag_mask_inf_8;
+ cl_kernel kernel_diag_f32;
cl_kernel kernel_soft_max, kernel_soft_max_4;
cl_kernel kernel_soft_max_f16, kernel_soft_max_4_f16;
std::map<std::pair<int, int>, cl_kernel> kernels_flash_attn_f16;
cl_kernel kernel_pad;
cl_kernel kernel_tanh_f32, kernel_tanh_f32_4, kernel_tanh_f32_nc;
cl_kernel kernel_tanh_f16, kernel_tanh_f16_4, kernel_tanh_f16_nc;
+ cl_kernel kernel_neg_f32, kernel_neg_f32_4, kernel_neg_f32_nc;
+ cl_kernel kernel_neg_f16, kernel_neg_f16_4, kernel_neg_f16_nc;
+ cl_kernel kernel_exp_f32, kernel_exp_f32_4, kernel_exp_f32_nc;
+ cl_kernel kernel_exp_f16, kernel_exp_f16_4, kernel_exp_f16_nc;
cl_kernel kernel_expm1_f32, kernel_expm1_f32_4, kernel_expm1_f32_nc;
cl_kernel kernel_expm1_f16, kernel_expm1_f16_4, kernel_expm1_f16_nc;
cl_kernel kernel_softplus_f32, kernel_softplus_f32_4, kernel_softplus_f32_nc;
GGML_LOG_CONT(".");
}
+ // diag
+ {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+ const std::string kernel_src {
+ #include "diag.cl.h"
+ };
+#else
+ const std::string kernel_src = read_file("diag.cl");
+#endif
+ cl_program prog =
+ build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+ CL_CHECK((backend_ctx->kernel_diag_f32 = clCreateKernel(prog, "kernel_diag_f32", &err), err));
+ CL_CHECK(clReleaseProgram(prog));
+ GGML_LOG_CONT(".");
+ }
+
// gelu
{
#ifdef GGML_OPENCL_EMBED_KERNELS
GGML_LOG_CONT(".");
}
+ // neg
+ {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+ const std::string kernel_src {
+ #include "neg.cl.h"
+ };
+#else
+ const std::string kernel_src = read_file("neg.cl");
+#endif
+ cl_program prog =
+ build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
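+        // Kernel-name suffixes follow the convention used by the other unary ops:
+        //   <name>     scalar kernel for contiguous tensors
+        //   <name>_4   processes 4 elements per work-item (requires nelements % 4 == 0)
+        //   <name>_nc  strided kernel for non-contiguous tensors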
+ CL_CHECK((backend_ctx->kernel_neg_f32 = clCreateKernel(prog, "kernel_neg_f32", &err), err));
+ CL_CHECK((backend_ctx->kernel_neg_f32_4 = clCreateKernel(prog, "kernel_neg_f32_4", &err), err));
+ CL_CHECK((backend_ctx->kernel_neg_f32_nc = clCreateKernel(prog, "kernel_neg_f32_nc", &err), err));
+ CL_CHECK((backend_ctx->kernel_neg_f16 = clCreateKernel(prog, "kernel_neg_f16", &err), err));
+ CL_CHECK((backend_ctx->kernel_neg_f16_4 = clCreateKernel(prog, "kernel_neg_f16_4", &err), err));
+ CL_CHECK((backend_ctx->kernel_neg_f16_nc = clCreateKernel(prog, "kernel_neg_f16_nc", &err), err));
+ CL_CHECK(clReleaseProgram(prog));
+ GGML_LOG_CONT(".");
+ }
+
+ // exp
+ {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+ const std::string kernel_src {
+ #include "exp.cl.h"
+ };
+#else
+ const std::string kernel_src = read_file("exp.cl");
+#endif
+ cl_program prog =
+ build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+ CL_CHECK((backend_ctx->kernel_exp_f32 = clCreateKernel(prog, "kernel_exp_f32", &err), err));
+ CL_CHECK((backend_ctx->kernel_exp_f32_4 = clCreateKernel(prog, "kernel_exp_f32_4", &err), err));
+ CL_CHECK((backend_ctx->kernel_exp_f32_nc = clCreateKernel(prog, "kernel_exp_f32_nc", &err), err));
+ CL_CHECK((backend_ctx->kernel_exp_f16 = clCreateKernel(prog, "kernel_exp_f16", &err), err));
+ CL_CHECK((backend_ctx->kernel_exp_f16_4 = clCreateKernel(prog, "kernel_exp_f16_4", &err), err));
+ CL_CHECK((backend_ctx->kernel_exp_f16_nc = clCreateKernel(prog, "kernel_exp_f16_nc", &err), err));
+ CL_CHECK(clReleaseProgram(prog));
+ GGML_LOG_CONT(".");
+ }
+
// expm1
{
#ifdef GGML_OPENCL_EMBED_KERNELS
case GGML_UNARY_OP_SIGMOID:
return ggml_is_contiguous(op->src[0]);
case GGML_UNARY_OP_TANH:
+ case GGML_UNARY_OP_NEG:
+ case GGML_UNARY_OP_EXP:
return op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16;
case GGML_UNARY_OP_EXPM1:
return op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16;
case GGML_OP_PERMUTE:
case GGML_OP_TRANSPOSE:
return true;
+        case GGML_OP_DIAG:
+            // only kernel_diag_f32 is compiled, so restrict GGML_OP_DIAG to F32
+            return op->src[0]->type == GGML_TYPE_F32;
case GGML_OP_DIAG_MASK_INF:
return op->ne[3] == 1;
case GGML_OP_ROPE: {
}
}
+static void ggml_cl_neg(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ GGML_ASSERT(src0);
+ GGML_ASSERT(src0->extra);
+ GGML_ASSERT(dst);
+ GGML_ASSERT(dst->extra);
+
+ UNUSED(src1);
+
+ ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+
+ ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
+ ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+
+ cl_ulong offset0 = extra0->offset + src0->view_offs;
+ cl_ulong offsetd = extrad->offset + dst->view_offs;
+
+ GGML_TENSOR_LOCALS(int, ne0, src0, ne);
+ GGML_TENSOR_LOCALS(cl_ulong, nb0, src0, nb);
+ GGML_TENSOR_LOCALS(int, ne, dst, ne);
+ GGML_TENSOR_LOCALS(cl_ulong, nb, dst, nb);
+
+ cl_kernel kernel;
+
+ if (ggml_is_contiguous(src0)) {
+ // Handle contiguous input
+ int n = ggml_nelements(dst);
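+        // the 4-wide kernels handle four elements per work-item, so the
+        // dispatch length shrinks to n/4 when nelements is a multiple of 4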
+ if (n % 4 == 0) {
+ if (src0->type == GGML_TYPE_F32) {
+ kernel = backend_ctx->kernel_neg_f32_4;
+ } else {
+ kernel = backend_ctx->kernel_neg_f16_4;
+ }
+ n /= 4;
+ } else {
+ if (src0->type == GGML_TYPE_F32) {
+ kernel = backend_ctx->kernel_neg_f32;
+ } else {
+ kernel = backend_ctx->kernel_neg_f16;
+ }
+ }
+
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_int), &n));
+
+ size_t global_work_size[] = {(size_t)CEIL_DIV(n, 64)*64, 1, 1};
+ size_t local_work_size[] = {64, 1, 1};
+
+ backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+ } else {
+ // Handle non-contiguous input
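+        // launch geometry: one work-group per src0 row (i01), with i02/i03 on the
+        // remaining grid dimensions; full byte strides for both tensors are passed
+        // so the kernel can address permuted/viewed layouts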
+ if (src0->type == GGML_TYPE_F32) {
+ kernel = backend_ctx->kernel_neg_f32_nc;
+ } else {
+ kernel = backend_ctx->kernel_neg_f16_nc;
+ }
+
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd));
+        CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_int), &ne00));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &nb00));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &nb01));
+ CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb02));
+ CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb03));
+ CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb0));
+ CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb1));
+ CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb2));
+ CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb3));
+
+ int nth = 64;
+
+ size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03};
+ size_t local_work_size[] = {(size_t)nth, 1, 1};
+
+ backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+ }
+}
+
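+// Identical dispatch structure to ggml_cl_neg above, selecting the exp kernels.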
+static void ggml_cl_exp(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ GGML_ASSERT(src0);
+ GGML_ASSERT(src0->extra);
+ GGML_ASSERT(dst);
+ GGML_ASSERT(dst->extra);
+
+ UNUSED(src1);
+
+ ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+
+ ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
+ ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+
+ cl_ulong offset0 = extra0->offset + src0->view_offs;
+ cl_ulong offsetd = extrad->offset + dst->view_offs;
+
+ GGML_TENSOR_LOCALS(int, ne0, src0, ne);
+ GGML_TENSOR_LOCALS(cl_ulong, nb0, src0, nb);
+ GGML_TENSOR_LOCALS(int, ne, dst, ne);
+ GGML_TENSOR_LOCALS(cl_ulong, nb, dst, nb);
+
+ cl_kernel kernel;
+
+ if (ggml_is_contiguous(src0)) {
+ // Handle contiguous input
+ int n = ggml_nelements(dst);
+ if (n % 4 == 0) {
+ if (src0->type == GGML_TYPE_F32) {
+ kernel = backend_ctx->kernel_exp_f32_4;
+ } else {
+ kernel = backend_ctx->kernel_exp_f16_4;
+ }
+ n /= 4;
+ } else {
+ if (src0->type == GGML_TYPE_F32) {
+ kernel = backend_ctx->kernel_exp_f32;
+ } else {
+ kernel = backend_ctx->kernel_exp_f16;
+ }
+ }
+
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_int), &n));
+
+ size_t global_work_size[] = {(size_t)CEIL_DIV(n, 64)*64, 1, 1};
+ size_t local_work_size[] = {64, 1, 1};
+
+ backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+ } else {
+ // Handle non-contiguous input
+ if (src0->type == GGML_TYPE_F32) {
+ kernel = backend_ctx->kernel_exp_f32_nc;
+ } else {
+ kernel = backend_ctx->kernel_exp_f16_nc;
+ }
+
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd));
+        CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_int), &ne00));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &nb00));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &nb01));
+ CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb02));
+ CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb03));
+ CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb0));
+ CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb1));
+ CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb2));
+ CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb3));
+
+ int nth = 64;
+
+ size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03};
+ size_t local_work_size[] = {(size_t)nth, 1, 1};
+
+ backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+ }
+}
+
static void ggml_cl_expm1(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0);
GGML_ASSERT(src0->extra);
}
}
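+
+// The device source (diag.cl) is not part of this hunk. Below is a minimal,
+// illustrative sketch of the entry point the host code assumes: one work-group
+// per dst row, with dst rows assumed contiguous (nb1 == ne0*nb0 holds for the
+// freshly allocated dst of GGML_OP_DIAG; nb01 is bound but unused since src0
+// has a single row per i2/i3 plane):
+//
+//   kernel void kernel_diag_f32(
+//           global char * src0, ulong offset0,
+//           global char * dst,  ulong offsetd,
+//           ulong nb01, ulong nb02, ulong nb03,
+//           int ne0, ulong nb0, ulong nb2, ulong nb3) {
+//       const int i1 = get_group_id(0);  // dst row index
+//       const int i2 = get_group_id(1);
+//       const int i3 = get_group_id(2);
+//       global float * s = (global float *)(src0 + offset0 + i2*nb02 + i3*nb03);
+//       global float * d = (global float *)(dst  + offsetd + i3*nb3 + i2*nb2 + (ulong)i1*ne0*nb0);
+//       for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) {
+//           d[i0] = (i0 == i1) ? s[i0] : 0.0f;  // source vector on the diagonal, zero elsewhere
+//       }
+//   }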
+static void ggml_cl_diag(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ GGML_ASSERT(src0);
+ GGML_ASSERT(src0->extra);
+ GGML_ASSERT(dst);
+ GGML_ASSERT(dst->extra);
+
+ UNUSED(src1);
+
+ ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+
+ ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
+ ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+
+ cl_ulong offset0 = extra0->offset + src0->view_offs;
+ cl_ulong offsetd = extrad->offset + dst->view_offs;
+
+ GGML_TENSOR_LOCALS(int, ne0, src0, ne);
+ GGML_TENSOR_LOCALS(cl_ulong, nb0, src0, nb);
+ GGML_TENSOR_LOCALS(int, ne, dst, ne);
+ GGML_TENSOR_LOCALS(cl_ulong, nb, dst, nb);
+
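+    // only the F32 kernel exists (supports_op restricts GGML_OP_DIAG to F32),
+    // so assert here instead of dispatching on type
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);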
+ cl_kernel kernel = backend_ctx->kernel_diag_f32;
+
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device));
+ CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd));
+ CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_ulong), &nb01));
+ CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &nb02));
+ CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &nb03));
+ CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_int), &ne0));
+ CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb0));
+ CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb2));
+ CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb3));
+
+ int nth = 64;
+
+ size_t global_work_size[] = {(size_t)ne1*nth, (size_t)ne2, (size_t)ne3};
+ size_t local_work_size[] = {(size_t)nth, 1, 1};
+
+ backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+}
+
static void ggml_cl_soft_max(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0);
GGML_ASSERT(src0->extra);
}
func = ggml_cl_tanh;
break;
+ case GGML_UNARY_OP_NEG:
+ if (!any_on_device) {
+ return false;
+ }
+ func = ggml_cl_neg;
+ break;
+ case GGML_UNARY_OP_EXP:
+ if (!any_on_device) {
+ return false;
+ }
+ func = ggml_cl_exp;
+ break;
case GGML_UNARY_OP_EXPM1:
if (!any_on_device) {
return false;
}
func = ggml_cl_nop;
break;
+ case GGML_OP_DIAG:
+ if (!any_on_device) {
+ return false;
+ }
+ func = ggml_cl_diag;
+ break;
case GGML_OP_DIAG_MASK_INF:
if (!any_on_device) {
return false;