#include "common.hpp"
+#include "ggml.h"
#include "element_wise.hpp"
static void acc_f32(const float * x, const float * y, float * dst, const int ne,
}
}
-static void gelu_f32(const float * x, float * dst, const int k,
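+// The unary kernels below are templated on T so a single implementation serves
+// both F32 (float) and F16 (sycl::half) tensors.
+// GELU, tanh approximation: 0.5 * x * (1 + tanh(sqrt(2/pi) * x * (1 + 0.044715 * x^2))).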
+template<typename T>
+static void gelu(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
- const float GELU_COEF_A = 0.044715f;
- const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
+ const T GELU_COEF_A = static_cast<T>(0.044715f);
+ const T SQRT_2_OVER_PI = static_cast<T>(0.79788456080286535587989211986876f);
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
}
-    float xi = x[i];
-    dst[i] = 0.5f * xi *
-             (1.0f +
-              sycl::tanh(SQRT_2_OVER_PI * xi * (1.0f + GELU_COEF_A * xi * xi)));
+    T xi = x[i];
+    dst[i] = static_cast<T>(0.5f) * xi *
+             (static_cast<T>(1.0f) +
+              sycl::tanh(SQRT_2_OVER_PI * xi * (static_cast<T>(1.0f) + GELU_COEF_A * xi * xi)));
}
-static void silu_f32(const float * x, float * dst, const int k,
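+// SiLU (swish): x * sigmoid(x); sycl::native::exp trades a little accuracy for speed.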
+template<typename T>
+static void silu(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
item_ct1.get_local_id(2);
if (i >= k) {
return;
}
- dst[i] = x[i] / (1.0f + sycl::native::exp(-x[i]));
+ dst[i] = x[i] / (static_cast<T>(1.0f) + sycl::native::exp(-x[i]));
}
-static void gelu_quick_f32(const float *x, float *dst, int k,
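+// Quick GELU: x * sigmoid(1.702 * x); GELU_QUICK_COEF stores the negated constant.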
+template<typename T>
+static void gelu_quick(const T *x, T *dst, int k,
const sycl::nd_item<3> &item_ct1) {
const float GELU_QUICK_COEF = -1.702f;
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);
if (i >= k) {
return;
}
- dst[i] = x[i] * (1.0f / (1.0f + sycl::native::exp(GELU_QUICK_COEF * x[i])));
+ dst[i] = x[i] * (static_cast<T>(1.0f) / (static_cast<T>(1.0f) + sycl::native::exp(GELU_QUICK_COEF * x[i])));
}
-static void tanh_f32(const float *x, float *dst, int k,
+template<typename T>
+static void tanh(const T *x, T *dst, int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
item_ct1.get_local_id(2);
if (i >= k) {
return;
}
- dst[i] = sycl::tanh((float)(x[i]));
+    dst[i] = sycl::tanh(x[i]);
}
-static void relu_f32(const float * x, float * dst, const int k,
+template<typename T>
+static void relu(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
item_ct1.get_local_id(2);
if (i >= k) {
return;
}
- dst[i] = sycl::fmax((float)(x[i]), (float)0);
+    dst[i] = sycl::fmax(x[i], static_cast<T>(0));
}
-static void sigmoid_f32(const float * x, float * dst, const int k,
+template<typename T>
+static void sigmoid(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
item_ct1.get_local_id(2);
if (i >= k) {
return;
}
- dst[i] = 1.0f / (1.0f + sycl::native::exp(-x[i]));
+    dst[i] = static_cast<T>(1.0f) / (static_cast<T>(1.0f) + sycl::native::exp(-x[i]));
}
-static void sqrt_f32(const float * x, float * dst, const int k,
+template<typename T>
+static void sqrt(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }
dst[i] = sycl::sqrt(x[i]);
}
-static void sin_f32(const float * x, float * dst, const int k,
+template<typename T>
+static void sin(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }
dst[i] = sycl::sin(x[i]);
}
-static void cos_f32(const float * x, float * dst, const int k,
+template<typename T>
+static void cos(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }
dst[i] = sycl::cos(x[i]);
}
-static void hardsigmoid_f32(const float * x, float * dst, const int k,
+template<typename T>
+static void hardsigmoid(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
item_ct1.get_local_id(2);
if (i >= k) {
return;
}
- dst[i] = sycl::fmin(1.0f, sycl::fmax(0.0f, (x[i] + 3.0f) / 6.0f));
+ dst[i] = sycl::fmin(static_cast<T>(1.0f), sycl::fmax(static_cast<T>(0.0f), (x[i] + static_cast<T>(3.0f)) / static_cast<T>(6.0f)));
}
-static void hardswish_f32(const float * x, float * dst, const int k,
+template<typename T>
+static void hardswish(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
item_ct1.get_local_id(2);
if (i >= k) {
return;
}
- dst[i] = x[i] * sycl::fmin(1.0f, sycl::fmax(0.0f, (x[i] + 3.0f) / 6.0f));
+ dst[i] = x[i] * sycl::fmin(static_cast<T>(1.0f), sycl::fmax(static_cast<T>(0.0f), (x[i] + static_cast<T>(3.0f)) / static_cast<T>(6.0f)));
}
-static void exp_f32(const float * x, float * dst, const int k,
+template<typename T>
+static void exp(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }
dst[i] = sycl::exp(x[i]);
}
-static void log_f32(const float * x, float * dst, const int k,
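+// log returns -inf for x <= 0; neg_infinity<T>() (a helper introduced alongside
+// this change) yields a type-correct -INFINITY for float and sycl::half.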
+template<typename T>
+static void log(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
item_ct1.get_local_id(2);
if (i >= k) {
return;
}
- float xi = x[i];
+ T xi = x[i];
if (xi <= 0) {
- dst[i] = -INFINITY;
+ dst[i] = neg_infinity<T>();
} else {
dst[i] = sycl::log(xi);
}
}
-static void neg_f32(const float * x, float * dst, const int k,
+template<typename T>
+static void neg(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }
dst[i] = -x[i];
}
-static void step_f32(const float * x, float * dst, const int k,
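+// Heaviside step: 1 for x > 0, else 0; the boolean result converts to T.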
+template<typename T>
+static void step(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
item_ct1.get_local_id(2);
if (i >= k) {
return;
}
- dst[i] = x[i] > 0.0f;
+ dst[i] = x[i] > static_cast<T>(0.0f);
}
-static void leaky_relu_f32(const float *x, float *dst, const int k, const float negative_slope,
+template<typename T>
+static void leaky_relu(const T *x, T *dst, const int k, const float negative_slope,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
item_ct1.get_local_id(2);
if (i >= k) {
return;
}
- dst[i] = sycl::fmax((float)(x[i]), (float)0) +
- sycl::fmin((float)(x[i]), 0.0f) * negative_slope;
+    dst[i] = sycl::fmax(x[i], static_cast<T>(0)) +
+             sycl::fmin(x[i], static_cast<T>(0)) * negative_slope;
}
-static void sqr_f32(const float * x, float * dst, const int k,
+template<typename T>
+static void sqr(const T * x, T * dst, const int k,
const sycl::nd_item<3> &item_ct1) {
const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }
dst[i] = x[i] * x[i];
}
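+// Nearest-neighbour upscale: map each destination element back to its source
+// element via the per-dimension scale factors, reading through raw byte strides.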
-static void upscale_f32(const float *x, float *dst, const int nb00, const int nb01,
+template<typename T>
+static void upscale(const T *x, T *dst, const int nb00, const int nb01,
const int nb02, const int nb03, const int ne10, const int ne11,
const int ne12, const int ne13, const float sf0, const float sf1,
const float sf2, const float sf3, const sycl::nd_item<1> &item_ct1) {
    int index = item_ct1.get_local_id(0) +
                item_ct1.get_group(0) * item_ct1.get_local_range(0);
    if (index >= ne10 * ne11 * ne12 * ne13) {
        return;
    }
    int i10 = index % ne10;
    int i11 = (index / ne10) % ne11;
    int i12 = (index / (ne10 * ne11)) % ne12;
    int i13 = index / (ne10 * ne11 * ne12);

    int i00 = i10 / sf0;
    int i01 = i11 / sf1;
    int i02 = i12 / sf2;
int i03 = i13 / sf3;
- dst[index] = *(const float *)((const char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00);
+ dst[index] = *(const T *)((const char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00);
}
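+// Pad: copy the source block where it exists, zero-fill the rest of dst.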
-static void pad_f32(const float *x, float *dst, const int ne0, const int ne00, const int ne01, const int ne02,
+template <typename T>
+static void pad(const T *x, T *dst, const int ne0, const int ne00, const int ne01, const int ne02,
const sycl::nd_item<3> &item_ct1) {
int nidx = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
    if (nidx >= ne0) {
        return;
    }
    int offset_dst = nidx + item_ct1.get_group(1) * ne0 +
                     item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
    if (nidx < ne00 && item_ct1.get_group(1) < ne01 && item_ct1.get_group(0) < ne02) {
        int offset_src = nidx + item_ct1.get_group(1) * ne00 +
                         item_ct1.get_group(0) * ne00 * ne01;
dst[offset_dst] = x[offset_src];
} else {
- dst[offset_dst] = 0.0f;
+ dst[offset_dst] = static_cast<T>(0.0f);
}
}
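+// Element-wise clamp to [min, max]; the bounds arrive as float and are cast to T
+// before comparison.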
+template<typename T>
+static void clamp(const T * x, T * dst, const float min, const float max, const int k,
+ const sycl::nd_item<3> &item_ct1) {
+ const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
+ item_ct1.get_local_id(2);
+
+ if (i >= k) {
+ return;
+ }
+
+ dst[i] = x[i] < static_cast<T>(min) ? static_cast<T>(min) : (x[i] > static_cast<T>(max) ? static_cast<T>(max) : x[i]);
+}
static void acc_f32_sycl(const float *x, const float *y, float *dst,
const int n_elements, const int ne10, const int ne11,
});
}
-static void gelu_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void gelu_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_GELU_BLOCK_SIZE - 1) / SYCL_GELU_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- gelu_f32(x, dst, k, item_ct1);
+ gelu(x, dst, k, item_ct1);
});
}
-static void silu_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void silu_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_SILU_BLOCK_SIZE - 1) / SYCL_SILU_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_SILU_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_SILU_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- silu_f32(x, dst, k, item_ct1);
+ silu(x, dst, k, item_ct1);
});
}
-static void gelu_quick_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void gelu_quick_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_GELU_BLOCK_SIZE - 1) / SYCL_GELU_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- gelu_quick_f32(x, dst, k, item_ct1);
+ gelu_quick(x, dst, k, item_ct1);
});
}
-static void tanh_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void tanh_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_TANH_BLOCK_SIZE - 1) / SYCL_TANH_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_TANH_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_TANH_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- tanh_f32(x, dst, k, item_ct1);
+ tanh(x, dst, k, item_ct1);
});
}
-static void relu_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void relu_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_RELU_BLOCK_SIZE - 1) / SYCL_RELU_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- relu_f32(x, dst, k, item_ct1);
+ relu(x, dst, k, item_ct1);
});
}
-static void hardsigmoid_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void hardsigmoid_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_HARDSIGMOID_BLOCK_SIZE - 1) / SYCL_HARDSIGMOID_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_HARDSIGMOID_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_HARDSIGMOID_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- hardsigmoid_f32(x, dst, k, item_ct1);
+ hardsigmoid(x, dst, k, item_ct1);
});
}
-static void hardswish_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void hardswish_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_HARDSWISH_BLOCK_SIZE - 1) / SYCL_HARDSWISH_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_HARDSWISH_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_HARDSWISH_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- hardswish_f32(x, dst, k, item_ct1);
+ hardswish(x, dst, k, item_ct1);
});
}
-static void exp_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void exp_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_EXP_BLOCK_SIZE - 1) / SYCL_EXP_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- exp_f32(x, dst, k, item_ct1);
+ exp(x, dst, k, item_ct1);
});
}
-static void log_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void log_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_EXP_BLOCK_SIZE - 1) / SYCL_EXP_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- log_f32(x, dst, k, item_ct1);
+ log(x, dst, k, item_ct1);
});
}
-static void neg_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void neg_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_NEG_BLOCK_SIZE - 1) / SYCL_NEG_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- neg_f32(x, dst, k, item_ct1);
+ neg(x, dst, k, item_ct1);
});
}
-static void step_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void step_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_NEG_BLOCK_SIZE - 1) / SYCL_NEG_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- step_f32(x, dst, k, item_ct1);
+ step(x, dst, k, item_ct1);
});
}
-static void sigmoid_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void sigmoid_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_SIGMOID_BLOCK_SIZE - 1) / SYCL_SIGMOID_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_SIGMOID_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_SIGMOID_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- sigmoid_f32(x, dst, k, item_ct1);
+ sigmoid(x, dst, k, item_ct1);
});
}
-static void sqrt_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void sqrt_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_SQRT_BLOCK_SIZE - 1) / SYCL_SQRT_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_SQRT_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_SQRT_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- sqrt_f32(x, dst, k, item_ct1);
+ sqrt(x, dst, k, item_ct1);
});
}
-static void sin_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void sin_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_SIN_BLOCK_SIZE - 1) / SYCL_SIN_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- sin_f32(x, dst, k, item_ct1);
+ sin(x, dst, k, item_ct1);
});
}
-static void cos_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void cos_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_SIN_BLOCK_SIZE - 1) / SYCL_SIN_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- cos_f32(x, dst, k, item_ct1);
+ cos(x, dst, k, item_ct1);
});
}
-static void leaky_relu_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void leaky_relu_sycl(const T *x, T *dst, const int k,
const float negative_slope,
queue_ptr stream) {
    const int num_blocks = (k + SYCL_RELU_BLOCK_SIZE - 1) / SYCL_RELU_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- leaky_relu_f32(x, dst, k, negative_slope, item_ct1);
+ leaky_relu(x, dst, k, negative_slope, item_ct1);
});
}
-static void sqr_f32_sycl(const float *x, float *dst, const int k,
+template<typename T>
+static void sqr_sycl(const T *x, T *dst, const int k,
queue_ptr stream) {
const int num_blocks = (k + SYCL_SQR_BLOCK_SIZE - 1) / SYCL_SQR_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
sycl::range<3>(1, 1, SYCL_SQR_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_SQR_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- sqr_f32(x, dst, k, item_ct1);
+ sqr(x, dst, k, item_ct1);
});
}
-static void upscale_f32_sycl(const float *x, float *dst, const int nb00, const int nb01,
+template<typename T>
+static void upscale_sycl(const T *x, T *dst, const int nb00, const int nb01,
const int nb02, const int nb03, const int ne10, const int ne11,
const int ne12, const int ne13, const float sf0, const float sf1,
const float sf2, const float sf3, queue_ptr stream) {
    int dst_size = ne10 * ne11 * ne12 * ne13;
    int num_blocks = (dst_size + SYCL_UPSCALE_BLOCK_SIZE - 1) / SYCL_UPSCALE_BLOCK_SIZE;
    sycl::range<1> gridDim(num_blocks * SYCL_UPSCALE_BLOCK_SIZE);
    stream->parallel_for(
sycl::nd_range<1>(gridDim, sycl::range<1>(SYCL_UPSCALE_BLOCK_SIZE)),
[=](sycl::nd_item<1> item_ct1) {
- upscale_f32(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3, item_ct1);
+ upscale(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3, item_ct1);
});
}
-static void pad_f32_sycl(const float *x, float *dst, const int ne00,
+template<typename T>
+static void pad_sycl(const T *x, T *dst, const int ne00,
const int ne01, const int ne02, const int ne0,
const int ne1, const int ne2, queue_ptr stream) {
    int num_blocks = (ne0 + SYCL_PAD_BLOCK_SIZE - 1) / SYCL_PAD_BLOCK_SIZE;
    sycl::range<3> gridDim(ne2, ne1, num_blocks);
    stream->parallel_for(
sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE),
sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE)),
[=](sycl::nd_item<3> item_ct1) {
- pad_f32(x, dst, ne0, ne00, ne01, ne02, item_ct1);
+ pad(x, dst, ne0, ne00, ne01, ne02, item_ct1);
});
}
-inline void ggml_sycl_op_silu(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
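+// clamp_sycl follows the same launch pattern as the launchers above: round k up
+// to whole blocks and let the kernel's bounds check discard the overhang work-items.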
+template<typename T>
+static void clamp_sycl(const T *x, T *dst, const float min,
+ const float max, const int k,
+ queue_ptr stream) {
+ const int num_blocks = (k + SYCL_CLAMP_BLOCK_SIZE - 1) / SYCL_CLAMP_BLOCK_SIZE;
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
+ sycl::range<3>(1, 1, SYCL_CLAMP_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CLAMP_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) {
+ clamp(x, dst, min, max, k, item_ct1);
+ });
+}
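+// Host-side ops dispatch on dst->type: the F16 branch is compiled only when the
+// backend is built with GGML_SYCL_F16, and cast_data<T> (a small helper introduced
+// alongside this change) returns the typed src/dst data pointers.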
+inline void ggml_sycl_op_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
-
- silu_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ silu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ silu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_gelu(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
- gelu_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ gelu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ gelu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
- gelu_quick_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ gelu_quick_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ gelu_quick_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_tanh(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
- tanh_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ tanh_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ tanh_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_relu(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
- relu_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ relu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ relu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
+
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
- hardsigmoid_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ hardsigmoid_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ hardsigmoid_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
- hardswish_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ hardswish_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ hardswish_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_exp(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
- exp_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ exp_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ exp_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_log(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
- log_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ log_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ log_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
-
- sigmoid_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ sigmoid_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ sigmoid_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
+
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
-
- sqrt_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ sqrt_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ sqrt_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_sin(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
- sin_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ sin_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ sin_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_cos(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
- cos_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ cos_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ cos_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_step(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
- step_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ step_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ step_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-
-inline void ggml_sycl_op_neg(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
- neg_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ neg_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ neg_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
- dpct::queue_ptr main_stream = ctx.stream();
- SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
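+    // negative_slope is stored as the first float in op_params.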
float negative_slope;
memcpy(&negative_slope, dst->op_params, sizeof(float));
-
- leaky_relu_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), negative_slope, main_stream);
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ leaky_relu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), negative_slope, main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ leaky_relu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), negative_slope, main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_sqr(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
- sqr_f32_sycl(src0_dd, dst_dd, ggml_nelements(dst->src[0]), main_stream);
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ sqr_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ sqr_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
-
+inline void ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
+
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
- const float sf0 = (float)dst->ne[0]/dst->src[0]->ne[0];
- const float sf1 = (float)dst->ne[1]/dst->src[0]->ne[1];
- const float sf2 = (float)dst->ne[2]/dst->src[0]->ne[2];
- const float sf3 = (float)dst->ne[3]/dst->src[0]->ne[3];
+ const float sf0 = (float) dst->ne[0] / dst->src[0]->ne[0];
+ const float sf1 = (float) dst->ne[1] / dst->src[0]->ne[1];
+ const float sf2 = (float) dst->ne[2] / dst->src[0]->ne[2];
+ const float sf3 = (float) dst->ne[3] / dst->src[0]->ne[3];
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ upscale_sycl(data_pts.src, data_pts.dst, dst->src[0]->nb[0], dst->src[0]->nb[1], dst->src[0]->nb[2],
+ dst->src[0]->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3,
+ main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ upscale_sycl(data_pts.src, data_pts.dst, dst->src[0]->nb[0], dst->src[0]->nb[1], dst->src[0]->nb[2],
+ dst->src[0]->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3,
+ main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
+}
- upscale_f32_sycl(src0_dd, dst_dd, dst->src[0]->nb[0], dst->src[0]->nb[1], dst->src[0]->nb[2], dst->src[0]->nb[3],
- dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3,
- main_stream);
+inline void ggml_sycl_op_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
+ GGML_ASSERT(dst->src[0]->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ pad_sycl(data_pts.src, data_pts.dst, dst->src[0]->ne[0], dst->src[0]->ne[1], dst->src[0]->ne[2], dst->ne[0],
+ dst->ne[1], dst->ne[2], main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ pad_sycl(data_pts.src, data_pts.dst, dst->src[0]->ne[0], dst->src[0]->ne[1], dst->src[0]->ne[2], dst->ne[0],
+ dst->ne[1], dst->ne[2], main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
-inline void ggml_sycl_op_pad(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+inline void ggml_sycl_op_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+#if defined(GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
GGML_ASSERT(dst->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->src[0]->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
dpct::queue_ptr main_stream = ctx.stream();
SYCL_CHECK(ggml_sycl_set_device(ctx.device));
- const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
- float * dst_dd = static_cast<float *>(dst->data);
-
- pad_f32_sycl(src0_dd, dst_dd,
- dst->src[0]->ne[0], dst->src[0]->ne[1], dst->src[0]->ne[2],
- dst->ne[0], dst->ne[1], dst->ne[2], main_stream);
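+    // op_params packs the clamp bounds as two consecutive floats: [min, max].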
+ float min;
+ float max;
+ memcpy(&min, dst->op_params, sizeof(float));
+ memcpy(&max, (float *) dst->op_params + 1, sizeof(float));
+
+ switch (dst->type) {
+#if defined(GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ clamp_sycl(data_pts.src, data_pts.dst, min, max, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ clamp_sycl(data_pts.src, data_pts.dst, min, max, ggml_nelements(dst->src[0]), main_stream);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ break;
+ }
}
inline void ggml_sycl_op_acc(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
acc_f32_sycl(src0_dd, src1_dd, dst_dd, ggml_nelements(dst), dst->src[1]->ne[0], dst->src[1]->ne[1], dst->src[1]->ne[2], nb1, nb2, offset, main_stream);
}
+
inline void ggml_sycl_op_add(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_add>>(ctx, dst->src[0], dst->src[1], dst);
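+// The debug traces below now report the destination tensor type, so the F16/F32
+// path taken by each op can be verified from the logs.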
void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_sqrt(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_sin(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_cos(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_acc(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_acc(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_gelu(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_silu(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_gelu_quick(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_tanh(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_relu(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_sigmoid(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_hardsigmoid(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_hardswish(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_exp(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_log(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_neg(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_step(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_leaky_relu(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_sqr(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_upscale(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
void ggml_sycl_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
- GGML_SYCL_DEBUG("call %s\n", __func__);
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
ggml_sycl_op_pad(ctx, dst);
GGML_SYCL_DEBUG("call %s done\n", __func__);
}
+void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ GGML_SYCL_DEBUG("call %s: DST Tensor type: %s\n", __func__, ggml_type_name(dst->type));
+ ggml_sycl_op_clamp(ctx, dst);
+ GGML_SYCL_DEBUG("call %s done\n", __func__);
+}
+
void ggml_sycl_add(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {