add_compile_options(-I./) #include DPCT
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
if (GGML_SYCL_TARGET STREQUAL "NVIDIA")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl-targets=nvptx64-nvidia-cuda")
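+    # NVIDIA PTX runs 32-wide warps; the remaining SYCL targets (Intel GPUs) are built with a 16-wide sub-group below.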
+ add_compile_definitions(GGML_SYCL_WARP_SIZE=32)
+ else()
+ add_compile_definitions(GGML_SYCL_WARP_SIZE=16)
endif()
file(GLOB GGML_HEADERS_SYCL "ggml-sycl/*.hpp")
const float *src1_dd, float *dst_dd,
const queue_ptr &main_stream);
-static __dpct_inline__ float warp_reduce_sum(float x,
- const sycl::nd_item<3> &item_ct1) {
-#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- /*
- DPCT1096:98: The right-most dimension of the work-group used in the SYCL
- kernel that calls this function may be less than "32". The function
- "dpct::permute_sub_group_by_xor" may return an unexpected result on the
- CPU device. Modify the size of the work-group to ensure that the value
- of the right-most dimension is a multiple of "32".
- */
- x += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), x, mask);
- }
- return x;
-}
-
-static __dpct_inline__ sycl::float2
-warp_reduce_sum(sycl::float2 a, const sycl::nd_item<3> &item_ct1) {
-#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- a.x() += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), a.x(),
- mask);
- a.y() += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), a.y(),
- mask);
- }
- return a;
-}
-
-static __dpct_inline__ float warp_reduce_max(float x,
- const sycl::nd_item<3> &item_ct1) {
-#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- /*
- DPCT1096:97: The right-most dimension of the work-group used in the SYCL
- kernel that calls this function may be less than "32". The function
- "dpct::permute_sub_group_by_xor" may return an unexpected result on the
- CPU device. Modify the size of the work-group to ensure that the value
- of the right-most dimension is a multiple of "32".
- */
- x = sycl::fmax(x, dpct::permute_sub_group_by_xor(
- item_ct1.get_sub_group(), x, mask));
- }
- return x;
-}
-
static __dpct_inline__ float op_repeat(const float a, const float b) {
return b;
GGML_UNUSED(a);
dst[i] = x[i] * x[i];
}
-static void norm_f32(const float * x, float * dst, const int ncols, const float eps,
- const sycl::nd_item<3> &item_ct1, sycl::float2 *s_sum, int block_size) {
- const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
- item_ct1.get_local_id(1);
- const int tid = item_ct1.get_local_id(2);
-
- sycl::float2 mean_var = sycl::float2(0.f, 0.f);
-
- for (int col = tid; col < ncols; col += block_size) {
- const float xi = x[row*ncols + col];
- mean_var.x() += xi;
- mean_var.y() += xi * xi;
- }
-
- // sum up partial sums
- mean_var = warp_reduce_sum(mean_var, item_ct1);
- if (block_size > WARP_SIZE) {
-
- int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
- int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
- if (lane_id == 0) {
- s_sum[warp_id] = mean_var;
- }
- /*
- DPCT1118:0: SYCL group functions and algorithms must be encountered in
- converged control flow. You may need to adjust the code.
- */
- item_ct1.barrier(sycl::access::fence_space::local_space);
- mean_var = s_sum[lane_id];
- mean_var = warp_reduce_sum(mean_var, item_ct1);
- }
-
- const float mean = mean_var.x() / ncols;
- const float var = mean_var.y() / ncols - mean * mean;
- const float inv_std = sycl::rsqrt(var + eps);
-
- for (int col = tid; col < ncols; col += block_size) {
- dst[row*ncols + col] = (x[row*ncols + col] - mean) * inv_std;
- }
-}
-
static void concat_f32(const float *x,const float *y, float *dst, const int ne0, const int ne02,
const sycl::nd_item<3> &item_ct1) {
int nidx = item_ct1.get_local_id(2) +
}
}
-static void group_norm_f32(const float * x, float * dst, const int group_size, const int ne_elements, const float eps,
- const sycl::nd_item<3> &item_ct1, float *s_sum, int block_size) {
- int start = item_ct1.get_group(2) * group_size;
- int end = start + group_size;
-
- start += item_ct1.get_local_id(2);
-
- if (end >= ne_elements) {
- end = ne_elements;
- }
-
- float tmp = 0.0f; // partial sum for thread in warp
-
- for (int j = start; j < end; j += block_size) {
- tmp += x[j];
- }
-
- tmp = warp_reduce_sum(tmp, item_ct1);
- if (block_size > WARP_SIZE) {
-
- int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
- int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
- if (lane_id == 0) {
- s_sum[warp_id] = tmp;
- }
- /*
- DPCT1118:1: SYCL group functions and algorithms must be encountered in
- converged control flow. You may need to adjust the code.
- */
- /*
- DPCT1065:54: Consider replacing sycl::nd_item::barrier() with
- sycl::nd_item::barrier(sycl::access::fence_space::local_space) for
- better performance if there is no access to global memory.
- */
- item_ct1.barrier();
- tmp = s_sum[lane_id];
- tmp = warp_reduce_sum(tmp, item_ct1);
- }
-
- float mean = tmp / group_size;
- tmp = 0.0f;
-
- for (int j = start; j < end; j += block_size) {
- float xi = x[j] - mean;
- dst[j] = xi;
- tmp += xi * xi;
- }
-
- tmp = warp_reduce_sum(tmp, item_ct1);
- if (block_size > WARP_SIZE) {
-
- int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
- int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
- if (lane_id == 0) {
- s_sum[warp_id] = tmp;
- }
- /*
- DPCT1118:2: SYCL group functions and algorithms must be encountered in
- converged control flow. You may need to adjust the code.
- */
- /*
- DPCT1065:55: Consider replacing sycl::nd_item::barrier() with
- sycl::nd_item::barrier(sycl::access::fence_space::local_space) for
- better performance if there is no access to global memory.
- */
- item_ct1.barrier();
- tmp = s_sum[lane_id];
- tmp = warp_reduce_sum(tmp, item_ct1);
- }
-
- float variance = tmp / group_size;
- float scale = sycl::rsqrt(variance + eps);
- for (int j = start; j < end; j += block_size) {
- dst[j] *= scale;
- }
-}
-
-static void rms_norm_f32(const float * x, float * dst, const int ncols, const float eps,
- const sycl::nd_item<3> &item_ct1, float *s_sum, int block_size) {
- const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
- item_ct1.get_local_id(1);
- const int tid = item_ct1.get_local_id(2);
-
- float tmp = 0.0f; // partial sum for thread in warp
-
- for (int col = tid; col < ncols; col += block_size) {
- const float xi = x[row*ncols + col];
- tmp += xi * xi;
- }
-
- // sum up partial sums
- tmp = warp_reduce_sum(tmp, item_ct1);
- if (block_size > WARP_SIZE) {
-
- int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
- int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
- if (lane_id == 0) {
- s_sum[warp_id] = tmp;
- }
- /*
- DPCT1118:3: SYCL group functions and algorithms must be encountered in
- converged control flow. You may need to adjust the code.
- */
- item_ct1.barrier(sycl::access::fence_space::local_space);
- tmp = s_sum[lane_id];
- tmp = warp_reduce_sum(tmp, item_ct1);
- }
-
- const float mean = tmp / ncols;
- const float scale = sycl::rsqrt(mean + eps);
-
- for (int col = tid; col < ncols; col += block_size) {
- dst[row*ncols + col] = scale * x[row*ncols + col];
- }
-}
-
+template<int QUANT_BLOCK_TILE>
static void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded,
const sycl::nd_item<3> &item_ct1) {
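+    // each work-item now quantizes QUANT_BLOCK_TILE consecutive values, so the flat element index below is scaled by the tile size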
- const int ix = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
+ const int ix = (item_ct1.get_local_range(2) * item_ct1.get_group(2) +
+ item_ct1.get_local_id(2)) * QUANT_BLOCK_TILE;
if (ix >= kx_padded) {
return;
const int ib = i_padded / QK8_1; // block index
const int iqs = i_padded % QK8_1; // quant index
-
- const float xi = ix < kx ? x[iy*kx + ix] : 0.0f;
- float amax = sycl::fabs((float)xi);
- float sum = xi;
-
+ typedef sycl::vec<float, QUANT_BLOCK_TILE> TC;
+ typedef sycl::vec<int8_t, QUANT_BLOCK_TILE> TQ;
+ TC zeros;
+ TQ qzeros;
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- amax = sycl::fmax(amax, dpct::permute_sub_group_by_xor(
- item_ct1.get_sub_group(), amax, mask));
- sum +=
- dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), sum, mask);
+ for (int i = 0; i < QUANT_BLOCK_TILE; i++)
+ {
+ zeros[i] = 0.f;
+ qzeros[i] = 0;
+ }
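+    // vector-load QUANT_BLOCK_TILE contiguous floats; indices at or past kx (the padded tail) fall back to the zero tile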
+ const TC xi = ix < kx ? *(TC *)&x[iy * kx + ix] : zeros;
+ float sum = xi[0];
+ float amax = sycl::fabs(xi[0]);
+#pragma unroll
+ for (int i = 1; i < QUANT_BLOCK_TILE; i++)
+ {
+ sum += xi[i];
+ amax = sycl::fmax(sycl::fabs(xi[i]), amax);
}
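+    // reduce the per-item partial sum and absolute maximum across the whole sub-group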
+ sum = warp_reduce_sum(sum, item_ct1);
+ amax = warp_reduce_max(amax, item_ct1);
const float d = amax / 127;
- const int8_t q = amax == 0.0f ? 0 : sycl::round(xi / d);
+ TQ q = qzeros;
+ if (amax != 0.0f)
+ {
+#pragma unroll
+ for (int i = 0; i < QUANT_BLOCK_TILE; i++) {
+ q[i] = sycl::round(xi[i] / d);
+ }
+ }
- y[ib].qs[iqs] = q;
+ *(TQ *)&y[ib].qs[iqs] = q;
if (iqs > 0) {
return;
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
});
}
-static void norm_f32_sycl(const float *x, float *dst, const int ncols,
- const int nrows, const float eps,
- queue_ptr stream) {
- GGML_ASSERT(ncols % WARP_SIZE == 0);
- if (ncols < 1024) {
- const sycl::range<3> block_dims(1, 1, WARP_SIZE);
- stream->submit([&](sycl::handler &cgh) {
- sycl::local_accessor<sycl::float2, 1> s_sum_acc_ct1(
- sycl::range<1>(32), cgh);
-
- cgh.parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
- block_dims),
- [=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
- norm_f32(x, dst, ncols, eps, item_ct1,
- s_sum_acc_ct1.get_pointer(), WARP_SIZE);
- });
- });
- } else {
- const int work_group_size = get_work_group_size(stream->get_device());
- const sycl::range<3> block_dims(1, 1, work_group_size);
- /*
- DPCT1049:17: The work-group size passed to the SYCL kernel may exceed
- the limit. To get the device limit, query
- info::device::max_work_group_size. Adjust the work-group size if needed.
- */
- stream->submit([&](sycl::handler &cgh) {
- sycl::local_accessor<sycl::float2, 1> s_sum_acc_ct1(
- sycl::range<1>(32), cgh);
-
- cgh.parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
- block_dims),
- [=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
- norm_f32(x, dst, ncols, eps, item_ct1,
- s_sum_acc_ct1.get_pointer(), work_group_size);
- });
- });
- }
-}
-
-static void group_norm_f32_sycl(const float *x, float *dst,
- const int num_groups, const int group_size,
- const int ne_elements, queue_ptr stream) {
- static const float eps = 1e-6f;
- if (group_size < 1024) {
- const sycl::range<3> block_dims(1, 1, WARP_SIZE);
- stream->submit([&](sycl::handler &cgh) {
- sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(32),
- cgh);
-
- const float eps_ct4 = eps;
-
- cgh.parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims,
- block_dims),
- [=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
- group_norm_f32(
- x, dst, group_size, ne_elements, eps_ct4, item_ct1,
- s_sum_acc_ct1.get_pointer(), WARP_SIZE);
- });
- });
- } else {
- const int work_group_size = get_work_group_size(stream->get_device());
- const sycl::range<3> block_dims(1, 1, work_group_size);
- /*
- DPCT1049:18: The work-group size passed to the SYCL kernel may exceed
- the limit. To get the device limit, query
- info::device::max_work_group_size. Adjust the work-group size if needed.
- */
-
- stream->submit([&](sycl::handler &cgh) {
- sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(32),
- cgh);
-
- const float eps_ct4 = eps;
-
- cgh.parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims,
- block_dims),
- [=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
- group_norm_f32(x, dst, group_size, ne_elements,
- eps_ct4, item_ct1,
- s_sum_acc_ct1.get_pointer(), work_group_size);
- });
- });
- }
-}
-
static void concat_f32_sycl(const float *x, const float *y, float *dst,
const int ne0, int ne1, int ne2, int ne02,
queue_ptr stream) {
});
}
-static void rms_norm_f32_sycl(const float *x, float *dst, const int ncols,
- const int nrows, const float eps,
- queue_ptr stream) {
- GGML_ASSERT(ncols % WARP_SIZE == 0);
- // printf("%s ncols=%d, nrows=%d, WARP_SIZE=%d\n", __func__, ncols, nrows, WARP_SIZE);
- if (ncols < 1024) {
- const sycl::range<3> block_dims(1, 1, WARP_SIZE);
- stream->submit([&](sycl::handler &cgh) {
- sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(32),
- cgh);
-
- cgh.parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
- block_dims),
- [=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
- rms_norm_f32(x, dst, ncols, eps, item_ct1,
- s_sum_acc_ct1.get_pointer(), WARP_SIZE);
- });
- });
- } else {
- const int work_group_size = get_work_group_size(stream->get_device());
- const sycl::range<3> block_dims(1, 1, work_group_size);
- /*
- DPCT1049:19: The work-group size passed to the SYCL kernel may exceed
- the limit. To get the device limit, query
- info::device::max_work_group_size. Adjust the work-group size if needed.
- */
- stream->submit([&](sycl::handler &cgh) {
- sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(32),
- cgh);
-
- cgh.parallel_for(
- sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
- block_dims),
- [=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
- rms_norm_f32(x, dst, ncols, eps, item_ct1,
- s_sum_acc_ct1.get_pointer(), work_group_size);
- });
- });
- }
-}
-
static void quantize_row_q8_1_sycl(const float *x, void *vy, const int kx,
const int ky, const int kx_padded,
queue_ptr stream) {
const int block_num_x = (kx_padded + SYCL_QUANTIZE_BLOCK_SIZE - 1) / SYCL_QUANTIZE_BLOCK_SIZE;
const sycl::range<3> num_blocks(1, ky, block_num_x);
- const sycl::range<3> block_size(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE);
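+    // with QK8_1 == 32 each work-item covers 1 element on 32-wide sub-groups and 2 on 16-wide ones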
+ int constexpr QUANT_BLOCK_TILE = QK8_1 / WARP_SIZE;
+ static_assert(QK8_1 % WARP_SIZE == 0);
+ const sycl::range<3> block_size(1, 1, SYCL_QUANTIZE_BLOCK_SIZE / QUANT_BLOCK_TILE);
{
dpct::has_capability_or_fail(stream->get_device(),
{sycl::aspect::fp16});
stream->parallel_for(
sycl::nd_range<3>(num_blocks * block_size, block_size),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
- quantize_q8_1(x, vy, kx, kx_padded, item_ct1);
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
+ quantize_q8_1<QUANT_BLOCK_TILE>(x, vy, kx, kx_padded, item_ct1);
});
}
}
stream->parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_p021_f16_f32(vx, y, dst, ncols_x, nrows_x, nchannels_x,
nchannels_y, item_ct1);
});
stream->parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_nc_f16_f32(vx, y, dst, ncols_x, nrows_x,
row_stride_x, channel_stride_x,
nchannels_y / nchannels_x, item_ct1);
const sycl::range<3> block_nums(1, nrows, 1);
stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
k_sum_rows_f32(x, dst, ncols, item_ct1);
});
}
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
soft_max_f32<vals_smem, ncols_template, block_size_template>(x, mask, dst, ncols_par,
nrows_y, scale, max_bias, m0,
m1, n_head_log2, item_ct1,
return user_number;
}
-static inline int get_work_group_size(const sycl::device& device) {
- dpct::device_info prop;
- dpct::get_device_info(prop, device);
- return prop.get_max_work_group_size();
-}
-
static void ggml_check_sycl() try {
static bool initialized = false;
(void) src1_dd;
}
-inline void ggml_sycl_op_norm(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
- ggml_tensor *dst, const float *src0_dd,
- const float *src1_dd, float *dst_dd,
- const queue_ptr &main_stream) {
-
- GGML_ASSERT(src0->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
-
- const int64_t ne00 = src0->ne[0];
- const int64_t nrows = ggml_nrows(src0);
-
- float eps;
- memcpy(&eps, dst->op_params, sizeof(float));
-
- norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream);
-
- (void) src1;
- (void) dst;
- (void) src1_dd;
-}
-
-inline void ggml_sycl_op_group_norm(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
- const ggml_tensor *src1, ggml_tensor *dst,
- const float *src0_dd, const float *src1_dd,
- float *dst_dd,
- const queue_ptr &main_stream) {
-
- GGML_ASSERT(src0->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
-
- int num_groups = dst->op_params[0];
- int group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups);
- group_norm_f32_sycl(src0_dd, dst_dd, num_groups, group_size, src0->ne[0] * src0->ne[1] * src0->ne[2], main_stream);
-
- (void) src1;
- (void) dst;
- (void) src1_dd;
-}
-
inline void ggml_sycl_op_concat(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
const ggml_tensor *src1, ggml_tensor *dst,
const float *src0_dd, const float *src1_dd,
(void) src1_dd;
}
-inline void ggml_sycl_op_rms_norm(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
- const ggml_tensor *src1, ggml_tensor *dst,
- const float *src0_dd, const float *src1_dd,
- float *dst_dd,
- const queue_ptr &main_stream) {
-
- GGML_ASSERT(src0->type == GGML_TYPE_F32);
- GGML_ASSERT( dst->type == GGML_TYPE_F32);
-
- const int64_t ne00 = src0->ne[0];
- const int64_t nrows = ggml_nrows(src0);
-
- float eps;
- memcpy(&eps, dst->op_params, sizeof(float));
-
- rms_norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream);
-
- (void) src1;
- (void) dst;
- (void) src1_dd;
-}
-
static int64_t get_row_rounding(ggml_type type, const std::array<float, GGML_SYCL_MAX_DEVICES> & tensor_split) {
int64_t min_compute_capability = INT_MAX;
int64_t max_compute_capability = INT_MIN;
static void ggml_sycl_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
const bool split = ggml_backend_buffer_is_sycl_split(src0->buffer);
-
int64_t min_compute_capability = INT_MAX;
if (split) {
#include "mmq.hpp"
#include "mmvq.hpp"
#include "rope.hpp"
+#include "norm.hpp"
#endif // GGML_SYCL_BACKEND_HPP
}
};
+// common host functions
+
+static inline int get_work_group_size(const sycl::device& device) {
+ dpct::device_info prop;
+ dpct::get_device_info(prop, device);
+ return prop.get_max_work_group_size();
+}
+
+
+// common device functions
+
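+// XOR-shuffle (butterfly) reductions across the sub-group: the mask starts at
+// WARP_SIZE / 2, so log2(WARP_SIZE) steps cover both 16- and 32-wide sub-groups.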
+static __dpct_inline__ float warp_reduce_sum(float x,
+ const sycl::nd_item<3>& item_ct1) {
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ /*
+ DPCT1096:98: The right-most dimension of the work-group used in the SYCL
+ kernel that calls this function may be less than "32". The function
+ "dpct::permute_sub_group_by_xor" may return an unexpected result on the
+ CPU device. Modify the size of the work-group to ensure that the value
+ of the right-most dimension is a multiple of "32".
+ */
+ x += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), x, mask);
+ }
+ return x;
+}
+
+static __dpct_inline__ sycl::float2
+warp_reduce_sum(sycl::float2 a, const sycl::nd_item<3>& item_ct1) {
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ a.x() += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), a.x(),
+ mask);
+ a.y() += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), a.y(),
+ mask);
+ }
+ return a;
+}
+
+static __dpct_inline__ float warp_reduce_max(float x,
+ const sycl::nd_item<3>& item_ct1) {
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ /*
+ DPCT1096:97: The right-most dimension of the work-group used in the SYCL
+ kernel that calls this function may be less than "32". The function
+ "dpct::permute_sub_group_by_xor" may return an unexpected result on the
+ CPU device. Modify the size of the work-group to ensure that the value
+ of the right-most dimension is a multiple of "32".
+ */
+ x = sycl::fmax(x, dpct::permute_sub_group_by_xor(
+ item_ct1.get_sub_group(), x, mask));
+ }
+ return x;
+}
#endif // GGML_SYCL_COMMON_HPP
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
stream->parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
dequantize_mul_mat_vec<1, 1, convert_f16>(vx, y, dst, ncols,
nrows, item_ct1);
});
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
stream->parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>(
vx, y, dst, ncols, nrows, item_ct1);
});
stream->parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>(
vx, y, dst, ncols, nrows, item_ct1);
});
stream->parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>(
vx, y, dst, ncols, nrows, item_ct1);
});
stream->parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>(
vx, y, dst, ncols, nrows, item_ct1);
});
stream->parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>(
vx, y, dst, ncols, nrows, item_ct1);
});
const int ny = 2; // very slightly faster than 1 even when K_QUANTS_PER_ITERATION = 2
const int block_num_y = (nrows + ny - 1) / ny;
const sycl::range<3> block_nums(1, 1, block_num_y);
- const sycl::range<3> block_dims(1, ny, 32);
+ const sycl::range<3> block_dims(1, ny, WARP_SIZE);
stream->parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
dequantize_mul_mat_vec_q2_k(vx, y, dst, ncols, nrows, item_ct1);
});
}
const int ny = 2 / K_QUANTS_PER_ITERATION;
const int block_num_y = (nrows + ny - 1) / ny;
const sycl::range<3> block_nums(1, 1, block_num_y);
- const sycl::range<3> block_dims(1, ny, 32);
+ const sycl::range<3> block_dims(1, ny, WARP_SIZE);
stream->parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
dequantize_mul_mat_vec_q3_k(vx, y, dst, ncols, nrows, item_ct1);
});
}
const int ny = 2 / K_QUANTS_PER_ITERATION;
const int block_num_y = (nrows + ny - 1) / ny;
const sycl::range<3> block_nums(1, 1, block_num_y);
- const sycl::range<3> block_dims(1, ny, 32);
+ const sycl::range<3> block_dims(1, ny, WARP_SIZE);
stream->parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
dequantize_mul_mat_vec_q4_k(vx, y, dst, ncols, nrows, item_ct1);
});
}
const int nrows,
dpct::queue_ptr stream) {
GGML_ASSERT(ncols % QK_K == 0);
- const sycl::range<3> block_dims(1, 1, 32);
+ const sycl::range<3> block_dims(1, 1, WARP_SIZE);
stream->parallel_for(
sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
dequantize_mul_mat_vec_q5_k(vx, y, dst, ncols, item_ct1);
});
}
const int ny = 2 / K_QUANTS_PER_ITERATION;
const int block_num_y = (nrows + ny - 1) / ny;
const sycl::range<3> block_nums(1, 1, block_num_y);
- const sycl::range<3> block_dims(1, ny, 32);
+ const sycl::range<3> block_dims(1, ny, WARP_SIZE);
stream->parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+ [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
dequantize_mul_mat_vec_q6_k(vx, y, dst, ncols, nrows, item_ct1);
});
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
// sum up partial sums and write back result
#pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
tmp +=
dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
}
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q<QK4_0, QI4_0, block_q4_0,
VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>(
vx, vy, dst, ncols, nrows, item_ct1);
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q<QK4_0, QI4_1, block_q4_1,
VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>(
vx, vy, dst, ncols, nrows, item_ct1);
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q<QK5_0, QI5_0, block_q5_0,
VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>(
vx, vy, dst, ncols, nrows, item_ct1);
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q<QK5_1, QI5_1, block_q5_1,
VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>(
vx, vy, dst, ncols, nrows, item_ct1);
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q<QK8_0, QI8_0, block_q8_0,
VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>(
vx, vy, dst, ncols, nrows, item_ct1);
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q<QK_K, QI2_K, block_q2_K,
VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>(
vx, vy, dst, ncols, nrows, item_ct1);
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q<QK_K, QI3_K, block_q3_K,
VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>(
vx, vy, dst, ncols, nrows, item_ct1);
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q<QK_K, QI4_K, block_q4_K,
VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>(
vx, vy, dst, ncols, nrows, item_ct1);
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q<QK_K, QI5_K, block_q5_K,
VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>(
vx, vy, dst, ncols, nrows, item_ct1);
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q<QK_K, QI6_K, block_q6_K,
VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>(
vx, vy, dst, ncols, nrows, item_ct1);
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q_iq2_xxs_q8_1<QK_K, QI2_XXS/2, block_iq2_xxs, 1>(
vx, vy, dst, ncols, nrows, item_ct1);
});
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q_iq2_xs_q8_1<QK_K, QI2_XS/2, block_iq2_xs, 1>(
vx, vy, dst, ncols, nrows, item_ct1);
});
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q_iq2_s_q8_1<QK_K, QI2_S/2, block_iq2_s, 1>(
vx, vy, dst, ncols, nrows, item_ct1);
});
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q_iq3_xxs_q8_1<QK_K, QI3_XXS/2, block_iq3_xxs, 1>(
vx, vy, dst, ncols, nrows, item_ct1);
});
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q_iq3_s_q8_1<QK_K, QI3_S/2, block_iq3_s, 1>(
vx, vy, dst, ncols, nrows, item_ct1);
});
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q_iq1_s_q8_1<QK_K, QI1_S, block_iq1_s, 1>(
vx, vy, dst, ncols, nrows, item_ct1);
});
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q_iq1_m_q8_1<QK_K, QI1_S, block_iq1_m, 1>(
vx, vy, dst, ncols, nrows, item_ct1);
});
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q_iq4_nl_q8_1<QK4_NL, QI4_NL, block_iq4_nl, 1>(
vx, vy, dst, ncols, nrows, item_ct1);
});
cgh.parallel_for(
sycl::nd_range<3>(block_nums * block_dims, block_dims),
[=](sycl::nd_item<3> item_ct1)
- [[intel::reqd_sub_group_size(32)]] {
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
mul_mat_vec_q_iq4_xs_q8_1<QK_K, QI4_XS/4, block_iq4_xs, 1>(
vx, vy, dst, ncols, nrows, item_ct1);
});
const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
float *dst_dd_i, const int64_t row_low, const int64_t row_high,
- const int64_t src1_ncols, const int64_t src1_padded_row_size,
+ const int64_t src1_ncols, const int64_t src1_padded_col_size,
const dpct::queue_ptr &stream) {
const int64_t ne10 = src1->ne[0];
int id;
SYCL_CHECK(
CHECK_TRY_ERROR(id = get_current_device_id()));
-
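+    // a block_q8_1 of q8_1_ts bytes holds q8_1_bs (QK8_1) quantized values; the ratio converts element counts into byte offsets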
+ const size_t q8_1_ts = sizeof(block_q8_1);
+ const size_t q8_1_bs = QK8_1;
// the main device has a larger memory buffer to hold the results from all GPUs
// nrows_dst == nrows of the matrix that the kernel writes into
const int64_t nrows_dst = id == ctx.device ? ne00 : row_diff;
-
- switch (src0->type) {
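+    // src1 was quantized column by column, each column padded to src1_padded_col_size elements; advance through the q8_1 buffer and the dst columns together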
+ for (int i = 0; i < src1_ncols; i++)
+ {
+ const size_t src1_ddq_i_offset = i * src1_padded_col_size * q8_1_ts / q8_1_bs;
+ const char* src1_ddq_i_bs = src1_ddq_i + src1_ddq_i_offset;
+ float* dst_dd_i_bs = dst_dd_i + i * dst->ne[0];
+ switch (src0->type) {
case GGML_TYPE_Q4_0:
- mul_mat_vec_q4_0_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_q4_0_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_Q4_1:
- mul_mat_vec_q4_1_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_q4_1_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_Q5_0:
- mul_mat_vec_q5_0_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_q5_0_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_Q5_1:
- mul_mat_vec_q5_1_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_q5_1_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_Q8_0:
- mul_mat_vec_q8_0_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_q8_0_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_Q2_K:
- mul_mat_vec_q2_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_q2_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_Q3_K:
- mul_mat_vec_q3_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_q3_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_Q4_K:
- mul_mat_vec_q4_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_q4_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_Q5_K:
- mul_mat_vec_q5_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_q5_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_Q6_K:
- mul_mat_vec_q6_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_q6_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_IQ1_S:
- mul_mat_vec_iq1_s_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_iq1_s_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_IQ1_M:
- mul_mat_vec_iq1_m_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_iq1_m_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_IQ2_XXS:
- mul_mat_vec_iq2_xxs_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_iq2_xxs_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_IQ2_XS:
- mul_mat_vec_iq2_xs_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_iq2_xs_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_IQ2_S:
- mul_mat_vec_iq2_s_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_iq2_s_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_IQ3_XXS:
- mul_mat_vec_iq3_xxs_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_iq3_xxs_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_IQ3_S:
- mul_mat_vec_iq3_s_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_iq3_s_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_IQ4_NL:
- mul_mat_vec_iq4_nl_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_iq4_nl_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
case GGML_TYPE_IQ4_XS:
- mul_mat_vec_iq4_xs_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ mul_mat_vec_iq4_xs_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
break;
default:
GGML_ASSERT(false);
break;
+ }
}
-
(void) src1;
(void) dst;
(void) src1_ddf_i;
- (void) src1_ncols;
- (void) src1_padded_row_size;
}
--- /dev/null
+#include "norm.hpp"
+
+static void norm_f32(const float* x, float* dst, const int ncols, const float eps,
+ const sycl::nd_item<3>& item_ct1, sycl::float2* s_sum, int block_size) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+ const int tid = item_ct1.get_local_id(2);
+
+ const int nthreads = item_ct1.get_local_range(2);
+ const int nwarps = nthreads / WARP_SIZE;
+ assert(nwarps % WARP_SIZE == 0);
+ sycl::float2 mean_var = sycl::float2(0.f, 0.f);
+
+ for (int col = tid; col < ncols; col += block_size) {
+ const float xi = x[row * ncols + col];
+ mean_var.x() += xi;
+ mean_var.y() += xi * xi;
+ }
+
+ // sum up partial sums
+ mean_var = warp_reduce_sum(mean_var, item_ct1);
+ if (block_size > WARP_SIZE) {
+
+ int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
+ int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
+ if (lane_id == 0) {
+ s_sum[warp_id] = mean_var;
+ }
+ /*
+ DPCT1118:0: SYCL group functions and algorithms must be encountered in
+ converged control flow. You may need to adjust the code.
+ */
+ item_ct1.barrier(sycl::access::fence_space::local_space);
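+        // s_sum holds one partial per warp; with more than WARP_SIZE warps each lane accumulates nreduce entries before the final warp reduction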
+ mean_var = 0.f;
+ int nreduce = nwarps / WARP_SIZE;
+ for (size_t i = 0; i < nreduce; i += 1)
+ {
+ mean_var += s_sum[lane_id + i * WARP_SIZE];
+ }
+ mean_var = warp_reduce_sum(mean_var, item_ct1);
+ }
+
+ const float mean = mean_var.x() / ncols;
+ const float var = mean_var.y() / ncols - mean * mean;
+ const float inv_std = sycl::rsqrt(var + eps);
+
+ for (int col = tid; col < ncols; col += block_size) {
+ dst[row * ncols + col] = (x[row * ncols + col] - mean) * inv_std;
+ }
+}
+
+static void group_norm_f32(const float* x, float* dst, const int group_size, const int ne_elements, const float eps,
+ const sycl::nd_item<3>& item_ct1, float* s_sum, int block_size) {
+ int start = item_ct1.get_group(2) * group_size;
+ int end = start + group_size;
+ const int nthreads = item_ct1.get_local_range(2);
+ const int nwarps = nthreads / WARP_SIZE;
+ assert(nwarps % WARP_SIZE == 0);
+ start += item_ct1.get_local_id(2);
+
+ if (end >= ne_elements) {
+ end = ne_elements;
+ }
+
+ float tmp = 0.0f; // partial sum for thread in warp
+
+ for (int j = start; j < end; j += block_size) {
+ tmp += x[j];
+ }
+
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ if (block_size > WARP_SIZE) {
+
+ int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
+ int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
+ if (lane_id == 0) {
+ s_sum[warp_id] = tmp;
+ }
+ /*
+ DPCT1118:1: SYCL group functions and algorithms must be encountered in
+ converged control flow. You may need to adjust the code.
+ */
+ /*
+ DPCT1065:54: Consider replacing sycl::nd_item::barrier() with
+ sycl::nd_item::barrier(sycl::access::fence_space::local_space) for
+ better performance if there is no access to global memory.
+ */
+ item_ct1.barrier();
+ tmp = 0.f;
+ int nreduce = nwarps / WARP_SIZE;
+ for (size_t i = 0; i < nreduce; i += 1)
+ {
+ tmp += s_sum[lane_id + i * WARP_SIZE];
+ }
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ }
+
+ float mean = tmp / group_size;
+ tmp = 0.0f;
+
+ for (int j = start; j < end; j += block_size) {
+ float xi = x[j] - mean;
+ dst[j] = xi;
+ tmp += xi * xi;
+ }
+
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ if (block_size > WARP_SIZE) {
+
+ int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
+ int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
+ if (lane_id == 0) {
+ s_sum[warp_id] = tmp;
+ }
+ /*
+ DPCT1118:2: SYCL group functions and algorithms must be encountered in
+ converged control flow. You may need to adjust the code.
+ */
+ /*
+ DPCT1065:55: Consider replacing sycl::nd_item::barrier() with
+ sycl::nd_item::barrier(sycl::access::fence_space::local_space) for
+ better performance if there is no access to global memory.
+ */
+ item_ct1.barrier();
+        tmp = 0.f;
+        int nreduce = nwarps / WARP_SIZE;
+        for (size_t i = 0; i < nreduce; i += 1)
+        {
+            tmp += s_sum[lane_id + i * WARP_SIZE];
+        }
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ }
+
+ float variance = tmp / group_size;
+ float scale = sycl::rsqrt(variance + eps);
+ for (int j = start; j < end; j += block_size) {
+ dst[j] *= scale;
+ }
+}
+
+static void rms_norm_f32(const float* x, float* dst, const int ncols, const float eps,
+ const sycl::nd_item<3>& item_ct1, float* s_sum, int block_size) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+ const int tid = item_ct1.get_local_id(2);
+ const int nthreads = item_ct1.get_local_range(2);
+ const int nwarps = nthreads / WARP_SIZE;
+ assert(nwarps % WARP_SIZE == 0);
+ float tmp = 0.0f; // partial sum for thread in warp
+
+ for (int col = tid; col < ncols; col += block_size) {
+ const float xi = x[row * ncols + col];
+ tmp += xi * xi;
+ }
+
+ // sum up partial sums
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ if (block_size > WARP_SIZE) {
+
+ int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
+ int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
+ if (lane_id == 0) {
+ s_sum[warp_id] = tmp;
+ }
+ /*
+ DPCT1118:3: SYCL group functions and algorithms must be encountered in
+ converged control flow. You may need to adjust the code.
+ */
+ item_ct1.barrier(sycl::access::fence_space::local_space);
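+        // accumulate the per-warp partials (nreduce entries per lane) before the final warp reduction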
+ int nreduce = nwarps / WARP_SIZE;
+ tmp = 0.f;
+ for (size_t i = 0; i < nreduce; i += 1)
+ {
+ tmp += s_sum[lane_id + i * WARP_SIZE];
+ }
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ }
+
+ const float mean = tmp / ncols;
+ const float scale = sycl::rsqrt(mean + eps);
+
+ for (int col = tid; col < ncols; col += block_size) {
+ dst[row * ncols + col] = scale * x[row * ncols + col];
+ }
+}
+
+static void norm_f32_sycl(const float* x, float* dst, const int ncols,
+ const int nrows, const float eps,
+ queue_ptr stream) {
+ GGML_ASSERT(ncols % WARP_SIZE == 0);
+ if (ncols < 1024) {
+ const sycl::range<3> block_dims(1, 1, WARP_SIZE);
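+        // a single sub-group handles each row, so no cross-warp partials are needed and s_sum stays null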
+ stream->submit([&](sycl::handler& cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
+ block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
+ norm_f32(x, dst, ncols, eps, item_ct1,
+ nullptr, WARP_SIZE);
+ });
+ });
+ }
+ else {
+ const int work_group_size = get_work_group_size(stream->get_device());
+ const sycl::range<3> block_dims(1, 1, work_group_size);
+ /*
+ DPCT1049:17: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ stream->submit([&](sycl::handler& cgh) {
+ sycl::local_accessor<sycl::float2, 1> s_sum_acc_ct1(
+ sycl::range<1>(work_group_size / WARP_SIZE), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
+ block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
+ norm_f32(x, dst, ncols, eps, item_ct1,
+ s_sum_acc_ct1.get_pointer(), work_group_size);
+ });
+ });
+ }
+}
+
+static void group_norm_f32_sycl(const float* x, float* dst,
+ const int num_groups, const int group_size,
+ const int ne_elements, queue_ptr stream) {
+ static const float eps = 1e-6f;
+ if (group_size < 1024) {
+ const sycl::range<3> block_dims(1, 1, WARP_SIZE);
+ stream->submit([&](sycl::handler& cgh) {
+ const float eps_ct4 = eps;
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims,
+ block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
+ group_norm_f32(
+ x, dst, group_size, ne_elements, eps_ct4, item_ct1,
+ nullptr, WARP_SIZE);
+ });
+ });
+ }
+ else {
+ const int work_group_size = get_work_group_size(stream->get_device());
+ const sycl::range<3> block_dims(1, 1, work_group_size);
+ /*
+ DPCT1049:18: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+
+ stream->submit([&](sycl::handler& cgh) {
+ sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(work_group_size / WARP_SIZE),
+ cgh);
+
+ const float eps_ct4 = eps;
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims,
+ block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
+ group_norm_f32(x, dst, group_size, ne_elements,
+ eps_ct4, item_ct1,
+ s_sum_acc_ct1.get_pointer(), work_group_size);
+ });
+ });
+ }
+}
+
+static void rms_norm_f32_sycl(const float* x, float* dst, const int ncols,
+ const int nrows, const float eps,
+ queue_ptr stream) {
+ GGML_ASSERT(ncols % WARP_SIZE == 0);
+ // printf("%s ncols=%d, nrows=%d, WARP_SIZE=%d\n", __func__, ncols, nrows, WARP_SIZE);
+ if (ncols < 1024) {
+ const sycl::range<3> block_dims(1, 1, WARP_SIZE);
+ stream->submit([&](sycl::handler& cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
+ block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
+ rms_norm_f32(x, dst, ncols, eps, item_ct1,
+ nullptr, WARP_SIZE);
+ });
+ });
+ }
+ else {
+ const int work_group_size = get_work_group_size(stream->get_device());
+ const sycl::range<3> block_dims(1, 1, work_group_size);
+ /*
+ DPCT1049:19: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ stream->submit([&](sycl::handler& cgh) {
+ sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(work_group_size / WARP_SIZE),
+ cgh);
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
+ block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[intel::reqd_sub_group_size(WARP_SIZE)]] {
+ rms_norm_f32(x, dst, ncols, eps, item_ct1,
+ s_sum_acc_ct1.get_pointer(), work_group_size);
+ });
+ });
+ }
+}
+
+void ggml_sycl_op_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0, const ggml_tensor* src1,
+ ggml_tensor* dst, const float* src0_dd,
+ const float* src1_dd, float* dst_dd,
+ const queue_ptr& main_stream) {
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ const int64_t ne00 = src0->ne[0];
+ const int64_t nrows = ggml_nrows(src0);
+
+ float eps;
+ memcpy(&eps, dst->op_params, sizeof(float));
+
+ norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream);
+
+ (void)src1;
+ (void)dst;
+ (void)src1_dd;
+}
+
+void ggml_sycl_op_group_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0,
+ const ggml_tensor* src1, ggml_tensor* dst,
+ const float* src0_dd, const float* src1_dd,
+ float* dst_dd,
+ const queue_ptr& main_stream) {
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ int num_groups = dst->op_params[0];
+ int group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups);
+ group_norm_f32_sycl(src0_dd, dst_dd, num_groups, group_size, src0->ne[0] * src0->ne[1] * src0->ne[2], main_stream);
+
+ (void)src1;
+ (void)dst;
+ (void)src1_dd;
+}
+
+void ggml_sycl_op_rms_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0,
+ const ggml_tensor* src1, ggml_tensor* dst,
+ const float* src0_dd, const float* src1_dd,
+ float* dst_dd,
+ const queue_ptr& main_stream) {
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ const int64_t ne00 = src0->ne[0];
+ const int64_t nrows = ggml_nrows(src0);
+
+ float eps;
+ memcpy(&eps, dst->op_params, sizeof(float));
+
+ rms_norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream);
+
+ (void)src1;
+ (void)dst;
+ (void)src1_dd;
+}
--- /dev/null
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_NORM_HPP
+#define GGML_SYCL_NORM_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0, const ggml_tensor* src1,
+ ggml_tensor* dst, const float* src0_dd,
+ const float* src1_dd, float* dst_dd,
+ const queue_ptr& main_stream);
+
+void ggml_sycl_op_rms_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0,
+ const ggml_tensor* src1, ggml_tensor* dst,
+ const float* src0_dd, const float* src1_dd,
+ float* dst_dd,
+ const queue_ptr& main_stream);
+
+void ggml_sycl_op_group_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0,
+ const ggml_tensor* src1, ggml_tensor* dst,
+ const float* src0_dd, const float* src1_dd,
+ float* dst_dd,
+ const queue_ptr& main_stream);
+
+#endif // GGML_SYCL_NORM_HPP
#define GGML_SYCL_MAX_STREAMS 8
#define GGML_SYCL_MAX_BUFFERS 256
-#define WARP_SIZE 32
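+// WARP_SIZE now follows the build target via GGML_SYCL_WARP_SIZE (32 for NVIDIA, 16 otherwise, set in CMakeLists.txt)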
+#define WARP_SIZE GGML_SYCL_WARP_SIZE
#define MATRIX_ROW_PADDING 512 // last row of quant. matrices is a multiple of this to avoid out-of-bounds memory accesses
#define SYCL_GELU_BLOCK_SIZE 256