ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_sub>>(ctx, dst->src[0], dst->src[1], dst);
}
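+// COUNT_EQUAL routed through the broadcasting binary kernel with the op_count_equal predicate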
+inline void ggml_sycl_op_count_equal(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_count_equal>>(ctx, dst->src[0], dst->src[1], dst);
+}
+
inline void ggml_sycl_op_mul(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_mul>>(ctx, dst->src[0], dst->src[1], dst);
}
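+// dispatch entry point for GGML_OP_COUNT_EQUAL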
+void ggml_sycl_count_equal(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ ggml_sycl_op_count_equal(ctx, dst);
+}
+
void ggml_sycl_mul(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
ggml_sycl_op_mul(ctx, dst);
}
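+// per-element predicate used by COUNT_EQUAL: 1.0f where a == b, 0.0f otherwise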
+static __dpct_inline__ float op_count_equal(const float a, const float b) {
+ return (a == b) ? 1.0f : 0.0f;
+}
+
+void ggml_sycl_count_equal(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
static __dpct_inline__ float op_mul(const float a, const float b) {
return a * b;
}
case GGML_OP_SUB:
ggml_sycl_sub(ctx, dst);
break;
+ case GGML_OP_COUNT_EQUAL:
+ ggml_sycl_count_equal(ctx, dst);
+ break;
case GGML_OP_ACC:
ggml_sycl_acc(ctx, dst);
break;
case GGML_OP_ADD:
case GGML_OP_ADD1:
case GGML_OP_SUB:
+ case GGML_OP_COUNT_EQUAL:
case GGML_OP_MUL:
case GGML_OP_DIV:
case GGML_OP_REPEAT: