vk_pipeline pipeline_hardsigmoid[2];
vk_pipeline pipeline_hardswish[2];
vk_pipeline pipeline_abs[2];
+ vk_pipeline pipeline_softplus[2];
+ vk_pipeline pipeline_step[2];
+ vk_pipeline pipeline_round[2];
+ vk_pipeline pipeline_ceil[2];
+ vk_pipeline pipeline_floor[2];
+ vk_pipeline pipeline_trunc[2];
+
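+ // dedicated pipelines for ADD1 (one per supported src0/src1/dst type combination),
+ // and for ARANGE and FILL (f32 output only)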
+ vk_pipeline pipeline_add1_f16_f16;
+ vk_pipeline pipeline_add1_f16_f32;
+ vk_pipeline pipeline_add1_f32_f32;
+
+ vk_pipeline pipeline_arange_f32;
+
+ vk_pipeline pipeline_fill_f32;
vk_pipeline pipeline_geglu[2];
vk_pipeline pipeline_reglu[2];
CREATE_UNARY(hardsigmoid)
CREATE_UNARY(hardswish)
CREATE_UNARY(abs)
+ CREATE_UNARY(softplus)
+ CREATE_UNARY(step)
+ CREATE_UNARY(round)
+ CREATE_UNARY(ceil)
+ CREATE_UNARY(floor)
+ CREATE_UNARY(trunc)
#undef CREATE_UNARY
#define CREATE_UNARY_RTE(name) \
CREATE_UNARY_RTE(exp)
#undef CREATE_UNARY_RTE
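+ // ADD1 follows the generic binary-op layout (src0, src1, dst); ARANGE and FILL
+ // only write dst, so they bind a single buffer.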
+ ggml_vk_create_pipeline(device, device->pipeline_add1_f16_f16, "add1_f16_f16", add1_f16_f16_len, add1_f16_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_add1_f16_f32, "add1_f16_f32", add1_f16_f32_len, add1_f16_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_add1_f32_f32, "add1_f32_f32", add1_f32_f32_len, add1_f32_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_arange_f32, "arange_f32", arange_f32_len, arange_f32_data, "main", 1, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_fill_f32, "fill_f32", fill_f32_len, fill_f32_data, "main", 1, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+
#define CREATE_GLU(name) \
if (device->float_controls_rte_fp16) { \
ggml_vk_create_pipeline(device, device->pipeline_ ## name [0], #name "_f32_rte", name ## _f32_rte_len, name ## _f32_rte_data, "main", 3, sizeof(vk_op_glu_push_constants), {512, 1, 1}, {}, 1, true); \
return ctx->device->pipeline_hardswish[dst->type == GGML_TYPE_F16];
case GGML_UNARY_OP_ABS:
return ctx->device->pipeline_abs[dst->type == GGML_TYPE_F16];
+ case GGML_UNARY_OP_SOFTPLUS:
+ return ctx->device->pipeline_softplus[dst->type == GGML_TYPE_F16];
+ case GGML_UNARY_OP_STEP:
+ return ctx->device->pipeline_step[dst->type == GGML_TYPE_F16];
+ case GGML_UNARY_OP_ROUND:
+ return ctx->device->pipeline_round[dst->type == GGML_TYPE_F16];
+ case GGML_UNARY_OP_CEIL:
+ return ctx->device->pipeline_ceil[dst->type == GGML_TYPE_F16];
+ case GGML_UNARY_OP_FLOOR:
+ return ctx->device->pipeline_floor[dst->type == GGML_TYPE_F16];
+ case GGML_UNARY_OP_TRUNC:
+ return ctx->device->pipeline_trunc[dst->type == GGML_TYPE_F16];
default:
break;
}
case GGML_OP_CONV_TRANSPOSE_2D:
if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 &&
ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ggml_is_contiguous(dst)) {
- std::array<uint32_t, 3> elements;
+ std::array<uint32_t, 3> elements{};
if (op == GGML_OP_CONV_2D) elements = ggml_vk_get_conv_elements(dst);
else if (op == GGML_OP_CONV_TRANSPOSE_2D) elements = ggml_vk_get_conv_transpose_2d_elements(dst);
vk_conv_shapes shape;
}
}
return nullptr;
+ case GGML_OP_ADD1:
+ if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
+ return ctx->device->pipeline_add1_f16_f16;
+ }
+ if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
+ return ctx->device->pipeline_add1_f16_f32;
+ }
+ if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_add1_f32_f32;
+ }
+ return nullptr;
+ case GGML_OP_ARANGE:
+ if (dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_arange_f32;
+ }
+ return nullptr;
+ case GGML_OP_FILL:
+ if (dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_fill_f32;
+ }
+ return nullptr;
default:
return nullptr;
}
case GGML_OP_SUB:
case GGML_OP_DIV:
case GGML_OP_MUL:
+ case GGML_OP_ADD1:
+ case GGML_OP_ARANGE:
+ case GGML_OP_FILL:
case GGML_OP_SCALE:
case GGML_OP_SQR:
case GGML_OP_SQRT:
ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SQRT, vk_op_unary_push_constants_init(src0, dst));
}
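+// ADD1: dst = src0 + src1, where src1 is a single-element tensor whose value is
+// broadcast to every element of src0.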
+static void ggml_vk_add1(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t src1_type_size = ggml_type_size(src1->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_ADD1, {
+ (uint32_t)ggml_nelements(src0),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f, 0,
+ });
+}
+
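+// ARANGE: writes the sequence start, start + step, start + 2*step, ... into dst.
+// start/stop/step are stored in dst->op_params[0..2]; stop is not needed here
+// because the number of elements of dst already encodes it.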
+static void ggml_vk_arange(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst) {
+ VK_LOG_DEBUG("ggml_vk_arange(dst=" << dst << ", ne=" << ggml_nelements(dst) << ")");
+
+ vk_op_push_constants pc = {
+ (uint32_t)ggml_nelements(dst), // number of elements to write
+ 1, // unused
+ ggml_get_op_params_f32(dst, 0), // start
+ ggml_get_op_params_f32(dst, 2), // step (op_params[1] holds stop, implied by the element count)
+ };
+
+ vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, nullptr, nullptr, nullptr, dst, GGML_OP_ARANGE);
+ GGML_ASSERT(pipeline != nullptr);
+
+ ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
+ vk_subbuffer dst_buf = ggml_vk_tensor_subbuffer(ctx, dst, false);
+
+ std::array<uint32_t, 3> elements = { (uint32_t)ggml_nelements(dst), 1, 1 };
+
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { dst_buf }, pc, elements);
+}
+
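+// FILL: writes the constant stored in dst->op_params[0] to every element of dst.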
+static void ggml_vk_fill(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst) {
+ VK_LOG_DEBUG("ggml_vk_fill(dst=" << dst << ", ne=" << ggml_nelements(dst) << ")");
+
+ vk_op_push_constants pc = {
+ (uint32_t)ggml_nelements(dst), // number of elements to write
+ 1, // unused
+ ggml_get_op_params_f32(dst, 0), // fill value
+ 0.0f, // unused
+ };
+
+ vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, nullptr, nullptr, nullptr, dst, GGML_OP_FILL);
+ GGML_ASSERT(pipeline != nullptr);
+
+ ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
+ vk_subbuffer dst_buf = ggml_vk_tensor_subbuffer(ctx, dst, false);
+
+ std::array<uint32_t, 3> elements = { (uint32_t)ggml_nelements(dst), 1, 1 };
+
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { dst_buf }, pc, elements);
+}
+
static void ggml_vk_sin(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SIN, vk_op_unary_push_constants_init(src0, dst));
}
case GGML_UNARY_OP_HARDSIGMOID:
case GGML_UNARY_OP_HARDSWISH:
case GGML_UNARY_OP_ABS:
+ case GGML_UNARY_OP_SOFTPLUS:
+ case GGML_UNARY_OP_STEP:
+ case GGML_UNARY_OP_ROUND:
+ case GGML_UNARY_OP_CEIL:
+ case GGML_UNARY_OP_FLOOR:
+ case GGML_UNARY_OP_TRUNC:
break;
default:
return false;
case GGML_OP_SUB:
case GGML_OP_MUL:
case GGML_OP_DIV:
+ case GGML_OP_ADD1:
+ case GGML_OP_ARANGE:
+ case GGML_OP_FILL:
case GGML_OP_CONCAT:
case GGML_OP_UPSCALE:
case GGML_OP_SCALE:
case GGML_OP_UPSCALE:
ggml_vk_upscale(ctx, compute_ctx, src0, node);
+ break;
+ case GGML_OP_ADD1:
+ ggml_vk_add1(ctx, compute_ctx, src0, src1, node);
+
+ break;
+ case GGML_OP_ARANGE:
+ ggml_vk_arange(ctx, compute_ctx, node);
+
+ break;
+ case GGML_OP_FILL:
+ ggml_vk_fill(ctx, compute_ctx, node);
+
break;
case GGML_OP_SCALE:
ggml_vk_scale(ctx, compute_ctx, src0, node);
case GGML_UNARY_OP_HARDSIGMOID:
case GGML_UNARY_OP_HARDSWISH:
case GGML_UNARY_OP_ABS:
+ case GGML_UNARY_OP_SOFTPLUS:
+ case GGML_UNARY_OP_STEP:
+ case GGML_UNARY_OP_ROUND:
+ case GGML_UNARY_OP_CEIL:
+ case GGML_UNARY_OP_FLOOR:
+ case GGML_UNARY_OP_TRUNC:
ggml_vk_unary(ctx, compute_ctx, src0, node);
break;
default:
case GGML_OP_SUB:
case GGML_OP_MUL:
case GGML_OP_DIV:
+ case GGML_OP_ADD1:
+ case GGML_OP_ARANGE:
+ case GGML_OP_FILL:
case GGML_OP_ADD_ID:
case GGML_OP_CONCAT:
case GGML_OP_UPSCALE:
case GGML_UNARY_OP_HARDSIGMOID:
case GGML_UNARY_OP_HARDSWISH:
case GGML_UNARY_OP_ABS:
+ case GGML_UNARY_OP_SOFTPLUS:
+ case GGML_UNARY_OP_STEP:
+ case GGML_UNARY_OP_ROUND:
+ case GGML_UNARY_OP_CEIL:
+ case GGML_UNARY_OP_FLOOR:
+ case GGML_UNARY_OP_TRUNC:
buf = tensor->buffer;
break;
default:
case GGML_UNARY_OP_HARDSIGMOID:
case GGML_UNARY_OP_HARDSWISH:
case GGML_UNARY_OP_ABS:
+ case GGML_UNARY_OP_SOFTPLUS:
+ case GGML_UNARY_OP_STEP:
+ case GGML_UNARY_OP_ROUND:
+ case GGML_UNARY_OP_CEIL:
+ case GGML_UNARY_OP_FLOOR:
+ case GGML_UNARY_OP_TRUNC:
return ggml_is_contiguous(op->src[0]) &&
(op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) &&
(op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16) &&
case GGML_OP_UPSCALE:
case GGML_OP_ACC:
case GGML_OP_CONCAT:
+ case GGML_OP_ADD1:
+ case GGML_OP_ARANGE:
+ case GGML_OP_FILL:
case GGML_OP_SCALE:
case GGML_OP_PAD:
case GGML_OP_ROLL:
} else if (tensor->op == GGML_OP_SCALE) {
const float * params = (const float *)tensor->op_params;
tensor_clone = ggml_scale_bias(ggml_ctx, src_clone[0], params[0], params[1]);
+ } else if (tensor->op == GGML_OP_ADD1) {
+ tensor_clone = ggml_add1(ggml_ctx, src_clone[0], src_clone[1]);
+ } else if (tensor->op == GGML_OP_ARANGE) {
+ const float start = ggml_get_op_params_f32(tensor, 0);
+ const float stop = ggml_get_op_params_f32(tensor, 1);
+ const float step = ggml_get_op_params_f32(tensor, 2);
+ tensor_clone = ggml_arange(ggml_ctx, start, stop, step);
+ } else if (tensor->op == GGML_OP_FILL) {
+ const float value = ggml_get_op_params_f32(tensor, 0);
+ tensor_clone = ggml_fill(ggml_ctx, src_clone[0], value);
} else if (tensor->op == GGML_OP_SQR) {
tensor_clone = ggml_sqr(ggml_ctx, src_clone[0]);
} else if (tensor->op == GGML_OP_SQRT) {
case GGML_UNARY_OP_ABS:
tensor_clone = ggml_abs(ggml_ctx, src_clone[0]);
break;
+ case GGML_UNARY_OP_SOFTPLUS:
+ tensor_clone = ggml_softplus(ggml_ctx, src_clone[0]);
+ break;
+ case GGML_UNARY_OP_STEP:
+ tensor_clone = ggml_step(ggml_ctx, src_clone[0]);
+ break;
+ case GGML_UNARY_OP_ROUND:
+ tensor_clone = ggml_round(ggml_ctx, src_clone[0]);
+ break;
+ case GGML_UNARY_OP_CEIL:
+ tensor_clone = ggml_ceil(ggml_ctx, src_clone[0]);
+ break;
+ case GGML_UNARY_OP_FLOOR:
+ tensor_clone = ggml_floor(ggml_ctx, src_clone[0]);
+ break;
+ case GGML_UNARY_OP_TRUNC:
+ tensor_clone = ggml_trunc(ggml_ctx, src_clone[0]);
+ break;
default:
std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
GGML_ABORT("fatal error");
string_to_spv("abs_f16", "abs.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
string_to_spv("abs_f32", "abs.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("softplus_f16", "softplus.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
+ string_to_spv("softplus_f32", "softplus.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+
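+ // add1 uses the same A/B/D type convention as the other binary-op shaders;
+ // arange and fill are generated for f32 output only.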
+ string_to_spv("add1_f16_f16", "add1.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"FLOAT_TYPE", "float"}});
+ string_to_spv("add1_f16_f32", "add1.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float16_t"}, {"FLOAT_TYPE", "float"}});
+ string_to_spv("add1_f32_f32", "add1.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+ string_to_spv("arange_f32", "arange.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+ string_to_spv("fill_f32", "fill.comp", {{"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+ string_to_spv("step_f16", "step.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
+ string_to_spv("step_f32", "step.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("round_f16", "round.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
+ string_to_spv("round_f32", "round.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("ceil_f16", "ceil.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
+ string_to_spv("ceil_f32", "ceil.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("floor_f16", "floor.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
+ string_to_spv("floor_f32", "floor.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("trunc_f16", "trunc.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
+ string_to_spv("trunc_f32", "trunc.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+
for (auto rte : {false, true}) {
std::string suffix = rte ? "_rte" : "";
string_to_spv("geglu_f16" + suffix, "geglu.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"RTE16", rte ? "1" : "0"}});