GGML_UNARY_OP_EXP,
GGML_UNARY_OP_GELU_ERF,
GGML_UNARY_OP_XIELU,
+ GGML_UNARY_OP_FLOOR,
+ GGML_UNARY_OP_CEIL,
+ GGML_UNARY_OP_ROUND,
+ GGML_UNARY_OP_TRUNC,
GGML_UNARY_OP_COUNT,
};
struct ggml_context * ctx,
struct ggml_tensor * a);
+ // floor of each element of a (elementwise floorf)
+ GGML_API struct ggml_tensor * ggml_floor(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a);
+
+ GGML_API struct ggml_tensor * ggml_floor_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a);
+
+ // ceiling of each element of a (elementwise ceilf)
+ GGML_API struct ggml_tensor * ggml_ceil(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a);
+
+ GGML_API struct ggml_tensor * ggml_ceil_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a);
+
+ // round each element of a to the nearest integer (elementwise roundf, halfway cases away from zero)
+ GGML_API struct ggml_tensor * ggml_round(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a);
+
+ GGML_API struct ggml_tensor * ggml_round_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a);
+
+ // truncate each element of a towards zero, dropping the fractional part
+ // (elementwise truncf); e.g. trunc(3.7) = 3.0, trunc(-2.9) = -2.0
+ GGML_API struct ggml_tensor * ggml_trunc(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a);
+
+ GGML_API struct ggml_tensor * ggml_trunc_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a);
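+
+ // usage sketch (illustrative only; `ctx` and `a` are assumed to be an
+ // initialized ggml context and an F32 tensor owned by the caller):
+ //
+ //   struct ggml_tensor * f = ggml_floor(ctx, a); // elementwise floorf
+ //   struct ggml_tensor * c = ggml_ceil (ctx, a); // elementwise ceilf
+ //   struct ggml_tensor * r = ggml_round(ctx, a); // elementwise roundf
+ //   struct ggml_tensor * t = ggml_trunc(ctx, a); // elementwise truncf
+ //
+ // the *_inplace variants return a view of `a` and overwrite its data when
+ // the graph is computed, instead of allocating a new tensor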
+
// xIELU activation function
// x = x * (c_a(alpha_n) + c_b(alpha_p, beta) * sigmoid(beta * x)) + eps * (x > 0)
// where c_a = softplus and c_b(a, b) = softplus(a) + b are constraining functions
case GGML_UNARY_OP_HARDSWISH:
case GGML_UNARY_OP_HARDSIGMOID:
case GGML_UNARY_OP_EXP:
+ case GGML_UNARY_OP_FLOOR:
+ case GGML_UNARY_OP_CEIL:
+ case GGML_UNARY_OP_ROUND:
+ case GGML_UNARY_OP_TRUNC:
{
n_tasks = 1;
} break;
{
ggml_compute_forward_exp(params, dst);
} break;
+ case GGML_UNARY_OP_FLOOR:
+ {
+ ggml_compute_forward_floor(params, dst);
+ } break;
+ case GGML_UNARY_OP_CEIL:
+ {
+ ggml_compute_forward_ceil(params, dst);
+ } break;
+ case GGML_UNARY_OP_ROUND:
+ {
+ ggml_compute_forward_round(params, dst);
+ } break;
+ case GGML_UNARY_OP_TRUNC:
+ {
+ ggml_compute_forward_trunc(params, dst);
+ } break;
case GGML_UNARY_OP_XIELU:
{
ggml_compute_forward_xielu(params, dst);
return logf(x);
}
+
+static inline float op_floor(float x) {
+ return floorf(x);
+}
+
+static inline float op_ceil(float x) {
+ return ceilf(x);
+}
+
+static inline float op_round(float x) {
+ return roundf(x);
+}
+
+static inline float op_trunc(float x) {
+ return truncf(x);
+}
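+
+// NOTE (descriptive, no behavior change): the four helpers above are thin
+// wrappers over the libm rounding functions; they are applied elementwise by
+// the unary_op<>/vec_unary_op<> machinery below, which converts source values
+// to F32 via type_conversion_table before calling the scalar op, e.g.
+//
+//   unary_op<op_floor>(params, dst); // what ggml_compute_forward_floor does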
+
template <float (*op)(float), typename src0_t, typename dst_t>
static inline void vec_unary_op(int64_t n, dst_t * y, const src0_t * x) {
constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32;
unary_op<op_log>(params, dst);
}
+
+void ggml_compute_forward_floor(const ggml_compute_params * params, ggml_tensor * dst) {
+ unary_op<op_floor>(params, dst);
+}
+
+void ggml_compute_forward_ceil(const ggml_compute_params * params, ggml_tensor * dst) {
+ unary_op<op_ceil>(params, dst);
+}
+
+void ggml_compute_forward_round(const ggml_compute_params * params, ggml_tensor * dst) {
+ unary_op<op_round>(params, dst);
+}
+
+void ggml_compute_forward_trunc(const ggml_compute_params * params, ggml_tensor * dst) {
+ unary_op<op_trunc>(params, dst);
+}
+
void ggml_compute_forward_xielu(const ggml_compute_params * params, ggml_tensor * dst) {
const float alpha_n = ggml_get_op_params_f32(dst, 1);
const float alpha_p = ggml_get_op_params_f32(dst, 2);
void ggml_compute_forward_sin(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_cos(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_log(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+void ggml_compute_forward_floor(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+void ggml_compute_forward_ceil(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+void ggml_compute_forward_round(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+void ggml_compute_forward_trunc(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_xielu(const struct ggml_compute_params * params, struct ggml_tensor * dst);
#ifdef __cplusplus
"EXP",
"GELU_ERF",
"XIELU",
+ "FLOOR",
+ "CEIL",
+ "ROUND",
+ "TRUNC",
};
-static_assert(GGML_UNARY_OP_COUNT == 16, "GGML_UNARY_OP_COUNT != 16");
+static_assert(GGML_UNARY_OP_COUNT == 20, "GGML_UNARY_OP_COUNT != 20");
static const char * GGML_GLU_OP_NAME[GGML_GLU_OP_COUNT] = {
"REGLU",
return result;
}
+
+// ggml_floor
+
+struct ggml_tensor * ggml_floor(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_unary(ctx, a, GGML_UNARY_OP_FLOOR);
+}
+
+struct ggml_tensor * ggml_floor_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_FLOOR);
+}
+
+// ggml_ceil
+
+struct ggml_tensor * ggml_ceil(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_unary(ctx, a, GGML_UNARY_OP_CEIL);
+}
+
+struct ggml_tensor * ggml_ceil_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_CEIL);
+}
+
+// ggml_round
+
+struct ggml_tensor * ggml_round(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_unary(ctx, a, GGML_UNARY_OP_ROUND);
+}
+
+struct ggml_tensor * ggml_round_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ROUND);
+}
+
+// ggml_trunc
+
+struct ggml_tensor * ggml_trunc(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_unary(ctx, a, GGML_UNARY_OP_TRUNC);
+}
+
+struct ggml_tensor * ggml_trunc_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_TRUNC);
+}
+
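+// illustrative end-to-end sketch (assumes the CPU backend and that
+// ggml_graph_compute_with_ctx is available, e.g. via ggml-cpu.h):
+//
+//   struct ggml_init_params ip = { /*.mem_size =*/ 16*1024*1024, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false };
+//   struct ggml_context * ctx = ggml_init(ip);
+//   struct ggml_tensor  * x   = ggml_set_f32(ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4), -2.9f);
+//   struct ggml_tensor  * y   = ggml_trunc(ctx, x);   // each element: -2.9f -> -2.0f
+//   struct ggml_cgraph  * gf  = ggml_new_graph(ctx);
+//   ggml_build_forward_expand(gf, y);
+//   ggml_graph_compute_with_ctx(ctx, gf, /*n_threads=*/ 1);
+//   ggml_free(ctx);
+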
struct ggml_tensor * ggml_glu(
struct ggml_context * ctx,
struct ggml_tensor * a,