From: Georgi Gerganov
Date: Tue, 30 May 2023 10:49:08 +0000 (+0300)
Subject: ggml : fix ggml op conv_1d enum names
X-Git-Tag: upstream/0.0.1642~1419
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=4078f637b078c5099da898d139b1c450258d869d;p=pkg%2Fggml%2Fsources%2Fggml

ggml : fix ggml op conv_1d enum names
---

diff --git a/include/ggml/ggml.h b/include/ggml/ggml.h
index 9e3604bc..e0c50c29 100644
--- a/include/ggml/ggml.h
+++ b/include/ggml/ggml.h
@@ -316,8 +316,8 @@ extern "C" {
         GGML_OP_ROPE_BACK,
         GGML_OP_ALIBI,
         GGML_OP_CLAMP,
-        GGML_OP_CONV_1D_1S,
-        GGML_OP_CONV_1D_2S,
+        GGML_OP_CONV_1D_S1_PH,
+        GGML_OP_CONV_1D_S2_PH,
 
         GGML_OP_FLASH_ATTN,
         GGML_OP_FLASH_FF,
diff --git a/src/ggml.c b/src/ggml.c
index 0080c3b5..bff058cc 100644
--- a/src/ggml.c
+++ b/src/ggml.c
@@ -3545,8 +3545,8 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
     "ROPE_BACK",
     "ALIBI",
     "CLAMP",
-    "CONV_1D_1S",
-    "CONV_1D_2S",
+    "CONV_1D_S1_PH",
+    "CONV_1D_S2_PH",
 
     "FLASH_ATTN",
     "FLASH_FF",
@@ -6412,7 +6412,7 @@ struct ggml_tensor * ggml_conv_1d_s1_ph(
     const int64_t ne[4] = { b->ne[0], a->ne[2], 1, 1, };
     struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
 
-    result->op   = GGML_OP_CONV_1D_1S;
+    result->op   = GGML_OP_CONV_1D_S1_PH;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
     result->src0 = a;
     result->src1 = b;
@@ -6439,7 +6439,7 @@ struct ggml_tensor * ggml_conv_1d_s2_ph(
     const int64_t ne[4] = { b->ne[0]/2, a->ne[2], 1, 1, };
     struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
 
-    result->op   = GGML_OP_CONV_1D_2S;
+    result->op   = GGML_OP_CONV_1D_S2_PH;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
     result->src0 = a;
     result->src1 = b;
@@ -13062,11 +13062,11 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
             {
                 ggml_compute_forward_clamp(params, tensor->src0, tensor->src1, tensor);
             } break;
-        case GGML_OP_CONV_1D_1S:
+        case GGML_OP_CONV_1D_S1_PH:
             {
                 ggml_compute_forward_conv_1d_s1_ph(params, tensor->src0, tensor->src1, tensor);
             } break;
-        case GGML_OP_CONV_1D_2S:
+        case GGML_OP_CONV_1D_S2_PH:
             {
                 ggml_compute_forward_conv_1d_s2_ph(params, tensor->src0, tensor->src1, tensor);
             } break;
@@ -13759,11 +13759,11 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                     // noop
                 }
             } break;
-        case GGML_OP_CONV_1D_1S:
+        case GGML_OP_CONV_1D_S1_PH:
            {
                 GGML_ASSERT(false); // TODO: not implemented
             } break;
-        case GGML_OP_CONV_1D_2S:
+        case GGML_OP_CONV_1D_S2_PH:
             {
                 GGML_ASSERT(false); // TODO: not implemented
             } break;
@@ -14271,8 +14271,8 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
                     {
                         node->n_tasks = 1; //TODO
                     } break;
-                case GGML_OP_CONV_1D_1S:
-                case GGML_OP_CONV_1D_2S:
+                case GGML_OP_CONV_1D_S1_PH:
+                case GGML_OP_CONV_1D_S2_PH:
                     {
                         node->n_tasks = n_threads;
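
---

Below is a minimal usage sketch, not part of the commit, showing where the renamed enum values surface: they are assigned internally by ggml_conv_1d_s1_ph() and ggml_conv_1d_s2_ph(), so callers only observe them on result->op. The tensor shapes, memory size, and thread count are illustrative assumptions; the graph API used (ggml_build_forward returning a cgraph by value, the cgraph n_threads field) is the one from this era of ggml.

    #include <assert.h>
    #include <stdio.h>

    #include "ggml/ggml.h"

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 64*1024*1024, // arbitrary scratch size
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        // kernel a: [K = 3, IC = 8, OC = 16]; input b: [T = 100, IC = 8]
        struct ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 3, 8, 16);
        struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 100, 8);

        // stride 1, half padding -> output [T, OC]
        struct ggml_tensor * c1 = ggml_conv_1d_s1_ph(ctx, a, b);
        // stride 2, half padding -> output [T/2, OC]
        struct ggml_tensor * c2 = ggml_conv_1d_s2_ph(ctx, a, b);

        // the graph nodes now carry the renamed ops
        assert(c1->op == GGML_OP_CONV_1D_S1_PH);
        assert(c2->op == GGML_OP_CONV_1D_S2_PH);

        struct ggml_cgraph gf = ggml_build_forward(c1);
        gf.n_threads = 4;
        ggml_graph_compute(ctx, &gf);

        printf("c1 shape: [%lld, %lld]\n", (long long) c1->ne[0], (long long) c1->ne[1]);

        ggml_free(ctx);
        return 0;
    }

Note that the rename is purely internal: the public entry points keep their existing names (ggml_conv_1d_s1_ph, ggml_conv_1d_s2_ph), and the enum values now match them, encoding stride (S1/S2) and half padding (PH).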