/*.op =*/ GGML_OP_NONE,
/*.is_param =*/ false,
/*.grad =*/ NULL,
- /*.src0 =*/ NULL,
- /*.src1 =*/ NULL,
- /*.opt =*/ { NULL },
+ /*.src =*/ { NULL },
/*.perf_runs =*/ 0,
/*.perf_cycles =*/ 0,
/*.perf_time_us =*/ 0,
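// the per-tensor operand fields are unified in this change: the separate
// src0/src1 pointers and the opt[GGML_MAX_OPT] array become one
// src[GGML_MAX_SRC] array. A minimal sketch of the assumed struct change
// (GGML_MAX_SRC taken to be 2 + GGML_MAX_OPT so every old slot has a home):
//
//   struct ggml_tensor * src0;                 // before
//   struct ggml_tensor * src1;
//   struct ggml_tensor * opt[GGML_MAX_OPT];
//
//   struct ggml_tensor * src[GGML_MAX_SRC];    // after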
result->op = GGML_OP_DUP;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_ADD;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_ADD1;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_ACC;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
- result->opt[0] = c;
+ result->src[0] = a;
+ result->src[1] = b;
+ result->src[2] = c;
return result;
}
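// convention used throughout this change: the two primary operands keep
// slots src[0]/src[1], and what was opt[i] moves to src[2 + i] (here the
// params tensor c lands in src[2]).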
result->op = GGML_OP_SUB;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_MUL;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_DIV;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_SQR;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_SQRT;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_LOG;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_SUM;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_SUM_ROWS;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_MEAN;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_ARGMAX;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_REPEAT;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_REPEAT_BACK;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_ABS;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_SGN;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_NEG;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_STEP;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_TANH;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_ELU;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_RELU;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_GELU;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_GELU_QUICK;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_SILU;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_SILU_BACK;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_NORM;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL; // TODO: maybe store epsilon here?
+ result->src[0] = a;
+ result->src[1] = NULL; // TODO: maybe store epsilon here?
return result;
}
result->op = GGML_OP_RMS_NORM;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL; // TODO: maybe store epsilon here?
+ result->src[0] = a;
+ result->src[1] = NULL; // TODO: maybe store epsilon here?
return result;
}
result->op = GGML_OP_RMS_NORM_BACK;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_MUL_MAT;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_OUT_PROD;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_SCALE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_SET;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
- result->opt[0] = c;
+ result->src[0] = a;
+ result->src[1] = b;
+ result->src[2] = c;
return result;
}
result->op = GGML_OP_CPY;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_CONT;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_RESHAPE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_RESHAPE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_RESHAPE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_RESHAPE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_RESHAPE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_VIEW;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
- result->opt[0] = offs;
+ result->src[0] = a;
+ result->src[1] = NULL;
+ result->src[2] = offs;
return result;
}
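// views have no second operand, so src[1] stays NULL and the offset tensor
// offs keeps its old opt[0] position at src[2]; slots mirror the old layout
// rather than being compacted down.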
result->op = GGML_OP_VIEW;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
- result->opt[0] = offs;
+ result->src[0] = a;
+ result->src[1] = NULL;
+ result->src[2] = offs;
return result;
}
result->op = GGML_OP_VIEW;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
- result->opt[0] = offs;
+ result->src[0] = a;
+ result->src[1] = NULL;
+ result->src[2] = offs;
return result;
}
result->op = GGML_OP_VIEW;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
- result->opt[0] = offs;
+ result->src[0] = a;
+ result->src[1] = NULL;
+ result->src[2] = offs;
return result;
}
result->op = GGML_OP_PERMUTE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
if (is_node) {
ggml_scratch_save(ctx);
ggml_scratch_load(ctx);
- result->opt[0] = b;
+ result->src[2] = b;
}
return result;
}
result->op = GGML_OP_TRANSPOSE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_GET_ROWS;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_GET_ROWS_BACK;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
- result->opt[0] = c;
+ result->src[0] = a;
+ result->src[1] = b;
+ result->src[2] = c;
return result;
}
result->op = GGML_OP_DIAG;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_DIAG_MASK_INF;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_DIAG_MASK_ZERO;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_SOFT_MAX;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
+ result->src[0] = a;
+ result->src[1] = NULL;
return result;
}
result->op = GGML_OP_SOFT_MAX_BACK;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_ROPE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_ROPE_BACK;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_ALIBI;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_CLAMP;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_CONV_1D;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
- result->opt[0] = c;
+ result->src[0] = a;
+ result->src[1] = b;
+ result->src[2] = c;
return result;
}
result->op = GGML_OP_CONV_2D;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
- result->opt[0] = c;
+ result->src[0] = a;
+ result->src[1] = b;
+ result->src[2] = c;
return result;
}
result->op = GGML_OP_FLASH_ATTN;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = q;
- result->src1 = k;
- result->opt[0] = v;
- result->opt[1] = ggml_new_i32(ctx, masked ? 1 : 0);
+ result->src[0] = q;
+ result->src[1] = k;
+ result->src[2] = v;
+ result->src[3] = ggml_new_i32(ctx, masked ? 1 : 0);
return result;
}
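// the masked flag travels as a 1-element I32 tensor in src[3] and is read
// back with ggml_get_i32_1d() at dispatch time (see the GGML_OP_FLASH_ATTN
// case below).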
result->op = GGML_OP_FLASH_FF;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b0;
- result->opt[0] = b1;
- result->opt[1] = c0;
- result->opt[2] = c1;
+ result->src[0] = a;
+ result->src[1] = b0;
+ result->src[2] = b1;
+ result->src[3] = c0;
+ result->src[4] = c1;
return result;
}
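// flash_ff is the widest consumer of the array: a, b0, b1, c0, c1 occupy
// src[0..4], which presumably pins the minimum value of GGML_MAX_SRC at 5.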
result->op = GGML_OP_FLASH_ATTN_BACK;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = q;
- result->src1 = k;
- result->opt[0] = v;
- result->opt[1] = d;
- result->opt[2] = ggml_new_i32(ctx, masked ? 1 : 0);
+ result->src[0] = q;
+ result->src[1] = k;
+ result->src[2] = v;
+ result->src[3] = d;
+ result->src[4] = ggml_new_i32(ctx, masked ? 1 : 0);
return result;
}
result->op = GGML_OP_WIN_PART;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
- result->opt[0] = b;
+ result->src[0] = a;
+ result->src[1] = NULL;
+ result->src[2] = b;
return result;
}
result->op = GGML_OP_WIN_UNPART;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = NULL;
- result->opt[0] = b;
+ result->src[0] = a;
+ result->src[1] = NULL;
+ result->src[2] = b;
return result;
}
result->op = GGML_OP_MAP_UNARY;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->opt[0] = addr_tensor;
+ result->src[0] = a;
+ result->src[2] = addr_tensor;
return result;
}
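// unary maps leave src[1] unset; the tensor holding the callback address
// keeps the old opt[0] slot at src[2], matching the binary/custom variants
// below.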
result->op = GGML_OP_MAP_BINARY;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
- result->opt[0] = addr_tensor;
+ result->src[0] = a;
+ result->src[1] = b;
+ result->src[2] = addr_tensor;
return result;
}
result->op = GGML_OP_MAP_CUSTOM1;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->opt[0] = addr_tensor;
+ result->src[0] = a;
+ result->src[2] = addr_tensor;
return result;
}
result->op = GGML_OP_MAP_CUSTOM2;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
- result->opt[0] = addr_tensor;
+ result->src[0] = a;
+ result->src[1] = b;
+ result->src[2] = addr_tensor;
return result;
}
result->op = GGML_OP_MAP_CUSTOM3;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
- result->opt[0] = addr_tensor;
- result->opt[1] = c;
+ result->src[0] = a;
+ result->src[1] = b;
+ result->src[2] = addr_tensor;
+ result->src[3] = c;
return result;
}
result->op = GGML_OP_CROSS_ENTROPY_LOSS;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
- result->src0 = a;
- result->src1 = b;
+ result->src[0] = a;
+ result->src[1] = b;
return result;
}
result->op = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
result->grad = NULL;
- result->src0 = a;
- result->src1 = b;
- result->opt[0] = c;
+ result->src[0] = a;
+ result->src[1] = b;
+ result->src[2] = c;
return result;
}
if (skip_cpu) {
return;
}
- GGML_ASSERT(tensor->src0 == NULL || tensor->src0->backend == GGML_BACKEND_CPU);
- GGML_ASSERT(tensor->src1 == NULL || tensor->src1->backend == GGML_BACKEND_CPU);
+ GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU);
+ GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU);
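// note: only the first two sources are asserted CPU-resident here,
// presumably because the extra src[2..] operands are small parameter
// tensors allocated on the CPU by construction.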
#endif // GGML_USE_CUBLAS
switch (tensor->op) {
case GGML_OP_DUP:
{
- ggml_compute_forward_dup(params, tensor->src0, tensor);
+ ggml_compute_forward_dup(params, tensor->src[0], tensor);
} break;
case GGML_OP_ADD:
{
- ggml_compute_forward_add(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_add(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_ADD1:
{
- ggml_compute_forward_add1(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_add1(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_ACC:
{
- ggml_compute_forward_acc(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
+ ggml_compute_forward_acc(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
} break;
case GGML_OP_SUB:
{
- ggml_compute_forward_sub(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_sub(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_MUL:
{
- ggml_compute_forward_mul(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_mul(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_DIV:
{
- ggml_compute_forward_div(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_div(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_SQR:
{
- ggml_compute_forward_sqr(params, tensor->src0, tensor);
+ ggml_compute_forward_sqr(params, tensor->src[0], tensor);
} break;
case GGML_OP_SQRT:
{
- ggml_compute_forward_sqrt(params, tensor->src0, tensor);
+ ggml_compute_forward_sqrt(params, tensor->src[0], tensor);
} break;
case GGML_OP_LOG:
{
- ggml_compute_forward_log(params, tensor->src0, tensor);
+ ggml_compute_forward_log(params, tensor->src[0], tensor);
} break;
case GGML_OP_SUM:
{
- ggml_compute_forward_sum(params, tensor->src0, tensor);
+ ggml_compute_forward_sum(params, tensor->src[0], tensor);
} break;
case GGML_OP_SUM_ROWS:
{
- ggml_compute_forward_sum_rows(params, tensor->src0, tensor);
+ ggml_compute_forward_sum_rows(params, tensor->src[0], tensor);
} break;
case GGML_OP_MEAN:
{
- ggml_compute_forward_mean(params, tensor->src0, tensor);
+ ggml_compute_forward_mean(params, tensor->src[0], tensor);
} break;
case GGML_OP_ARGMAX:
{
- ggml_compute_forward_argmax(params, tensor->src0, tensor);
+ ggml_compute_forward_argmax(params, tensor->src[0], tensor);
} break;
case GGML_OP_REPEAT:
{
- ggml_compute_forward_repeat(params, tensor->src0, tensor);
+ ggml_compute_forward_repeat(params, tensor->src[0], tensor);
} break;
case GGML_OP_REPEAT_BACK:
{
- ggml_compute_forward_repeat_back(params, tensor->src0, tensor);
+ ggml_compute_forward_repeat_back(params, tensor->src[0], tensor);
} break;
case GGML_OP_ABS:
{
- ggml_compute_forward_abs(params, tensor->src0, tensor);
+ ggml_compute_forward_abs(params, tensor->src[0], tensor);
} break;
case GGML_OP_SGN:
{
- ggml_compute_forward_sgn(params, tensor->src0, tensor);
+ ggml_compute_forward_sgn(params, tensor->src[0], tensor);
} break;
case GGML_OP_NEG:
{
- ggml_compute_forward_neg(params, tensor->src0, tensor);
+ ggml_compute_forward_neg(params, tensor->src[0], tensor);
} break;
case GGML_OP_STEP:
{
- ggml_compute_forward_step(params, tensor->src0, tensor);
+ ggml_compute_forward_step(params, tensor->src[0], tensor);
} break;
case GGML_OP_TANH:
{
- ggml_compute_forward_tanh(params, tensor->src0, tensor);
+ ggml_compute_forward_tanh(params, tensor->src[0], tensor);
} break;
case GGML_OP_ELU:
{
- ggml_compute_forward_elu(params, tensor->src0, tensor);
+ ggml_compute_forward_elu(params, tensor->src[0], tensor);
} break;
case GGML_OP_RELU:
{
- ggml_compute_forward_relu(params, tensor->src0, tensor);
+ ggml_compute_forward_relu(params, tensor->src[0], tensor);
} break;
case GGML_OP_GELU:
{
- ggml_compute_forward_gelu(params, tensor->src0, tensor);
+ ggml_compute_forward_gelu(params, tensor->src[0], tensor);
} break;
case GGML_OP_GELU_QUICK:
{
- ggml_compute_forward_gelu_quick(params, tensor->src0, tensor);
+ ggml_compute_forward_gelu_quick(params, tensor->src[0], tensor);
} break;
case GGML_OP_SILU:
{
- ggml_compute_forward_silu(params, tensor->src0, tensor);
+ ggml_compute_forward_silu(params, tensor->src[0], tensor);
} break;
case GGML_OP_SILU_BACK:
{
- ggml_compute_forward_silu_back(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_silu_back(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_NORM:
{
- ggml_compute_forward_norm(params, tensor->src0, tensor);
+ ggml_compute_forward_norm(params, tensor->src[0], tensor);
} break;
case GGML_OP_RMS_NORM:
{
- ggml_compute_forward_rms_norm(params, tensor->src0, tensor);
+ ggml_compute_forward_rms_norm(params, tensor->src[0], tensor);
} break;
case GGML_OP_RMS_NORM_BACK:
{
- ggml_compute_forward_rms_norm_back(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_rms_norm_back(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_MUL_MAT:
{
- ggml_compute_forward_mul_mat(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_OUT_PROD:
{
- ggml_compute_forward_out_prod(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_out_prod(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_SCALE:
{
- ggml_compute_forward_scale(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_scale(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_SET:
{
- ggml_compute_forward_set(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
+ ggml_compute_forward_set(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
} break;
case GGML_OP_CPY:
{
- ggml_compute_forward_cpy(params, tensor->src0, tensor);
+ ggml_compute_forward_cpy(params, tensor->src[0], tensor);
} break;
case GGML_OP_CONT:
{
- ggml_compute_forward_cont(params, tensor->src0, tensor);
+ ggml_compute_forward_cont(params, tensor->src[0], tensor);
} break;
case GGML_OP_RESHAPE:
{
- ggml_compute_forward_reshape(params, tensor->src0, tensor);
+ ggml_compute_forward_reshape(params, tensor->src[0], tensor);
} break;
case GGML_OP_VIEW:
{
- ggml_compute_forward_view(params, tensor->src0);
+ ggml_compute_forward_view(params, tensor->src[0]);
} break;
case GGML_OP_PERMUTE:
{
- ggml_compute_forward_permute(params, tensor->src0);
+ ggml_compute_forward_permute(params, tensor->src[0]);
} break;
case GGML_OP_TRANSPOSE:
{
- ggml_compute_forward_transpose(params, tensor->src0);
+ ggml_compute_forward_transpose(params, tensor->src[0]);
} break;
case GGML_OP_GET_ROWS:
{
- ggml_compute_forward_get_rows(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_get_rows(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_GET_ROWS_BACK:
{
- ggml_compute_forward_get_rows_back(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
+ ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
} break;
case GGML_OP_DIAG:
{
- ggml_compute_forward_diag(params, tensor->src0, tensor);
+ ggml_compute_forward_diag(params, tensor->src[0], tensor);
} break;
case GGML_OP_DIAG_MASK_INF:
{
- ggml_compute_forward_diag_mask_inf(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_diag_mask_inf(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_DIAG_MASK_ZERO:
{
- ggml_compute_forward_diag_mask_zero(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_diag_mask_zero(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_SOFT_MAX:
{
- ggml_compute_forward_soft_max(params, tensor->src0, tensor);
+ ggml_compute_forward_soft_max(params, tensor->src[0], tensor);
} break;
case GGML_OP_SOFT_MAX_BACK:
{
- ggml_compute_forward_soft_max_back(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_soft_max_back(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_ROPE:
{
- ggml_compute_forward_rope(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_rope(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_ROPE_BACK:
{
- ggml_compute_forward_rope_back(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_rope_back(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_ALIBI:
{
- ggml_compute_forward_alibi(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_alibi(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_CLAMP:
{
- ggml_compute_forward_clamp(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_clamp(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_CONV_1D:
{
- ggml_compute_forward_conv_1d(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
+ ggml_compute_forward_conv_1d(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
} break;
case GGML_OP_CONV_2D:
{
- ggml_compute_forward_conv_2d(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
+ ggml_compute_forward_conv_2d(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
} break;
case GGML_OP_FLASH_ATTN:
{
- const int32_t t = ggml_get_i32_1d(tensor->opt[1], 0);
+ const int32_t t = ggml_get_i32_1d(tensor->src[3], 0);
GGML_ASSERT(t == 0 || t == 1);
const bool masked = t != 0;
- ggml_compute_forward_flash_attn(params, tensor->src0, tensor->src1, tensor->opt[0], masked, tensor);
+ ggml_compute_forward_flash_attn(params, tensor->src[0], tensor->src[1], tensor->src[2], masked, tensor);
} break;
case GGML_OP_FLASH_FF:
{
- ggml_compute_forward_flash_ff(params, tensor->src0, tensor->src1, tensor->opt[0], tensor->opt[1], tensor->opt[2], tensor);
+ ggml_compute_forward_flash_ff(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor->src[4], tensor);
} break;
case GGML_OP_FLASH_ATTN_BACK:
{
- int32_t t = ggml_get_i32_1d(tensor->opt[2], 0);
+ int32_t t = ggml_get_i32_1d(tensor->src[4], 0);
GGML_ASSERT(t == 0 || t == 1);
bool masked = t != 0;
- ggml_compute_forward_flash_attn_back(params, tensor->src0, tensor->src1, tensor->opt[0], tensor->opt[1], masked, tensor);
+ ggml_compute_forward_flash_attn_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], masked, tensor);
} break;
case GGML_OP_WIN_PART:
{
- ggml_compute_forward_win_part(params, tensor->src0, tensor->opt[0], tensor);
+ ggml_compute_forward_win_part(params, tensor->src[0], tensor->src[2], tensor);
} break;
case GGML_OP_WIN_UNPART:
{
- ggml_compute_forward_win_unpart(params, tensor->src0, tensor->opt[0], tensor);
+ ggml_compute_forward_win_unpart(params, tensor->src[0], tensor->src[2], tensor);
} break;
case GGML_OP_MAP_UNARY:
{
- const ggml_unary_op_f32_t fun = *((ggml_unary_op_f32_t *)tensor->opt[0]->data);
- ggml_compute_forward_map_unary(params, tensor->src0, tensor, fun);
+ const ggml_unary_op_f32_t fun = *((ggml_unary_op_f32_t *)tensor->src[2]->data);
+ ggml_compute_forward_map_unary(params, tensor->src[0], tensor, fun);
}
break;
case GGML_OP_MAP_BINARY:
{
- const ggml_binary_op_f32_t fun = *((ggml_binary_op_f32_t *)tensor->opt[0]->data);
- ggml_compute_forward_map_binary(params, tensor->src0, tensor->src1, tensor, fun);
+ const ggml_binary_op_f32_t fun = *((ggml_binary_op_f32_t *)tensor->src[2]->data);
+ ggml_compute_forward_map_binary(params, tensor->src[0], tensor->src[1], tensor, fun);
}
break;
case GGML_OP_MAP_CUSTOM1:
{
- const ggml_custom1_op_f32_t fun = *((ggml_custom1_op_f32_t *)tensor->opt[0]->data);
- ggml_compute_forward_map_custom1(params, tensor->src0, tensor, fun);
+ const ggml_custom1_op_f32_t fun = *((ggml_custom1_op_f32_t *)tensor->src[2]->data);
+ ggml_compute_forward_map_custom1(params, tensor->src[0], tensor, fun);
}
break;
case GGML_OP_MAP_CUSTOM2:
{
- const ggml_custom2_op_f32_t fun = *((ggml_custom2_op_f32_t *)tensor->opt[0]->data);
- ggml_compute_forward_map_custom2(params, tensor->src0, tensor->src1, tensor, fun);
+ const ggml_custom2_op_f32_t fun = *((ggml_custom2_op_f32_t *)tensor->src[2]->data);
+ ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor, fun);
}
break;
case GGML_OP_MAP_CUSTOM3:
{
- const ggml_custom3_op_f32_t fun = *((ggml_custom3_op_f32_t *)tensor->opt[0]->data);
- ggml_compute_forward_map_custom3(params, tensor->src0, tensor->src1, tensor->opt[1], tensor, fun);
+ const ggml_custom3_op_f32_t fun = *((ggml_custom3_op_f32_t *)tensor->src[2]->data);
+ ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[3], tensor, fun);
}
break;
case GGML_OP_CROSS_ENTROPY_LOSS:
{
- ggml_compute_forward_cross_entropy_loss(params, tensor->src0, tensor->src1, tensor);
+ ggml_compute_forward_cross_entropy_loss(params, tensor->src[0], tensor->src[1], tensor);
}
break;
case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
{
- ggml_compute_forward_cross_entropy_loss_back(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
+ ggml_compute_forward_cross_entropy_loss_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
}
break;
case GGML_OP_NONE:
////////////////////////////////////////////////////////////////////////////////
static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) {
- struct ggml_tensor * src0 = tensor->src0;
- struct ggml_tensor * src1 = tensor->src1;
+ struct ggml_tensor * src0 = tensor->src[0];
+ struct ggml_tensor * src1 = tensor->src[1];
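// convenience aliases for the two primary operands; higher slots are still
// read directly as tensor->src[2..] in the cases below.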
switch (tensor->op) {
case GGML_OP_DUP:
src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
}
if (src1->grad) {
- GGML_ASSERT(ggml_nelements(tensor->opt[0]) == 5);
- GGML_ASSERT(tensor->opt[0]->type == GGML_TYPE_I32);
- const size_t nb1 = (( int32_t * ) tensor->opt[0]->data)[0];
- const size_t nb2 = (( int32_t * ) tensor->opt[0]->data)[1];
- const size_t nb3 = (( int32_t * ) tensor->opt[0]->data)[2];
- const size_t offset = (( int32_t * ) tensor->opt[0]->data)[3];
+ GGML_ASSERT(ggml_nelements(tensor->src[2]) == 5);
+ GGML_ASSERT(tensor->src[2]->type == GGML_TYPE_I32);
+ const size_t nb1 = (( int32_t * ) tensor->src[2]->data)[0];
+ const size_t nb2 = (( int32_t * ) tensor->src[2]->data)[1];
+ const size_t nb3 = (( int32_t * ) tensor->src[2]->data)[2];
+ const size_t offset = (( int32_t * ) tensor->src[2]->data)[3];
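// the params tensor in src[2] packs five int32 values; four are unpacked
// here (nb1, nb2, nb3, offset) and the fifth presumably carries the
// inplace flag.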
struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
tensor->grad,
} break;
case GGML_OP_SET:
{
- GGML_ASSERT(ggml_nelements(tensor->opt[0]) == 5);
- GGML_ASSERT(tensor->opt[0]->type == GGML_TYPE_I32);
- const size_t nb1 = (( int32_t * ) tensor->opt[0]->data)[0];
- const size_t nb2 = (( int32_t * ) tensor->opt[0]->data)[1];
- const size_t nb3 = (( int32_t * ) tensor->opt[0]->data)[2];
- const size_t offset = (( int32_t * ) tensor->opt[0]->data)[3];
+ GGML_ASSERT(ggml_nelements(tensor->src[2]) == 5);
+ GGML_ASSERT(tensor->src[2]->type == GGML_TYPE_I32);
+ const size_t nb1 = (( int32_t * ) tensor->src[2]->data)[0];
+ const size_t nb2 = (( int32_t * ) tensor->src[2]->data)[1];
+ const size_t nb3 = (( int32_t * ) tensor->src[2]->data)[2];
+ const size_t offset = (( int32_t * ) tensor->src[2]->data)[3];
struct ggml_tensor * tensor_grad_view = NULL;
if (src0->grad) {
size_t offset;
- GGML_ASSERT(sizeof(offset) <= ggml_nbytes(tensor->opt[0]));
- memcpy(&offset, tensor->opt[0]->data, sizeof(offset));
+ GGML_ASSERT(sizeof(offset) <= ggml_nbytes(tensor->src[2]));
+ memcpy(&offset, tensor->src[2]->data, sizeof(offset));
size_t nb1 = tensor->nb[1];
size_t nb2 = tensor->nb[2];
{
// necessary for llama
if (src0->grad) {
- int32_t * axes = (int32_t *) tensor->opt[0]->data;
+ int32_t * axes = (int32_t *) tensor->src[2]->data;
int axis0 = axes[0] & 0x3;
int axis1 = axes[1] & 0x3;
int axis2 = axes[2] & 0x3;
case GGML_OP_FLASH_ATTN:
{
struct ggml_tensor * flash_grad = NULL;
- if (src0->grad || src1->grad || tensor->opt[0]->grad) {
- int32_t t = ggml_get_i32_1d(tensor->opt[1], 0);
+ if (src0->grad || src1->grad || tensor->src[2]->grad) {
+ int32_t t = ggml_get_i32_1d(tensor->src[3], 0);
GGML_ASSERT(t == 0 || t == 1);
bool masked = t != 0;
flash_grad =
ggml_flash_attn_back(ctx,
src0,
src1,
- tensor->opt[0],
+ tensor->src[2],
tensor->grad,
masked);
}
inplace);
}
- struct ggml_tensor * opt0 = tensor->opt[0];
+ struct ggml_tensor * opt0 = tensor->src[2];
if (opt0->grad) {
struct ggml_tensor * grad_v = NULL;
}
}
- if (node->src0) {
- ggml_visit_parents(cgraph, node->src0);
- }
-
- if (node->src1) {
- ggml_visit_parents(cgraph, node->src1);
- }
-
- for (int i = 0; i < GGML_MAX_OPT; ++i) {
- if (node->opt[i]) {
- ggml_visit_parents(cgraph, node->opt[i]);
+ for (int i = 0; i < GGML_MAX_SRC; ++i) {
+ if (node->src[i]) {
+ ggml_visit_parents(cgraph, node->src[i]);
}
}
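// one loop over src[] replaces the previous three traversals (src0, src1,
// then each opt[i]); given the opt[i] -> src[2 + i] mapping, parent
// visitation order is preserved.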
size_t cur = 0;
- if (ggml_is_quantized(node->src0->type)) {
- cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->src0->ne[0] * n_tasks;
+ if (ggml_is_quantized(node->src[0]->type)) {
+ cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->src[0]->ne[0] * n_tasks;
}
work_size = MAX(work_size, cur);
size_t cur = 0;
- if (ggml_is_quantized(node->src0->type)) {
- cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->src1->ne[0] * n_tasks;
+ if (ggml_is_quantized(node->src[0]->type)) {
+ cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->src[1]->ne[0] * n_tasks;
}
work_size = MAX(work_size, cur);
n_tasks = n_threads;
// TODO: use different scheduling for different matrix sizes
- //const int nr0 = ggml_nrows(node->src0);
- //const int nr1 = ggml_nrows(node->src1);
+ //const int nr0 = ggml_nrows(node->src[0]);
+ //const int nr1 = ggml_nrows(node->src[1]);
//n_tasks = MIN(n_threads, MAX(1, nr0/128));
//printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);
size_t cur = 0;
- const enum ggml_type vec_dot_type = type_traits[node->src0->type].vec_dot_type;
+ const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;
#if defined(GGML_USE_CUBLAS)
- if (ggml_cuda_can_mul_mat(node->src0, node->src1, node)) {
+ if (ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) {
n_tasks = 1; // TODO: this actually is doing nothing
// the threads are still spinning
} else
#elif defined(GGML_USE_CLBLAST)
- if (ggml_cl_can_mul_mat(node->src0, node->src1, node)) {
+ if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
n_tasks = 1; // TODO: this actually is doing nothing
// the threads are still spinning
- cur = ggml_cl_mul_mat_get_wsize(node->src0, node->src1, node);
+ cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
} else
#endif
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
- if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
+ if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
n_tasks = 1; // TODO: this actually is doing nothing
// the threads are still spinning
- if (node->src0->type != GGML_TYPE_F32) {
+ if (node->src[0]->type != GGML_TYPE_F32) {
// here we need memory just for single 2D matrix from src0
- cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
+ cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src[0]->ne[0]*node->src[0]->ne[1]);
}
} else
#endif
- if (node->src1->type != vec_dot_type) {
- cur = GGML_TYPE_SIZE[vec_dot_type]*ggml_nelements(node->src1)/GGML_BLCK_SIZE[vec_dot_type];
+ if (node->src[1]->type != vec_dot_type) {
+ cur = GGML_TYPE_SIZE[vec_dot_type]*ggml_nelements(node->src[1])/GGML_BLCK_SIZE[vec_dot_type];
} else {
cur = 0;
}
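// when src[1] does not already match the vec_dot_type of src[0], the work
// buffer must hold a converted copy of src[1], hence the size estimate
// above.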
{
n_tasks = n_threads;
- GGML_ASSERT(node->src0->ne[3] == 1);
- GGML_ASSERT(node->src1->ne[2] == 1);
- GGML_ASSERT(node->src1->ne[3] == 1);
+ GGML_ASSERT(node->src[0]->ne[3] == 1);
+ GGML_ASSERT(node->src[1]->ne[2] == 1);
+ GGML_ASSERT(node->src[1]->ne[3] == 1);
size_t cur = 0;
- const int nk = node->src0->ne[0];
+ const int nk = node->src[0]->ne[0];
- if (node->src0->type == GGML_TYPE_F16 &&
- node->src1->type == GGML_TYPE_F32) {
+ if (node->src[0]->type == GGML_TYPE_F16 &&
+ node->src[1]->type == GGML_TYPE_F32) {
cur = sizeof(ggml_fp16_t)*(
- nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] +
- ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1]
+ nk*ggml_up32(node->src[0]->ne[1])*node->src[0]->ne[2] +
+ ( 2*(nk/2) + node->src[1]->ne[0])*node->src[1]->ne[1]
);
- } else if (node->src0->type == GGML_TYPE_F32 &&
- node->src1->type == GGML_TYPE_F32) {
+ } else if (node->src[0]->type == GGML_TYPE_F32 &&
+ node->src[1]->type == GGML_TYPE_F32) {
cur = sizeof(float)*(
- nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] +
- ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1]
+ nk*ggml_up32(node->src[0]->ne[1])*node->src[0]->ne[2] +
+ ( 2*(nk/2) + node->src[1]->ne[0])*node->src[1]->ne[1]
);
} else {
GGML_ASSERT(false);
{
n_tasks = n_threads;
- GGML_ASSERT(node->src1->ne[3] == 1);
+ GGML_ASSERT(node->src[1]->ne[3] == 1);
- const int64_t ne00 = node->src0->ne[0]; // W
- const int64_t ne01 = node->src0->ne[1]; // H
- const int64_t ne02 = node->src0->ne[2]; // C
- const int64_t ne03 = node->src0->ne[3]; // N
+ const int64_t ne00 = node->src[0]->ne[0]; // W
+ const int64_t ne01 = node->src[0]->ne[1]; // H
+ const int64_t ne02 = node->src[0]->ne[2]; // C
+ const int64_t ne03 = node->src[0]->ne[3]; // N
- const int64_t ne10 = node->src1->ne[0]; // W
- const int64_t ne11 = node->src1->ne[1]; // H
- const int64_t ne12 = node->src1->ne[2]; // C
+ const int64_t ne10 = node->src[1]->ne[0]; // W
+ const int64_t ne11 = node->src[1]->ne[1]; // H
+ const int64_t ne12 = node->src[1]->ne[2]; // C
const int64_t nk = ne00*ne01;
size_t cur = 0;
- if (node->src0->type == GGML_TYPE_F16 &&
- node->src1->type == GGML_TYPE_F32) {
+ if (node->src[0]->type == GGML_TYPE_F16 &&
+ node->src[1]->type == GGML_TYPE_F32) {
cur = sizeof(ggml_fp16_t)*(ne10*ne11*ne12);
- } else if (node->src0->type == GGML_TYPE_F32 &&
- node->src1->type == GGML_TYPE_F32) {
+ } else if (node->src[0]->type == GGML_TYPE_F32 &&
+ node->src[1]->type == GGML_TYPE_F32) {
cur = sizeof(float)* (ne10*ne11*ne12);
} else {
GGML_ASSERT(false);
size_t cur = 0;
- const int64_t ne11 = ggml_up(node->src1->ne[1], GGML_SOFT_MAX_UNROLL);
+ const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
- if (node->src1->type == GGML_TYPE_F32) {
+ if (node->src[1]->type == GGML_TYPE_F32) {
cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
}
- if (node->src1->type == GGML_TYPE_F16) {
+ if (node->src[1]->type == GGML_TYPE_F16) {
cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
}
size_t cur = 0;
- if (node->src1->type == GGML_TYPE_F32) {
- cur = sizeof(float)*node->src1->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
- cur += sizeof(float)*node->src1->ne[1]*n_tasks; // this is overestimated by x2
+ if (node->src[1]->type == GGML_TYPE_F32) {
+ cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
+ cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
}
- if (node->src1->type == GGML_TYPE_F16) {
- cur = sizeof(float)*node->src1->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
- cur += sizeof(float)*node->src1->ne[1]*n_tasks; // this is overestimated by x2
+ if (node->src[1]->type == GGML_TYPE_F16) {
+ cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
+ cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
}
work_size = MAX(work_size, cur);
size_t cur = 0;
- const int64_t D = node->src0->ne[0];
- const int64_t ne11 = ggml_up(node->src1->ne[1], GGML_SOFT_MAX_UNROLL);
+ const int64_t D = node->src[0]->ne[0];
+ const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
- if (node->src1->type == GGML_TYPE_F32) {
+ if (node->src[1]->type == GGML_TYPE_F32) {
cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
}
- if (node->src1->type == GGML_TYPE_F16) {
+ if (node->src[1]->type == GGML_TYPE_F16) {
cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
}
{
n_tasks = n_threads;
- size_t cur = ggml_type_size(node->type)*(n_tasks + node->src0->ne[0]*n_tasks);
+ size_t cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
work_size = MAX(work_size, cur);
} break;
{
n_tasks = n_threads;
- size_t cur = ggml_type_size(node->type)*node->src0->ne[0]*n_tasks;
+ size_t cur = ggml_type_size(node->type)*node->src[0]->ne[0]*n_tasks;
work_size = MAX(work_size, cur);
} break;
ggml_graph_export_leaf(cgraph->leafs[i], fout);
GGML_ASSERT(cgraph->leafs[i]->op == GGML_OP_NONE);
- GGML_ASSERT(cgraph->leafs[i]->src0 == NULL);
- GGML_ASSERT(cgraph->leafs[i]->src1 == NULL);
+ GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL);
+ GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL);
}
// header
for (int i = 0; i < cgraph->n_nodes; ++i) {
ggml_graph_export_node(cgraph->nodes[i], "DST", fout);
- if (cgraph->nodes[i]->src0) {
- ggml_graph_export_node(cgraph->nodes[i]->src0, "SRC0", fout);
- }
-
- if (cgraph->nodes[i]->src1) {
- ggml_graph_export_node(cgraph->nodes[i]->src1, "SRC1", fout);
- }
-
- for (int j = 0; j < GGML_MAX_OPT; ++j) {
- if (cgraph->nodes[i]->opt[j]) {
- ggml_graph_export_node(cgraph->nodes[i]->opt[j], "OPT", fout);
+ for (int j = 0; j < GGML_MAX_SRC; ++j) {
+ if (cgraph->nodes[i]->src[j]) {
+ ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout);
}
}
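// the exporter now emits a single "SRC" tag per operand instead of the
// previous "SRC0"/"SRC1"/"OPT" trio; the argument-index bookkeeping below
// follows the same indexing.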
// output the op arguments
{
- struct ggml_tensor * args[2 + GGML_MAX_OPT] = { NULL };
-
- args[0] = tensor->src0;
- args[1] = tensor->src1;
+ struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
- for (int j = 0; j < GGML_MAX_OPT; ++j) {
- args[2 + j] = tensor->opt[j];
+ for (int j = 0; j < GGML_MAX_SRC; ++j) {
+ args[j] = tensor->src[j];
}
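// since old opt[j] operands now live at src[2 + j], the serialized argument
// indices stay aligned with the previous 2 + GGML_MAX_OPT layout, assuming
// GGML_MAX_SRC == 2 + GGML_MAX_OPT.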
- for (int j = 0; j < 2 + GGML_MAX_OPT; ++j) {
+ for (int j = 0; j < GGML_MAX_SRC; ++j) {
if (args[j]) {
int32_t idx = -1;
const char * ptr_name = ptr; ptr += GGML_MAX_NAME;
- const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += (2 + GGML_MAX_OPT)*sizeof(int32_t);
+ const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += GGML_MAX_SRC*sizeof(int32_t);
- struct ggml_tensor * args[2 + GGML_MAX_OPT] = { NULL };
+ struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
// parse args
- for (int j = 0; j < 2 + GGML_MAX_OPT; ++j) {
+ for (int j = 0; j < GGML_MAX_SRC; ++j) {
const int32_t arg_idx = ptr_arg_idx[j];
if (arg_idx == -1) {
tensor->nb[j] = nb[j];
}
- tensor->src0 = args[0];
- tensor->src1 = args[1];
-
- for (int j = 0; j < GGML_MAX_OPT; ++j) {
- tensor->opt[j] = args[2 + j];
+ for (int j = 0; j < GGML_MAX_SRC; ++j) {
+ tensor->src[j] = args[j];
}
result.nodes[i] = tensor;
for (int i = 0; i < gb->n_nodes; i++) {
struct ggml_tensor * node = gb->nodes[i];
- if (node->src0) {
- ggml_graph_dump_dot_node_edge(fp, gb, node, node->src0, "x");
- }
-
- if (node->src1) {
- ggml_graph_dump_dot_node_edge(fp, gb, node, node->src1, "y");
- }
-
- for (int j = 0; j < GGML_MAX_OPT; j++) {
- if (node->opt[j]) {
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ if (node->src[j]) {
char label[16];
- snprintf(label, sizeof(label), "opt %d", j);
- ggml_graph_dump_dot_node_edge(fp, gb, node, node->opt[j], label);
+ snprintf(label, sizeof(label), "src %d", j);
+ ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label);
}
}
}
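// edge labels in the dot dump change from "x"/"y"/"opt %d" to a uniform
// "src %d"; the leaf loop below mirrors the same pattern.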
for (int i = 0; i < gb->n_leafs; i++) {
struct ggml_tensor * node = gb->leafs[i];
- if (node->src0) {
- ggml_graph_dump_dot_leaf_edge(fp, node, node->src0, "x");
- }
-
- if (node->src1) {
- ggml_graph_dump_dot_leaf_edge(fp, node, node->src1, "y");
- }
-
- for (int j = 0; j < GGML_MAX_OPT; j++) {
- if (node->opt[j]) {
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ if (node->src[j]) {
char label[16];
- snprintf(label, sizeof(label), "opt %d", j);
- ggml_graph_dump_dot_leaf_edge(fp, node, node->opt[j], label);
+ snprintf(label, sizeof(label), "src %d", j);
+ ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label);
}
}
}