return wtype;
}
+size_t ggml_tensor_overhead(void) {
+ return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE + 16;
+}
+
static inline bool ggml_is_transposed(const struct ggml_tensor * tensor) {
return tensor->nb[0] > tensor->nb[1];
}
}
struct ggml_tensor * ggml_get_tensor_by_name(struct ggml_cgraph * cgraph, const char * name) {
+ for (int i = 0; i < cgraph->n_leafs; i++) {
+ struct ggml_tensor * leaf = cgraph->leafs[i];
+
+ if (strcmp(leaf->name, name) == 0) {
+ return leaf;
+ }
+ }
+
for (int i = 0; i < cgraph->n_nodes; i++) {
struct ggml_tensor * node = cgraph->nodes[i];
static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
- // use this to compute the memory overhead of a tensor
- static const size_t GGML_TENSOR_OVERHEAD = (GGML_OBJECT_SIZE + GGML_TENSOR_SIZE + 16);
-
// computation graph
struct ggml_cgraph {
int n_nodes;
// TODO: temporary until model loading of ggml examples is refactored
GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);
+ // use this to compute the memory overhead of a tensor
+ GGML_API size_t ggml_tensor_overhead(void);
+
// main
GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);
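
Below is a minimal usage sketch (not part of the patch) showing how the two additions fit together: ggml_tensor_overhead() sizes the context's memory pool in place of the removed GGML_TENSOR_OVERHEAD constant, and ggml_get_tensor_by_name() now also finds leaf (input) tensors that were labeled with ggml_set_name(). The surrounding calls (ggml_set_name, ggml_sqr, ggml_build_forward) and the exact pool sizing are assumptions based on the ggml API at this revision, not part of the diff.

#include <stdio.h>
#include "ggml.h"

int main(void) {
    const size_t n = 16;

    // size the pool with the new helper: two tensors (metadata + data) plus slack
    // (the sizing here is an illustrative assumption, not taken from the patch)
    struct ggml_init_params params = {
        /*.mem_size   =*/ 2*ggml_tensor_overhead() + 2*n*sizeof(float) + 1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // "a" is a leaf of the graph (an input with no op), "b" is a node
    struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n);
    ggml_set_name(a, "a");
    struct ggml_tensor * b = ggml_sqr(ctx, a);

    struct ggml_cgraph gf = ggml_build_forward(b);

    // before this patch only cgraph->nodes were searched, so looking up "a"
    // returned NULL; with the leaf loop added above it is found
    struct ggml_tensor * t = ggml_get_tensor_by_name(&gf, "a");
    printf("lookup of \"a\": %s\n", t != NULL ? "found" : "not found");

    ggml_free(ctx);
    return 0;
}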