int n_threads;
void * work_data;
size_t work_size;
+
+ ggml_abort_callback abort_callback;
+ void * abort_callback_data;
};
GGML_CALL static const char * ggml_backend_cpu_name(ggml_backend_t backend) {
cpu_plan->cplan.work_data = malloc(cpu_plan->cplan.work_size);
}
+ cpu_plan->cplan.abort_callback = cpu_ctx->abort_callback;
+ cpu_plan->cplan.abort_callback_data = cpu_ctx->abort_callback_data;
+
return cpu_plan;
}
cpu_ctx->work_data = realloc(cpu_ctx->work_data, cplan.work_size);
cpu_ctx->work_size = cplan.work_size;
}
-
cplan.work_data = cpu_ctx->work_data;
+ cplan.abort_callback = cpu_ctx->abort_callback;
+ cplan.abort_callback_data = cpu_ctx->abort_callback_data;
+
ggml_graph_compute(cgraph, &cplan);
return true;
}
ggml_backend_t ggml_backend_cpu_init(void) {
struct ggml_backend_cpu_context * ctx = malloc(sizeof(struct ggml_backend_cpu_context));
- ctx->n_threads = GGML_DEFAULT_N_THREADS;
- ctx->work_data = NULL;
- ctx->work_size = 0;
+ ctx->n_threads           = GGML_DEFAULT_N_THREADS;
+ ctx->work_data           = NULL;
+ ctx->work_size           = 0;
+ ctx->abort_callback      = NULL;
+ ctx->abort_callback_data = NULL;
ggml_backend_t cpu_backend = malloc(sizeof(struct ggml_backend));
ctx->n_threads = n_threads;
}
+void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data) {
+ GGML_ASSERT(ggml_backend_is_cpu(backend_cpu));
+
+ struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *)backend_cpu->context;
+ ctx->abort_callback = abort_callback;
+ ctx->abort_callback_data = abort_callback_data;
+}
+
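For illustration only (not part of this diff), a caller could hook a cancellation flag into the CPU backend through the new setter roughly like this; `example_abort_cb` and `stop_requested` are hypothetical caller-side names:

static bool example_abort_cb(void * data) {
    // return true to make the ongoing graph computation stop as soon as possible
    return *(volatile bool *) data;
}

// during setup, with `stop_requested` owned and toggled by the caller:
ggml_backend_t backend = ggml_backend_cpu_init();
ggml_backend_cpu_set_abort_callback(backend, example_abort_cb, (void *) &stop_requested);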
GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) {
return ggml_backend_buffer_init(ggml_backend_cpu_buffer_type(), cpu_backend_buffer_i_from_ptr, ptr, size);
}
GGML_API ggml_backend_t ggml_backend_cpu_init(void);
- GGML_API GGML_CALL bool ggml_backend_is_cpu (ggml_backend_t backend);
- GGML_API void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads);
+ GGML_API GGML_CALL bool ggml_backend_is_cpu                (ggml_backend_t backend);
+ GGML_API           void ggml_backend_cpu_set_n_threads     (ggml_backend_t backend_cpu, int n_threads);
+ GGML_API           void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data);
// Create a backend buffer from an existing pointer
GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);
atomic_int node_n; // active graph node
atomic_int node_task; // active graph node task phase
- bool (*abort_callback)(void * data); // abort ggml_graph_compute when true
+ ggml_abort_callback abort_callback; // abort ggml_graph_compute when true
void * abort_callback_data;
};
static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
+ // Abort callback
+ // If not NULL, called before ggml computation
+ // If it returns true, the computation is aborted
+ typedef bool (*ggml_abort_callback)(void * data);
+
// the compute plan that needs to be prepared for ggml_graph_compute()
// since https://github.com/ggerganov/ggml/issues/287
struct ggml_cplan {
int n_threads;
// abort ggml_graph_compute when true
- bool (*abort_callback)(void * data);
- void * abort_callback_data;
+ ggml_abort_callback abort_callback;
+ void *              abort_callback_data;
};
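For reference, a rough sketch of how these fields are used when driving ggml_graph_compute() directly with a cplan; `graph`, `n_threads`, and the `example_abort_cb`/`stop_requested` pair from the sketch above are assumed caller state:

struct ggml_cplan cplan = ggml_graph_plan(graph, n_threads);
cplan.work_data           = malloc(cplan.work_size);    // work buffer is allocated by the caller
cplan.abort_callback      = example_abort_cb;           // invoked by ggml_graph_compute() to check for cancellation
cplan.abort_callback_data = (void *) &stop_requested;
ggml_graph_compute(graph, &cplan);                      // stops early once the callback returns true
free(cplan.work_data);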
enum ggml_cgraph_eval_order {