}
}
- AT_PRINTF("block %d\n", best_fit_block);
-
if (best_fit_block == -1) {
// the last block is our last resort
struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1];
return;
}
}
+
struct free_block * block = &alloc->free_blocks[best_fit_block];
void * addr = block->addr;
block->addr = (char*)block->addr + size;
}
}
+ AT_PRINTF("block %d, addr %p\n", best_fit_block, addr);
+
tensor->data = addr;
tensor->buffer = alloc->buffer;
if (alloc->measure) {
alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
} else {
alloc->free_blocks[0].size = ggml_backend_buffer_get_size(alloc->buffer) - align_offset;
+ ggml_backend_buffer_reset(alloc->buffer);
}
}
return alloc;
}
-ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backend) {
+ggml_tallocr_t ggml_tallocr_new_measure_from_buft(struct ggml_backend_buffer_type * buft) {
// create a backend buffer to get the correct tensor allocation sizes
- ggml_backend_buffer_t buffer = ggml_backend_alloc_buffer(backend, 1);
+ ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, 1);
// TODO: move alloc initialization to a common ggml_tallocr_new_impl function
ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(buffer);
return alloc;
}
-ggml_tallocr_t ggml_tallocr_new_from_backend(struct ggml_backend * backend, size_t size) {
- ggml_backend_buffer_t buffer = ggml_backend_alloc_buffer(backend, size);
+ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backend) {
+ return ggml_tallocr_new_measure_from_buft(ggml_backend_get_default_buffer_type(backend));
+}
+
+ggml_tallocr_t ggml_tallocr_new_from_buft(struct ggml_backend_buffer_type * buft, size_t size) {
+ // create a backend buffer to get the correct tensor allocation sizes
+ ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size);
ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(buffer);
alloc->buffer_owned = true;
return alloc;
}
+ggml_tallocr_t ggml_tallocr_new_from_backend(struct ggml_backend * backend, size_t size) {
+ return ggml_tallocr_new_from_buft(ggml_backend_get_default_buffer_type(backend), size);
+}
+
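+// Usage sketch (illustrative; the buffer type and size below are placeholders): an owned
+// allocator can now be created directly from a buffer type instead of a backend:
+//     ggml_tallocr_t talloc = ggml_tallocr_new_from_buft(ggml_backend_cpu_buffer_type(), 16*1024*1024);
+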
ggml_tallocr_t ggml_tallocr_new_from_buffer(struct ggml_backend_buffer * buffer) {
ggml_tallocr_t alloc = (ggml_tallocr_t)malloc(sizeof(struct ggml_tallocr));
if (nbytes == 0) {
// all the tensors in the context are already allocated
+#ifndef NDEBUG
+ fprintf(stderr, "%s: all tensors in the context are already allocated\n", __func__);
+#endif
return NULL;
}
ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, nbytes);
+ if (buffer == NULL) {
+ // failed to allocate buffer
+#ifndef NDEBUG
+ fprintf(stderr, "%s: failed to allocate buffer\n", __func__);
+#endif
+ return NULL;
+ }
+
ggml_tallocr_t tallocr = ggml_tallocr_new_from_buffer(buffer);
for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
GGML_API ggml_tallocr_t ggml_tallocr_new(void * data, size_t size, size_t alignment);
GGML_API ggml_tallocr_t ggml_tallocr_new_measure(size_t alignment);
-GGML_API ggml_tallocr_t ggml_tallocr_new_from_buffer(struct ggml_backend_buffer * buffer);
+GGML_API ggml_tallocr_t ggml_tallocr_new_from_buft(struct ggml_backend_buffer_type * buft, size_t size);
GGML_API ggml_tallocr_t ggml_tallocr_new_from_backend(struct ggml_backend * backend, size_t size); // allocates an owned buffer
+GGML_API ggml_tallocr_t ggml_tallocr_new_from_buffer(struct ggml_backend_buffer * buffer);
+GGML_API ggml_tallocr_t ggml_tallocr_new_measure_from_buft(struct ggml_backend_buffer_type * buft);
GGML_API ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backend);
GGML_API struct ggml_backend_buffer * ggml_tallocr_get_buffer(ggml_tallocr_t talloc);
typedef void * ggml_backend_buffer_type_context_t;
struct ggml_backend_buffer_type_i {
+ const char * (*get_name) (ggml_backend_buffer_type_t buft);
ggml_backend_buffer_t (*alloc_buffer) (ggml_backend_buffer_type_t buft, size_t size);
size_t (*get_alignment) (ggml_backend_buffer_type_t buft); // tensor alignment
- size_t (*get_alloc_size) (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
+ size_t (*get_alloc_size) (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
bool (*supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend
// check if tensor data is in host memory
// should be equivalent to supports_backend(buft, ggml_backend_cpu_init())
typedef void * ggml_backend_buffer_context_t;
struct ggml_backend_buffer_i {
- void (*free_buffer) (ggml_backend_buffer_t buffer);
- //void (*reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
- void * (*get_base) (ggml_backend_buffer_t buffer);
- void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
- void (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
- void (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
- // (optional) copy tensor between different buffer-type, allow for single-copy tranfers
- void (*cpy_tensor_from)(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst);
- void (*cpy_tensor_to) (ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst);
- void (*clear) (ggml_backend_buffer_t buffer, uint8_t value);
+ const char * (*get_name) (ggml_backend_buffer_t buffer);
+ void (*free_buffer)(ggml_backend_buffer_t buffer);
+ void * (*get_base) (ggml_backend_buffer_t buffer);
+ void (*init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+ void (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ void (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+ bool (*cpy_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
+ void (*clear) (ggml_backend_buffer_t buffer, uint8_t value);
+ void (*reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
};
struct ggml_backend_buffer {
ggml_backend_buffer_type_t buft;
ggml_backend_buffer_context_t context;
size_t size;
+ enum ggml_backend_buffer_usage usage;
};
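+// Illustrative use of the new usage flag (names here are placeholders): tagging a buffer
+// that holds model weights lets the scheduler keep the ops that use it on the same backend:
+//     ggml_backend_buffer_set_usage(weights_buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
+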
ggml_backend_buffer_t ggml_backend_buffer_init(
ggml_backend_buffer_context_t context,
size_t size);
+ // do not use directly, use ggml_backend_tensor_copy instead
+ bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst);
//
// Backend
// buffer allocation
ggml_backend_buffer_type_t (*get_default_buffer_type)(ggml_backend_t backend);
- // (optional) asynchroneous tensor data access
+ // (optional) asynchronous tensor data access
void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+ bool (*cpy_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * src, struct ggml_tensor * dst);
- // (optional) asynchroneous tensor copy
- void (*cpy_tensor_from_async)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
- void (*cpy_tensor_to_async) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
-
+ // (optional) complete all pending operations
void (*synchronize)(ggml_backend_t backend);
// compute graph with a plan
- ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+ ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph);
void (*graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
void (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
- // compute graph without a plan
+ // compute graph without a plan (async)
bool (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);
// check if the backend supports an operation
ggml_backend_context_t context;
};
-
//
// Backend registry
//
// backend buffer type
+const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) {
+ return buft->iface.get_name(buft);
+}
+
ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
return buft->iface.alloc_buffer(buft, size);
}
/* .buft = */ buft,
/* .context = */ context,
/* .size = */ size,
+ /* .usage = */ GGML_BACKEND_BUFFER_USAGE_ANY
};
return buffer;
}
+const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) {
+ return buffer->iface.get_name(buffer);
+}
+
void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) {
if (buffer == NULL) {
return;
}
size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer) {
- return ggml_backend_buft_get_alignment(ggml_backend_buffer_type(buffer));
+ return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer));
}
size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
- return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type(buffer), tensor);
+ return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor);
}
void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
}
bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) {
- return ggml_backend_buft_is_host(ggml_backend_buffer_type(buffer));
+ return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer));
}
-ggml_backend_buffer_type_t ggml_backend_buffer_type(ggml_backend_buffer_t buffer) {
+void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
+ buffer->usage = usage;
+}
+
+ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) {
return buffer->buft;
}
+void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) {
+ if (buffer->iface.reset) {
+ buffer->iface.reset(buffer);
+ }
+}
+
+bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst) {
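+ // a view does not own memory, so the copy goes through the buffer of the viewed tensor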
+ ggml_backend_buffer_t dst_buf = dst->view_src ? dst->view_src->buffer : dst->buffer;
+ if (dst_buf->iface.cpy_tensor) {
+ return dst_buf->iface.cpy_tensor(dst_buf, src, dst);
+ }
+ return false;
+}
+
// backend
const char * ggml_backend_name(ggml_backend_t backend) {
GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
- backend->iface.set_tensor_async(backend, tensor, data, offset, size);
+ if (backend->iface.set_tensor_async == NULL) {
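+ // the backend has no async path, fall back to the synchronous copy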
+ ggml_backend_tensor_set(tensor, data, offset, size);
+ } else {
+ backend->iface.set_tensor_async(backend, tensor, data, offset, size);
+ }
}
void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
- backend->iface.get_tensor_async(backend, tensor, data, offset, size);
+ if (backend->iface.get_tensor_async == NULL) {
+ ggml_backend_tensor_get(tensor, data, offset, size);
+ } else {
+ backend->iface.get_tensor_async(backend, tensor, data, offset, size);
+ }
}
void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
+
GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
- GGML_ASSERT(tensor->buffer != NULL && "tensor buffer not set");
+ GGML_ASSERT(buf != NULL && "tensor buffer not set");
GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
- tensor->buffer->iface.set_tensor(tensor->buffer, tensor, data, offset, size);
+ buf->iface.set_tensor(buf, tensor, data, offset, size);
}
void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
+
GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
- GGML_ASSERT(tensor->buffer != NULL && "tensor buffer not set");
+ GGML_ASSERT(buf != NULL && "tensor buffer not set");
GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
- tensor->buffer->iface.get_tensor(tensor->buffer, tensor, data, offset, size);
+ buf->iface.get_tensor(buf, tensor, data, offset, size);
}
void ggml_backend_synchronize(ggml_backend_t backend) {
void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
backend->iface.graph_plan_compute(backend, plan);
-
- // TODO: optional sync
- ggml_backend_synchronize(backend);
}
bool ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
- if (!backend->iface.graph_compute(backend, cgraph)) {
- return false;
- }
-
- // TODO: optional sync
- ggml_backend_synchronize(backend);
- return true;
+ return backend->iface.graph_compute(backend, cgraph);
}
bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
}
void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) {
- //printf("src: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", src->name, (int)src->ne[0], (int)src->ne[1], (int)src->ne[2], (int)src->ne[3], (int)src->nb[0], (int)src->nb[1], (int)src->nb[2], (int)src->nb[3]);
- //printf("dst: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", dst->name, (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], (int)dst->nb[0], (int)dst->nb[1], (int)dst->nb[2], (int)dst->nb[3]);
GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");
- // fprintf(stderr, "cpy tensor %s from %s to %s (%lu bytes)\n", src->name, ggml_backend_name(src->backend), ggml_backend_name(dst->backend), ggml_nbytes(src));
-
if (src == dst) {
return;
}
- // TODO: allow backends to support copy to/from same backend
-
- if (dst->buffer->iface.cpy_tensor_from != NULL) {
- dst->buffer->iface.cpy_tensor_from(dst->buffer, src, dst);
- } else if (src->buffer->iface.cpy_tensor_to != NULL) {
- src->buffer->iface.cpy_tensor_to(src->buffer, src, dst);
- } else {
- // shouldn't be hit when copying from/to CPU
- #ifndef NDEBUG
- fprintf(stderr, "ggml_backend_tensor_copy: neither cpy_tensor_from nor cpy_tensor_to "
- "are implemented for %s and %s, falling back to get/set\n", src->name, dst->name);
- #endif
+ if (ggml_backend_buffer_is_host(src->buffer)) {
+ ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
+ } else if (ggml_backend_buffer_is_host(dst->buffer)) {
+ ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
+ } else if (!ggml_backend_buffer_copy_tensor(src, dst)) {
+#ifndef NDEBUG
+ fprintf(stderr, "%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer));
+#endif
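+ // last resort: bounce the data through host memory with get/set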
size_t nbytes = ggml_nbytes(src);
void * data = malloc(nbytes);
ggml_backend_tensor_get(src, data, 0, nbytes);
ggml_backend_tensor_set(dst, data, 0, nbytes);
free(data);
}
}
+void ggml_backend_tensor_copy_async(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) {
+ GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");
+
+ if (src == dst) {
+ return;
+ }
+
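+ // use the backend's async copy only if it can access both the source and destination buffers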
+ if (ggml_backend_buft_supports_backend(src->buffer->buft, backend) && ggml_backend_buft_supports_backend(dst->buffer->buft, backend)) {
+ if (backend->iface.cpy_tensor_async != NULL) {
+ if (backend->iface.cpy_tensor_async(backend, src, dst)) {
+ return;
+ }
+ }
+ }
+
+ size_t nbytes = ggml_nbytes(src);
+ if (ggml_backend_buffer_is_host(src->buffer)) {
+ ggml_backend_tensor_set_async(backend, dst, src->data, 0, nbytes);
+ }
+ else {
+ ggml_backend_tensor_copy(src, dst);
+ }
+}
+
+
// backend registry
#define GGML_MAX_BACKENDS_REG 16
// backend CPU
+static const char * ggml_backend_cpu_buffer_name(ggml_backend_buffer_t buffer) {
+ return "CPU";
+
+ GGML_UNUSED(buffer);
+}
+
static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
return (void *)buffer->context;
}
GGML_UNUSED(buffer);
}
-static void ggml_backend_cpu_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) {
- ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
-
- GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_cpu_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) {
- ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
+static bool ggml_backend_cpu_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
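+ // only a host-to-host copy can be done with a plain memcpy; otherwise report failure so
+ // the caller falls back to get/set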
+ if (ggml_backend_buffer_is_host(src->buffer)) {
+ memcpy(dst->data, src->data, ggml_nbytes(src));
+ return true;
+ }
+ return false;
GGML_UNUSED(buffer);
}
}
static struct ggml_backend_buffer_i cpu_backend_buffer_i = {
+ /* .get_name = */ ggml_backend_cpu_buffer_name,
/* .free_buffer = */ ggml_backend_cpu_buffer_free_buffer,
/* .get_base = */ ggml_backend_cpu_buffer_get_base,
/* .init_tensor = */ NULL, // no initialization required
/* .set_tensor = */ ggml_backend_cpu_buffer_set_tensor,
/* .get_tensor = */ ggml_backend_cpu_buffer_get_tensor,
- /* .cpy_tensor_from = */ ggml_backend_cpu_buffer_cpy_tensor_from,
- /* .cpy_tensor_to = */ ggml_backend_cpu_buffer_cpy_tensor_to,
+ /* .cpy_tensor = */ ggml_backend_cpu_buffer_cpy_tensor,
/* .clear = */ ggml_backend_cpu_buffer_clear,
+ /* .reset = */ NULL,
};
// for buffers from ptr, free is not called
static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = {
+ /* .get_name = */ ggml_backend_cpu_buffer_name,
/* .free_buffer = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed
/* .get_base = */ ggml_backend_cpu_buffer_get_base,
/* .init_tensor = */ NULL, // no initialization required
/* .set_tensor = */ ggml_backend_cpu_buffer_set_tensor,
/* .get_tensor = */ ggml_backend_cpu_buffer_get_tensor,
- /* .cpy_tensor_from = */ ggml_backend_cpu_buffer_cpy_tensor_from,
- /* .cpy_tensor_to = */ ggml_backend_cpu_buffer_cpy_tensor_to,
+ /* .cpy_tensor = */ ggml_backend_cpu_buffer_cpy_tensor,
/* .clear = */ ggml_backend_cpu_buffer_clear,
+ /* .reset = */ NULL,
};
static const size_t TENSOR_ALIGNMENT = 64; // should be enough for AVX 512
+static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+ return "CPU";
+
+ GGML_UNUSED(buft);
+}
+
static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
size += TENSOR_ALIGNMENT; // malloc may return an address that is not aligned
void * data = malloc(size); // TODO: maybe use GGML_ALIGNED_MALLOC?
ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {
static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
/* .iface = */ {
+ /* .get_name = */ ggml_backend_cpu_buffer_type_get_name,
/* .alloc_buffer = */ ggml_backend_cpu_buffer_type_alloc_buffer,
/* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment,
/* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
#include <hbwmalloc.h>
+static const char * ggml_backend_cpu_hbm_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+ return "CPU_HBM";
+
+ GGML_UNUSED(buft);
+}
+
+static const char * ggml_backend_cpu_hbm_buffer_get_name(ggml_backend_buffer_t buf) {
+ return "CPU_HBM";
+
+ GGML_UNUSED(buf);
+}
+
static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) {
hbw_free(buffer->context);
}
return NULL;
}
- // FIXME: this is a hack to avoid having to implement a new buffer type
ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
buffer->buft = buft;
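+ // reuse the CPU buffer implementation; only the name and the free function differ for HBM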
+ buffer->iface.get_name = ggml_backend_cpu_hbm_buffer_get_name;
buffer->iface.free_buffer = ggml_backend_cpu_hbm_buffer_free_buffer;
return buffer;
}
-ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type() {
+ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) {
static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = {
/* .iface = */ {
+ /* .get_name = */ ggml_backend_cpu_hbm_buffer_type_get_name,
/* .alloc_buffer = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer,
/* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment,
/* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
struct ggml_cgraph cgraph;
};
-static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, const struct ggml_cgraph * cgraph) {
struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
struct ggml_backend_plan_cpu * cpu_plan = malloc(sizeof(struct ggml_backend_plan_cpu));
/* .get_default_buffer_type = */ ggml_backend_cpu_get_default_buffer_type,
/* .set_tensor_async = */ NULL,
/* .get_tensor_async = */ NULL,
- /* .cpy_tensor_from_async = */ NULL,
- /* .cpy_tensor_to_async = */ NULL,
+ /* .cpy_tensor_async = */ NULL,
/* .synchronize = */ NULL,
/* .graph_plan_create = */ ggml_backend_cpu_graph_plan_create,
/* .graph_plan_free = */ ggml_backend_cpu_graph_plan_free,
}
bool ggml_backend_is_cpu(ggml_backend_t backend) {
- return backend->iface.get_name == ggml_backend_cpu_name;
+ return backend && backend->iface.get_name == ggml_backend_cpu_name;
}
void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) {
// scheduler
-#define GGML_MAX_BACKENDS 4
+#define GGML_MAX_BACKENDS 16
#define GGML_MAX_SPLITS 256
#define GGML_MAX_SPLIT_INPUTS 16
int i_end;
struct ggml_tensor * inputs[GGML_MAX_SPLIT_INPUTS];
int n_inputs;
+ // graph view of this split
struct ggml_cgraph graph;
};
struct ggml_backend_sched {
+ bool is_reset; // true if the scheduler has been reset since the last graph split
+
int n_backends;
ggml_backend_t backends[GGML_MAX_BACKENDS];
+ ggml_backend_buffer_type_t bufts[GGML_MAX_BACKENDS];
ggml_tallocr_t tallocs[GGML_MAX_BACKENDS];
ggml_gallocr_t galloc;
+ // hash keys of the nodes in the graph
struct ggml_hash_set hash_set;
- ggml_tallocr_t * node_talloc; // [hash_set.size]
- struct ggml_tensor * (* node_copies)[GGML_MAX_BACKENDS]; // [hash_set.size][GGML_MAX_BACKENDS]
+ // hash values (arrays of [hash_set.size])
+ ggml_tallocr_t * node_talloc; // tallocr assigned to each node (indirectly this is the backend)
+ struct ggml_tensor * (* node_copies)[GGML_MAX_BACKENDS]; // copies of each node for each destination backend
+ // copy of the graph with modified inputs
struct ggml_cgraph * graph;
+
struct ggml_backend_sched_split splits[GGML_MAX_SPLITS];
int n_splits;
return INT_MAX;
}
-static ggml_backend_t get_buffer_backend(ggml_backend_sched_t sched, ggml_backend_buffer_t buffer) {
+static ggml_tallocr_t sched_allocr_from_buffer(ggml_backend_sched_t sched, ggml_backend_buffer_t buffer) {
if (buffer == NULL) {
return NULL;
}
+
+ // check if the buffer is already one of the allocr buffers (from manual user allocations)
+ for (int i = 0; i < sched->n_backends; i++) {
+ if (ggml_tallocr_get_buffer(sched->tallocs[i]) == buffer) {
+ return sched->tallocs[i];
+ }
+ }
+
// find highest prio backend that supports the buffer type
for (int i = 0; i < sched->n_backends; i++) {
if (ggml_backend_buft_supports_backend(buffer->buft, sched->backends[i])) {
- return sched->backends[i];
+ return sched->tallocs[i];
}
}
GGML_ASSERT(false && "tensor buffer type not supported by any backend");
if (allocr == NULL) {
return NULL;
}
- // find highest prio backend that supports the buffer type
for (int i = 0; i < sched->n_backends; i++) {
if (sched->tallocs[i] == allocr) {
return sched->backends[i];
}
#if 0
-static char causes[GGML_DEFAULT_GRAPH_SIZE*8 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug, remove
+static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug only
#define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__)
#define GET_CAUSE(node) causes[hash_id(node)]
#else
#endif
// returns the backend that should be used for the node based on the current locations
-static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * node) {
- // if the dst tensor is already allocated in a buffer, we must assume that it is critical to keep it there
- // ie. kv cache updates
- // note that this doesn't allow fallback to CPU. need to add output tensors to the splits to copy the data back to the original backend.
+static ggml_tallocr_t sched_allocr_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * node) {
+ // assign pre-allocated nodes to their backend
// dst
- ggml_backend_t cur_backend = get_buffer_backend(sched, node->buffer);
- if (cur_backend != NULL) {
+ ggml_tallocr_t cur_allocr = sched_allocr_from_buffer(sched, node->buffer);
+ if (cur_allocr != NULL) {
SET_CAUSE(node, "1.dst");
- return cur_backend;
+ return cur_allocr;
}
-
// view_src
- if (node->view_src != NULL && get_buffer_backend(sched, node->view_src->buffer) != NULL) {
- SET_CAUSE(node, "1.vsrc");
- return get_buffer_backend(sched, node->view_src->buffer);
+ if (node->view_src != NULL) {
+ cur_allocr = sched_allocr_from_buffer(sched, node->view_src->buffer);
+ if (cur_allocr != NULL) {
+ SET_CAUSE(node, "1.vsrc");
+ return cur_allocr;
+ }
}
-
- // src
- int cur_prio = INT_MAX;
- size_t cur_size = 0;
-
+ // assign nodes that use weights to the backend of the weights
for (int i = 0; i < GGML_MAX_SRC; i++) {
const struct ggml_tensor * src = node->src[i];
if (src == NULL) {
break;
}
- ggml_backend_t src_backend = get_buffer_backend(sched, src->buffer);
- if (src_backend != NULL) {
- int src_prio = sched_backend_prio(sched, src_backend);
- size_t src_size = ggml_nbytes(src);
- if (src_prio < cur_prio && src_size >= cur_size) {
- cur_prio = src_prio;
- cur_size = src_size;
- cur_backend = src_backend;
- SET_CAUSE(node, "1.src%d", i);
- }
+ if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
+ ggml_tallocr_t src_allocr = sched_allocr_from_buffer(sched, src->buffer);
+ // operations with weights are always run on the same backend as the weights
+ SET_CAUSE(node, "1.wgt%d", i);
+ return src_allocr;
}
}
- return cur_backend;
+
+ return NULL;
}
static char * fmt_size(size_t size) {
}
ggml_tallocr_t node_allocr = node_allocr(node);
ggml_backend_t node_backend = node_allocr ? get_allocr_backend(sched, node_allocr) : NULL; // FIXME:
- fprintf(stderr, "node #%3d (%10.10s): %20.20s (%4.4s) [%4.4s %8.8s]:", i, ggml_op_name(node->op), node->name,
+ fprintf(stderr, "node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]:", i, ggml_op_name(node->op), node->name,
fmt_size(ggml_nbytes(node)), node_allocr ? ggml_backend_name(node_backend) : "NULL", GET_CAUSE(node));
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * src = node->src[j];
}
ggml_tallocr_t src_allocr = node_allocr(src);
ggml_backend_t src_backend = src_allocr ? get_allocr_backend(sched, src_allocr) : NULL;
- fprintf(stderr, " %20.20s (%4.4s) [%4.4s %8.8s]", src->name,
+ fprintf(stderr, " %20.20s (%5.5s) [%5.5s %8.8s]", src->name,
fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src));
}
fprintf(stderr, "\n");
return dup;
}
+
+//#define DEBUG_PASS1
+//#define DEBUG_PASS2
+//#define DEBUG_PASS3
+//#define DEBUG_PASS4
+
// assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
-// TODO: merge passes
static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
- // reset state
- size_t hash_size = sched->hash_set.size;
- memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size);
- memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size);
- memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size);
+ // reset splits
sched->n_splits = 0;
+ sched->is_reset = false;
struct ggml_init_params params = {
/* .mem_size = */ sizeof(sched->context_buffer),
/* .no_alloc = */ true
};
- if (sched->ctx != NULL) {
- ggml_free(sched->ctx);
- }
+ ggml_free(sched->ctx);
sched->ctx = ggml_init(params);
+ if (sched->ctx == NULL) {
+ fprintf(stderr, "%s: failed to initialize context\n", __func__);
+ GGML_ASSERT(false);
+ }
- // pass 1: assign backends to ops with allocated inputs
+ // pass 1: assign backends to ops with pre-allocated inputs
for (int i = 0; i < graph->n_leafs; i++) {
struct ggml_tensor * leaf = graph->leafs[i];
if (node_allocr(leaf) != NULL) {
// do not overwrite user assignments
continue;
}
- ggml_backend_t leaf_backend = get_buffer_backend(sched, leaf->buffer);
- if (leaf_backend == NULL && leaf->view_src != NULL) {
- leaf_backend = get_buffer_backend(sched, leaf->view_src->buffer);
- }
- if (leaf_backend != NULL) {
- node_allocr(leaf) = ggml_backend_sched_get_tallocr(sched, leaf_backend);
- }
+ node_allocr(leaf) = sched_allocr_from_cur(sched, leaf);
}
for (int i = 0; i < graph->n_nodes; i++) {
// do not overwrite user assignments
continue;
}
- ggml_backend_t node_backend = sched_backend_from_cur(sched, node);
- if (node_backend != NULL) {
- node_allocr(node) = ggml_backend_sched_get_tallocr(sched, node_backend);
+ node_allocr(node) = sched_allocr_from_cur(sched, node);
+ // src
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ break;
+ }
+ if (node_allocr(src) == NULL) {
+ node_allocr(src) = sched_allocr_from_cur(sched, src);
+ }
}
}
- //printf("PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+#ifdef DEBUG_PASS1
+ fprintf(stderr, "PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+#endif
- // pass 2: assign backends to ops from current assignments
- // TODO:
- // - reuse sched_backend_from_cur
- for (int i = 0; i < graph->n_nodes; i++) {
- struct ggml_tensor * node = graph->nodes[i];
- ggml_tallocr_t node_allocr = node_allocr(node);
- if (node_allocr == NULL) {
- int cur_prio = INT_MAX;
- size_t cur_size = 0;
- for (int j = 0; j < GGML_MAX_SRC; j++) {
- struct ggml_tensor * src = node->src[j];
- if (src == NULL) {
- break;
+ // pass 2: expand current backend assignments
+ // assign the same backend to adjacent nodes
+ // expand gpu backends (i.e. non-last-priority backends) up and down, ignoring cpu (the lowest priority backend)
+ // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops
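+ // example: in a run of [gpu op, unassigned op, gpu op], the middle op is pulled onto the
+ // gpu by passes 2.1/2.2 instead of falling back to the cpu backend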
+
+ // pass 2.1 expand gpu up
+ {
+ ggml_tallocr_t cur_allocr = NULL;
+ for (int i = graph->n_nodes - 1; i >= 0; i--) {
+ struct ggml_tensor * node = graph->nodes[i];
+ if (ggml_is_view_op(node->op)) {
+ continue;
+ }
+ ggml_tallocr_t node_allocr = node_allocr(node);
+ if (node_allocr != NULL) {
+ if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) {
+ // skip cpu (lowest prio backend)
+ cur_allocr = NULL;
+ } else {
+ cur_allocr = node_allocr;
}
- ggml_tallocr_t src_allocr = node_allocr(src);
- if (src_allocr != NULL) {
- int src_prio = sched_allocr_prio(sched, src_allocr);
- size_t src_size = ggml_nbytes(src);
- if (src_prio < cur_prio && src_size >= cur_size) {
- cur_prio = src_prio;
- cur_size = src_size;
- node_allocr = src_allocr;
- SET_CAUSE(node, "2.src%d", j);
- }
+ } else {
+ node_allocr(node) = cur_allocr;
+ SET_CAUSE(node, "2.1");
+ }
+ }
+ }
+
+ // pass 2.2 expand gpu down
+ {
+ ggml_tallocr_t cur_allocr = NULL;
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ if (ggml_is_view_op(node->op)) {
+ continue;
+ }
+ ggml_tallocr_t node_allocr = node_allocr(node);
+ if (node_allocr != NULL) {
+ if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) {
+ // skip cpu (lowest prio backend)
+ cur_allocr = NULL;
+ } else {
+ cur_allocr = node_allocr;
}
+ } else {
+ node_allocr(node) = cur_allocr;
+ SET_CAUSE(node, "2.2");
}
+ }
+ }
+
+ // pass 2.3 expand rest up
+ {
+ ggml_tallocr_t cur_allocr = NULL;
+ for (int i = graph->n_nodes - 1; i >= 0; i--) {
+ struct ggml_tensor * node = graph->nodes[i];
+ if (ggml_is_view_op(node->op)) {
+ continue;
+ }
+ ggml_tallocr_t node_allocr = node_allocr(node);
if (node_allocr != NULL) {
- node_allocr(node) = node_allocr;
+ cur_allocr = node_allocr;
+ } else {
+ node_allocr(node) = cur_allocr;
+ SET_CAUSE(node, "2.3");
}
}
}
- //printf("PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+#ifdef DEBUG_PASS2
+ fprintf(stderr, "PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+#endif
- // pass 3: assign backends to remaining src from dst (should only be leafs)
+ // pass 3: assign backends to remaining src from dst and view_src
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
- ggml_tallocr_t node_allocr = node_allocr(node);
+ ggml_tallocr_t cur_allocr = node_allocr(node);
+ if (node->view_src != NULL && cur_allocr == NULL) {
+ cur_allocr = node_allocr(node) = node_allocr(node->view_src);
+ SET_CAUSE(node, "3.vsrc");
+ }
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * src = node->src[j];
if (src == NULL) {
}
ggml_tallocr_t src_allocr = node_allocr(src);
if (src_allocr == NULL) {
- node_allocr(src) = node_allocr;
+ if (src->view_src != NULL) {
+ // views are always on the same backend as the source
+ node_allocr(src) = node_allocr(src->view_src);
+ SET_CAUSE(src, "3.vsrc");
+ } else {
+ node_allocr(src) = cur_allocr;
+ SET_CAUSE(src, "3.cur");
+ }
}
}
}
- //printf("PASS 3 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+#ifdef DEBUG_PASS3
+ fprintf(stderr, "PASS 3 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+#endif
// pass 4: split graph, find tensors that need to be copied
- // TODO:
- // - when switching from a less preferred backend to a more preferred backend, check if it is possible to move the switch to an earlier point for the same cost
- // find first backend
- int cur_split = 0;
- for (int i = 0; i < graph->n_nodes; i++) {
- struct ggml_tensor * node = graph->nodes[i];
- if (node->view_src == NULL) {
- sched->splits[0].tallocr = node_allocr(node);
- break;
+ {
+ int cur_split = 0;
+ // find the backend of the first split, skipping view ops
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ if (!ggml_is_view_op(node->op)) {
+ sched->splits[0].tallocr = node_allocr(node);
+ break;
+ }
}
- }
- sched->splits[0].i_start = 0;
- sched->splits[0].n_inputs = 0;
- memset(sched->splits[0].inputs, 0, sizeof(sched->splits[0].inputs)); //HACK
- ggml_tallocr_t cur_allocr = sched->splits[0].tallocr;
- size_t cur_backend_id = sched_allocr_prio(sched, cur_allocr);
- for (int i = 0; i < graph->n_nodes; i++) {
- struct ggml_tensor * node = graph->nodes[i];
+ sched->splits[0].i_start = 0;
+ sched->splits[0].n_inputs = 0;
+ memset(sched->splits[0].inputs, 0, sizeof(sched->splits[0].inputs)); //HACK
+ ggml_tallocr_t cur_allocr = sched->splits[0].tallocr;
+ size_t cur_backend_id = sched_allocr_prio(sched, cur_allocr);
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+
+ if (ggml_is_view_op(node->op)) {
+ continue;
+ }
- if (ggml_is_view_op(node->op)) {
- continue;
- }
+ ggml_tallocr_t node_allocr = node_allocr(node);
+
+ if (node_allocr != cur_allocr) {
+ sched->splits[cur_split].i_end = i;
+ cur_split++;
+ GGML_ASSERT(cur_split < GGML_MAX_SPLITS);
+ sched->splits[cur_split].tallocr = node_allocr;
+ sched->splits[cur_split].i_start = i;
+ sched->splits[cur_split].n_inputs = 0;
+ cur_allocr = node_allocr;
+ cur_backend_id = sched_allocr_prio(sched, cur_allocr);
+ }
- ggml_tallocr_t node_allocr = node_allocr(node);
+ // find inputs that are not on the same backend
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ break;
+ }
+ ggml_tallocr_t src_allocr = node_allocr(src);
+ GGML_ASSERT(src_allocr != NULL); // all inputs should be assigned by now
+ if (src_allocr != node_allocr) {
+ // check if the input is already in the split
+ bool found = false;
+ for (int k = 0; k < sched->splits[cur_split].n_inputs; k++) {
+ if (sched->splits[cur_split].inputs[k] == src) {
+ found = true;
+ break;
+ }
+ }
- if (node_allocr != cur_allocr) {
- sched->splits[cur_split].i_end = i;
- cur_split++;
- GGML_ASSERT(cur_split < GGML_MAX_SPLITS);
- sched->splits[cur_split].tallocr = node_allocr;
- sched->splits[cur_split].i_start = i;
- sched->splits[cur_split].n_inputs = 0;
- memset(sched->splits[cur_split].inputs, 0, sizeof(sched->splits[cur_split].inputs)); //HACK
- cur_allocr = node_allocr;
- cur_backend_id = sched_allocr_prio(sched, cur_allocr);
- }
+ if (!found) {
+ int n_inputs = sched->splits[cur_split].n_inputs++;
+ //printf("split %d input %d: %s (%s)\n", cur_split, n_inputs, src->name, ggml_backend_name(get_allocr_backend(sched, src_allocr)));
+ GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS);
+ sched->splits[cur_split].inputs[n_inputs] = src;
+ }
- // find inputs that are not on the same backend
- for (int j = 0; j < GGML_MAX_SRC; j++) {
- struct ggml_tensor * src = node->src[j];
- if (src == NULL) {
- break;
- }
- ggml_tallocr_t src_allocr = node_allocr(src);
- if (src_allocr != node_allocr) {
- int n_inputs = sched->splits[cur_split].n_inputs++;
- GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS);
- sched->splits[cur_split].inputs[n_inputs] = (struct ggml_tensor *)src;
-
- // create copies
- size_t id = hash_id(src);
- if (sched->node_copies[id][cur_backend_id] == NULL) {
- struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
- sched->node_copies[id][cur_backend_id] = tensor_copy;
- node_allocr(tensor_copy) = cur_allocr;
- ggml_backend_t backend = get_allocr_backend(sched, cur_allocr);
- ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name);
+ // create a copy of the input in the split's backend
+ size_t id = hash_id(src);
+ if (sched->node_copies[id][cur_backend_id] == NULL) {
+ ggml_backend_t backend = get_allocr_backend(sched, cur_allocr);
+ struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
+ ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name);
+
+ sched->node_copies[id][cur_backend_id] = tensor_copy;
+ node_allocr(tensor_copy) = cur_allocr;
+ SET_CAUSE(tensor_copy, "4.cpy");
+ }
+ node->src[j] = sched->node_copies[id][cur_backend_id];
}
- node->src[j] = sched->node_copies[id][cur_backend_id];
}
}
+ sched->splits[cur_split].i_end = graph->n_nodes;
+ sched->n_splits = cur_split + 1;
}
- sched->splits[cur_split].i_end = graph->n_nodes;
- sched->n_splits = cur_split + 1;
-
- //fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); fflush(stdout);
+#ifdef DEBUG_PASS4
+ fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+#endif
-#if 1
+#ifndef NDEBUG
// sanity check: all sources should have the same backend as the node
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
if (node_allocr == NULL) {
fprintf(stderr, "!!!!!!! %s has no backend\n", node->name);
}
+ if (node->view_src != NULL && node_allocr != node_allocr(node->view_src)) {
+ fprintf(stderr, "!!!!!!! %s has backend %s, view_src %s has backend %s\n",
+ node->name, node_allocr ? ggml_backend_name(get_allocr_backend(sched, node_allocr)) : "NULL",
+ node->view_src->name, node_allocr(node->view_src) ? ggml_backend_name(get_allocr_backend(sched, node_allocr(node->view_src))) : "NULL");
+ }
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * src = node->src[j];
if (src == NULL) {
node->name, node_allocr ? ggml_backend_name(get_allocr_backend(sched, node_allocr)) : "NULL",
j, src->name, src_allocr ? ggml_backend_name(get_allocr_backend(sched, src_allocr)) : "NULL");
}
+ if (src->view_src != NULL && src_allocr != node_allocr(src->view_src)) {
+ fprintf(stderr, "!!!!!!! [src] %s has backend %s, view_src %s has backend %s\n",
+ src->name, src_allocr ? ggml_backend_name(get_allocr_backend(sched, src_allocr)) : "NULL",
+ src->view_src->name, node_allocr(src->view_src) ? ggml_backend_name(get_allocr_backend(sched, node_allocr(src->view_src))) : "NULL");
+ }
}
}
+ fflush(stderr);
#endif
// create copies of the graph for each split
for (int j = 0; j < split->n_inputs; j++) {
struct ggml_tensor * input = split->inputs[j];
struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_allocr_prio(sched, split->tallocr)];
+ // add a dependency to the input source so that it is not freed before the copy is done
+ GGML_ASSERT(input_cpy->src[0] == NULL || input_cpy->src[0] == input);
input_cpy->src[0] = input;
graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
}
uint64_t copy_start_us = ggml_time_us();
for (int j = 0; j < split->n_inputs; j++) {
struct ggml_tensor * input = split->inputs[j];
- struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_backend_prio(sched, split_backend)];
- if (input->buffer == NULL) {
- if (input->view_src == NULL) {
- fprintf(stderr, "input %s has no buffer and no view_src\n", input->name);
- exit(1);
- }
- // FIXME: may need to use the sched buffer instead
- ggml_backend_view_init(input->view_src->buffer, input);
- }
- if (input_cpy->buffer == NULL) {
- fprintf(stderr, "input_cpy %s has no buffer\n", input_cpy->name);
- exit(1);
- }
- //GGML_ASSERT(input->buffer->backend != input_cpy->buffer->backend);
- //GGML_ASSERT(input_cpy->buffer->backend == split_backend);
- ggml_backend_tensor_copy(input, input_cpy);
+ struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][split_backend_id];
+
+ GGML_ASSERT(input->buffer != NULL);
+ GGML_ASSERT(input_cpy->buffer != NULL);
+
+ // TODO: avoid this copy if it was already copied in a previous split, and the input didn't change
+ // this is important to avoid copying constants such as KQ_mask and inp_pos multiple times
+ ggml_backend_tensor_copy_async(split_backend, input, input_cpy);
}
- // ggml_backend_synchronize(split_backend);
+ //ggml_backend_synchronize(split_backend); // necessary to measure copy time
int64_t copy_end_us = ggml_time_us();
copy_us[split_backend_id] += copy_end_us - copy_start_us;
uint64_t compute_start_us = ggml_time_us();
ggml_backend_graph_compute(split_backend, &split->graph);
- // ggml_backend_synchronize(split_backend);
+ //ggml_backend_synchronize(split_backend); // necessary to measure compute time
uint64_t compute_end_us = ggml_time_us();
compute_us[split_backend_id] += compute_end_us - compute_start_us;
}
for (int i = 0; i < sched->n_backends; i++) {
ggml_tallocr_reset(sched->tallocs[i]);
}
+ // reset state for the next run
+ size_t hash_size = sched->hash_set.size;
+ memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size);
+ memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size);
+ memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size);
+
+ sched->is_reset = true;
}
-ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends) {
+ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size) {
+ GGML_ASSERT(n_backends > 0);
GGML_ASSERT(n_backends <= GGML_MAX_BACKENDS);
- struct ggml_backend_sched * sched = malloc(sizeof(struct ggml_backend_sched));
- memset(sched, 0, sizeof(struct ggml_backend_sched));
+ struct ggml_backend_sched * sched = calloc(sizeof(struct ggml_backend_sched), 1);
+
+ // initialize hash table
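+ // (sized for the graph nodes plus the input copies that each split may add)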
+ sched->hash_set = ggml_hash_set_new(graph_size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);
+ sched->node_talloc = calloc(sizeof(sched->node_talloc[0]) * sched->hash_set.size, 1);
+ sched->node_copies = calloc(sizeof(sched->node_copies[0]) * sched->hash_set.size, 1);
sched->n_backends = n_backends;
for (int i = 0; i < n_backends; i++) {
sched->backends[i] = backends[i];
+ sched->bufts[i] = bufts ? bufts[i] : ggml_backend_get_default_buffer_type(backends[i]);
}
sched->galloc = ggml_gallocr_new();
// init measure allocs for each backend
for (int i = 0; i < n_backends; i++) {
- sched->tallocs[i] = ggml_tallocr_new_measure_from_backend(backends[i]);
+ sched->tallocs[i] = ggml_tallocr_new_measure_from_buft(sched->bufts[i]);
}
+ sched_reset(sched);
+
return sched;
}
ggml_tallocr_free(sched->tallocs[i]);
}
ggml_gallocr_free(sched->galloc);
+ ggml_free(sched->ctx);
free(sched->hash_set.keys);
free(sched->node_talloc);
free(sched->node_copies);
}
void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
- // initialize hash tables
- size_t hash_size = measure_graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS;
- sched->hash_set.size = hash_size;
- sched->hash_set.keys = malloc(sizeof(sched->hash_set.keys[0]) * hash_size);
- sched->node_talloc = malloc(sizeof(sched->node_talloc[0]) * hash_size);
- sched->node_copies = malloc(sizeof(sched->node_copies[0]) * hash_size);
+ GGML_ASSERT(ggml_tallocr_is_measure(sched->tallocs[0])); // can only be initialized once
sched_split_graph(sched, measure_graph);
sched_alloc_splits(sched);
for (int i = 0; i < sched->n_backends; i++) {
size_t size = ggml_tallocr_max_size(sched->tallocs[i]);
ggml_tallocr_free(sched->tallocs[i]);
- sched->tallocs[i] = ggml_tallocr_new_from_backend(sched->backends[i], size);
+ sched->tallocs[i] = ggml_tallocr_new_from_buft(sched->bufts[i], size);
}
sched_reset(sched);
}
void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
- GGML_ASSERT(sched->hash_set.size >= graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);
+ GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);
+
+ if (!sched->is_reset) {
+ sched_reset(sched);
+ }
sched_split_graph(sched, graph);
sched_alloc_splits(sched);
sched_compute_splits(sched);
+}
+
+void ggml_backend_sched_reset(ggml_backend_sched_t sched) {
sched_reset(sched);
}
+int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) {
+ return sched->n_splits;
+}
+
ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend) {
int backend_index = sched_backend_prio(sched, backend);
+ GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
return sched->tallocs[backend_index];
}
ggml_backend_buffer_t ggml_backend_sched_get_buffer(ggml_backend_sched_t sched, ggml_backend_t backend) {
int backend_index = sched_backend_prio(sched, backend);
+ GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
return ggml_tallocr_get_buffer(sched->tallocs[backend_index]);
}
node_allocr(node) = sched->tallocs[backend_index];
}
+ggml_backend_t ggml_backend_sched_get_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) {
+ ggml_tallocr_t allocr = node_allocr(node);
+ if (allocr == NULL) {
+ return NULL;
+ }
+ return get_allocr_backend(sched, allocr);
+}
+
// utils
+
void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
GGML_ASSERT(tensor->buffer == NULL);
- //GGML_ASSERT(tensor->data == NULL); // views of pre-allocted tensors may have the data set, but still need to be initialized
+ //GGML_ASSERT(tensor->data == NULL); // views of pre-allocated tensors may have the data set in ggml_new_tensor, but still need to be initialized by the backend
GGML_ASSERT(tensor->view_src != NULL);
GGML_ASSERT(tensor->view_src->buffer != NULL);
GGML_ASSERT(tensor->view_src->data != NULL);
struct ggml_tensor * dst = node_copies[id];
if (dst->view_src != NULL) {
+ graph_init_tensor(hash_set, node_copies, node_init, src->view_src);
ggml_backend_view_init(dst->view_src->buffer, dst);
}
else {
struct ggml_context * ctx_allocated = ggml_init(params);
struct ggml_context * ctx_unallocated = ggml_init(params);
+ if (ctx_allocated == NULL || ctx_unallocated == NULL) {
+ fprintf(stderr, "failed to allocate context for graph copy\n");
+ free(hash_set.keys);
+ free(node_copies);
+ free(node_init);
+ ggml_free(ctx_allocated);
+ ggml_free(ctx_unallocated);
+ return (struct ggml_backend_graph_copy) {
+ /* .buffer = */ NULL,
+ /* .ctx_allocated = */ NULL,
+ /* .ctx_unallocated = */ NULL,
+ /* .graph = */ NULL,
+ };
+ }
+
// dup nodes
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
// allocate nodes
ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend);
+ if (buffer == NULL) {
+ fprintf(stderr, "failed to allocate buffer for graph copy\n");
+ free(hash_set.keys);
+ free(node_copies);
+ free(node_init);
+ ggml_free(ctx_allocated);
+ ggml_free(ctx_unallocated);
+ return (struct ggml_backend_graph_copy) {
+ /* .buffer = */ NULL,
+ /* .ctx_allocated = */ NULL,
+ /* .ctx_unallocated = */ NULL,
+ /* .graph = */ NULL,
+ };
+ }
//printf("copy buffer size: %zu MB\n", ggml_backend_buffer_get_size(buffer) / 1024 / 1024);
ggml_free(copy.ctx_unallocated);
}
-void ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) {
+bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) {
struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph);
+ if (copy.buffer == NULL) {
+ return false;
+ }
+
struct ggml_cgraph * g1 = graph;
struct ggml_cgraph * g2 = copy.graph;
}
ggml_backend_graph_copy_free(copy);
+
+ return true;
}
//
// buffer type
- GGML_API ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size);
- GGML_API size_t ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft);
- GGML_API size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor);
- GGML_API bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend);
- GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft);
+ GGML_API const char * ggml_backend_buft_name (ggml_backend_buffer_type_t buft);
+ GGML_API ggml_backend_buffer_t ggml_backend_buft_alloc_buffer (ggml_backend_buffer_type_t buft, size_t size);
+ GGML_API size_t ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft);
+ GGML_API size_t ggml_backend_buft_get_alloc_size (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor);
+ GGML_API bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend);
+ GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft);
// buffer
- GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer);
- GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer);
- GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer);
- GGML_API void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
- GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
- GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
- GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value);
- GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer);
- GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_type(ggml_backend_buffer_t buffer);
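+ // buffer usage hint: marking a buffer as WEIGHTS lets the scheduler keep the operations
+ // that use those weights on the backend that owns the buffer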
+ enum ggml_backend_buffer_usage {
+ GGML_BACKEND_BUFFER_USAGE_ANY = 0,
+ GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1,
+ };
+
+ GGML_API const char * ggml_backend_buffer_name (ggml_backend_buffer_t buffer);
+ GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer);
+ GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer);
+ GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer);
+ GGML_API void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+ GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
+ GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+ GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value);
+ GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer);
+ GGML_API void ggml_backend_buffer_set_usage (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage);
+ GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_get_type (ggml_backend_buffer_t buffer);
+ GGML_API void ggml_backend_buffer_reset (ggml_backend_buffer_t buffer);
//
// Backend
typedef struct ggml_backend_sched * ggml_backend_sched_t;
// Initialize a backend scheduler
- GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends);
-
- GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
-
+ GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size);
+ GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
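+ // Example (illustrative sketch; the backend handles are placeholders): a scheduler over two
+ // backends using their default buffer types (bufts may be NULL):
+ //     ggml_backend_t backends[2] = { gpu_backend, cpu_backend };
+ //     ggml_backend_sched_t sched = ggml_backend_sched_new(backends, NULL, 2, GGML_DEFAULT_GRAPH_SIZE);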
// Initialize backend buffers from a measure graph
- GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);
+ GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);
+ // Get the number of splits of the last graph
+ GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched);
GGML_API ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend);
GGML_API ggml_backend_buffer_t ggml_backend_sched_get_buffer (ggml_backend_sched_t sched, ggml_backend_t backend);
- GGML_API void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
+ GGML_API void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
+ GGML_API ggml_backend_t ggml_backend_sched_get_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node);
- // Allocate a graph on the backend scheduler
- GGML_API void ggml_backend_sched_graph_compute(
- ggml_backend_sched_t sched,
- struct ggml_cgraph * graph);
+ // Allocate and compute graph on the backend scheduler
+ GGML_API void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
+ // Reset all assignments and allocators - must be called before using the sched allocators to allocate inputs
+ GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched);
//
// Utils
typedef bool (*ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data);
// Compare the output of two backends
- GGML_API void ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data);
+ GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data);
// Tensor initialization
GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr);
#include <limits>
#include <stdint.h>
#include <stdio.h>
+#include <string>
#include <vector>
-
+#include <map>
+#include <array>
+#include "ggml-cuda.h"
+#include "ggml.h"
+#include "ggml-backend-impl.h"
#if defined(GGML_USE_HIPBLAS)
#include <hip/hip_runtime.h>
#define cudaMemcpyKind hipMemcpyKind
#define cudaMemset hipMemset
#define cudaMemsetAsync hipMemsetAsync
+#define cudaMemGetInfo hipMemGetInfo
#define cudaOccupancyMaxPotentialBlockSize hipOccupancyMaxPotentialBlockSize
#define cudaSetDevice hipSetDevice
#define cudaStreamCreateWithFlags hipStreamCreateWithFlags
#endif // defined(GGML_USE_HIPBLAS)
-#include "ggml-cuda.h"
-#include "ggml.h"
-#include "ggml-backend-impl.h"
-
#define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
#define CC_PASCAL 600
static int g_device_count = -1;
static int g_main_device = 0;
-static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0};
+static std::array<float, GGML_CUDA_MAX_DEVICES> g_default_tensor_split = {};
struct cuda_device_capabilities {
int cc; // compute capability
static cuda_device_capabilities g_device_caps[GGML_CUDA_MAX_DEVICES] = { {0, 0, false, 0} };
-static void * g_scratch_buffer = nullptr;
-static size_t g_scratch_size = 0; // disabled by default
-static size_t g_scratch_offset = 0;
-
static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};
[[noreturn]]
CUDA_CHECK(cudaGetDeviceProperties(&prop, id));
fprintf(stderr, " Device %d: %s, compute capability %d.%d, VMM: %s\n", id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no");
- g_tensor_split[id] = total_vram;
+ g_default_tensor_split[id] = total_vram;
total_vram += prop.totalGlobalMem;
+
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
g_device_caps[id].cc = 100*prop.major + 10*prop.minor + CC_OFFSET_AMD;
#else
g_device_caps[id].smpb = prop.sharedMemPerBlock;
}
for (int id = 0; id < g_device_count; ++id) {
- g_tensor_split[id] /= total_vram;
+ g_default_tensor_split[id] /= total_vram;
}
for (int id = 0; id < g_device_count; ++id) {
}
}
-void ggml_cuda_set_tensor_split(const float * tensor_split) {
- if (tensor_split == nullptr) {
- return;
- }
- bool all_zero = true;
- for (int i = 0; i < g_device_count; ++i) {
- if (tensor_split[i] != 0.0f) {
- all_zero = false;
- break;
- }
- }
- if (all_zero) {
- return;
- }
- float split_sum = 0.0f;
- for (int i = 0; i < g_device_count; ++i) {
- g_tensor_split[i] = split_sum;
- split_sum += tensor_split[i];
- }
- for (int i = 0; i < g_device_count; ++i) {
- g_tensor_split[i] /= split_sum;
- }
-}
-
void * ggml_cuda_host_malloc(size_t size) {
if (getenv("GGML_CUDA_NO_PINNED") != nullptr) {
return nullptr;
(void) src1_ddf_i;
}
-static int64_t get_row_rounding(ggml_type type) {
+static int64_t get_row_rounding(ggml_type type, const std::array<float, GGML_CUDA_MAX_DEVICES> & tensor_split) {
int64_t min_compute_capability = INT_MAX;
int64_t max_compute_capability = INT_MIN;
for (int id = 0; id < g_device_count; ++id) {
- if (g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
+ if (tensor_split[id] < (id + 1 < g_device_count ? tensor_split[id + 1] : 1.0f)) {
if (min_compute_capability > g_device_caps[id].cc) {
min_compute_capability = g_device_caps[id].cc;
}
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
}
+static void get_row_split(int64_t * row_low, int64_t * row_high, const ggml_tensor * tensor, const std::array<float, GGML_CUDA_MAX_DEVICES> & tensor_split, int id) {
+ const int64_t nrows = ggml_nrows(tensor);
+ const int64_t rounding = get_row_rounding(tensor->type, tensor_split);
+
+ *row_low = id == 0 ? 0 : nrows*tensor_split[id];
+ *row_low -= *row_low % rounding;
+
+ if (id == g_device_count - 1) {
+ *row_high = nrows;
+ } else {
+ *row_high = nrows*tensor_split[id + 1];
+ *row_high -= *row_high % rounding;
+ }
+}
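As a worked example with illustrative numbers: for a tensor with 4096 rows, a rounding of 64, and a normalized tensor_split of {0.0, 0.6} (cumulative start fractions for two devices), device 0 is assigned rows [0, 2432), since 4096*0.6 truncates to 2457 and then rounds down to the nearest multiple of 64, and device 1 is assigned the remaining rows [2432, 4096).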
+
static void ggml_cuda_op_mul_mat_vec_q(
const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
peer_access_enabled = enable_peer_access;
}
+// FIXME: move this somewhere else
+struct ggml_backend_cuda_split_buffer_type_context {
+ std::array<float, GGML_CUDA_MAX_DEVICES> tensor_split;
+};
+
static void ggml_cuda_op_mul_mat(
const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, ggml_cuda_op_mul_mat_t op,
const bool convert_src1_to_q8_1) {
GGML_ASSERT(!(split && ne03 > 1));
GGML_ASSERT(!(split && ne02 < ne12));
+ std::array<float, GGML_CUDA_MAX_DEVICES> tensor_split;
+ if (split) {
+ // TODO: check that src0->buffer->buft is a split buffer type, replace GGML_BACKEND_GPU_SPLIT check
+ // GGML_ASSERT(src0->buffer != nullptr && src0->buffer->buft == ...);
+ ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) src0->buffer->buft->context;
+ tensor_split = buft_ctx->tensor_split;
+ }
+
struct dev_data {
cuda_pool_alloc<char> src0_dd_alloc;
cuda_pool_alloc<float> src1_ddf_alloc;
// for multi GPU, get the row boundaries from tensor split
// and round to mul_mat_q tile sizes
if (split) {
- const int64_t rounding = get_row_rounding(src0->type);
+ const int64_t rounding = get_row_rounding(src0->type, tensor_split);
if (id != 0) {
- dev[id].row_low = ne01*g_tensor_split[id];
+ dev[id].row_low = ne01*tensor_split[id];
if (dev[id].row_low < ne01) {
dev[id].row_low -= dev[id].row_low % rounding;
}
}
if (id != g_device_count - 1) {
- dev[id].row_high = ne01*g_tensor_split[id + 1];
+ dev[id].row_high = ne01*tensor_split[id + 1];
if (dev[id].row_high < ne01) {
dev[id].row_high -= dev[id].row_high % rounding;
}
const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT;
int64_t min_compute_capability = INT_MAX;
- for (int id = 0; id < g_device_count; ++id) {
- if (min_compute_capability > g_device_caps[id].cc && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
- min_compute_capability = g_device_caps[id].cc;
+
+ if (split) {
+ ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) src0->buffer->buft->context;
+ auto & tensor_split = buft_ctx->tensor_split;
+ for (int id = 0; id < g_device_count; ++id) {
+ if (min_compute_capability > g_device_caps[id].cc && tensor_split[id] < (id + 1 < g_device_count ? tensor_split[id + 1] : 1.0f)) {
+ min_compute_capability = g_device_caps[id].cc;
+ }
}
+ } else {
+ min_compute_capability = g_device_caps[g_main_device].cc;
}
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
} else if (!split && all_on_device && !fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
// KQV single-batch
ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
- } else if (!split && all_on_device && fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
+ } else if (!split && all_on_device && fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) {
// KQ + KQV multi-batch
ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst);
} else if (src0->type == GGML_TYPE_F32) {
return nrows_split*ggml_row_size(tensor->type, tensor->ne[0]);
}
-void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) {
- const int64_t nrows = ggml_nrows(tensor);
-
- const int64_t ne0 = tensor->ne[0];
-
- const size_t nb1 = tensor->nb[1];
-
- ggml_backend_type backend = tensor->backend;
- ggml_tensor_extra_gpu * extra = new struct ggml_tensor_extra_gpu;
- memset(extra, 0, sizeof(*extra));
-
- for (int id = 0; id < g_device_count; ++id) {
- if (backend == GGML_BACKEND_GPU && id != g_main_device) {
- continue;
- }
-
- ggml_cuda_set_device(id);
-
- int64_t row_low, row_high;
- if (backend == GGML_BACKEND_GPU) {
- row_low = 0;
- row_high = nrows;
- } else if (backend == GGML_BACKEND_GPU_SPLIT) {
- const int64_t rounding = get_row_rounding(tensor->type);
-
- row_low = id == 0 ? 0 : nrows*g_tensor_split[id];
- row_low -= row_low % rounding;
-
- if (id == g_device_count - 1) {
- row_high = nrows;
- } else {
- row_high = nrows*g_tensor_split[id + 1];
- row_high -= row_high % rounding;
- }
- } else {
- GGML_ASSERT(false);
- }
- if (row_low == row_high) {
- continue;
- }
-
- int64_t nrows_split = row_high - row_low;
-
- const size_t offset_split = row_low*nb1;
- size_t size = ggml_nbytes_split(tensor, nrows_split);
- const size_t original_size = size;
-
- // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
- if (ne0 % MATRIX_ROW_PADDING != 0) {
- size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
- }
-
- char * buf;
- CUDA_CHECK(cudaMalloc(&buf, size));
- char * buf_host = (char *)data + offset_split;
-
- // set padding to 0 to avoid possible NaN values
- if (size > original_size) {
- CUDA_CHECK(cudaMemset(buf + original_size, 0, size - original_size));
- }
-
- CUDA_CHECK(cudaMemcpy(buf, buf_host, original_size, cudaMemcpyHostToDevice));
-
- extra->data_device[id] = buf;
-
- if (backend == GGML_BACKEND_GPU_SPLIT) {
- for (int64_t is = 0; is < MAX_STREAMS; ++is) {
- CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id][is], cudaEventDisableTiming));
- }
- }
- }
-
- tensor->extra = extra;
-}
-
-void ggml_cuda_free_data(struct ggml_tensor * tensor) {
- if (!tensor || !tensor->extra || (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) ) {
- return;
- }
-
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
-
- for (int id = 0; id < g_device_count; ++id) {
- ggml_cuda_set_device(id);
- if (extra->data_device[id] != nullptr) {
- CUDA_CHECK(cudaFree(extra->data_device[id]));
- }
-
- for (int64_t is = 0; is < MAX_STREAMS; ++is) {
- if (extra->events[id][is] != nullptr) {
- CUDA_CHECK(cudaEventDestroy(extra->events[id][is]));
- }
- }
- }
-
- delete extra;
-}
-
-static ggml_tensor_extra_gpu * g_temp_tensor_extras = nullptr;
-static size_t g_temp_tensor_extra_index = 0;
-
-static ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() {
- if (g_temp_tensor_extras == nullptr) {
- g_temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_CUDA_MAX_NODES];
- }
-
- size_t alloc_index = g_temp_tensor_extra_index;
- g_temp_tensor_extra_index = (g_temp_tensor_extra_index + 1) % GGML_CUDA_MAX_NODES;
- ggml_tensor_extra_gpu * extra = &g_temp_tensor_extras[alloc_index];
- memset(extra, 0, sizeof(*extra));
-
- return extra;
-}
-
-static void ggml_cuda_assign_buffers_impl(struct ggml_tensor * tensor, bool scratch, bool force_inplace, bool no_alloc) {
- if (scratch && g_scratch_size == 0) {
- return;
- }
-
- tensor->backend = GGML_BACKEND_GPU;
-
- // recursively assign CUDA buffers until a compute tensor is found
- if (tensor->src[0] != nullptr && tensor->src[0]->backend == GGML_BACKEND_CPU) {
- const ggml_op src0_op = tensor->src[0]->op;
- if (src0_op == GGML_OP_RESHAPE || src0_op == GGML_OP_TRANSPOSE || src0_op == GGML_OP_VIEW || src0_op == GGML_OP_PERMUTE) {
- ggml_cuda_assign_buffers_impl(tensor->src[0], scratch, force_inplace, no_alloc);
- }
- }
- if (tensor->op == GGML_OP_CPY && tensor->src[1]->backend == GGML_BACKEND_CPU) {
- ggml_cuda_assign_buffers_impl(tensor->src[1], scratch, force_inplace, no_alloc);
- }
-
- if (scratch && no_alloc) {
- return;
- }
-
- ggml_tensor_extra_gpu * extra;
-
- const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) ||
- tensor->op == GGML_OP_VIEW ||
- force_inplace;
- const size_t size = ggml_nbytes(tensor);
-
- ggml_cuda_set_device(g_main_device);
- if (inplace && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) {
- ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra;
- char * src0_ddc = (char *) src0_extra->data_device[g_main_device];
- size_t offset = 0;
- if (tensor->op == GGML_OP_VIEW) {
- memcpy(&offset, tensor->op_params, sizeof(size_t));
- }
- extra = ggml_cuda_alloc_temp_tensor_extra();
- extra->data_device[g_main_device] = src0_ddc + offset;
- } else if (tensor->op == GGML_OP_CPY) {
- ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu * ) tensor->src[1]->extra;
- void * src1_ddv = src1_extra->data_device[g_main_device];
- extra = ggml_cuda_alloc_temp_tensor_extra();
- extra->data_device[g_main_device] = src1_ddv;
- } else if (scratch) {
- GGML_ASSERT(size <= g_scratch_size);
- if (g_scratch_offset + size > g_scratch_size) {
- g_scratch_offset = 0;
- }
-
- char * data = (char *) g_scratch_buffer;
- if (data == nullptr) {
- CUDA_CHECK(cudaMalloc(&data, g_scratch_size));
- g_scratch_buffer = data;
- }
- extra = ggml_cuda_alloc_temp_tensor_extra();
- extra->data_device[g_main_device] = data + g_scratch_offset;
-
- g_scratch_offset += size;
-
- GGML_ASSERT(g_scratch_offset <= g_scratch_size);
- } else { // allocate new buffers outside of scratch
- void * data;
- CUDA_CHECK(cudaMalloc(&data, size));
- CUDA_CHECK(cudaMemset(data, 0, size));
- extra = new ggml_tensor_extra_gpu;
- memset(extra, 0, sizeof(*extra));
- extra->data_device[g_main_device] = data;
- }
-
- tensor->extra = extra;
-}
-
-void ggml_cuda_assign_scratch_offset(struct ggml_tensor * tensor, size_t offset) {
- if (g_scratch_size == 0) {
- return;
- }
- if (g_scratch_buffer == nullptr) {
- ggml_cuda_set_device(g_main_device);
- CUDA_CHECK(cudaMalloc(&g_scratch_buffer, g_scratch_size));
- }
-
- ggml_tensor_extra_gpu * extra = ggml_cuda_alloc_temp_tensor_extra();
-
- const bool inplace = tensor->view_src != nullptr;
-
- if (inplace && (tensor->view_src->backend == GGML_BACKEND_GPU || tensor->view_src->backend == GGML_BACKEND_GPU_SPLIT)) {
- ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->view_src->extra;
- char * src0_ddc = (char *) src0_extra->data_device[g_main_device];
- size_t view_offset = 0;
- if (tensor->op == GGML_OP_VIEW) {
- memcpy(&view_offset, tensor->op_params, sizeof(size_t));
- }
- extra->data_device[g_main_device] = src0_ddc + view_offset;
- } else {
- extra->data_device[g_main_device] = (char *) g_scratch_buffer + offset;
- }
-
- tensor->extra = extra;
-}
-
-void ggml_cuda_copy_to_device(struct ggml_tensor * tensor) {
- GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU);
- GGML_ASSERT(ggml_is_contiguous(tensor));
-
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
- ggml_cuda_set_device(g_main_device);
- CUDA_CHECK(cudaMemcpy(extra->data_device[g_main_device], tensor->data, ggml_nbytes(tensor), cudaMemcpyHostToDevice));
-}
-
-void ggml_cuda_assign_buffers(struct ggml_tensor * tensor) {
- ggml_cuda_assign_buffers_impl(tensor, true, false, false);
-}
-
-void ggml_cuda_assign_buffers_no_alloc(struct ggml_tensor * tensor) {
- ggml_cuda_assign_buffers_impl(tensor, true, false, true);
-}
-
-void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor) {
- ggml_cuda_assign_buffers_impl(tensor, false, false, false);
-}
-
-void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor) {
- ggml_cuda_assign_buffers_impl(tensor, false, true, false);
-}
-
-void ggml_cuda_set_main_device(const int main_device) {
+static void ggml_cuda_set_main_device(const int main_device) {
if (main_device >= g_device_count) {
fprintf(stderr, "warning: cannot set main_device=%d because there are only %d devices. Using device %d instead.\n",
main_device, g_device_count, g_main_device);
if (g_main_device != main_device && g_device_count > 1) {
g_main_device = main_device;
- cudaDeviceProp prop;
- CUDA_CHECK(cudaGetDeviceProperties(&prop, g_main_device));
- fprintf(stderr, "%s: using device %d (%s) as main device\n", __func__, g_main_device, prop.name);
- }
-}
-
-void ggml_cuda_set_scratch_size(const size_t scratch_size) {
- // this is a hack to not completely break llama.cpp when using multiple models or contexts simultaneously
- // it still won't always work as expected, but it's better than nothing
- if (scratch_size > g_scratch_size) {
- ggml_cuda_free_scratch();
- }
- g_scratch_size = std::max(g_scratch_size, scratch_size);
-}
-
-void ggml_cuda_free_scratch() {
- if (g_scratch_buffer == nullptr) {
- return;
+ //cudaDeviceProp prop;
+ //CUDA_CHECK(cudaGetDeviceProperties(&prop, g_main_device));
+ //fprintf(stderr, "%s: using device %d (%s) as main device\n", __func__, g_main_device, prop.name);
}
-
- CUDA_CHECK(cudaFree(g_scratch_buffer));
- g_scratch_buffer = nullptr;
}
bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
#define UNUSED GGML_UNUSED
+struct ggml_backend_cuda_context {
+ int device;
+ std::string name;
+};
+
// cuda buffer
-struct ggml_backend_buffer_context_cuda {
+struct ggml_backend_cuda_buffer_context {
int device;
void * dev_ptr = nullptr;
ggml_tensor_extra_gpu * temp_tensor_extras = nullptr;
size_t temp_tensor_extra_index = 0;
+ std::string name;
- ggml_backend_buffer_context_cuda(int device, void * dev_ptr) : device(device), dev_ptr(dev_ptr) {}
+ ggml_backend_cuda_buffer_context(int device, void * dev_ptr) :
+ device(device), dev_ptr(dev_ptr),
+ name(GGML_CUDA_NAME + std::to_string(device)) {
+ }
- ~ggml_backend_buffer_context_cuda() {
+ ~ggml_backend_cuda_buffer_context() {
delete[] temp_tensor_extras;
}
ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() {
+ // TODO: remove GGML_CUDA_MAX_NODES, allocate dynamically and reuse in backend_buffer_reset
if (temp_tensor_extras == nullptr) {
temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_CUDA_MAX_NODES];
}
}
};
+static const char * ggml_backend_cuda_buffer_get_name(ggml_backend_buffer_t buffer) {
+ ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context;
+ return ctx->name.c_str();
+}
+
+static bool ggml_backend_buffer_is_cuda(ggml_backend_buffer_t buffer) {
+ return buffer->iface.get_name == ggml_backend_cuda_buffer_get_name;
+}
+
static void ggml_backend_cuda_buffer_free_buffer(ggml_backend_buffer_t buffer) {
- ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context;
+ ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context;
CUDA_CHECK(cudaFree(ctx->dev_ptr));
delete ctx;
}
static void * ggml_backend_cuda_buffer_get_base(ggml_backend_buffer_t buffer) {
- ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context;
+ ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context;
return ctx->dev_ptr;
}
static void ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
- ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context;
+ ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context;
if (tensor->view_src != NULL && tensor->view_offs == 0) {
assert(tensor->view_src->buffer->buft == buffer->buft);
CUDA_CHECK(cudaMemsetAsync((char *)tensor->data + original_size, 0, padded_size - original_size, g_cudaStreams[ctx->device][0]));
}
}
-
- UNUSED(buffer);
}
static void ggml_backend_cuda_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU);
- ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context;
+ ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context;
ggml_cuda_set_device(ctx->device);
CUDA_CHECK(cudaDeviceSynchronize());
static void ggml_backend_cuda_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU);
- ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context;
+ ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context;
ggml_cuda_set_device(ctx->device);
CUDA_CHECK(cudaDeviceSynchronize());
-
CUDA_CHECK(cudaMemcpy(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost));
+ CUDA_CHECK(cudaDeviceSynchronize());
+}
+
+static bool ggml_backend_cuda_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
+ if (ggml_backend_buffer_is_cuda(src->buffer)) {
+ ggml_backend_cuda_buffer_context * src_ctx = (ggml_backend_cuda_buffer_context *)src->buffer->context;
+ ggml_backend_cuda_buffer_context * dst_ctx = (ggml_backend_cuda_buffer_context *)buffer->context;
+
+ ggml_cuda_set_device(src_ctx->device);
+ CUDA_CHECK(cudaDeviceSynchronize());
+ ggml_cuda_set_device(dst_ctx->device);
+ CUDA_CHECK(cudaDeviceSynchronize());
+ CUDA_CHECK(cudaMemcpy((char *)dst->data, (const char *)src->data, ggml_nbytes(src), cudaMemcpyDeviceToDevice));
+ CUDA_CHECK(cudaDeviceSynchronize());
+
+ return true;
+ }
+ return false;
}
static void ggml_backend_cuda_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
- ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context;
+ ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context;
ggml_cuda_set_device(ctx->device);
CUDA_CHECK(cudaDeviceSynchronize());
-
CUDA_CHECK(cudaMemset(ctx->dev_ptr, value, buffer->size));
+ CUDA_CHECK(cudaDeviceSynchronize());
}
-static struct ggml_backend_buffer_i cuda_backend_buffer_interface = {
+static ggml_backend_buffer_i ggml_backend_cuda_buffer_interface = {
+ /* .get_name = */ ggml_backend_cuda_buffer_get_name,
/* .free_buffer = */ ggml_backend_cuda_buffer_free_buffer,
/* .get_base = */ ggml_backend_cuda_buffer_get_base,
/* .init_tensor = */ ggml_backend_cuda_buffer_init_tensor,
/* .set_tensor = */ ggml_backend_cuda_buffer_set_tensor,
/* .get_tensor = */ ggml_backend_cuda_buffer_get_tensor,
- /* .cpy_tensor_from = */ NULL,
- /* .cpy_tensor_to = */ NULL,
+ /* .cpy_tensor = */ ggml_backend_cuda_buffer_cpy_tensor,
/* .clear = */ ggml_backend_cuda_buffer_clear,
+ /* .reset = */ NULL,
};
// cuda buffer type
+struct ggml_backend_cuda_buffer_type_context {
+ int device;
+ std::string name;
+};
+
+static const char * ggml_backend_cuda_buffer_type_name(ggml_backend_buffer_type_t buft) {
+ ggml_backend_cuda_buffer_type_context * ctx = (ggml_backend_cuda_buffer_type_context *)buft->context;
+
+ return ctx->name.c_str();
+}
+
static ggml_backend_buffer_t ggml_backend_cuda_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
- int device = (int) (intptr_t) buft->context;
+ ggml_backend_cuda_buffer_type_context * buft_ctx = (ggml_backend_cuda_buffer_type_context *)buft->context;
- ggml_cuda_set_device(device);
+ ggml_cuda_set_device(buft_ctx->device);
size = std::max(size, (size_t)1); // cudaMalloc returns null for size 0
void * dev_ptr;
- CUDA_CHECK(cudaMalloc(&dev_ptr, size));
+ cudaError_t err = cudaMalloc(&dev_ptr, size);
+ if (err != cudaSuccess) {
+ fprintf(stderr, "%s: allocating %.2f MiB on device %d: cudaMalloc failed: %s\n", __func__, size/1024.0/1024.0, buft_ctx->device, cudaGetErrorString(err));
+ return nullptr;
+ }
- ggml_backend_buffer_context_cuda * ctx = new ggml_backend_buffer_context_cuda(device, dev_ptr);
+ ggml_backend_cuda_buffer_context * ctx = new ggml_backend_cuda_buffer_context(buft_ctx->device, dev_ptr);
- return ggml_backend_buffer_init(buft, cuda_backend_buffer_interface, ctx, size);
+ return ggml_backend_buffer_init(buft, ggml_backend_cuda_buffer_interface, ctx, size);
}
static size_t ggml_backend_cuda_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
UNUSED(buft);
}
-static size_t ggml_backend_cuda_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, ggml_tensor * tensor) {
+static size_t ggml_backend_cuda_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
int64_t row_low = 0;
int64_t row_high = ggml_nrows(tensor);
int64_t nrows_split = row_high - row_low;
}
static bool ggml_backend_cuda_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
- return ggml_backend_is_cuda(backend);
+ if (!ggml_backend_is_cuda(backend)) {
+ return false;
+ }
- UNUSED(buft);
+ ggml_backend_cuda_buffer_type_context * buft_ctx = (ggml_backend_cuda_buffer_type_context *)buft->context;
+ ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
+
+ return buft_ctx->device == cuda_ctx->device;
}
static ggml_backend_buffer_type_i ggml_backend_cuda_buffer_type_interface = {
+ /* .get_name = */ ggml_backend_cuda_buffer_type_name,
/* .alloc_buffer = */ ggml_backend_cuda_buffer_type_alloc_buffer,
/* .get_alignment = */ ggml_backend_cuda_buffer_type_get_alignment,
/* .get_alloc_size = */ ggml_backend_cuda_buffer_type_get_alloc_size,
/* .supports_backend = */ ggml_backend_cuda_buffer_type_supports_backend,
- /* .is_host = */ nullptr,
+ /* .is_host = */ NULL,
};
ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) {
- static struct ggml_backend_buffer_type ggml_backend_cuda_buffer_types[GGML_CUDA_MAX_DEVICES];
+ // FIXME: this is not thread safe
+ if (device >= ggml_backend_cuda_get_device_count()) {
+ return nullptr;
+ }
+
+ static ggml_backend_buffer_type ggml_backend_cuda_buffer_types[GGML_CUDA_MAX_DEVICES];
static bool ggml_backend_cuda_buffer_type_initialized = false;
for (int i = 0; i < GGML_CUDA_MAX_DEVICES; i++) {
ggml_backend_cuda_buffer_types[i] = {
/* .iface = */ ggml_backend_cuda_buffer_type_interface,
- /* .context = */ (ggml_backend_buffer_type_context_t) (intptr_t) i,
+ /* .context = */ new ggml_backend_cuda_buffer_type_context{i, GGML_CUDA_NAME + std::to_string(i)},
};
}
ggml_backend_cuda_buffer_type_initialized = true;
return &ggml_backend_cuda_buffer_types[device];
}
+// cuda split buffer
+
+struct ggml_backend_cuda_split_buffer_context {
+ ~ggml_backend_cuda_split_buffer_context() {
+ for (ggml_tensor_extra_gpu * extra : tensor_extras) {
+ for (int id = 0; id < g_device_count; ++id) {
+ for (int64_t is = 0; is < MAX_STREAMS; ++is) {
+ if (extra->events[id][is] != nullptr) {
+ CUDA_CHECK(cudaEventDestroy(extra->events[id][is]));
+ }
+ }
+ if (extra->data_device[id] != nullptr) {
+ CUDA_CHECK(cudaFree(extra->data_device[id]));
+ }
+ }
+ delete extra;
+ }
+ }
+
+ std::vector<ggml_tensor_extra_gpu *> tensor_extras;
+};
+
+static const char * ggml_backend_cuda_split_buffer_get_name(ggml_backend_buffer_t buffer) {
+ return GGML_CUDA_NAME "_Split";
+
+ UNUSED(buffer);
+}
+
+// unused at the moment
+//static bool ggml_backend_buffer_is_cuda_split(ggml_backend_buffer_t buffer) {
+// return buffer->iface.get_name == ggml_backend_cuda_split_buffer_get_name;
+//}
+
+static void ggml_backend_cuda_split_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+ ggml_backend_cuda_split_buffer_context * ctx = (ggml_backend_cuda_split_buffer_context *)buffer->context;
+ delete ctx;
+}
+
+static void * ggml_backend_cuda_split_buffer_get_base(ggml_backend_buffer_t buffer) {
+    // the pointers are stored in the tensor extras; this is just a dummy address that is never dereferenced
+ return (void *)0x1000;
+
+ UNUSED(buffer);
+}
+
+static void ggml_backend_cuda_split_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
+ GGML_ASSERT(tensor->view_src == nullptr); // views of split tensors are not supported
+
+ ggml_backend_cuda_split_buffer_context * ctx = (ggml_backend_cuda_split_buffer_context *)buffer->context;
+ ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context;
+
+ const int64_t ne0 = tensor->ne[0];
+
+ ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu{};
+
+ ctx->tensor_extras.push_back(extra);
+
+ for (int id = 0; id < g_device_count; ++id) {
+ int64_t row_low, row_high;
+ get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, id);
+
+ int64_t nrows_split = row_high - row_low;
+ if (nrows_split == 0) {
+ continue;
+ }
+
+ size_t size = ggml_nbytes_split(tensor, nrows_split);
+ const size_t original_size = size;
+
+ // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
+ if (ne0 % MATRIX_ROW_PADDING != 0) {
+ size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
+ }
+
+ // FIXME: do not crash if cudaMalloc fails
+    // currently, init_tensor cannot fail; this needs to be fixed in ggml-backend first
+ ggml_cuda_set_device(id);
+ char * buf;
+ CUDA_CHECK(cudaMalloc(&buf, size));
+
+ // set padding to 0 to avoid possible NaN values
+ if (size > original_size) {
+ CUDA_CHECK(cudaMemset(buf + original_size, 0, size - original_size));
+ }
+
+ extra->data_device[id] = buf;
+
+ for (int64_t is = 0; is < MAX_STREAMS; ++is) {
+ CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id][is], cudaEventDisableTiming));
+ }
+ }
+ tensor->backend = GGML_BACKEND_GPU_SPLIT;
+ tensor->extra = extra;
+}
+
+static void ggml_backend_cuda_split_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ // split tensors must always be set in their entirety at once
+ GGML_ASSERT(offset == 0);
+ GGML_ASSERT(size == ggml_nbytes(tensor));
+
+ ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context;
+
+ const int64_t ne0 = tensor->ne[0];
+ const size_t nb1 = tensor->nb[1];
+ ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *)tensor->extra;
+
+ for (int id = 0; id < g_device_count; ++id) {
+ int64_t row_low, row_high;
+ get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, id);
+
+ int64_t nrows_split = row_high - row_low;
+ if (nrows_split == 0) {
+ continue;
+ }
+
+ const size_t offset_split = row_low*nb1;
+ size_t size = ggml_nbytes_split(tensor, nrows_split);
+ const size_t original_size = size;
+
+ // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
+ if (ne0 % MATRIX_ROW_PADDING != 0) {
+ size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
+ }
+
+ const char * buf_host = (const char *)data + offset_split;
+ CUDA_CHECK(cudaMemcpy(extra->data_device[id], buf_host, original_size, cudaMemcpyHostToDevice));
+ }
+}
+
+static void ggml_backend_cuda_split_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+    // split tensors must always be read in their entirety at once
+ GGML_ASSERT(offset == 0);
+ GGML_ASSERT(size == ggml_nbytes(tensor));
+
+ ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context;
+
+ const int64_t ne0 = tensor->ne[0];
+ const size_t nb1 = tensor->nb[1];
+ ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *)tensor->extra;
+
+ for (int id = 0; id < g_device_count; ++id) {
+ int64_t row_low, row_high;
+ get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, id);
+
+ int64_t nrows_split = row_high - row_low;
+ if (nrows_split == 0) {
+ continue;
+ }
+
+ const size_t offset_split = row_low*nb1;
+ size_t size = ggml_nbytes_split(tensor, nrows_split);
+ const size_t original_size = size;
+
+ // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
+ if (ne0 % MATRIX_ROW_PADDING != 0) {
+ size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
+ }
+
+ char * buf_host = (char *)data + offset_split;
+ CUDA_CHECK(cudaMemcpy(buf_host, extra->data_device[id], original_size, cudaMemcpyDeviceToHost));
+ }
+}
+
+static void ggml_backend_cuda_split_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
+ UNUSED(buffer);
+ UNUSED(value);
+}
+
+static struct ggml_backend_buffer_i ggml_backend_cuda_split_buffer_interface = {
+ /* .get_name = */ ggml_backend_cuda_split_buffer_get_name,
+ /* .free_buffer = */ ggml_backend_cuda_split_buffer_free_buffer,
+ /* .get_base = */ ggml_backend_cuda_split_buffer_get_base,
+ /* .init_tensor = */ ggml_backend_cuda_split_buffer_init_tensor,
+ /* .set_tensor = */ ggml_backend_cuda_split_buffer_set_tensor,
+ /* .get_tensor = */ ggml_backend_cuda_split_buffer_get_tensor,
+ /* .cpy_tensor = */ NULL,
+ /* .clear = */ ggml_backend_cuda_split_buffer_clear,
+ /* .reset = */ NULL,
+};
+
+// cuda split buffer type
+
+static const char * ggml_backend_cuda_split_buffer_type_name(ggml_backend_buffer_type_t buft) {
+ return GGML_CUDA_NAME "_Split";
+
+ UNUSED(buft);
+}
+
+static ggml_backend_buffer_t ggml_backend_cuda_split_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+ // since we don't know the exact split after rounding, we cannot allocate the device buffers at this point
+ // instead, we allocate them for each tensor separately in init_tensor
+ // however, the size still represents the maximum cumulative size of all the device buffers after the tensors are allocated,
+ // as returned by get_alloc_size. this limit is enforced during tensor allocation by ggml-alloc, so it must be correct.
+ ggml_backend_cuda_split_buffer_context * ctx = new ggml_backend_cuda_split_buffer_context();
+
+ return ggml_backend_buffer_init(buft, ggml_backend_cuda_split_buffer_interface, ctx, size);
+}
+
+static size_t ggml_backend_cuda_split_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
+ return 128;
+
+ UNUSED(buft);
+}
+
+static size_t ggml_backend_cuda_split_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+ ggml_backend_cuda_split_buffer_type_context * ctx = (ggml_backend_cuda_split_buffer_type_context *)buft->context;
+
+ size_t total_size = 0;
+
+ const int64_t ne0 = tensor->ne[0];
+
+ for (int id = 0; id < g_device_count; ++id) {
+ int64_t row_low, row_high;
+ get_row_split(&row_low, &row_high, tensor, ctx->tensor_split, id);
+
+ int64_t nrows_split = row_high - row_low;
+ if (nrows_split == 0) {
+ continue;
+ }
+
+ total_size += ggml_nbytes_split(tensor, nrows_split);
+
+ // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
+ if (ne0 % MATRIX_ROW_PADDING != 0) {
+ total_size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
+ }
+ }
+
+ return total_size;
+}
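For instance, with illustrative numbers: a Q4_0 tensor with ne[0] = 11008 has 11008 % 512 = 256 trailing elements in its last row, so each participating device reports ggml_nbytes_split(tensor, nrows_split) plus ggml_row_size(GGML_TYPE_Q4_0, 512 - 256) = 144 bytes of padding.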
+
+static bool ggml_backend_cuda_split_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
+ return ggml_backend_is_cuda(backend);
+
+ UNUSED(buft);
+}
+
+static bool ggml_backend_cuda_split_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
+ return false;
+
+ UNUSED(buft);
+}
+
+static ggml_backend_buffer_type_i ggml_backend_cuda_split_buffer_type_interface = {
+ /* .get_name = */ ggml_backend_cuda_split_buffer_type_name,
+ /* .alloc_buffer = */ ggml_backend_cuda_split_buffer_type_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_cuda_split_buffer_type_get_alignment,
+ /* .get_alloc_size = */ ggml_backend_cuda_split_buffer_type_get_alloc_size,
+ /* .supports_backend = */ ggml_backend_cuda_split_buffer_type_supports_backend,
+ /* .is_host = */ ggml_backend_cuda_split_buffer_type_is_host,
+};
+
+ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * tensor_split) {
+ // FIXME: this is not thread safe
+ static std::map<std::array<float, GGML_CUDA_MAX_DEVICES>, struct ggml_backend_buffer_type> buft_map;
+
+ std::array<float, GGML_CUDA_MAX_DEVICES> tensor_split_arr = {};
+
+ bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + GGML_CUDA_MAX_DEVICES, [](float x) { return x == 0.0f; });
+ if (all_zero) {
+ tensor_split_arr = g_default_tensor_split;
+ } else {
+ float split_sum = 0.0f;
+ for (int i = 0; i < g_device_count; ++i) {
+ tensor_split_arr[i] = split_sum;
+ split_sum += tensor_split[i];
+ }
+ for (int i = 0; i < g_device_count; ++i) {
+ tensor_split_arr[i] /= split_sum;
+ }
+ }
+
+ auto it = buft_map.find(tensor_split_arr);
+ if (it != buft_map.end()) {
+ return &it->second;
+ }
+
+ struct ggml_backend_buffer_type buft {
+ /* .iface = */ ggml_backend_cuda_split_buffer_type_interface,
+ /* .context = */ new ggml_backend_cuda_split_buffer_type_context{tensor_split_arr},
+ };
+
+ auto result = buft_map.emplace(tensor_split_arr, buft);
+ return &result.first->second;
+}
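A minimal usage sketch for the split buffer type, assuming two devices; the fractions are placeholders:

    // illustrative only: put 25% of each weight matrix on device 0 and 75% on device 1
    float tensor_split[GGML_CUDA_MAX_DEVICES] = { 0.25f, 0.75f };
    ggml_backend_buffer_type_t split_buft = ggml_backend_cuda_split_buffer_type(tensor_split);
    // passing NULL (or an all-zero array) falls back to the default split,
    // which is proportional to each device's total VRAM

Since the buffer types are cached by their normalized split, calling the function again with the same fractions returns the same ggml_backend_buffer_type pointer.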
+
// host buffer type
+static const char * ggml_backend_cuda_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
+ return GGML_CUDA_NAME "_Host";
+
+ UNUSED(buft);
+}
+
+static const char * ggml_backend_cuda_host_buffer_name(ggml_backend_buffer_t buffer) {
+ return GGML_CUDA_NAME "_Host";
+
+ UNUSED(buffer);
+}
+
static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
ggml_cuda_host_free(buffer->context);
}
return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
}
- // FIXME: this is a hack to avoid having to implement a new buffer type
ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
buffer->buft = buft;
+ buffer->iface.get_name = ggml_backend_cuda_host_buffer_name;
buffer->iface.free_buffer = ggml_backend_cuda_host_buffer_free_buffer;
return buffer;
ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type() {
static struct ggml_backend_buffer_type ggml_backend_cuda_buffer_type_host = {
/* .iface = */ {
+ /* .get_name = */ ggml_backend_cuda_host_buffer_type_name,
/* .alloc_buffer = */ ggml_backend_cuda_host_buffer_type_alloc_buffer,
/* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment,
/* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
// backend
-struct ggml_backend_context_cuda {
- int device;
-};
-
static const char * ggml_backend_cuda_name(ggml_backend_t backend) {
- return GGML_CUDA_NAME;
+ ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
- UNUSED(backend);
+ return cuda_ctx->name.c_str();
}
static void ggml_backend_cuda_free(ggml_backend_t backend) {
- ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context;
+ ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
delete cuda_ctx;
delete backend;
}
static ggml_backend_buffer_type_t ggml_backend_cuda_get_default_buffer_type(ggml_backend_t backend) {
- ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context;
+ ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
return ggml_backend_cuda_buffer_type(cuda_ctx->device);
}
static void ggml_backend_cuda_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
- ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context;
+ ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
GGML_ASSERT(tensor->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) && "unsupported buffer type");
GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU);
}
static void ggml_backend_cuda_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
- ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context;
+ ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
GGML_ASSERT(tensor->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) && "unsupported buffer type");
GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU);
CUDA_CHECK(cudaMemcpyAsync(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost, g_cudaStreams[cuda_ctx->device][0]));
}
-static void ggml_backend_cuda_synchronize(ggml_backend_t backend) {
- ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context;
-
- CUDA_CHECK(cudaStreamSynchronize(g_cudaStreams[cuda_ctx->device][0]));
-
- UNUSED(backend);
-}
-
-static ggml_backend_graph_plan_t ggml_backend_cuda_graph_plan_create(ggml_backend_t backend, ggml_cgraph * cgraph) {
- GGML_ASSERT(!"not implemented");
+static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) {
+ ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
- return nullptr;
+ if (dst->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) && ggml_backend_buffer_is_cuda(src->buffer)) {
+ CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyDeviceToDevice, g_cudaStreams[cuda_ctx->device][0]));
+ return true;
+ }
- UNUSED(backend);
- UNUSED(cgraph);
+ return false;
}
-static void ggml_backend_cuda_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
- GGML_ASSERT(!"not implemented");
-
- UNUSED(backend);
- UNUSED(plan);
-}
+static void ggml_backend_cuda_synchronize(ggml_backend_t backend) {
+ ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
-static void ggml_backend_cuda_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
- GGML_ASSERT(!"not implemented");
+ CUDA_CHECK(cudaStreamSynchronize(g_cudaStreams[cuda_ctx->device][0]));
UNUSED(backend);
- UNUSED(plan);
}
static bool ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
- ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context;
+ ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
ggml_cuda_set_main_device(cuda_ctx->device);
for (int i = 0; i < cgraph->n_nodes; i++) {
ggml_tensor * node = cgraph->nodes[i];
- if (node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE)
+ if (node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) {
continue;
+ }
- assert(node->backend == GGML_BACKEND_GPU);
+#ifndef NDEBUG
+ assert(node->backend == GGML_BACKEND_GPU || node->backend == GGML_BACKEND_GPU_SPLIT);
assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device));
assert(node->extra != nullptr);
for (int j = 0; j < GGML_MAX_SRC; j++) {
if (node->src[j] != nullptr) {
- assert(node->src[j]->backend == GGML_BACKEND_GPU);
+ assert(node->src[j]->backend == GGML_BACKEND_GPU || node->src[j]->backend == GGML_BACKEND_GPU_SPLIT);
assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device));
assert(node->src[j]->extra != nullptr);
}
}
+#endif
bool ok = ggml_cuda_compute_forward(¶ms, node);
if (!ok) {
fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
}
GGML_ASSERT(ok);
-
-#if 0
- if (node->type == GGML_TYPE_F32) {
- cudaDeviceSynchronize();
- std::vector<float> tmp(ggml_nelements(node), 0.0f);
- cudaMemcpy(tmp.data(), node->data, ggml_nelements(node)*sizeof(float), cudaMemcpyDeviceToHost);
- printf("\n%s (%s) (%s %s) (%s %s): ", node->name, ggml_op_name(node->op),
- ggml_type_name(node->src[0]->type),
- node->src[1] ? ggml_type_name(node->src[1]->type) : "none",
- node->src[0]->name,
- node->src[1] ? node->src[1]->name : "none");
- double sum = 0.0;
- double sq_sum = 0.0;
- for (int i = 0; i < ggml_nelements(node); i++) {
- printf("%f ", tmp[i]);
- sum += tmp[i];
- sq_sum += tmp[i]*tmp[i];
- }
- printf("\n");
- printf("sum: %f, ", sum);
- printf("sq_sum: %f\n", sq_sum);
- }
-#endif
}
- UNUSED(backend);
-
return true;
}
UNUSED(backend);
}
-static ggml_backend_i cuda_backend_i = {
+static ggml_backend_i ggml_backend_cuda_interface = {
/* .get_name = */ ggml_backend_cuda_name,
/* .free = */ ggml_backend_cuda_free,
/* .get_default_buffer_type = */ ggml_backend_cuda_get_default_buffer_type,
/* .set_tensor_async = */ ggml_backend_cuda_set_tensor_async,
/* .get_tensor_async = */ ggml_backend_cuda_get_tensor_async,
- /* .cpy_tensor_from_async = */ NULL,
- /* .cpy_tensor_to_async = */ NULL,
+ /* .cpy_tensor_async = */ ggml_backend_cuda_cpy_tensor_async,
/* .synchronize = */ ggml_backend_cuda_synchronize,
- /* .graph_plan_create = */ ggml_backend_cuda_graph_plan_create,
- /* .graph_plan_free = */ ggml_backend_cuda_graph_plan_free,
- /* .graph_plan_compute = */ ggml_backend_cuda_graph_plan_compute,
+ /* .graph_plan_create = */ NULL,
+ /* .graph_plan_free = */ NULL,
+ /* .graph_plan_compute = */ NULL,
/* .graph_compute = */ ggml_backend_cuda_graph_compute,
/* .supports_op = */ ggml_backend_cuda_supports_op,
};
// not strictly necessary, but it may reduce the overhead of the first graph_compute
ggml_cuda_set_main_device(device);
- ggml_backend_context_cuda * ctx = new ggml_backend_context_cuda {
- /* .device = */ device
+ ggml_backend_cuda_context * ctx = new ggml_backend_cuda_context {
+ /* .device = */ device,
+ /* .name = */ GGML_CUDA_NAME + std::to_string(device),
};
ggml_backend_t cuda_backend = new ggml_backend {
- /* .interface = */ cuda_backend_i,
+ /* .interface = */ ggml_backend_cuda_interface,
/* .context = */ ctx
};
}
bool ggml_backend_is_cuda(ggml_backend_t backend) {
- return backend->iface.get_name == ggml_backend_cuda_name;
+ return backend && backend->iface.get_name == ggml_backend_cuda_name;
+}
+
+int ggml_backend_cuda_get_device_count() {
+ return ggml_cuda_get_device_count();
+}
+
+void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size) {
+ ggml_cuda_get_device_description(device, description, description_size);
+}
+
+void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total) {
+ ggml_cuda_set_device(device);
+
+ CUDA_CHECK(cudaMemGetInfo(free, total));
}
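A short sketch combining the new device queries; the formatting is illustrative:

    // report free and total VRAM for every CUDA device
    int n_devices = ggml_backend_cuda_get_device_count();
    for (int i = 0; i < n_devices; i++) {
        char desc[128];
        ggml_backend_cuda_get_device_description(i, desc, sizeof(desc));
        size_t free, total;
        ggml_backend_cuda_get_device_memory(i, &free, &total);
        printf("device %d: %s, %zu MiB free of %zu MiB\n", i, desc, free/1024/1024, total/1024/1024);
    }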
+// backend registry
static ggml_backend_t ggml_backend_reg_cuda_init(const char * params, void * user_data) {
ggml_backend_t cuda_backend = ggml_backend_cuda_init((int) (intptr_t) user_data);
return cuda_backend;
GGML_API void ggml_cuda_host_free(void * ptr);
GGML_API bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-GGML_API void ggml_cuda_set_tensor_split(const float * tensor_split);
-GGML_API void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor);
-GGML_API void ggml_cuda_free_data(struct ggml_tensor * tensor);
-
-GGML_API void ggml_cuda_assign_buffers(struct ggml_tensor * tensor);
-GGML_API void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);
-GGML_API void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor);
-
-GGML_API void ggml_cuda_assign_buffers_no_alloc(struct ggml_tensor * tensor);
-GGML_API void ggml_cuda_assign_scratch_offset(struct ggml_tensor * tensor, size_t offset);
-GGML_API void ggml_cuda_copy_to_device(struct ggml_tensor * tensor);
-
-GGML_API void ggml_cuda_set_main_device(int main_device);
-GGML_API void ggml_cuda_set_mul_mat_q(bool mul_mat_q);
-GGML_API void ggml_cuda_set_scratch_size(size_t scratch_size);
-GGML_API void ggml_cuda_free_scratch(void);
GGML_API bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);
GGML_API int ggml_cuda_get_device_count(void);
GGML_API ggml_backend_t ggml_backend_cuda_init(int device);
GGML_API bool ggml_backend_is_cuda(ggml_backend_t backend);
-GGML_API int ggml_backend_cuda_get_device(ggml_backend_t backend);
GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device);
-
-// pinned host buffer for use with CPU backend for faster copies between CPU and GPU
+// split tensor buffer that splits matrices by rows across multiple devices
+GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * tensor_split);
+// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);
+GGML_API int ggml_backend_cuda_get_device_count(void);
+GGML_API void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size);
+GGML_API void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total);
+
#ifdef __cplusplus
}
#endif
#define GGML_HASHTABLE_FULL ((size_t)-1)
#define GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2)
+struct ggml_hash_set ggml_hash_set_new(size_t size);
+
bool ggml_hash_contains (const struct ggml_hash_set hash_set, struct ggml_tensor * key);
// returns GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted
}
}
-static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) {
- struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
+static const char * ggml_backend_metal_buffer_get_name(ggml_backend_buffer_t buffer) {
+ return "Metal";
- return ctx->all_data;
+ UNUSED(buffer);
}
static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) {
free(ctx);
}
+static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) {
+ struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
+
+ return ctx->all_data;
+}
+
static void ggml_backend_metal_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
memcpy((char *)tensor->data + offset, data, size);
UNUSED(buffer);
}
-static void ggml_backend_metal_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) {
- ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
-
- UNUSED(buffer);
-}
-
-static void ggml_backend_metal_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) {
- ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
+static bool ggml_backend_metal_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
+ if (ggml_backend_buffer_is_host(src->buffer)) {
+ memcpy(dst->data, src->data, ggml_nbytes(src));
+ return true;
+ }
+ return false;
UNUSED(buffer);
}
}
static struct ggml_backend_buffer_i ggml_backend_metal_buffer_i = {
+ /* .get_name = */ ggml_backend_metal_buffer_get_name,
/* .free_buffer = */ ggml_backend_metal_buffer_free_buffer,
/* .get_base = */ ggml_backend_metal_buffer_get_base,
/* .init_tensor = */ NULL,
/* .set_tensor = */ ggml_backend_metal_buffer_set_tensor,
/* .get_tensor = */ ggml_backend_metal_buffer_get_tensor,
- /* .cpy_tensor_from = */ ggml_backend_metal_buffer_cpy_tensor_from,
- /* .cpy_tensor_to = */ ggml_backend_metal_buffer_cpy_tensor_to,
+ /* .cpy_tensor = */ ggml_backend_metal_buffer_cpy_tensor,
/* .clear = */ ggml_backend_metal_buffer_clear,
+ /* .reset = */ NULL,
};
// default buffer type
+static const char * ggml_backend_metal_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+ return "Metal";
+
+ UNUSED(buft);
+}
+
static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context));
ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void) {
static struct ggml_backend_buffer_type ggml_backend_buffer_type_metal = {
/* .iface = */ {
+ /* .get_name = */ ggml_backend_metal_buffer_type_get_name,
/* .alloc_buffer = */ ggml_backend_metal_buffer_type_alloc_buffer,
/* .get_alignment = */ ggml_backend_metal_buffer_type_get_alignment,
/* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
ctx->n_buffers = 0;
const size_t size_page = sysconf(_SC_PAGESIZE);
+
+ // page-align the data ptr
+ {
+ const uintptr_t offs = (uintptr_t) data % size_page;
+ data = (void *) ((char *) data - offs);
+ size += offs;
+ }
+
size_t size_aligned = size;
if ((size_aligned % size_page) != 0) {
size_aligned += (size_page - (size_aligned % size_page));
UNUSED(backend);
}
-static struct ggml_backend_i metal_backend_i = {
+static struct ggml_backend_i ggml_backend_metal_i = {
/* .get_name = */ ggml_backend_metal_name,
/* .free = */ ggml_backend_metal_free,
/* .get_default_buffer_type = */ ggml_backend_metal_get_default_buffer_type,
/* .set_tensor_async = */ NULL,
/* .get_tensor_async = */ NULL,
- /* .cpy_tensor_from_async = */ NULL,
- /* .cpy_tensor_to_async = */ NULL,
+ /* .cpy_tensor_async = */ NULL,
/* .synchronize = */ NULL,
/* .graph_plan_create = */ NULL,
/* .graph_plan_free = */ NULL,
ggml_backend_t metal_backend = malloc(sizeof(struct ggml_backend));
*metal_backend = (struct ggml_backend) {
- /* .interface = */ metal_backend_i,
+ /* .interface = */ ggml_backend_metal_i,
/* .context = */ ctx,
};
}
bool ggml_backend_is_metal(ggml_backend_t backend) {
- return backend->iface.get_name == ggml_backend_metal_name;
+ return backend && backend->iface.get_name == ggml_backend_metal_name;
}
void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) {
#include "ggml.h"
#include "ggml-opencl.h"
+#include "ggml-backend-impl.h"
#include <array>
#include <atomic>
#include <sstream>
#include <vector>
-#define CL_TARGET_OPENCL_VERSION 110
+#define CL_TARGET_OPENCL_VERSION 120
#include <clblast.h>
#if defined(_MSC_VER)
}
void ggml_cl_init(void) {
+ static bool initialized = false;
+ if (initialized) {
+ return;
+ }
+ initialized = true;
+
cl_int err;
struct cl_device;
} else {
d_X = ggml_cl_pool_malloc(sizeof(float) * x_ne, &x_size);
}
- cl_mem d_Y = ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size);
- cl_mem d_D = ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size);
+ cl_mem d_Y = src1->backend == GGML_BACKEND_GPU ? (cl_mem) src1->extra : ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size);
+ cl_mem d_D = dst->backend == GGML_BACKEND_GPU ? (cl_mem) dst->extra : ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size);
size_t x_offset = 0;
for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
// copy src1 to device
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL));
+ if (src1->backend == GGML_BACKEND_CPU) {
+ CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL));
+ }
CL_CHECK(clFinish(queue));
}
// copy dst to host
- float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
- CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &ev_sgemm, NULL));
+ if (dst->backend == GGML_BACKEND_CPU) {
+ float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
+ CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &ev_sgemm, NULL));
+ }
}
}
}
if (src0->backend != GGML_BACKEND_GPU) {
ggml_cl_pool_free(d_X, x_size);
}
- ggml_cl_pool_free(d_Y, y_size);
- ggml_cl_pool_free(d_D, d_size);
+ if (src1->backend != GGML_BACKEND_GPU) {
+ ggml_cl_pool_free(d_Y, y_size);
+ }
+ if (dst->backend != GGML_BACKEND_GPU) {
+ ggml_cl_pool_free(d_D, d_size);
+ }
}
static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, void * wdata, size_t wsize) {
CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL));
}
+ // FIXME: convert on device
+
for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
// convert src1 to fp16
// TODO: use multiple threads
}
// copy dst to host, then convert to float
- CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(ggml_fp16_t) * d_ne, tmp, 1, &ev_sgemm, NULL));
-
- float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
-
- ggml_fp16_to_fp32_row(tmp, d, d_ne);
+ if (dst->backend == GGML_BACKEND_CPU) {
+ CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(ggml_fp16_t) * d_ne, tmp, 1, &ev_sgemm, NULL));
+ float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
+ ggml_fp16_to_fp32_row(tmp, d, d_ne);
+ } else {
+ // FIXME: convert dst to fp32 on device
+ }
}
}
}
}
-bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
+bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * dst) {
const int64_t ne10 = src1->ne[0];
const int64_t ne0 = dst->ne[0];
tensor->extra = dst;
GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU);
}
+
+// ggml-backend
+
+// buffer
+
+struct ggml_backend_opencl_buffer_context {
+ ~ggml_backend_opencl_buffer_context() {
+ if (buffer) {
+ clReleaseMemObject(buffer);
+ }
+ for (auto * sub_buffer : sub_buffers) {
+ clReleaseMemObject(sub_buffer);
+ }
+ }
+
+ cl_mem buffer;
+ std::vector<cl_mem> sub_buffers;
+};
+
+static void * const cl_ptr_base = (void *)(uintptr_t) 0x1000;
+
+static const char * ggml_backend_opencl_buffer_get_name(ggml_backend_buffer_t buffer) {
+ return "OpenCL";
+
+ GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_opencl_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+ ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+ delete ctx;
+}
+
+static void * ggml_backend_opencl_buffer_get_base(ggml_backend_buffer_t buffer) {
+ return cl_ptr_base;
+
+ GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_opencl_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
+ if (tensor->view_src != NULL && tensor->view_offs == 0) {
+ tensor->extra = tensor->view_src->extra;
+ } else {
+ ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+ cl_buffer_region region = {(size_t)((char *)tensor->data - (char *)cl_ptr_base), ggml_nbytes(tensor)};
+ cl_int err;
+ cl_mem sub_buffer = clCreateSubBuffer(ctx->buffer, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err);
+ CL_CHECK(err);
+ ctx->sub_buffers.push_back(sub_buffer);
+ tensor->extra = sub_buffer;
+ }
+ tensor->backend = GGML_BACKEND_GPU;
+}
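To make the dummy base address scheme concrete, with made-up offsets: if ggml-alloc places a tensor at data = cl_ptr_base + 0x4000 with ggml_nbytes(tensor) = 0x2000, init_tensor creates a sub-buffer of ctx->buffer over the region {origin = 0x4000, size = 0x2000} and stores the resulting cl_mem in tensor->extra, which set_tensor and get_tensor then use for the actual transfers.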
+
+static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ cl_mem tensor_buffer = (cl_mem) tensor->extra;
+ CL_CHECK(clEnqueueWriteBuffer(queue, tensor_buffer, true, offset, size, data, 0, NULL, NULL));
+ CL_CHECK(clFinish(queue));
+
+ GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_opencl_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ cl_mem tensor_buffer = (cl_mem) tensor->extra;
+ CL_CHECK(clEnqueueReadBuffer(queue, tensor_buffer, true, offset, size, data, 0, NULL, NULL));
+ CL_CHECK(clFinish(queue));
+
+ GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_opencl_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
+ ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+ CL_CHECK(clEnqueueFillBuffer(queue, ctx->buffer, &value, sizeof(value), 0, buffer->size, 0, NULL, NULL));
+ CL_CHECK(clFinish(queue));
+}
+
+static void ggml_backend_opencl_buffer_reset(ggml_backend_buffer_t buffer) {
+ ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+ for (auto * sub_buffer : ctx->sub_buffers) {
+ clReleaseMemObject(sub_buffer);
+ }
+ ctx->sub_buffers.clear();
+}
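// reset is reached through ggml_backend_buffer_reset(); dropping the
// sub-buffers created by init_tensor lets the same pool be reused for a fresh
// allocation pass, after which tensors are re-initialized and get new sub-buffers.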
+
+static ggml_backend_buffer_i ggml_backend_opencl_buffer_interface = {
+ /* .get_name = */ ggml_backend_opencl_buffer_get_name,
+ /* .free_buffer = */ ggml_backend_opencl_buffer_free_buffer,
+ /* .get_base = */ ggml_backend_opencl_buffer_get_base,
+ /* .init_tensor = */ ggml_backend_opencl_buffer_init_tensor,
+ /* .set_tensor = */ ggml_backend_opencl_buffer_set_tensor,
+ /* .get_tensor = */ ggml_backend_opencl_buffer_get_tensor,
+ /* .cpy_tensor = */ NULL,
+ /* .clear = */ ggml_backend_opencl_buffer_clear,
+ /* .reset = */ ggml_backend_opencl_buffer_reset,
+};
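// Sketch of how this interface is reached through the generic ggml-backend API
// once a tensor has been placed in an OpenCL buffer (error handling omitted;
// the tensor is assumed to be fully allocated):
//
//   float src[64] = {0};
//   float out[64];
//   ggml_backend_tensor_set(tensor, src, 0, sizeof(src)); // -> ggml_backend_opencl_buffer_set_tensor
//   ggml_backend_tensor_get(tensor, out, 0, sizeof(out)); // -> ggml_backend_opencl_buffer_get_tensor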
+
+// buffer type
+
+static const char * ggml_backend_opencl_buffer_type_name(ggml_backend_buffer_type_t buffer_type) {
+ return "OpenCL";
+
+ GGML_UNUSED(buffer_type);
+}
+
+static ggml_backend_buffer_t ggml_backend_opencl_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buffer_type, size_t size) {
+ ggml_cl_init();
+
+ cl_int err;
+ cl_mem mem = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err);
+ if (err != CL_SUCCESS) {
+ fprintf(stderr, "%s: failed to allocate %.2f MiB\n", __func__, size / 1024.0 / 1024.0);
+ return nullptr;
+ }
+
+ ggml_backend_opencl_buffer_context * ctx = new ggml_backend_opencl_buffer_context{mem, {}};
+
+ return ggml_backend_buffer_init(buffer_type, ggml_backend_opencl_buffer_interface, ctx, size);
+}
+
+static size_t ggml_backend_opencl_buffer_type_get_alignment(ggml_backend_buffer_type_t buffer_type) {
+ // FIXME: not thread safe, device may not be initialized yet
+ static cl_uint alignment = -1;
+ if (alignment == (cl_uint)-1) {
+ ggml_cl_init();
+ clGetDeviceInfo(device, CL_DEVICE_MEM_BASE_ADDR_ALIGN, sizeof(cl_uint), &alignment, NULL);
+ }
+ return alignment;
+
+ GGML_UNUSED(buffer_type);
+}
+
+static bool ggml_backend_opencl_buffer_type_supports_backend(ggml_backend_buffer_type_t buffer_type, ggml_backend_t backend) {
+ //return ggml_backend_is_opencl(backend); // opencl must be used through the cpu backend
+ return ggml_backend_is_cpu(backend);
+
+ GGML_UNUSED(buffer_type);
+}
+
+static ggml_backend_buffer_type_i ggml_backend_opencl_buffer_type_interface = {
+ /* .get_name = */ ggml_backend_opencl_buffer_type_name,
+ /* .alloc_buffer = */ ggml_backend_opencl_buffer_type_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_opencl_buffer_type_get_alignment,
+ /* .get_alloc_size = */ NULL,
+ /* .supports_backend = */ ggml_backend_opencl_buffer_type_supports_backend,
+ /* .is_host = */ NULL,
+};
+
+ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type() {
+ static ggml_backend_buffer_type buffer_type = {
+ /* .iface = */ ggml_backend_opencl_buffer_type_interface,
+ /* .context = */ nullptr,
+ };
+
+ return &buffer_type;
+}
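// Example use of the buffer type: allocating every tensor of a no_alloc context
// in a single OpenCL buffer with the ggml-alloc helper (a sketch; sizes and
// error handling omitted):
//
//   struct ggml_init_params params = {
//       /* .mem_size   = */ 16*ggml_tensor_overhead(),
//       /* .mem_buffer = */ NULL,
//       /* .no_alloc   = */ true,
//   };
//   struct ggml_context * ctx = ggml_init(params);
//   struct ggml_tensor  * w   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4096, 4096);
//   ggml_backend_buffer_t buf =
//       ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_opencl_buffer_type());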
+
+#if 0
+// host buffer type
+
+static const char * ggml_backend_opencl_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
+ return "CL_Host";
+
+ GGML_UNUSED(buft);
+}
+
+static const char * ggml_backend_opencl_host_buffer_name(ggml_backend_buffer_t buffer) {
+ return "CL_Host";
+
+ GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_opencl_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+ ggml_cl_host_free(buffer->context);
+}
+
+static ggml_backend_buffer_t ggml_backend_opencl_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+ void * ptr = ggml_cl_host_malloc(size);
+
+ if (ptr == nullptr) {
+ // fallback to cpu buffer
+ return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
+ }
+
+ ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
+ buffer->buft = buft;
+ buffer->iface.get_name = ggml_backend_opencl_host_buffer_name;
+ buffer->iface.free_buffer = ggml_backend_opencl_host_buffer_free_buffer;
+
+ return buffer;
+}
+
+ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type() {
+ static struct ggml_backend_buffer_type ggml_backend_opencl_buffer_type_host = {
+ /* .iface = */ {
+ /* .get_name = */ ggml_backend_opencl_host_buffer_type_name,
+ /* .alloc_buffer = */ ggml_backend_opencl_host_buffer_type_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment,
+ /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
+ /* .supports_backend = */ ggml_backend_cpu_buffer_type()->iface.supports_backend,
+ /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host,
+ },
+ /* .context = */ nullptr,
+ };
+
+ return &ggml_backend_opencl_buffer_type_host;
+}
+
+// backend
+
+static const char * ggml_backend_opencl_name(ggml_backend_t backend) {
+ return "OpenCL";
+
+ GGML_UNUSED(backend);
+}
+
+static void ggml_backend_opencl_free(ggml_backend_t backend) {
+ GGML_UNUSED(backend);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_opencl_get_default_buffer_type(ggml_backend_t backend) {
+ return ggml_backend_opencl_buffer_type();
+
+ GGML_UNUSED(backend);
+}
+
+static bool ggml_backend_opencl_graph_compute(ggml_backend_t backend, ggml_cgraph * graph) {
+ for (int i = 0; i < graph->n_nodes; ++i) {
+ ggml_tensor * node = graph->nodes[i];
+ switch (node->op) {
+ case GGML_OP_MUL_MAT:
+ ggml_cl_mul_mat(node->src[0], node->src[1], node, nullptr, 0);
+ break;
+ case GGML_OP_MUL:
+ ggml_cl_mul(node->src[0], node->src[1], node);
+ break;
+ default:
+ GGML_ASSERT(false);
+ }
+ }
+
+ return true;
+
+ GGML_UNUSED(backend);
+}
+
+static bool ggml_backend_opencl_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
+ switch (op->op) {
+ case GGML_OP_MUL_MAT:
+ return ggml_cl_can_mul_mat(op->src[0], op->src[1], op);
+ case GGML_OP_MUL:
+ // return ggml_can_repeat_rows(op->src[1], op->src[0]);
+ return true;
+ default:
+ return false;
+ }
+
+ GGML_UNUSED(backend);
+}
+
+static ggml_backend_i opencl_backend_i = {
+ /* .get_name = */ ggml_backend_opencl_name,
+ /* .free = */ ggml_backend_opencl_free,
+ /* .get_default_buffer_type = */ ggml_backend_opencl_get_default_buffer_type,
+ /* .set_tensor_async = */ NULL,
+ /* .get_tensor_async = */ NULL,
+ /* .cpy_tensor_from_async = */ NULL,
+ /* .cpy_tensor_to_async = */ NULL,
+ /* .synchronize = */ NULL,
+ /* .graph_plan_create = */ NULL,
+ /* .graph_plan_free = */ NULL,
+ /* .graph_plan_compute = */ NULL,
+ /* .graph_compute = */ ggml_backend_opencl_graph_compute,
+ /* .supports_op = */ ggml_backend_opencl_supports_op,
+};
+
+ggml_backend_t ggml_backend_opencl_init() {
+ ggml_backend_t backend = new ggml_backend {
+ /* .interface = */ opencl_backend_i,
+ /* .context = */ nullptr
+ };
+
+ return backend;
+}
+
+bool ggml_backend_is_opencl(ggml_backend_t backend) {
+ return backend && backend->iface.get_name == ggml_backend_opencl_name;
+}
+#endif
#pragma once
#include "ggml.h"
+#include "ggml-backend.h"
#ifdef __cplusplus
extern "C" {
GGML_API void ggml_cl_init(void);
GGML_API void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-GGML_API bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+GGML_API bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * dst);
GGML_API size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
GGML_API void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
-GGML_API void * ggml_cl_host_malloc(size_t size);
-GGML_API void ggml_cl_host_free(void * ptr);
+// GGML_API void * ggml_cl_host_malloc(size_t size);
+// GGML_API void ggml_cl_host_free(void * ptr);
GGML_API void ggml_cl_free_data(const struct ggml_tensor* tensor);
GGML_API void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);
+// backend API
+
+// GGML_API ggml_backend_t ggml_backend_opencl_init(void);
+
+// GGML_API bool ggml_backend_is_opencl(ggml_backend_t backend);
+
+GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type(void);
+// GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type(void);
+
#ifdef __cplusplus
}
#endif
}
void ggml_free(struct ggml_context * ctx) {
+ if (ctx == NULL) {
+ return;
+ }
+
// make this function thread safe
ggml_critical_section_start();
return ggml_cpy_impl(ctx, a, b);
}
+struct ggml_tensor * ggml_cast(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ enum ggml_type type) {
+ bool is_node = false;
+
+ struct ggml_tensor * result = ggml_new_tensor(ctx, type, GGML_MAX_DIMS, a->ne);
+ ggml_format_name(result, "%s (copy)", a->name);
+
+ result->op = GGML_OP_CPY;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src[0] = a;
+ result->src[1] = result;
+
+ return result;
+}
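// Example: ggml_cast adds an explicit type-conversion node to the graph, e.g.
// turning f32 activations into f16 before an op that expects f16 input
// (illustrative; the context and input tensor are assumed to exist):
//
//   struct ggml_tensor * x_f32 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
//   struct ggml_tensor * x_f16 = ggml_cast(ctx, x_f32, GGML_TYPE_F16);
//   // x_f16 has the same shape as x_f32 and is produced by a GGML_OP_CPY node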
+
// ggml_cont
static struct ggml_tensor * ggml_cont_impl(
return i;
}
-static struct ggml_hash_set ggml_hash_set_new(size_t size) {
+struct ggml_hash_set ggml_hash_set_new(size_t size) {
size = ggml_hash_size(size);
struct ggml_hash_set result;
result.size = size;
return GGML_EXIT_SUCCESS;
}
-struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
+struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threads) {
if (n_threads <= 0) {
n_threads = GGML_DEFAULT_N_THREADS;
}
} break;
case GGML_OP_MUL_MAT_ID:
{
+ cur = 0;
const struct ggml_tensor * src0 = node->src[2];
const struct ggml_tensor * src1 = node->src[1];
const enum ggml_type vec_dot_type = type_traits[src0->type].vec_dot_type;
if (src1->type != vec_dot_type) {
- cur = ggml_row_size(vec_dot_type, ggml_nelements(src1));
+ cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
}
const int n_as = ggml_get_op_params_i32(node, 1);
- cur = GGML_PAD(cur, sizeof(int64_t)); // align
+ cur += GGML_PAD(cur, sizeof(int64_t)); // align
cur += n_as * sizeof(int64_t); // matrix_row_counts
cur += n_as * src1->ne[1] * sizeof(int64_t); // matrix_rows
} break;
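// Intended layout of the MUL_MAT_ID work buffer sized above (a sketch of the
// bookkeeping, in order of increasing offset):
//
//   [ src1 converted to vec_dot_type : ggml_row_size(vec_dot_type, ggml_nelements(src1)) ]
//   [ padding up to an int64_t boundary                                                  ]
//   [ matrix_row_counts              : n_as * sizeof(int64_t)                            ]
//   [ matrix_rows                    : n_as * src1->ne[1] * sizeof(int64_t)              ]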
struct ggml_tensor * a,
struct ggml_tensor * b);
+ GGML_API struct ggml_tensor * ggml_cast(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ enum ggml_type type);
+
// make contiguous
GGML_API struct ggml_tensor * ggml_cont(
struct ggml_context * ctx,
// ggml_graph_plan() has to be called before ggml_graph_compute()
// when plan.work_size > 0, caller must allocate memory for plan.work_data
- GGML_API struct ggml_cplan ggml_graph_plan (struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/);
- GGML_API int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
+ GGML_API struct ggml_cplan ggml_graph_plan (const struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/);
+ GGML_API int ggml_graph_compute( struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
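// Typical use of the plan/compute pair declared above (a sketch; the caller
// owns the work buffer for the duration of the compute call):
//
//   struct ggml_cplan plan = ggml_graph_plan(graph, 4 /* n_threads */);
//   uint8_t * work = NULL;
//   if (plan.work_size > 0) {
//       work = malloc(plan.work_size);
//       plan.work_data = work;
//   }
//   ggml_graph_compute(graph, &plan);
//   free(work);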
// same as ggml_graph_compute() but the work data is allocated as a part of the context
// note: the drawback of this API is that you must have ensured that the context has enough memory for the work data