typedef std::shared_ptr<vk_context_struct> vk_context;
typedef std::weak_ptr<vk_context_struct> vk_context_ref;
-struct ggml_tensor_extra_gpu {
- vk_buffer_ref buffer_gpu;
- uint64_t offset;
-
- void reset() {
- buffer_gpu.reset();
- offset = 0;
- }
-};
-
struct ggml_vk_garbage_collector {
std::vector<vk_semaphore> tl_semaphores;
std::vector<vk_semaphore> semaphores;
std::vector<vk_context_ref> tensor_ctxs;
};
+static void * const vk_ptr_base = (void *)(uintptr_t) 0x1000; // NOLINT
+
+static uint64_t vk_tensor_offset(const ggml_tensor * tensor) {
+ if (tensor->view_src) {
+ return (uint8_t *) tensor->view_src->data - (uint8_t *) vk_ptr_base;
+ }
+ return (uint8_t *) tensor->data - (uint8_t *) vk_ptr_base;
+}
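// Illustration, not part of the patch: device "pointers" in this backend are just
// vk_ptr_base plus the tensor's byte offset inside its vk_buffer, so the offset can
// be recovered with plain pointer arithmetic. A minimal sketch, assuming a tensor
// placed 4096 bytes into its buffer:
//
//     ggml_tensor t = {};
//     t.data = (uint8_t *) vk_ptr_base + 4096;   // set when the tensor is allocated
//     uint64_t off = vk_tensor_offset(&t);       // == 4096
//
// For views, vk_tensor_offset() returns the offset of view_src; the call sites below
// add tensor->view_offs on top of it.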
+
+struct ggml_backend_vk_buffer_context {
+ vk_device_ref device;
+ vk_buffer dev_buffer;
+ std::string name;
+
+ ggml_backend_vk_buffer_context(vk_device_ref device, vk_buffer&& dev_buffer, std::string& name) :
+ device(device),
+ dev_buffer(dev_buffer),
+ name(name) {
+ }
+
+ ~ggml_backend_vk_buffer_context() {
+ ggml_vk_destroy_buffer(dev_buffer);
+ }
+};
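// Illustration, not part of the patch: the access pattern used throughout the call
// sites below to locate a tensor's device storage without tensor->extra. Here "t" is
// a hypothetical tensor living in a Vulkan buffer:
//
//     ggml_backend_vk_buffer_context * buf_ctx =
//         (ggml_backend_vk_buffer_context *) t->buffer->context;
//     vk_buffer buf    = buf_ctx->dev_buffer;                 // shared device buffer
//     uint64_t  offset = vk_tensor_offset(t) + t->view_offs;  // byte offset into it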
+
#ifdef GGML_VULKAN_MEMORY_DEBUG
void vk_memory_logger::log_allocation(vk_buffer_ref buf_ref, size_t size) {
std::lock_guard<std::mutex> guard(log_mutex);
const uint64_t r2 = ne12 / ne02;
const uint64_t r3 = ne13 / ne03;
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
- ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
- ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
vk_buffer d_Qx;
size_t qx_buf_offset = 0;
return;
}
- vk_buffer d_D = extra->buffer_gpu.lock();
- const uint64_t d_buf_offset = extra->offset + dst->view_offs;
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+ const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
GGML_ASSERT(d_D != nullptr);
GGML_ASSERT(d_D->size >= d_buf_offset + d_sz * ne02 * ne03);
vk_buffer d_X;
vk_buffer d_Y;
uint64_t y_buf_offset = 0;
if (!src0_uma) {
- d_Qx = extra_src0->buffer_gpu.lock();
- qx_buf_offset = extra_src0->offset + src0->view_offs;
+ d_Qx = src0_buf_ctx->dev_buffer;
+ qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
GGML_ASSERT(d_Qx != nullptr);
}
if (!src1_uma) {
- d_Qy = extra_src1->buffer_gpu.lock();
- qy_buf_offset = extra_src1->offset + src1->view_offs;
+ d_Qy = src1_buf_ctx->dev_buffer;
+ qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
GGML_ASSERT(d_Qy != nullptr);
}
if (qx_needs_dequant) {
const uint64_t r2 = ne12 / ne02;
const uint64_t r3 = ne13 / ne03;
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
- ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
- ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
vk_buffer d_Qx;
size_t qx_buf_offset = 0;
return;
}
- vk_buffer d_D = extra->buffer_gpu.lock();
- const uint64_t d_buf_offset = extra->offset + dst->view_offs;
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+ const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
GGML_ASSERT(d_D != nullptr);
vk_buffer d_X;
uint64_t x_buf_offset = 0;
vk_buffer d_Y;
uint64_t y_buf_offset = 0;
if(!src0_uma) {
- d_Qx = extra_src0->buffer_gpu.lock();
- qx_buf_offset = extra_src0->offset + src0->view_offs;
+ d_Qx = src0_buf_ctx->dev_buffer;
+ qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
GGML_ASSERT(d_Qx != nullptr);
}
if(!src1_uma) {
- d_Qy = extra_src1->buffer_gpu.lock();
- qy_buf_offset = extra_src1->offset + src1->view_offs;
+ d_Qy = src1_buf_ctx->dev_buffer;
+ qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
GGML_ASSERT(d_Qy != nullptr);
}
if (qx_needs_dequant) {
GGML_ASSERT(ne11 == 1);
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
- ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
- ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
vk_buffer d_Qy;
size_t qy_buf_offset = 0;
return;
}
- vk_buffer d_D = extra->buffer_gpu.lock();
- const uint64_t d_buf_offset = extra->offset + dst->view_offs;
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+ const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
GGML_ASSERT(d_D != nullptr);
- vk_buffer d_Qx = extra_src0->buffer_gpu.lock();
- const uint64_t qx_buf_offset = extra_src0->offset + src0->view_offs;
+ vk_buffer d_Qx = src0_buf_ctx->dev_buffer;
+ const uint64_t qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
GGML_ASSERT(d_Qx != nullptr);
if (!src1_uma) {
- d_Qy = extra_src1->buffer_gpu.lock();
- qy_buf_offset = extra_src1->offset + src1->view_offs;
+ d_Qy = src1_buf_ctx->dev_buffer;
+ qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
GGML_ASSERT(d_Qy != nullptr);
}
GGML_ASSERT(ne11 == 1);
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
- ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
- ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
vk_buffer d_Qy = nullptr;
size_t qy_buf_offset = 0;
return;
}
- vk_buffer d_D = extra->buffer_gpu.lock();
- const uint64_t d_buf_offset = extra->offset + dst->view_offs;
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+ const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
GGML_ASSERT(d_D != nullptr);
- vk_buffer d_Qx = extra_src0->buffer_gpu.lock();
- const uint64_t qx_buf_offset = extra_src0->offset + src0->view_offs;
+ vk_buffer d_Qx = src0_buf_ctx->dev_buffer;
+ const uint64_t qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
GGML_ASSERT(d_Qx != nullptr);
if (!src1_uma) {
- d_Qy = extra_src1->buffer_gpu.lock();
- qy_buf_offset = extra_src1->offset + src1->view_offs;
+ d_Qy = src1_buf_ctx->dev_buffer;
+ qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
GGML_ASSERT(d_Qy != nullptr);
}
const uint64_t n_as = ne02;
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
- ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
- ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
- ggml_tensor_extra_gpu * extra_ids = (ggml_tensor_extra_gpu *) ids->extra;
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
+ ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context;
vk_buffer d_Qx;
size_t qx_buf_offset = 0;
return;
}
- vk_buffer d_D = extra->buffer_gpu.lock();
- const uint64_t d_buf_offset = extra->offset + dst->view_offs;
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+ const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
GGML_ASSERT(d_D != nullptr);
vk_buffer d_X;
uint64_t x_buf_offset = 0;
vk_buffer d_Y;
uint64_t y_buf_offset = 0;
if (!src0_uma) {
- d_Qx = extra_src0->buffer_gpu.lock();
- qx_buf_offset = extra_src0->offset + src0->view_offs;
+ d_Qx = src0_buf_ctx->dev_buffer;
+ qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
GGML_ASSERT(d_Qx != nullptr);
}
if (!src1_uma) {
- d_Qy = extra_src1->buffer_gpu.lock();
- qy_buf_offset = extra_src1->offset + src1->view_offs;
+ d_Qy = src1_buf_ctx->dev_buffer;
+ qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
GGML_ASSERT(d_Qy != nullptr);
}
if (!ids_uma) {
- d_ids = extra_ids->buffer_gpu.lock();
- ids_buf_offset = extra_ids->offset + ids->view_offs;
+ d_ids = ids_buf_ctx->dev_buffer;
+ ids_buf_offset = vk_tensor_offset(ids) + ids->view_offs;
GGML_ASSERT(d_ids != nullptr);
}
if (qx_needs_dequant) {
const uint64_t ne22 = dst->ne[2];
const uint64_t ne23 = dst->ne[3];
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
- ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
- ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
- ggml_tensor_extra_gpu * extra_ids = (ggml_tensor_extra_gpu *) ids->extra;
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
+ ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context;
vk_buffer d_Qx;
size_t qx_buf_offset = 0;
return;
}
- vk_buffer d_D = extra->buffer_gpu.lock();
- const uint64_t d_buf_offset = extra->offset + dst->view_offs;
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+ const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
GGML_ASSERT(d_D != nullptr);
vk_buffer d_X;
uint64_t x_buf_offset = 0;
vk_buffer d_Y;
uint64_t y_buf_offset = 0;
if(!src0_uma) {
- d_Qx = extra_src0->buffer_gpu.lock();
- qx_buf_offset = extra_src0->offset + src0->view_offs;
+ d_Qx = src0_buf_ctx->dev_buffer;
+ qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
GGML_ASSERT(d_Qx != nullptr);
}
if(!src1_uma) {
- d_Qy = extra_src1->buffer_gpu.lock();
- qy_buf_offset = extra_src1->offset + src1->view_offs;
+ d_Qy = src1_buf_ctx->dev_buffer;
+ qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
GGML_ASSERT(d_Qy != nullptr);
}
if(!ids_uma) {
- d_ids = extra_ids->buffer_gpu.lock();
- ids_buf_offset = extra_ids->offset + ids->view_offs;
+ d_ids = ids_buf_ctx->dev_buffer;
+ ids_buf_offset = vk_tensor_offset(ids) + ids->view_offs;
GGML_ASSERT(d_ids != nullptr);
}
if (qx_needs_dequant) {
std::cerr << "), " << ggml_op_name(op) << ", " << (dryrun ? "dryrun" : "") << ")");
GGML_ASSERT(op == GGML_OP_GET_ROWS || (!ggml_is_quantized(src0->type) && (src1 == nullptr || !ggml_is_quantized(src1->type)))); // NOLINT
GGML_ASSERT(ggml_vk_op_supports_incontiguous(op) || ggml_vk_dim01_contiguous(src0)); // NOLINT
- GGML_ASSERT(dst->extra != nullptr);
+ GGML_ASSERT(dst->buffer != nullptr);
const uint64_t ne00 = src0->ne[0];
const uint64_t ne01 = src0->ne[1];
const uint64_t ne02 = src0->ne[2];
const bool op_supports_incontiguous = ggml_vk_op_supports_incontiguous(op);
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
- ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
- ggml_tensor_extra_gpu * extra_src1 = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr;
- ggml_tensor_extra_gpu * extra_src2 = use_src2 ? (ggml_tensor_extra_gpu *) src2->extra : nullptr;
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = use_src1 ? (ggml_backend_vk_buffer_context *)src1->buffer->context : nullptr;
+ ggml_backend_vk_buffer_context * src2_buf_ctx = use_src2 ? (ggml_backend_vk_buffer_context *)src2->buffer->context : nullptr;
vk_buffer d_X = nullptr;
size_t x_buf_offset = 0;
uint64_t z_sz = use_src2 ? ggml_type_size(src2->type) * ne2 : 0;
uint64_t d_sz = ggml_type_size(dst->type) * ned;
- vk_buffer d_D = extra->buffer_gpu.lock();
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
// Workaround for tiny tensor inputs on ROPE
if (op == GGML_OP_ROPE && use_src1 && y_sz > d_D->size) {
}
GGML_ASSERT(d_D != nullptr);
- uint64_t d_buf_offset = ((extra->offset + dst->view_offs) / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
- GGML_ASSERT(d_buf_offset == extra->offset || op == GGML_OP_CPY); // NOLINT
+ uint64_t d_buf_offset = ((vk_tensor_offset(dst) + dst->view_offs) / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
+ GGML_ASSERT(d_buf_offset == vk_tensor_offset(dst) || op == GGML_OP_CPY); // NOLINT
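    // Worked example, illustrative only: with minStorageBufferOffsetAlignment == 64 and
    // vk_tensor_offset(dst) + dst->view_offs == 4100, d_buf_offset becomes
    // (4100 / 64) * 64 == 4096. The remaining 4 bytes are handled as an element offset
    // in the push constants, e.g. ((offset % alignment) / dst_type_size) as computed in
    // ggml_vk_acc() and ggml_vk_cpy() below.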
if(!src0_uma) {
- d_X = extra_src0->buffer_gpu.lock();
- x_buf_offset = extra_src0->offset + src0->view_offs;
+ d_X = src0_buf_ctx->dev_buffer;
+ x_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
GGML_ASSERT(d_X != nullptr);
}
if (use_src1 && !src1_uma) {
- d_Y = extra_src1->buffer_gpu.lock();
- y_buf_offset = extra_src1->offset + src1->view_offs;
+ d_Y = src1_buf_ctx->dev_buffer;
+ y_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
GGML_ASSERT(d_Y != nullptr);
}
if (use_src2 && !src2_uma) {
- d_Z = extra_src2->buffer_gpu.lock();
- z_buf_offset = extra_src2->offset + src2->view_offs;
+ d_Z = src2_buf_ctx->dev_buffer;
+ z_buf_offset = vk_tensor_offset(src2) + src2->view_offs;
GGML_ASSERT(d_Z != nullptr);
}
}
static void ggml_vk_acc(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
const uint32_t src0_type_size = ggml_type_size(src0->type);
const uint32_t src1_type_size = ggml_type_size(src1->type);
const uint32_t dst_type_size = ggml_type_size(dst->type);
- const uint32_t d_offset = ((extra->offset + dst->view_offs) % ctx->device->properties.limits.minStorageBufferOffsetAlignment) / dst_type_size;
+ const uint32_t d_offset = ((vk_tensor_offset(dst) + dst->view_offs) % ctx->device->properties.limits.minStorageBufferOffsetAlignment) / dst_type_size;
int nb1 = dst->op_params[0] / 4; // 4 bytes of float32
int nb2 = dst->op_params[1] / 4; // 4 bytes of float32
}
static void ggml_vk_cpy(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
const uint32_t src0_type_size = ggml_type_size(src0->type);
const uint32_t dst_type_size = ggml_type_size(dst->type);
- const uint32_t d_offset = ((extra->offset + dst->view_offs) % ctx->device->properties.limits.minStorageBufferOffsetAlignment) / dst_type_size;
+ const uint32_t d_offset = ((vk_tensor_offset(dst) + dst->view_offs) % ctx->device->properties.limits.minStorageBufferOffsetAlignment) / dst_type_size;
ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_CPY, {
(uint32_t)ggml_nelements(src0),
}
#endif
-static ggml_tensor_extra_gpu * ggml_vk_tensor_create_extra(ggml_tensor * tensor) {
- VK_LOG_DEBUG("ggml_vk_create_extra(" << tensor << " (" << tensor->name << ", " << ggml_op_name(tensor->op) << "))");
- ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu;
- extra->reset();
- tensor->extra = extra;
- return extra;
-}
-
static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx) {
#if defined(GGML_VULKAN_RUN_TESTS)
ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_F32);
// Returns true if the node enqueued work into the queue, false otherwise
// If submit is true, all operations queued so far are submitted to Vulkan, overlapping command list creation with GPU execution.
static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * node, int node_idx, ggml_tensor *node_begin, int node_idx_begin, bool dryrun, bool last_node, bool submit){
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) node->extra;
-
- if (ggml_is_empty(node) || extra == nullptr) {
+ if (ggml_is_empty(node) || !node->buffer) {
return false;
}
}
static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor * tensor, int tensor_idx, bool use_fence = true){
- ggml_tensor_extra_gpu * extra = nullptr;
+ ggml_backend_buffer * buf = nullptr;
switch (tensor->op) {
case GGML_OP_ADD:
case GGML_OP_TIMESTEP_EMBEDDING:
case GGML_OP_LEAKY_RELU:
case GGML_OP_REPEAT:
- extra = (ggml_tensor_extra_gpu *) tensor->extra;
+ buf = tensor->buffer;
break;
case GGML_OP_UNARY:
case GGML_UNARY_OP_GELU_QUICK:
case GGML_UNARY_OP_RELU:
case GGML_UNARY_OP_TANH:
- extra = (ggml_tensor_extra_gpu *) tensor->extra;
+ buf = tensor->buffer;
break;
default:
return false;
break;
case GGML_OP_MUL_MAT:
case GGML_OP_MUL_MAT_ID:
- extra = (ggml_tensor_extra_gpu *) tensor->extra;
+ buf = tensor->buffer;
break;
default:
return false;
}
- if (extra == nullptr) {
+ if (buf == nullptr) {
return false;
}
// device backend
-static void * const vk_ptr_base = (void *)(uintptr_t) 0x1000; // NOLINT
-
-struct ggml_backend_vk_buffer_context {
- vk_device_ref device;
- vk_buffer dev_buffer;
- ggml_tensor_extra_gpu * temp_tensor_extras = nullptr;
- size_t temp_tensor_extra_index = 0;
- std::string name;
-
- ggml_backend_vk_buffer_context(vk_device_ref device, vk_buffer&& dev_buffer, std::string& name) :
- device(device),
- dev_buffer(dev_buffer),
- name(name) {
- }
-
- ~ggml_backend_vk_buffer_context() {
- ggml_vk_destroy_buffer(dev_buffer);
- if (temp_tensor_extras != nullptr) {
- delete[] temp_tensor_extras;
- }
- }
-
- ggml_tensor_extra_gpu * ggml_vk_alloc_temp_tensor_extra() {
- if (temp_tensor_extras == nullptr) {
- temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_VK_MAX_NODES];
- }
-
- size_t alloc_index = temp_tensor_extra_index;
- temp_tensor_extra_index = (temp_tensor_extra_index + 1) % GGML_VK_MAX_NODES;
- ggml_tensor_extra_gpu * extra = &temp_tensor_extras[alloc_index];
- extra->reset();
-
- return extra;
- }
-};
-
GGML_CALL static const char * ggml_backend_vk_buffer_get_name(ggml_backend_buffer_t buffer) {
ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
return ctx->name.c_str();
GGML_CALL static void ggml_backend_vk_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
VK_LOG_DEBUG("ggml_backend_vk_buffer_init_tensor(" << buffer << " (" << buffer->context << "), " << tensor << ")");
- ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
-
if (tensor->view_src != nullptr) {
GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
- GGML_ASSERT(tensor->view_src->extra != nullptr);
- tensor->extra = tensor->view_src->extra;
- } else {
- ggml_tensor_extra_gpu * extra = ctx->ggml_vk_alloc_temp_tensor_extra();
- extra->buffer_gpu = ctx->dev_buffer;
- extra->offset = (uint8_t *) tensor->data - (uint8_t *) vk_ptr_base;
- tensor->extra = extra;
}
}
GGML_CALL static void ggml_backend_vk_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
VK_LOG_DEBUG("ggml_backend_vk_buffer_set_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)buffer->context;
+ vk_buffer buf = buf_ctx->dev_buffer;
- vk_buffer buf = extra->buffer_gpu.lock();
-
- ggml_vk_buffer_write(buf, extra->offset + tensor->view_offs + offset, data, size);
-
- GGML_UNUSED(buffer);
+ ggml_vk_buffer_write(buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}
GGML_CALL static void ggml_backend_vk_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
VK_LOG_DEBUG("ggml_backend_vk_buffer_get_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
-
- vk_buffer buf = extra->buffer_gpu.lock();
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)buffer->context;
- ggml_vk_buffer_read(buf, extra->offset + tensor->view_offs + offset, data, size);
+ vk_buffer buf = buf_ctx->dev_buffer;
- GGML_UNUSED(buffer);
+ ggml_vk_buffer_read(buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}
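// Usage sketch, illustrative only: host round-trips go through the generic ggml-backend
// API and end up in the buffer set/get functions above ("t" is a hypothetical tensor
// stored in a Vulkan buffer):
//
//     std::vector<float> host(ggml_nelements(t));
//     ggml_backend_tensor_get(t, host.data(), 0, ggml_nbytes(t));  // -> ggml_backend_vk_buffer_get_tensor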
GGML_CALL static bool ggml_backend_vk_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
if (ggml_backend_buffer_is_vk(src->buffer)) {
- ggml_tensor_extra_gpu * src_extra = (ggml_tensor_extra_gpu *) src->extra;
- ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
+ ggml_backend_vk_buffer_context * src_buf_ctx = (ggml_backend_vk_buffer_context *)src->buffer->context;
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
- vk_buffer src_buf = src_extra->buffer_gpu.lock();
- vk_buffer dst_buf = dst_extra->buffer_gpu.lock();
+ vk_buffer src_buf = src_buf_ctx->dev_buffer;
+ vk_buffer dst_buf = dst_buf_ctx->dev_buffer;
- ggml_vk_buffer_copy(dst_buf, dst_extra->offset + dst->view_offs, src_buf, src_extra->offset + src->view_offs, ggml_nbytes(src));
+ ggml_vk_buffer_copy(dst_buf, vk_tensor_offset(dst) + dst->view_offs, src_buf, vk_tensor_offset(src) + src->view_offs, ggml_nbytes(src));
return true;
}
ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;
vk_context transfer_ctx;
transfer_ctx = ctx->transfer_ctx.lock();
}
- vk_buffer buf = extra->buffer_gpu.lock();
+ vk_buffer buf = buf_ctx->dev_buffer;
- ggml_vk_buffer_write_async(transfer_ctx, buf, extra->offset + tensor->view_offs + offset, data, size);
+ ggml_vk_buffer_write_async(transfer_ctx, buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}
GGML_CALL static void ggml_backend_vk_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;
vk_context transfer_ctx;
transfer_ctx = ctx->transfer_ctx.lock();
}
- vk_buffer buf = extra->buffer_gpu.lock();
+ vk_buffer buf = buf_ctx->dev_buffer;
- ggml_vk_buffer_read_async(transfer_ctx, buf, extra->offset + tensor->view_offs + offset, data, size);
+ ggml_vk_buffer_read_async(transfer_ctx, buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}
GGML_CALL static bool ggml_backend_vk_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) {
VK_LOG_DEBUG("ggml_backend_vk_cpy_tensor_async()");
ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
if ((dst->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || dst->buffer->buft == ggml_backend_vk_host_buffer_type()) && ggml_backend_buffer_is_vk(src->buffer)) {
- ggml_tensor_extra_gpu * src_extra = (ggml_tensor_extra_gpu *) src->extra;
- ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
+ ggml_backend_vk_buffer_context * src_buf_ctx = (ggml_backend_vk_buffer_context *)src->buffer->context;
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
vk_context transfer_ctx;
transfer_ctx = ctx->transfer_ctx.lock();
}
- vk_buffer src_buf = src_extra->buffer_gpu.lock();
- vk_buffer dst_buf = dst_extra->buffer_gpu.lock();
+ vk_buffer src_buf = src_buf_ctx->dev_buffer;
+ vk_buffer dst_buf = dst_buf_ctx->dev_buffer;
- ggml_vk_buffer_copy_async(transfer_ctx, dst_buf, dst_extra->offset + dst->view_offs, src_buf, src_extra->offset + src->view_offs, ggml_nbytes(src));
+ ggml_vk_buffer_copy_async(transfer_ctx, dst_buf, vk_tensor_offset(dst) + dst->view_offs, src_buf, vk_tensor_offset(src) + src->view_offs, ggml_nbytes(src));
return true;
}
const size_t tensor_size = ggml_nbytes(tensor);
tensor_data = malloc(tensor_size);
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;
- vk_buffer buffer_gpu = extra->buffer_gpu.lock();
- ggml_vk_buffer_read(buffer_gpu, extra->offset + tensor->view_offs, tensor_data, tensor_size);
+ vk_buffer buffer_gpu = buf_ctx->dev_buffer;
+ ggml_vk_buffer_read(buffer_gpu, vk_tensor_offset(tensor) + tensor->view_offs, tensor_data, tensor_size);
}
std::cerr << "TENSOR CHECK " << name << " (" << tensor->name << "): " << ggml_op_name(tensor->op) << std::endl;
memcpy(src0_clone->data, src0->data, src0_size);
memcpy(src0_clone->nb, src0->nb, sizeof(size_t) * GGML_MAX_DIMS);
} else if (ggml_backend_buffer_is_vk(src0->buffer)) {
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src0->extra;
- vk_buffer buffer_gpu = extra->buffer_gpu.lock();
- uint64_t offset = extra->offset + src0->view_offs;
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
+ uint64_t offset = vk_tensor_offset(src0) + src0->view_offs;
if (!ggml_is_contiguous(src0) && ggml_vk_dim01_contiguous(src0)) {
for (int i3 = 0; i3 < src0->ne[3]; i3++) {
for (int i2 = 0; i2 < src0->ne[2]; i2++) {
memcpy(src1_clone->data, src1->data, src1_size);
memcpy(src1_clone->nb, src1->nb, sizeof(size_t) * GGML_MAX_DIMS);
} else if (ggml_backend_buffer_is_vk(src1->buffer)) {
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src1->extra;
- vk_buffer buffer_gpu = extra->buffer_gpu.lock();
- uint64_t offset = extra->offset + src1->view_offs;
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
+ vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
+ uint64_t offset = vk_tensor_offset(src1) + src1->view_offs;
if (!ggml_is_contiguous(src1) && ggml_vk_dim01_contiguous(src1)) {
for (int i3 = 0; i3 < src1->ne[3]; i3++) {
for (int i2 = 0; i2 < src1->ne[2]; i2++) {
memcpy(src2_clone->data, src2->data, src2_size);
memcpy(src2_clone->nb, src2->nb, sizeof(size_t) * GGML_MAX_DIMS);
} else if (ggml_backend_buffer_is_vk(src2->buffer)) {
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src2->extra;
- vk_buffer buffer_gpu = extra->buffer_gpu.lock();
- uint64_t offset = extra->offset + src2->view_offs;
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)src2->buffer->context;
+ vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
+ uint64_t offset = vk_tensor_offset(src2) + src2->view_offs;
if (!ggml_is_contiguous(src2) && ggml_vk_dim01_contiguous(src2)) {
for (int i3 = 0; i3 < src2->ne[3]; i3++) {
for (int i2 = 0; i2 < src2->ne[2]; i2++) {
} else if (tensor->op == GGML_OP_PAD) {
tensor_clone = ggml_pad(ggml_ctx, src0_clone, tensor->ne[0] - src0_clone->ne[0], tensor->ne[1] - src0_clone->ne[1], tensor->ne[2] - src0_clone->ne[2], tensor->ne[3] - src0_clone->ne[3]);
} else if (tensor->op == GGML_OP_REPEAT) {
- tensor_clone = ggml_repeat(ggml_ctx, src0_clone, src1_clone);
+ tensor_clone = ggml_repeat(ggml_ctx, src0_clone, tensor);
} else if (tensor->op == GGML_OP_ADD) {
tensor_clone = ggml_add(ggml_ctx, src0_clone, src1_clone);
} else if (tensor->op == GGML_OP_ACC) {
size_t tensor_size = ggml_nbytes(tensor);
tensor_data = malloc(tensor_size);
- ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;
- vk_buffer buffer_gpu = extra->buffer_gpu.lock();
- if (extra->offset + tensor->view_offs + tensor_size >= buffer_gpu->size) {
- tensor_size = buffer_gpu->size - (extra->offset + tensor->view_offs);
+ vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
+ uint64_t offset = vk_tensor_offset(tensor) + tensor->view_offs;
+ if (offset + tensor_size >= buffer_gpu->size) {
+ tensor_size = buffer_gpu->size - offset;
}
- ggml_vk_buffer_read(buffer_gpu, extra->offset + tensor->view_offs, tensor_data, tensor_size);
+ ggml_vk_buffer_read(buffer_gpu, offset, tensor_data, tensor_size);
}
float first_error_result = -1.0f;