size_t x_size;
cl_mem d_X;
if (src0->backend == GGML_BACKEND_CL) {
- d_X = *(cl_mem*) src0->data;
+ d_X = (cl_mem) src0->data;
} else {
d_X = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * x_ne, &x_size, CL_MEM_READ_ONLY);
}
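// The direct cast above works because cl_mem is itself an opaque pointer type
// (typedef struct _cl_mem * cl_mem), so a device buffer handle fits in the
// tensor's void * data field with no extra heap-allocated cl_mem* wrapper.
// A minimal sketch of the assumed setup side; the helper name is illustrative
// and not part of this diff:
static void example_attach_cl_buffer(struct ggml_tensor * tensor, cl_mem buffer) {
    tensor->data    = buffer;            // store the handle itself, no indirection
    tensor->backend = GGML_BACKEND_CL;   // consumers recover it with (cl_mem) tensor->data
}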
if (src0->backend == GGML_BACKEND_CPU) {
CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, NULL));
} else if (src0->backend == GGML_BACKEND_CL) {
- d_Q = *(cl_mem*) src0->data;
+ d_Q = (cl_mem) src0->data;
} else {
GGML_ASSERT(false);
}
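// Cleanup has to mirror this branch: only a buffer taken from the pool for the
// CPU path should be returned to it; a handle borrowed from a GGML_BACKEND_CL
// tensor is owned by that tensor. A hedged sketch, assuming the matching pool
// routine ggml_cl_pool_free(cl_mem, size_t) and that d_Q was allocated from the
// pool with size q_size when src0 lives on the host:
if (src0->backend == GGML_BACKEND_CPU) {
    ggml_cl_pool_free(d_Q, q_size);      // return the scratch buffer to the pool
}
// (a handle belonging to a GGML_BACKEND_CL tensor is deliberately left alone)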
const size_t q_sz = ggml_type_size(type) * ne0 * ne1 * ne2 * ne3 / ggml_blck_size(type);
size_t q_size;
- cl_mem* dst = (cl_mem*) malloc(sizeof(cl_mem));
- *dst = ggml_cl_pool_malloc(q_sz, &q_size, CL_MEM_READ_ONLY);
+ cl_mem dst = ggml_cl_pool_malloc(q_sz, &q_size, CL_MEM_READ_ONLY);
// copy tensor to device
for (int64_t i3 = 0; i3 < ne3; i3++) {
for (int64_t i2 = 0; i2 < ne2; i2++) {
int i = i3*ne2 + i2;
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, *dst, i*ne0*ne1, tensor, i3, i2, NULL));
+ CL_CHECK(ggml_cl_h2d_tensor_2d(queue, dst, i*ne0*ne1, tensor, i3, i2, NULL));
}
}
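// Once the loop has uploaded every 2D slice, the tensor is assumed to keep the
// bare handle so the consumers above can cast it back directly; the final
// assignment sketched here is not shown in this hunk:
tensor->data = dst;                      // no malloc'd cl_mem* left to track or free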