    clReleaseMemObject(mem);
}
+void ggml_cl_free_data(const struct ggml_tensor* tensor) {
+    if (tensor->backend != GGML_BACKEND_GPU) {
+        return;
+    }
+
+    cl_mem mem = (cl_mem)tensor->data;
+    clReleaseMemObject(mem);
+}
+
static cl_int ggml_cl_h2d_tensor_2d(cl_command_queue queue, cl_mem dst, size_t offset, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cl_event* ev) {
    cl_int err;
    const uint64_t ne0 = src->ne[0];
void * ggml_cl_host_malloc(size_t size);
void ggml_cl_host_free(void * ptr);
+void ggml_cl_free_data(const struct ggml_tensor* tensor);
+
void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, size_t offset);
        for (size_t i = 0; i < tensors_by_name.size(); ++i) {
            ggml_cuda_free_data(tensors_by_name[i].second);
        }
-#endif // GGML_USE_CUBLAS
+#elif defined(GGML_USE_CLBLAST)
+        for (size_t i = 0; i < tensors_by_name.size(); ++i) {
+            ggml_cl_free_data(tensors_by_name[i].second);
+        }
+#endif
    }
};
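
For context, a minimal usage sketch of the lifecycle these hunks imply (not part of the diff; it assumes the declarations above live in ggml-opencl.h, that ggml_cl_transform_tensor() uploads tensor->data into a cl_mem and sets tensor->backend to GGML_BACKEND_GPU, and that offload_then_release is a hypothetical helper, not an existing function):

#include "ggml.h"
#include "ggml-opencl.h"

// Offload a tensor to the OpenCL device, use it, then release the device buffer.
static void offload_then_release(struct ggml_tensor * t) {
    ggml_cl_transform_tensor(t);   // assumed: tensor->data now holds a cl_mem and backend == GGML_BACKEND_GPU
    // ... run CLBlast-accelerated operations against t ...
    ggml_cl_free_data(t);          // releases the underlying cl_mem via clReleaseMemObject
}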