GGML_UNUSED(buft);
}
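+// Portable aligned host allocation: Windows has no C11 aligned_alloc, so we
+// fall back to _aligned_malloc there (alignment must be a power of two).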
+inline void * aligned_malloc_host(size_t alignment, size_t size) {
+#ifdef _WIN32
+    return _aligned_malloc(size, alignment);
+#else
+    // C11 requires the size passed to aligned_alloc to be a multiple of the
+    // alignment; round up so the call is well-defined on all libcs.
+    size = (size + alignment - 1) & ~(alignment - 1);
+    return aligned_alloc(alignment, size);
+#endif
+}
+
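+// Memory from _aligned_malloc must be released with _aligned_free, so frees
+// go through a matching helper rather than plain free().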
+inline void free_aligned_mem_host(void * memblock) {
+#ifdef _WIN32
+    _aligned_free(memblock);
+#else
+    free(memblock);
+#endif
+}
+
static void ggml_backend_sycl_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
-    ggml_sycl_host_free(buffer->context);
+    free_aligned_mem_host(buffer->context);
}
static ggml_backend_buffer_t ggml_backend_sycl_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
-    void * ptr = ggml_sycl_host_malloc(size);
-
+    void * ptr = aligned_malloc_host(TENSOR_ALIGNMENT, size);
if (ptr == nullptr) {
// fallback to cpu buffer
return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);