def ggml_cpu_has_clblast() -> int:
    """ GGML_API int ggml_cpu_has_clblast (void);"""
    # Generated binding stub: the docstring mirrors the wrapped C declaration.
    # NOTE(review): presumably returns non-zero when ggml was built with CLBlast
    # support — confirm against ggml.h; not verifiable from this stub alone.
    ...
- def ggml_cpu_has_cublas() -> int:
- """ GGML_API int ggml_cpu_has_cublas (void);"""
+ def ggml_cpu_has_cuda() -> int:
+ """ GGML_API int ggml_cpu_has_cuda (void);"""
...
def ggml_cpu_has_f16c() -> int:
    """ GGML_API int ggml_cpu_has_f16c (void);"""
    # Generated binding stub: the docstring mirrors the wrapped C declaration.
    # NOTE(review): presumably reports F16C instruction support in this build —
    # confirm against ggml.h; not verifiable from this stub alone.
def ggml_init(params: ffi.CData) -> ffi.CData:
    """ GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);"""
    # Generated binding stub. Per the mirrored C signature, `params` is a cffi
    # `struct ggml_init_params` value and the return is a cffi pointer to the
    # newly created `ggml_context`.
    ...
- def ggml_init_cublas() -> None:
- """GGML_API void ggml_init_cublas(void);"""
+ def ggml_init_cuda() -> None:
+ """GGML_API void ggml_init_cuda(void);"""
...
def ggml_internal_get_type_traits(type: int) -> ffi.CData:
    """ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);"""
    # Generated binding stub. Per the mirrored C signature, `type` is the integer
    # value of the C `enum ggml_type`; the return is the `ggml_type_traits_t`
    # struct as a cffi value.
# Build the simple-backend example executable and link it against the ggml
# library target. PRIVATE: ggml's usage requirements are needed only to build
# this executable, not by anything that might consume it.
add_executable(${TEST_TARGET} simple-backend.cpp)
target_link_libraries(${TEST_TARGET} PRIVATE ggml)
-if (GGML_CUBLAS)
- add_compile_definitions(GGML_USE_CUBLAS)
+if (GGML_CUDA)
+ add_compile_definitions(GGML_USE_CUDA)
endif()
if (GGML_METAL)
#include "ggml/ggml-alloc.h"
#include "ggml/ggml-backend.h"
-#ifdef GGML_USE_CUBLAS
+#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif
// initialize the tensors of the model in this case two matrices 2x2
void load_model(simple_model & model, float * a, float * b, int rows_A, int cols_A, int rows_B, int cols_B) {
// initialize the backend
-#ifdef GGML_USE_CUBLAS
+#ifdef GGML_USE_CUDA
fprintf(stderr, "%s: using CUDA backend\n", __func__);
model.backend = ggml_backend_cuda_init(0); // init device 0
if (!model.backend) {