From: Georgi Gerganov
Date: Wed, 27 Mar 2024 11:10:13 +0000 (+0200)
Subject: examples : more CUDA leftovers (#0)
X-Git-Tag: upstream/0.0.1642~803
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=39060c7b417ed51c91048bc3059c06d429fb3c50;p=pkg%2Fggml%2Fsources%2Fggml

examples : more CUDA leftovers (#0)
---

diff --git a/examples/magika/CMakeLists.txt b/examples/magika/CMakeLists.txt
index 5543237b..b0529f6f 100644
--- a/examples/magika/CMakeLists.txt
+++ b/examples/magika/CMakeLists.txt
@@ -8,8 +8,8 @@ target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml)
 #
 # For GPU offloading

-if (GGML_CUBLAS)
-    add_compile_definitions(GGML_USE_CUBLAS)
+if (GGML_CUDA)
+    add_compile_definitions(GGML_USE_CUDA)
 endif()

 if (GGML_CLBLAST)
diff --git a/examples/python/ggml/__init__.pyi b/examples/python/ggml/__init__.pyi
index b08ed40f..0c277098 100644
--- a/examples/python/ggml/__init__.pyi
+++ b/examples/python/ggml/__init__.pyi
@@ -568,8 +568,8 @@ class lib:
     def ggml_cpu_has_clblast() -> int:
         """ GGML_API int ggml_cpu_has_clblast (void);"""
         ...
-    def ggml_cpu_has_cublas() -> int:
-        """ GGML_API int ggml_cpu_has_cublas (void);"""
+    def ggml_cpu_has_cuda() -> int:
+        """ GGML_API int ggml_cpu_has_cuda (void);"""
         ...
     def ggml_cpu_has_f16c() -> int:
         """ GGML_API int ggml_cpu_has_f16c (void);"""
@@ -967,8 +967,8 @@ class lib:
     def ggml_init(params: ffi.CData) -> ffi.CData:
         """ GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);"""
         ...
-    def ggml_init_cublas() -> None:
-        """GGML_API void ggml_init_cublas(void);"""
+    def ggml_init_cuda() -> None:
+        """GGML_API void ggml_init_cuda(void);"""
         ...
     def ggml_internal_get_type_traits(type: int) -> ffi.CData:
         """ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);"""
diff --git a/examples/simple/CMakeLists.txt b/examples/simple/CMakeLists.txt
index 4fbdf0f5..7d3aae02 100644
--- a/examples/simple/CMakeLists.txt
+++ b/examples/simple/CMakeLists.txt
@@ -12,8 +12,8 @@ set(TEST_TARGET simple-backend)
 add_executable(${TEST_TARGET} simple-backend.cpp)
 target_link_libraries(${TEST_TARGET} PRIVATE ggml)

-if (GGML_CUBLAS)
-    add_compile_definitions(GGML_USE_CUBLAS)
+if (GGML_CUDA)
+    add_compile_definitions(GGML_USE_CUDA)
 endif()

 if (GGML_METAL)
diff --git a/examples/simple/simple-backend.cpp b/examples/simple/simple-backend.cpp
index fbda1ff8..4ae6f3c8 100644
--- a/examples/simple/simple-backend.cpp
+++ b/examples/simple/simple-backend.cpp
@@ -2,7 +2,7 @@
 #include "ggml/ggml-alloc.h"
 #include "ggml/ggml-backend.h"

-#ifdef GGML_USE_CUBLAS
+#ifdef GGML_USE_CUDA
 #include "ggml-cuda.h"
 #endif

@@ -44,7 +44,7 @@ struct simple_model {
 // initialize the tensors of the model in this case two matrices 2x2
 void load_model(simple_model & model, float * a, float * b, int rows_A, int cols_A, int rows_B, int cols_B) {
     // initialize the backend
-#ifdef GGML_USE_CUBLAS
+#ifdef GGML_USE_CUDA
     fprintf(stderr, "%s: using CUDA backend\n", __func__);
     model.backend = ggml_backend_cuda_init(0); // init device 0
     if (!model.backend) {
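
For reference, the compile-time backend selection these examples converge on looks roughly like the sketch below. This is a minimal illustration, not code from the commit: the helper name init_backend is hypothetical, and the CPU fallback via ggml_backend_cpu_init() comes from the standard ggml-backend API rather than the hunks above. Building with -DGGML_CUDA=ON defines GGML_USE_CUDA (per the CMakeLists.txt changes), which enables the CUDA path.

    #include "ggml/ggml-backend.h"

    #ifdef GGML_USE_CUDA
    #include "ggml-cuda.h"
    #endif

    #include <cstdio>

    // Hypothetical helper: pick a backend at compile time. When built with
    // -DGGML_CUDA=ON (which defines GGML_USE_CUDA), try the CUDA backend
    // first; otherwise, or if CUDA init fails, fall back to the CPU backend.
    static ggml_backend_t init_backend(void) {
        ggml_backend_t backend = nullptr;

    #ifdef GGML_USE_CUDA
        fprintf(stderr, "%s: using CUDA backend\n", __func__);
        backend = ggml_backend_cuda_init(0); // init device 0
        if (!backend) {
            fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
        }
    #endif

        // fall back to the CPU backend if CUDA is unavailable or failed to init
        if (!backend) {
            backend = ggml_backend_cpu_init();
        }

        return backend;
    }

Keeping the selection at compile time (rather than runtime dispatch) is what lets the examples build with no GPU dependencies at all when the corresponding CMake option is off.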