From: Georgi Gerganov
Date: Sat, 13 Jan 2024 22:09:26 +0000 (+0200)
Subject: examples : adapt to metal API
X-Git-Tag: upstream/0.0.1642~1079
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=aea446526b03a523b5c578912183e7f83f4f4519;p=pkg%2Fggml%2Fsources%2Fggml

examples : adapt to metal API
---

diff --git a/examples/gpt-2/main-backend.cpp b/examples/gpt-2/main-backend.cpp
index dfda108a..27591613 100644
--- a/examples/gpt-2/main-backend.cpp
+++ b/examples/gpt-2/main-backend.cpp
@@ -209,7 +209,7 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab &
 #ifdef GGML_USE_METAL
     if (n_gpu_layers > 0) {
         fprintf(stderr, "%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(ggml_log_callback_default, nullptr);
+        ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         model.backend = ggml_backend_metal_init();
         if (!model.backend) {
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
diff --git a/examples/gpt-2/main-batched.cpp b/examples/gpt-2/main-batched.cpp
index 926c6f74..02b70760 100644
--- a/examples/gpt-2/main-batched.cpp
+++ b/examples/gpt-2/main-batched.cpp
@@ -298,7 +298,7 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab &
 #ifdef GGML_USE_METAL
     if (n_gpu_layers > 0) {
         fprintf(stderr, "%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(ggml_log_callback_default, nullptr);
+        ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         model.backend = ggml_backend_metal_init();
         if (!model.backend) {
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
diff --git a/examples/gpt-2/main.cpp b/examples/gpt-2/main.cpp
index 9d3ae92c..05ce370e 100644
--- a/examples/gpt-2/main.cpp
+++ b/examples/gpt-2/main.cpp
@@ -118,7 +118,7 @@ void init_backends(gpt2_model & model, const gpt_params & params) {
 #ifdef GGML_USE_METAL
     if (params.n_gpu_layers > 0) {
         fprintf(stderr, "%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(ggml_log_callback_default, nullptr);
+        ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         gpu_backend = ggml_backend_metal_init();
         if (!gpu_backend) {
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
diff --git a/examples/whisper/whisper.cpp b/examples/whisper/whisper.cpp
index ca39b58a..3d47dfe7 100644
--- a/examples/whisper/whisper.cpp
+++ b/examples/whisper/whisper.cpp
@@ -1070,7 +1070,7 @@ static ggml_backend_t whisper_backend_init(const whisper_context_params & params
 #ifdef GGML_USE_METAL
     if (params.use_gpu) {
         WHISPER_LOG_INFO("%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
+        ggml_backend_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
         backend_gpu = ggml_backend_metal_init();
         if (!backend_gpu) {
             WHISPER_LOG_ERROR("%s: ggml_backend_metal_init() failed\n", __func__);
diff --git a/tests/test-conv1d.cpp b/tests/test-conv1d.cpp
index 79edddb0..936f9661 100644
--- a/tests/test-conv1d.cpp
+++ b/tests/test-conv1d.cpp
@@ -88,7 +88,7 @@ void load_model(test_model & model, bool use_gpu = false) {
 #ifdef GGML_USE_METAL
     if (use_gpu) {
         fprintf(stderr, "%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(ggml_log_callback_default, nullptr);
+        ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         model.backend = ggml_backend_metal_init();
         if (!model.backend) {
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
diff --git a/tests/test-conv2d.cpp b/tests/test-conv2d.cpp
index 00287faa..06398d0b 100644
--- a/tests/test-conv2d.cpp
+++ b/tests/test-conv2d.cpp
@@ -88,7 +88,7 @@ void load_model(test_model & model, bool use_gpu = false) {
 #ifdef GGML_USE_METAL
     if (use_gpu) {
         fprintf(stderr, "%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(ggml_log_callback_default, nullptr);
+        ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         model.backend = ggml_backend_metal_init();
         if (!model.backend) {
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
diff --git a/tests/test-mul-mat.cpp b/tests/test-mul-mat.cpp
index 99359e2f..7380c973 100644
--- a/tests/test-mul-mat.cpp
+++ b/tests/test-mul-mat.cpp
@@ -68,7 +68,7 @@ void load_model(test_model & model, float* a, float* b, int M, int N, int K, boo
 #ifdef GGML_USE_METAL
     if (use_gpu) {
         fprintf(stderr, "%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(ggml_log_callback_default, nullptr);
+        ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         model.backend = ggml_backend_metal_init();
         if (!model.backend) {
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);