From: Przemysław Pawełczyk
Date: Sun, 25 Jun 2023 13:13:50 +0000 (+0200)
Subject: talk-llama : fix build after ggml sync (#1049)
X-Git-Tag: upstream/1.7.4~1398
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=62642bb61cb169cb767b8452762d5173e2193f11;p=pkg%2Fggml%2Fsources%2Fwhisper.cpp

talk-llama : fix build after ggml sync (#1049)

sed -i 's,GGML_BACKEND_CUDA,GGML_BACKEND_GPU,g' examples/talk-llama/llama.cpp
---

diff --git a/examples/talk-llama/llama.cpp b/examples/talk-llama/llama.cpp
index 4cbc8d6b..942407b8 100644
--- a/examples/talk-llama/llama.cpp
+++ b/examples/talk-llama/llama.cpp
@@ -1002,7 +1002,7 @@ static void llama_model_load_internal(
     }
 
 #ifdef GGML_USE_CUBLAS
-#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CUDA
+#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
 #else
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU
 #endif
@@ -1054,7 +1054,7 @@ static void llama_model_load_internal(
             layer.w2 = ml->get_tensor(layers_i + ".feed_forward.w2.weight", {  n_ff, n_embd}, backend);
             layer.w3 = ml->get_tensor(layers_i + ".feed_forward.w3.weight", {n_embd,   n_ff}, backend);
 
-            if (backend == GGML_BACKEND_CUDA) {
+            if (backend == GGML_BACKEND_GPU) {
                 vram_total +=
                     ggml_nbytes(layer.attention_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
                     ggml_nbytes(layer.wv)             + ggml_nbytes(layer.wo) + ggml_nbytes(layer.attention_norm) +
@@ -1115,7 +1115,7 @@ static void llama_model_load_internal(
         }
     }
     for (llama_load_tensor & lt : ml->tensors_map.tensors) {
-        if (lt.ggml_tensor->backend != GGML_BACKEND_CUDA) {
+        if (lt.ggml_tensor->backend != GGML_BACKEND_GPU) {
            continue;
        }
        if (progress_callback) {