CUDA: use only 1 thread if fully offloaded (#2915)
author    Johannes Gäßler <redacted>
          Thu, 21 Sep 2023 08:43:53 +0000 (10:43 +0200)
committer GitHub <redacted>
          Thu, 21 Sep 2023 08:43:53 +0000 (11:43 +0300)
llama.cpp

index 358bf5ec8a7ad7c541450fd59790a2a8b14b2cf5..346636501ce15c8a9b73e28e9b2d4bbf1d273440 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3765,6 +3765,15 @@ static bool llama_eval_internal(
         n_threads = std::min(4, n_threads);
     }
 
+    // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
+    const bool full_offload_supported = model.arch == LLM_ARCH_LLAMA ||
+        model.arch == LLM_ARCH_BAICHUAN ||
+        model.arch == LLM_ARCH_FALCON;
+    const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
+    if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
+        n_threads = 1;
+    }
+
     struct ggml_tensor * res        = gf->nodes[gf->n_nodes - 1];
     struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
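The rationale behind the patch: once every tensor runs on the GPU, the CPU threads only launch kernels and wait on results, so additional threads add synchronization overhead without doing useful work. The `+ 3` presumably accounts for the non-repeating tensors that are offloaded beyond the per-layer weights. Below is a minimal standalone sketch of the same decision logic, assuming nothing beyond what the diff shows; `Arch`, `ModelParams`, `supports_full_offload`, and `pick_n_threads` are hypothetical names invented for illustration, not llama.cpp API.

// Hypothetical sketch of the thread-count heuristic from the diff above.
enum class Arch { LLAMA, BAICHUAN, FALCON, OTHER };

struct ModelParams {
    Arch arch;
    int  n_gpu_layers; // layers requested on the GPU
    int  n_layer;      // repeating transformer layers in the model
};

// Only these architectures could be fully offloaded at the time of the patch.
static bool supports_full_offload(Arch arch) {
    return arch == Arch::LLAMA || arch == Arch::BAICHUAN || arch == Arch::FALCON;
}

// Mirrors the patched logic: with cuBLAS available and everything on the
// GPU, extra CPU threads would only contend on synchronization, so use one.
static int pick_n_threads(const ModelParams & m, int n_threads, bool has_cublas) {
    const bool fully_offloaded = m.n_gpu_layers >= m.n_layer + 3;
    if (has_cublas && supports_full_offload(m.arch) && fully_offloaded) {
        return 1;
    }
    return n_threads;
}

Note that partially offloaded runs keep their requested thread count, since the CPU still executes the layers that remain on it.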