llama : mark LLM_ARCH_STARCODER as full offload supported (#3945)
author Meng Zhang <redacted>
Sun, 5 Nov 2023 12:40:08 +0000 (04:40 -0800)
committer GitHub <redacted>
Sun, 5 Nov 2023 12:40:08 +0000 (14:40 +0200)
as done in https://github.com/ggerganov/llama.cpp/pull/3827

llama.cpp

index cc0211ceb02113dab4672a20979364cab6a9ec91..e165390005c8501dd509d62f8754831b8ad0051a 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5164,11 +5164,12 @@ static int llama_decode_internal(
 
     // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
     const bool full_offload_supported =
-        model.arch == LLM_ARCH_LLAMA    ||
-        model.arch == LLM_ARCH_BAICHUAN ||
-        model.arch == LLM_ARCH_FALCON   ||
-        model.arch == LLM_ARCH_REFACT   ||
-        model.arch == LLM_ARCH_MPT;
+        model.arch == LLM_ARCH_LLAMA      ||
+        model.arch == LLM_ARCH_BAICHUAN   ||
+        model.arch == LLM_ARCH_FALCON     ||
+        model.arch == LLM_ARCH_REFACT     ||
+        model.arch == LLM_ARCH_MPT        ||
+        model.arch == LLM_ARCH_STARCODER;
 
     const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
     if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
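
For context, the check touched by this diff is what lets llama_decode_internal drop to a single CPU thread once the whole model lives on the GPU: extra CPU threads only add synchronization overhead when the CPU merely drives cuBLAS. Below is a minimal sketch of that logic pulled out into a standalone helper. pick_n_threads is a hypothetical name introduced here for illustration, and the n_threads = 1 assignment plus the comment on "+ 3" reflect assumptions about the surrounding code in this version of llama.cpp, not lines shown in the diff itself.

// Sketch (intended to live inside llama.cpp, where llama_model, llama_hparams,
// the LLM_ARCH_* enum values and ggml_cpu_has_cublas() are already visible).
static int pick_n_threads(const llama_model & model, const llama_hparams & hparams, int n_threads) {
    // Architectures whose graphs can run entirely on the GPU.
    const bool full_offload_supported =
        model.arch == LLM_ARCH_LLAMA      ||
        model.arch == LLM_ARCH_BAICHUAN   ||
        model.arch == LLM_ARCH_FALCON     ||
        model.arch == LLM_ARCH_REFACT     ||
        model.arch == LLM_ARCH_MPT        ||
        model.arch == LLM_ARCH_STARCODER;

    // "+ 3" likely accounts for the non-repeating layers offloaded beyond the
    // transformer blocks (output norm, output head, KV-related tensors).
    const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;

    if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
        n_threads = 1; // assumption: the CPU only orchestrates the GPU here, so one thread suffices
    }
    return n_threads;
}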