git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
CUDA: use min compute capability of GPUs actually used (#2506)
author    Cebtenzzre <redacted>
Fri, 4 Aug 2023 15:35:22 +0000 (11:35 -0400)
committer GitHub <redacted>
Fri, 4 Aug 2023 15:35:22 +0000 (17:35 +0200)
ggml-cuda.cu

index 4321e46741a40171349d90c71db21a09f8109089..d64d7045ccf3e50e4051898fd1a9a646bfe89379 100644 (file)
@@ -5347,7 +5347,8 @@ void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_
         } else {
             int min_compute_capability = INT_MAX;
             for (int id = 0; id < g_device_count; ++id) {
-                if (min_compute_capability > g_compute_capabilities[id]) {
+                if (min_compute_capability > g_compute_capabilities[id]
+                        && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
                     min_compute_capability = g_compute_capabilities[id];
                 }
             }
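
The added condition skips any device whose slice of the tensor split is empty, so the minimum compute capability is taken only over GPUs that actually receive work. Below is a minimal standalone sketch of that logic (not the llama.cpp code itself), assuming, as the diff suggests, that each entry of the split array holds the cumulative start fraction assigned to that device; the function name and the example values are hypothetical.

```cpp
#include <climits>
#include <cstdio>

// Return the minimum compute capability among devices whose tensor-split slice
// is non-empty, i.e. whose start fraction is strictly below the next device's
// start fraction (or below 1.0f for the last device).
static int min_cc_of_used_gpus(const int * compute_capabilities,
                               const float * tensor_split,
                               int device_count) {
    int min_compute_capability = INT_MAX;
    for (int id = 0; id < device_count; ++id) {
        const float next = id + 1 < device_count ? tensor_split[id + 1] : 1.0f;
        if (tensor_split[id] < next &&
                compute_capabilities[id] < min_compute_capability) {
            min_compute_capability = compute_capabilities[id];
        }
    }
    return min_compute_capability;
}

int main() {
    // Hypothetical setup: device 0 (cc 6.1) gets 0% of the split, device 1
    // (cc 8.6) gets 100%. Without the check, the minimum would be 61; with it,
    // only the GPU actually used is considered, so 86 is returned.
    const int   cc[]    = {61, 86};
    const float split[] = {0.0f, 0.0f};
    printf("min compute capability of used GPUs: %d\n",
           min_cc_of_used_gpus(cc, split, 2));
    return 0;
}
```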