From: Cebtenzzre
Date: Fri, 4 Aug 2023 15:35:22 +0000 (-0400)
Subject: CUDA: use min compute capability of GPUs actually used (#2506)
X-Git-Tag: gguf-v0.4.0~356
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=4329d1acb01c353803a54733b8eef9d93d0b84b2;p=pkg%2Fggml%2Fsources%2Fllama.cpp

CUDA: use min compute capability of GPUs actually used (#2506)
---

diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 4321e467..d64d7045 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -5347,7 +5347,8 @@ void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_
     } else {
         int min_compute_capability = INT_MAX;
         for (int id = 0; id < g_device_count; ++id) {
-            if (min_compute_capability > g_compute_capabilities[id]) {
+            if (min_compute_capability > g_compute_capabilities[id]
+                    && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
                 min_compute_capability = g_compute_capabilities[id];
             }
         }
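
Note (not part of the commit): below is a minimal, self-contained sketch of the participation test added above, assuming g_tensor_split holds the cumulative starting row fraction assigned to each device, so a device contributes rows only if its start fraction is strictly below the next device's start (or 1.0f for the last device). The device count, compute capability values, and split fractions are hypothetical.

    /* sketch.c -- illustrates the "skip unused GPUs" check; values are hypothetical */
    #include <limits.h>
    #include <stdio.h>

    #define DEVICE_COUNT 3

    int main(void) {
        /* Hypothetical split: device 0 covers [0.0, 0.5), device 1 covers [0.5, 1.0),
         * device 2 starts at 1.0 and therefore receives no rows. */
        const float tensor_split[DEVICE_COUNT]        = {0.0f, 0.5f, 1.0f};
        const int   compute_capabilities[DEVICE_COUNT] = {860, 700, 610};

        int min_compute_capability = INT_MAX;
        for (int id = 0; id < DEVICE_COUNT; ++id) {
            /* A device is "actually used" only if its slice of rows is non-empty. */
            const float next_start = id + 1 < DEVICE_COUNT ? tensor_split[id + 1] : 1.0f;
            if (min_compute_capability > compute_capabilities[id]
                    && tensor_split[id] < next_start) {
                min_compute_capability = compute_capabilities[id];
            }
        }

        /* Device 2 (capability 610) is skipped because its slice is empty,
         * so the minimum over used GPUs is 700 rather than 610. */
        printf("min compute capability of used GPUs: %d\n", min_compute_capability);
        return 0;
    }

Under this hypothetical split the last device holds no rows, so its lower compute capability no longer drags down the minimum used for kernel selection, which is the behavior the commit subject describes.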