git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
CUDA: fix logic for V100 + GGML_CUDA_FORCE_MMQ (#12098)
author: Johannes Gäßler <redacted>
Fri, 28 Feb 2025 08:26:43 +0000 (09:26 +0100)
committer: GitHub <redacted>
Fri, 28 Feb 2025 08:26:43 +0000 (09:26 +0100)
ggml/src/ggml-cuda/mmq.cuh

index 0451c65f302777d9514527dbac5eb9fa6668e03f..f2aca1f2014e1dd4d846054bb5d05a0e9a8ffd13 100644 (file)
@@ -109,9 +109,9 @@ static constexpr __device__ int get_mmq_x_max_device() {
 
 #if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
 #ifdef GGML_CUDA_FORCE_MMQ
-    return MMQ_DP4A_MAX_BATCH_SIZE;
-#else // GGML_CUDA_FORCE_MMQ
     return 128;
+#else // GGML_CUDA_FORCE_MMQ
+    return MMQ_DP4A_MAX_BATCH_SIZE;
 #endif // GGML_CUDA_FORCE_MMQ
 #else // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA