mmq.cu: tune mmq/rocblas switching for RDNA (#18537)
author     Beinsezii <redacted>
           Tue, 6 Jan 2026 15:26:07 +0000 (07:26 -0800)
committer  GitHub <redacted>
           Tue, 6 Jan 2026 15:26:07 +0000 (16:26 +0100)
* Patch perf regression for mmq kernels in ROCm

Recovers the performance regression reported in https://github.com/ggml-org/llama.cpp/issues/17917

* add an n_experts branch like the CDNA path

* mmq.cu: tune mmq/wmma switching for RDNA

* mmq.cu: move amd wmma mmq/wmma switching behind IS_RDNA3

* Update ggml/src/ggml-cuda/mmq.cu

Co-authored-by: Johannes Gäßler <redacted>
---------

Co-authored-by: Jiacheng (Jason) Chen <redacted>
Co-authored-by: jiachengjason <redacted>
Co-authored-by: Johannes Gäßler <redacted>
ggml/src/ggml-cuda/mmq.cu

index 85692d454300011b8794d5da082247ff2f514b28..ceb95758d20505d73527dcaeaedcdd742fd09c5b 100644 (file)
@@ -333,6 +333,28 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11, int64_t
     }
 
     if (amd_wmma_available(cc)) {
+        // RDNA 4 is consistently worse on rocBLAS, so only RDNA 3 gets the heuristic below
+        // https://github.com/ggml-org/llama.cpp/pull/18537#issuecomment-3706422301
+        if (GGML_CUDA_CC_IS_RDNA3(cc)) {
+            // High expert counts are almost always faster on MMQ
+            // because they produce a large number of graph splits
+            // https://github.com/ggml-org/llama.cpp/pull/18202
+            if (n_experts >= 64) {
+                return true;
+            }
+
+            switch (type) {
+                // These quants perform very poorly on MMQ
+                case GGML_TYPE_Q2_K:
+                case GGML_TYPE_Q6_K:
+                // These quants are usually, but not always, worse on MMQ
+                case GGML_TYPE_IQ2_XS:
+                case GGML_TYPE_IQ2_S:
+                    return ne11 <= 128;
+                default:
+                    return true;
+            }
+        }
         return true;
     }
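
For readers skimming the commitdiff, here is the new RDNA 3 decision rule restated as a standalone sketch. The helper name should_use_mmq_rdna3 and the include line are illustrative assumptions; the real logic lives inside ggml_cuda_should_use_mmq in ggml/src/ggml-cuda/mmq.cu, guarded by amd_wmma_available(cc) and GGML_CUDA_CC_IS_RDNA3(cc).

    #include <cstdint>
    #include "ggml.h" // for enum ggml_type (GGML_TYPE_Q2_K, GGML_TYPE_IQ2_S, ...); include path assumed

    // Hypothetical standalone restatement of the RDNA 3 branch added above.
    // true  -> dispatch the quantized MMQ kernels
    // false -> fall back to the rocBLAS GEMM path
    static bool should_use_mmq_rdna3(enum ggml_type type, int64_t ne11, int64_t n_experts) {
        // Models with many experts produce many graph splits, where MMQ is almost always faster.
        if (n_experts >= 64) {
            return true;
        }
        switch (type) {
            // Quants where MMQ only wins for small batches
            // (ne11 is the src1 column count, effectively the batch size):
            case GGML_TYPE_Q2_K:
            case GGML_TYPE_Q6_K:
            case GGML_TYPE_IQ2_XS:
            case GGML_TYPE_IQ2_S:
                return ne11 <= 128;
            default:
                return true;
        }
    }

For example, under this sketch a Q6_K matmul with ne11 = 512 and no experts would take the rocBLAS path, while the same call with n_experts >= 64 stays on MMQ; RDNA 4 and other amd_wmma targets skip the heuristic entirely and always use MMQ.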