* Patch perf regression for mmq kernels in ROCm
Recovers the performance regression reported in https://github.com/ggml-org/llama.cpp/issues/17917
* add an n_experts branch like the CDNA path
* mmq.cu: tune mmq/wmma switching for RDNA
* mmq.cu: move the AMD WMMA mmq/wmma switching behind IS_RDNA3
* Update ggml/src/ggml-cuda/mmq.cu
Co-authored-by: Johannes Gäßler <redacted>
---------
Co-authored-by: Jiacheng (Jason) Chen <redacted>
Co-authored-by: jiachengjason <redacted>
Co-authored-by: Johannes Gäßler <redacted>
     }
     if (amd_wmma_available(cc)) {
+        // RDNA 4 is consistently worse on rocBLAS, so the switching below only applies to RDNA 3
+        // https://github.com/ggml-org/llama.cpp/pull/18537#issuecomment-3706422301
+        if (GGML_CUDA_CC_IS_RDNA3(cc)) {
+            // High expert counts are almost always better on MMQ
+            // due to the large number of graph splits
+            // https://github.com/ggml-org/llama.cpp/pull/18202
+            if (n_experts >= 64) {
+                return true;
+            }
+
+            switch (type) {
+                // These quants perform poorly on MMQ
+                case GGML_TYPE_Q2_K:
+                case GGML_TYPE_Q6_K:
+                // These quants are usually, but not always, worse on MMQ
+                case GGML_TYPE_IQ2_XS:
+                case GGML_TYPE_IQ2_S:
+                    return ne11 <= 128;
+                default:
+                    return true;
+            }
+        }
         return true;
     }
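
To make the boundary behavior of this heuristic easy to check in isolation, here is a hypothetical, self-contained C++ model of the RDNA 3 branch. The function name `use_mmq_rdna3`, the trimmed-down quant enum, and the parameter list are illustrative assumptions; the real predicate lives in ggml/src/ggml-cuda/mmq.cu, takes the compute capability among other inputs, and is the source of truth.

```cpp
// Hypothetical standalone sketch of the RDNA 3 MMQ/rocBLAS switching heuristic
// shown in the hunk above; not the upstream API.
#include <cassert>
#include <cstdint>

enum class QuantType { Q2_K, Q6_K, IQ2_XS, IQ2_S, Other };

// true  -> run the MMQ kernels
// false -> fall back to the rocBLAS/dequantize path
static bool use_mmq_rdna3(QuantType type, int64_t ne11, int64_t n_experts) {
    // Models with many experts otherwise incur a large number of graph
    // splits, so MMQ wins regardless of quant type or batch size.
    if (n_experts >= 64) {
        return true;
    }
    switch (type) {
        case QuantType::Q2_K:    // consistently slower on MMQ
        case QuantType::Q6_K:
        case QuantType::IQ2_XS:  // usually, but not always, slower on MMQ
        case QuantType::IQ2_S:
            // Keep MMQ only for small batches; ne11 is effectively the batch size.
            return ne11 <= 128;
        default:
            return true;
    }
}

int main() {
    assert( use_mmq_rdna3(QuantType::Q6_K,   128,  1)); // at the cutoff: MMQ
    assert(!use_mmq_rdna3(QuantType::Q6_K,   129,  1)); // past it: rocBLAS
    assert( use_mmq_rdna3(QuantType::Q6_K,  4096, 64)); // many experts: MMQ
    assert( use_mmq_rdna3(QuantType::Other, 4096,  1)); // other quants: MMQ
    return 0;
}
```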