git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
CUDA: avoid mul + bias fusion when doing fusion (#16935)
author: Aman Gupta <redacted>
Tue, 4 Nov 2025 02:53:48 +0000 (10:53 +0800)
committer: GitHub <redacted>
Tue, 4 Nov 2025 02:53:48 +0000 (10:53 +0800)
ggml/src/ggml-cuda/ggml-cuda.cu

index 5667ec0c4d709be51f7b02b8775cc57f9acb61b3..415a7e962d7795ac8f7abb932469df8ae3a8a3a6 100644 (file)
@@ -2115,6 +2115,14 @@ static bool ggml_cuda_should_fuse_mul_mat_vec_f(const ggml_tensor * tensor) {
     const int cc      = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
     use_mul_mat_vec_f = use_mul_mat_vec_f && ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, is_mul_mat_id ? src1->ne[2] : src1->ne[1]);
 
+    const bool split = ggml_backend_buft_is_cuda_split(src0->buffer->buft) ||
+                       ggml_backend_buft_is_cuda_split(src1->buffer->buft);
+
+    //TODO: add support for fusion for split buffers
+    if (split) {
+        return false;
+    }
+
     //we only support fusion for ncols_dst = 1
     if (tensor->op == GGML_OP_MUL_MAT && dst->ne[1] != 1) {
         return false;
@@ -2154,6 +2162,15 @@ static bool ggml_cuda_should_fuse_mul_mat_vec_q(const ggml_tensor * tensor) {
         return false;
     }
 
+
+    const bool split = ggml_backend_buft_is_cuda_split(src0->buffer->buft) ||
+                       ggml_backend_buft_is_cuda_split(src1->buffer->buft);
+
+    //TODO: add support for fusion for split buffers
+    if (split) {
+        return false;
+    }
+
     return use_mul_mat_vec_q;
 }