ggml : apply mul_mat broadcast fix (sync llama.cpp)
author     Georgi Gerganov <redacted>
Wed, 12 Jul 2023 17:52:37 +0000 (20:52 +0300)
committer  Georgi Gerganov <redacted>
Wed, 12 Jul 2023 17:52:37 +0000 (20:52 +0300)
src/ggml.c

index 1eaf17c9873855825fedfe60d07fbb7dc988c336..c137ae658df7f7bf97e74f85cdca55c0ab8cf7b5 100644 (file)
@@ -10815,7 +10815,13 @@ static void ggml_compute_forward_mul_mat(
 
         const int64_t ir0 = (ir1/ne11)%(ne02*ne03);
         const int64_t i03 = (ir0/(ne02));
-        const int64_t i02 = (ir0 - i03*ne02);
+        // Hack for "Falcon multi-query-attention key stutter" / alternative to ggml_repeat2.
+        // See https://github.com/ggerganov/llama.cpp/issues/1602#issuecomment-1606087470:
+        // GG: this is likely the correct way to broadcast, though need some more thought
+        //     therefore leaving the comments to remind us for now
+        const int64_t i02 = (i12 / (ne12 / ne02));
+        // Original from PR/224 (and also essential/correct for non-broadcast matmuls in Falcon)
+        // const int64_t i02 = (ir0 - i03*ne02);
 
         const int64_t i1 = i11;
         const int64_t i2 = i12;
@@ -13108,10 +13114,9 @@ static void ggml_compute_forward_conv_2d(
 
     if (s0 == src0->ne[0] && s1 == src0->ne[1]) {
         ggml_compute_forward_conv_2d_sk_p0(params, src0, src1, dst);
-    }
-    else {
+    } else {
         GGML_ASSERT(false); // only stride equal to kernel size is supported
-    };
+    }
 }
 
 // ggml_compute_forward_pool_1d_sk_p0
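
The key change in the first hunk replaces the row-derived index i02 = ir0 - i03*ne02 with a broadcast rule that folds each src1 batch index i12 back onto a src0 batch index, i02 = i12 / (ne12 / ne02), so one src0 slice can serve several src1 slices (the Falcon multi-query-attention case referenced in the comments). The following stand-alone C sketch illustrates that index arithmetic only; the shapes are made-up example values, not code from ggml.

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        // Example shapes (assumed, not from the commit): src0 has ne02 = 2 batch
        // slices (e.g. KV heads), src1 has ne12 = 8 batch slices (e.g. query heads).
        const int64_t ne02 = 2;
        const int64_t ne12 = 8; // must be a multiple of ne02 for this broadcast

        for (int64_t i12 = 0; i12 < ne12; ++i12) {
            // Broadcast rule from the hunk above: fold the src1 index back onto src0,
            // so ne12/ne02 consecutive src1 slices map to the same src0 slice.
            const int64_t i02 = i12 / (ne12 / ne02);
            printf("src1 slice i12=%2lld -> src0 slice i02=%lld\n",
                   (long long) i12, (long long) i02);
        }
        return 0;
    }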