ggml : remove GGML_KQ_MASK_PAD constant (llama/17910)
author     Georgi Gerganov <redacted>
           Wed, 10 Dec 2025 18:53:16 +0000 (20:53 +0200)
committer  Georgi Gerganov <redacted>
           Thu, 11 Dec 2025 13:33:01 +0000 (15:33 +0200)
* ggml : remove GGML_KQ_MASK_PAD constant

* cont : remove comment

include/ggml.h
src/ggml.c
tests/test-backend-ops.cpp

diff --git a/include/ggml.h b/include/ggml.h
index 6bc762c069743ecf4b6f3f4e0da894b04c010090..686da3dbd107835c9454532da9ee1ddd1ea9f5c8 100644
--- a/include/ggml.h
+++ b/include/ggml.h
@@ -2305,13 +2305,11 @@ extern "C" {
             float                 stop,
             float                 step);
 
-#define GGML_KQ_MASK_PAD 1
-
-    // q:    [n_embd_k, n_batch,     n_head,    ne3 ]
-    // k:    [n_embd_k, n_kv,        n_head_kv, ne3 ]
-    // v:    [n_embd_v, n_kv,        n_head_kv, ne3 ] !! not transposed !!
-    // mask: [n_kv,     n_batch_pad, ne32,      ne33] !! n_batch_pad = GGML_PAD(n_batch, GGML_KQ_MASK_PAD) !!
-    // res:  [n_embd_v, n_head,      n_batch,   ne3 ] !! permuted !!
+    // q:    [n_embd_k, n_batch, n_head,    ne3 ]
+    // k:    [n_embd_k, n_kv,    n_head_kv, ne3 ]
+    // v:    [n_embd_v, n_kv,    n_head_kv, ne3 ] !! not transposed !!
+    // mask: [n_kv,     n_batch, ne32,      ne33]
+    // res:  [n_embd_v, n_head,  n_batch,   ne3 ] !! permuted !!
     //
     // broadcast:
     //   n_head % n_head_kv == 0
diff --git a/src/ggml.c b/src/ggml.c
index 530ff7b95399e812606e28a4272c3db2f535b402..f0913cd35967f43e19ca9fe7a9d95ed5a2d3d550 100644
--- a/src/ggml.c
+++ b/src/ggml.c
@@ -5260,8 +5260,6 @@ struct ggml_tensor * ggml_flash_attn_ext(
 
     if (mask) {
         GGML_ASSERT(ggml_is_contiguous(mask));
-        GGML_ASSERT(mask->ne[1] >= GGML_PAD(q->ne[1], GGML_KQ_MASK_PAD) &&
-                "the Flash-Attention kernel requires the mask to be padded to GGML_KQ_MASK_PAD and at least n_queries big");
         //GGML_ASSERT(ggml_can_repeat_rows(mask, qk));
 
         GGML_ASSERT(q->ne[2] % mask->ne[2] == 0);
diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index a6f266601feeee99a8ef5d0a03fd634d9660ceca..7be1f6603877ceabd8c1830592c9079c9f542404 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -5875,7 +5875,7 @@ struct test_flash_attn_ext : public test_case {
 
         ggml_tensor * m = nullptr;
         if (mask) {
-            m = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), 1, nr23[1]);
+            m = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, nb, 1, nr23[1]);
             ggml_set_name(m, "m");
         }
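
For callers of ggml_flash_attn_ext the practical effect of this change is that the mask tensor is allocated with the actual batch size in its second dimension, with no GGML_PAD(n_batch, GGML_KQ_MASK_PAD) rounding. The snippet below is a minimal sketch of that, assuming the current eight-argument ggml_flash_attn_ext signature (ctx, q, k, v, mask, scale, max_bias, logit_softcap); the helper name and the chosen scale/bias values are illustrative, not taken from this commit.

// minimal sketch, not part of the commit: sizing the KQ mask without
// GGML_KQ_MASK_PAD padding of its second dimension
#include <math.h>

#include "ggml.h"

static struct ggml_tensor * build_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor  * q,    // [n_embd_k, n_batch, n_head,    ne3]
        struct ggml_tensor  * k,    // [n_embd_k, n_kv,    n_head_kv, ne3]
        struct ggml_tensor  * v) {  // [n_embd_v, n_kv,    n_head_kv, ne3]
    const int64_t n_kv    = k->ne[1];
    const int64_t n_batch = q->ne[1];

    // mask: [n_kv, n_batch, ne32, ne33] - previously the second dimension had
    // to be padded to a multiple of GGML_KQ_MASK_PAD
    struct ggml_tensor * mask = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, n_kv, n_batch, 1, 1);
    ggml_set_name(mask, "kq_mask");

    // illustrative scaling by 1/sqrt(head dim); max_bias and logit_softcap left at 0
    const float scale = 1.0f/sqrtf((float) q->ne[0]);

    return ggml_flash_attn_ext(ctx, q, k, v, mask, scale, 0.0f, 0.0f);
}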