From: Georgi Gerganov
Date: Wed, 10 Dec 2025 18:53:16 +0000 (+0200)
Subject: ggml : remove GGML_KQ_MASK_PAD constant (llama/17910)
X-Git-Tag: upstream/0.9.4.395~30
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=4767bda16156e02bfe0acebb29b42e5c1893b3df;p=pkg%2Fggml%2Fsources%2Fggml

ggml : remove GGML_KQ_MASK_PAD constant (llama/17910)

* ggml : remove GGML_KQ_MASK_PAD constant

* cont : remove comment
---

diff --git a/include/ggml.h b/include/ggml.h
index 6bc762c0..686da3db 100644
--- a/include/ggml.h
+++ b/include/ggml.h
@@ -2305,13 +2305,11 @@ extern "C" {
             float                 stop,
             float                 step);
 
-#define GGML_KQ_MASK_PAD 1
-
-    // q:    [n_embd_k, n_batch,     n_head,    ne3 ]
-    // k:    [n_embd_k, n_kv,        n_head_kv, ne3 ]
-    // v:    [n_embd_v, n_kv,        n_head_kv, ne3 ] !! not transposed !!
-    // mask: [n_kv,     n_batch_pad, ne32,      ne33] !! n_batch_pad = GGML_PAD(n_batch, GGML_KQ_MASK_PAD) !!
-    // res:  [n_embd_v, n_head,      n_batch,   ne3 ] !! permuted !!
+    // q:    [n_embd_k, n_batch, n_head,    ne3 ]
+    // k:    [n_embd_k, n_kv,    n_head_kv, ne3 ]
+    // v:    [n_embd_v, n_kv,    n_head_kv, ne3 ] !! not transposed !!
+    // mask: [n_kv,     n_batch, ne32,      ne33]
+    // res:  [n_embd_v, n_head,  n_batch,   ne3 ] !! permuted !!
     //
     // broadcast:
     //   n_head % n_head_kv == 0
diff --git a/src/ggml.c b/src/ggml.c
index 530ff7b9..f0913cd3 100644
--- a/src/ggml.c
+++ b/src/ggml.c
@@ -5260,8 +5260,6 @@ struct ggml_tensor * ggml_flash_attn_ext(
 
     if (mask) {
         GGML_ASSERT(ggml_is_contiguous(mask));
-        GGML_ASSERT(mask->ne[1] >= GGML_PAD(q->ne[1], GGML_KQ_MASK_PAD) &&
-                "the Flash-Attention kernel requires the mask to be padded to GGML_KQ_MASK_PAD and at least n_queries big");
         //GGML_ASSERT(ggml_can_repeat_rows(mask, qk));
 
         GGML_ASSERT(q->ne[2] % mask->ne[2] == 0);
diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index a6f26660..7be1f660 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -5875,7 +5875,7 @@ struct test_flash_attn_ext : public test_case {
         ggml_tensor * m = nullptr;
 
         if (mask) {
-            m = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), 1, nr23[1]);
+            m = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, nb, 1, nr23[1]);
             ggml_set_name(m, "m");
         }
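
In practice, this change means callers of ggml_flash_attn_ext allocate the
mask with exactly n_batch rows instead of first rounding up with GGML_PAD,
as the test update above shows. A minimal caller-side sketch of the before
and after, assuming an initialized ggml_context * ctx, existing q/k/v
tensors, and illustrative sizes n_kv and n_batch (these variable names are
assumptions for the example, not taken from the commit):

    // before this commit: rows padded up to a multiple of GGML_KQ_MASK_PAD
    //   struct ggml_tensor * mask = ggml_new_tensor_4d(ctx, GGML_TYPE_F16,
    //           n_kv, GGML_PAD(n_batch, GGML_KQ_MASK_PAD), 1, 1);

    // after this commit: the mask is created with exactly n_batch rows,
    // matching the updated header comment: mask: [n_kv, n_batch, ne32, ne33]
    struct ggml_tensor * mask = ggml_new_tensor_4d(ctx, GGML_TYPE_F16,
            n_kv, n_batch, 1, 1);

    // the call itself is unchanged; scale, max_bias and logit_softcap are
    // the existing float parameters of ggml_flash_attn_ext
    struct ggml_tensor * res = ggml_flash_attn_ext(ctx, q, k, v, mask,
            scale, max_bias, logit_softcap);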