git.djapps.eu Git - pkg/ggml/sources/ggml/commitdiff
cuda : add Pad Reflect 1D support (llama/14659)
author Yavor Ivanov <redacted>
Fri, 22 Aug 2025 11:06:29 +0000 (14:06 +0300)
committer Georgi Gerganov <redacted>
Fri, 5 Sep 2025 09:54:01 +0000 (12:54 +0300)
* Add Pad Reflect 1D CUDA support

* Update src/ggml-cuda/pad_reflect_1d.cu

Co-authored-by: Johannes Gäßler <redacted>
---------

Co-authored-by: Johannes Gäßler <redacted>
src/ggml-cuda/ggml-cuda.cu
src/ggml-cuda/pad_reflect_1d.cu [new file with mode: 0644]
src/ggml-cuda/pad_reflect_1d.cuh [new file with mode: 0644]

index 4e17fd211e1bba40a83d8fde3acfb326eafbfe30..d29a0b573f19300af868f388caf5db085e205663 100644 (file)
@@ -49,6 +49,7 @@
 #include "ggml-cuda/wkv.cuh"
 #include "ggml-cuda/gla.cuh"
 #include "ggml-cuda/set-rows.cuh"
+#include "ggml-cuda/pad_reflect_1d.cuh"
 #include "ggml.h"
 
 #include <algorithm>
@@ -2352,6 +2353,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
         case GGML_OP_PAD:
             ggml_cuda_op_pad(ctx, dst);
             break;
+        case GGML_OP_PAD_REFLECT_1D:
+            ggml_cuda_op_pad_reflect_1d(ctx, dst);
+            break;
         case GGML_OP_ARANGE:
             ggml_cuda_op_arange(ctx, dst);
             break;
@@ -3490,6 +3494,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
             return ggml_is_contiguous(op->src[0]);
         case GGML_OP_UPSCALE:
         case GGML_OP_PAD:
+        case GGML_OP_PAD_REFLECT_1D:
         case GGML_OP_ARANGE:
         case GGML_OP_TIMESTEP_EMBEDDING:
         case GGML_OP_LEAKY_RELU:
diff --git a/src/ggml-cuda/pad_reflect_1d.cu b/src/ggml-cuda/pad_reflect_1d.cu
new file mode 100644 (file)
index 0000000..4ed34ae
--- /dev/null
@@ -0,0 +1,82 @@
+#include "pad_reflect_1d.cuh"
+
+// Reflect-pad one tensor along dim 0 (the edge element itself is not
+// repeated, matching PyTorch-style "reflect" padding).
+// Launch layout (set up by the host wrapper): grid = (ne01, ne02, ne03),
+// i.e. one block per source row; the threads of a block stride over the
+// padded output extent ne0 = ne00 + p0 + p1.
+// ne*/nb* follow the ggml convention: ne = element counts, nb = strides
+// in bytes; all pointer arithmetic below is done on char* with nb offsets.
+static __global__ void pad_reflect_1d_kernel_f32(
+    const void * __restrict__ src0,
+    void * __restrict__ dst,
+    const int64_t ne0,
+    const int64_t ne00,
+    const int64_t ne01,
+    const int64_t ne02,
+    const int64_t ne03,
+    const int64_t nb00,
+    const int64_t nb01,
+    const int64_t nb02,
+    const int64_t nb03,
+    const int64_t nb0,
+    const int64_t nb1,
+    const int64_t nb2,
+    const int64_t nb3,
+    const int p0,
+    const int p1) {
+
+    const int64_t i3 = blockIdx.z;
+    const int64_t i2 = blockIdx.y;
+    const int64_t i1 = blockIdx.x;
+
+    // Defensive guard: the launcher sizes the grid exactly to
+    // (ne01, ne02, ne03), so this should never trigger.
+    if (i1 >= ne01 || i2 >= ne02 || i3 >= ne03) {
+        return;
+    }
+
+    // Base of the source row and of the corresponding output row.
+    const char * src0_ptr = (const char *)src0 + i3*nb03 + i2*nb02 + i1*nb01;
+    char * dst_ptr = (char *)dst + i3*nb3 + i2*nb2 + i1*nb1;
+
+    // Block-stride loop over the padded extent; adjacent threads write
+    // adjacent output elements.
+    for (int64_t i0 = threadIdx.x; i0 < ne0; i0 += blockDim.x) {
+        float value;
+
+        if (i0 < p0) {
+            // Left padding - reflect
+            // Mirror about src element 0: source index is p0 - i0.
+            value = *(const float *)(src0_ptr + (p0 - i0) * nb00);
+        } else if (i0 < ne0 - p1) {
+            // Middle - copy
+            value = *(const float *)(src0_ptr + (i0 - p0) * nb00);
+        } else {
+            // Right padding - reflect
+            // ne0 - p1 - p0 == ne00 (asserted by the launcher), so this
+            // simplifies to 2*(ne00 - 1) + p0 - i0: a mirror about the
+            // last source element, excluding the edge itself.
+            int64_t src_idx = (ne0 - p1 - p0) - (p1 + 1 - (ne0 - i0)) - 1;
+            value = *(const float *)(src0_ptr + src_idx * nb00);
+        }
+
+        *(float *)(dst_ptr + i0 * nb0) = value;
+    }
+}
+
+// Host-side launcher for GGML_OP_PAD_REFLECT_1D (F32 only).
+// Reads the two padding widths (left p0, right p1, in elements) from
+// dst->op_params and launches one block per source row on the
+// backend context's stream.
+void ggml_cuda_op_pad_reflect_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+    // op_params[0] = left pad, op_params[1] = right pad (element counts).
+    const int32_t * opts = (const int32_t *) dst->op_params;
+    const int p0 = opts[0];
+    const int p1 = opts[1];
+
+    const int64_t ne00 = src0->ne[0];
+    const int64_t ne01 = src0->ne[1];
+    const int64_t ne02 = src0->ne[2];
+    const int64_t ne03 = src0->ne[3];
+
+    const int64_t ne0 = dst->ne[0];
+
+    // Output extent along dim 0 must be exactly input + both pads.
+    GGML_ASSERT(ne0 == ne00 + p0 + p1);
+
+    // 1-D block of CUDA_PAD_REFLECT_1D_BLOCK_SIZE threads strides over
+    // ne0; grid covers every (row, plane, batch) of the source.
+    const dim3 block_dims(CUDA_PAD_REFLECT_1D_BLOCK_SIZE, 1, 1);
+    const dim3 grid_dims(ne01, ne02, ne03);
+
+    // NOTE(review): no cudaGetLastError() after the launch — presumably
+    // launch errors are surfaced by the backend elsewhere; confirm this
+    // matches the error-handling convention of the other ggml-cuda ops.
+    pad_reflect_1d_kernel_f32<<<grid_dims, block_dims, 0, stream>>>(
+        src0->data, dst->data,
+        ne0, ne00, ne01, ne02, ne03,
+        src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3],
+        dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3],
+        p0, p1
+    );
+}
diff --git a/src/ggml-cuda/pad_reflect_1d.cuh b/src/ggml-cuda/pad_reflect_1d.cuh
new file mode 100644 (file)
index 0000000..15f2ed1
--- /dev/null
@@ -0,0 +1,5 @@
+#include "common.cuh"
+
+// Threads per block used by the pad_reflect_1d launcher.
+#define CUDA_PAD_REFLECT_1D_BLOCK_SIZE 256
+
+// CUDA implementation of GGML_OP_PAD_REFLECT_1D: reflect-pads dst->src[0]
+// along dim 0 by the widths stored in dst->op_params (F32 only).
+void ggml_cuda_op_pad_reflect_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst);