git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
metal : refactor + optimize v2 (#15995)
author    Georgi Gerganov <redacted>
Wed, 17 Sep 2025 17:38:12 +0000 (20:38 +0300)
committer GitHub <redacted>
Wed, 17 Sep 2025 17:38:12 +0000 (20:38 +0300)
* metal : improve naming

* metal : refactor device

ggml-ci

* cont : props

ggml-ci

* metal : apply ggml_mem_ranges_t

ggml-ci

* metal : remove GGML_METAL_USE_BF16

ggml-ci

* metal : refactor device buffer

ggml-ci

* cont : fix naming

* metal : sync before destroying the backend

ggml-ci

* metal : refactor context

ggml-ci

* metal : migrate ggml-metal.m to ggml-metal.cpp

ggml-ci

* metal : adjust ops API

ggml-ci

* metal : use C++ to store pipelines

ggml-ci

* metal : migrate ops to separate functions

ggml-ci

* metal : add ggml_metal_library_t

ggml-ci

* metal : improve naming

ggml-ci

* metal : cleanup

ggml-ci

* metal : add support for GGML_OP_LOG

ggml-ci

* metal : fix error handling

ggml-ci

19 files changed:
ci/run.sh
ggml/CMakeLists.txt
ggml/include/ggml-metal.h
ggml/include/ggml.h
ggml/src/ggml-metal/CMakeLists.txt
ggml/src/ggml-metal/ggml-metal-common.cpp
ggml/src/ggml-metal/ggml-metal-common.h
ggml/src/ggml-metal/ggml-metal-context.h [new file with mode: 0644]
ggml/src/ggml-metal/ggml-metal-context.m [new file with mode: 0644]
ggml/src/ggml-metal/ggml-metal-device.cpp [new file with mode: 0644]
ggml/src/ggml-metal/ggml-metal-device.h [new file with mode: 0644]
ggml/src/ggml-metal/ggml-metal-device.m [new file with mode: 0644]
ggml/src/ggml-metal/ggml-metal-impl.h
ggml/src/ggml-metal/ggml-metal-ops.cpp [new file with mode: 0644]
ggml/src/ggml-metal/ggml-metal-ops.h [new file with mode: 0644]
ggml/src/ggml-metal/ggml-metal.cpp [new file with mode: 0644]
ggml/src/ggml-metal/ggml-metal.m [deleted file]
ggml/src/ggml-metal/ggml-metal.metal
tests/test-backend-ops.cpp

index ba099680cd50ab8c4734875938c7c325aa627e55..8e20b4e24dbc77cb2b9db3b7a1b54c8cb55b4478 100755 (executable)
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -45,7 +45,7 @@ SRC=`pwd`
 CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON"
 
 if [ ! -z ${GG_BUILD_METAL} ]; then
-    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON -DGGML_METAL_USE_BF16=ON"
+    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON"
 fi
 
 if [ ! -z ${GG_BUILD_CUDA} ]; then
index d06464f5eba5e2e8841dc555e97380ebc29c1bf9..f4ccf273b7e6e2cd67680a24c7fb6eff352923df 100644 (file)
@@ -190,7 +190,6 @@ option(GGML_WEBGPU                          "ggml: use WebGPU"
 option(GGML_WEBGPU_DEBUG                    "ggml: enable WebGPU debug output"                OFF)
 option(GGML_ZDNN                            "ggml: use zDNN"                                  OFF)
 option(GGML_METAL                           "ggml: use Metal"                                 ${GGML_METAL_DEFAULT})
-option(GGML_METAL_USE_BF16                  "ggml: use bfloat if available"                   OFF)
 option(GGML_METAL_NDEBUG                    "ggml: disable Metal debugging"                   OFF)
 option(GGML_METAL_SHADER_DEBUG              "ggml: compile Metal with -fno-fast-math"         OFF)
 option(GGML_METAL_EMBED_LIBRARY             "ggml: embed Metal library"                       ${GGML_METAL})
index 1163438bc26875ccb922d0cbe00f4c2941777f3d..433838f0d6d68d1fffabbe18a23e3c2bca3ee2fc 100644 (file)
@@ -39,6 +39,7 @@ extern "C" {
 // user-code should use only these functions
 //
 
+// TODO: remove in the future
 GGML_BACKEND_API ggml_backend_t ggml_backend_metal_init(void);
 
 GGML_BACKEND_API bool ggml_backend_is_metal(ggml_backend_t backend);
index b7b472c56ec61d3488bc20bd7e0e5ce89c443b7b..36b23dc6d0d827407ac667119b52c656eab2a6d4 100644 (file)
@@ -284,19 +284,19 @@ __host__ __device__ constexpr inline void ggml_unused_vars_impl(Args&&...) noexc
 //    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb);
 //
 #define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \
-    const type prefix##0 = (pointer)->array[0]; \
+    const type prefix##0 = (pointer) ? (pointer)->array[0] : 0; \
     GGML_UNUSED(prefix##0);
 #define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \
     GGML_TENSOR_LOCALS_1    (type, prefix, pointer, array) \
-    const type prefix##1 = (pointer)->array[1]; \
+    const type prefix##1 = (pointer) ? (pointer)->array[1] : 0; \
     GGML_UNUSED(prefix##1);
 #define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \
     GGML_TENSOR_LOCALS_2    (type, prefix, pointer, array) \
-    const type prefix##2 = (pointer)->array[2]; \
+    const type prefix##2 = (pointer) ? (pointer)->array[2] : 0; \
     GGML_UNUSED(prefix##2);
 #define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \
     GGML_TENSOR_LOCALS_3  (type, prefix, pointer, array) \
-    const type prefix##3 = (pointer)->array[3]; \
+    const type prefix##3 = (pointer) ? (pointer)->array[3] : 0; \
     GGML_UNUSED(prefix##3);
 
 #define GGML_TENSOR_UNARY_OP_LOCALS \
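For reference (not part of the diff), this is roughly what the updated macro now expands to when the tensor pointer may be null; with the previous definition a null pointer would have been dereferenced. The op->src[1] operand is only an illustrative choice:

    // sketch of the expansion of the null-safe GGML_TENSOR_LOCALS
    GGML_TENSOR_LOCALS(int32_t, ne1, op->src[1], ne);
    // expands (roughly) to:
    //   const int32_t ne10 = (op->src[1]) ? (op->src[1])->ne[0] : 0; GGML_UNUSED(ne10);
    //   const int32_t ne11 = (op->src[1]) ? (op->src[1])->ne[1] : 0; GGML_UNUSED(ne11);
    //   const int32_t ne12 = (op->src[1]) ? (op->src[1])->ne[2] : 0; GGML_UNUSED(ne12);
    //   const int32_t ne13 = (op->src[1]) ? (op->src[1])->ne[3] : 0; GGML_UNUSED(ne13);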
index 65c131b6216874581dca766b2a03796c57b28907..63418fe143083ba6c77e2cd3277f3b5e527d339d 100644 (file)
@@ -5,8 +5,12 @@ find_library(METALKIT_FRAMEWORK MetalKit   REQUIRED)
 message(STATUS "Metal framework found")
 
 ggml_add_backend_library(ggml-metal
-                         ggml-metal.m
+                         ggml-metal.cpp
+                         ggml-metal-device.m
+                         ggml-metal-device.cpp
                          ggml-metal-common.cpp
+                         ggml-metal-context.m
+                         ggml-metal-ops.cpp
                         )
 
 target_link_libraries(ggml-metal PRIVATE
@@ -19,10 +23,6 @@ if (GGML_METAL_NDEBUG)
     add_compile_definitions(GGML_METAL_NDEBUG)
 endif()
 
-if (GGML_METAL_USE_BF16)
-    add_compile_definitions(GGML_METAL_USE_BF16)
-endif()
-
 # copy metal files to bin directory
 configure_file(../ggml-common.h  ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h     COPYONLY)
 configure_file(ggml-metal.metal  ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal  COPYONLY)
index cb39e5b2ab5bbf1636b5b1b20d2d18d92096b31c..34d27b6324201314cbf0b471554670c6e7bc3292 100644 (file)
@@ -22,7 +22,7 @@ struct ggml_mem_ranges {
     int debug = 0;
 };
 
-struct ggml_mem_ranges * ggml_mem_ranges_init(int debug) {
+ggml_mem_ranges_t ggml_mem_ranges_init(int debug) {
     auto * res = new ggml_mem_ranges;
 
     res->ranges.reserve(256);
@@ -31,15 +31,15 @@ struct ggml_mem_ranges * ggml_mem_ranges_init(int debug) {
     return res;
 }
 
-void ggml_mem_ranges_free(ggml_mem_ranges * mrs) {
+void ggml_mem_ranges_free(ggml_mem_ranges_t mrs) {
     delete mrs;
 }
 
-void ggml_mem_ranges_reset(ggml_mem_ranges * mrs) {
+void ggml_mem_ranges_reset(ggml_mem_ranges_t mrs) {
     mrs->ranges.clear();
 }
 
-static bool ggml_mem_ranges_add(ggml_mem_ranges * mrs, ggml_mem_range mr) {
+static bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, ggml_mem_range mr) {
     mrs->ranges.push_back(mr);
 
     return true;
@@ -87,7 +87,7 @@ static ggml_mem_range ggml_mem_range_from_tensor_dst(const ggml_tensor * tensor)
     return ggml_mem_range_from_tensor(tensor, MEM_RANGE_TYPE_DST);
 }
 
-static bool ggml_mem_ranges_add_src(ggml_mem_ranges * mrs, const ggml_tensor * tensor) {
+static bool ggml_mem_ranges_add_src(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
     GGML_ASSERT(tensor);
 
     ggml_mem_range mr = ggml_mem_range_from_tensor_src(tensor);
@@ -99,7 +99,7 @@ static bool ggml_mem_ranges_add_src(ggml_mem_ranges * mrs, const ggml_tensor * t
     return ggml_mem_ranges_add(mrs, mr);
 }
 
-static bool ggml_mem_ranges_add_dst(ggml_mem_ranges * mrs, const ggml_tensor * tensor) {
+static bool ggml_mem_ranges_add_dst(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
     GGML_ASSERT(tensor);
 
     ggml_mem_range mr = ggml_mem_range_from_tensor_dst(tensor);
@@ -111,7 +111,7 @@ static bool ggml_mem_ranges_add_dst(ggml_mem_ranges * mrs, const ggml_tensor * t
     return ggml_mem_ranges_add(mrs, mr);
 }
 
-bool ggml_mem_ranges_add(ggml_mem_ranges * mrs, const ggml_tensor * tensor) {
+bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
     for (int i = 0; i < GGML_MAX_DIMS; i++) {
         if (tensor->src[i]) {
             ggml_mem_ranges_add_src(mrs, tensor->src[i]);
@@ -121,7 +121,7 @@ bool ggml_mem_ranges_add(ggml_mem_ranges * mrs, const ggml_tensor * tensor) {
     return ggml_mem_ranges_add_dst(mrs, tensor);
 }
 
-static bool ggml_mem_ranges_check(const ggml_mem_ranges * mrs, ggml_mem_range mr) {
+static bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, ggml_mem_range mr) {
     for (size_t i = 0; i < mrs->ranges.size(); i++) {
         const auto & cmp = mrs->ranges[i];
 
@@ -152,7 +152,7 @@ static bool ggml_mem_ranges_check(const ggml_mem_ranges * mrs, ggml_mem_range mr
     return true;
 }
 
-static bool ggml_mem_ranges_check_src(const ggml_mem_ranges * mrs, const ggml_tensor * tensor) {
+static bool ggml_mem_ranges_check_src(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
     GGML_ASSERT(tensor);
 
     ggml_mem_range mr = ggml_mem_range_from_tensor_src(tensor);
@@ -162,7 +162,7 @@ static bool ggml_mem_ranges_check_src(const ggml_mem_ranges * mrs, const ggml_te
     return res;
 }
 
-static bool ggml_mem_ranges_check_dst(const ggml_mem_ranges * mrs, const ggml_tensor * tensor) {
+static bool ggml_mem_ranges_check_dst(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
     GGML_ASSERT(tensor);
 
     ggml_mem_range mr = ggml_mem_range_from_tensor_dst(tensor);
@@ -172,7 +172,7 @@ static bool ggml_mem_ranges_check_dst(const ggml_mem_ranges * mrs, const ggml_te
     return res;
 }
 
-bool ggml_mem_ranges_check(const ggml_mem_ranges * mrs, const ggml_tensor * tensor) {
+bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
     for (int i = 0; i < GGML_MAX_DIMS; i++) {
         if (tensor->src[i]) {
             if (!ggml_mem_ranges_check_src(mrs, tensor->src[i])) {
@@ -222,7 +222,7 @@ struct node_info {
 
 static std::vector<int> ggml_metal_graph_optimize_reorder(const std::vector<node_info> & nodes) {
     // helper to add node src and dst ranges
-    const auto & h_add = [](ggml_mem_ranges * mrs, const node_info & node) {
+    const auto & h_add = [](ggml_mem_ranges_t mrs, const node_info & node) {
         for (int i = 0; i < GGML_MAX_SRC; i++) {
             if (node.node->src[i]) {
                 if (!ggml_mem_ranges_add_src(mrs, node.node->src[i])) {
@@ -246,7 +246,7 @@ static std::vector<int> ggml_metal_graph_optimize_reorder(const std::vector<node
     };
 
     // helper to check if a node can run concurrently with the existing set of nodes
-    const auto & h_check = [](const ggml_mem_ranges * mrs, const node_info & node) {
+    const auto & h_check = [](ggml_mem_ranges_t mrs, const node_info & node) {
         for (int i = 0; i < GGML_MAX_SRC; i++) {
             if (node.node->src[i]) {
                 if (!ggml_mem_ranges_check_src(mrs, node.node->src[i])) {
@@ -301,10 +301,10 @@ static std::vector<int> ggml_metal_graph_optimize_reorder(const std::vector<node
     std::vector<bool> used(n, false);
 
     // the memory ranges for the set of currently concurrent nodes
-    ggml_mem_ranges * mrs0 = ggml_mem_ranges_init(0);
+    ggml_mem_ranges_t mrs0 = ggml_mem_ranges_init(0);
 
     // the memory ranges for the set of nodes that haven't been processed yet, when looking forward for a node to reorder
-    ggml_mem_ranges * mrs1 = ggml_mem_ranges_init(0);
+    ggml_mem_ranges_t mrs1 = ggml_mem_ranges_init(0);
 
     for (int i0 = 0; i0 < n; i0++) {
         if (used[i0]) {
@@ -375,7 +375,7 @@ static std::vector<int> ggml_metal_graph_optimize_reorder(const std::vector<node
     return res;
 }
 
-void ggml_metal_graph_optimize(ggml_cgraph * gf) {
+void ggml_graph_optimize(ggml_cgraph * gf) {
     constexpr int MAX_FUSE = 16;
 
     const int n = gf->n_nodes;
index c1402895b90d0e790785ee72b625561dbca28ad2..3acbc6ae174aa043cb5bc0fc1d39c907b15b70af 100644 (file)
@@ -25,27 +25,27 @@ enum ggml_mem_range_type {
 //   can be added to the set without violating the constraints (i.e. if it can be executed concurrently with the
 //   tasks already in the set)
 //
-struct ggml_mem_ranges;
+typedef struct ggml_mem_ranges * ggml_mem_ranges_t;
 
-struct ggml_mem_ranges * ggml_mem_ranges_init(int debug);
-void ggml_mem_ranges_free(struct ggml_mem_ranges * mrs);
+ggml_mem_ranges_t ggml_mem_ranges_init(int debug);
+void ggml_mem_ranges_free(ggml_mem_ranges_t mrs);
 
 // remove all ranges from the set
-void ggml_mem_ranges_reset(struct ggml_mem_ranges * mrs);
+void ggml_mem_ranges_reset(ggml_mem_ranges_t mrs);
 
 // add src or dst ranges to track
-bool ggml_mem_ranges_add(struct ggml_mem_ranges * mrs, const struct ggml_tensor * tensor);
+bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, const struct ggml_tensor * tensor);
 
 // return false if:
 // - new src range overlaps with any existing dst range
 // - new dst range overlaps with any existing range (src or dst)
-bool ggml_mem_ranges_check(const struct ggml_mem_ranges * mrs, const struct ggml_tensor * tensor);
+bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, const struct ggml_tensor * tensor);
 
 // reorder the nodes in the graph to improve concurrency, while respecting fusion
 //
 // note: this implementation is generic and not specific to metal
 //       if it proves to work well, we can start using it for other backends in the future
-void ggml_metal_graph_optimize(struct ggml_cgraph * gf);
+void ggml_graph_optimize(struct ggml_cgraph * gf);
 
 #ifdef __cplusplus
 }
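Below is a minimal usage sketch (not part of the diff) of the ggml_mem_ranges_t API declared above, assuming the usual ggml_cgraph fields (n_nodes, nodes); it shows how graph nodes could be grouped into batches that are safe to execute concurrently:

    // group graph nodes into concurrently-executable batches;
    // a failed check means the node conflicts with the current batch
    static void split_into_concurrent_batches(struct ggml_cgraph * gf) {
        ggml_mem_ranges_t mrs = ggml_mem_ranges_init(/*debug =*/ 0);

        for (int i = 0; i < gf->n_nodes; ++i) {
            struct ggml_tensor * node = gf->nodes[i];

            if (!ggml_mem_ranges_check(mrs, node)) {
                // the node reads/writes memory touched by the current batch:
                // a memory barrier would go here, then a fresh batch starts
                ggml_mem_ranges_reset(mrs);
            }

            ggml_mem_ranges_add(mrs, node);
        }

        ggml_mem_ranges_free(mrs);
    }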
diff --git a/ggml/src/ggml-metal/ggml-metal-context.h b/ggml/src/ggml-metal/ggml-metal-context.h
new file mode 100644 (file)
index 0000000..ec2b686
--- /dev/null
@@ -0,0 +1,33 @@
+#pragma once
+
+#include "ggml-metal-device.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//
+// backend context
+//
+
+typedef struct ggml_metal * ggml_metal_t;
+
+ggml_metal_t ggml_metal_init(ggml_metal_device_t dev);
+void ggml_metal_free(ggml_metal_t ctx);
+
+void ggml_metal_synchronize(ggml_metal_t ctx);
+
+void ggml_metal_set_tensor_async(ggml_metal_t ctx, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+void ggml_metal_get_tensor_async(ggml_metal_t ctx, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+
+enum ggml_status ggml_metal_graph_compute (ggml_metal_t ctx, struct ggml_cgraph * gf);
+void             ggml_metal_graph_optimize(ggml_metal_t ctx, struct ggml_cgraph * gf);
+
+void ggml_metal_set_n_cb            (ggml_metal_t ctx, int n_cb);
+void ggml_metal_set_abort_callback  (ggml_metal_t ctx, ggml_abort_callback abort_callback, void * user_data);
+bool ggml_metal_supports_family     (ggml_metal_t ctx, int family);
+void ggml_metal_capture_next_compute(ggml_metal_t ctx);
+
+#ifdef __cplusplus
+}
+#endif
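A minimal lifecycle sketch (not part of the diff) for the backend context API declared above, assuming a ggml_cgraph * gf built elsewhere and using ggml_metal_device_get() from ggml-metal-device; error handling is reduced to the essentials:

    ggml_metal_device_t dev = ggml_metal_device_get();  // shared device singleton
    ggml_metal_t        ctx = ggml_metal_init(dev);
    if (ctx == NULL) {
        // initialization failed (no command queue or Metal library)
        return;
    }

    ggml_metal_set_n_cb(ctx, 1);                         // extra encoder threads

    if (ggml_metal_graph_compute(ctx, gf) != GGML_STATUS_SUCCESS) {
        // a command buffer failed or the abort callback fired
    }

    ggml_metal_synchronize(ctx);                         // wait for async GPU work
    ggml_metal_free(ctx);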
diff --git a/ggml/src/ggml-metal/ggml-metal-context.m b/ggml/src/ggml-metal/ggml-metal-context.m
new file mode 100644 (file)
index 0000000..af9ff21
--- /dev/null
@@ -0,0 +1,575 @@
+#import "ggml-metal-context.h"
+
+#import "ggml-impl.h"
+#import "ggml-backend-impl.h"
+
+#import "ggml-metal-impl.h"
+#import "ggml-metal-common.h"
+#import "ggml-metal-ops.h"
+
+#import <Foundation/Foundation.h>
+
+#import <Metal/Metal.h>
+
+#undef MIN
+#undef MAX
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+// max number of MTLCommandBuffer used to submit a graph for processing
+#define GGML_METAL_MAX_COMMAND_BUFFERS 8
+
+struct ggml_metal_command_buffer {
+    id<MTLCommandBuffer> obj;
+};
+
+struct ggml_metal {
+    id<MTLDevice>       device;
+    id<MTLCommandQueue> queue; // currently a pointer to the device queue, but might become separate queue [TAG_QUEUE_PER_BACKEND]
+
+    ggml_metal_device_t  dev;
+    ggml_metal_library_t lib;
+
+    dispatch_queue_t d_queue;
+
+    // additional, inference-time compiled pipelines
+    ggml_metal_pipelines_t pipelines_ext;
+
+    bool use_bfloat;
+    bool use_fusion;
+    bool use_concurrency;
+    bool use_graph_optimize;
+
+    int debug_graph;
+    int debug_fusion;
+
+    // how many times a given op was fused
+    uint64_t fuse_cnt[GGML_OP_COUNT];
+
+    // capture state
+    bool capture_next_compute;
+    bool capture_started;
+
+    id<MTLCaptureScope> capture_scope;
+
+    // command buffer state
+    int n_cb;           // number of extra threads used to submit the command buffers
+    int n_nodes_0;      // number of nodes submitted by the main thread
+    int n_nodes_1;      // remaining number of nodes submitted by the n_cb threads
+    int n_nodes_per_cb;
+
+    struct ggml_cgraph * gf;
+
+    // the callback given to the thread pool
+    void (^encode_async)(size_t ith);
+
+    // n_cb command buffers + 1 used by the main thread
+    struct ggml_metal_command_buffer cmd_bufs[GGML_METAL_MAX_COMMAND_BUFFERS + 1];
+
+    // extra command buffers for things like getting, setting and copying tensors
+    NSMutableArray * cmd_bufs_ext;
+
+    // the last command buffer queued into the Metal queue with operations relevant to the current Metal backend
+    id<MTLCommandBuffer> cmd_buf_last;
+
+    // abort ggml_metal_graph_compute if callback returns true
+    ggml_abort_callback abort_callback;
+    void *              abort_callback_data;
+};
+
+ggml_metal_t ggml_metal_init(ggml_metal_device_t dev) {
+    GGML_LOG_INFO("%s: allocating\n", __func__);
+
+#if TARGET_OS_OSX && !GGML_METAL_NDEBUG
+    // Show all the Metal device instances in the system
+    NSArray * devices = MTLCopyAllDevices();
+    for (id<MTLDevice> device in devices) {
+        GGML_LOG_INFO("%s: found device: %s\n", __func__, [[device name] UTF8String]);
+    }
+    [devices release]; // since it was created by a *Copy* C method
+#endif
+
+    // init context
+    ggml_metal_t res = calloc(1, sizeof(struct ggml_metal));
+
+    res->device = ggml_metal_device_get_obj(dev);
+
+    GGML_LOG_INFO("%s: picking default device: %s\n", __func__, [[res->device name] UTF8String]);
+
+    // TODO: would it be better to have one queue for the backend and one queue for the device?
+    //       the graph encoders and async ops would use the backend queue while the sync ops would use the device queue?
+    //res->queue = [device newCommandQueue]; [TAG_QUEUE_PER_BACKEND]
+    res->queue = ggml_metal_device_get_queue(dev);
+    if (res->queue == nil) {
+        GGML_LOG_ERROR("%s: error: failed to create command queue\n", __func__);
+        return NULL;
+    }
+
+    res->dev = dev;
+    res->lib = ggml_metal_device_get_library(dev);
+    if (res->lib == NULL) {
+        GGML_LOG_WARN("%s: the device does not have a precompiled Metal library - this is unexpected\n", __func__);
+        GGML_LOG_WARN("%s: will try to compile it on the fly\n", __func__);
+
+        res->lib = ggml_metal_library_init(dev);
+        if (res->lib == NULL) {
+            GGML_LOG_ERROR("%s: error: failed to initialize the Metal library\n", __func__);
+
+            free(res);
+
+            return NULL;
+        }
+    }
+
+    const struct ggml_metal_device_props * props_dev = ggml_metal_device_get_props(dev);
+
+    res->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT);
+
+    res->use_bfloat      = props_dev->has_bfloat;
+    res->use_fusion      = getenv("GGML_METAL_FUSION_DISABLE") == nil;
+    res->use_concurrency = getenv("GGML_METAL_CONCURRENCY_DISABLE") == nil;
+
+    {
+        const char * val = getenv("GGML_METAL_GRAPH_DEBUG");
+        res->debug_graph = val ? atoi(val) : 0;
+    }
+
+    {
+        const char * val = getenv("GGML_METAL_FUSION_DEBUG");
+        res->debug_fusion = val ? atoi(val) : 0;
+    }
+
+    res->use_graph_optimize = true;
+
+    if (getenv("GGML_METAL_GRAPH_OPTIMIZE_DISABLE") != NULL) {
+        res->use_graph_optimize = false;
+    }
+
+    memset(res->fuse_cnt, 0, sizeof(res->fuse_cnt));
+
+    GGML_LOG_INFO("%s: use bfloat         = %s\n", __func__, res->use_bfloat         ? "true" : "false");
+    GGML_LOG_INFO("%s: use fusion         = %s\n", __func__, res->use_fusion         ? "true" : "false");
+    GGML_LOG_INFO("%s: use concurrency    = %s\n", __func__, res->use_concurrency    ? "true" : "false");
+    GGML_LOG_INFO("%s: use graph optimize = %s\n", __func__, res->use_graph_optimize ? "true" : "false");
+
+    res->capture_next_compute = false;
+    res->capture_started = false;
+    res->capture_scope = nil;
+
+    res->gf = nil;
+    res->encode_async = nil;
+    for (int i = 0; i < GGML_METAL_MAX_COMMAND_BUFFERS; ++i) {
+        res->cmd_bufs[i].obj = nil;
+    }
+
+    res->cmd_bufs_ext = [[NSMutableArray alloc] init];
+
+    res->cmd_buf_last = nil;
+
+    res->pipelines_ext = ggml_metal_pipelines_init();
+
+    return res;
+}
+
+void ggml_metal_free(ggml_metal_t ctx) {
+    GGML_LOG_INFO("%s: deallocating\n", __func__);
+
+    for (int i = 0; i < GGML_METAL_MAX_COMMAND_BUFFERS; ++i) {
+        if (ctx->cmd_bufs[i].obj) {
+            [ctx->cmd_bufs[i].obj release];
+        }
+    }
+
+    for (int i = 0; i < (int) ctx->cmd_bufs_ext.count; ++i) {
+        if (ctx->cmd_bufs_ext[i]) {
+            [ctx->cmd_bufs_ext[i] release];
+        }
+    }
+
+    [ctx->cmd_bufs_ext removeAllObjects];
+    [ctx->cmd_bufs_ext release];
+
+    if (ctx->pipelines_ext) {
+        ggml_metal_pipelines_free(ctx->pipelines_ext);
+        ctx->pipelines_ext = nil;
+    }
+
+    if (ctx->debug_fusion > 0) {
+        GGML_LOG_DEBUG("%s: fusion stats:\n", __func__);
+        for (int i = 0; i < GGML_OP_COUNT; i++) {
+            if (ctx->fuse_cnt[i] == 0) {
+                continue;
+            }
+
+            // note: cannot use ggml_log here
+            GGML_LOG_DEBUG("%s: - %s: %" PRIu64 "\n", __func__, ggml_op_name((enum ggml_op) i), ctx->fuse_cnt[i]);
+        }
+    }
+
+    Block_release(ctx->encode_async);
+
+    //[ctx->queue release]; // [TAG_QUEUE_PER_BACKEND]
+
+    dispatch_release(ctx->d_queue);
+
+    free(ctx);
+}
+
+void ggml_metal_synchronize(ggml_metal_t ctx) {
+    // wait for any backend operations to finish
+    if (ctx->cmd_buf_last) {
+        [ctx->cmd_buf_last waitUntilCompleted];
+        ctx->cmd_buf_last = nil;
+    }
+
+    // release any completed command buffers
+    if (ctx->cmd_bufs_ext.count > 0) {
+        for (size_t i = 0; i < ctx->cmd_bufs_ext.count; ++i) {
+            id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs_ext[i];
+
+            MTLCommandBufferStatus status = [cmd_buf status];
+            if (status != MTLCommandBufferStatusCompleted) {
+                GGML_LOG_ERROR("%s: error: command buffer %d failed with status %d\n", __func__, (int) i, (int) status);
+                if (status == MTLCommandBufferStatusError) {
+                    GGML_LOG_ERROR("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
+                }
+                GGML_ABORT("fatal error");
+            }
+
+            [cmd_buf release];
+        }
+
+        [ctx->cmd_bufs_ext removeAllObjects];
+    }
+}
+
+static struct ggml_metal_buffer_id ggml_metal_get_buffer_id(const struct ggml_tensor * t) {
+    if (!t) {
+        return (struct ggml_metal_buffer_id) { nil, 0 };
+    }
+
+    ggml_backend_buffer_t buffer = t->view_src ? t->view_src->buffer : t->buffer;
+
+    return ggml_metal_buffer_get_id(buffer->context, t);
+}
+
+void ggml_metal_set_tensor_async(ggml_metal_t ctx, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+    @autoreleasepool {
+        // wrap the source data into a Metal buffer
+        id<MTLBuffer> buf_src = [ctx->device newBufferWithBytes:data
+                                                         length:size
+                                                        options:MTLResourceStorageModeShared];
+
+        struct ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(tensor);
+        if (bid_dst.metal == nil) {
+            GGML_ABORT("%s: failed to find buffer for tensor '%s'\n", __func__, tensor->name);
+        }
+
+        bid_dst.offs += offset;
+
+        // queue the copy operation into the queue of the Metal context
+        // this will be queued at the end, after any currently ongoing GPU operations
+        id<MTLCommandBuffer> cmd_buf = [ctx->queue commandBufferWithUnretainedReferences];
+        id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+        [encoder copyFromBuffer:buf_src
+                   sourceOffset:0
+                       toBuffer:bid_dst.metal
+              destinationOffset:bid_dst.offs
+                           size:size];
+
+        [encoder endEncoding];
+        [cmd_buf commit];
+
+        // do not wait here for completion
+        //[cmd_buf waitUntilCompleted];
+
+        // instead, remember a reference to the command buffer and wait for it later if needed
+        [ctx->cmd_bufs_ext addObject:cmd_buf];
+        ctx->cmd_buf_last = cmd_buf;
+
+        [cmd_buf retain];
+    }
+}
+
+void ggml_metal_get_tensor_async(ggml_metal_t ctx, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+    @autoreleasepool {
+        id<MTLBuffer> buf_dst = [ctx->device newBufferWithBytesNoCopy:data
+                                                               length:size
+                                                              options:MTLResourceStorageModeShared
+                                                          deallocator:nil];
+
+        struct ggml_metal_buffer_id bid_src = ggml_metal_get_buffer_id(tensor);
+        if (bid_src.metal == nil) {
+            GGML_ABORT("%s: failed to find buffer for tensor '%s'\n", __func__, tensor->name);
+        }
+
+        bid_src.offs += offset;
+
+        // queue the copy operation into the queue of the Metal context
+        // this will be queued at the end, after any currently ongoing GPU operations
+        id<MTLCommandBuffer> cmd_buf = [ctx->queue commandBufferWithUnretainedReferences];
+        id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+        [encoder copyFromBuffer:bid_src.metal
+                   sourceOffset:bid_src.offs
+                       toBuffer:buf_dst
+              destinationOffset:0
+                           size:size];
+
+        [encoder endEncoding];
+        [cmd_buf commit];
+
+        // do not wait here for completion
+        //[cmd_buf waitUntilCompleted];
+
+        // instead, remember a reference to the command buffer and wait for it later if needed
+        [ctx->cmd_bufs_ext addObject:cmd_buf];
+        ctx->cmd_buf_last = cmd_buf;
+
+        [cmd_buf retain];
+    }
+}
+
+enum ggml_status ggml_metal_graph_compute(ggml_metal_t ctx, struct ggml_cgraph * gf) {
+    // number of nodes encoded by the main thread (empirically determined)
+    const int n_main = 64;
+
+    // number of threads in addition to the main thread
+    const int n_cb = ctx->n_cb;
+
+    // submit the ggml compute graph to the GPU by creating command buffers and encoding the ops in them
+    // the first n_nodes_0 are encoded and submitted for processing directly by the calling thread
+    // while these nodes are processing, we start n_cb threads to enqueue the rest of the nodes
+    // each thread creates its own command buffer and enqueues the ops in parallel
+    //
+    // tests on M1 Pro and M2 Ultra using LLaMA models, show that optimal values for n_cb are 1 or 2
+
+    @autoreleasepool {
+        ctx->gf = gf;
+
+        ctx->n_nodes_0 = MIN(n_main, gf->n_nodes);
+        ctx->n_nodes_1 = gf->n_nodes - ctx->n_nodes_0;
+
+        ctx->n_nodes_per_cb = (ctx->n_nodes_1 + ctx->n_cb - 1) / ctx->n_cb;
+
+        const bool use_capture = ctx->capture_next_compute;
+        if (use_capture) {
+            ctx->capture_next_compute = false;
+
+            // make sure all previous computations have finished before starting the capture
+            if (ctx->cmd_buf_last) {
+                [ctx->cmd_buf_last waitUntilCompleted];
+                ctx->cmd_buf_last = nil;
+            }
+
+            if (!ctx->capture_started) {
+                // create capture scope
+                ctx->capture_scope = [[MTLCaptureManager sharedCaptureManager] newCaptureScopeWithDevice:ctx->device];
+
+                MTLCaptureDescriptor * descriptor = [MTLCaptureDescriptor new];
+                descriptor.captureObject = ctx->capture_scope;
+                descriptor.destination = MTLCaptureDestinationGPUTraceDocument;
+                descriptor.outputURL = [NSURL fileURLWithPath:[NSString stringWithFormat:@"/tmp/perf-metal.gputrace"]];
+
+                NSError * error = nil;
+                if (![[MTLCaptureManager sharedCaptureManager] startCaptureWithDescriptor:descriptor error:&error]) {
+                    GGML_LOG_ERROR("%s: error: unable to start capture '%s'\n", __func__, [[error localizedDescription] UTF8String]);
+                } else {
+                    [ctx->capture_scope beginScope];
+                    ctx->capture_started = true;
+                }
+            }
+        }
+
+        // the main thread commits the first few commands immediately
+        // cmd_buf[n_cb]
+        {
+            id<MTLCommandBuffer> cmd_buf = [ctx->queue commandBufferWithUnretainedReferences];
+            [cmd_buf retain];
+
+            if (ctx->cmd_bufs[n_cb].obj) {
+                [ctx->cmd_bufs[n_cb].obj release];
+            }
+            ctx->cmd_bufs[n_cb].obj = cmd_buf;
+
+            [cmd_buf enqueue];
+
+            ctx->encode_async(n_cb);
+        }
+
+        // remember the command buffer for the next iteration
+        ctx->cmd_buf_last = ctx->cmd_bufs[n_cb].obj;
+
+        // prepare the rest of the command buffers asynchronously (optional)
+        // cmd_buf[0.. n_cb)
+        for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
+            id<MTLCommandBuffer> cmd_buf = [ctx->queue commandBufferWithUnretainedReferences];
+            [cmd_buf retain];
+
+            if (ctx->cmd_bufs[cb_idx].obj) {
+                [ctx->cmd_bufs[cb_idx].obj release];
+            }
+            ctx->cmd_bufs[cb_idx].obj = cmd_buf;
+
+            // always enqueue the first two command buffers
+            // enqueue all of the command buffers if we don't need to abort
+            if (cb_idx < 2 || ctx->abort_callback == NULL) {
+                [cmd_buf enqueue];
+
+                // update the pointer to the last queued command buffer
+                // this is needed to implement synchronize()
+                ctx->cmd_buf_last = cmd_buf;
+            }
+        }
+
+        dispatch_apply(n_cb, ctx->d_queue, ctx->encode_async);
+
+        // for debugging: block until graph is computed
+        //[ctx->cmd_buf_last waitUntilCompleted];
+
+        // enter here only when capturing in order to wait for all computation to finish
+        // otherwise, we leave the graph to compute asynchronously
+        if (!use_capture && ctx->capture_started) {
+            // wait for completion and check status of each command buffer
+            // needed to detect if the device ran out-of-memory for example (#1881)
+            {
+                id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[n_cb].obj;
+                [cmd_buf waitUntilCompleted];
+
+                MTLCommandBufferStatus status = [cmd_buf status];
+                if (status != MTLCommandBufferStatusCompleted) {
+                    GGML_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, n_cb, status);
+                    if (status == MTLCommandBufferStatusError) {
+                        GGML_LOG_INFO("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
+                    }
+
+                    return GGML_STATUS_FAILED;
+                }
+            }
+
+            for (int i = 0; i < n_cb; ++i) {
+                id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[i].obj;
+                [cmd_buf waitUntilCompleted];
+
+                MTLCommandBufferStatus status = [cmd_buf status];
+                if (status != MTLCommandBufferStatusCompleted) {
+                    GGML_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status);
+                    if (status == MTLCommandBufferStatusError) {
+                        GGML_LOG_INFO("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
+                    }
+
+                    return GGML_STATUS_FAILED;
+                }
+
+                id<MTLCommandBuffer> next_buffer = (i + 1 < n_cb ? ctx->cmd_bufs[i + 1].obj : nil);
+                if (!next_buffer) {
+                    continue;
+                }
+
+                const bool next_queued = ([next_buffer status] != MTLCommandBufferStatusNotEnqueued);
+                if (next_queued) {
+                    continue;
+                }
+
+                if (ctx->abort_callback && ctx->abort_callback(ctx->abort_callback_data)) {
+                    GGML_LOG_INFO("%s: command buffer %d aborted", __func__, i);
+                    return GGML_STATUS_ABORTED;
+                }
+
+                [next_buffer commit];
+            }
+
+            [ctx->capture_scope endScope];
+            [[MTLCaptureManager sharedCaptureManager] stopCapture];
+        }
+    }
+
+    return GGML_STATUS_SUCCESS;
+}
+
+void ggml_metal_graph_optimize(ggml_metal_t ctx, struct ggml_cgraph * gf) {
+    //const int64_t t_start = ggml_time_us();
+
+    if (ctx->use_graph_optimize) {
+        ggml_graph_optimize(gf);
+    }
+
+    //printf("%s: graph optimize took %.3f ms\n", __func__, (ggml_time_us() - t_start) / 1000.0);
+}
+
+void ggml_metal_set_n_cb(ggml_metal_t ctx, int n_cb) {
+    if (ctx->n_cb != n_cb) {
+        ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_COMMAND_BUFFERS);
+
+        if (ctx->n_cb > 2) {
+            GGML_LOG_WARN("%s: n_cb = %d, using n_cb > 2 is not recommended and can degrade the performance in some cases\n", __func__, n_cb);
+        }
+    }
+
+    if (ctx->encode_async) {
+        Block_release(ctx->encode_async);
+    }
+
+    ctx->encode_async = Block_copy(^(size_t iter) {
+        const int cb_idx = iter;
+        const int n_cb_l = ctx->n_cb;
+
+        const int n_nodes_0 = ctx->n_nodes_0;
+        const int n_nodes_1 = ctx->n_nodes_1;
+
+        const int n_nodes_per_cb = ctx->n_nodes_per_cb;
+
+        int idx_start = 0;
+        int idx_end   = n_nodes_0;
+
+        if (cb_idx < n_cb_l) {
+            idx_start = n_nodes_0 + (                                         (cb_idx + 0) * n_nodes_per_cb);
+            idx_end   = n_nodes_0 + (MIN((cb_idx == n_cb_l - 1) ? n_nodes_1 : (cb_idx + 1) * n_nodes_per_cb, n_nodes_1));
+        }
+
+        id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[cb_idx].obj;
+
+        ggml_metal_op_t ctx_op = ggml_metal_op_init(
+            ctx->dev,
+            cmd_buf,
+            ctx->gf,
+            idx_start,
+            idx_end,
+            ctx->use_fusion,
+            ctx->use_concurrency,
+            ctx->capture_next_compute,
+            ctx->debug_graph,
+            ctx->debug_fusion);
+
+        for (int idx = idx_start; idx < idx_end;) {
+            const int res = ggml_metal_op_encode(ctx_op, idx);
+            if (res == 0) {
+                break;
+            }
+
+            idx += res;
+        }
+
+        ggml_metal_op_free(ctx_op);
+
+        if (cb_idx < 2 || ctx->abort_callback == NULL) {
+            [cmd_buf commit];
+        }
+    });
+}
+
+void ggml_metal_set_abort_callback(ggml_metal_t ctx, ggml_abort_callback abort_callback, void * user_data) {
+    ctx->abort_callback = abort_callback;
+    ctx->abort_callback_data = user_data;
+}
+
+bool ggml_metal_supports_family(ggml_metal_t ctx, int family) {
+    GGML_ASSERT(ctx->device != nil);
+
+    return [ctx->device supportsFamily:(MTLGPUFamilyApple1 + family - 1)];
+}
+
+void ggml_metal_capture_next_compute(ggml_metal_t ctx) {
+    ctx->capture_next_compute = true;
+}
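To illustrate the node-to-command-buffer split performed by the encode_async block above, here is a small worked example (the values are chosen purely for illustration): with gf->n_nodes = 200, n_main = 64 and n_cb = 2, the main thread encodes nodes [0, 64) into cmd_bufs[n_cb], while the two extra threads encode [64, 132) and [132, 200):

    const int n_nodes        = 200;                           // illustrative graph size
    const int n_main         = 64;                            // encoded by the calling thread
    const int n_cb           = 2;                             // extra encoder threads

    const int n_nodes_0      = std::min(n_main, n_nodes);     // 64
    const int n_nodes_1      = n_nodes - n_nodes_0;           // 136
    const int n_nodes_per_cb = (n_nodes_1 + n_cb - 1) / n_cb; // 68

    for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
        const int idx_start = n_nodes_0 + cb_idx * n_nodes_per_cb;
        const int idx_end   = n_nodes_0 + std::min((cb_idx == n_cb - 1) ? n_nodes_1
                                                   : (cb_idx + 1) * n_nodes_per_cb, n_nodes_1);
        // cb_idx == 0 -> [64, 132), cb_idx == 1 -> [132, 200);
        // each range is encoded into its own MTLCommandBuffer
    }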
diff --git a/ggml/src/ggml-metal/ggml-metal-device.cpp b/ggml/src/ggml-metal/ggml-metal-device.cpp
new file mode 100644 (file)
index 0000000..5f04789
--- /dev/null
@@ -0,0 +1,1366 @@
+#include "ggml-metal-device.h"
+
+#include "ggml-metal-impl.h"
+
+#include "ggml-impl.h"
+
+#include <cassert>
+#include <memory>
+#include <string>
+#include <unordered_map>
+
+struct ggml_metal_device_deleter {
+    void operator()(ggml_metal_device_t ctx) {
+        ggml_metal_device_free(ctx);
+    }
+};
+
+typedef std::unique_ptr<ggml_metal_device, ggml_metal_device_deleter> ggml_metal_device_ptr;
+
+ggml_metal_device_t ggml_metal_device_get(void) {
+    static ggml_metal_device_ptr ctx { ggml_metal_device_init() };
+
+    return ctx.get();
+}
+
+struct ggml_metal_pipelines {
+    std::unordered_map<std::string, ggml_metal_pipeline_t> data;
+};
+
+ggml_metal_pipelines_t ggml_metal_pipelines_init(void) {
+    ggml_metal_pipelines_t res = new ggml_metal_pipelines();
+
+    return res;
+}
+
+void ggml_metal_pipelines_free(ggml_metal_pipelines_t ppls) {
+    for (auto it = ppls->data.begin(); it != ppls->data.end(); ++it) {
+        ggml_metal_pipeline_free(it->second);
+    }
+
+    delete ppls;
+}
+
+void ggml_metal_pipelines_add(ggml_metal_pipelines_t ppls, const char * name, ggml_metal_pipeline_t pipeline) {
+    ppls->data[name] = pipeline;
+}
+
+ggml_metal_pipeline_t ggml_metal_pipelines_get(ggml_metal_pipelines_t ppls, const char * name) {
+    if  (ppls->data.find(name) == ppls->data.end()) {
+        return nullptr;
+    }
+
+    return ppls->data[name];
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_base(ggml_metal_library_t lib, ggml_op op) {
+    char base[256];
+    char name[256];
+
+    const char * op_str = "undefined";
+    switch (op) {
+        case GGML_OP_ADD_ID: op_str = "add_id"; break;
+        case GGML_OP_CONCAT: op_str = "concat"; break;
+        default: GGML_ABORT("fatal error");
+    };
+
+    snprintf(base, 256, "kernel_%s", op_str);
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_cpy(ggml_metal_library_t lib, ggml_type tsrc, ggml_type tdst) {
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_cpy_%s_%s", ggml_type_name(tsrc), ggml_type_name(tdst));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_pool_2d(ggml_metal_library_t lib, const ggml_tensor * op, ggml_op_pool op_pool) {
+    GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+    GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32 && op->src[0]->type == op->type);
+
+    const char * pool_str = "undefined";
+    switch (op_pool) {
+        case GGML_OP_POOL_AVG: pool_str = "avg"; break;
+        case GGML_OP_POOL_MAX: pool_str = "max"; break;
+        default: GGML_ASSERT(false && "not implemented");
+    };
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_pool_2d_%s_%s", pool_str, ggml_type_name(op->src[0]->type));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_get_rows(ggml_metal_library_t lib, ggml_type tsrc) {
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_get_rows_%s", ggml_type_name(tsrc));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_set_rows(ggml_metal_library_t lib, ggml_type tdst) {
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_set_rows_%s", ggml_type_name(tdst));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_repeat(ggml_metal_library_t lib, ggml_type tsrc) {
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_repeat_%s", ggml_type_name(tsrc));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_unary(ggml_metal_library_t lib, const ggml_tensor * op) {
+    GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+
+    char base[256];
+    char name[256];
+
+    const int64_t n = ggml_nelements(op);
+
+    const char * op_str = "undefined";
+    switch (op->op) {
+        case GGML_OP_SCALE:      op_str = "scale";      break;
+        case GGML_OP_CLAMP:      op_str = "clamp";      break;
+        case GGML_OP_SQR:        op_str = "sqr";        break;
+        case GGML_OP_SQRT:       op_str = "sqrt";       break;
+        case GGML_OP_SIN:        op_str = "sin";        break;
+        case GGML_OP_COS:        op_str = "cos";        break;
+        case GGML_OP_LOG:        op_str = "log";        break;
+        case GGML_OP_LEAKY_RELU: op_str = "leaky_relu"; break;
+        case GGML_OP_UNARY:
+            switch (ggml_get_unary_op(op)) {
+                case GGML_UNARY_OP_TANH:        op_str = "tanh";        break;
+                case GGML_UNARY_OP_RELU:        op_str = "relu";        break;
+                case GGML_UNARY_OP_SIGMOID:     op_str = "sigmoid";     break;
+                case GGML_UNARY_OP_GELU:        op_str = "gelu";        break;
+                case GGML_UNARY_OP_GELU_ERF:    op_str = "gelu_erf";    break;
+                case GGML_UNARY_OP_GELU_QUICK:  op_str = "gelu_quick";  break;
+                case GGML_UNARY_OP_SILU:        op_str = "silu";        break;
+                case GGML_UNARY_OP_ELU:         op_str = "elu";         break;
+                case GGML_UNARY_OP_NEG:         op_str = "neg";         break;
+                case GGML_UNARY_OP_ABS:         op_str = "abs";         break;
+                case GGML_UNARY_OP_SGN:         op_str = "sgn";         break;
+                case GGML_UNARY_OP_STEP:        op_str = "step";        break;
+                case GGML_UNARY_OP_HARDSWISH:   op_str = "hardswish";   break;
+                case GGML_UNARY_OP_HARDSIGMOID: op_str = "hardsigmoid"; break;
+                case GGML_UNARY_OP_EXP:         op_str = "exp";         break;
+                default: GGML_ABORT("fatal error");
+            } break;
+        default: GGML_ABORT("fatal error");
+    };
+
+    const char * suffix = "";
+    if (n % 4 == 0) {
+        suffix = "_4";
+    }
+
+    snprintf(base, 256, "kernel_%s_%s%s", op_str, ggml_type_name(op->src[0]->type), suffix);
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_glu(ggml_metal_library_t lib, const ggml_tensor * op) {
+    GGML_ASSERT(ggml_is_contiguous_1(op->src[0]));
+
+    char base[256];
+    char name[256];
+
+    const char * op_str = "undefined";
+    switch (op->op) {
+        case GGML_OP_GLU:
+            switch (ggml_get_glu_op(op)) {
+                case GGML_GLU_OP_REGLU:        op_str = "reglu";        break;
+                case GGML_GLU_OP_GEGLU:        op_str = "geglu";        break;
+                case GGML_GLU_OP_SWIGLU:       op_str = "swiglu";       break;
+                case GGML_GLU_OP_SWIGLU_OAI:   op_str = "swiglu_oai";   break;
+                case GGML_GLU_OP_GEGLU_ERF:    op_str = "geglu_erf";    break;
+                case GGML_GLU_OP_GEGLU_QUICK:  op_str = "geglu_quick";  break;
+                default: GGML_ABORT("fatal error");
+            } break;
+        default: GGML_ABORT("fatal error");
+    };
+
+    snprintf(base, 256, "kernel_%s_%s", op_str, ggml_type_name(op->src[0]->type));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_sum_rows(ggml_metal_library_t lib, const ggml_tensor * op) {
+    GGML_ASSERT(op->src[0]->nb[0] == ggml_type_size(op->src[0]->type));
+
+    char base[256];
+    char name[256];
+
+    const char * op_str = "undefined";
+    switch (op->op) {
+        case GGML_OP_SUM_ROWS:
+            op_str = "sum_rows"; break;
+        case GGML_OP_MEAN:
+            op_str = "mean"; break;
+        default: GGML_ABORT("fatal error");
+    };
+
+    snprintf(base, 256, "kernel_%s_%s", op_str, ggml_type_name(op->src[0]->type));
+
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    ggml_metal_pipeline_set_smem(res, 32*sizeof(float));
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_soft_max(ggml_metal_library_t lib, const ggml_tensor * op) {
+    GGML_ASSERT(!op->src[1] || op->src[1]->type == GGML_TYPE_F16 || op->src[1]->type == GGML_TYPE_F32);
+
+    char base[256];
+    char name[256];
+
+    const char * suffix = "";
+
+    if (op->src[0]->ne[0] % 4 == 0) {
+        suffix = "_4";
+    }
+
+    const ggml_type tsrc1 = op->src[1] ? op->src[1]->type : GGML_TYPE_F32;
+
+    snprintf(base, 256, "kernel_soft_max_%s%s", ggml_type_name(tsrc1), suffix);
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    ggml_metal_pipeline_set_smem(res, 32*sizeof(float));
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_ssm_conv(ggml_metal_library_t lib, const ggml_tensor * op) {
+    GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+    GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+
+    GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+    GGML_ASSERT(ggml_is_contiguous(op->src[1]));
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_ssm_conv_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_ssm_scan(ggml_metal_library_t lib, const ggml_tensor * op)  {
+    char base[256];
+    char name[256];
+
+    if (op->src[3]->ne[0] == 1) {
+        snprintf(base, 256, "kernel_ssm_scan_group_%s", ggml_type_name(op->src[0]->type));
+    } else {
+        snprintf(base, 256, "kernel_ssm_scan_%s", ggml_type_name(op->src[0]->type));
+    }
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    ggml_metal_pipeline_set_smem(res, 32*sizeof(float));
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_rwkv(ggml_metal_library_t lib, const ggml_tensor * op) {
+    char base[256];
+    char name[256];
+
+    const int64_t C = op->ne[0];
+    const int64_t H = op->src[0]->ne[1];
+
+    switch (op->op) {
+        case GGML_OP_RWKV_WKV6:
+            {
+                GGML_ASSERT(op->src[5]->type == GGML_TYPE_F32);
+                GGML_ASSERT(C % H == 0);
+                GGML_ASSERT(C / H == 64);
+
+                snprintf(base, 256, "kernel_rwkv_wkv6_%s", ggml_type_name(op->src[0]->type));
+            } break;
+        case GGML_OP_RWKV_WKV7:
+            {
+                GGML_ASSERT(op->src[6]->type == GGML_TYPE_F32);
+                GGML_ASSERT(C % H == 0);
+                GGML_ASSERT(C / H == 64);
+
+                snprintf(base, 256, "kernel_rwkv_wkv7_%s", ggml_type_name(op->src[0]->type));
+            } break;
+        default:
+            GGML_ABORT("fatal error");
+    }
+
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mv_ext(ggml_metal_library_t lib, ggml_type tsrc0, ggml_type tsrc1, int r1ptg) {
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_mul_mv_ext_%s_%s_r1_%d", ggml_type_name(tsrc0), ggml_type_name(tsrc1), r1ptg);
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mm(ggml_metal_library_t lib, ggml_type tsrc0, ggml_type tsrc1) {
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_mul_mm_%s_%s", ggml_type_name(tsrc0), ggml_type_name(tsrc1));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    ggml_metal_pipeline_set_smem(res, 8192);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mv(ggml_metal_library_t lib, const ggml_tensor * op) {
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+
+    char base[256];
+    char name[256];
+
+    int nsg = 0; // number of simdgroups
+    int nr0 = 0; // number of src0 rows per simdgroup
+    int nr1 = 1; // number of src1 rows per threadgroup
+
+    size_t smem = 0; // shared memory
+
+    const ggml_type tsrc0 = op->src[0]->type;
+    const ggml_type tsrc1 = op->src[1]->type;
+
+    const char * suffix = "";
+
+    // use custom matrix x vector kernel
+    switch (tsrc0) {
+        case GGML_TYPE_F32:
+            {
+                GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+
+                nsg = 1;
+                nr0 = 1;
+                nr1 = 4;
+                if (ne00 == 4) {
+                    nr0 = 32;
+                    suffix = "_c4";
+                }
+            } break;
+        case GGML_TYPE_F16:
+        case GGML_TYPE_BF16:
+            {
+                nsg = 1;
+                nr0 = 1;
+                if (op->src[1]->type == GGML_TYPE_F32) {
+                    if (ne00 == 4) {
+                        nr0 = 32;
+                        nr1 = 4;
+                        suffix = "_c4";
+                    } else if (ne11 * ne12 < 4) {
+                        suffix = "_1row";
+                    } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) {
+                        suffix = "_l4";
+                        nr1 = ne11;
+                    } else {
+                        nr1 = 4;
+                    }
+                } else {
+                    nr1 = 4;
+                }
+            } break;
+        case GGML_TYPE_Q4_0:
+            {
+                nsg = N_SG_Q4_0;
+                nr0 = N_R0_Q4_0;
+            } break;
+        case GGML_TYPE_Q4_1:
+            {
+                nsg = N_SG_Q4_1;
+                nr0 = N_R0_Q4_1;
+            } break;
+        case GGML_TYPE_Q5_0:
+            {
+                nsg = N_SG_Q5_0;
+                nr0 = N_R0_Q5_0;
+            } break;
+        case GGML_TYPE_Q5_1:
+            {
+                nsg = N_SG_Q5_1;
+                nr0 = N_R0_Q5_1;
+            } break;
+        case GGML_TYPE_Q8_0:
+            {
+                nsg = N_SG_Q8_0;
+                nr0 = N_R0_Q8_0;
+                smem = 32*sizeof(float)*N_R0_Q8_0;
+            } break;
+        case GGML_TYPE_MXFP4:
+            {
+                nsg = N_SG_MXFP4;
+                nr0 = N_R0_MXFP4;
+                smem = 32*sizeof(float);
+            } break;
+        case GGML_TYPE_Q2_K:
+            {
+                nsg = N_SG_Q2_K;
+                nr0 = N_R0_Q2_K;
+            } break;
+        case GGML_TYPE_Q3_K:
+            {
+                nsg = N_SG_Q3_K;
+                nr0 = N_R0_Q3_K;
+            } break;
+        case GGML_TYPE_Q4_K:
+            {
+                nsg = N_SG_Q4_K;
+                nr0 = N_R0_Q4_K;
+            } break;
+        case GGML_TYPE_Q5_K:
+            {
+                nsg = N_SG_Q5_K;
+                nr0 = N_R0_Q5_K;
+            } break;
+        case GGML_TYPE_Q6_K:
+            {
+                nsg = N_SG_Q6_K;
+                nr0 = N_R0_Q6_K;
+            } break;
+        case GGML_TYPE_IQ2_XXS:
+            {
+                nsg = N_SG_IQ2_XXS;
+                nr0 = N_R0_IQ2_XXS;
+                smem = 256*8+128;
+            } break;
+        case GGML_TYPE_IQ2_XS:
+            {
+                nsg = N_SG_IQ2_XS;
+                nr0 = N_R0_IQ2_XS;
+                smem = 512*8+128;
+            } break;
+        case GGML_TYPE_IQ3_XXS:
+            {
+                nsg = N_SG_IQ3_XXS;
+                nr0 = N_R0_IQ3_XXS;
+                smem = 256*4+128;
+            } break;
+        case GGML_TYPE_IQ3_S:
+            {
+                nsg = N_SG_IQ3_S;
+                nr0 = N_R0_IQ3_S;
+                smem = 512*4;
+            } break;
+        case GGML_TYPE_IQ2_S:
+            {
+                nsg = N_SG_IQ2_S;
+                nr0 = N_R0_IQ2_S;
+            } break;
+        case GGML_TYPE_IQ1_S:
+            {
+                nsg = N_SG_IQ1_S;
+                nr0 = N_R0_IQ1_S;
+            } break;
+        case GGML_TYPE_IQ1_M:
+            {
+                nsg = N_SG_IQ1_M;
+                nr0 = N_R0_IQ1_M;
+            } break;
+        case GGML_TYPE_IQ4_NL:
+            {
+                nsg = N_SG_IQ4_NL;
+                nr0 = N_R0_IQ4_NL;
+                smem = 32*sizeof(float);
+            } break;
+        case GGML_TYPE_IQ4_XS:
+            {
+                nsg = N_SG_IQ4_XS;
+                nr0 = N_R0_IQ4_XS;
+                smem = 32*sizeof(float);
+            } break;
+        default:
+            {
+                GGML_LOG_ERROR("Asserting on type %d\n", (int) tsrc0);
+                GGML_ABORT("not implemented");
+            }
+    };
+
+    snprintf(base, 256, "kernel_mul_mv_%s_%s%s", ggml_type_name(tsrc0), ggml_type_name(tsrc1), suffix);
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    ggml_metal_pipeline_set_nr0 (res, nr0);
+    ggml_metal_pipeline_set_nr1 (res, nr1);
+    ggml_metal_pipeline_set_nsg (res, nsg);
+    ggml_metal_pipeline_set_smem(res, smem);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mm_id_map0(ggml_metal_library_t lib, int ne02, int ne20) {
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_mul_mm_id_map0_ne20_%d", ne20);
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    const size_t smem = (size_t) ne02*ne20*sizeof(uint16_t);
+
+    ggml_metal_pipeline_set_smem(res, smem);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mm_id(ggml_metal_library_t lib, ggml_type tsrc0, ggml_type tsrc1) {
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_mul_mm_id_%s_%s", ggml_type_name(tsrc0), ggml_type_name(tsrc1));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    ggml_metal_pipeline_set_smem(res, 8192);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mv_id(ggml_metal_library_t lib, const ggml_tensor * op) {
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+
+    char base[256];
+    char name[256];
+
+    int nsg = 0; // number of simdgroups
+    int nr0 = 0; // number of src0 rows per simdgroup
+    int nr1 = 1; // number of src1 rows per threadgroup
+
+    size_t smem = 0; // shared memory
+
+    const ggml_type tsrc0 = op->src[0]->type;
+    const ggml_type tsrc1 = op->src[1]->type;
+
+    // use custom matrix x vector kernel
+    switch (tsrc0) {
+        case GGML_TYPE_F32:
+            {
+                GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+                nsg = 1;
+                nr0 = 1;
+            } break;
+        case GGML_TYPE_F16:
+            {
+                GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+                nsg = 1;
+                nr0 = 1;
+            } break;
+        case GGML_TYPE_BF16:
+            {
+                GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+                nsg = 1;
+                nr0 = 1;
+            } break;
+        case GGML_TYPE_Q4_0:
+            {
+                nsg = N_SG_Q4_0;
+                nr0 = N_R0_Q4_0;
+            } break;
+        case GGML_TYPE_Q4_1:
+            {
+                nsg = N_SG_Q4_1;
+                nr0 = N_R0_Q4_1;
+            } break;
+        case GGML_TYPE_Q5_0:
+            {
+                nsg = N_SG_Q5_0;
+                nr0 = N_R0_Q5_0;
+            } break;
+        case GGML_TYPE_Q5_1:
+            {
+                nsg = N_SG_Q5_1;
+                nr0 = N_R0_Q5_1;
+            } break;
+        case GGML_TYPE_Q8_0:
+            {
+                nsg = N_SG_Q8_0;
+                nr0 = N_R0_Q8_0;
+                smem = 32*sizeof(float)*N_R0_Q8_0;
+            } break;
+        case GGML_TYPE_MXFP4:
+            {
+                nsg = N_SG_MXFP4;
+                nr0 = N_R0_MXFP4;
+                smem = 32*sizeof(float);
+            } break;
+        case GGML_TYPE_Q2_K:
+            {
+                nsg = N_SG_Q2_K;
+                nr0 = N_R0_Q2_K;
+            } break;
+        case GGML_TYPE_Q3_K:
+            {
+                nsg = N_SG_Q3_K;
+                nr0 = N_R0_Q3_K;
+            } break;
+        case GGML_TYPE_Q4_K:
+            {
+                nsg = N_SG_Q4_K;
+                nr0 = N_R0_Q4_K;
+            } break;
+        case GGML_TYPE_Q5_K:
+            {
+                nsg = N_SG_Q5_K;
+                nr0 = N_R0_Q5_K;
+            } break;
+        case GGML_TYPE_Q6_K:
+            {
+                nsg = N_SG_Q6_K;
+                nr0 = N_R0_Q6_K;
+            } break;
+        case GGML_TYPE_IQ2_XXS:
+            {
+                nsg = N_SG_IQ2_XXS;
+                nr0 = N_R0_IQ2_XXS;
+                smem = 256*8+128;
+            } break;
+        case GGML_TYPE_IQ2_XS:
+            {
+                nsg = N_SG_IQ2_XS;
+                nr0 = N_R0_IQ2_XS;
+                smem = 512*8+128;
+            } break;
+        case GGML_TYPE_IQ3_XXS:
+            {
+                nsg = N_SG_IQ3_XXS;
+                nr0 = N_R0_IQ3_XXS;
+                smem = 256*4+128;
+            } break;
+        case GGML_TYPE_IQ3_S:
+            {
+                nsg = N_SG_IQ3_S;
+                nr0 = N_R0_IQ3_S;
+                smem = 512*4;
+            } break;
+        case GGML_TYPE_IQ2_S:
+            {
+                nsg = N_SG_IQ2_S;
+                nr0 = N_R0_IQ2_S;
+            } break;
+        case GGML_TYPE_IQ1_S:
+            {
+                nsg = N_SG_IQ1_S;
+                nr0 = N_R0_IQ1_S;
+            } break;
+        case GGML_TYPE_IQ1_M:
+            {
+                nsg = N_SG_IQ1_M;
+                nr0 = N_R0_IQ1_M;
+            } break;
+        case GGML_TYPE_IQ4_NL:
+            {
+                nsg = N_SG_IQ4_NL;
+                nr0 = N_R0_IQ4_NL;
+                smem = 32*sizeof(float);
+            } break;
+        case GGML_TYPE_IQ4_XS:
+            {
+                nsg = N_SG_IQ4_XS;
+                nr0 = N_R0_IQ4_XS;
+                smem = 32*sizeof(float);
+            } break;
+        default:
+            {
+                GGML_LOG_ERROR("Asserting on type %d\n", (int) tsrc0);
+                GGML_ABORT("not implemented");
+            }
+    };
+
+    snprintf(base, 256, "kernel_mul_mv_id_%s_%s", ggml_type_name(tsrc0), ggml_type_name(tsrc1));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    ggml_metal_pipeline_set_nr0 (res, nr0);
+    ggml_metal_pipeline_set_nr1 (res, nr1);
+    ggml_metal_pipeline_set_nsg (res, nsg);
+    ggml_metal_pipeline_set_smem(res, smem);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_argmax(ggml_metal_library_t lib, const ggml_tensor * op) {
+    GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+    GGML_ASSERT(ggml_is_contiguous_1(op->src[0]));
+    GGML_ASSERT(op->src[0]->nb[0] == ggml_type_size(op->src[0]->type));
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_argmax_%s", ggml_type_name(op->src[0]->type));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    ggml_metal_pipeline_set_smem(res, 32*(sizeof(float) + sizeof(int32_t)));
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_argsort(ggml_metal_library_t lib, const ggml_tensor * op) {
+    assert(op->op == GGML_OP_ARGSORT);
+
+    char base[256];
+    char name[256];
+
+    ggml_sort_order order = (ggml_sort_order) op->op_params[0];
+
+    const char * order_str = "undefined";
+    switch (order) {
+        case GGML_SORT_ORDER_ASC:  order_str = "asc";  break;
+        case GGML_SORT_ORDER_DESC: order_str = "desc"; break;
+        default: GGML_ABORT("fatal error");
+    };
+
+    snprintf(base, 256, "kernel_argsort_%s_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->type), order_str);
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_flash_attn_ext(
+        ggml_metal_library_t lib,
+        const ggml_tensor * op,
+        bool    has_mask,
+        bool    has_sinks,
+        bool    has_bias,
+        bool    has_scap,
+        int32_t nsg) {
+    assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+
+    char base[256];
+    char name[256];
+
+    const int32_t dk = (int32_t) op->src[1]->ne[0];
+    const int32_t dv = (int32_t) op->src[2]->ne[0];
+
+    const int32_t ns10 = op->src[1]->nb[1]/op->src[1]->nb[0];
+    const int32_t ns20 = op->src[2]->nb[1]/op->src[2]->nb[0];
+
+    snprintf(base, 256, "kernel_%s_%s_dk%d_dv%d",
+            "flash_attn_ext",
+            ggml_type_name(op->src[1]->type),
+            dk,
+            dv);
+
+    snprintf(name, 256, "kernel_%s_%s_dk%d_dv%d_mask=%d_sinks=%d_bias=%d_scap=%d_ns10=%d_ns20=%d_nsg=%d",
+            "flash_attn_ext",
+            ggml_type_name(op->src[1]->type),
+            dk,
+            dv,
+            has_mask,
+            has_sinks,
+            has_bias,
+            has_scap,
+            ns10,
+            ns20,
+            nsg);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+    ggml_metal_cv_set_bool(cv, has_mask,  FC_FLASH_ATTN_EXT + 0);
+    ggml_metal_cv_set_bool(cv, has_sinks, FC_FLASH_ATTN_EXT + 1);
+    ggml_metal_cv_set_bool(cv, has_bias,  FC_FLASH_ATTN_EXT + 2);
+    ggml_metal_cv_set_bool(cv, has_scap,  FC_FLASH_ATTN_EXT + 3);
+
+    ggml_metal_cv_set_int32(cv, ns10, FC_FLASH_ATTN_EXT + 20);
+    ggml_metal_cv_set_int32(cv, ns20, FC_FLASH_ATTN_EXT + 21);
+    ggml_metal_cv_set_int32(cv, nsg,  FC_FLASH_ATTN_EXT + 22);
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+    ggml_metal_cv_free(cv);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_flash_attn_ext_vec(
+        ggml_metal_library_t lib,
+        const ggml_tensor * op,
+        bool    has_mask,
+        bool    has_sinks,
+        bool    has_bias,
+        bool    has_scap,
+        int32_t nsg,
+        int32_t nwg) {
+    assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+
+    char base[256];
+    char name[256];
+
+    const int32_t dk = (int32_t) op->src[1]->ne[0];
+    const int32_t dv = (int32_t) op->src[2]->ne[0];
+
+    const int32_t ns10 = op->src[1]->nb[1]/op->src[1]->nb[0];
+    const int32_t ns20 = op->src[2]->nb[1]/op->src[2]->nb[0];
+
+    snprintf(base, 256, "kernel_%s_%s_dk%d_dv%d",
+            "flash_attn_ext_vec",
+            ggml_type_name(op->src[1]->type),
+            dk,
+            dv);
+
+    snprintf(name, 256, "kernel_%s_%s_dk%d_dv%d_mask=%d_sink=%d_bias=%d_softcap=%d_ns10=%d_ns20=%d_nsg=%d_nwg=%d",
+            "flash_attn_ext_vec",
+            ggml_type_name(op->src[1]->type),
+            dk,
+            dv,
+            has_mask,
+            has_sinks,
+            has_bias,
+            has_scap,
+            ns10,
+            ns20,
+            nsg, nwg);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+    ggml_metal_cv_set_bool(cv, has_mask,  FC_FLASH_ATTN_EXT_VEC + 0);
+    ggml_metal_cv_set_bool(cv, has_sinks, FC_FLASH_ATTN_EXT_VEC + 1);
+    ggml_metal_cv_set_bool(cv, has_bias,  FC_FLASH_ATTN_EXT_VEC + 2);
+    ggml_metal_cv_set_bool(cv, has_scap,  FC_FLASH_ATTN_EXT_VEC + 3);
+
+    ggml_metal_cv_set_int32(cv, ns10, FC_FLASH_ATTN_EXT_VEC + 20);
+    ggml_metal_cv_set_int32(cv, ns20, FC_FLASH_ATTN_EXT_VEC + 21);
+    ggml_metal_cv_set_int32(cv, nsg,  FC_FLASH_ATTN_EXT_VEC + 22);
+    ggml_metal_cv_set_int32(cv, nwg,  FC_FLASH_ATTN_EXT_VEC + 23);
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+    ggml_metal_cv_free(cv);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_flash_attn_ext_vec_reduce(
+        ggml_metal_library_t lib,
+        const ggml_tensor * op,
+        int32_t dv,
+        int32_t nwg) {
+    assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_flash_attn_ext_vec_reduce");
+    snprintf(name, 256, "kernel_flash_attn_ext_vec_reduce_dv=%d_nwg=%d", dv, nwg);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+    ggml_metal_cv_set_int32(cv, dv,  FC_FLASH_ATTN_EXT_VEC_REDUCE + 0);
+    ggml_metal_cv_set_int32(cv, nwg, FC_FLASH_ATTN_EXT_VEC_REDUCE + 1);
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+    ggml_metal_cv_free(cv);
+
+    return res;
+
+    GGML_UNUSED(op);
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_bin(
+        ggml_metal_library_t lib,
+        ggml_op op,
+        int32_t n_fuse,
+        bool row) {
+    char base[256];
+    char name[256];
+
+    const char * op_str = "undefined";
+    switch (op) {
+        case GGML_OP_ADD:   op_str = "add";   break;
+        case GGML_OP_SUB:   op_str = "sub";   break;
+        case GGML_OP_MUL:   op_str = "mul";   break;
+        case GGML_OP_DIV:   op_str = "div";   break;
+        default: GGML_ABORT("fatal error");
+    };
+
+    if (row) {
+        snprintf(base, 256, "kernel_%s_row_c4_fuse_%d", op_str, n_fuse);
+    } else {
+        snprintf(base, 256, "kernel_%s_fuse_%d", op_str, n_fuse);
+    }
+
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_rms_norm(ggml_metal_library_t lib, const ggml_tensor * op, int32_t n_fuse) {
+    assert(op->op == GGML_OP_RMS_NORM);
+
+    GGML_ASSERT(op->src[0]->ne[0] % 4 == 0);
+    GGML_ASSERT(ggml_is_contiguous_rows(op->src[0]));
+
+    char base[256];
+    char name[256];
+
+    switch (n_fuse) {
+        case 1: snprintf(base, 256, "kernel_rms_norm_f32");         break;
+        case 2: snprintf(base, 256, "kernel_rms_norm_mul_f32");     break;
+        case 3: snprintf(base, 256, "kernel_rms_norm_mul_add_f32"); break;
+        default: GGML_ABORT("fatal error");
+    }
+
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    ggml_metal_pipeline_set_smem(res, 32*sizeof(float));
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_l2_norm(ggml_metal_library_t lib, const ggml_tensor * op) {
+    assert(op->op == GGML_OP_L2_NORM);
+
+    GGML_ASSERT(op->src[0]->ne[0] % 4 == 0);
+    GGML_ASSERT(ggml_is_contiguous_1(op->src[0]));
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_l2_norm_f32");
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    ggml_metal_pipeline_set_smem(res, 32*sizeof(float));
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_group_norm(ggml_metal_library_t lib, const ggml_tensor * op) {
+    assert(op->op == GGML_OP_GROUP_NORM);
+
+    GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_group_norm_f32");
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    ggml_metal_pipeline_set_smem(res, 32*sizeof(float));
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_norm(ggml_metal_library_t lib, const ggml_tensor * op) {
+    assert(op->op == GGML_OP_NORM);
+
+    GGML_ASSERT(op->src[0]->ne[0] % 4 == 0);
+    GGML_ASSERT(ggml_is_contiguous_1(op->src[0]));
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_norm_f32");
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    ggml_metal_pipeline_set_smem(res, 32*sizeof(float));
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_rope(ggml_metal_library_t lib, const ggml_tensor * op) {
+    assert(op->op == GGML_OP_ROPE);
+
+    char base[256];
+    char name[256];
+
+    const int mode = ((const int32_t *) op->op_params)[2];
+
+    const bool is_neox   = mode & GGML_ROPE_TYPE_NEOX;
+    const bool is_mrope  = mode & GGML_ROPE_TYPE_MROPE;
+    const bool is_vision = mode == GGML_ROPE_TYPE_VISION;
+
+    if (is_neox) {
+        snprintf(base, 256, "kernel_rope_neox_%s", ggml_type_name(op->src[0]->type));
+    } else if (is_mrope && !is_vision) {
+        GGML_ASSERT(op->src[1]->ne[0]*4 >= op->src[0]->ne[2]); // need at least 4 pos per token
+        snprintf(base, 256, "kernel_rope_multi_%s", ggml_type_name(op->src[0]->type));
+    } else if (is_vision) {
+        GGML_ASSERT(op->src[1]->ne[0]*4 >= op->src[0]->ne[2]); // need at least 4 pos per token
+        snprintf(base, 256, "kernel_rope_vision_%s", ggml_type_name(op->src[0]->type));
+    } else {
+        snprintf(base, 256, "kernel_rope_norm_%s", ggml_type_name(op->src[0]->type));
+    }
+
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_im2col(ggml_metal_library_t lib, const ggml_tensor * op) {
+    assert(op->op == GGML_OP_IM2COL);
+
+    GGML_ASSERT(ggml_is_contiguous(op->src[1]));
+    GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+    GGML_ASSERT(op->type         == GGML_TYPE_F16 || op->type == GGML_TYPE_F32);
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_im2col_ext_%s", ggml_type_name(op->type));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_conv_transpose_1d(ggml_metal_library_t lib, const ggml_tensor * op) {
+    assert(op->op == GGML_OP_CONV_TRANSPOSE_1D);
+
+    GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+    GGML_ASSERT(ggml_is_contiguous(op->src[1]));
+    GGML_ASSERT(op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32);
+    GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+    GGML_ASSERT(op->type         == GGML_TYPE_F32);
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_conv_transpose_1d_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_upscale(ggml_metal_library_t lib, const ggml_tensor * op) {
+    assert(op->op == GGML_OP_UPSCALE);
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_upscale_%s", ggml_type_name(op->src[0]->type));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_pad(ggml_metal_library_t lib, const ggml_tensor * op) {
+    assert(op->op == GGML_OP_PAD);
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_pad_%s", ggml_type_name(op->src[0]->type));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_pad_reflect_1d(ggml_metal_library_t lib, const ggml_tensor * op) {
+    assert(op->op == GGML_OP_PAD_REFLECT_1D);
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_pad_reflect_1d_%s", ggml_type_name(op->src[0]->type));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_arange(ggml_metal_library_t lib, const ggml_tensor * op) {
+    assert(op->op == GGML_OP_ARANGE);
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_arange_%s", ggml_type_name(op->type));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_timestep_embedding(ggml_metal_library_t lib, const ggml_tensor * op) {
+    assert(op->op == GGML_OP_TIMESTEP_EMBEDDING);
+
+    char base[256];
+    char name[256];
+
+    snprintf(base, 256, "kernel_timestep_embedding_%s", ggml_type_name(op->src[0]->type));
+    snprintf(name, 256, "%s", base);
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        return res;
+    }
+
+    res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+
+    return res;
+}
+
diff --git a/ggml/src/ggml-metal/ggml-metal-device.h b/ggml/src/ggml-metal/ggml-metal-device.h
new file mode 100644 (file)
index 0000000..c48337f
--- /dev/null
@@ -0,0 +1,226 @@
+#pragma once
+
+#include "ggml.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct ggml_metal_buffer_id {
+    void * metal; // id<MTLBuffer>
+    size_t offs;
+};
+
+typedef struct ggml_metal_device * ggml_metal_device_t;
+
+//
+// MTLFunctionConstantValues wrapper
+//
+
+typedef struct ggml_metal_cv * ggml_metal_cv_t;
+
+ggml_metal_cv_t ggml_metal_cv_init(void);
+void ggml_metal_cv_free(ggml_metal_cv_t cv);
+
+void ggml_metal_cv_set_int32(ggml_metal_cv_t cv, int32_t value, int32_t idx);
+void ggml_metal_cv_set_bool (ggml_metal_cv_t cv, bool    value, int32_t idx);
+
+//
+// MTLComputePipelineState wrapper
+//
+
+typedef struct ggml_metal_pipeline * ggml_metal_pipeline_t;
+
+ggml_metal_pipeline_t ggml_metal_pipeline_init(void);
+void ggml_metal_pipeline_free(ggml_metal_pipeline_t pipeline);
+
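+// suggested dispatch parameters attached to a pipeline by the ggml_metal_library_get_pipeline_* getters:
+// - nsg:  number of simdgroups
+// - nr0:  number of src0 rows per simdgroup
+// - nr1:  number of src1 rows per threadgroup
+// - smem: shared (threadgroup) memory size in bytes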
+void ggml_metal_pipeline_set_nsg(ggml_metal_pipeline_t pipeline, int nsg);
+int  ggml_metal_pipeline_get_nsg(ggml_metal_pipeline_t pipeline);
+
+void ggml_metal_pipeline_set_nr0(ggml_metal_pipeline_t pipeline, int nr0);
+int  ggml_metal_pipeline_get_nr0(ggml_metal_pipeline_t pipeline);
+
+void ggml_metal_pipeline_set_nr1(ggml_metal_pipeline_t pipeline, int nr1);
+int  ggml_metal_pipeline_get_nr1(ggml_metal_pipeline_t pipeline);
+
+void   ggml_metal_pipeline_set_smem(ggml_metal_pipeline_t pipeline, size_t smem);
+size_t ggml_metal_pipeline_get_smem(ggml_metal_pipeline_t pipeline);
+
+int ggml_metal_pipeline_max_theads_per_threadgroup(ggml_metal_pipeline_t pipeline);
+
+// a collection of pipelines
+typedef struct ggml_metal_pipelines * ggml_metal_pipelines_t;
+
+ggml_metal_pipelines_t ggml_metal_pipelines_init(void);
+void ggml_metal_pipelines_free(ggml_metal_pipelines_t ppls);
+
+void                  ggml_metal_pipelines_add(ggml_metal_pipelines_t ppls, const char * name, ggml_metal_pipeline_t pipeline);
+ggml_metal_pipeline_t ggml_metal_pipelines_get(ggml_metal_pipelines_t ppls, const char * name);
+
+//
+// MTLCommandBuffer wrapper
+//
+
+typedef void * ggml_metal_cmd_buf_t;
+
+//
+// MTLComputeCommandEncoder wrapper
+//
+
+typedef struct ggml_metal_encoder * ggml_metal_encoder_t;
+
+ggml_metal_encoder_t ggml_metal_encoder_init(ggml_metal_cmd_buf_t cmd_buf_raw, bool concurrent);
+void ggml_metal_encoder_free(ggml_metal_encoder_t encoder);
+
+void ggml_metal_encoder_debug_group_push(ggml_metal_encoder_t encoder, const char * name);
+void ggml_metal_encoder_debug_group_pop (ggml_metal_encoder_t encoder);
+
+void ggml_metal_encoder_set_pipeline(ggml_metal_encoder_t encoder, ggml_metal_pipeline_t pipeline);
+
+void ggml_metal_encoder_set_bytes (ggml_metal_encoder_t encoder, void * data, size_t size, int idx);
+void ggml_metal_encoder_set_buffer(ggml_metal_encoder_t encoder, struct ggml_metal_buffer_id buffer, int idx);
+
+void ggml_metal_encoder_set_threadgroup_memory_size(ggml_metal_encoder_t encoder, size_t size, int idx);
+
+void ggml_metal_encoder_dispatch_threadgroups(ggml_metal_encoder_t encoder, int tg0, int tg1, int tg2, int tptg0, int tptg1, int tptg2);
+
+void ggml_metal_encoder_memory_barrier(ggml_metal_encoder_t encoder);
+
+void ggml_metal_encoder_end_encoding(ggml_metal_encoder_t encoder);
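+
+// illustrative dispatch sketch (not part of the API; `enc`, `ppl`, `args`, the buffer ids and the sizes are placeholders):
+//
+//   ggml_metal_encoder_set_pipeline(enc, ppl);
+//   ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+//   ggml_metal_encoder_set_buffer  (enc, bid_src0, 1);
+//   ggml_metal_encoder_set_buffer  (enc, bid_dst,  2);
+//   ggml_metal_encoder_set_threadgroup_memory_size(enc, ggml_metal_pipeline_get_smem(ppl), 0);
+//   ggml_metal_encoder_dispatch_threadgroups(enc, tg0, tg1, tg2, tptg0, tptg1, tptg2);
+//   ggml_metal_encoder_end_encoding(enc);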
+
+//
+// MTLLibrary wrapper
+//
+
+typedef struct ggml_metal_library * ggml_metal_library_t;
+
+ggml_metal_library_t ggml_metal_library_init(ggml_metal_device_t dev);
+void ggml_metal_library_free(ggml_metal_library_t lib);
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline    (ggml_metal_library_t lib, const char * name);
+ggml_metal_pipeline_t ggml_metal_library_compile_pipeline(ggml_metal_library_t lib, const char * base, const char * name, ggml_metal_cv_t cv);
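+
+// the op-specific getters below follow a get-or-compile pattern (sketch; `base`, `name` and `cv` are placeholders):
+//
+//   ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+//   if (!res) {
+//       res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+//   }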
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_base              (ggml_metal_library_t lib, enum ggml_op op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_cpy               (ggml_metal_library_t lib, enum ggml_type tsrc, enum ggml_type tdst);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_pool_2d           (ggml_metal_library_t lib, const struct ggml_tensor * op, enum ggml_op_pool op_pool);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_get_rows          (ggml_metal_library_t lib, enum ggml_type tsrc);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_set_rows          (ggml_metal_library_t lib, enum ggml_type tdst);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_repeat            (ggml_metal_library_t lib, enum ggml_type tsrc);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_unary             (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_glu               (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_sum_rows          (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_soft_max          (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_ssm_conv          (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_ssm_scan          (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_rwkv              (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mv_ext        (ggml_metal_library_t lib, enum ggml_type tsrc0, enum ggml_type tsrc1, int r1ptg);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mm            (ggml_metal_library_t lib, enum ggml_type tsrc0, enum ggml_type tsrc1);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mv            (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mm_id_map0    (ggml_metal_library_t lib, int ne02, int ne20);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mm_id         (ggml_metal_library_t lib, enum ggml_type tsrc0, enum ggml_type tsrc1);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mv_id         (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_argmax            (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_argsort           (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_bin               (ggml_metal_library_t lib, enum ggml_op op, int32_t n_fuse, bool row);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_rms_norm          (ggml_metal_library_t lib, const struct ggml_tensor * op, int32_t n_fuse);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_l2_norm           (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_group_norm        (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_norm              (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_rope              (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_im2col            (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_conv_transpose_1d (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_upscale           (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_pad               (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_pad_reflect_1d    (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_arange            (ggml_metal_library_t lib, const struct ggml_tensor * op);
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_timestep_embedding(ggml_metal_library_t lib, const struct ggml_tensor * op);
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_flash_attn_ext(
+        ggml_metal_library_t lib,
+        const struct ggml_tensor * op,
+        bool    has_mask,
+        bool    has_sinks,
+        bool    has_bias,
+        bool    has_scap,
+        int32_t nsg);
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_flash_attn_ext_vec(
+        ggml_metal_library_t lib,
+        const struct ggml_tensor * op,
+        bool    has_mask,
+        bool    has_sinks,
+        bool    has_bias,
+        bool    has_scap,
+        int32_t nsg,
+        int32_t nwg);
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline_flash_attn_ext_vec_reduce(
+        ggml_metal_library_t lib,
+        const struct ggml_tensor * op,
+        int32_t dv,
+        int32_t nwg);
+
+//
+// device
+//
+
+struct ggml_metal_device_props {
+    char name[128];
+
+    size_t max_buffer_size;
+    size_t max_working_set_size;
+    size_t max_theadgroup_memory_size;
+
+    bool has_simdgroup_reduction;
+    bool has_simdgroup_mm;
+    bool has_unified_memory;
+    bool has_bfloat;
+    bool use_residency_sets;
+    bool use_shared_buffers;
+
+    bool supports_gpu_family_apple7;
+};
+
+ggml_metal_device_t ggml_metal_device_init(void);
+void ggml_metal_device_free(ggml_metal_device_t dev);
+
+// return a singleton that is automatically destroyed when the program exits
+ggml_metal_device_t ggml_metal_device_get(void);
+
+void * ggml_metal_device_get_obj  (ggml_metal_device_t dev); // id<MTLDevice>
+void * ggml_metal_device_get_queue(ggml_metal_device_t dev); // id<MTLCommandQueue>
+
+ggml_metal_library_t ggml_metal_device_get_library(ggml_metal_device_t dev);
+
+void ggml_metal_device_get_memory(ggml_metal_device_t dev, size_t * free, size_t * total);
+bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_tensor * op);
+
+const struct ggml_metal_device_props * ggml_metal_device_get_props(ggml_metal_device_t dev);
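+
+// example (sketch): obtaining the shared device, its properties and its pipeline library
+//
+//   ggml_metal_device_t dev = ggml_metal_device_get();
+//
+//   const struct ggml_metal_device_props * props = ggml_metal_device_get_props(dev);
+//   ggml_metal_library_t                   lib   = ggml_metal_device_get_library(dev);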
+
+//
+// device buffers
+//
+
+typedef struct ggml_metal_buffer * ggml_metal_buffer_t;
+
+ggml_metal_buffer_t ggml_metal_buffer_init(ggml_metal_device_t dev, size_t size, bool shared);
+ggml_metal_buffer_t ggml_metal_buffer_map (ggml_metal_device_t dev, void * ptr, size_t size, size_t max_tensor_size);
+
+void   ggml_metal_buffer_free     (ggml_metal_buffer_t buf);
+void * ggml_metal_buffer_get_base (ggml_metal_buffer_t buf);
+bool   ggml_metal_buffer_is_shared(ggml_metal_buffer_t buf);
+
+void   ggml_metal_buffer_memset_tensor(ggml_metal_buffer_t buf, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
+void   ggml_metal_buffer_set_tensor   (ggml_metal_buffer_t buf, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+void   ggml_metal_buffer_get_tensor   (ggml_metal_buffer_t buf, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+void   ggml_metal_buffer_clear        (ggml_metal_buffer_t buf, uint8_t value);
+
+// finds the Metal buffer that contains the tensor data on the GPU device
+// the assumption is that there is 1-to-1 mapping between the host and device memory buffers, so we can find the
+// Metal buffer based on the host memory pointer
+//
+struct ggml_metal_buffer_id ggml_metal_buffer_get_id(ggml_metal_buffer_t buf, const struct ggml_tensor * t);
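+
+// example (sketch): resolving a tensor to a buffer id and binding it to a compute encoder (`enc` and `idx` are placeholders)
+//
+//   struct ggml_metal_buffer_id bid = ggml_metal_buffer_get_id(buf, tensor);
+//   ggml_metal_encoder_set_buffer(enc, bid, idx);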
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/ggml/src/ggml-metal/ggml-metal-device.m b/ggml/src/ggml-metal/ggml-metal-device.m
new file mode 100644 (file)
index 0000000..9983640
--- /dev/null
@@ -0,0 +1,1289 @@
+#import "ggml-metal-device.h"
+
+#import "ggml-impl.h"
+#import "ggml-threading.h"
+
+#include <Foundation/Foundation.h>
+
+#include <Metal/Metal.h>
+
+#ifndef TARGET_OS_VISION
+#define TARGET_OS_VISION 0
+#endif
+
+// create residency sets only on macOS >= 15.0
+#if !TARGET_CPU_X86_64 && TARGET_OS_OSX && __MAC_OS_X_VERSION_MAX_ALLOWED >= 150000 || \
+    TARGET_OS_IOS && __IPHONE_OS_VERSION_MAX_ALLOWED >= 180000 || \
+    TARGET_OS_TV && __TV_OS_VERSION_MAX_ALLOWED >= 180000 || \
+    TARGET_OS_VISION && __VISION_OS_VERSION_MAX_ALLOWED >= 200000
+#define GGML_METAL_HAS_RESIDENCY_SETS 1
+#endif
+
+// overload of MTLGPUFamilyMetal3 (not available in some environments)
+static const NSInteger MTLGPUFamilyMetal3_GGML = 5001;
+
+#if !GGML_METAL_EMBED_LIBRARY
+// Here to assist with NSBundle Path Hack
+@interface GGMLMetalClass : NSObject
+@end
+@implementation GGMLMetalClass
+@end
+#endif
+
+//
+// MTLFunctionConstantValues wrapper
+//
+
+struct ggml_metal_cv {
+    MTLFunctionConstantValues * obj;
+};
+
+ggml_metal_cv_t ggml_metal_cv_init(void) {
+    ggml_metal_cv_t res = calloc(1, sizeof(struct ggml_metal_cv));
+
+    res->obj = [[MTLFunctionConstantValues alloc] init];
+
+    return res;
+}
+
+void ggml_metal_cv_free(ggml_metal_cv_t cv) {
+    [cv->obj release];
+    free(cv);
+}
+
+void ggml_metal_cv_set_int32(ggml_metal_cv_t cv, int32_t value, int32_t idx) {
+    [cv->obj setConstantValue:&value type:MTLDataTypeInt atIndex:idx];
+}
+
+void ggml_metal_cv_set_bool(ggml_metal_cv_t cv, bool value, int32_t idx) {
+    [cv->obj setConstantValue:&value type:MTLDataTypeBool atIndex:idx];
+}
+
+//
+// MTLComputePipelineState wrapper
+//
+
+struct ggml_metal_pipeline {
+    id<MTLComputePipelineState> obj;
+
+    // suggested dispatch sizes
+    int nsg;     // number of simdgroups
+
+    int nr0;     // number of src0 rows per simdgroup
+    int nr1;     // number of src1 rows per threadgroup
+
+    size_t smem; // shared (threadgroup) memory size in bytes
+};
+
+ggml_metal_pipeline_t ggml_metal_pipeline_init(void) {
+    ggml_metal_pipeline_t res = calloc(1, sizeof(struct ggml_metal_pipeline));
+
+    *res = (struct ggml_metal_pipeline) {
+        /*.obj  =*/ nil,
+        /*.nsg  =*/ 0,
+        /*.nr0  =*/ 0,
+        /*.nr1  =*/ 0,
+        /*.smem =*/ 0,
+    };
+
+    return res;
+}
+
+void ggml_metal_pipeline_free(ggml_metal_pipeline_t pipeline) {
+    [pipeline->obj release];
+
+    free(pipeline);
+}
+
+void ggml_metal_pipeline_set_nsg(ggml_metal_pipeline_t pipeline, int nsg) {
+    pipeline->nsg = nsg;
+}
+
+int ggml_metal_pipeline_get_nsg(ggml_metal_pipeline_t pipeline) {
+    return pipeline->nsg;
+}
+
+void ggml_metal_pipeline_set_nr0(ggml_metal_pipeline_t pipeline, int nr0) {
+    pipeline->nr0 = nr0;
+}
+
+int ggml_metal_pipeline_get_nr0(ggml_metal_pipeline_t pipeline) {
+    return pipeline->nr0;
+}
+
+void ggml_metal_pipeline_set_nr1(ggml_metal_pipeline_t pipeline, int nr1) {
+    pipeline->nr1 = nr1;
+}
+
+int ggml_metal_pipeline_get_nr1(ggml_metal_pipeline_t pipeline) {
+    return pipeline->nr1;
+}
+
+void   ggml_metal_pipeline_set_smem(ggml_metal_pipeline_t pipeline, size_t smem) {
+    pipeline->smem = smem;
+}
+
+size_t ggml_metal_pipeline_get_smem(ggml_metal_pipeline_t pipeline) {
+    return pipeline->smem;
+}
+
+int ggml_metal_pipeline_max_theads_per_threadgroup(ggml_metal_pipeline_t pipeline) {
+    return pipeline->obj.maxTotalThreadsPerThreadgroup;
+}
+
+struct ggml_metal_library {
+    id<MTLLibrary> obj;
+    id<MTLDevice> device;
+
+    ggml_metal_pipelines_t pipelines; // cache of compiled pipelines
+};
+
+ggml_metal_library_t ggml_metal_library_init(ggml_metal_device_t dev) {
+    id<MTLLibrary> library = nil;
+    id<MTLDevice> device = ggml_metal_device_get_obj(dev);
+
+    // load library
+    //
+    // - first check if the library is embedded
+    // - then check if the library is in the bundle
+    // - if not found, load the source and compile it
+    // - if that fails, return NULL
+    //
+    // TODO: move to a function
+    {
+        const int64_t t_start = ggml_time_us();
+
+        NSError * error = nil;
+        NSString * src = nil;
+
+#if GGML_METAL_EMBED_LIBRARY
+        GGML_LOG_INFO("%s: using embedded metal library\n", __func__);
+
+        extern const char ggml_metallib_start[];
+        extern const char ggml_metallib_end[];
+
+        src = [[NSString alloc] initWithBytes:ggml_metallib_start length:(ggml_metallib_end-ggml_metallib_start) encoding:NSUTF8StringEncoding];
+#else
+
+#ifdef SWIFT_PACKAGE
+        NSBundle * bundle = SWIFTPM_MODULE_BUNDLE;
+#else
+        NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
+#endif
+
+        NSString * path_lib = [bundle pathForResource:@"default" ofType:@"metallib"];
+        if (path_lib == nil) {
+            // Try to find the resource in the directory where the current binary is located.
+            NSString * bin_cur = [[NSProcessInfo processInfo] arguments][0];
+            NSString * bin_dir = [bin_cur stringByDeletingLastPathComponent];
+
+            NSString * path_lib_default = [NSString pathWithComponents:@[bin_dir, @"default.metallib"]];
+            if ([[NSFileManager defaultManager] isReadableFileAtPath:path_lib_default]) {
+                GGML_LOG_INFO("%s: found '%s'\n", __func__, [path_lib_default UTF8String]);
+
+                NSDictionary * atts = [[NSFileManager defaultManager] attributesOfItemAtPath:path_lib_default error:&error];
+                if (atts && atts[NSFileType] == NSFileTypeSymbolicLink) {
+                    // Optionally, if this is a symlink, try to resolve it.
+                    path_lib_default = [[NSFileManager defaultManager] destinationOfSymbolicLinkAtPath:path_lib_default error:&error];
+                    if (path_lib_default && [path_lib_default length] > 0 && ![[path_lib_default substringToIndex:1] isEqualToString:@"/"]) {
+                        // It is a relative path, so prepend the binary directory.
+                        path_lib_default = [NSString pathWithComponents:@[bin_dir, path_lib_default]];
+                    }
+                    if (!path_lib_default || ![[NSFileManager defaultManager] isReadableFileAtPath:path_lib_default]) {
+                        // Link to the resource could not be resolved.
+                        path_lib_default = nil;
+                    } else {
+                        GGML_LOG_INFO("%s: symlink resolved '%s'\n", __func__, [path_lib_default UTF8String]);
+                    }
+                }
+            } else {
+                // The resource couldn't be found in the binary's directory.
+                path_lib_default = nil;
+            }
+
+            path_lib = path_lib_default;
+        }
+
+        if (path_lib != nil) {
+            // pre-compiled library found
+            NSURL * libURL = [NSURL fileURLWithPath:path_lib];
+            GGML_LOG_INFO("%s: loading '%s'\n", __func__, [path_lib UTF8String]);
+
+            library = [device newLibraryWithURL:libURL error:&error];
+            if (error) {
+                GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
+                return nil;
+            }
+        } else {
+            GGML_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__);
+
+            NSString * path_source;
+            NSString * path_resource = [[NSProcessInfo processInfo].environment objectForKey:@"GGML_METAL_PATH_RESOURCES"];
+
+            GGML_LOG_INFO("%s: GGML_METAL_PATH_RESOURCES = %s\n", __func__, path_resource ? [path_resource UTF8String] : "nil");
+
+            if (path_resource) {
+                path_source = [path_resource stringByAppendingPathComponent:@"ggml-metal.metal"];
+            } else {
+                path_source = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
+            }
+
+            if (path_source == nil) {
+                GGML_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__);
+                path_source = @"ggml-metal.metal";
+            }
+
+            GGML_LOG_INFO("%s: loading '%s'\n", __func__, [path_source UTF8String]);
+
+            src = [NSString stringWithContentsOfFile:path_source encoding:NSUTF8StringEncoding error:&error];
+            if (error) {
+                GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
+                return nil;
+            }
+        }
+#endif
+
+        if (!library) {
+            @autoreleasepool {
+                // dictionary of preprocessor macros
+                NSMutableDictionary * prep = [NSMutableDictionary dictionary];
+
+                if (ggml_metal_device_get_props(dev)->has_bfloat) {
+                    [prep setObject:@"1" forKey:@"GGML_METAL_HAS_BF16"];
+                }
+
+#if GGML_METAL_EMBED_LIBRARY
+                [prep setObject:@"1" forKey:@"GGML_METAL_EMBED_LIBRARY"];
+#endif
+
+                MTLCompileOptions * options = [MTLCompileOptions new];
+                options.preprocessorMacros = prep;
+
+                //[options setFastMathEnabled:false];
+
+                library = [device newLibraryWithSource:src options:options error:&error];
+                if (error) {
+                    GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
+                    return nil;
+                }
+
+#if !__has_feature(objc_arc)
+                [options release];
+#endif
+            }
+        }
+
+#if GGML_METAL_EMBED_LIBRARY
+        [src release];
+#endif // GGML_METAL_EMBED_LIBRARY
+
+        GGML_LOG_INFO("%s: loaded in %.3f sec\n", __func__, (ggml_time_us() - t_start) / 1e6);
+    }
+
+    ggml_metal_library_t res = calloc(1, sizeof(struct ggml_metal_library));
+
+    res->obj = library;
+    res->device = device;
+    res->pipelines = ggml_metal_pipelines_init();
+
+    return res;
+}
+
+void ggml_metal_library_free(ggml_metal_library_t lib) {
+    if (!lib) {
+        return;
+    }
+
+    if (lib->obj) {
+        [lib->obj release];
+    }
+
+    ggml_metal_pipelines_free(lib->pipelines);
+
+    free(lib);
+}
+
+ggml_metal_pipeline_t ggml_metal_library_get_pipeline(ggml_metal_library_t lib, const char * name) {
+    return ggml_metal_pipelines_get(lib->pipelines, name);
+}
+
+ggml_metal_pipeline_t ggml_metal_library_compile_pipeline(ggml_metal_library_t lib, const char * base, const char * name, ggml_metal_cv_t cv) {
+    // note: the pipelines are cached in the library per device, so they are shared across all metal contexts
+    ggml_critical_section_start();
+
+    ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name);
+    if (res) {
+        ggml_critical_section_end();
+
+        return res;
+    }
+
+    res = ggml_metal_pipeline_init();
+
+    @autoreleasepool {
+        NSError * error = nil;
+
+        NSString * base_func = [NSString stringWithUTF8String:base];
+
+        GGML_LOG_DEBUG("%s: compiling pipeline: base = '%s', name = '%s'\n", __func__, base, name);
+
+        id<MTLFunction> mtl_function = [lib->obj newFunctionWithName:base_func constantValues:(cv ? cv->obj : nil) error:&error];
+        if (!mtl_function) {
+            ggml_critical_section_end();
+
+            GGML_LOG_ERROR("%s: error: failed to compile pipeline: base = '%s', name = '%s'\n", __func__, base, name);
+            GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
+
+            return nil;
+        }
+
+        res->obj = [lib->device newComputePipelineStateWithFunction:mtl_function error:&error];
+
+        ggml_metal_pipelines_add(lib->pipelines, name, res);
+
+        [mtl_function release];
+
+        GGML_LOG_DEBUG("%s: loaded %-40s %16p | th_max = %4d | th_width = %4d\n", __func__, name, (void *) res->obj,
+                (int) res->obj.maxTotalThreadsPerThreadgroup,
+                (int) res->obj.threadExecutionWidth);
+    }
+
+    ggml_critical_section_end();
+
+    return res;
+}
+
+//
+// MTLComputeCommandEncoder wrapper
+//
+
+struct ggml_metal_encoder {
+    id<MTLComputeCommandEncoder> obj;
+};
+
+ggml_metal_encoder_t ggml_metal_encoder_init(ggml_metal_cmd_buf_t cmd_buf_raw, bool concurrent) {
+    ggml_metal_encoder_t res = calloc(1, sizeof(struct ggml_metal_encoder));
+
+    id<MTLCommandBuffer> cmd_buf = (id<MTLCommandBuffer>) cmd_buf_raw;
+
+    if (concurrent) {
+        res->obj = [cmd_buf computeCommandEncoderWithDispatchType: MTLDispatchTypeConcurrent];
+    } else {
+        res->obj = [cmd_buf computeCommandEncoder];
+    }
+
+    [res->obj retain];
+
+    return res;
+}
+
+void ggml_metal_encoder_free(ggml_metal_encoder_t encoder) {
+    [encoder->obj release];
+    free(encoder);
+}
+
+void ggml_metal_encoder_debug_group_push(ggml_metal_encoder_t encoder, const char * name) {
+    [encoder->obj pushDebugGroup:[NSString stringWithCString:name encoding:NSUTF8StringEncoding]];
+}
+
+void ggml_metal_encoder_debug_group_pop (ggml_metal_encoder_t encoder) {
+    [encoder->obj popDebugGroup];
+}
+
+void ggml_metal_encoder_set_pipeline(ggml_metal_encoder_t encoder, ggml_metal_pipeline_t pipeline) {
+    [encoder->obj setComputePipelineState:pipeline->obj];
+}
+
+void ggml_metal_encoder_set_bytes(ggml_metal_encoder_t encoder, void * data, size_t size, int idx) {
+    [encoder->obj setBytes:data length:size atIndex:idx];
+}
+
+void ggml_metal_encoder_set_buffer(ggml_metal_encoder_t encoder, struct ggml_metal_buffer_id buffer, int idx) {
+    [encoder->obj setBuffer:buffer.metal offset:buffer.offs atIndex:idx];
+}
+
+void ggml_metal_encoder_set_threadgroup_memory_size(ggml_metal_encoder_t encoder, size_t size, int idx) {
+    [encoder->obj setThreadgroupMemoryLength:size atIndex:idx];
+}
+
+void ggml_metal_encoder_dispatch_threadgroups(ggml_metal_encoder_t encoder, int tg0, int tg1, int tg2, int tptg0, int tptg1, int tptg2) {
+    [encoder->obj dispatchThreadgroups:MTLSizeMake(tg0, tg1, tg2) threadsPerThreadgroup:MTLSizeMake(tptg0, tptg1, tptg2)];
+}
+
+void ggml_metal_encoder_memory_barrier(ggml_metal_encoder_t encoder) {
+    [encoder->obj memoryBarrierWithScope:MTLBarrierScopeBuffers];
+}
+
+void ggml_metal_encoder_end_encoding(ggml_metal_encoder_t encoder) {
+    [encoder->obj endEncoding];
+}
+
+struct ggml_metal_device {
+    id<MTLDevice> mtl_device;
+
+    // a single global queue shared by all Metal backends
+    // technically not needed for devices with unified memory, but enables discrete GPUs support
+    // ref: https://github.com/ggml-org/llama.cpp/pull/15906
+    id<MTLCommandQueue> mtl_queue;
+
+    ggml_metal_library_t library;
+
+    struct ggml_metal_device_props props;
+};
+
+ggml_metal_device_t ggml_metal_device_init(void) {
+    ggml_metal_device_t dev = calloc(1, sizeof(struct ggml_metal_device));
+
+    assert(dev != NULL);
+
+    if (dev->mtl_device == nil) {
+        dev->mtl_device = MTLCreateSystemDefaultDevice();
+
+        if (dev->mtl_device) {
+            dev->mtl_queue = [dev->mtl_device newCommandQueue];
+            if (dev->mtl_queue == nil) {
+                GGML_LOG_ERROR("%s: error: failed to create command queue\n", __func__);
+            }
+
+            dev->props.has_simdgroup_reduction  = [dev->mtl_device supportsFamily:MTLGPUFamilyApple7];
+            dev->props.has_simdgroup_reduction |= [dev->mtl_device supportsFamily:MTLGPUFamilyMetal3_GGML];
+
+            dev->props.has_simdgroup_mm = [dev->mtl_device supportsFamily:MTLGPUFamilyApple7];
+            dev->props.has_unified_memory = dev->mtl_device.hasUnifiedMemory;
+
+            dev->props.has_bfloat  = [dev->mtl_device supportsFamily:MTLGPUFamilyMetal3_GGML];
+            dev->props.has_bfloat |= [dev->mtl_device supportsFamily:MTLGPUFamilyApple6];
+
+            dev->props.use_residency_sets = true;
+#if defined(GGML_METAL_HAS_RESIDENCY_SETS)
+            dev->props.use_residency_sets = getenv("GGML_METAL_NO_RESIDENCY") == nil;
+#endif
+
+            dev->props.use_shared_buffers = dev->props.has_unified_memory;
+
+            if (getenv("GGML_METAL_SHARED_BUFFERS_DISABLE") != NULL) {
+                dev->props.use_shared_buffers = false;
+            }
+
+            dev->props.supports_gpu_family_apple7 = [dev->mtl_device supportsFamily:MTLGPUFamilyApple7];
+
+            dev->props.max_buffer_size            = dev->mtl_device.maxBufferLength;
+            dev->props.max_working_set_size       = dev->mtl_device.recommendedMaxWorkingSetSize;
+            dev->props.max_theadgroup_memory_size = dev->mtl_device.maxThreadgroupMemoryLength;
+
+            strncpy(dev->props.name, [[dev->mtl_device name] UTF8String], sizeof(dev->props.name) - 1);
+
+            dev->library = ggml_metal_library_init(dev);
+            if (!dev->library) {
+                GGML_LOG_ERROR("%s: error: failed to create library\n", __func__);
+            }
+
+            // --------------------------------------------------
+
+            // print MTL GPU family:
+            GGML_LOG_INFO("%s: GPU name:   %s\n", __func__, dev->props.name);
+
+            // determine max supported GPU family
+            // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
+            // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
+            {
+                for (int i = MTLGPUFamilyApple1 + 20; i >= MTLGPUFamilyApple1; --i) {
+                    if ([dev->mtl_device supportsFamily:i]) {
+                        GGML_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d  (%d)\n", __func__, i - (int) MTLGPUFamilyApple1 + 1, i);
+                        break;
+                    }
+                }
+
+                for (int i = MTLGPUFamilyCommon1 + 5; i >= MTLGPUFamilyCommon1; --i) {
+                    if ([dev->mtl_device supportsFamily:i]) {
+                        GGML_LOG_INFO("%s: GPU family: MTLGPUFamilyCommon%d (%d)\n", __func__, i - (int) MTLGPUFamilyCommon1 + 1, i);
+                        break;
+                    }
+                }
+
+                for (int i = MTLGPUFamilyMetal3_GGML + 5; i >= MTLGPUFamilyMetal3_GGML; --i) {
+                    if ([dev->mtl_device supportsFamily:i]) {
+                        GGML_LOG_INFO("%s: GPU family: MTLGPUFamilyMetal%d  (%d)\n", __func__, i - (int) MTLGPUFamilyMetal3_GGML + 3, i);
+                        break;
+                    }
+                }
+            }
+
+            GGML_LOG_INFO("%s: simdgroup reduction   = %s\n", __func__, dev->props.has_simdgroup_reduction ? "true" : "false");
+            GGML_LOG_INFO("%s: simdgroup matrix mul. = %s\n", __func__, dev->props.has_simdgroup_mm        ? "true" : "false");
+            GGML_LOG_INFO("%s: has unified memory    = %s\n", __func__, dev->props.has_unified_memory      ? "true" : "false");
+            GGML_LOG_INFO("%s: has bfloat            = %s\n", __func__, dev->props.has_bfloat              ? "true" : "false");
+            GGML_LOG_INFO("%s: use residency sets    = %s\n", __func__, dev->props.use_residency_sets      ? "true" : "false");
+            GGML_LOG_INFO("%s: use shared buffers    = %s\n", __func__, dev->props.use_shared_buffers      ? "true" : "false");
+
+#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
+            if (@available(macOS 10.12, iOS 16.0, *)) {
+                GGML_LOG_INFO("%s: recommendedMaxWorkingSetSize  = %8.2f MB\n", __func__, dev->props.max_working_set_size / 1e6);
+            }
+#endif
+        }
+    }
+
+    return dev;
+}
+
+void ggml_metal_device_free(ggml_metal_device_t dev) {
+    assert(dev != NULL);
+
+    ggml_metal_library_free(dev->library);
+    dev->library = NULL;
+
+    if (dev->mtl_queue) {
+        [dev->mtl_queue release];
+        dev->mtl_queue = nil;
+    }
+
+    if (dev->mtl_device) {
+        [dev->mtl_device release];
+        dev->mtl_device = nil;
+    }
+
+    free(dev);
+}
+
+void * ggml_metal_device_get_obj(ggml_metal_device_t dev) {
+    return dev->mtl_device;
+}
+
+void * ggml_metal_device_get_queue(ggml_metal_device_t dev) {
+    return dev->mtl_queue;
+}
+
+ggml_metal_library_t ggml_metal_device_get_library(ggml_metal_device_t dev) {
+    return dev->library;
+}
+
+void ggml_metal_device_get_memory(ggml_metal_device_t dev, size_t * free, size_t * total) {
+    if (@available(macOS 10.12, iOS 16.0, *)) {
+        *total = dev->mtl_device.recommendedMaxWorkingSetSize;
+        *free  = *total - dev->mtl_device.currentAllocatedSize;
+    } else {
+        *free = 0;
+        *total = 0;
+    }
+}
+
+bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_tensor * op) {
+    const bool has_simdgroup_mm        = dev->props.has_simdgroup_mm;
+    const bool has_simdgroup_reduction = dev->props.has_simdgroup_reduction;
+    const bool has_bfloat              = dev->props.has_bfloat;
+
+    if (!has_bfloat) {
+        if (op->type == GGML_TYPE_BF16) {
+            return false;
+        }
+
+        for (size_t i = 0, n = 3; i < n; ++i) {
+            if (op->src[i] != NULL && op->src[i]->type == GGML_TYPE_BF16) {
+                return false;
+            }
+        }
+    }
+
+    switch (op->op) {
+        case GGML_OP_UNARY:
+            switch (ggml_get_unary_op(op)) {
+                case GGML_UNARY_OP_TANH:
+                case GGML_UNARY_OP_RELU:
+                case GGML_UNARY_OP_SIGMOID:
+                case GGML_UNARY_OP_GELU:
+                case GGML_UNARY_OP_GELU_ERF:
+                case GGML_UNARY_OP_GELU_QUICK:
+                case GGML_UNARY_OP_SILU:
+                case GGML_UNARY_OP_ELU:
+                case GGML_UNARY_OP_NEG:
+                case GGML_UNARY_OP_ABS:
+                case GGML_UNARY_OP_SGN:
+                case GGML_UNARY_OP_STEP:
+                case GGML_UNARY_OP_HARDSWISH:
+                case GGML_UNARY_OP_HARDSIGMOID:
+                case GGML_UNARY_OP_EXP:
+                    return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
+                default:
+                    return false;
+            }
+        case GGML_OP_GLU:
+            switch (ggml_get_glu_op(op)) {
+                case GGML_GLU_OP_REGLU:
+                case GGML_GLU_OP_GEGLU:
+                case GGML_GLU_OP_SWIGLU:
+                case GGML_GLU_OP_SWIGLU_OAI:
+                case GGML_GLU_OP_GEGLU_ERF:
+                case GGML_GLU_OP_GEGLU_QUICK:
+                    return ggml_is_contiguous_1(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
+                default:
+                    return false;
+            }
+        case GGML_OP_NONE:
+        case GGML_OP_RESHAPE:
+        case GGML_OP_VIEW:
+        case GGML_OP_TRANSPOSE:
+        case GGML_OP_PERMUTE:
+        case GGML_OP_CONCAT:
+            return true;
+        case GGML_OP_ADD:
+        case GGML_OP_SUB:
+        case GGML_OP_MUL:
+        case GGML_OP_DIV:
+        case GGML_OP_ADD_ID:
+            return op->src[0]->type == GGML_TYPE_F32;
+        case GGML_OP_ACC:
+        case GGML_OP_REPEAT:
+        case GGML_OP_SCALE:
+        case GGML_OP_CONV_TRANSPOSE_1D:
+            return true;
+        case GGML_OP_CLAMP:
+            return op->src[0]->type == GGML_TYPE_F32;
+        case GGML_OP_SQR:
+        case GGML_OP_SQRT:
+        case GGML_OP_SIN:
+        case GGML_OP_COS:
+        case GGML_OP_LOG:
+            return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
+        case GGML_OP_SUM_ROWS:
+        case GGML_OP_MEAN:
+        case GGML_OP_SOFT_MAX:
+        case GGML_OP_GROUP_NORM:
+            return has_simdgroup_reduction && ggml_is_contiguous_rows(op->src[0]);
+        case GGML_OP_RMS_NORM:
+        case GGML_OP_L2_NORM:
+            return has_simdgroup_reduction && (op->ne[0] % 4 == 0 && ggml_is_contiguous_1(op->src[0]));
+        case GGML_OP_ARGMAX:
+            return has_simdgroup_reduction;
+        case GGML_OP_NORM:
+            return has_simdgroup_reduction && (op->ne[0] % 4 == 0 && ggml_is_contiguous_1(op->src[0]));
+        case GGML_OP_ROPE:
+            return true;
+        case GGML_OP_IM2COL:
+            return ggml_is_contiguous(op->src[1]) && op->src[1]->type == GGML_TYPE_F32 && (op->type == GGML_TYPE_F16 || op->type == GGML_TYPE_F32);
+        case GGML_OP_POOL_1D:
+            return false;
+        case GGML_OP_UPSCALE:
+            return op->src[0]->type == GGML_TYPE_F32 && op->op_params[0] == GGML_SCALE_MODE_NEAREST;
+        case GGML_OP_POOL_2D:
+            return op->src[0]->type == GGML_TYPE_F32;
+        case GGML_OP_PAD:
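+            // only trailing padding is supported - the leading pads (op_params 0, 2, 4, 6) must be zero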
+            return (ggml_get_op_params_i32(op, 0) == 0) && (ggml_get_op_params_i32(op, 2) == 0) &&
+                   (ggml_get_op_params_i32(op, 4) == 0) && (ggml_get_op_params_i32(op, 6) == 0);
+        case GGML_OP_PAD_REFLECT_1D:
+        case GGML_OP_TIMESTEP_EMBEDDING:
+        case GGML_OP_ARGSORT:
+        case GGML_OP_LEAKY_RELU:
+            return op->src[0]->type == GGML_TYPE_F32;
+        case GGML_OP_ARANGE:
+            return true;
+        case GGML_OP_FLASH_ATTN_EXT:
+            // for new head sizes, add checks here
+            if (op->src[0]->ne[0] != 40 &&
+                op->src[0]->ne[0] != 64 &&
+                op->src[0]->ne[0] != 80 &&
+                op->src[0]->ne[0] != 96 &&
+                op->src[0]->ne[0] != 112 &&
+                op->src[0]->ne[0] != 128 &&
+                op->src[0]->ne[0] != 192 &&
+                op->src[0]->ne[0] != 256) {
+                return false;
+            }
+            if (op->src[0]->ne[0] == 576) {
+                // DeepSeek sizes
+                // TODO: disabled for now, until optimized
+                return false;
+            }
+            if (op->src[1]->type != op->src[2]->type) {
+                return false;
+            }
+            return has_simdgroup_mm; // TODO: over-restricted for vec-kernels
+        case GGML_OP_SSM_CONV:
+        case GGML_OP_SSM_SCAN:
+            return has_simdgroup_reduction;
+        case GGML_OP_RWKV_WKV6:
+        case GGML_OP_RWKV_WKV7:
+            return true;
+        case GGML_OP_MUL_MAT:
+        case GGML_OP_MUL_MAT_ID:
+            return has_simdgroup_reduction &&
+                (op->src[0]->type != GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F32);
+        case GGML_OP_CPY:
+        case GGML_OP_DUP:
+        case GGML_OP_CONT:
+            {
+                switch (op->src[0]->type) {
+                    case GGML_TYPE_F32:
+                        switch (op->type) {
+                            case GGML_TYPE_F32:
+                            case GGML_TYPE_F16:
+                            case GGML_TYPE_BF16:
+                            case GGML_TYPE_Q8_0:
+                            case GGML_TYPE_Q4_0:
+                            case GGML_TYPE_Q4_1:
+                            case GGML_TYPE_Q5_0:
+                            case GGML_TYPE_Q5_1:
+                            case GGML_TYPE_IQ4_NL:
+                            case GGML_TYPE_I32:
+                                return true;
+                            default:
+                                return false;
+                        }
+                    case GGML_TYPE_F16:
+                        switch (op->type) {
+                            case GGML_TYPE_F32:
+                            case GGML_TYPE_F16:
+                                return true;
+                            default:
+                                return false;
+                        }
+                    case GGML_TYPE_BF16:
+                        switch (op->type) {
+                            case GGML_TYPE_F32:
+                            case GGML_TYPE_BF16:
+                                return true;
+                            default:
+                                return false;
+                        }
+                    case GGML_TYPE_Q4_0:
+                    case GGML_TYPE_Q4_1:
+                    case GGML_TYPE_Q5_0:
+                    case GGML_TYPE_Q5_1:
+                    case GGML_TYPE_Q8_0:
+                        switch (op->type) {
+                            case GGML_TYPE_F32:
+                            case GGML_TYPE_F16:
+                                return true;
+                            default:
+                                return false;
+                        }
+                    case GGML_TYPE_I32:
+                        return op->type == GGML_TYPE_F32;
+                    default:
+                        return false;
+                };
+            }
+        case GGML_OP_GET_ROWS:
+            {
+                return op->ne[3] == 1;
+            }
+        case GGML_OP_SET_ROWS:
+            {
+                if (op->src[0]->type != GGML_TYPE_F32) {
+                    return false;
+                }
+
+                switch (op->type) {
+                    case GGML_TYPE_F32:
+                    case GGML_TYPE_F16:
+                    case GGML_TYPE_BF16:
+                    case GGML_TYPE_Q8_0:
+                    case GGML_TYPE_Q4_0:
+                    case GGML_TYPE_Q4_1:
+                    case GGML_TYPE_Q5_0:
+                    case GGML_TYPE_Q5_1:
+                    case GGML_TYPE_IQ4_NL:
+                        return true;
+                    default:
+                        return false;
+                };
+            }
+        default:
+            return false;
+    }
+}
+
+const struct ggml_metal_device_props * ggml_metal_device_get_props(ggml_metal_device_t dev) {
+    return &dev->props;
+}
+
+//
+// device buffers
+//
+
+// max memory buffers that can be mapped to the device
+#define GGML_METAL_MAX_BUFFERS 64
+
+struct ggml_metal_buffer_wrapper {
+    void   * data;
+    size_t   size;
+
+    id<MTLBuffer> metal;
+};
+
+struct ggml_metal_buffer {
+    void * all_data; // TODO: https://github.com/ggml-org/llama.cpp/pull/15985
+    size_t all_size;
+
+    // if false, the Metal buffer data is allocated in private GPU memory and is not shared with the host
+    bool is_shared;
+
+    // multiple buffers are used only to avoid the maximum buffer size limitation when using mmap
+    int n_buffers;
+    struct ggml_metal_buffer_wrapper buffers[GGML_METAL_MAX_BUFFERS];
+
+    bool use_residency_sets;
+
+    // optional MTLResidencySet
+    // note: cannot use "id<MTLResidencySet>" explicitly here because it is not available on certain OSes
+    id rset;
+
+    // pointers to global device objects
+    id<MTLDevice> device;
+    id<MTLCommandQueue> queue;
+};
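+
+// typical lifecycle: create with ggml_metal_buffer_init() or ggml_metal_buffer_map(), resolve tensors with ggml_metal_buffer_get_id(), release with ggml_metal_buffer_free()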
+
+static void ggml_metal_log_allocated_size(id<MTLDevice> device, size_t size_aligned) {
+#ifndef GGML_METAL_NDEBUG
+#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
+    if (@available(macOS 10.12, iOS 16.0, *)) {
+        GGML_LOG_DEBUG("%s: allocated buffer, size = %8.2f MiB, (%8.2f / %8.2f)\n",
+                __func__,
+                size_aligned / 1024.0 / 1024.0,
+                device.currentAllocatedSize / 1024.0 / 1024.0,
+                device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
+
+        if (device.currentAllocatedSize > device.recommendedMaxWorkingSetSize) {
+            GGML_LOG_WARN("%s: warning: current allocated size is greater than the recommended max working set size\n", __func__);
+        }
+    } else {
+        GGML_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, (%8.2f)\n",
+                __func__,
+                size_aligned / 1024.0 / 1024.0,
+                device.currentAllocatedSize / 1024.0 / 1024.0);
+    }
+#endif
+#endif
+    GGML_UNUSED(device);
+    GGML_UNUSED(size_aligned);
+}
+
+// rset init
+static bool ggml_metal_buffer_rset_init(ggml_metal_buffer_t buf) {
+    buf->rset = nil;
+
+    if (!buf->use_residency_sets) {
+        return true;
+    }
+
+#if defined(GGML_METAL_HAS_RESIDENCY_SETS)
+    if (@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, *)) {
+        MTLResidencySetDescriptor * desc = [[MTLResidencySetDescriptor alloc] init];
+        desc.label = @"ggml_metal";
+        desc.initialCapacity = buf->n_buffers;
+
+        NSError * error;
+        buf->rset = [buf->device newResidencySetWithDescriptor:desc error:&error];
+        if (error) {
+            GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
+            [desc release];
+            return false;
+        }
+
+        [desc release];
+
+        for (int i = 0; i < buf->n_buffers; i++) {
+            [buf->rset addAllocation:buf->buffers[i].metal];
+        }
+
+        [buf->rset commit];
+        [buf->rset requestResidency];
+
+        return true;
+    }
+#endif
+
+    return true;
+}
+
+// rset free
+static void ggml_metal_buffer_rset_free(ggml_metal_buffer_t buf) {
+#if defined(GGML_METAL_HAS_RESIDENCY_SETS)
+    if (@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, *)) {
+        if (buf->rset) {
+            [buf->rset endResidency];
+            [buf->rset removeAllAllocations];
+            [buf->rset release];
+        }
+    }
+#else
+    GGML_UNUSED(buf);
+#endif
+}
+
+static void * ggml_metal_host_malloc(size_t n) {
+    void * data = NULL;
+
+#if TARGET_OS_OSX
+    kern_return_t err = vm_allocate((vm_map_t) mach_task_self(), (void *) &data, n, VM_FLAGS_ANYWHERE);
+    if (err != KERN_SUCCESS) {
+        GGML_LOG_ERROR("%s: error: vm_allocate failed\n", __func__);
+        return NULL;
+    }
+#else
+    const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n);
+    if (result != 0) {
+        GGML_LOG_ERROR("%s: error: posix_memalign failed\n", __func__);
+        return NULL;
+    }
+#endif
+
+    return data;
+}
+
+ggml_metal_buffer_t ggml_metal_buffer_init(ggml_metal_device_t dev, size_t size, bool shared) {
+    ggml_metal_buffer_t res = calloc(1, sizeof(struct ggml_metal_buffer));
+
+    const size_t size_page = sysconf(_SC_PAGESIZE);
+
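+    // round the allocation up to a whole number of pages - host memory shared with Metal must be page-aligned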
+    size_t size_aligned = size;
+    if ((size_aligned % size_page) != 0) {
+        size_aligned += (size_page - (size_aligned % size_page));
+    }
+
+    const struct ggml_metal_device_props * props_dev = ggml_metal_device_get_props(dev);
+
+    shared = shared && props_dev->use_shared_buffers;
+
+    // allocate shared buffer if the device supports it and it is required by the buffer type
+    if (shared) {
+        res->all_data = ggml_metal_host_malloc(size_aligned);
+        res->is_shared = true;
+    } else {
+        // dummy, non-NULL value - we'll populate this after creating the Metal buffer below
+        res->all_data = (void *) 0x000000400ULL;
+        res->is_shared = false;
+    }
+    res->all_size = size_aligned;
+
+    res->device = ggml_metal_device_get_obj(dev);
+    res->queue  = ggml_metal_device_get_queue(dev);
+
+    res->n_buffers = 1;
+
+    if (res->all_data != NULL) {
+        res->buffers[0].size  = size;
+        res->buffers[0].metal = nil;
+
+        if (size_aligned > 0) {
+            if (props_dev->use_shared_buffers && shared) {
+                res->buffers[0].metal = [res->device newBufferWithBytesNoCopy:res->all_data
+                                                                  length:size_aligned
+                                                                 options:MTLResourceStorageModeShared
+                                                             deallocator:nil];
+            } else {
+                res->buffers[0].metal = [res->device newBufferWithLength:size_aligned options:MTLResourceStorageModePrivate];
+
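+                // private buffers have no host pointer, so expose the GPU virtual address instead and use it for tensor offset computations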
+                res->all_data = (void *) (res->buffers[0].metal.gpuAddress);
+            }
+        }
+
+        res->buffers[0].data = res->all_data;
+    }
+
+    if (size_aligned > 0 && (res->all_data == NULL || res->buffers[0].metal == nil)) {
+        GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
+        free(res);
+        return NULL;
+    }
+
+    res->use_residency_sets = props_dev->use_residency_sets;
+
+    if (!ggml_metal_buffer_rset_init(res)) {
+        GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__);
+        free(res);
+        return NULL;
+    }
+
+    //ggml_metal_log_allocated_size(device, size_aligned);
+
+    return res;
+}
+
+ggml_metal_buffer_t ggml_metal_buffer_map(ggml_metal_device_t dev, void * ptr, size_t size, size_t max_tensor_size) {
+    ggml_metal_buffer_t res = calloc(1, sizeof(struct ggml_metal_buffer));
+
+    res->all_data = ptr;
+    res->all_size = size;
+
+    res->is_shared = true;
+
+    res->n_buffers = 0;
+
+    const size_t size_page = sysconf(_SC_PAGESIZE);
+
+    // page-align the data ptr
+    {
+        const uintptr_t offs = (uintptr_t) ptr % size_page;
+        ptr  = (void *) ((char *) ptr - offs);
+        size += offs;
+    }
+
+    size_t size_aligned = size;
+    if ((size_aligned % size_page) != 0) {
+        size_aligned += (size_page - (size_aligned % size_page));
+    }
+
+    res->device = ggml_metal_device_get_obj(dev);
+    res->queue  = ggml_metal_device_get_queue(dev);
+
+    const struct ggml_metal_device_props * props_dev = ggml_metal_device_get_props(dev);
+
+    // the buffer fits into the max buffer size allowed by the device
+    if (size_aligned <= props_dev->max_buffer_size) {
+        res->buffers[res->n_buffers].data  = ptr;
+        res->buffers[res->n_buffers].size  = size;
+        res->buffers[res->n_buffers].metal = nil;
+
+        if (size_aligned > 0) {
+            res->buffers[res->n_buffers].metal = [res->device newBufferWithBytesNoCopy:ptr length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];
+
+            if (res->buffers[res->n_buffers].metal == nil) {
+                GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
+                free(res);
+                return NULL;
+            }
+        }
+
+        ggml_metal_log_allocated_size(res->device, size_aligned);
+
+        ++res->n_buffers;
+    } else {
+        // this overlap between the views will guarantee that the tensor with the maximum size will fully fit into
+        // one of the views
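+        // e.g. with max_buffer_size = 16 GiB and max_tensor_size = 1 GiB, 16 GiB views are created roughly every 15 GiB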
+        const size_t size_ovlp = ((max_tensor_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case
+        const size_t size_step = props_dev->max_buffer_size - size_ovlp;
+        const size_t size_view = props_dev->max_buffer_size;
+
+        for (size_t i = 0; i < size; i += size_step) {
+            const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i);
+
+            res->buffers[res->n_buffers].data  = (void *) ((uint8_t *) ptr + i);
+            res->buffers[res->n_buffers].size  = size_step_aligned;
+            res->buffers[res->n_buffers].metal = nil;
+
+            if (size_step_aligned > 0) {
+                res->buffers[res->n_buffers].metal = [res->device newBufferWithBytesNoCopy:(void *) ((uint8_t *) ptr + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];
+
+                if (res->buffers[res->n_buffers].metal == nil) {
+                    GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_step_aligned / 1024.0 / 1024.0);
+                    free(res);
+                    return NULL;
+                }
+            }
+
+            ggml_metal_log_allocated_size(res->device, size_step_aligned);
+
+            if (i + size_step < size) {
+                GGML_LOG_INFO("\n");
+            }
+
+            ++res->n_buffers;
+        }
+    }
+
+    res->use_residency_sets = props_dev->use_residency_sets;
+
+    if (!ggml_metal_buffer_rset_init(res)) {
+        GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__);
+        free(res);
+        return NULL;
+    }
+
+    return res;
+}
+
+void ggml_metal_buffer_free(ggml_metal_buffer_t buf) {
+    for (int i = 0; i < buf->n_buffers; i++) {
+        [buf->buffers[i].metal release];
+    }
+
+    ggml_metal_buffer_rset_free(buf);
+
+    if (buf->is_shared) {
+#if TARGET_OS_OSX
+        vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)buf->all_data, buf->all_size);
+#else
+        free(buf->all_data);
+#endif
+    }
+
+    free(buf);
+}
+
+void * ggml_metal_buffer_get_base(ggml_metal_buffer_t buf) {
+    return buf->all_data;
+}
+
+bool ggml_metal_buffer_is_shared(ggml_metal_buffer_t buf) {
+    return buf->is_shared;
+}
+
+void ggml_metal_buffer_memset_tensor(ggml_metal_buffer_t buf, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+    if (buf->is_shared) {
+        memset((char *)tensor->data + offset, value, size);
+        return;
+    }
+
+    @autoreleasepool {
+        // dst
+        struct ggml_metal_buffer_id bid_dst = ggml_metal_buffer_get_id(buf, tensor);
+        bid_dst.offs += offset;
+
+        id<MTLCommandQueue>  queue   = buf->queue;
+        id<MTLCommandBuffer> cmd_buf = [queue commandBufferWithUnretainedReferences];
+
+        {
+            id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+            [encoder fillBuffer:bid_dst.metal
+                          range:NSMakeRange(bid_dst.offs, size)
+                          value:value];
+
+            [encoder endEncoding];
+        }
+
+        [cmd_buf commit];
+        [cmd_buf waitUntilCompleted];
+    }
+}
+
+void ggml_metal_buffer_set_tensor(ggml_metal_buffer_t buf, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+    if (buf->is_shared) {
+        memcpy((char *)tensor->data + offset, data, size);
+        return;
+    }
+
+    @autoreleasepool {
+        // src
+        void * data_ptr = (void *)(uintptr_t) data; // "const cast" the src data
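+        // wrap the host data in a temporary no-copy shared buffer and blit it into the destination device buffer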
+        id<MTLBuffer> buf_src = [buf->device newBufferWithBytesNoCopy:data_ptr
+                                                               length:size
+                                                              options:MTLResourceStorageModeShared
+                                                          deallocator:nil];
+
+        // dst
+        struct ggml_metal_buffer_id bid_dst = ggml_metal_buffer_get_id(buf, tensor);
+        bid_dst.offs += offset;
+
+        // note: for experimentation purposes, here we use a semaphore to wait for the copy to complete
+        //       this is an alternative to waitUntilCompleted which should be faster, but it doesn't seem to make much difference
+        dispatch_semaphore_t completion_semaphore = dispatch_semaphore_create(0);
+
+        id<MTLCommandQueue>  queue   = buf->queue;
+        id<MTLCommandBuffer> cmd_buf = [queue commandBufferWithUnretainedReferences];
+
+        {
+            id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+            [encoder copyFromBuffer:buf_src
+                       sourceOffset:0
+                           toBuffer:bid_dst.metal
+                  destinationOffset:bid_dst.offs
+                               size:size];
+
+            [encoder endEncoding];
+        }
+
+        [cmd_buf addCompletedHandler:^(id<MTLCommandBuffer> cb) {
+            // TODO: can check for errors here
+            GGML_UNUSED(cb);
+
+            dispatch_semaphore_signal(completion_semaphore);
+        }];
+
+        [cmd_buf commit];
+
+        dispatch_semaphore_wait(completion_semaphore, DISPATCH_TIME_FOREVER);
+        dispatch_release(completion_semaphore);
+
+        //[cmd_buf waitUntilCompleted];
+    }
+}
+
+void ggml_metal_buffer_get_tensor(ggml_metal_buffer_t buf, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+    if (buf->is_shared) {
+        memcpy(data, (const char *)tensor->data + offset, size);
+        return;
+    }
+
+    @autoreleasepool {
+        // src
+        struct ggml_metal_buffer_id bid_src = ggml_metal_buffer_get_id(buf, tensor);
+        bid_src.offs += offset;
+
+        // dst
+        id<MTLBuffer> buf_dst = [buf->device newBufferWithBytesNoCopy:data
+                                                               length:size
+                                                              options:MTLResourceStorageModeShared
+                                                          deallocator:nil];
+
+        id<MTLCommandQueue>  queue   = buf->queue;
+        id<MTLCommandBuffer> cmd_buf = [queue commandBufferWithUnretainedReferences];
+
+        {
+            id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+            [encoder copyFromBuffer:bid_src.metal
+                       sourceOffset:bid_src.offs
+                           toBuffer:buf_dst
+                  destinationOffset:0
+                               size:size];
+
+            [encoder endEncoding];
+        }
+
+        [cmd_buf commit];
+        [cmd_buf waitUntilCompleted];
+    }
+}
+
+void ggml_metal_buffer_clear(ggml_metal_buffer_t buf, uint8_t value) {
+    if (buf->is_shared) {
+        memset(buf->all_data, value, buf->all_size);
+        return;
+    }
+
+    @autoreleasepool {
+        id<MTLCommandQueue>  queue   = buf->queue;
+        id<MTLCommandBuffer> cmd_buf = [queue commandBufferWithUnretainedReferences];
+
+        {
+            id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+            [encoder fillBuffer:buf->buffers[0].metal
+                          range:NSMakeRange(0, buf->buffers[0].size)
+                          value:value];
+
+            [encoder endEncoding];
+        }
+
+        [cmd_buf commit];
+        [cmd_buf waitUntilCompleted];
+    }
+}
+
+struct ggml_metal_buffer_id ggml_metal_buffer_get_id(ggml_metal_buffer_t buf, const struct ggml_tensor * t) {
+    struct ggml_metal_buffer_id res = { nil, 0 };
+
+    const int64_t tsize = ggml_nbytes(t);
+
+    // find the view that contains the tensor fully
+    for (int i = 0; i < buf->n_buffers; ++i) {
+        const int64_t ioffs = (int64_t) t->data - (int64_t) buf->buffers[i].data;
+
+        //GGML_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, buf->buffers[%d].size = %10ld\n", ioffs, tsize, ioffs + tsize, i, buf->buffers[i].size);
+        if (ioffs >= 0 && ioffs + tsize <= (int64_t) buf->buffers[i].size) {
+            res.metal = buf->buffers[i].metal;
+            res.offs  = (size_t) ioffs;
+
+            //GGML_LOG_INFO("%s: tensor '%16s', offs = %8ld\n", __func__, t->name, *offs);
+
+            return res;
+        }
+    }
+
+    GGML_LOG_ERROR("%s: error: tensor '%s' buffer is nil\n", __func__, t->name);
+
+    return res;
+}
index 651943fa923804677a9d093f7af962f500f07684..0776bb6485cc9741c0d0296f363103a7dd799601 100644 (file)
--- a/ggml/src/ggml-metal/ggml-metal-impl.h
+++ b/ggml/src/ggml-metal/ggml-metal-impl.h
@@ -165,6 +165,16 @@ typedef struct {
     uint64_t nb3;
 } ggml_metal_kargs_repeat;
 
+typedef struct {
+    float scale;
+    float bias;
+} ggml_metal_kargs_scale;
+
+typedef struct {
+    float min;
+    float max;
+} ggml_metal_kargs_clamp;
+
 typedef struct {
     int64_t  ne00;
     int64_t  ne01;
@@ -453,7 +463,7 @@ typedef struct {
     uint64_t nb00;
     uint64_t nb01;
     uint64_t nb02;
-    int32_t  n_groups;
+    int32_t  ngrp;
     float    eps;
 } ggml_metal_kargs_group_norm;
 
@@ -506,14 +516,6 @@ typedef struct {
     uint64_t nb01;
     uint64_t nb02;
     uint64_t nb03;
-    int64_t  ne10;
-    int64_t  ne11;
-    int64_t  ne12;
-    int64_t  ne13;
-    uint64_t nb10;
-    uint64_t nb11;
-    uint64_t nb12;
-    uint64_t nb13;
     int64_t  ne0;
     int64_t  ne1;
     int64_t  ne2;
@@ -547,12 +549,6 @@ typedef struct {
     int32_t  n_head_log2;
 } ggml_metal_kargs_soft_max;
 
-typedef struct {
-    int64_t  ne00;
-    int64_t  ne01;
-    int      n_past;
-} ggml_metal_kargs_diag_mask_inf;
-
 typedef struct {
     int64_t  ne00;
     int64_t  ne01;
@@ -579,7 +575,7 @@ typedef struct {
     int64_t  n_group;
     int64_t  n_seq_tokens;
     int64_t  n_seqs;
-    int64_t  s_off;
+    uint64_t s_off;
     uint64_t nb01;
     uint64_t nb02;
     uint64_t nb03;
@@ -719,7 +715,12 @@ typedef struct {
     int64_t  IW;
     int64_t  OH;
     int64_t  OW;
-    int64_t  parallel_elements;
+    int64_t  np;
 } ggml_metal_kargs_pool_2d;
 
+typedef struct {
+     int64_t ne00;
+    uint64_t nb01;
+} ggml_metal_kargs_argmax;
+
 #endif // GGML_METAL_IMPL
diff --git a/ggml/src/ggml-metal/ggml-metal-ops.cpp b/ggml/src/ggml-metal/ggml-metal-ops.cpp
new file mode 100644 (file)
index 0000000..839c168
--- /dev/null
+++ b/ggml/src/ggml-metal/ggml-metal-ops.cpp
@@ -0,0 +1,3188 @@
+#include "ggml-metal-ops.h"
+
+#include "ggml.h"
+#include "ggml-impl.h"
+#include "ggml-backend-impl.h"
+
+#include "ggml-metal-impl.h"
+#include "ggml-metal-common.h"
+#include "ggml-metal-device.h"
+
+#include <cassert>
+#include <algorithm>
+
+static ggml_metal_buffer_id ggml_metal_get_buffer_id(const ggml_tensor * t) {
+    if (!t) {
+        return { nullptr, 0 };
+    }
+
+    ggml_backend_buffer_t buffer = t->view_src ? t->view_src->buffer : t->buffer;
+
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t) buffer->context;
+
+    return ggml_metal_buffer_get_id(ctx, t);
+}
+
+struct ggml_metal_op {
+    ggml_metal_device_t  dev;
+    ggml_metal_library_t lib;
+    ggml_metal_encoder_t enc;
+    ggml_mem_ranges_t    mem_ranges;
+
+    ggml_cgraph * gf;
+
+    int idx_start;
+    int idx_end;
+
+    bool use_fusion;
+    bool use_concurrency;
+    bool use_capture;
+
+    int debug_graph;
+    int debug_fusion;
+};
+
+ggml_metal_op_t ggml_metal_op_init(
+        ggml_metal_device_t dev,
+        ggml_metal_cmd_buf_t cmd_buf,
+        ggml_cgraph * gf,
+        int idx_start,
+        int idx_end,
+        bool use_fusion,
+        bool use_concurrency,
+        bool use_capture,
+        int debug_graph,
+        int debug_fusion) {
+    ggml_metal_op_t res = new ggml_metal_op();
+
+    *res = {
+        /*.dev             =*/ dev,
+        /*.lib             =*/ ggml_metal_device_get_library(dev),
+        /*.enc             =*/ ggml_metal_encoder_init(cmd_buf, use_concurrency),
+        /*.mem_ranges      =*/ ggml_mem_ranges_init(debug_graph),
+        /*.gf              =*/ gf,
+        /*.idx_start       =*/ idx_start,
+        /*.idx_end         =*/ idx_end,
+        /*.use_fusion      =*/ use_fusion,
+        /*.use_concurrency =*/ use_concurrency,
+        /*.use_capture     =*/ use_capture,
+        /*.debug_graph     =*/ debug_graph,
+        /*.debug_fusion    =*/ debug_fusion,
+    };
+
+    return res;
+}
+
+void ggml_metal_op_free(ggml_metal_op_t ctx) {
+    ggml_metal_encoder_end_encoding(ctx->enc);
+    ggml_metal_encoder_free(ctx->enc);
+    ggml_mem_ranges_free(ctx->mem_ranges);
+
+    delete ctx;
+}
+
+static bool ggml_metal_op_concurrency_reset(ggml_metal_op_t ctx) {
+    if (!ctx->mem_ranges) {
+        return true;
+    }
+
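+    // serialize previously encoded work with a memory barrier and start tracking a fresh set of ranges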
+    ggml_metal_encoder_memory_barrier(ctx->enc);
+
+    ggml_mem_ranges_reset(ctx->mem_ranges);
+
+    return true;
+}
+
+static bool ggml_metal_op_concurrency_check(ggml_metal_op_t ctx, const ggml_tensor * node) {
+    if (!ctx->mem_ranges) {
+        return false;
+    }
+
+    return ggml_mem_ranges_check(ctx->mem_ranges, node);
+}
+
+static bool ggml_metal_op_concurrency_add(ggml_metal_op_t ctx, const ggml_tensor * node) {
+    if (!ctx->mem_ranges) {
+        return true;
+    }
+
+    return ggml_mem_ranges_add(ctx->mem_ranges, node);
+}
+
+static int ggml_metal_op_encode_impl(ggml_metal_op_t ctx, int idx) {
+    struct ggml_cgraph * gf = ctx->gf;
+
+    struct ggml_tensor ** nodes = ggml_graph_nodes(gf) + idx;
+    struct ggml_tensor *  node  = nodes[0];
+
+    //GGML_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, idx, ggml_op_name(node->op));
+
+    if (ggml_is_empty(node)) {
+        return 1;
+    }
+
+    switch (node->op) {
+        case GGML_OP_NONE:
+        case GGML_OP_RESHAPE:
+        case GGML_OP_VIEW:
+        case GGML_OP_TRANSPOSE:
+        case GGML_OP_PERMUTE:
+            {
+                // noop -> next node
+            } return 1;
+        default:
+            {
+            } break;
+    }
+
+    if (!ggml_metal_device_supports_op(ctx->dev, node)) {
+        GGML_LOG_ERROR("%s: error: unsupported op '%s'\n", __func__, ggml_op_desc(node));
+        GGML_ABORT("unsupported op");
+    }
+
+    int n_fuse = 1;
+
+    // check if the current node can run concurrently with other nodes before it
+    // the condition is that:
+    //  - the current node cannot write to any previous src or dst ranges
+    //  - the current node cannot read from any previous dst ranges
+    //
+    // if the condition is not satisfied, we put a memory barrier and clear all ranges
+    // otherwise, we add the new ranges to the encoding context and process the node concurrently
+    //
+    {
+        const bool is_concurrent = ggml_metal_op_concurrency_check(ctx, node);
+
+        if (!is_concurrent) {
+            ggml_metal_op_concurrency_reset(ctx);
+        }
+
+        if (ctx->debug_graph > 0) {
+            GGML_LOG_DEBUG("%s: node[%5d] - %-12s %s\n", __func__, idx, ggml_op_name(node->op), is_concurrent ? "(concurrent)" : "");
+        }
+        if (ctx->debug_graph > 1) {
+            GGML_TENSOR_LOCALS( int64_t, ne0, node->src[0], ne);
+            GGML_TENSOR_LOCALS(uint64_t, nb0, node->src[0], nb);
+            GGML_TENSOR_LOCALS( int64_t, ne1, node->src[1], ne);
+            GGML_TENSOR_LOCALS(uint64_t, nb1, node->src[1], nb);
+            GGML_TENSOR_LOCALS( int64_t, ne,  node,         ne);
+            GGML_TENSOR_LOCALS(uint64_t, nb,  node,         nb);
+
+            if (node->src[0]) {
+                GGML_LOG_DEBUG("%s: src0 - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(node->src[0]->type), ne00, ne01, ne02, ne03, nb00, nb01, nb02, nb03,
+                        ggml_is_contiguous(node->src[0]), node->src[0]->name);
+            }
+            if (node->src[1]) {
+                GGML_LOG_DEBUG("%s: src1 - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(node->src[1]->type), ne10, ne11, ne12, ne13, nb10, nb11, nb12, nb13,
+                        ggml_is_contiguous(node->src[1]), node->src[1]->name);
+            }
+            if (node) {
+                GGML_LOG_DEBUG("%s: node  - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(node->type), ne0, ne1, ne2, ne3, nb0, nb1, nb2, nb3,
+                        node->name);
+            }
+        }
+    }
+
+    switch (node->op) {
+        case GGML_OP_CONCAT:
+            {
+                n_fuse = ggml_metal_op_concat(ctx, idx);
+            } break;
+        case GGML_OP_ADD:
+        case GGML_OP_SUB:
+        case GGML_OP_MUL:
+        case GGML_OP_DIV:
+            {
+                n_fuse = ggml_metal_op_bin(ctx, idx);
+            } break;
+        case GGML_OP_ADD_ID:
+            {
+                n_fuse = ggml_metal_op_add_id(ctx, idx);
+            } break;
+        case GGML_OP_REPEAT:
+            {
+                n_fuse = ggml_metal_op_repeat(ctx, idx);
+            } break;
+        case GGML_OP_ACC:
+            {
+                n_fuse = ggml_metal_op_acc(ctx, idx);
+            } break;
+        case GGML_OP_SCALE:
+            {
+                n_fuse = ggml_metal_op_scale(ctx, idx);
+            } break;
+        case GGML_OP_CLAMP:
+            {
+                n_fuse = ggml_metal_op_clamp(ctx, idx);
+            } break;
+        case GGML_OP_SQR:
+        case GGML_OP_SQRT:
+        case GGML_OP_SIN:
+        case GGML_OP_COS:
+        case GGML_OP_LOG:
+        case GGML_OP_UNARY:
+            {
+                n_fuse = ggml_metal_op_unary(ctx, idx);
+            } break;
+        case GGML_OP_GLU:
+            {
+                n_fuse = ggml_metal_op_glu(ctx, idx);
+            } break;
+        case GGML_OP_SUM_ROWS:
+        case GGML_OP_MEAN:
+            {
+                n_fuse = ggml_metal_op_sum_rows(ctx, idx);
+            } break;
+        case GGML_OP_SOFT_MAX:
+            {
+                n_fuse = ggml_metal_op_soft_max(ctx, idx);
+            } break;
+        case GGML_OP_SSM_CONV:
+            {
+                n_fuse = ggml_metal_op_ssm_conv(ctx, idx);
+            } break;
+        case GGML_OP_SSM_SCAN:
+            {
+                n_fuse = ggml_metal_op_ssm_scan(ctx, idx);
+            } break;
+        case GGML_OP_RWKV_WKV6:
+        case GGML_OP_RWKV_WKV7:
+            {
+                n_fuse = ggml_metal_op_rwkv(ctx, idx);
+            } break;
+        case GGML_OP_MUL_MAT:
+            {
+                n_fuse = ggml_metal_op_mul_mat(ctx, idx);
+            } break;
+        case GGML_OP_MUL_MAT_ID:
+            {
+                n_fuse = ggml_metal_op_mul_mat_id(ctx, idx);
+            } break;
+        case GGML_OP_GET_ROWS:
+            {
+                n_fuse = ggml_metal_op_get_rows(ctx, idx);
+            } break;
+        case GGML_OP_SET_ROWS:
+            {
+                n_fuse = ggml_metal_op_set_rows(ctx, idx);
+            } break;
+        case GGML_OP_RMS_NORM:
+            {
+                n_fuse = ggml_metal_op_rms_norm(ctx, idx);
+            } break;
+        case GGML_OP_L2_NORM:
+            {
+                n_fuse = ggml_metal_op_l2_norm(ctx, idx);
+            } break;
+        case GGML_OP_GROUP_NORM:
+            {
+                n_fuse = ggml_metal_op_group_norm(ctx, idx);
+            } break;
+        case GGML_OP_NORM:
+            {
+                n_fuse = ggml_metal_op_norm(ctx, idx);
+            } break;
+        case GGML_OP_ROPE:
+            {
+                n_fuse = ggml_metal_op_rope(ctx, idx);
+            } break;
+        case GGML_OP_IM2COL:
+            {
+                n_fuse = ggml_metal_op_im2col(ctx, idx);
+            } break;
+        case GGML_OP_CONV_TRANSPOSE_1D:
+            {
+                n_fuse = ggml_metal_op_conv_transpose_1d(ctx, idx);
+            } break;
+        case GGML_OP_UPSCALE:
+            {
+                n_fuse = ggml_metal_op_upscale(ctx, idx);
+            } break;
+        case GGML_OP_PAD:
+            {
+                n_fuse = ggml_metal_op_pad(ctx, idx);
+            } break;
+        case GGML_OP_PAD_REFLECT_1D:
+            {
+                n_fuse = ggml_metal_op_pad_reflect_1d(ctx, idx);
+            } break;
+        case GGML_OP_ARANGE:
+            {
+                n_fuse = ggml_metal_op_arange(ctx, idx);
+            } break;
+        case GGML_OP_TIMESTEP_EMBEDDING:
+            {
+                n_fuse = ggml_metal_op_timestep_embedding(ctx, idx);
+            } break;
+        case GGML_OP_ARGSORT:
+            {
+                n_fuse = ggml_metal_op_argsort(ctx, idx);
+            } break;
+        case GGML_OP_LEAKY_RELU:
+            {
+                n_fuse = ggml_metal_op_leaky_relu(ctx, idx);
+            } break;
+        case GGML_OP_FLASH_ATTN_EXT:
+            {
+                n_fuse = ggml_metal_op_flash_attn_ext(ctx, idx);
+            } break;
+        case GGML_OP_DUP:
+        case GGML_OP_CPY:
+        case GGML_OP_CONT:
+            {
+                n_fuse = ggml_metal_op_cpy(ctx, idx);
+            } break;
+        case GGML_OP_POOL_2D:
+            {
+                n_fuse = ggml_metal_op_pool_2d(ctx, idx);
+            } break;
+        case GGML_OP_ARGMAX:
+            {
+                n_fuse = ggml_metal_op_argmax(ctx, idx);
+            } break;
+        default:
+            {
+                GGML_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, idx, ggml_op_name(node->op));
+                GGML_ABORT("fatal error");
+            }
+    }
+
+    if (ctx->debug_graph > 0) {
+        if (n_fuse > 1) {
+            GGML_LOG_DEBUG("%s:               fuse %d ops\n", __func__, n_fuse);
+        }
+    }
+
+    // update the mem ranges in the encoding context
+    for (int i = 0; i < n_fuse; ++i) {
+        if (!ggml_metal_op_concurrency_add(ctx, nodes[i])) {
+            ggml_metal_op_concurrency_reset(ctx);
+        }
+    }
+
+    return n_fuse;
+}
+
+int ggml_metal_op_encode(ggml_metal_op_t ctx, int idx) {
+    if (ctx->use_capture) {
+        ggml_metal_encoder_debug_group_push(ctx->enc, ggml_op_desc(ggml_graph_node(ctx->gf, idx)));
+    }
+
+    int res = ggml_metal_op_encode_impl(ctx, idx);
+    if (idx + res > ctx->idx_end) {
+        GGML_ABORT("fusion error: nodes spanning multiple encoders have been fused. this indicates a bug in the fusion logic %s",
+                "https://github.com/ggml-org/llama.cpp/pull/14849");
+    }
+
+    if (ctx->use_capture) {
+        ggml_metal_encoder_debug_group_pop(ctx->enc);
+    }
+
+    return res;
+}
+
+int ggml_metal_op_concat(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb,  op,         nb);
+
+    const int32_t dim = ((const int32_t *) op->op_params)[0];
+
+    ggml_metal_kargs_concat args = {
+        /*.ne00 =*/ ne00,
+        /*.ne01 =*/ ne01,
+        /*.ne02 =*/ ne02,
+        /*.ne03 =*/ ne03,
+        /*.nb00 =*/ nb00,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.nb03 =*/ nb03,
+        /*.ne10 =*/ ne10,
+        /*.ne11 =*/ ne11,
+        /*.ne12 =*/ ne12,
+        /*.ne13 =*/ ne13,
+        /*.nb10 =*/ nb10,
+        /*.nb11 =*/ nb11,
+        /*.nb12 =*/ nb12,
+        /*.nb13 =*/ nb13,
+        /*.ne0  =*/ ne0,
+        /*.ne1  =*/ ne1,
+        /*.ne2  =*/ ne2,
+        /*.ne3  =*/ ne3,
+        /*.nb0  =*/ nb0,
+        /*.nb1  =*/ nb1,
+        /*.nb2  =*/ nb2,
+        /*.nb3  =*/ nb3,
+        /*.dim  =*/ dim,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_base(lib, GGML_OP_CONCAT);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         3);
+
+    const int nth = std::min(1024, ne0);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_repeat(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_repeat(lib, op->type);
+
+    ggml_metal_kargs_repeat args = {
+        /*.ne00 =*/ ne00,
+        /*.ne01 =*/ ne01,
+        /*.ne02 =*/ ne02,
+        /*.ne03 =*/ ne03,
+        /*.nb00 =*/ nb00,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.nb03 =*/ nb03,
+        /*.ne0  =*/ ne0,
+        /*.ne1  =*/ ne1,
+        /*.ne2  =*/ ne2,
+        /*.ne3  =*/ ne3,
+        /*.nb0  =*/ nb0,
+        /*.nb1  =*/ nb1,
+        /*.nb2  =*/ nb2,
+        /*.nb3  =*/ nb3,
+    };
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne0);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_acc(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+    GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+    GGML_ASSERT(op->type         == GGML_TYPE_F32);
+
+    GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+    GGML_ASSERT(ggml_is_contiguous(op->src[1]));
+
+    const size_t pnb1 = ((const int32_t *) op->op_params)[0];
+    const size_t pnb2 = ((const int32_t *) op->op_params)[1];
+    const size_t pnb3 = ((const int32_t *) op->op_params)[2];
+    const size_t offs = ((const int32_t *) op->op_params)[3];
+
+    const bool inplace = (bool) ((const int32_t *) op->op_params)[4];
+
+    if (!inplace) {
+        // run a separate kernel to cpy src->dst
+        // not sure how to avoid this
+        // TODO: make a simpler cpy_bytes kernel
+
+        //const id<MTLComputePipelineState> pipeline = ctx->pipelines[GGML_METAL_PIPELINE_TYPE_CPY_F32_F32].obj;
+        ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_cpy(lib, op->src[0]->type, op->type);
+
+        ggml_metal_kargs_cpy args = {
+            /*.ne00 =*/ ne00,
+            /*.ne01 =*/ ne01,
+            /*.ne02 =*/ ne02,
+            /*.ne03 =*/ ne03,
+            /*.nb00 =*/ nb00,
+            /*.nb01 =*/ nb01,
+            /*.nb02 =*/ nb02,
+            /*.nb03 =*/ nb03,
+            /*.ne0  =*/ ne0,
+            /*.ne1  =*/ ne1,
+            /*.ne2  =*/ ne2,
+            /*.ne3  =*/ ne3,
+            /*.nb0  =*/ nb0,
+            /*.nb1  =*/ nb1,
+            /*.nb2  =*/ nb2,
+            /*.nb3  =*/ nb3,
+        };
+
+        ggml_metal_encoder_set_pipeline(enc, pipeline);
+        ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+        const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne00);
+
+        ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+
+        ggml_metal_op_concurrency_reset(ctx);
+    }
+
+    ggml_metal_kargs_bin args = {
+        /*.ne00 =*/ ne00,
+        /*.ne01 =*/ ne01,
+        /*.ne02 =*/ ne02,
+        /*.ne03 =*/ ne03,
+        /*.nb00 =*/ nb00,
+        /*.nb01 =*/ pnb1,
+        /*.nb02 =*/ pnb2,
+        /*.nb03 =*/ pnb3,
+        /*.ne10 =*/ ne10,
+        /*.ne11 =*/ ne11,
+        /*.ne12 =*/ ne12,
+        /*.ne13 =*/ ne13,
+        /*.nb10 =*/ nb10,
+        /*.nb11 =*/ nb11,
+        /*.nb12 =*/ nb12,
+        /*.nb13 =*/ nb13,
+        /*.ne0  =*/ ne0,
+        /*.ne1  =*/ ne1,
+        /*.ne2  =*/ ne2,
+        /*.ne3  =*/ ne3,
+        /*.nb0  =*/ nb0,
+        /*.nb1  =*/ pnb1,
+        /*.nb2  =*/ pnb2,
+        /*.nb3  =*/ pnb3,
+        /*.offs =*/ offs,
+        /*.o1   =*/ { 0 },
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_bin(lib, GGML_OP_ADD, 1, false);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         3);
+
+    const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne00);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne11, ne12, ne13, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_scale(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    float scale;
+    float bias;
+    memcpy(&scale, ((const int32_t *) op->op_params) + 0, sizeof(float));
+    memcpy(&bias,  ((const int32_t *) op->op_params) + 1, sizeof(float));
+
+    ggml_metal_kargs_scale args = {
+        /*.scale =*/ scale,
+        /*.bias  =*/ bias,
+    };
+
+    int64_t n = ggml_nelements(op);
+
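+    // when divisible by 4, the selected kernel variant processes 4 elements per thread, so dispatch n/4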
+    if (n % 4 == 0) {
+        n /= 4;
+    }
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_unary(lib, op);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_clamp(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    float min;
+    float max;
+    memcpy(&min, ((const int32_t *) op->op_params) + 0, sizeof(float));
+    memcpy(&max, ((const int32_t *) op->op_params) + 1, sizeof(float));
+
+    ggml_metal_kargs_clamp args = {
+        /*.min =*/ min,
+        /*.max =*/ max,
+    };
+
+    int64_t n = ggml_nelements(op);
+
+    if (n % 4 == 0) {
+        n /= 4;
+    }
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_unary(lib, op);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_unary(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    int64_t n = ggml_nelements(op);
+
+    if (n % 4 == 0) {
+        n /= 4;
+    }
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_unary(lib, op);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         1);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_glu(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    if (op->src[1]) {
+        GGML_ASSERT(ggml_are_same_shape(op->src[0], op->src[1]));
+    }
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_glu(lib, op);
+
+    const int32_t swp = ggml_get_op_params_i32(op, 1);
+    const float alpha = ggml_get_op_params_f32(op, 2);
+    const float limit = ggml_get_op_params_f32(op, 3);
+
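+    // column offsets of the two halves within src0 (used when there is no separate src1); the swap flag reverses their order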
+    const int32_t i00 = swp ? ne0 : 0;
+    const int32_t i10 = swp ? 0 : ne0;
+
+    ggml_metal_kargs_glu args = {
+        /*.ne00 =*/ ne00,
+        /*.nb01 =*/ nb01,
+        /*.ne10 =*/ op->src[1] ? ne10 : ne00,
+        /*.nb11 =*/ op->src[1] ? nb11 : nb01,
+        /*.ne0  =*/ ne0,
+        /*.nb1  =*/ nb1,
+        /*.i00  =*/ op->src[1] ? 0 : i00,
+        /*.i10  =*/ op->src[1] ? 0 : i10,
+        /*.alpha=*/ alpha,
+        /*.limit=*/ limit
+    };
+
+    const int64_t nrows = ggml_nrows(op->src[0]);
+
+    const int32_t nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne00/2);
+
+    //[encoder setComputePipelineState:pipeline];
+    //[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+    //if (src1) {
+    //    [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
+    //} else {
+    //    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
+    //}
+    //[encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
+    //[encoder setBytes:&args length:sizeof(args) atIndex:3];
+
+    //[encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    if (op->src[1]) {
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+    } else {
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 2);
+    }
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         3);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_sum_rows(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    ggml_metal_kargs_sum_rows args = {
+        /*.ne00 =*/ ne00,
+        /*.ne01 =*/ ne01,
+        /*.ne02 =*/ ne02,
+        /*.ne03 =*/ ne03,
+        /*.nb00 =*/ nb00,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.nb03 =*/ nb03,
+        /*.ne0  =*/ ne0,
+        /*.ne1  =*/ ne1,
+        /*.ne2  =*/ ne2,
+        /*.ne3  =*/ ne3,
+        /*.nb0  =*/ nb0,
+        /*.nb1  =*/ nb1,
+        /*.nb2  =*/ nb2,
+        /*.nb3  =*/ nb3,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_sum_rows(lib, op);
+
+    int nth = 32; // SIMD width
+
+    while (nth < ne00 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+        nth *= 2;
+    }
+
+    nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+    nth = std::min(nth, ne00);
+
+    const size_t smem = ggml_metal_pipeline_get_smem(pipeline);
+
+    //[encoder setComputePipelineState:pipeline];
+    //[encoder setBytes:&args length:sizeof(args) atIndex:0];
+    //[encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
+    //[encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
+    //[encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
+
+    //[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_get_rows(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_get_rows(lib, op->src[0]->type);
+
+    ggml_metal_kargs_get_rows args = {
+        /*.ne00 =*/ ne00,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.ne10 =*/ ne10,
+        /*.nb10 =*/ nb10,
+        /*.nb11 =*/ nb11,
+        /*.nb1  =*/ nb1,
+        /*.nb2  =*/ nb2,
+    };
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         3);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne10, ne11, ne12, 32, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_set_rows(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_set_rows(lib, op->type);
+
+    const int32_t nk0 = ne0/ggml_blck_size(op->type);
+
+    int nth = 32; // SIMD width
+
+    while (nth < nk0 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+        nth *= 2;
+    }
+
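+    // if a row needs fewer threads than a full threadgroup, pack several rows per threadgroup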
+    int nrptg = 1;
+    if (nth > nk0) {
+        nrptg = (nth + nk0 - 1)/nk0;
+        nth   = nk0;
+
+        if (nrptg*nth > ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+            nrptg--;
+        }
+    }
+
+    nth = std::min(nth, nk0);
+
+    ggml_metal_kargs_set_rows args = {
+        /*.nk0  =*/ nk0,
+        /*.ne01 =*/ ne01,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.nb03 =*/ nb03,
+        /*.ne11 =*/ ne11,
+        /*.ne12 =*/ ne12,
+        /*.nb10 =*/ nb10,
+        /*.nb11 =*/ nb11,
+        /*.nb12 =*/ nb12,
+        /*.nb1  =*/ nb1,
+        /*.nb2  =*/ nb2,
+        /*.nb3  =*/ nb3,
+    };
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         3);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nrptg - 1)/nrptg, ne02, ne03, nth, nrptg, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_soft_max(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    float scale;
+    float max_bias;
+
+    memcpy(&scale,    ((const int32_t *) op->op_params) + 0, sizeof(scale));
+    memcpy(&max_bias, ((const int32_t *) op->op_params) + 1, sizeof(max_bias));
+
+    const uint32_t n_head      = op->src[0]->ne[2];
+    const  int32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
+
+    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
+    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
+
+    // softmax
+
+    ggml_metal_kargs_soft_max args = {
+        /*.ne00        =*/ ne00,
+        /*.ne01        =*/ ne01,
+        /*.ne02        =*/ ne02,
+        /*.nb01        =*/ nb01,
+        /*.nb02        =*/ nb02,
+        /*.nb03        =*/ nb03,
+        /*.ne11        =*/ ne11,
+        /*.ne12        =*/ ne12,
+        /*.ne13        =*/ ne13,
+        /*.nb11        =*/ nb11,
+        /*.nb12        =*/ nb12,
+        /*.nb13        =*/ nb13,
+        /*.nb1         =*/ nb1,
+        /*.nb2         =*/ nb2,
+        /*.nb3         =*/ nb3,
+        /*.scale       =*/ scale,
+        /*.max_bias    =*/ max_bias,
+        /*.m0          =*/ m0,
+        /*.m1          =*/ m1,
+        /*.n_head_log2 =*/ n_head_log2,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_soft_max(lib, op);
+
+    int nth = 32; // SIMD width
+
+    if (ne00%4 == 0) {
+        while (nth < ne00/4 && nth*ne01*ne02*ne03 < 256) {
+            nth *= 2;
+        }
+    } else {
+        while (nth < ne00 && nth*ne01*ne02*ne03 < 256) {
+            nth *= 2;
+        }
+    }
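+    // note: nth grows beyond the SIMD width only when the total number of rows
+    //       (ne01*ne02*ne03) is small - presumably to keep enough threadgroups in flight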
+
+    const size_t smem = ggml_metal_pipeline_get_smem(pipeline);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    if (op->src[1]) {
+        ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+    } else {
+        ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 2);
+    }
+    if (op->src[2]) {
+        ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[2]), 3);
+    } else {
+        ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 3);
+    }
+    ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 4);
+
+    ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_ssm_conv(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    ggml_metal_kargs_ssm_conv args = {
+        /*.ne00 =*/ ne00,
+        /*.ne01 =*/ ne01,
+        /*.ne02 =*/ ne02,
+        /*.nb00 =*/ nb00,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.ne10 =*/ ne10,
+        /*.ne11 =*/ ne11,
+        /*.nb10 =*/ nb10,
+        /*.nb11 =*/ nb11,
+        /*.ne0  =*/ ne0,
+        /*.ne1  =*/ ne1,
+        /*.ne2  =*/ ne2,
+        /*.nb0  =*/ nb0,
+        /*.nb1  =*/ nb1,
+        /*.nb2  =*/ nb2,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_ssm_conv(lib, op);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+    ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 3);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne1, ne02, 1, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_ssm_scan(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne3, op->src[3], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb3, op->src[3], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne4, op->src[4], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb4, op->src[4], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne5, op->src[5], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb5, op->src[5], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne6, op->src[6], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb6, op->src[6], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    const ggml_tensor * src3 = op->src[3];
+    const ggml_tensor * src4 = op->src[4];
+    const ggml_tensor * src5 = op->src[5];
+    const ggml_tensor * src6 = op->src[6];
+
+    GGML_ASSERT(src3);
+    GGML_ASSERT(src4);
+    GGML_ASSERT(src5);
+    GGML_ASSERT(src6);
+
+    const int64_t d_state      = ne00;
+    const int64_t d_inner      = ne01;
+    const int64_t n_head       = ne02;
+    const int64_t n_group      = ne41;
+    const int64_t n_seq_tokens = ne12;
+    const int64_t n_seqs       = ne13;
+
+    ggml_metal_kargs_ssm_scan args = {
+        /*.d_state      =*/ d_state,
+        /*.d_inner      =*/ d_inner,
+        /*.n_head       =*/ n_head,
+        /*.n_group      =*/ n_group,
+        /*.n_seq_tokens =*/ n_seq_tokens,
+        /*.n_seqs       =*/ n_seqs,
+        /*.s_off        =*/ ggml_nelements(op->src[1]) * sizeof(float),
+        /*.nb01         =*/ nb01,
+        /*.nb02         =*/ nb02,
+        /*.nb03         =*/ nb03,
+        /*.nb11         =*/ nb11,
+        /*.nb12         =*/ nb12,
+        /*.nb13         =*/ nb13,
+        /*.nb21         =*/ nb21,
+        /*.nb22         =*/ nb22,
+        /*.nb31         =*/ nb31,
+        /*.nb41         =*/ nb41,
+        /*.nb42         =*/ nb42,
+        /*.nb43         =*/ nb43,
+        /*.nb51         =*/ nb51,
+        /*.nb52         =*/ nb52,
+        /*.nb53         =*/ nb53,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_ssm_scan(lib, op);
+
+    const size_t sms = ggml_metal_pipeline_get_smem(pipeline);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[2]), 3);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[3]), 4);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[4]), 5);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[5]), 6);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[6]), 7);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         8);
+
+    ggml_metal_encoder_set_threadgroup_memory_size(enc, sms, 0);
+
+    if (ne30 == 1) {
+        // Mamba-2
+        ggml_metal_encoder_dispatch_threadgroups(enc, d_inner, n_head, n_seqs, d_state, 1, 1);
+    } else {
+        GGML_ASSERT(d_inner == 1);
+        ggml_metal_encoder_dispatch_threadgroups(enc, n_head, n_seqs, 1, d_state, 1, 1);
+    }
+
+    return 1;
+}
+
+int ggml_metal_op_rwkv(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    const int64_t B = op->op == GGML_OP_RWKV_WKV6 ? op->src[5]->ne[1] : op->src[6]->ne[1];
+    const int64_t T = op->src[0]->ne[2];
+    const int64_t C = op->ne[0];
+    const int64_t H = op->src[0]->ne[1];
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_rwkv(lib, op);
+
+    int ida = 0;
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), ida++);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), ida++);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[2]), ida++);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[3]), ida++);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[4]), ida++);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[5]), ida++);
+    if (op->op == GGML_OP_RWKV_WKV7) {
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[6]), ida++);
+    }
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         ida++);
+    ggml_metal_encoder_set_bytes   (enc, (void *) &B, sizeof(B), ida++);
+    ggml_metal_encoder_set_bytes   (enc, (void *) &T, sizeof(T), ida++);
+    ggml_metal_encoder_set_bytes   (enc, (void *) &C, sizeof(C), ida++);
+    ggml_metal_encoder_set_bytes   (enc, (void *) &H, sizeof(H), ida++);
+
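+    // grid: one threadgroup per (batch, head) pair, each with C/H (head size) threads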
+    ggml_metal_encoder_dispatch_threadgroups(enc, B * H, 1, 1, C/H, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_cpy(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_cpy(lib, op->src[0]->type, op->type);
+
+    GGML_ASSERT(ne00 % ggml_blck_size(op->src[0]->type) == 0);
+
+    // TODO: support
+    //const int32_t nk00 = ne00/ggml_blck_size(op->type);
+    const int32_t nk00 = ne00;
+
+    int nth = 32; // SIMD width
+
+    while (nth < nk00 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+        nth *= 2;
+    }
+
+    nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+    // when rows are small, we can batch them together in a single threadgroup
+    int nrptg = 1;
+
+    // TODO: relax this constraint in the future
+    if (ggml_blck_size(op->src[0]->type) == 1 && ggml_blck_size(op->type) == 1) {
+        if (nth > nk00) {
+            nrptg = (nth + nk00 - 1)/nk00;
+            nth   = nk00;
+
+            if (nrptg*nth > ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+                nrptg--;
+            }
+        }
+    }
+
+    nth = std::min(nth, nk00);
+
+    ggml_metal_kargs_cpy args = {
+        /*.ne00 =*/ nk00,
+        /*.ne01 =*/ ne01,
+        /*.ne02 =*/ ne02,
+        /*.ne03 =*/ ne03,
+        /*.nb00 =*/ nb00,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.nb03 =*/ nb03,
+        /*.ne0  =*/ ne0,
+        /*.ne1  =*/ ne1,
+        /*.ne2  =*/ ne2,
+        /*.ne3  =*/ ne3,
+        /*.nb0  =*/ nb0,
+        /*.nb1  =*/ nb1,
+        /*.nb2  =*/ nb2,
+        /*.nb3  =*/ nb3,
+    };
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, nrptg, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_pool_2d(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    const int32_t * opts = op->op_params;
+    ggml_op_pool op_pool = (ggml_op_pool) opts[0];
+
+    const int32_t k0 = opts[1];
+    const int32_t k1 = opts[2];
+    const int32_t s0 = opts[3];
+    const int32_t s1 = opts[4];
+    const int32_t p0 = opts[5];
+    const int32_t p1 = opts[6];
+
+    const int64_t IH = op->src[0]->ne[1];
+    const int64_t IW = op->src[0]->ne[0];
+
+    const int64_t N  = op->ne[3];
+    const int64_t OC = op->ne[2];
+    const int64_t OH = op->ne[1];
+    const int64_t OW = op->ne[0];
+
+    const int64_t np = N * OC * OH * OW;
+
+    ggml_metal_kargs_pool_2d args_pool_2d = {
+        /* .k0 = */ k0,
+        /* .k1 = */ k1,
+        /* .s0 = */ s0,
+        /* .s1 = */ s1,
+        /* .p0 = */ p0,
+        /* .p1 = */ p1,
+        /* .IH = */ IH,
+        /* .IW = */ IW,
+        /* .OH = */ OH,
+        /* .OW = */ OW,
+        /* .np = */ np
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_pool_2d(lib, op, op_pool);
+
+    const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), (int) np);
+    const int ntg = (np + nth - 1) / nth;
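+    // one thread per output element: ntg threadgroups of nth threads cover all np elements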
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args_pool_2d, sizeof(args_pool_2d), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ntg, 1, 1, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_mul_mat(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx->dev);
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    GGML_ASSERT(ne00 == ne10);
+
+    GGML_ASSERT(ne12 % ne02 == 0);
+    GGML_ASSERT(ne13 % ne03 == 0);
+
+    const int16_t r2 = ne12/ne02;
+    const int16_t r3 = ne13/ne03;
+
+    // find the break-even point where the matrix-matrix kernel becomes more efficient compared
+    // to the matrix-vector kernel
+    const int ne11_mm_min = 8;
+
+    // first try to use small-batch mat-mv kernels
+    // these should be efficient for BS [2, ~8]
+    if (op->src[1]->type == GGML_TYPE_F32 && (ne00%128 == 0) &&
+        (
+         (
+          (
+           op->src[0]->type == GGML_TYPE_F32  || // TODO: helper function
+           op->src[0]->type == GGML_TYPE_F16  ||
+           op->src[0]->type == GGML_TYPE_Q4_0 ||
+           op->src[0]->type == GGML_TYPE_Q4_1 ||
+           op->src[0]->type == GGML_TYPE_Q5_0 ||
+           op->src[0]->type == GGML_TYPE_Q5_1 ||
+           op->src[0]->type == GGML_TYPE_Q8_0 ||
+           op->src[0]->type == GGML_TYPE_MXFP4 ||
+           op->src[0]->type == GGML_TYPE_IQ4_NL ||
+           false) && (ne11 >= 2 && ne11 <= 8)
+         ) ||
+         (
+          (
+           op->src[0]->type == GGML_TYPE_Q4_K ||
+           op->src[0]->type == GGML_TYPE_Q5_K ||
+           op->src[0]->type == GGML_TYPE_Q6_K ||
+           false) && (ne11 >= 4 && ne11 <= 8)
+         )
+        )
+       ) {
+        // TODO: determine the optimal parameters based on grid utilization
+        //       I still don't know why we should not always use the maximum available threads:
+        //
+        //       nsg = pipeline.maxTotalThreadsPerThreadgroup / 32
+        //
+        //       my current hypothesis is that the work grid is not evenly divisible for different nsg
+        //       values and there can be some tail effects when nsg is high. need to confirm this
+        //
+        const int nsg    = 2;                 // num simdgroups per threadgroup
+
+        // num threads along row per simdgroup
+        int16_t nxpsg = 0;
+        if (ne00 % 256 == 0 && ne11 < 3) {
+            nxpsg = 16;
+        } else if (ne00 % 128 == 0) {
+            nxpsg = 8;
+        } else {
+            nxpsg = 4;
+        }
+
+        const int16_t nypsg  = 32/nxpsg;          // num threads along col per simdgroup (i.e. a simdgroup processes that many src0 rows at a time)
+        const int16_t r0ptg  = nypsg*nsg;         // num src0 rows per threadgroup
+              int16_t r1ptg  = 4;                 // num src1 rows per threadgroup
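+
+        // e.g. (illustrative) nxpsg = 8 -> nypsg = 32/8 = 4 src0 rows per simdgroup
+        //      and r0ptg = nypsg*nsg = 8 src0 rows per threadgroup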
+
+        // note: not sure how optimal these are across different hardware. there might be something cleverer
+        switch (ne11) {
+            case 2:
+                r1ptg = 2; break;
+            case 3:
+            case 6:
+                r1ptg = 3; break;
+            case 4:
+            case 7:
+            case 8:
+                r1ptg = 4; break;
+            case 5:
+                r1ptg = 5; break;
+            default:
+                GGML_ABORT("unsupported ne11");
+        };
+
+        ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_mul_mv_ext(lib, op->src[0]->type, op->src[1]->type, r1ptg);
+
+        ggml_metal_kargs_mul_mv_ext args = {
+            /*.ne00  =*/ ne00,
+            /*.ne01  =*/ ne01,
+            /*.ne02  =*/ ne02,
+            /*.nb00  =*/ nb00,
+            /*.nb01  =*/ nb01,
+            /*.nb02  =*/ nb02,
+            /*.nb03  =*/ nb03,
+            /*.ne10  =*/ ne10,
+            /*.ne11  =*/ ne11,
+            /*.ne12  =*/ ne12,
+            /*.nb10  =*/ nb10,
+            /*.nb11  =*/ nb11,
+            /*.nb12  =*/ nb12,
+            /*.nb13  =*/ nb13,
+            /*.ne0   =*/ ne0,
+            /*.ne1   =*/ ne1,
+            /*.r2    =*/ r2,
+            /*.r3    =*/ r3,
+            /*.nsg   =*/ nsg,
+            /*.nxpsg =*/ nxpsg,
+            /*.r1ptg =*/ r1ptg,
+        };
+
+        ggml_metal_encoder_set_pipeline(enc, pipeline);
+        ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         3);
+
+        ggml_metal_encoder_dispatch_threadgroups(enc, ((ne01 + r0ptg - 1)/r0ptg), ((ne11 + r1ptg - 1)/r1ptg), ne12*ne13, 32, nsg, 1);
+    } else if (
+        !ggml_is_transposed(op->src[0]) &&
+        !ggml_is_transposed(op->src[1]) &&
+        // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
+        // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
+        props_dev->has_simdgroup_mm &&
+        op->src[1]->type == GGML_TYPE_F32 &&
+        ne00 % 32 == 0 && ne00 >= 64 &&
+        (ne11 > ne11_mm_min || (ggml_is_quantized(op->src[0]->type) && ne12 > 1))) {
+        //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
+
+        // some Metal matrix data types require aligned pointers
+        // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5)
+        switch (op->src[0]->type) {
+            case GGML_TYPE_F32:  GGML_ASSERT(nb01 % 16 == 0); break;
+            case GGML_TYPE_F16:  GGML_ASSERT(nb01 % 8  == 0); break;
+            case GGML_TYPE_BF16: GGML_ASSERT(nb01 % 8  == 0); break;
+            default: break;
+        }
+
+        ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_mul_mm(lib, op->src[0]->type, op->src[1]->type);
+
+        ggml_metal_kargs_mul_mm args = {
+            /*.ne00 =*/ ne00,
+            /*.ne02 =*/ ne02,
+            /*.nb01 =*/ nb01,
+            /*.nb02 =*/ nb02,
+            /*.nb03 =*/ nb03,
+            /*.ne12 =*/ ne12,
+            /*.nb10 =*/ nb10,
+            /*.nb11 =*/ nb11,
+            /*.nb12 =*/ nb12,
+            /*.nb13 =*/ nb13,
+            /*.ne0  =*/ ne0,
+            /*.ne1  =*/ ne1,
+            /*.r2   =*/ r2,
+            /*.r3   =*/ r3,
+        };
+
+        ggml_metal_encoder_set_pipeline(enc, pipeline);
+        ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         3);
+
+        const size_t smem = ggml_metal_pipeline_get_smem(pipeline);
+
+        ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+        ggml_metal_encoder_dispatch_threadgroups(enc, ((ne11 + 31)/32), ((ne01 + 63)/64), ne12*ne13, 128, 1, 1);
+    } else {
+        ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_mul_mv(lib, op);
+
+        ggml_metal_kargs_mul_mv args = {
+            /*.ne00 =*/ ne00,
+            /*.ne01 =*/ ne01,
+            /*.ne02 =*/ ne02,
+            /*.nb00 =*/ nb00,
+            /*.nb01 =*/ nb01,
+            /*.nb02 =*/ nb02,
+            /*.nb03 =*/ nb03,
+            /*.ne10 =*/ ne10,
+            /*.ne11 =*/ ne11,
+            /*.ne12 =*/ ne12,
+            /*.nb10 =*/ nb10,
+            /*.nb11 =*/ nb11,
+            /*.nb12 =*/ nb12,
+            /*.nb13 =*/ nb13,
+            /*.ne0  =*/ ne0,
+            /*.ne1  =*/ ne1,
+            /*.r2   =*/ r2,
+            /*.r3   =*/ r3,
+        };
+
+        const int nr0 = ggml_metal_pipeline_get_nr0(pipeline);
+        const int nr1 = ggml_metal_pipeline_get_nr1(pipeline);
+        const int nsg = ggml_metal_pipeline_get_nsg(pipeline);
+
+        const size_t smem = ggml_metal_pipeline_get_smem(pipeline);
+
+        ggml_metal_encoder_set_pipeline(enc, pipeline);
+        ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         3);
+
+        ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+        if (op->src[0]->type == GGML_TYPE_Q8_0) {
+            ggml_metal_encoder_dispatch_threadgroups(enc, ((ne01 + nr0 - 1)/(nr0)), ((ne11 + nr1 - 1)/nr1), ne12*ne13, 32, nsg, 1);
+        } else {
+            ggml_metal_encoder_dispatch_threadgroups(enc, ((ne01 + nr0*nsg - 1)/(nr0*nsg)), ((ne11 + nr1 - 1)/nr1), ne12*ne13, 32, nsg, 1);
+        }
+    }
+
+    return 1;
+}
+
+size_t ggml_metal_op_mul_mat_id_extra_tpe(const ggml_tensor * op) {
+    assert(op->op == GGML_OP_MUL_MAT_ID);
+
+    const int64_t ne02 = op->src[0]->ne[2]; // n_expert
+
+    return ggml_type_size(GGML_TYPE_I32)*ne02;
+}
+
+size_t ggml_metal_op_mul_mat_id_extra_ids(const ggml_tensor * op) {
+    assert(op->op == GGML_OP_MUL_MAT_ID);
+
+    const int64_t ne02 = op->src[0]->ne[2]; // n_expert
+    const int64_t ne21 = op->src[2]->ne[1]; // n_token
+
+    return ggml_type_size(GGML_TYPE_I32)*ne02*ne21;
+}
+
+int ggml_metal_op_mul_mat_id(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx->dev);
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    // src2 = ids
+    GGML_ASSERT(op->src[2]->type == GGML_TYPE_I32);
+
+    GGML_ASSERT(!ggml_is_transposed(op->src[0]));
+    GGML_ASSERT(!ggml_is_transposed(op->src[1]));
+
+    GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+
+    GGML_ASSERT(ne03 == 1);
+    GGML_ASSERT(ne13 == 1);
+
+    ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]);
+    ggml_metal_buffer_id bid_src1 = ggml_metal_get_buffer_id(op->src[1]);
+    ggml_metal_buffer_id bid_src2 = ggml_metal_get_buffer_id(op->src[2]);
+    ggml_metal_buffer_id bid_dst  = ggml_metal_get_buffer_id(op);
+
+    const uint32_t r2 = 1;
+    const uint32_t r3 = 1;
+
+    // find the break-even point where the matrix-matrix kernel becomes more efficient compared
+    // to the matrix-vector kernel
+    // ne20 = n_used_experts
+    // ne21 = n_rows (batch size)
+    const int ne21_mm_id_min = 32;
+
+    if (props_dev->has_simdgroup_mm &&
+        ne00 % 32 == 0 && ne00 >= 64 &&
+        (ne21 >= ne21_mm_id_min)) {
+        GGML_ASSERT(ne00 % 4 == 0);
+
+        // some Metal matrix data types require aligned pointers
+        // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5)
+        switch (op->src[0]->type) {
+            case GGML_TYPE_F32:  GGML_ASSERT(nb01 % 16 == 0); break;
+            case GGML_TYPE_F16:  GGML_ASSERT(nb01 % 8  == 0); break;
+            case GGML_TYPE_BF16: GGML_ASSERT(nb01 % 8  == 0); break;
+            default: break;
+        }
+
+        // extra buffers for intermediate id mapping
+        ggml_metal_buffer_id bid_tpe = bid_dst;
+        bid_tpe.offs += ggml_nbytes(op);
+
+        ggml_metal_buffer_id bid_ids = bid_tpe;
+        bid_ids.offs += ggml_metal_op_mul_mat_id_extra_tpe(op);
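+
+        // resulting layout of the dst allocation (cf. the *_extra_tpe/_extra_ids helpers above):
+        //   [ dst data | tpe: ne02 x i32 | ids: ne02*ne21 x i32 ]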
+
+        {
+            ggml_metal_kargs_mul_mm_id_map0 args = {
+                ne02,
+                ne10,
+                ne11, // n_expert_used (bcast)
+                nb11,
+                nb12,
+                ne21, // n_tokens
+                ne20, // n_expert_used
+                nb21,
+            };
+
+            ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_mul_mm_id_map0(lib, ne02, ne20);
+
+            const size_t smem = ggml_metal_pipeline_get_smem(pipeline);
+
+            GGML_ASSERT(ne02 <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+            GGML_ASSERT(smem <= props_dev->max_theadgroup_memory_size);
+
+            ggml_metal_encoder_set_pipeline(enc, pipeline);
+            ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+            ggml_metal_encoder_set_buffer  (enc, bid_src2, 1);
+            ggml_metal_encoder_set_buffer  (enc, bid_tpe,  2);
+            ggml_metal_encoder_set_buffer  (enc, bid_ids,  3);
+
+            ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+            ggml_metal_encoder_dispatch_threadgroups(enc, 1, 1, 1, ne02, 1, 1);
+        }
+
+        // this barrier is always needed because the next kernel has to wait for the id maps to be computed
+        ggml_metal_op_concurrency_reset(ctx);
+
+        {
+            ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_mul_mm_id(lib, op->src[0]->type, GGML_TYPE_F16);
+
+            ggml_metal_kargs_mul_mm_id args = {
+                /*.ne00  =*/ ne00,
+                /*.ne02  =*/ ne02,
+                /*.nb01  =*/ nb01,
+                /*.nb02  =*/ nb02,
+                /*.nb03  =*/ nb03,
+                /*.ne11  =*/ ne11, // n_expert_used (bcast)
+                /*.nb10  =*/ nb10,
+                /*.nb11  =*/ nb11,
+                /*.nb12  =*/ nb12,
+                /*.nb13  =*/ nb13,
+                /*.ne20  =*/ ne20, // n_expert_used
+                /*.ne21  =*/ ne21, // n_tokens
+                /*.ne0   =*/ ne0,
+                /*.ne1   =*/ ne1,
+                /*.r2    =*/ r2,
+                /*.r3    =*/ r3,
+            };
+
+            ggml_metal_encoder_set_pipeline(enc, pipeline);
+            ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+            ggml_metal_encoder_set_buffer  (enc, bid_src0, 1);
+            ggml_metal_encoder_set_buffer  (enc, bid_src1, 2);
+            ggml_metal_encoder_set_buffer  (enc, bid_tpe,  3);
+            ggml_metal_encoder_set_buffer  (enc, bid_ids,  4);
+            ggml_metal_encoder_set_buffer  (enc, bid_dst,  5);
+
+            const size_t smem = ggml_metal_pipeline_get_smem(pipeline);
+
+            ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+            ggml_metal_encoder_dispatch_threadgroups(enc, (ne21 + 31)/32, (ne01 + 63)/64, ne02, 128, 1, 1);
+        }
+    } else {
+        ggml_metal_kargs_mul_mv_id args = {
+            /*.nei0 =*/ ne20,
+            /*.nei1 =*/ ne21,
+            /*.nbi1 =*/ nb21,
+            /*.ne00 =*/ ne00,
+            /*.ne01 =*/ ne01,
+            /*.ne02 =*/ ne02,
+            /*.nb00 =*/ nb00,
+            /*.nb01 =*/ nb01,
+            /*.nb02 =*/ nb02,
+            /*.ne10 =*/ ne10,
+            /*.ne11 =*/ ne11,
+            /*.ne12 =*/ ne12,
+            /*.ne13 =*/ ne13,
+            /*.nb10 =*/ nb10,
+            /*.nb11 =*/ nb11,
+            /*.nb12 =*/ nb12,
+            /*.ne0  =*/ ne0,
+            /*.ne1  =*/ ne1,
+            /*.nb1  =*/ nb1,
+        };
+
+        ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_mul_mv_id(lib, op);
+
+        const int nr0 = ggml_metal_pipeline_get_nr0(pipeline);
+        const int nr1 = ggml_metal_pipeline_get_nr1(pipeline);
+        const int nsg = ggml_metal_pipeline_get_nsg(pipeline);
+
+        const size_t smem = ggml_metal_pipeline_get_smem(pipeline);
+
+        if (ggml_is_quantized(op->src[0]->type)) {
+            GGML_ASSERT(ne00 >= nsg*nr0);
+        }
+
+        ggml_metal_encoder_set_pipeline(enc, pipeline);
+        ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0);
+        ggml_metal_encoder_set_buffer(enc, bid_src0, 1);
+        ggml_metal_encoder_set_buffer(enc, bid_src1, 2);
+        ggml_metal_encoder_set_buffer(enc, bid_dst,  3);
+        ggml_metal_encoder_set_buffer(enc, bid_src2, 4);
+
+        const int64_t _ne1 = 1;
+        const int64_t ne123 = ne20*ne21;
+
+        ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+        if (op->src[0]->type == GGML_TYPE_Q8_0) {
+            ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nr0 - 1)/(nr0), (_ne1 + nr1 - 1)/nr1, ne123, 32, nsg, 1);
+        } else {
+            ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nr0*nsg - 1)/(nr0*nsg), (_ne1 + nr1 - 1)/nr1, ne123, 32, nsg, 1);
+        }
+    }
+
+    return 1;
+}
+
+int ggml_metal_op_add_id(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+
+    GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+    GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+    GGML_ASSERT(op->src[2]->type == GGML_TYPE_I32);
+    GGML_ASSERT(op->type         == GGML_TYPE_F32);
+
+    GGML_ASSERT(ggml_is_contiguous_rows(op->src[0]));
+
+    ggml_metal_kargs_add_id args = {
+        /*.ne0  =*/ ne0,
+        /*.ne1  =*/ ne1,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.nb11 =*/ nb11,
+        /*.nb21 =*/ nb21,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_base(lib, GGML_OP_ADD_ID);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[2]), 3);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         4);
+
+    const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne00);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, 1, nth, 1, 1);
+
+    return 1;
+}
+
+bool ggml_metal_op_flash_attn_ext_use_vec(const ggml_tensor * op) {
+    assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+
+    const int64_t ne00 = op->src[0]->ne[0]; // head size
+    const int64_t ne01 = op->src[0]->ne[1]; // batch size
+
+    // use vec kernel if the batch size is small and if the head size is supported
+    return (ne01 < 20) && (ne00 % 32 == 0);
+}
+
+size_t ggml_metal_op_flash_attn_ext_extra_tmp(const ggml_tensor * op) {
+    assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+
+    const int64_t nwg = 32;
+
+    const int64_t ne01 = op->src[0]->ne[1];
+    const int64_t ne02 = op->src[0]->ne[2];
+    const int64_t ne03 = op->src[0]->ne[3];
+    const int64_t ne20 = op->src[2]->ne[0];
+
+    // temp buffer for writing the results from each workgroup
+    // - ne20: the size of the Value head
+    // -  + 2: the S and M values for each intermediate result
+    return ggml_type_size(GGML_TYPE_F32)*(ne01*ne02*ne03*nwg*(ne20 + 2));
+}
+
+int ggml_metal_op_flash_attn_ext(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx->dev);
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne3, op->src[3], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb3, op->src[3], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS( int32_t, nb,  op,         nb);
+
+    GGML_ASSERT(ne00 % 4  == 0);
+    GGML_ASSERT(ne11 % 32 == 0);
+
+    GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+    GGML_ASSERT(op->src[1]->type == op->src[2]->type);
+
+    //GGML_ASSERT(ggml_are_same_shape (src1, src2));
+    GGML_ASSERT(ne11 == ne21);
+    GGML_ASSERT(ne12 == ne22);
+
+    GGML_ASSERT(!op->src[3] || op->src[3]->type == GGML_TYPE_F16);
+    GGML_ASSERT(!op->src[3] || op->src[3]->ne[1] >= GGML_PAD(op->src[0]->ne[1], 8) &&
+            "the Flash-Attention Metal kernel requires the mask to be padded to 8 and at least n_queries big");
+
+    float scale;
+    float max_bias;
+    float logit_softcap;
+
+    memcpy(&scale,         ((const int32_t *) op->op_params) + 0, sizeof(scale));
+    memcpy(&max_bias,      ((const int32_t *) op->op_params) + 1, sizeof(max_bias));
+    memcpy(&logit_softcap, ((const int32_t *) op->op_params) + 2, sizeof(logit_softcap));
+
+    if (logit_softcap != 0.0f) {
+        scale /= logit_softcap;
+    }
+
+    const bool has_mask  = op->src[3] != NULL;
+    const bool has_sinks = op->src[4] != NULL;
+    const bool has_bias  = max_bias != 0.0f;
+    const bool has_scap  = logit_softcap != 0.0f;
+
+    const uint32_t n_head      = op->src[0]->ne[2];
+    const  int32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
+
+    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
+    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
+
+    GGML_ASSERT(ne01 < 65536);
+
+    if (!ggml_metal_op_flash_attn_ext_use_vec(op)) {
+        // half8x8 kernel
+        const int64_t nqptg = 8;  // queries per threadgroup    !! sync with kernel template arguments !!
+        const int64_t ncpsg = 64; // cache values per simdgroup !! sync with kernel template arguments !!
+
+        GGML_ASSERT(nqptg <= 32);
+        GGML_ASSERT(nqptg  % 8  == 0);
+        GGML_ASSERT(ncpsg  % 32 == 0);
+
+        const int is_q = ggml_is_quantized(op->src[1]->type) ? 1 : 0;
+
+        // 2*(2*ncpsg)
+        // ncpsg soft_max values + ncpsg mask values
+        //
+        // 16*32*(nsg)
+        // the shared memory needed for the simdgroups to load the KV cache
+        // each thread loads (dequantizes) 16 head elements, and there are 32 threads in the SG
+        //
+#define FATTN_SMEM(nsg) (GGML_PAD((nqptg*(ne00 + 2*GGML_PAD(ne20, 64) + 2*(2*ncpsg)) + is_q*(16*32*(nsg)))*(sizeof(float)/2), 16))
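+
+        // e.g. (illustrative) ne00 = ne20 = 128, non-quantized KV, nsg = 4:
+        //      FATTN_SMEM(4) = (8*(128 + 2*128 + 2*2*64))*2 = 10240 bytes of threadgroup memory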
+
+        //int64_t nsgmax = 4;
+        //
+        //if (is_q) {
+        //    nsgmax = 2;
+        //    while (true) {
+        //        const size_t smem = FATTN_SMEM(nsgmax);
+        //        if (smem > props_dev->max_theadgroup_memory_size) {
+        //            break;
+        //        }
+        //        nsgmax *= 2;
+        //    }
+        //    nsgmax /= 2;
+        //}
+
+        // simdgroups per threadgroup (a.k.a. warps)
+        //nsg = ne01 <= nqptg ? MAX(4, MIN(nsgmax, MIN(ne11/ncpsg, (int64_t) pipeline.maxTotalThreadsPerThreadgroup/32))) : 4;
+        int32_t nsg = 4;
+
+        const size_t smem = FATTN_SMEM(nsg);
+
+        ggml_metal_kargs_flash_attn_ext args = {
+            /*.ne01          =*/ ne01,
+            /*.ne02          =*/ ne02,
+            /*.ne03          =*/ ne03,
+            /*.nb01          =*/ nb01,
+            /*.nb02          =*/ nb02,
+            /*.nb03          =*/ nb03,
+            /*.ne11          =*/ ne11,
+            /*.ne_12_2       =*/ ne12,
+            /*.ne_12_3       =*/ ne13,
+            /*.ns10          =*/ int32_t(nb11/nb10),
+            /*.nb11          =*/ nb11,
+            /*.nb12          =*/ nb12,
+            /*.nb13          =*/ nb13,
+            /*.ns20          =*/ int32_t(nb21/nb20),
+            /*.nb21          =*/ nb21,
+            /*.nb22          =*/ nb22,
+            /*.nb23          =*/ nb23,
+            /*.ne32          =*/ ne32,
+            /*.ne33          =*/ ne33,
+            /*.nb31          =*/ nb31,
+            /*.nb32          =*/ nb32,
+            /*.nb33          =*/ nb33,
+            /*.ne1           =*/ ne1,
+            /*.ne2           =*/ ne2,
+            /*.ne3           =*/ ne3,
+            /*.scale         =*/ scale,
+            /*.max_bias      =*/ max_bias,
+            /*.m0            =*/ m0,
+            /*.m1            =*/ m1,
+            /*.n_head_log2   =*/ n_head_log2,
+            /*.logit_softcap =*/ logit_softcap,
+        };
+
+        ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_flash_attn_ext(lib, op, has_mask, has_sinks, has_bias, has_scap, nsg);
+
+        ggml_metal_encoder_set_pipeline(enc, pipeline);
+        ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[2]), 3);
+        if (op->src[3]) {
+            ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[3]), 4);
+        } else {
+            ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 4);
+        }
+        if (op->src[4]) {
+            ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[4]), 5);
+        } else {
+            ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 5);
+        }
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         6);
+
+        ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+        ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nqptg - 1)/nqptg, ne02, ne03, 32, nsg, 1);
+#undef FATTN_SMEM
+    } else {
+        // half4x4 kernel
+        const int64_t nqptg = 1;  // queries per threadgroup    !! sync with kernel template arguments !!
+        const int64_t ncpsg = 32; // cache values per simdgroup !! sync with kernel template arguments !!
+        const int64_t nkpsg = 1*ncpsg;
+
+        GGML_ASSERT(nqptg <= 32);
+        GGML_ASSERT(nqptg  % 1  == 0);
+        GGML_ASSERT(ncpsg  % 32 == 0);
+
+        // ne00 + 2*ncpsg*(nsg)
+        // for each query, we load it as f16 in shared memory (ne00)
+        // and store the soft_max values and the mask
+        //
+        // ne20*(nsg)
+        // each simdgroup has a full f32 head vector in shared mem to accumulate results
+        //
+#define FATTN_SMEM(nsg) (GGML_PAD((nqptg*(GGML_PAD(ne00, 128) + 4*ncpsg*(nsg)) + 2*GGML_PAD(ne20, 128)*(nsg))*(sizeof(float)/2), 16))
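+
+        // e.g. (illustrative) ne00 = ne20 = 128, nsg = 2:
+        //      FATTN_SMEM(2) = (1*(128 + 4*32*2) + 2*128*2)*2 = 1792 bytes of threadgroup memory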
+
+        int64_t nsgmax = 2;
+        while (true) {
+            const size_t smem = FATTN_SMEM(nsgmax);
+            // avoid using more than half of the threadgroup memory - it can cause slowdowns, especially for large head sizes
+            if (smem > props_dev->max_theadgroup_memory_size/2) {
+                break;
+            }
+            nsgmax *= 2;
+        }
+        nsgmax /= 2;
+
+        // simdgroups per threadgroup (a.k.a. warps)
+        //const int64_t nsgt = MAX(2, MIN(nsgmax, MIN((ne11 + nkpsg - 1)/(nkpsg), (int64_t) pipeline.maxTotalThreadsPerThreadgroup/32)));
+        const int64_t nsgt = MAX(2, MIN(nsgmax, MIN((ne11 + nkpsg - 1)/(nkpsg), (int64_t) 1024/32)));
+
+        int64_t nsg = 1;
+        while (nsg <= nsgt) {
+            nsg *= 2;
+        }
+        nsg /= 2;
+
+        // workgroups
+        // each workgroup handles nsg*nkpsg cache values
+        int32_t nwg = 1;
+        if (false) {
+            // for small KV caches, we could launch a single workgroup and write the results directly to dst;
+            // however, this does not lead to a significant improvement, so it is disabled
+            nwg = 1;
+            nsg = 4;
+        } else {
+            nwg = 32;
+            nsg = 1;
+            while (2*nwg*nsg*nkpsg < ne11 && nsg < 4) {
+                nsg *= 2;
+            }
+        }
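+
+        // e.g. (illustrative) ne11 = 4096: 2*32*1*32 = 2048 < 4096 -> nsg = 2;
+        //      2*32*2*32 = 4096 stops the loop -> nwg = 32 workgroups of 2 simdgroups each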
+
+        ggml_metal_kargs_flash_attn_ext_vec args = {
+            /*.ne01          =*/ ne01,
+            /*.ne02          =*/ ne02,
+            /*.ne03          =*/ ne03,
+            /*.nb01          =*/ nb01,
+            /*.nb02          =*/ nb02,
+            /*.nb03          =*/ nb03,
+            /*.ne11          =*/ ne11,
+            /*.ne_12_2       =*/ ne12,
+            /*.ne_12_3       =*/ ne13,
+            /*.ns10          =*/ int32_t(nb11/nb10),
+            /*.nb11          =*/ nb11,
+            /*.nb12          =*/ nb12,
+            /*.nb13          =*/ nb13,
+            /*.ns20          =*/ int32_t(nb21/nb20),
+            /*.nb21          =*/ nb21,
+            /*.nb22          =*/ nb22,
+            /*.nb23          =*/ nb23,
+            /*.ne32          =*/ ne32,
+            /*.ne33          =*/ ne33,
+            /*.nb31          =*/ nb31,
+            /*.nb32          =*/ nb32,
+            /*.nb33          =*/ nb33,
+            /*.ne1           =*/ ne1,
+            /*.ne2           =*/ ne2,
+            /*.ne3           =*/ ne3,
+            /*.scale         =*/ scale,
+            /*.max_bias      =*/ max_bias,
+            /*.m0            =*/ m0,
+            /*.m1            =*/ m1,
+            /*.n_head_log2   =*/ n_head_log2,
+            /*.logit_softcap =*/ logit_softcap,
+        };
+
+        ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_flash_attn_ext_vec(lib, op, has_mask, has_sinks, has_bias, has_scap, nsg, nwg);
+
+        GGML_ASSERT(nsg*32 <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+        ggml_metal_encoder_set_pipeline(enc, pipeline);
+        ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[2]), 3);
+        if (op->src[3]) {
+            ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[3]), 4);
+        } else {
+            ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 4);
+        }
+        if (op->src[4]) {
+            ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[4]), 5);
+        } else {
+            ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 5);
+        }
+
+        const size_t smem = FATTN_SMEM(nsg);
+
+        //printf("smem: %zu, max: %zu, nsg = %d, nsgmax = %d\n", smem, props_dev->max_theadgroup_memory_size, (int) nsg, (int) nsgmax);
+        GGML_ASSERT(smem <= props_dev->max_theadgroup_memory_size);
+
+        if (nwg == 1) {
+            // using 1 workgroup -> write the result directly into dst
+            ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 6);
+
+            ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+            ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nqptg - 1)/nqptg, ne02, ne03*nwg, 32, nsg, 1);
+        } else {
+            // sanity checks
+            GGML_ASSERT(ne01*ne02*ne03 == ne1*ne2*ne3);
+            GGML_ASSERT((uint64_t)ne1*ne2*ne3 <= (1u << 31));
+
+            ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op);
+
+            // write the results from each workgroup into a temp buffer
+            ggml_metal_buffer_id bid_tmp = bid_dst;
+            bid_tmp.offs += ggml_nbytes(op);
+            ggml_metal_encoder_set_buffer(enc, bid_tmp, 6);
+
+            ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+            ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nqptg - 1)/nqptg, ne02, ne03*nwg, 32, nsg, 1);
+
+            // sync the 2 kernels
+            ggml_metal_op_concurrency_reset(ctx);
+
+            // reduce the results from the workgroups
+            {
+                const int32_t nrows = ne1*ne2*ne3;
+
+                ggml_metal_kargs_flash_attn_ext_vec_reduce args0 = {
+                    nrows,
+                };
+
+                ggml_metal_pipeline_t pipeline0 = ggml_metal_library_get_pipeline_flash_attn_ext_vec_reduce(lib, op, ne20, nwg);
+
+                ggml_metal_encoder_set_pipeline(enc, pipeline0);
+                ggml_metal_encoder_set_bytes   (enc, &args0, sizeof(args0), 0);
+                ggml_metal_encoder_set_buffer  (enc, bid_tmp, 1);
+                ggml_metal_encoder_set_buffer  (enc, bid_dst, 2);
+
+                ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, 32*nwg, 1, 1);
+            }
+        }
+#undef FATTN_SMEM
+    }
+
+    return 1;
+}
+
+int ggml_metal_op_bin(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_tensor ** ops = ggml_graph_nodes(gf) + idx;
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    const int idx_end = ctx->idx_end;
+
+    const bool use_fusion = ctx->use_fusion;
+
+    const int debug_fusion = ctx->debug_fusion;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb,  op,         nb);
+
+    GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+    GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+
+    GGML_ASSERT(ggml_is_contiguous_rows(op->src[0]));
+    GGML_ASSERT(ggml_is_contiguous_rows(op->src[1]));
+
+    bool bcast_row = false;
+
+    ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]);
+    ggml_metal_buffer_id bid_src1 = ggml_metal_get_buffer_id(op->src[1]);
+    ggml_metal_buffer_id bid_dst  = ggml_metal_get_buffer_id(op);
+
+    ggml_metal_kargs_bin args = {
+        /*.ne00 =*/ ne00,
+        /*.ne01 =*/ ne01,
+        /*.ne02 =*/ ne02,
+        /*.ne03 =*/ ne03,
+        /*.nb00 =*/ nb00,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.nb03 =*/ nb03,
+        /*.ne10 =*/ ne10,
+        /*.ne11 =*/ ne11,
+        /*.ne12 =*/ ne12,
+        /*.ne13 =*/ ne13,
+        /*.nb10 =*/ nb10,
+        /*.nb11 =*/ nb11,
+        /*.nb12 =*/ nb12,
+        /*.nb13 =*/ nb13,
+        /*.ne0  =*/ ne0,
+        /*.ne1  =*/ ne1,
+        /*.ne2  =*/ ne2,
+        /*.ne3  =*/ ne3,
+        /*.nb0  =*/ nb0,
+        /*.nb1  =*/ nb1,
+        /*.nb2  =*/ nb2,
+        /*.nb3  =*/ nb3,
+        /*.offs =*/ 0,
+        /*.o1   =*/ { bid_src1.offs },
+    };
+
+    ggml_op fops[8];
+
+    int n_fuse = 1;
+
+    // c[0] = add(a,    b[0])
+    // c[1] = add(c[0], b[1])
+    // c[2] = add(c[1], b[2])
+    // ...
+    if (use_fusion) {
+        fops[0] = GGML_OP_ADD;
+        fops[1] = GGML_OP_ADD;
+        fops[2] = GGML_OP_ADD;
+        fops[3] = GGML_OP_ADD;
+        fops[4] = GGML_OP_ADD;
+        fops[5] = GGML_OP_ADD;
+        fops[6] = GGML_OP_ADD;
+        fops[7] = GGML_OP_ADD;
+
+        // note: in metal, we sometimes encode the graph in parallel so we have to avoid fusing ops
+        //       across splits. idx_end indicates the last node in the current split
+        for (n_fuse = 0; n_fuse <= 6 && idx + n_fuse + 1 < idx_end; ++n_fuse) {
+            if (!ggml_can_fuse(gf, idx + n_fuse, fops + n_fuse, 2)) {
+                break;
+            }
+
+            if (ops[n_fuse] != ops[n_fuse + 1]->src[0]) {
+                break;
+            }
+
+            // b[0] === b[1] === ...
+            if (!ggml_are_same_layout(ops[n_fuse]->src[1], ops[n_fuse + 1]->src[1])) {
+                break;
+            }
+
+            // only fuse ops if src1 is in the same Metal buffer
+            ggml_metal_buffer_id bid_fuse = ggml_metal_get_buffer_id(ops[n_fuse + 1]->src[1]);
+            if (bid_fuse.metal != bid_src1.metal) {
+                break;
+            }
+
+            //ctx->fuse_cnt[ops[n_fuse + 1]->op]++;
+
+            args.o1[n_fuse + 1] = bid_fuse.offs;
+        }
+
+        ++n_fuse;
+
+        if (debug_fusion > 1 && n_fuse > 1) {
+            GGML_LOG_DEBUG("%s: fuse: ADD x %d\n", __func__, n_fuse);
+        }
+    }
+
+    // the offsets of src1 and all fused buffers are relative to the start of the src1 buffer
+    bid_src1.offs = 0;
+
+    ggml_metal_pipeline_t pipeline = nullptr;
+
+    if (ggml_nelements(op->src[1]) == ne10 && ggml_is_contiguous(op->src[1]) && ne00 % 4 == 0 && ne10 % 4 == 0) {
+        GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+
+        // src1 is a row
+        GGML_ASSERT(ne11 == 1);
+
+        pipeline = ggml_metal_library_get_pipeline_bin(lib, op->op, n_fuse, true);
+
+        bcast_row = true;
+    } else {
+        pipeline = ggml_metal_library_get_pipeline_bin(lib, op->op, n_fuse, false);
+    }
+
+    if (n_fuse > 1) {
+        bid_dst = ggml_metal_get_buffer_id(ops[n_fuse - 1]);
+
+        for (int i = 1; i < n_fuse; ++i) {
+            if (!ggml_metal_op_concurrency_check(ctx, ops[i])) {
+                ggml_metal_op_concurrency_reset(ctx);
+
+                break;
+            }
+        }
+    }
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, bid_src0, 1);
+    ggml_metal_encoder_set_buffer  (enc, bid_src1, 2);
+    ggml_metal_encoder_set_buffer  (enc, bid_dst,  3);
+
+    if (bcast_row) {
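+        // the contiguous row kernel presumably processes float4 elements, hence the /4 below
+        // (ne00 % 4 == 0 and ne10 % 4 == 0 were checked when selecting this path)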
+        const int64_t n = ggml_nelements(op)/4;
+
+        ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1);
+    } else {
+        int nth = 32;
+
+        while (16*nth < ne0 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+            nth *= 2;
+        }
+
+        ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+    }
+
+    return n_fuse;
+}
+
+int ggml_metal_op_rms_norm(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    const int idx_end = ctx->idx_end;
+
+    const bool use_fusion = ctx->use_fusion;
+
+    const int debug_fusion = ctx->debug_fusion;
+
+    ggml_tensor ** ops = ggml_graph_nodes(gf) + idx;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    float eps;
+    memcpy(&eps, op->op_params, sizeof(float));
+
+    ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]);
+    ggml_metal_buffer_id bid_dst  = ggml_metal_get_buffer_id(op);
+
+    ggml_metal_kargs_rms_norm args = {
+        /*.ne00   =*/ ne00,
+        /*.ne00_4 =*/ ne00/4,
+        /*.nb1    =*/ nb1,
+        /*.nb2    =*/ nb2,
+        /*.nb3    =*/ nb3,
+        /*.eps    =*/ eps,
+        /*.nef1   =*/ { ne01 },
+        /*.nef2   =*/ { ne02 },
+        /*.nef3   =*/ { ne03 },
+        /*.nbf1   =*/ { nb01 },
+        /*.nbf2   =*/ { nb02 },
+        /*.nbf3   =*/ { nb03 },
+    };
+
+    ggml_op fops[8];
+
+    int n_fuse = 1;
+
+    ggml_metal_buffer_id bid_fuse[2] = { bid_src0, bid_src0 };
+
+    // d[0] = rms_norm(a)
+    // d[1] = mul(d[0], b)
+    // d[2] = add(d[1], c)
+    if (use_fusion) {
+        fops[0] = GGML_OP_RMS_NORM;
+        fops[1] = GGML_OP_MUL;
+        fops[2] = GGML_OP_ADD;
+
+        for (n_fuse = 0; n_fuse <= 1 && idx + n_fuse + 1 < idx_end; ++n_fuse) {
+            if (!ggml_can_fuse(gf, idx + n_fuse, fops + n_fuse, 2)) {
+                break;
+            }
+
+            if (ops[n_fuse] != ops[n_fuse + 1]->src[0]) {
+                break;
+            }
+
+            if (ops[n_fuse + 1]->src[1]->ne[0] != op->ne[0]) {
+                break;
+            }
+
+            if (!ggml_is_contiguous_rows(ops[n_fuse + 1]->src[1])) {
+                break;
+            }
+
+            if (ops[n_fuse + 1]->type != GGML_TYPE_F32) {
+                break;
+            }
+
+            //ctx->fuse_cnt[ops[n_fuse + 1]->op]++;
+
+            bid_fuse[n_fuse] = ggml_metal_get_buffer_id(ops[n_fuse + 1]->src[1]);
+
+            args.nef1[n_fuse + 1] = ops[n_fuse + 1]->src[1]->ne[1];
+            args.nef2[n_fuse + 1] = ops[n_fuse + 1]->src[1]->ne[2];
+            args.nef3[n_fuse + 1] = ops[n_fuse + 1]->src[1]->ne[3];
+
+            args.nbf1[n_fuse + 1] = ops[n_fuse + 1]->src[1]->nb[1];
+            args.nbf2[n_fuse + 1] = ops[n_fuse + 1]->src[1]->nb[2];
+            args.nbf3[n_fuse + 1] = ops[n_fuse + 1]->src[1]->nb[3];
+        }
+
+        ++n_fuse;
+
+        if (debug_fusion > 1 && n_fuse > 1) {
+            if (n_fuse == 2) {
+                GGML_LOG_DEBUG("%s: fuse: RMS_NORM + MUL\n", __func__);
+            }
+            if (n_fuse == 3) {
+                GGML_LOG_DEBUG("%s: fuse: RMS_NORM + MUL + ADD\n", __func__);
+            }
+        }
+    }
+
+    if (n_fuse > 1) {
+        bid_dst = ggml_metal_get_buffer_id(ops[n_fuse - 1]);
+
+        for (int i = 1; i < n_fuse; ++i) {
+            if (!ggml_metal_op_concurrency_check(ctx, ops[i])) {
+                ggml_metal_op_concurrency_reset(ctx);
+
+                break;
+            }
+        }
+    }
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_rms_norm(lib, op, n_fuse);
+
+    int nth = 32; // SIMD width
+
+    while (nth < ne00/4 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+        nth *= 2;
+    }
+
+    nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+    nth = std::min(nth, ne00/4);
+
+    const size_t smem = ggml_metal_pipeline_get_smem(pipeline);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, bid_src0, 1);
+    ggml_metal_encoder_set_buffer  (enc, bid_fuse[0], 2);
+    ggml_metal_encoder_set_buffer  (enc, bid_fuse[1], 3);
+    ggml_metal_encoder_set_buffer  (enc, bid_dst, 4);
+
+    ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+
+    return n_fuse;
+}
+
+int ggml_metal_op_l2_norm(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    float eps;
+    memcpy(&eps, op->op_params, sizeof(float));
+
+    int nth = 32; // SIMD width
+
+    ggml_metal_kargs_l2_norm args = {
+        /*.ne00   =*/ ne00,
+        /*.ne00_4 =*/ ne00/4,
+        /*.nb01   =*/ nb01,
+        /*.eps    =*/ eps,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_l2_norm(lib, op);
+
+    while (nth < ne00/4 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+        nth *= 2;
+    }
+
+    nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+    nth = std::min(nth, ne00/4);
+
+    const size_t smem = ggml_metal_pipeline_get_smem(pipeline);
+
+    const int64_t nrows = ggml_nrows(op->src[0]);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_group_norm(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    const int32_t ngrp = ((const int32_t *) op->op_params)[0];
+
+    float eps;
+    memcpy(&eps, op->op_params + 1, sizeof(float));
+
+    ggml_metal_kargs_group_norm args = {
+        /*.ne00 =*/ ne00,
+        /*.ne01 =*/ ne01,
+        /*.ne02 =*/ ne02,
+        /*.nb00 =*/ nb00,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.ngrp =*/ ngrp,
+        /*.eps  =*/ eps,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_group_norm(lib, op);
+
+    int nth = 32; // SIMD width
+    //while (nth < ne00/4 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+    //    nth *= 2;
+    //}
+
+    //nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+    //nth = std::min(nth, ne00/4);
+
+    const size_t smem = ggml_metal_pipeline_get_smem(pipeline);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ngrp, 1, 1, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_norm(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    float eps;
+    memcpy(&eps, op->op_params, sizeof(float));
+
+    ggml_metal_kargs_norm args = {
+        /*.ne00   =*/ ne00,
+        /*.ne00_4 =*/ ne00/4,
+        /*.nb01   =*/ nb01,
+        /*.eps    =*/ eps,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_norm(lib, op);
+
+    int nth = 32; // SIMD width
+    while (nth < ne00/4 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+        nth *= 2;
+    }
+
+    nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+    nth = std::min(nth, ne00/4);
+
+    const size_t smem = ggml_metal_pipeline_get_smem(pipeline);
+
+    const int64_t nrows = ggml_nrows(op->src[0]);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_rope(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    // make sure we have one or more position ids (ne10) per token (ne02)
+    GGML_ASSERT(ne10 % ne02 == 0);
+    GGML_ASSERT(ne10 >= ne02);
+
+    const int nth = std::min(1024, ne00);
+
+    const int n_past     = ((const int32_t *) op->op_params)[0];
+    const int n_dims     = ((const int32_t *) op->op_params)[1];
+  //const int mode       = ((const int32_t *) op->op_params)[2];
+    // op_params[3] is n_ctx, used only by GLM RoPE, which is not implemented in Metal
+    const int n_ctx_orig = ((const int32_t *) op->op_params)[4];
+
+    float freq_base;
+    float freq_scale;
+    float ext_factor;
+    float attn_factor;
+    float beta_fast;
+    float beta_slow;
+
+    memcpy(&freq_base,   (const int32_t *) op->op_params +  5, sizeof(float));
+    memcpy(&freq_scale,  (const int32_t *) op->op_params +  6, sizeof(float));
+    memcpy(&ext_factor,  (const int32_t *) op->op_params +  7, sizeof(float));
+    memcpy(&attn_factor, (const int32_t *) op->op_params +  8, sizeof(float));
+    memcpy(&beta_fast,   (const int32_t *) op->op_params +  9, sizeof(float));
+    memcpy(&beta_slow,   (const int32_t *) op->op_params + 10, sizeof(float));
+
+    // mrope
+    const int sect_0 = ((const int32_t *) op->op_params)[11];
+    const int sect_1 = ((const int32_t *) op->op_params)[12];
+    const int sect_2 = ((const int32_t *) op->op_params)[13];
+    const int sect_3 = ((const int32_t *) op->op_params)[14];
+
+    ggml_metal_kargs_rope args = {
+        /*.ne00        =*/ ne00,
+        /*.ne01        =*/ ne01,
+        /*.ne02        =*/ ne02,
+        /*.ne03        =*/ ne03,
+        /*.nb00        =*/ nb00,
+        /*.nb01        =*/ nb01,
+        /*.nb02        =*/ nb02,
+        /*.nb03        =*/ nb03,
+        /*.ne0         =*/ ne0,
+        /*.ne1         =*/ ne1,
+        /*.ne2         =*/ ne2,
+        /*.ne3         =*/ ne3,
+        /*.nb0         =*/ nb0,
+        /*.nb1         =*/ nb1,
+        /*.nb2         =*/ nb2,
+        /*.nb3         =*/ nb3,
+        /*.n_past      =*/ n_past,
+        /*.n_dims      =*/ n_dims,
+        /*.n_ctx_orig  =*/ n_ctx_orig,
+        /*.freq_base   =*/ freq_base,
+        /*.freq_scale  =*/ freq_scale,
+        /*.ext_factor  =*/ ext_factor,
+        /*.attn_factor =*/ attn_factor,
+        /*.beta_fast   =*/ beta_fast,
+        /*.beta_slow   =*/ beta_slow,
+        /*.sect_0      =*/ sect_0,
+        /*.sect_1      =*/ sect_1,
+        /*.sect_2      =*/ sect_2,
+        /*.sect_3      =*/ sect_3,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_rope(lib, op);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
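+    // src[2] holds the optional frequency factors; when absent, bind src[0] as a placeholder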
+    if (op->src[2]) {
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[2]), 3);
+    } else {
+        ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 3);
+    }
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         4);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_im2col(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    const int32_t s0 = ((const int32_t *)(op->op_params))[0];
+    const int32_t s1 = ((const int32_t *)(op->op_params))[1];
+    const int32_t p0 = ((const int32_t *)(op->op_params))[2];
+    const int32_t p1 = ((const int32_t *)(op->op_params))[3];
+    const int32_t d0 = ((const int32_t *)(op->op_params))[4];
+    const int32_t d1 = ((const int32_t *)(op->op_params))[5];
+
+    const bool is_2D = ((const int32_t *)(op->op_params))[6] == 1;
+
+    const int32_t N  = op->src[1]->ne[is_2D ? 3 : 2];
+    const int32_t IC = op->src[1]->ne[is_2D ? 2 : 1];
+    const int32_t IH = is_2D ? op->src[1]->ne[1] : 1;
+    const int32_t IW =         op->src[1]->ne[0];
+
+    const int32_t KH = is_2D ? op->src[0]->ne[1] : 1;
+    const int32_t KW =         op->src[0]->ne[0];
+
+    const int32_t OH = is_2D ? op->ne[2] : 1;
+    const int32_t OW =         op->ne[1];
+
+    const int32_t CHW = IC * KH * KW;
+
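+    // byte strides converted to element offsets (assumes 4-byte F32 elements in src1)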
+    const uint64_t ofs0 = op->src[1]->nb[is_2D ? 3 : 2] / 4;
+    const uint64_t ofs1 = op->src[1]->nb[is_2D ? 2 : 1] / 4;
+
+    ggml_metal_kargs_im2col args = {
+        /*.ofs0 =*/ ofs0,
+        /*.ofs1 =*/ ofs1,
+        /*.IW   =*/ IW,
+        /*.IH   =*/ IH,
+        /*.CHW  =*/ CHW,
+        /*.s0   =*/ s0,
+        /*.s1   =*/ s1,
+        /*.p0   =*/ p0,
+        /*.p1   =*/ p1,
+        /*.d0   =*/ d0,
+        /*.d1   =*/ d1,
+        /*.N    =*/ N,
+        /*.KH   =*/ KH,
+        /*.KW   =*/ KW,
+        /*.KHW  =*/ KH * KW,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_im2col(lib, op);
+
+    const uint64_t n_threads = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), N);
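+    // number of threadgroups needed to cover N (ceiling division)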
+    const int64_t  quotient  = N / n_threads + (N % n_threads > 0 ? 1 : 0);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, quotient * CHW, OH, OW, n_threads, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_conv_transpose_1d(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    const int32_t s0 = ((const int32_t *)(op->op_params))[0];
+
+    const int32_t IC = op->src[1]->ne[1];
+    const int32_t IL = op->src[1]->ne[0];
+
+    const int32_t K  = op->src[0]->ne[0];
+
+    const int32_t OL = op->ne[0];
+    const int32_t OC = op->ne[1];
+
+    ggml_metal_kargs_conv_transpose_1d args = {
+        /*.IC  =*/ IC,
+        /*.IL  =*/ IL,
+        /*.K   =*/ K,
+        /*.s0  =*/ s0,
+        /*.nb0 =*/ nb0,
+        /*.nb1 =*/ nb1,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_conv_transpose_1d(lib, op);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         3);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, OL, OC, 1, 1, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_upscale(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    const float sf0 = (float)ne0/op->src[0]->ne[0];
+    const float sf1 = (float)ne1/op->src[0]->ne[1];
+    const float sf2 = (float)ne2/op->src[0]->ne[2];
+    const float sf3 = (float)ne3/op->src[0]->ne[3];
+
+    ggml_metal_kargs_upscale args = {
+        /*.ne00 =*/ ne00,
+        /*.ne01 =*/ ne01,
+        /*.ne02 =*/ ne02,
+        /*.ne03 =*/ ne03,
+        /*.nb00 =*/ nb00,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.nb03 =*/ nb03,
+        /*.ne0 =*/ ne0,
+        /*.ne1 =*/ ne1,
+        /*.ne2 =*/ ne2,
+        /*.ne3 =*/ ne3,
+        /*.nb0 =*/ nb0,
+        /*.nb1 =*/ nb1,
+        /*.nb2 =*/ nb2,
+        /*.nb3 =*/ nb3,
+        /*.sf0 =*/ sf0,
+        /*.sf1 =*/ sf1,
+        /*.sf2 =*/ sf2,
+        /*.sf3 =*/ sf3
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_upscale(lib, op);
+
+    const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne0);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_pad(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    ggml_metal_kargs_pad args = {
+        /*.ne00 =*/ ne00,
+        /*.ne01 =*/ ne01,
+        /*.ne02 =*/ ne02,
+        /*.ne03 =*/ ne03,
+        /*.nb00 =*/ nb00,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.nb03 =*/ nb03,
+        /*.ne0  =*/ ne0,
+        /*.ne1  =*/ ne1,
+        /*.ne2  =*/ ne2,
+        /*.ne3  =*/ ne3,
+        /*.nb0  =*/ nb0,
+        /*.nb1  =*/ nb1,
+        /*.nb2  =*/ nb2,
+        /*.nb3  =*/ nb3
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_pad(lib, op);
+
+    const int nth = std::min(1024, ne0);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_pad_reflect_1d(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    ggml_metal_kargs_pad_reflect_1d args = {
+        /*.ne00 =*/ ne00,
+        /*.ne01 =*/ ne01,
+        /*.ne02 =*/ ne02,
+        /*.ne03 =*/ ne03,
+        /*.nb00 =*/ nb00,
+        /*.nb01 =*/ nb01,
+        /*.nb02 =*/ nb02,
+        /*.nb03 =*/ nb03,
+        /*.ne0  =*/ ne0,
+        /*.ne1  =*/ ne1,
+        /*.ne2  =*/ ne2,
+        /*.ne3  =*/ ne3,
+        /*.nb0  =*/ nb0,
+        /*.nb1  =*/ nb1,
+        /*.nb2  =*/ nb2,
+        /*.nb3  =*/ nb3,
+        /*.p0 =*/ ((const int32_t *)(op->op_params))[0],
+        /*.p1 =*/ ((const int32_t *)(op->op_params))[1]
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_pad_reflect_1d(lib, op);
+
+    const int nth = std::min(1024, ne0);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_arange(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    float start;
+    float step;
+
+    memcpy(&start, ((const int32_t *) op->op_params) + 0, sizeof(float));
+    memcpy(&step,  ((const int32_t *) op->op_params) + 2, sizeof(float));
+
+    ggml_metal_kargs_arange args = {
+        /*.ne0   =*/ ne0,
+        /*.start =*/ start,
+        /*.step  =*/ step
+    };
+
+    const int nth = std::min(1024, ne0);
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_arange(lib, op);
+
+    //[encoder setComputePipelineState:pipeline];
+    //[encoder setBuffer:id_dst  offset:offs_dst  atIndex:0];
+    //[encoder setBytes:&args length:sizeof(args) atIndex:1];
+
+    //[encoder dispatchThreadgroups:MTLSizeMake(1, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op), 1);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, 1, 1, 1, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_timestep_embedding(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    const int dim        = op->op_params[0];
+    const int max_period = op->op_params[1];
+
+    ggml_metal_kargs_timestep_embedding args = {
+        /*.nb1 =*/ nb1,
+        /*.dim =*/ dim,
+        /*.max_period =*/ max_period,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_timestep_embedding(lib, op);
+
+    const int nth = std::max(1, std::min(1024, dim/2));
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, ne00, 1, 1, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_argmax(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    ggml_metal_kargs_argmax args = {
+        /*.ne00 = */ ne00,
+        /*.nb01 = */ nb01,
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_argmax(lib, op);
+
+    const int64_t nrows = ggml_nrows(op->src[0]);
+
+    int nth = 32; // SIMD width
+    while (nth < ne00 && nth*ne01*ne02*ne03 < 256) {
+        nth *= 2;
+    }
+
+    const size_t smem = ggml_metal_pipeline_get_smem(pipeline);
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, nth, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_argsort(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    // bitonic sort requires the number of elements to be a power of 2
+    int64_t ne00_padded = 1;
+    while (ne00_padded < ne00) {
+        ne00_padded *= 2;
+    }
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_argsort(lib, op);
+
+    const int64_t nrows = ggml_nrows(op->src[0]);
+
+    // the Metal API requires the threadgroup memory length to be a multiple of 16 bytes
+    // https://developer.apple.com/documentation/metal/mtlcomputecommandencoder/1443142-setthreadgroupmemorylength
+    const size_t smem = GGML_PAD(ne00_padded*sizeof(int32_t), 16);
+
+    ggml_metal_kargs_argsort args = {
+        /*.ncols =*/ ne00,
+        /*.ncols_pad =*/ ne00_padded
+    };
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
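+    // one threadgroup per row, with ne00_padded threads forming the bitonic sort network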
+    ggml_metal_encoder_dispatch_threadgroups(enc, 1, nrows, 1, ne00_padded, 1, 1);
+
+    return 1;
+}
+
+int ggml_metal_op_leaky_relu(ggml_metal_op_t ctx, int idx) {
+    ggml_cgraph * gf = ctx->gf;
+    ggml_tensor * op = ggml_graph_node(gf, idx);
+
+    ggml_metal_library_t lib = ctx->lib;
+    ggml_metal_encoder_t enc = ctx->enc;
+
+    GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+    GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+    GGML_TENSOR_LOCALS( int32_t, ne,  op,         ne);
+    GGML_TENSOR_LOCALS(uint32_t, nb,  op,         nb);
+
+    float slope;
+    memcpy(&slope, op->op_params, sizeof(float));
+
+    ggml_metal_kargs_leaky_relu args = {
+        /*.slope =*/ slope
+    };
+
+    ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_unary(lib, op);
+
+    int64_t n = ggml_nelements(op);
+
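+    // when the element count is a multiple of 4, dispatch in float4 units
+    // (the unary pipeline is expected to select a vectorized kernel variant in this case)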
+    if (n % 4 == 0) {
+        n /= 4;
+    }
+
+    ggml_metal_encoder_set_pipeline(enc, pipeline);
+    ggml_metal_encoder_set_bytes   (enc, &args, sizeof(args), 0);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+    ggml_metal_encoder_set_buffer  (enc, ggml_metal_get_buffer_id(op),         2);
+
+    ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1);
+
+    return 1;
+}
diff --git a/ggml/src/ggml-metal/ggml-metal-ops.h b/ggml/src/ggml-metal/ggml-metal-ops.h
new file mode 100644 (file)
index 0000000..b620de1
--- /dev/null
@@ -0,0 +1,81 @@
+#pragma once
+
+#include "ggml-metal-device.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct ggml_metal_op * ggml_metal_op_t;
+
+ggml_metal_op_t ggml_metal_op_init(
+        ggml_metal_device_t dev,
+        ggml_metal_cmd_buf_t cmd_buf,
+        struct ggml_cgraph * gf,
+        int  idx_start,
+        int  idx_end,
+        bool use_fusion,
+        bool use_concurrency,
+        bool use_capture,
+        int  debug_graph,
+        int  debug_fusion);
+
+void ggml_metal_op_free(ggml_metal_op_t ctx);
+
+int ggml_metal_op_encode(ggml_metal_op_t ctx, int idx);
+
+//
+// available ops:
+//
+
+// tokens per expert
+size_t ggml_metal_op_mul_mat_id_extra_tpe(const struct ggml_tensor * op);
+
+// id map [n_tokens, n_expert]
+size_t ggml_metal_op_mul_mat_id_extra_ids(const struct ggml_tensor * op);
+
+// return true if we should use the FA vector kernel for this op
+bool ggml_metal_op_flash_attn_ext_use_vec(const struct ggml_tensor * op);
+
+size_t ggml_metal_op_flash_attn_ext_extra_tmp(const struct ggml_tensor * op);
+
+int ggml_metal_op_concat            (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_repeat            (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_acc               (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_scale             (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_clamp             (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_unary             (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_glu               (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_sum_rows          (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_get_rows          (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_set_rows          (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_soft_max          (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_ssm_conv          (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_ssm_scan          (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_rwkv              (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_cpy               (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_pool_2d           (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_mul_mat           (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_mul_mat_id        (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_add_id            (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_flash_attn_ext    (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_bin               (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_rms_norm          (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_l2_norm           (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_group_norm        (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_norm              (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_rope              (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_im2col            (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_conv_transpose_1d (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_upscale           (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_pad               (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_pad_reflect_1d    (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_arange            (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_timestep_embedding(ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_argmax            (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_argsort           (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_leaky_relu        (ggml_metal_op_t ctx, int idx);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/ggml/src/ggml-metal/ggml-metal.cpp b/ggml/src/ggml-metal/ggml-metal.cpp
new file mode 100644 (file)
index 0000000..fd0e6ed
--- /dev/null
@@ -0,0 +1,718 @@
+#include "ggml-metal.h"
+
+#include "ggml-impl.h"
+#include "ggml-backend-impl.h"
+
+#include "ggml-metal-device.h"
+#include "ggml-metal-context.h"
+#include "ggml-metal-ops.h"
+
+// globals
+
+// initialized in ggml_backend_metal_reg
+static ggml_backend_reg    g_ggml_metal_reg;
+static ggml_backend_device g_ggml_metal_device;
+
+////////////////////////////////////////////////////////////////////////////////
+// backend interface
+////////////////////////////////////////////////////////////////////////////////
+
+// shared buffer
+
+static void ggml_backend_metal_buffer_shared_free_buffer(ggml_backend_buffer_t buffer) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+    ggml_metal_buffer_free(ctx);
+}
+
+static void * ggml_backend_metal_buffer_shared_get_base(ggml_backend_buffer_t buffer) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+    return ggml_metal_buffer_get_base(ctx);
+}
+
+static void ggml_backend_metal_buffer_shared_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+    ggml_metal_buffer_memset_tensor(ctx, tensor, value, offset, size);
+}
+
+static void ggml_backend_metal_buffer_shared_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+    ggml_metal_buffer_set_tensor(ctx, tensor, data, offset, size);
+}
+
+static void ggml_backend_metal_buffer_shared_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+    ggml_metal_buffer_get_tensor(ctx, tensor, data, offset, size);
+}
+
+static bool ggml_backend_metal_buffer_shared_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+    GGML_UNUSED(buffer);
+    GGML_UNUSED(src);
+    GGML_UNUSED(dst);
+
+    return false;
+}
+
+static void ggml_backend_metal_buffer_shared_clear(ggml_backend_buffer_t buffer, uint8_t value) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+    ggml_metal_buffer_clear(ctx, value);
+}
+
+static ggml_backend_buffer_i ggml_backend_metal_buffer_shared_i = {
+    /* .free_buffer     = */ ggml_backend_metal_buffer_shared_free_buffer,
+    /* .get_base        = */ ggml_backend_metal_buffer_shared_get_base,
+    /* .init_tensor     = */ NULL,
+    /* .memset_tensor   = */ ggml_backend_metal_buffer_shared_memset_tensor,
+    /* .set_tensor      = */ ggml_backend_metal_buffer_shared_set_tensor,
+    /* .get_tensor      = */ ggml_backend_metal_buffer_shared_get_tensor,
+    /* .cpy_tensor      = */ ggml_backend_metal_buffer_shared_cpy_tensor,
+    /* .clear           = */ ggml_backend_metal_buffer_shared_clear,
+    /* .reset           = */ NULL,
+};
+
+// private buffer
+
+static void ggml_backend_metal_buffer_private_free_buffer(ggml_backend_buffer_t buffer) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+    ggml_metal_buffer_free(ctx);
+}
+
+static void * ggml_backend_metal_buffer_private_get_base(ggml_backend_buffer_t buffer) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+    return ggml_metal_buffer_get_base(ctx);
+}
+
+static void ggml_backend_metal_buffer_private_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+    ggml_metal_buffer_memset_tensor(ctx, tensor, value, offset, size);
+}
+
+static void ggml_backend_metal_buffer_private_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+    ggml_metal_buffer_set_tensor(ctx, tensor, data, offset, size);
+}
+
+static void ggml_backend_metal_buffer_private_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+    ggml_metal_buffer_get_tensor(ctx, tensor, data, offset, size);
+}
+
+static bool ggml_backend_metal_buffer_private_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+    GGML_UNUSED(buffer);
+    GGML_UNUSED(src);
+    GGML_UNUSED(dst);
+
+    return false;
+}
+
+static void ggml_backend_metal_buffer_private_clear(ggml_backend_buffer_t buffer, uint8_t value) {
+    ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+    GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+    ggml_metal_buffer_clear(ctx, value);
+}
+
+static ggml_backend_buffer_i ggml_backend_metal_buffer_private_i = {
+    /* .free_buffer     = */ ggml_backend_metal_buffer_private_free_buffer,
+    /* .get_base        = */ ggml_backend_metal_buffer_private_get_base,
+    /* .init_tensor     = */ NULL,
+    /* .memset_tensor   = */ ggml_backend_metal_buffer_private_memset_tensor,
+    /* .set_tensor      = */ ggml_backend_metal_buffer_private_set_tensor,
+    /* .get_tensor      = */ ggml_backend_metal_buffer_private_get_tensor,
+    /* .cpy_tensor      = */ ggml_backend_metal_buffer_private_cpy_tensor,
+    /* .clear           = */ ggml_backend_metal_buffer_private_clear,
+    /* .reset           = */ NULL,
+};
+
+//
+// buffer types
+//
+
+// common method for allocating shared or private Metal buffers
+static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size, bool shared) {
+    ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context;
+    ggml_metal_buffer_t res = ggml_metal_buffer_init(ctx_dev, size, shared);
+
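+    // pick the buffer interface based on the buffer that was actually created, since the
+    // device may not honor the shared request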
+    ggml_backend_buffer_i buf_i = ggml_metal_buffer_is_shared(res)
+        ? ggml_backend_metal_buffer_shared_i
+        : ggml_backend_metal_buffer_private_i;
+
+    return ggml_backend_buffer_init(buft, buf_i, res, size);
+}
+
+static size_t ggml_backend_metal_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+    size_t res = ggml_nbytes(tensor);
+
+    // some operations require additional memory for fleeting data:
+    switch (tensor->op) {
+        case GGML_OP_MUL_MAT_ID:
+            {
+                res += ggml_metal_op_mul_mat_id_extra_tpe(tensor);
+                res += ggml_metal_op_mul_mat_id_extra_ids(tensor);
+            } break;
+        case GGML_OP_FLASH_ATTN_EXT:
+            {
+                if (ggml_metal_op_flash_attn_ext_use_vec(tensor)) {
+                    res += ggml_metal_op_flash_attn_ext_extra_tmp(tensor);
+                }
+            } break;
+        default:
+            break;
+    }
+
+    return res;
+
+    GGML_UNUSED(buft);
+}
+
+// default (shared) buffer type
+
+static const char * ggml_backend_metal_buffer_type_shared_get_name(ggml_backend_buffer_type_t buft) {
+    return "Metal";
+
+    GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_t ggml_backend_metal_buffer_type_shared_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+    return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, true);
+}
+
+static size_t ggml_backend_metal_buffer_type_shared_get_alignment(ggml_backend_buffer_type_t buft) {
+    return 32;
+
+    GGML_UNUSED(buft);
+}
+
+static size_t ggml_backend_metal_buffer_type_shared_get_max_size(ggml_backend_buffer_type_t buft) {
+    ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context;
+
+    return ggml_metal_device_get_props(ctx_dev)->max_buffer_size;
+}
+
+static size_t ggml_backend_metal_buffer_type_shared_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+    return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor);
+}
+
+static bool ggml_backend_metal_buffer_type_shared_is_host(ggml_backend_buffer_type_t buft) {
+    return false;
+
+    GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_shared(void) {
+    static ggml_backend_buffer_type ggml_backend_buffer_type_metal = {
+        /* .iface = */ {
+            /* .get_name         = */ ggml_backend_metal_buffer_type_shared_get_name,
+            /* .alloc_buffer     = */ ggml_backend_metal_buffer_type_shared_alloc_buffer,
+            /* .get_alignment    = */ ggml_backend_metal_buffer_type_shared_get_alignment,
+            /* .get_max_size     = */ ggml_backend_metal_buffer_type_shared_get_max_size,
+            /* .get_alloc_size   = */ ggml_backend_metal_buffer_type_shared_get_alloc_size,
+            /* .is_host          = */ ggml_backend_metal_buffer_type_shared_is_host,
+        },
+        /* .device  = */ &g_ggml_metal_device,
+        /* .context = */ NULL,
+    };
+
+    return &ggml_backend_buffer_type_metal;
+}
+
+// default (private) buffer type
+
+static const char * ggml_backend_metal_buffer_type_private_get_name(ggml_backend_buffer_type_t buft) {
+    return "Metal_Private";
+
+    GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_t ggml_backend_metal_buffer_type_private_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+    return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, false);
+}
+
+static size_t ggml_backend_metal_buffer_type_private_get_alignment(ggml_backend_buffer_type_t buft) {
+    return 32;
+
+    GGML_UNUSED(buft);
+}
+
+static size_t ggml_backend_metal_buffer_type_private_get_max_size(ggml_backend_buffer_type_t buft) {
+    ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context;
+
+    return ggml_metal_device_get_props(ctx_dev)->max_buffer_size;
+}
+
+static size_t ggml_backend_metal_buffer_type_private_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+    return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor);
+}
+
+static bool ggml_backend_metal_buffer_type_private_is_host(ggml_backend_buffer_type_t buft) {
+    return false;
+
+    GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_private(void) {
+    static ggml_backend_buffer_type ggml_backend_buffer_type_metal = {
+        /* .iface = */ {
+            /* .get_name         = */ ggml_backend_metal_buffer_type_private_get_name,
+            /* .alloc_buffer     = */ ggml_backend_metal_buffer_type_private_alloc_buffer,
+            /* .get_alignment    = */ ggml_backend_metal_buffer_type_private_get_alignment,
+            /* .get_max_size     = */ ggml_backend_metal_buffer_type_private_get_max_size,
+            /* .get_alloc_size   = */ ggml_backend_metal_buffer_type_private_get_alloc_size,
+            /* .is_host          = */ ggml_backend_metal_buffer_type_private_is_host,
+        },
+        /* .device  = */ &g_ggml_metal_device,
+        /* .context = */ NULL,
+    };
+
+    return &ggml_backend_buffer_type_metal;
+}
+
+// mapped buffer type
+
+static const char * ggml_backend_metal_buffer_type_mapped_get_name(ggml_backend_buffer_type_t buft) {
+    return "Metal_Mapped";
+
+    GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_t ggml_backend_metal_buffer_type_mapped_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+    // for mapped buffers, prefer shared memory
+    return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, true);
+}
+
+static size_t ggml_backend_metal_buffer_type_mapped_get_alignment(ggml_backend_buffer_type_t buft) {
+    return 32;
+
+    GGML_UNUSED(buft);
+}
+
+static size_t ggml_backend_metal_buffer_type_mapped_get_max_size(ggml_backend_buffer_type_t buft) {
+    ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context;
+
+    return ggml_metal_device_get_props(ctx_dev)->max_buffer_size;
+}
+
+static size_t ggml_backend_metal_buffer_type_mapped_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+    return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor);
+}
+
+static bool ggml_backend_metal_buffer_type_mapped_is_host(ggml_backend_buffer_type_t buft) {
+    return false;
+
+    GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_mapped(void) {
+    // note: not obvious, but this buffer type still needs to implement .alloc_buffer:
+    //       https://github.com/ggml-org/llama.cpp/pull/15832#discussion_r2333177099
+    static ggml_backend_buffer_type ggml_backend_buffer_type_mapped_metal = {
+        /* .iface = */ {
+            /* .get_name         = */ ggml_backend_metal_buffer_type_mapped_get_name,
+            /* .alloc_buffer     = */ ggml_backend_metal_buffer_type_mapped_alloc_buffer,
+            /* .get_alignment    = */ ggml_backend_metal_buffer_type_mapped_get_alignment,
+            /* .get_max_size     = */ ggml_backend_metal_buffer_type_mapped_get_max_size,
+            /* .get_alloc_size   = */ ggml_backend_metal_buffer_type_mapped_get_alloc_size,
+            /* .is_host          = */ ggml_backend_metal_buffer_type_mapped_is_host,
+        },
+        /* .device  = */ &g_ggml_metal_device,
+        /* .context = */ NULL,
+    };
+
+    return &ggml_backend_buffer_type_mapped_metal;
+}
+
+// backend
+
+static const char * ggml_backend_metal_name(ggml_backend_t backend) {
+    return "Metal";
+
+    GGML_UNUSED(backend);
+}
+
+static void ggml_backend_metal_free(ggml_backend_t backend) {
+    ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+    // wait for any ongoing async operations to finish
+    ggml_metal_synchronize(ctx);
+
+    ggml_metal_free(ctx);
+
+    free(backend);
+}
+
+static void ggml_backend_metal_synchronize(ggml_backend_t backend) {
+    ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+    ggml_metal_synchronize(ctx);
+}
+
+static void ggml_backend_metal_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+    ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+    ggml_metal_set_tensor_async(ctx, tensor, data, offset, size);
+}
+
+static void ggml_backend_metal_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+    ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+    ggml_metal_get_tensor_async(ctx, tensor, data, offset, size);
+}
+
+static bool ggml_backend_metal_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst) {
+    return false;
+
+    GGML_UNUSED(backend_src);
+    GGML_UNUSED(backend_dst);
+    GGML_UNUSED(src);
+    GGML_UNUSED(dst);
+}
+
+static enum ggml_status ggml_backend_metal_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
+    ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+    return ggml_metal_graph_compute(ctx, cgraph);
+}
+
+static void ggml_backend_metal_graph_optimize(ggml_backend_t backend, ggml_cgraph * cgraph) {
+    ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+    ggml_metal_graph_optimize(ctx, cgraph);
+}
+
+static void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) {
+    GGML_ASSERT(ggml_backend_is_metal(backend));
+
+    ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+    ggml_metal_set_n_cb(ctx, n_cb);
+}
+
+static ggml_backend_i ggml_backend_metal_i = {
+    /* .get_name                = */ ggml_backend_metal_name,
+    /* .free                    = */ ggml_backend_metal_free,
+    /* .set_tensor_async        = */ ggml_backend_metal_set_tensor_async,
+    /* .get_tensor_async        = */ ggml_backend_metal_get_tensor_async,
+    /* .cpy_tensor_async        = */ ggml_backend_metal_cpy_tensor_async, // only needed for multi-GPU setups
+    /* .synchronize             = */ ggml_backend_metal_synchronize,
+    /* .graph_plan_create       = */ NULL,
+    /* .graph_plan_free         = */ NULL,
+    /* .graph_plan_update       = */ NULL,
+    /* .graph_plan_compute      = */ NULL,
+    /* .graph_compute           = */ ggml_backend_metal_graph_compute,
+
+    // the events API is needed only for multi-GPU setups, so likely no need to implement it for Metal
+    // in any case, these docs seem relevant if we ever decide to implement it:
+    // https://developer.apple.com/documentation/metal/mtlcommandbuffer#Synchronizing-Passes-with-Events
+    /* .event_record            = */ NULL,
+    /* .event_wait              = */ NULL,
+    /* .optimize_graph          = */ ggml_backend_metal_graph_optimize,
+};
+
+static ggml_guid_t ggml_backend_metal_guid(void) {
+    static ggml_guid guid = { 0x81, 0xa1, 0x8b, 0x1e, 0x71, 0xec, 0x79, 0xed, 0x2b, 0x85, 0xdc, 0x8a, 0x61, 0x98, 0x30, 0xe6 };
+    return &guid;
+}
+
+ggml_backend_t ggml_backend_metal_init(void) {
+    ggml_backend_dev_t dev = ggml_backend_reg_dev_get(ggml_backend_metal_reg(), 0);
+    ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+    ggml_metal_t ctx = ggml_metal_init(ctx_dev);
+    if (ctx == NULL) {
+        GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
+        return NULL;
+    }
+
+    ggml_backend_t backend = (ggml_backend_t) malloc(sizeof(ggml_backend));
+
+    *backend = {
+        /* .guid      = */ ggml_backend_metal_guid(),
+        /* .interface = */ ggml_backend_metal_i,
+        /* .device    = */ dev,
+        /* .context   = */ ctx,
+    };
+
+    ggml_backend_metal_set_n_cb(backend, 1);
+
+    return backend;
+}
+
+bool ggml_backend_is_metal(ggml_backend_t backend) {
+    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_metal_guid());
+}
+
+void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data) {
+    GGML_ASSERT(ggml_backend_is_metal(backend));
+
+    ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+    ggml_metal_set_abort_callback(ctx, abort_callback, user_data);
+}
+
+bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family) {
+    GGML_ASSERT(ggml_backend_is_metal(backend));
+
+    ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+    return ggml_metal_supports_family(ctx, family);
+}
+
+void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) {
+    GGML_ASSERT(ggml_backend_is_metal(backend));
+
+    ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+    ggml_metal_capture_next_compute(ctx);
+}
+
+// backend device
+
+static const char * ggml_backend_metal_device_get_name(ggml_backend_dev_t dev) {
+    return "Metal";
+
+    GGML_UNUSED(dev);
+}
+
+static const char * ggml_backend_metal_device_get_description(ggml_backend_dev_t dev) {
+    ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+    return ggml_metal_device_get_props(ctx_dev)->name;
+}
+
+static void ggml_backend_metal_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
+    ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+    ggml_metal_device_get_memory(ctx_dev, free, total);
+}
+
+static enum ggml_backend_dev_type ggml_backend_metal_device_get_type(ggml_backend_dev_t dev) {
+    return GGML_BACKEND_DEVICE_TYPE_GPU;
+
+    GGML_UNUSED(dev);
+}
+
+static void ggml_backend_metal_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) {
+    props->name        = ggml_backend_metal_device_get_name(dev);
+    props->description = ggml_backend_metal_device_get_description(dev);
+    props->type        = ggml_backend_metal_device_get_type(dev);
+
+    ggml_backend_metal_device_get_memory(dev, &props->memory_free, &props->memory_total);
+
+    props->caps = {
+        /* .async                 = */ true,
+        /* .host_buffer           = */ false,
+        /* .buffer_from_host_ptr  = */ true,
+        /* .events                = */ false,
+    };
+}
+
+static ggml_backend_t ggml_backend_metal_device_init(ggml_backend_dev_t dev, const char * params) {
+    ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+    ggml_metal_t ctx = ggml_metal_init(ctx_dev);
+    if (ctx == NULL) {
+        GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
+        return NULL;
+    }
+
+    ggml_backend_t backend = (ggml_backend_t) malloc(sizeof(ggml_backend));
+
+    *backend = {
+        /* .guid      = */ ggml_backend_metal_guid(),
+        /* .interface = */ ggml_backend_metal_i,
+        /* .device    = */ dev,
+        /* .context   = */ ctx,
+    };
+
+    ggml_backend_metal_set_n_cb(backend, 1);
+
+    return backend;
+
+    GGML_UNUSED(params);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_metal_device_get_buffer_type(ggml_backend_dev_t dev) {
+    ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+    const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx_dev);
+
+    return props_dev->use_shared_buffers ? ggml_backend_metal_buffer_type_shared() : ggml_backend_metal_buffer_type_private();
+}
+
+static ggml_backend_buffer_t ggml_backend_metal_device_buffer_mapped(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) {
+    ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+    ggml_metal_buffer_t res = ggml_metal_buffer_map(ctx_dev, ptr, size, max_tensor_size);
+
+    return ggml_backend_buffer_init(ggml_backend_metal_buffer_type_mapped(), ggml_backend_metal_buffer_shared_i, res, size);
+}
+
+static bool ggml_backend_metal_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
+    ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+    return ggml_metal_device_supports_op(ctx_dev, op);
+}
+
+static bool ggml_backend_metal_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
+    return
+        buft->iface.get_name == ggml_backend_metal_buffer_type_shared_get_name ||
+        buft->iface.get_name == ggml_backend_metal_buffer_type_private_get_name ||
+        buft->iface.get_name == ggml_backend_metal_buffer_type_mapped_get_name;
+
+    GGML_UNUSED(dev);
+}
+
+static int64_t get_op_batch_size(const ggml_tensor * op) {
+    switch (op->op) {
+        case GGML_OP_MUL_MAT:
+            return op->ne[1];
+        case GGML_OP_MUL_MAT_ID:
+            return op->ne[2];
+        default:
+            return ggml_nrows(op);
+    }
+}
+
+static bool ggml_backend_metal_device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
+    const int min_batch_size = 32;
+
+    return (op->op == GGML_OP_MUL_MAT ||
+            op->op == GGML_OP_MUL_MAT_ID) &&
+            get_op_batch_size(op) >= min_batch_size;
+
+    GGML_UNUSED(dev);
+    GGML_UNUSED(op);
+}
+
+static ggml_backend_device_i ggml_backend_metal_device_i = {
+    /* .get_name             = */ ggml_backend_metal_device_get_name,
+    /* .get_description      = */ ggml_backend_metal_device_get_description,
+    /* .get_memory           = */ ggml_backend_metal_device_get_memory,
+    /* .get_type             = */ ggml_backend_metal_device_get_type,
+    /* .get_props            = */ ggml_backend_metal_device_get_props,
+    /* .init_backend         = */ ggml_backend_metal_device_init,
+    /* .get_buffer_type      = */ ggml_backend_metal_device_get_buffer_type,
+    /* .get_host_buffer_type = */ NULL,
+    /* .buffer_from_host_ptr = */ ggml_backend_metal_device_buffer_mapped,
+    /* .supports_op          = */ ggml_backend_metal_device_supports_op,
+    /* .supports_buft        = */ ggml_backend_metal_device_supports_buft,
+    /* .offload_op           = */ ggml_backend_metal_device_offload_op,
+    /* .event_new            = */ NULL,
+    /* .event_free           = */ NULL,
+    /* .event_synchronize    = */ NULL,
+};
+
+// backend registry
+
+static const char * ggml_backend_metal_reg_get_name(ggml_backend_reg_t reg) {
+    return "Metal";
+
+    GGML_UNUSED(reg);
+}
+
+static size_t ggml_backend_metal_reg_device_count(ggml_backend_reg_t reg) {
+    return 1;
+
+    GGML_UNUSED(reg);
+}
+
+static ggml_backend_dev_t ggml_backend_metal_reg_device_get(ggml_backend_reg_t reg, size_t index) {
+    GGML_ASSERT(index == 0);
+
+    return &g_ggml_metal_device;
+
+    GGML_UNUSED(reg);
+    GGML_UNUSED(index);
+}
+
+static ggml_backend_feature g_ggml_backend_metal_features[] = {
+#if defined(GGML_METAL_EMBED_LIBRARY)
+    { "EMBED_LIBRARY", "1" },
+#endif
+    { NULL, NULL },
+};
+
+static ggml_backend_feature * ggml_backend_metal_get_features(ggml_backend_reg_t reg) {
+    return g_ggml_backend_metal_features;
+
+    GGML_UNUSED(reg);
+}
+
+static void * ggml_backend_metal_get_proc_address(ggml_backend_reg_t reg, const char * name) {
+    if (strcmp(name, "ggml_backend_get_features") == 0) {
+        return (void *)ggml_backend_metal_get_features;
+    }
+
+    return NULL;
+
+    GGML_UNUSED(reg);
+}
+
+static ggml_backend_reg_i ggml_backend_metal_reg_i = {
+    /* .get_name         = */ ggml_backend_metal_reg_get_name,
+    /* .device_count     = */ ggml_backend_metal_reg_device_count,
+    /* .device_get       = */ ggml_backend_metal_reg_device_get,
+    /* .get_proc_address = */ ggml_backend_metal_get_proc_address,
+};
+
+ggml_backend_reg_t ggml_backend_metal_reg(void) {
+    {
+        g_ggml_metal_reg = {
+            /* .api_version = */ GGML_BACKEND_API_VERSION,
+            /* .iface       = */ ggml_backend_metal_reg_i,
+            /* .context     = */ NULL,
+        };
+
+        g_ggml_metal_device = {
+            /* .iface   = */ ggml_backend_metal_device_i,
+            /* .reg     = */ &g_ggml_metal_reg,
+            /* .context = */ ggml_metal_device_get(),
+        };
+    }
+
+    return &g_ggml_metal_reg;
+}
+
+GGML_BACKEND_DL_IMPL(ggml_backend_metal_reg)
diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m
deleted file mode 100644 (file)
index 2243c17..0000000
+++ /dev/null
@@ -1,6897 +0,0 @@
-#import "ggml-metal.h"
-
-#import "ggml-impl.h"
-#import "ggml-backend-impl.h"
-#import "ggml-metal-impl.h"
-#import "ggml-metal-common.h"
-
-#import <Foundation/Foundation.h>
-
-#import <Metal/Metal.h>
-
-#undef MIN
-#undef MAX
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-
-// max memory buffers that can be mapped to the device
-#define GGML_METAL_MAX_BUFFERS 64
-
-// max number of MTLCommandBuffer used to submit a graph for processing
-#define GGML_METAL_MAX_COMMAND_BUFFERS 8
-
-#ifndef TARGET_OS_VISION
-#define TARGET_OS_VISION 0
-#endif
-
-// create residency sets only on macOS >= 15.0
-#if !TARGET_CPU_X86_64 && TARGET_OS_OSX && __MAC_OS_X_VERSION_MAX_ALLOWED >= 150000 || \
-    TARGET_OS_IOS && __IPHONE_OS_VERSION_MAX_ALLOWED >= 180000 || \
-    TARGET_OS_TV && __TV_OS_VERSION_MAX_ALLOWED >= 180000 || \
-    TARGET_OS_VISION && __VISION_OS_VERSION_MAX_ALLOWED >= 200000
-#define GGML_METAL_HAS_RESIDENCY_SETS 1
-#endif
-
-// globals
-
-// overload of MTLGPUFamilyMetal3 (not available in some environments)
-static const NSInteger MTLGPUFamilyMetal3_GGML = 5001;
-
-// initialized in ggml_backend_metal_reg
-static struct ggml_backend_reg    g_ggml_backend_metal_reg;
-static struct ggml_backend_device g_ggml_backend_metal_device;
-
-// information about a Metal device
-// note: assumes single GPU device - the default one
-// TODO: support multiple GPU devices
-static struct ggml_backend_metal_device_context {
-    id<MTLDevice>  mtl_device;
-    int            mtl_device_ref_count;
-    id<MTLLibrary> mtl_library;
-
-    // a single global queue shared by all Metal backends
-    // technically not needed for devices with unified memory, but enables support for discrete GPUs
-    // ref: https://github.com/ggml-org/llama.cpp/pull/15906
-    id<MTLCommandQueue> mtl_queue;
-
-    NSLock * mtl_lock;
-
-    bool has_simdgroup_reduction;
-    bool has_simdgroup_mm;
-    bool has_residency_sets;
-    bool has_bfloat;
-    bool use_bfloat;
-    bool use_fusion;
-    bool use_concurrency;
-    bool use_shared_buffers;
-    bool use_graph_optimize;
-
-    int debug_graph;
-    int debug_fusion;
-
-    // how many times a given op was fused
-    uint64_t fuse_cnt[GGML_OP_COUNT];
-
-    size_t max_size;
-
-    char name[128];
-} g_ggml_ctx_dev_main = {
-    /*.mtl_device              =*/ nil,
-    /*.mtl_device_ref_count    =*/ 0,
-    /*.mtl_library             =*/ nil,
-    /*.mtl_queue               =*/ nil,
-    /*.mtl_lock                =*/ nil,
-    /*.has_simdgroup_reduction =*/ false,
-    /*.has_simdgroup_mm        =*/ false,
-    /*.has_residency_sets      =*/ false,
-    /*.has_bfloat              =*/ false,
-    /*.use_bfloat              =*/ false,
-    /*.use_fusion              =*/ true,
-    /*.use_concurrency         =*/ true,
-    /*.use_shared_buffers      =*/ true,
-    /*.use_graph_optimize      =*/ true,
-    /*.debug_graph             =*/ 0,
-    /*.debug_fusion            =*/ 0,
-    /*.fuse_cnt                =*/ { 0 },
-    /*.max_size                =*/ 0,
-    /*.name                    =*/ "",
-};
-
-// acquire
-static id<MTLDevice> ggml_backend_metal_device_acq(struct ggml_backend_metal_device_context * ctx) {
-    assert(ctx != NULL);
-
-    if (ctx->mtl_lock == nil) {
-        ctx->mtl_lock = [[NSLock alloc] init];
-    }
-
-    if (ctx->mtl_device == nil) {
-        ctx->mtl_device = MTLCreateSystemDefaultDevice();
-
-        if (ctx->mtl_device) {
-            ctx->mtl_queue = [ctx->mtl_device newCommandQueue];
-            if (ctx->mtl_queue == nil) {
-                GGML_LOG_ERROR("%s: error: failed to create command queue\n", __func__);
-            }
-
-            ctx->has_simdgroup_reduction  = [ctx->mtl_device supportsFamily:MTLGPUFamilyApple7];
-            ctx->has_simdgroup_reduction |= [ctx->mtl_device supportsFamily:MTLGPUFamilyMetal3_GGML];
-
-            ctx->has_simdgroup_mm = [ctx->mtl_device supportsFamily:MTLGPUFamilyApple7];
-
-#if defined(GGML_METAL_HAS_RESIDENCY_SETS)
-            ctx->has_residency_sets = getenv("GGML_METAL_NO_RESIDENCY") == nil;
-#endif
-
-            ctx->has_bfloat  = [ctx->mtl_device supportsFamily:MTLGPUFamilyMetal3_GGML];
-            ctx->has_bfloat |= [ctx->mtl_device supportsFamily:MTLGPUFamilyApple6];
-
-#if defined(GGML_METAL_USE_BF16)
-            ctx->use_bfloat = ctx->has_bfloat;
-#else
-            ctx->use_bfloat = false;
-#endif
-
-            ctx->use_fusion      = getenv("GGML_METAL_FUSION_DISABLE") == nil;
-            ctx->use_concurrency = getenv("GGML_METAL_CONCURRENCY_DISABLE") == nil;
-
-            {
-                const char * val = getenv("GGML_METAL_GRAPH_DEBUG");
-                ctx->debug_graph = val ? atoi(val) : 0;
-            }
-
-            {
-                const char * val = getenv("GGML_METAL_FUSION_DEBUG");
-                ctx->debug_fusion = val ? atoi(val) : 0;
-            }
-
-            ctx->use_shared_buffers = ctx->mtl_device.hasUnifiedMemory;
-
-            if (getenv("GGML_METAL_SHARED_BUFFERS_DISABLE") != NULL) {
-                ctx->use_shared_buffers = false;
-            }
-
-            ctx->use_graph_optimize = true;
-
-            if (getenv("GGML_METAL_GRAPH_OPTIMIZE_DISABLE") != NULL) {
-                ctx->use_graph_optimize = false;
-            }
-
-            memset(ctx->fuse_cnt, 0, sizeof(ctx->fuse_cnt));
-
-            ctx->max_size = ctx->mtl_device.maxBufferLength;
-
-            strncpy(ctx->name, [[ctx->mtl_device name] UTF8String], sizeof(ctx->name) - 1);
-        }
-    }
-
-    ctx->mtl_device_ref_count++;
-
-    return ctx->mtl_device;
-}
-
-// release
-static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_context * ctx) {
-    assert(ctx != NULL);
-    assert(ctx->mtl_device_ref_count > 0);
-
-    ctx->mtl_device_ref_count--;
-
-    if (ctx->mtl_device_ref_count == 0) {
-        if (ctx->debug_fusion > 0) {
-            fprintf(stderr, "%s: fusion stats:\n", __func__);
-            for (int i = 0; i < GGML_OP_COUNT; i++) {
-                if (ctx->fuse_cnt[i] == 0) {
-                    continue;
-                }
-
-                // note: cannot use ggml_log here
-                fprintf(stderr, "%s: - %s: %" PRIu64 "\n", __func__, ggml_op_name((enum ggml_op) i), ctx->fuse_cnt[i]);
-            }
-        }
-
-        if (ctx->mtl_lock) {
-            [ctx->mtl_lock release];
-            ctx->mtl_lock = nil;
-        }
-
-        if (ctx->mtl_library) {
-            [ctx->mtl_library release];
-            ctx->mtl_library = nil;
-        }
-
-        if (ctx->mtl_queue) {
-            [ctx->mtl_queue release];
-            ctx->mtl_queue = nil;
-        }
-
-        if (ctx->mtl_device) {
-            [ctx->mtl_device release];
-            ctx->mtl_device = nil;
-        }
-    }
-}
-
-// kernels
-
-struct ggml_metal_kernel {
-    id<MTLComputePipelineState> pipeline;
-};
-
-@interface ggml_metal_kernel_wrapper : NSObject
-
-@property (nonatomic, assign) struct ggml_metal_kernel kernel;
-
-@end
-
-@implementation ggml_metal_kernel_wrapper
-- (void) dealloc {
-    [_kernel.pipeline release];
-    [super dealloc];
-}
-@end
-
-enum ggml_metal_kernel_type {
-    GGML_METAL_KERNEL_TYPE_ADD_ID,
-    GGML_METAL_KERNEL_TYPE_REPEAT_F32,
-    GGML_METAL_KERNEL_TYPE_REPEAT_F16,
-    GGML_METAL_KERNEL_TYPE_REPEAT_I32,
-    GGML_METAL_KERNEL_TYPE_REPEAT_I16,
-    GGML_METAL_KERNEL_TYPE_SCALE,
-    GGML_METAL_KERNEL_TYPE_SCALE_4,
-    GGML_METAL_KERNEL_TYPE_CLAMP,
-    GGML_METAL_KERNEL_TYPE_TANH,
-    GGML_METAL_KERNEL_TYPE_RELU,
-    GGML_METAL_KERNEL_TYPE_SIGMOID,
-    GGML_METAL_KERNEL_TYPE_GELU,
-    GGML_METAL_KERNEL_TYPE_GELU_4,
-    GGML_METAL_KERNEL_TYPE_GELU_ERF,
-    GGML_METAL_KERNEL_TYPE_GELU_ERF_4,
-    GGML_METAL_KERNEL_TYPE_GELU_QUICK,
-    GGML_METAL_KERNEL_TYPE_GELU_QUICK_4,
-    GGML_METAL_KERNEL_TYPE_SILU,
-    GGML_METAL_KERNEL_TYPE_SILU_4,
-    GGML_METAL_KERNEL_TYPE_ELU,
-    GGML_METAL_KERNEL_TYPE_ABS,
-    GGML_METAL_KERNEL_TYPE_SGN,
-    GGML_METAL_KERNEL_TYPE_STEP,
-    GGML_METAL_KERNEL_TYPE_HARDSWISH,
-    GGML_METAL_KERNEL_TYPE_HARDSIGMOID,
-    GGML_METAL_KERNEL_TYPE_EXP,
-    GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16,
-    GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4,
-    GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32,
-    GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32_4,
-    GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF,
-    GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_F32,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_F16,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_BF16,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_MXFP4,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_M,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS,
-    GGML_METAL_KERNEL_TYPE_GET_ROWS_I32,
-    GGML_METAL_KERNEL_TYPE_SET_ROWS_F32,
-    GGML_METAL_KERNEL_TYPE_SET_ROWS_F16,
-    GGML_METAL_KERNEL_TYPE_SET_ROWS_BF16,
-    GGML_METAL_KERNEL_TYPE_SET_ROWS_Q8_0,
-    GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_0,
-    GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_1,
-    GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_0,
-    GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_1,
-    GGML_METAL_KERNEL_TYPE_SET_ROWS_IQ4_NL,
-    GGML_METAL_KERNEL_TYPE_L2_NORM,
-    GGML_METAL_KERNEL_TYPE_GROUP_NORM,
-    GGML_METAL_KERNEL_TYPE_NORM,
-    GGML_METAL_KERNEL_TYPE_SSM_CONV_F32,
-    GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32,
-    GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32_GROUP,
-    GGML_METAL_KERNEL_TYPE_RWKV_WKV6_F32,
-    GGML_METAL_KERNEL_TYPE_RWKV_WKV7_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32_C4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_C4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_C4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_1ROW,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_L4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_BF16,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_MXFP4_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F32_F32_R1_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F32_F32_R1_3,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F32_F32_R1_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F32_F32_R1_5,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_3,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_5,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_3,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_5,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_3,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_5,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_3,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_5,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_3,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_5,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_3,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_5,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_3,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_5,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_3,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_5,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_3,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_5,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_3,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_5,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_3,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_5,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32,
-  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW,
-  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4,
-  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_BF16_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_MXFP4_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_BF16_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_MXFP4_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_1,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_2,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_4,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_6,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_8,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_10,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_BF16_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MXFP4_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F16,
-    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F16,
-    GGML_METAL_KERNEL_TYPE_ROPE_NORM_F32,
-    GGML_METAL_KERNEL_TYPE_ROPE_NORM_F16,
-    GGML_METAL_KERNEL_TYPE_ROPE_MULTI_F32,
-    GGML_METAL_KERNEL_TYPE_ROPE_MULTI_F16,
-    GGML_METAL_KERNEL_TYPE_ROPE_VISION_F32,
-    GGML_METAL_KERNEL_TYPE_ROPE_VISION_F16,
-    GGML_METAL_KERNEL_TYPE_ROPE_NEOX_F32,
-    GGML_METAL_KERNEL_TYPE_ROPE_NEOX_F16,
-    GGML_METAL_KERNEL_TYPE_IM2COL_F16,
-    GGML_METAL_KERNEL_TYPE_IM2COL_F32,
-    GGML_METAL_KERNEL_TYPE_IM2COL_EXT_F16,
-    GGML_METAL_KERNEL_TYPE_IM2COL_EXT_F32,
-    GGML_METAL_KERNEL_TYPE_CONV_TRANSPOSE_1D_F32_F32,
-    GGML_METAL_KERNEL_TYPE_CONV_TRANSPOSE_1D_F16_F32,
-    GGML_METAL_KERNEL_TYPE_UPSCALE_F32,
-    GGML_METAL_KERNEL_TYPE_PAD_F32,
-    GGML_METAL_KERNEL_TYPE_PAD_REFLECT_1D_F32,
-    GGML_METAL_KERNEL_TYPE_ARANGE_F32,
-    GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32,
-    GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC,
-    GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC,
-    GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32,
-    GGML_METAL_KERNEL_TYPE_CPY_F32_F32,
-    GGML_METAL_KERNEL_TYPE_CPY_F32_F16,
-    GGML_METAL_KERNEL_TYPE_CPY_F32_BF16,
-    GGML_METAL_KERNEL_TYPE_CPY_F16_F16,
-    GGML_METAL_KERNEL_TYPE_CPY_F16_F32,
-    GGML_METAL_KERNEL_TYPE_CPY_BF16_F32,
-    GGML_METAL_KERNEL_TYPE_CPY_BF16_BF16,
-    GGML_METAL_KERNEL_TYPE_CPY_F32_I32,
-    GGML_METAL_KERNEL_TYPE_CPY_I32_F32,
-    GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0,
-    GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0,
-    GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1,
-    GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0,
-    GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1,
-    GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL,
-    GGML_METAL_KERNEL_TYPE_CPY_Q4_0_F32,
-    GGML_METAL_KERNEL_TYPE_CPY_Q4_0_F16,
-    GGML_METAL_KERNEL_TYPE_CPY_Q4_1_F32,
-    GGML_METAL_KERNEL_TYPE_CPY_Q4_1_F16,
-    GGML_METAL_KERNEL_TYPE_CPY_Q5_0_F32,
-    GGML_METAL_KERNEL_TYPE_CPY_Q5_0_F16,
-    GGML_METAL_KERNEL_TYPE_CPY_Q5_1_F32,
-    GGML_METAL_KERNEL_TYPE_CPY_Q5_1_F16,
-    GGML_METAL_KERNEL_TYPE_CPY_Q8_0_F32,
-    GGML_METAL_KERNEL_TYPE_CPY_Q8_0_F16,
-    GGML_METAL_KERNEL_TYPE_CONCAT,
-    GGML_METAL_KERNEL_TYPE_SQR,
-    GGML_METAL_KERNEL_TYPE_SQRT,
-    GGML_METAL_KERNEL_TYPE_SIN,
-    GGML_METAL_KERNEL_TYPE_COS,
-    GGML_METAL_KERNEL_TYPE_NEG,
-    GGML_METAL_KERNEL_TYPE_REGLU,
-    GGML_METAL_KERNEL_TYPE_GEGLU,
-    GGML_METAL_KERNEL_TYPE_SWIGLU,
-    GGML_METAL_KERNEL_TYPE_SWIGLU_OAI,
-    GGML_METAL_KERNEL_TYPE_GEGLU_ERF,
-    GGML_METAL_KERNEL_TYPE_GEGLU_QUICK,
-    GGML_METAL_KERNEL_TYPE_SUM_ROWS,
-    GGML_METAL_KERNEL_TYPE_MEAN,
-    GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32,
-    GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32,
-    GGML_METAL_KERNEL_TYPE_ARGMAX,
-
-    GGML_METAL_KERNEL_TYPE_COUNT
-};
-
-struct ggml_metal_command_buffer {
-    id<MTLCommandBuffer> obj;
-
-    // used to enable concurrent execution of ops in the command buffers
-    struct ggml_mem_ranges * mem_ranges;
-};
-
-struct ggml_backend_metal_context {
-    id<MTLDevice>       device;
-    id<MTLCommandQueue> queue; // currently a pointer to the device queue, but might become a separate queue [TAG_QUEUE_PER_BACKEND]
-
-    dispatch_queue_t d_queue;
-
-    // the set of pre-compiled kernels for this context
-    struct ggml_metal_kernel kernels[GGML_METAL_KERNEL_TYPE_COUNT];
-
-    // additional, inference-time compiled kernels
-    NSMutableDictionary * kernels_ext;
-
-    // capture state
-    bool capture_next_compute;
-    bool capture_started;
-
-    id<MTLCaptureScope> capture_scope;
-
-    // command buffer state
-    int n_cb;           // number of extra threads used to submit the command buffers
-    int n_nodes_0;      // number of nodes submitted by the main thread
-    int n_nodes_1;      // remaining number of nodes submitted by the n_cb threads
-    int n_nodes_per_cb;
-
-    struct ggml_cgraph * gf;
-
-    // the callback given to the thread pool
-    void (^encode_async)(size_t ith);
-
-    // n_cb command buffers + 1 used by the main thread
-    struct ggml_metal_command_buffer cmd_bufs[GGML_METAL_MAX_COMMAND_BUFFERS + 1];
-
-    // extra command buffers for things like getting, setting and copying tensors
-    NSMutableArray * cmd_bufs_ext;
-
-    // the last command buffer queued into the Metal queue with operations relevant to the current Metal backend
-    id<MTLCommandBuffer> cmd_buf_last;
-
-    // abort ggml_metal_graph_compute if callback returns true
-    ggml_abort_callback abort_callback;
-    void *              abort_callback_data;
-};
-
-// MSL code
-// TODO: move the contents here when ready
-//       for now it is easier to work in a separate file
-// static NSString * const msl_library_source = @"see metal.metal";
-
-#if !GGML_METAL_EMBED_LIBRARY
-// Here to assist with NSBundle Path Hack
-@interface GGMLMetalClass : NSObject
-@end
-@implementation GGMLMetalClass
-@end
-#endif
-
-static void * ggml_metal_host_malloc(size_t n) {
-    void * data = NULL;
-
-#if TARGET_OS_OSX
-    kern_return_t err = vm_allocate((vm_map_t) mach_task_self(), (void *) &data, n, VM_FLAGS_ANYWHERE);
-    if (err != KERN_SUCCESS) {
-        GGML_LOG_ERROR("%s: error: vm_allocate failed\n", __func__);
-        return NULL;
-    }
-#else
-    const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n);
-    if (result != 0) {
-        GGML_LOG_ERROR("%s: error: posix_memalign failed\n", __func__);
-        return NULL;
-    }
-#endif
-
-    return data;
-}
-
-// load library
-//
-// - first check if the library is embedded
-// - then check if the library is in the bundle
-// - if not found, load the source and compile it
-// - if that fails, return NULL
-static id<MTLLibrary> ggml_metal_load_library(id<MTLDevice> device, bool use_bfloat) {
-    const int64_t t_start = ggml_time_us();
-
-    id<MTLLibrary> metal_library = nil;
-    NSError * error = nil;
-    NSString * src = nil;
-
-#if GGML_METAL_EMBED_LIBRARY
-    GGML_LOG_INFO("%s: using embedded metal library\n", __func__);
-
-    extern const char ggml_metallib_start[];
-    extern const char ggml_metallib_end[];
-
-    src = [[NSString alloc] initWithBytes:ggml_metallib_start length:(ggml_metallib_end-ggml_metallib_start) encoding:NSUTF8StringEncoding];
-
-#else
-
-#ifdef SWIFT_PACKAGE
-    NSBundle * bundle = SWIFTPM_MODULE_BUNDLE;
-#else
-    NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
-#endif
-
-    NSString * path_lib = [bundle pathForResource:@"default" ofType:@"metallib"];
-    if (path_lib == nil) {
-        // Try to find the resource in the directory where the current binary is located.
-        NSString * current_binary = [[NSProcessInfo processInfo] arguments][0];
-        NSString * bin_dir = [current_binary stringByDeletingLastPathComponent];
-        NSString * default_metallib_path = [NSString pathWithComponents:@[bin_dir, @"default.metallib"]];
-        if ([[NSFileManager defaultManager] isReadableFileAtPath:default_metallib_path]) {
-            GGML_LOG_INFO("%s: found '%s'\n", __func__, [default_metallib_path UTF8String]);
-            NSDictionary * atts = [[NSFileManager defaultManager] attributesOfItemAtPath:default_metallib_path error:&error];
-            if (atts && atts[NSFileType] == NSFileTypeSymbolicLink) {
-                // Optionally, if this is a symlink, try to resolve it.
-                default_metallib_path = [[NSFileManager defaultManager] destinationOfSymbolicLinkAtPath:default_metallib_path error:&error];
-                if (default_metallib_path && [default_metallib_path length] > 0 && ![[default_metallib_path substringToIndex:1] isEqualToString:@"/"]) {
-                    // It is a relative path; prepend the binary directory as a prefix.
-                    default_metallib_path = [NSString pathWithComponents:@[bin_dir, default_metallib_path]];
-                }
-                if (!default_metallib_path || ![[NSFileManager defaultManager] isReadableFileAtPath:default_metallib_path]) {
-                    // Link to the resource could not be resolved.
-                    default_metallib_path = nil;
-                } else {
-                    GGML_LOG_INFO("%s: symlink resolved '%s'\n", __func__, [default_metallib_path UTF8String]);
-                }
-            }
-        } else {
-            // The resource couldn't be found in the binary's directory.
-            default_metallib_path = nil;
-        }
-        path_lib = default_metallib_path;
-    }
-
-    if (path_lib != nil) {
-        // pre-compiled library found
-        NSURL * libURL = [NSURL fileURLWithPath:path_lib];
-        GGML_LOG_INFO("%s: loading '%s'\n", __func__, [path_lib UTF8String]);
-
-        metal_library = [device newLibraryWithURL:libURL error:&error];
-        if (error) {
-            GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
-            return NULL;
-        }
-    } else {
-        GGML_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__);
-
-        NSString * path_source;
-        NSString * path_resource = [[NSProcessInfo processInfo].environment objectForKey:@"GGML_METAL_PATH_RESOURCES"];
-
-        GGML_LOG_INFO("%s: GGML_METAL_PATH_RESOURCES = %s\n", __func__, path_resource ? [path_resource UTF8String] : "nil");
-
-        if (path_resource) {
-            path_source = [path_resource stringByAppendingPathComponent:@"ggml-metal.metal"];
-        } else {
-            path_source = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
-        }
-
-        if (path_source == nil) {
-            GGML_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__);
-            path_source = @"ggml-metal.metal";
-        }
-
-        GGML_LOG_INFO("%s: loading '%s'\n", __func__, [path_source UTF8String]);
-
-        src = [NSString stringWithContentsOfFile:path_source encoding:NSUTF8StringEncoding error:&error];
-        if (error) {
-            GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
-            return NULL;
-        }
-    }
-#endif
-
-    if (!metal_library) {
-        @autoreleasepool {
-            // dictionary of preprocessor macros
-            NSMutableDictionary * prep = [NSMutableDictionary dictionary];
-
-            if (use_bfloat) {
-                [prep setObject:@"1" forKey:@"GGML_METAL_USE_BF16"];
-            }
-
-#if GGML_METAL_EMBED_LIBRARY
-            [prep setObject:@"1" forKey:@"GGML_METAL_EMBED_LIBRARY"];
-#endif
-
-            MTLCompileOptions * options = [MTLCompileOptions new];
-            options.preprocessorMacros = prep;
-
-            //[options setFastMathEnabled:false];
-
-            metal_library = [device newLibraryWithSource:src options:options error:&error];
-            if (error) {
-                GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
-                return NULL;
-            }
-
-#if !__has_feature(objc_arc)
-            [options release];
-#endif
-        }
-    }
-
-#if GGML_METAL_EMBED_LIBRARY
-    [src release];
-#endif // GGML_METAL_EMBED_LIBRARY
-
-    GGML_LOG_INFO("%s: loaded in %.3f sec\n", __func__, (ggml_time_us() - t_start) / 1e6);
-
-    return metal_library;
-}
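
A brief usage note: when no precompiled default.metallib is found, the loader above falls back to compiling ggml-metal.metal from source, and the GGML_METAL_PATH_RESOURCES environment variable can point it at the directory containing that file. A minimal sketch, assuming a hypothetical checkout path and the ggml_backend_metal_init()/ggml_backend_free() API from the public headers:

    // point the source fallback at a local ggml-metal.metal before initializing the backend
    #include <stdlib.h>
    #include "ggml-backend.h"
    #include "ggml-metal.h"

    int main(void) {
        // hypothetical path - replace with the actual location of ggml-metal.metal
        setenv("GGML_METAL_PATH_RESOURCES", "/path/to/ggml/src/ggml-metal", 1);

        ggml_backend_t backend = ggml_backend_metal_init();
        if (backend == NULL) {
            return 1;
        }

        // ... use the backend ...

        ggml_backend_free(backend);
        return 0;
    }
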
-
-static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t dev) {
-    GGML_LOG_INFO("%s: allocating\n", __func__);
-
-#if TARGET_OS_OSX && !GGML_METAL_NDEBUG
-    // Show all the Metal device instances in the system
-    NSArray * devices = MTLCopyAllDevices();
-    for (id<MTLDevice> device in devices) {
-        GGML_LOG_INFO("%s: found device: %s\n", __func__, [[device name] UTF8String]);
-    }
-    [devices release]; // since it was created by a *Copy* C method
-#endif
-
-    // init context
-    struct ggml_backend_metal_context * ctx = calloc(1, sizeof(struct ggml_backend_metal_context));
-    struct ggml_backend_metal_device_context * ctx_dev = dev->context;
-
-    id<MTLDevice> device = ctx_dev->mtl_device;
-
-    GGML_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]);
-
-    ctx->device = device;
-
-    // TODO: question - would it be better to have one queue for the backend and one queue for the device?
-    //                  the graph encoders and async ops would use the backend queue while the sync ops would use the device queue?
-    //ctx->queue = [device newCommandQueue]; [TAG_QUEUE_PER_BACKEND]
-    ctx->queue = ctx_dev->mtl_queue;
-    if (ctx->queue == nil) {
-        GGML_LOG_ERROR("%s: error: failed to create command queue\n", __func__);
-        return NULL;
-    }
-
-    ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT);
-
-    // load library
-    {
-        [ctx_dev->mtl_lock lock];
-
-        if (ctx_dev->mtl_library == nil) {
-            ctx_dev->mtl_library = ggml_metal_load_library(device, ctx_dev->use_bfloat);
-        }
-
-        [ctx_dev->mtl_lock unlock];
-    }
-
-    id<MTLLibrary> metal_library = ctx_dev->mtl_library;
-    if (metal_library == nil) {
-        GGML_LOG_ERROR("%s: error: metal library is nil\n", __func__);
-        return NULL;
-    }
-
-    // print MTL GPU family:
-    GGML_LOG_INFO("%s: GPU name:   %s\n", __func__, [[device name] UTF8String]);
-
-    // determine max supported GPU family
-    // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
-    // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
-    {
-        for (int i = MTLGPUFamilyApple1 + 20; i >= MTLGPUFamilyApple1; --i) {
-            if ([device supportsFamily:i]) {
-                GGML_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d  (%d)\n", __func__, i - (int) MTLGPUFamilyApple1 + 1, i);
-                break;
-            }
-        }
-
-        for (int i = MTLGPUFamilyCommon1 + 5; i >= MTLGPUFamilyCommon1; --i) {
-            if ([device supportsFamily:i]) {
-                GGML_LOG_INFO("%s: GPU family: MTLGPUFamilyCommon%d (%d)\n", __func__, i - (int) MTLGPUFamilyCommon1 + 1, i);
-                break;
-            }
-        }
-
-        for (int i = MTLGPUFamilyMetal3_GGML + 5; i >= MTLGPUFamilyMetal3_GGML; --i) {
-            if ([device supportsFamily:i]) {
-                GGML_LOG_INFO("%s: GPU family: MTLGPUFamilyMetal%d  (%d)\n", __func__, i - (int) MTLGPUFamilyMetal3_GGML + 3, i);
-                break;
-            }
-        }
-    }
-
-    GGML_LOG_INFO("%s: simdgroup reduction   = %s\n", __func__, ctx_dev->has_simdgroup_reduction     ? "true" : "false");
-    GGML_LOG_INFO("%s: simdgroup matrix mul. = %s\n", __func__, ctx_dev->has_simdgroup_mm            ? "true" : "false");
-    GGML_LOG_INFO("%s: has residency sets    = %s\n", __func__, ctx_dev->has_residency_sets          ? "true" : "false");
-    GGML_LOG_INFO("%s: has bfloat            = %s\n", __func__, ctx_dev->has_bfloat                  ? "true" : "false");
-    GGML_LOG_INFO("%s: use bfloat            = %s\n", __func__, ctx_dev->use_bfloat                  ? "true" : "false");
-    GGML_LOG_INFO("%s: use fusion            = %s\n", __func__, ctx_dev->use_fusion                  ? "true" : "false");
-    GGML_LOG_INFO("%s: use concurrency       = %s\n", __func__, ctx_dev->use_concurrency             ? "true" : "false");
-    GGML_LOG_INFO("%s: use shared buffers    = %s\n", __func__, ctx_dev->use_shared_buffers          ? "true" : "false");
-    GGML_LOG_INFO("%s: use graph optimize    = %s\n", __func__, ctx_dev->use_graph_optimize          ? "true" : "false");
-    GGML_LOG_INFO("%s: hasUnifiedMemory      = %s\n", __func__, ctx_dev->mtl_device.hasUnifiedMemory ? "true" : "false");
-
-    ctx->capture_next_compute = false;
-    ctx->capture_started = false;
-    ctx->capture_scope = nil;
-
-    ctx->gf = nil;
-    ctx->encode_async = nil;
-    for (int i = 0; i < GGML_METAL_MAX_COMMAND_BUFFERS; ++i) {
-        ctx->cmd_bufs[i].obj = nil;
-
-        if (ctx_dev->use_concurrency) {
-            ctx->cmd_bufs[i].mem_ranges = ggml_mem_ranges_init(ctx_dev->debug_graph);
-        }
-    }
-
-    ctx->cmd_bufs_ext = [[NSMutableArray alloc] init];
-
-    ctx->cmd_buf_last = nil;
-
-#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
-    if (@available(macOS 10.12, iOS 16.0, *)) {
-        GGML_LOG_INFO("%s: recommendedMaxWorkingSetSize  = %8.2f MB\n", __func__, device.recommendedMaxWorkingSetSize / 1e6);
-    }
-#endif
-
-    // load kernels
-    {
-        NSError * error = nil;
-
-        for (int i = 0; i < GGML_METAL_KERNEL_TYPE_COUNT; ++i) {
-            ctx->kernels[i].pipeline = nil;
-        }
-
-#define GGML_METAL_ADD_KERNEL(e, name, supported) \
-        if (supported) { \
-            struct ggml_metal_kernel * kernel = &ctx->kernels[e]; \
-            id<MTLFunction> metal_function = [metal_library newFunctionWithName:@"kernel_"#name]; \
-            kernel->pipeline = [device newComputePipelineStateWithFunction:metal_function error:&error]; \
-            GGML_LOG_DEBUG("%s: loaded %-40s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) kernel->pipeline, \
-                    (int) kernel->pipeline.maxTotalThreadsPerThreadgroup, \
-                    (int) kernel->pipeline.threadExecutionWidth); \
-            [metal_function release]; \
-            if (error) { \
-                GGML_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \
-                return NULL; \
-            } \
-        } else { \
-            GGML_LOG_WARN("%s: skipping %-40s (not supported)\n", __func__, "kernel_"#name); \
-        }
-
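For illustration, a single invocation such as GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ID, add_id, true) expands to roughly the following sequence: look up the MSL function named kernel_add_id in metal_library via newFunctionWithName:, build a compute pipeline state for it with newComputePipelineStateWithFunction:error:, store the result in ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ID].pipeline, and log the pipeline's maxTotalThreadsPerThreadgroup and threadExecutionWidth; when the supported flag is false, the kernel is skipped with a warning instead.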
-        const bool has_simdgroup_mm        = ctx_dev->has_simdgroup_mm;
-        const bool has_simdgroup_reduction = ctx_dev->has_simdgroup_reduction;
-        const bool use_bfloat              = ctx_dev->use_bfloat;
-
-        // simd_sum and simd_max require MTLGPUFamilyApple7
-
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ID,                          add_id,                          true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_REPEAT_F32,                      repeat_f32,                      true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_REPEAT_F16,                      repeat_f16,                      true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_REPEAT_I32,                      repeat_i32,                      true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_REPEAT_I16,                      repeat_i16,                      true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE,                           scale,                           true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE_4,                         scale_4,                         true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CLAMP,                           clamp,                           true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TANH,                            tanh,                            true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RELU,                            relu,                            true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SIGMOID,                         sigmoid,                         true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU,                            gelu,                            true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_4,                          gelu_4,                          true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_ERF,                        gelu_erf,                        true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_ERF_4,                      gelu_erf_4,                      true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK,                      gelu_quick,                      true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK_4,                    gelu_quick_4,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU,                            silu,                            true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU_4,                          silu_4,                          true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ELU,                             elu,                             true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ABS,                             abs,                             true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SGN,                             sgn,                             true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_STEP,                            step,                            true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_HARDSWISH,                       hardswish,                       true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_HARDSIGMOID,                     hardsigmoid,                     true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_EXP,                             exp,                             true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16,                    soft_max_f16,                    has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4,                  soft_max_f16_4,                  has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32,                    soft_max_f32,                    has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32_4,                  soft_max_f32_4,                  has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF,                   diag_mask_inf,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8,                 diag_mask_inf_8,                 true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F32,                    get_rows_f32,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F16,                    get_rows_f16,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_BF16,                   get_rows_bf16,                   use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0,                   get_rows_q4_0,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1,                   get_rows_q4_1,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0,                   get_rows_q5_0,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1,                   get_rows_q5_1,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0,                   get_rows_q8_0,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_MXFP4,                  get_rows_mxfp4,                  true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K,                   get_rows_q2_K,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K,                   get_rows_q3_K,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K,                   get_rows_q4_K,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K,                   get_rows_q5_K,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K,                   get_rows_q6_K,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS,                get_rows_iq2_xxs,                true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS,                 get_rows_iq2_xs,                 true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS,                get_rows_iq3_xxs,                true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S,                  get_rows_iq3_s,                  true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S,                  get_rows_iq2_s,                  true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S,                  get_rows_iq1_s,                  true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_M,                  get_rows_iq1_m,                  true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL,                 get_rows_iq4_nl,                 true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS,                 get_rows_iq4_xs,                 true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_I32,                    get_rows_i32,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_F32,                    set_rows_f32,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_F16,                    set_rows_f16,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_BF16,                   set_rows_bf16,                   use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q8_0,                   set_rows_q8_0,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_0,                   set_rows_q4_0,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_1,                   set_rows_q4_1,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_0,                   set_rows_q5_0,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_1,                   set_rows_q5_1,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_IQ4_NL,                 set_rows_iq4_nl,                 true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_L2_NORM,                         l2_norm,                         has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GROUP_NORM,                      group_norm,                      has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NORM,                            norm,                            has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SSM_CONV_F32,                    ssm_conv_f32,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32,                    ssm_scan_f32,                    has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32_GROUP,              ssm_scan_f32_group,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RWKV_WKV6_F32,                   rwkv_wkv6_f32,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RWKV_WKV7_F32,                   rwkv_wkv7_f32,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32,                  mul_mv_f32_f32,                  has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32_C4,               mul_mv_f32_f32_c4,               true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32,                 mul_mv_bf16_f32,                 has_simdgroup_reduction && use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_C4,              mul_mv_bf16_f32_c4,              use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_1ROW,            mul_mv_bf16_f32_1row,            has_simdgroup_reduction && use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_L4,              mul_mv_bf16_f32_l4,              has_simdgroup_reduction && use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_BF16,                mul_mv_bf16_bf16,                has_simdgroup_reduction && use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32,                  mul_mv_f16_f32,                  has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_C4,               mul_mv_f16_f32_c4,               true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW,             mul_mv_f16_f32_1row,             has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4,               mul_mv_f16_f32_l4,               has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16,                  mul_mv_f16_f16,                  has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32,                 mul_mv_q4_0_f32,                 has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32,                 mul_mv_q4_1_f32,                 has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32,                 mul_mv_q5_0_f32,                 has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32,                 mul_mv_q5_1_f32,                 has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32,                 mul_mv_q8_0_f32,                 has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_MXFP4_F32,                mul_mv_mxfp4_f32,                has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F32_F32_R1_2,         mul_mv_ext_f32_f32_r1_2,         has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F32_F32_R1_3,         mul_mv_ext_f32_f32_r1_3,         has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F32_F32_R1_4,         mul_mv_ext_f32_f32_r1_4,         has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F32_F32_R1_5,         mul_mv_ext_f32_f32_r1_5,         has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_2,         mul_mv_ext_f16_f32_r1_2,         has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_3,         mul_mv_ext_f16_f32_r1_3,         has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_4,         mul_mv_ext_f16_f32_r1_4,         has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_5,         mul_mv_ext_f16_f32_r1_5,         has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_2,        mul_mv_ext_q4_0_f32_r1_2,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_3,        mul_mv_ext_q4_0_f32_r1_3,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_4,        mul_mv_ext_q4_0_f32_r1_4,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_5,        mul_mv_ext_q4_0_f32_r1_5,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_2,        mul_mv_ext_q4_1_f32_r1_2,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_3,        mul_mv_ext_q4_1_f32_r1_3,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_4,        mul_mv_ext_q4_1_f32_r1_4,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_5,        mul_mv_ext_q4_1_f32_r1_5,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_2,        mul_mv_ext_q5_0_f32_r1_2,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_3,        mul_mv_ext_q5_0_f32_r1_3,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_4,        mul_mv_ext_q5_0_f32_r1_4,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_5,        mul_mv_ext_q5_0_f32_r1_5,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_2,        mul_mv_ext_q5_1_f32_r1_2,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_3,        mul_mv_ext_q5_1_f32_r1_3,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_4,        mul_mv_ext_q5_1_f32_r1_4,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_5,        mul_mv_ext_q5_1_f32_r1_5,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_2,        mul_mv_ext_q8_0_f32_r1_2,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_3,        mul_mv_ext_q8_0_f32_r1_3,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_4,        mul_mv_ext_q8_0_f32_r1_4,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_5,        mul_mv_ext_q8_0_f32_r1_5,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_2,       mul_mv_ext_mxfp4_f32_r1_2,       has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_3,       mul_mv_ext_mxfp4_f32_r1_3,       has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_4,       mul_mv_ext_mxfp4_f32_r1_4,       has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_5,       mul_mv_ext_mxfp4_f32_r1_5,       has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_2,        mul_mv_ext_q4_K_f32_r1_2,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_3,        mul_mv_ext_q4_K_f32_r1_3,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_4,        mul_mv_ext_q4_K_f32_r1_4,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_5,        mul_mv_ext_q4_K_f32_r1_5,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_2,        mul_mv_ext_q5_K_f32_r1_2,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_3,        mul_mv_ext_q5_K_f32_r1_3,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_4,        mul_mv_ext_q5_K_f32_r1_4,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_5,        mul_mv_ext_q5_K_f32_r1_5,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_2,        mul_mv_ext_q6_K_f32_r1_2,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_3,        mul_mv_ext_q6_K_f32_r1_3,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_4,        mul_mv_ext_q6_K_f32_r1_4,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_5,        mul_mv_ext_q6_K_f32_r1_5,        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_2,      mul_mv_ext_iq4_nl_f32_r1_2,      has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_3,      mul_mv_ext_iq4_nl_f32_r1_3,      has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_4,      mul_mv_ext_iq4_nl_f32_r1_4,      has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_5,      mul_mv_ext_iq4_nl_f32_r1_5,      has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32,                 mul_mv_q2_K_f32,                 has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32,                 mul_mv_q3_K_f32,                 has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32,                 mul_mv_q4_K_f32,                 has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32,                 mul_mv_q5_K_f32,                 has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32,                 mul_mv_q6_K_f32,                 has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32,              mul_mv_iq2_xxs_f32,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32,               mul_mv_iq2_xs_f32,               has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32,              mul_mv_iq3_xxs_f32,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32,                mul_mv_iq3_s_f32,                has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32,                mul_mv_iq2_s_f32,                has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32,                mul_mv_iq1_s_f32,                has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32,                mul_mv_iq1_m_f32,                has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32,               mul_mv_iq4_nl_f32,               has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32,               mul_mv_iq4_xs_f32,               has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32,               mul_mv_id_f32_f32,               has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32,               mul_mv_id_f16_f32,               has_simdgroup_reduction);
-      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW,          mul_mv_id_f16_f32_1row,          has_simdgroup_reduction);
-      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4,            mul_mv_id_f16_f32_l4,            has_simdgroup_reduction);
-      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16,               mul_mv_id_f16_f16,               has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_BF16_F32,              mul_mv_id_bf16_f32,              has_simdgroup_reduction && use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32,              mul_mv_id_q4_0_f32,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32,              mul_mv_id_q4_1_f32,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32,              mul_mv_id_q5_0_f32,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32,              mul_mv_id_q5_1_f32,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32,              mul_mv_id_q8_0_f32,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_MXFP4_F32,             mul_mv_id_mxfp4_f32,             has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32,              mul_mv_id_q2_K_f32,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32,              mul_mv_id_q3_K_f32,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32,              mul_mv_id_q4_K_f32,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32,              mul_mv_id_q5_K_f32,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32,              mul_mv_id_q6_K_f32,              has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32,           mul_mv_id_iq2_xxs_f32,           has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32,            mul_mv_id_iq2_xs_f32,            has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32,           mul_mv_id_iq3_xxs_f32,           has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32,             mul_mv_id_iq3_s_f32,             has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32,             mul_mv_id_iq2_s_f32,             has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32,             mul_mv_id_iq1_s_f32,             has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32,             mul_mv_id_iq1_m_f32,             has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32,            mul_mv_id_iq4_nl_f32,            has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32,            mul_mv_id_iq4_xs_f32,            has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32,                  mul_mm_f32_f32,                  has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32,                  mul_mm_f16_f32,                  has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_BF16_F32,                 mul_mm_bf16_f32,                 has_simdgroup_mm && use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32,                 mul_mm_q4_0_f32,                 has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32,                 mul_mm_q4_1_f32,                 has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32,                 mul_mm_q5_0_f32,                 has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32,                 mul_mm_q5_1_f32,                 has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32,                 mul_mm_q8_0_f32,                 has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_MXFP4_F32,                mul_mm_mxfp4_f32,                has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32,                 mul_mm_q2_K_f32,                 has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32,                 mul_mm_q3_K_f32,                 has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32,                 mul_mm_q4_K_f32,                 has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32,                 mul_mm_q5_K_f32,                 has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32,                 mul_mm_q6_K_f32,                 has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32,              mul_mm_iq2_xxs_f32,              has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32,               mul_mm_iq2_xs_f32,               has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32,              mul_mm_iq3_xxs_f32,              has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32,                mul_mm_iq3_s_f32,                has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32,                mul_mm_iq2_s_f32,                has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32,                mul_mm_iq1_s_f32,                has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32,                mul_mm_iq1_m_f32,                has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32,               mul_mm_iq4_nl_f32,               has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32,               mul_mm_iq4_xs_f32,               has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_1,       mul_mm_id_map0_f16_ne20_1,       has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_2,       mul_mm_id_map0_f16_ne20_2,       has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_4,       mul_mm_id_map0_f16_ne20_4,       has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_6,       mul_mm_id_map0_f16_ne20_6,       has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_8,       mul_mm_id_map0_f16_ne20_8,       has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_10,      mul_mm_id_map0_f16_ne20_10,      has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_16,      mul_mm_id_map0_f16_ne20_16,      has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F16,               mul_mm_id_f32_f16,               has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F16,               mul_mm_id_f16_f16,               has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_BF16_F16,              mul_mm_id_bf16_f16,              has_simdgroup_mm && use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F16,              mul_mm_id_q4_0_f16,              has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F16,              mul_mm_id_q4_1_f16,              has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F16,              mul_mm_id_q5_0_f16,              has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F16,              mul_mm_id_q5_1_f16,              has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F16,              mul_mm_id_q8_0_f16,              has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MXFP4_F16,             mul_mm_id_mxfp4_f16,             has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F16,              mul_mm_id_q2_K_f16,              has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F16,              mul_mm_id_q3_K_f16,              has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F16,              mul_mm_id_q4_K_f16,              has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F16,              mul_mm_id_q5_K_f16,              has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F16,              mul_mm_id_q6_K_f16,              has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F16,           mul_mm_id_iq2_xxs_f16,           has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F16,            mul_mm_id_iq2_xs_f16,            has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F16,           mul_mm_id_iq3_xxs_f16,           has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F16,             mul_mm_id_iq3_s_f16,             has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F16,             mul_mm_id_iq2_s_f16,             has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F16,             mul_mm_id_iq1_s_f16,             has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F16,             mul_mm_id_iq1_m_f16,             has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F16,            mul_mm_id_iq4_nl_f16,            has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F16,            mul_mm_id_iq4_xs_f16,            has_simdgroup_mm);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_NORM_F32,                   rope_norm_f32,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_NORM_F16,                   rope_norm_f16,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_MULTI_F32,                  rope_multi_f32,                  true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_MULTI_F16,                  rope_multi_f16,                  true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_VISION_F32,                 rope_vision_f32,                 true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_VISION_F16,                 rope_vision_f16,                 true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_NEOX_F32,                   rope_neox_f32,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_NEOX_F16,                   rope_neox_f16,                   true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F16,                      im2col_f16,                      true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32,                      im2col_f32,                      true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_EXT_F16,                  im2col_ext_f16,                  true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_EXT_F32,                  im2col_ext_f32,                  true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONV_TRANSPOSE_1D_F32_F32,       conv_transpose_1d_f32_f32,       true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONV_TRANSPOSE_1D_F16_F32,       conv_transpose_1d_f16_f32,       true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32,                     upscale_f32,                     true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_F32,                         pad_f32,                         true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_REFLECT_1D_F32,              pad_reflect_1d_f32,              true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32,          timestep_embedding_f32,          true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARANGE_F32,                      arange_f32,                      true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC,             argsort_f32_i32_asc,             true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC,            argsort_f32_i32_desc,            true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32,                  leaky_relu_f32,                  true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32,                     cpy_f32_f32,                     true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F16,                     cpy_f32_f16,                     true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_BF16,                    cpy_f32_bf16,                    use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F32,                     cpy_f16_f32,                     true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F16,                     cpy_f16_f16,                     true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_BF16_F32,                    cpy_bf16_f32,                    use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_BF16_BF16,                   cpy_bf16_bf16,                   use_bfloat);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_I32,                     cpy_f32_i32,                     true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_I32_F32,                     cpy_i32_f32,                     true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0,                    cpy_f32_q8_0,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0,                    cpy_f32_q4_0,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1,                    cpy_f32_q4_1,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0,                    cpy_f32_q5_0,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1,                    cpy_f32_q5_1,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL,                  cpy_f32_iq4_nl,                  true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_Q4_0_F32,                    cpy_q4_0_f32,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_Q4_0_F16,                    cpy_q4_0_f16,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_Q4_1_F32,                    cpy_q4_1_f32,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_Q4_1_F16,                    cpy_q4_1_f16,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_Q5_0_F32,                    cpy_q5_0_f32,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_Q5_0_F16,                    cpy_q5_0_f16,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_Q5_1_F32,                    cpy_q5_1_f32,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_Q5_1_F16,                    cpy_q5_1_f16,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_Q8_0_F32,                    cpy_q8_0_f32,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_Q8_0_F16,                    cpy_q8_0_f16,                    true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONCAT,                          concat,                          true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SQR,                             sqr,                             true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SQRT,                            sqrt,                            true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SIN,                             sin,                             true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_COS,                             cos,                             true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NEG,                             neg,                             true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_REGLU,                           reglu,                           true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GEGLU,                           geglu,                           true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SWIGLU,                          swiglu,                          true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SWIGLU_OAI,                      swiglu_oai,                      true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GEGLU_ERF,                       geglu_erf,                       true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GEGLU_QUICK,                     geglu_quick,                     true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS,                        sum_rows,                        has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MEAN,                            mean,                            has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGMAX,                          argmax,                          has_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32,                 pool_2d_avg_f32,                 true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32,                 pool_2d_max_f32,                 true);
-    }
-
-    ctx->kernels_ext = [[NSMutableDictionary alloc] init];
-
-    return ctx;
-}
-
-static id<MTLComputePipelineState> ggml_metal_get_kernel(struct ggml_backend_metal_context * ctx, const char * name) {
-    NSString * key = [NSString stringWithUTF8String:name];
-
-    ggml_metal_kernel_wrapper * obj = [ctx->kernels_ext objectForKey:key];
-    if (obj) {
-        return obj.kernel.pipeline;
-    }
-
-    return nil;
-}
-
-static id<MTLComputePipelineState> ggml_metal_compile_kernel(ggml_backend_t backend, const char * base, const char * name, MTLFunctionConstantValues * cv) {
-    struct ggml_backend_metal_context        * ctx     = backend->context;
-    struct ggml_backend_metal_device_context * ctx_dev = backend->device->context;
-
-    id<MTLComputePipelineState> res = nil;
-
-    @autoreleasepool {
-        NSError * error = nil;
-
-        NSString * base_func = [NSString stringWithUTF8String:base];
-
-        GGML_LOG_DEBUG("%s: compiling kernel: base = '%s', name = '%s'\n", __func__, base, name);
-
-        // TODO: make sure it is thread-safe to compile kernels in parallel
-        id<MTLFunction> metal_function = [ctx_dev->mtl_library newFunctionWithName:base_func constantValues:cv error:&error];
-        if (!metal_function) {
-            GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
-
-            return nil;
-        }
-
-        struct ggml_metal_kernel kernel = {
-            /*.pipeline =*/ [ctx_dev->mtl_device newComputePipelineStateWithFunction:metal_function error:&error],
-        };
-
-        ggml_metal_kernel_wrapper * obj = [[ggml_metal_kernel_wrapper alloc] init];
-        obj.kernel = kernel;
-
-        res = obj.kernel.pipeline;
-
-        NSString * key = [NSString stringWithUTF8String:name];
-        [ctx->kernels_ext setObject:obj forKey:key];
-
-        [metal_function release];
-        [obj release];
-
-        GGML_LOG_DEBUG("%s: loaded %-40s %16p | th_max = %4d | th_width = %4d\n", __func__, name, (void *) kernel.pipeline,
-                (int) kernel.pipeline.maxTotalThreadsPerThreadgroup,
-                (int) kernel.pipeline.threadExecutionWidth);
-    }
-
-    return res;
-}
-
-// tokens per expert
-static size_t ggml_metal_mul_mat_id_extra_tpe(const struct ggml_tensor * op) {
-    assert(op->op == GGML_OP_MUL_MAT_ID);
-
-    const int64_t ne02 = op->src[0]->ne[2]; // n_expert
-
-    return ggml_type_size(GGML_TYPE_I32)*ne02;
-}
-
-// id map [n_tokens, n_expert]
-static size_t ggml_metal_mul_mat_id_extra_ids(const struct ggml_tensor * op) {
-    assert(op->op == GGML_OP_MUL_MAT_ID);
-
-    const int64_t ne02 = op->src[0]->ne[2]; // n_expert
-    const int64_t ne21 = op->src[2]->ne[1]; // n_tokens
-
-    return ggml_type_size(GGML_TYPE_I32)*ne02*ne21;
-}
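-
-// example (illustrative sizes): for a MoE op with n_expert = 8 and n_tokens = 512,
-// the two extra allocations above come out to:
-//   tpe: sizeof(int32_t) * 8       =    32 bytes
-//   ids: sizeof(int32_t) * 8 * 512 = 16384 bytes (16 KiB)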
-
-// return true if we should use the FA vector kernel for this op
-static bool ggml_metal_flash_attn_ext_use_vec(const struct ggml_tensor * op) {
-    assert(op->op == GGML_OP_FLASH_ATTN_EXT);
-
-    const int64_t ne00 = op->src[0]->ne[0]; // head size
-    const int64_t ne01 = op->src[0]->ne[1]; // batch size
-
-    // use the vec kernel if the batch size is small and the head size is a multiple of 32
-    return (ne01 < 20) && (ne00 % 32 == 0);
-}
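-
-// example: a single-token decode step (ne01 = 1) with head size 128 takes the vec
-// kernel, while a 512-token prompt batch (ne01 >= 20) falls back to the regular
-// flash_attn_ext kernel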
-
-static size_t ggml_metal_flash_attn_ext_extra_tmp(const struct ggml_tensor * op) {
-    assert(op->op == GGML_OP_FLASH_ATTN_EXT);
-
-    const int64_t nwg = 32;
-
-    const int64_t ne01 = op->src[0]->ne[1];
-    const int64_t ne02 = op->src[0]->ne[2];
-    const int64_t ne03 = op->src[0]->ne[3];
-    const int64_t ne20 = op->src[2]->ne[0];
-
-    // temp buffer for writing the results from each workgroup
-    // - ne20: the size of the Value head
-    // -  + 2: the S and M values for each intermediate result
-    return ggml_type_size(GGML_TYPE_F32)*(ne01*ne02*ne03*nwg*(ne20 + 2));
-}
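-
-// example: with ne01 = 1, ne02 = 32 heads, ne03 = 1 and a V head size of ne20 = 128,
-// the temp buffer is 4 * 1*32*1*32*(128 + 2) = 532480 bytes (~520 KiB)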
-
-static id<MTLComputePipelineState> ggml_metal_get_pipeline_flash_attn_ext(
-        ggml_backend_t backend, struct ggml_tensor * op,
-        bool    has_mask,
-        bool    has_sinks,
-        bool    has_bias,
-        bool    has_scap,
-        int32_t nsg) {
-    struct ggml_backend_metal_context * ctx = backend->context;
-
-    char base[256];
-    char name[256];
-
-    @autoreleasepool {
-        const int32_t dk = (int32_t) op->src[1]->ne[0];
-        const int32_t dv = (int32_t) op->src[2]->ne[0];
-
-        const int32_t ns10 = op->src[1]->nb[1]/op->src[1]->nb[0];
-        const int32_t ns20 = op->src[2]->nb[1]/op->src[2]->nb[0];
-
-        snprintf(base, 256, "kernel_%s_%s_dk%d_dv%d",
-                "flash_attn_ext",
-                ggml_type_name(op->src[1]->type),
-                dk,
-                dv);
-
-        snprintf(name, 256, "kernel_%s_%s_dk%d_dv%d_mask=%d_sinks=%d_bias=%d_scap=%d_ns10=%d_ns20=%d_nsg=%d",
-                "flash_attn_ext",
-                ggml_type_name(op->src[1]->type),
-                dk,
-                dv,
-                has_mask,
-                has_sinks,
-                has_bias,
-                has_scap,
-                ns10,
-                ns20,
-                nsg);
-
-        id<MTLComputePipelineState> res = ggml_metal_get_kernel(ctx, name);
-        if (res) {
-            // kernel found
-            return res;
-        }
-
-        MTLFunctionConstantValues * cv = [[MTLFunctionConstantValues alloc] init];
-
-        [cv setConstantValue:&has_mask  type:MTLDataTypeBool atIndex:FC_FLASH_ATTN_EXT + 0];
-        [cv setConstantValue:&has_sinks type:MTLDataTypeBool atIndex:FC_FLASH_ATTN_EXT + 1];
-        [cv setConstantValue:&has_bias  type:MTLDataTypeBool atIndex:FC_FLASH_ATTN_EXT + 2];
-        [cv setConstantValue:&has_scap  type:MTLDataTypeBool atIndex:FC_FLASH_ATTN_EXT + 3];
-
-        [cv setConstantValue:&ns10 type:MTLDataTypeInt atIndex:FC_FLASH_ATTN_EXT + 20];
-        [cv setConstantValue:&ns20 type:MTLDataTypeInt atIndex:FC_FLASH_ATTN_EXT + 21];
-        [cv setConstantValue:&nsg  type:MTLDataTypeInt atIndex:FC_FLASH_ATTN_EXT + 22];
-
-        res = ggml_metal_compile_kernel(backend, base, name, cv);
-
-        [cv release];
-
-        return res;
-    }
-}
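-
-// example (illustrative parameters): for f16 K/V with dk = dv = 128, the base function
-// is "kernel_flash_attn_ext_f16_dk128_dv128" and the cached pipeline name becomes e.g.
-// "kernel_flash_attn_ext_f16_dk128_dv128_mask=1_sinks=0_bias=0_scap=0_ns10=128_ns20=128_nsg=4"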
-
-static id<MTLComputePipelineState> ggml_metal_get_pipeline_flash_attn_ext_vec(
-        ggml_backend_t backend, struct ggml_tensor * op,
-        bool    has_mask,
-        bool    has_sinks,
-        bool    has_bias,
-        bool    has_scap,
-        int32_t nsg,
-        int32_t nwg) {
-    struct ggml_backend_metal_context * ctx = backend->context;
-
-    char base[256];
-    char name[256];
-
-    @autoreleasepool {
-        const int32_t dk = (int32_t) op->src[1]->ne[0];
-        const int32_t dv = (int32_t) op->src[2]->ne[0];
-
-        const int32_t ns10 = op->src[1]->nb[1]/op->src[1]->nb[0];
-        const int32_t ns20 = op->src[2]->nb[1]/op->src[2]->nb[0];
-
-        snprintf(base, 256, "kernel_%s_%s_dk%d_dv%d",
-                "flash_attn_ext_vec",
-                ggml_type_name(op->src[1]->type),
-                dk,
-                dv);
-
-        snprintf(name, 256, "kernel_%s_%s_dk%d_dv%d_mask=%d_sink=%d_bias=%d_softcap=%d_ns10=%d_ns20=%d_nsg=%d_nwg=%d",
-                "flash_attn_ext_vec",
-                ggml_type_name(op->src[1]->type),
-                dk,
-                dv,
-                has_mask,
-                has_sinks,
-                has_bias,
-                has_scap,
-                ns10,
-                ns20,
-                nsg, nwg);
-
-        id<MTLComputePipelineState> res = ggml_metal_get_kernel(ctx, name);
-        if (res) {
-            // kernel found
-            return res;
-        }
-
-        MTLFunctionConstantValues * cv = [[MTLFunctionConstantValues alloc] init];
-
-        [cv setConstantValue:&has_mask  type:MTLDataTypeBool atIndex:FC_FLASH_ATTN_EXT_VEC + 0];
-        [cv setConstantValue:&has_sinks type:MTLDataTypeBool atIndex:FC_FLASH_ATTN_EXT_VEC + 1];
-        [cv setConstantValue:&has_bias  type:MTLDataTypeBool atIndex:FC_FLASH_ATTN_EXT_VEC + 2];
-        [cv setConstantValue:&has_scap  type:MTLDataTypeBool atIndex:FC_FLASH_ATTN_EXT_VEC + 3];
-
-        [cv setConstantValue:&ns10 type:MTLDataTypeInt atIndex:FC_FLASH_ATTN_EXT_VEC + 20];
-        [cv setConstantValue:&ns20 type:MTLDataTypeInt atIndex:FC_FLASH_ATTN_EXT_VEC + 21];
-        [cv setConstantValue:&nsg  type:MTLDataTypeInt atIndex:FC_FLASH_ATTN_EXT_VEC + 22];
-        [cv setConstantValue:&nwg  type:MTLDataTypeInt atIndex:FC_FLASH_ATTN_EXT_VEC + 23];
-
-        res = ggml_metal_compile_kernel(backend, base, name, cv);
-
-        [cv release];
-
-        return res;
-    }
-}
-
-static id<MTLComputePipelineState> ggml_metal_get_pipeline_flash_attn_ext_vec_reduce(
-        ggml_backend_t backend, struct ggml_tensor * op,
-        int32_t dv,
-        int32_t nwg) {
-    struct ggml_backend_metal_context * ctx = backend->context;
-
-    char base[256];
-    char name[256];
-
-    @autoreleasepool {
-        snprintf(base, 256, "kernel_flash_attn_ext_vec_reduce");
-        snprintf(name, 256, "kernel_flash_attn_ext_vec_reduce_dv=%d_nwg=%d", dv, nwg);
-
-        id<MTLComputePipelineState> res = ggml_metal_get_kernel(ctx, name);
-        if (res) {
-            // kernel found
-            return res;
-        }
-
-        MTLFunctionConstantValues * cv = [[MTLFunctionConstantValues alloc] init];
-
-        [cv setConstantValue:&dv  type:MTLDataTypeInt atIndex:FC_FLASH_ATTN_EXT_VEC_REDUCE + 0];
-        [cv setConstantValue:&nwg type:MTLDataTypeInt atIndex:FC_FLASH_ATTN_EXT_VEC_REDUCE + 1];
-
-        res = ggml_metal_compile_kernel(backend, base, name, cv);
-
-        [cv release];
-
-        return res;
-    }
-
-    GGML_UNUSED(op);
-}
-
-static id<MTLComputePipelineState> ggml_metal_get_pipeline_bin(
-        ggml_backend_t backend, enum ggml_op op,
-        int32_t n_fuse,
-        bool row) {
-    struct ggml_backend_metal_context * ctx = backend->context;
-
-    char base[256];
-    char name[256];
-
-    @autoreleasepool {
-        const char * op_str = "undefined";
-        switch (op) {
-            case GGML_OP_ADD:   op_str = "add";   break;
-            case GGML_OP_SUB:   op_str = "sub";   break;
-            case GGML_OP_MUL:   op_str = "mul";   break;
-            case GGML_OP_DIV:   op_str = "div";   break;
-            default: GGML_ABORT("fatal error");
-        };
-
-        if (row) {
-            snprintf(base, 256, "kernel_%s_row_c4_fuse_%d", op_str, n_fuse);
-        } else {
-            snprintf(base, 256, "kernel_%s_fuse_%d", op_str, n_fuse);
-        }
-
-        snprintf(name, 256, "%s", base);
-
-        id<MTLComputePipelineState> res = ggml_metal_get_kernel(ctx, name);
-        if (res) {
-            // kernel found
-            return res;
-        }
-
-        return ggml_metal_compile_kernel(backend, base, name, nil);
-    }
-}
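-
-// example: GGML_OP_ADD with n_fuse = 3 and row = true resolves to the
-// "kernel_add_row_c4_fuse_3" function, which is compiled once and then served from the cache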
-
-static id<MTLComputePipelineState> ggml_metal_get_pipeline_rms_norm(
-        ggml_backend_t backend, struct ggml_tensor * op,
-        int32_t n_fuse) {
-    struct ggml_backend_metal_context * ctx = backend->context;
-
-    char base[256];
-    char name[256];
-
-    @autoreleasepool {
-        switch (n_fuse) {
-            case 1: snprintf(base, 256, "kernel_rms_norm");         break;
-            case 2: snprintf(base, 256, "kernel_rms_norm_mul");     break;
-            case 3: snprintf(base, 256, "kernel_rms_norm_mul_add"); break;
-            default: GGML_ABORT("fatal error");
-        }
-
-        snprintf(name, 256, "%s", base);
-
-        id<MTLComputePipelineState> res = ggml_metal_get_kernel(ctx, name);
-        if (res) {
-            // kernel found
-            return res;
-        }
-
-        return ggml_metal_compile_kernel(backend, base, name, nil);
-    }
-
-    GGML_UNUSED(op);
-}
-
-static void ggml_metal_free(struct ggml_backend_metal_context * ctx) {
-    GGML_LOG_INFO("%s: deallocating\n", __func__);
-
-    for (int i = 0; i < GGML_METAL_KERNEL_TYPE_COUNT; ++i) {
-        [ctx->kernels[i].pipeline release];
-    }
-
-    if (ctx->kernels_ext) {
-        [ctx->kernels_ext release];
-        ctx->kernels_ext = nil;
-    }
-
-    Block_release(ctx->encode_async);
-
-    //[ctx->queue release]; // [TAG_QUEUE_PER_BACKEND]
-
-    for (int i = 0; i < GGML_METAL_MAX_COMMAND_BUFFERS; ++i) {
-        if (ctx->cmd_bufs[i].obj) {
-            [ctx->cmd_bufs[i].obj release];
-        }
-
-        if (ctx->cmd_bufs[i].mem_ranges) {
-            ggml_mem_ranges_free(ctx->cmd_bufs[i].mem_ranges);
-        }
-    }
-
-    [ctx->cmd_bufs_ext removeAllObjects];
-    [ctx->cmd_bufs_ext release];
-
-    dispatch_release(ctx->d_queue);
-
-    free(ctx);
-}
-
-// temporarily defined here for compatibility between ggml-backend and the old API
-
-struct ggml_backend_metal_buffer {
-    void   * data;
-    size_t   size;
-
-    id<MTLBuffer> metal;
-};
-
-struct ggml_backend_metal_buffer_context {
-    void * all_data;
-    size_t all_size;
-
-    // if false, the Metal buffer data is allocated in private GPU memory and is not shared with the host
-    bool is_shared;
-
-    // multiple buffers are used only to avoid the maximum buffer size limitation when using mmap
-    int n_buffers;
-    struct ggml_backend_metal_buffer buffers[GGML_METAL_MAX_BUFFERS];
-
-    // optional MTLResidencySet
-    // note: cannot explicitly use "id<MTLResidencySet>" here because it is not available on certain OSes
-    id rset;
-
-    // pointers to global device objects
-    id<MTLDevice> device;
-    id<MTLCommandQueue> queue;
-};
-
-// rset init
-static bool ggml_backend_metal_buffer_rset_init(
-        struct ggml_backend_metal_buffer_context * ctx,
-        struct ggml_backend_metal_device_context * ctx_dev,
-        id<MTLDevice> device) {
-    ctx->rset = nil;
-
-    if (!ctx_dev->has_residency_sets) {
-        return true;
-    }
-
-#if defined(GGML_METAL_HAS_RESIDENCY_SETS)
-    if (@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, *)) {
-        MTLResidencySetDescriptor * desc = [[MTLResidencySetDescriptor alloc] init];
-        desc.label = @"ggml_backend_metal";
-        desc.initialCapacity = ctx->n_buffers;
-
-        NSError * error;
-        ctx->rset = [device newResidencySetWithDescriptor:desc error:&error];
-        if (error) {
-            GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
-            [desc release];
-            return false;
-        }
-
-        [desc release];
-
-        for (int i = 0; i < ctx->n_buffers; i++) {
-            [ctx->rset addAllocation:ctx->buffers[i].metal];
-        }
-
-        [ctx->rset commit];
-        [ctx->rset requestResidency];
-
-        return true;
-    }
-#else
-    GGML_UNUSED(ctx_dev);
-    GGML_UNUSED(device);
-#endif
-
-    return true;
-}
-
-// rset free
-static void ggml_backend_metal_buffer_rset_free(struct ggml_backend_metal_buffer_context * ctx) {
-#if defined(GGML_METAL_HAS_RESIDENCY_SETS)
-    if (@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, *)) {
-        if (ctx->rset) {
-            [ctx->rset endResidency];
-            [ctx->rset removeAllAllocations];
-            [ctx->rset release];
-        }
-    }
-#else
-    GGML_UNUSED(ctx);
-#endif
-}
-
-// finds the Metal buffer that contains the tensor data on the GPU device
-// the assumption is that there is a 1-to-1 mapping between the host and device memory buffers, so we can find the
-// Metal buffer based on the host memory pointer
-//
-static id<MTLBuffer> ggml_metal_get_buffer(const struct ggml_tensor * t, size_t * offs) {
-    //GGML_LOG_INFO("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach);
-
-    const int64_t tsize = ggml_nbytes(t);
-
-    ggml_backend_buffer_t buffer = t->view_src ? t->view_src->buffer : t->buffer;
-
-    struct ggml_backend_metal_buffer_context * buf_ctx = (struct ggml_backend_metal_buffer_context *) buffer->context;
-
-    // find the view that contains the tensor fully
-    for (int i = 0; i < buf_ctx->n_buffers; ++i) {
-        const int64_t ioffs = (int64_t) t->data - (int64_t) buf_ctx->buffers[i].data;
-
-        //GGML_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, buf_ctx->buffers[%d].size = %10ld\n", ioffs, tsize, ioffs + tsize, i, buf_ctx->buffers[i].size);
-        if (ioffs >= 0 && ioffs + tsize <= (int64_t) buf_ctx->buffers[i].size) {
-            *offs = (size_t) ioffs;
-
-            //GGML_LOG_INFO("%s: tensor '%16s', offs = %8ld\n", __func__, t->name, *offs);
-
-            return buf_ctx->buffers[i].metal;
-        }
-    }
-
-    GGML_LOG_ERROR("%s: error: tensor '%s' buffer is nil\n", __func__, t->name);
-
-    return nil;
-}
-
-static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_context * ctx_dev, const struct ggml_tensor * op) {
-    const bool has_simdgroup_mm        = ctx_dev->has_simdgroup_mm;
-    const bool has_simdgroup_reduction = ctx_dev->has_simdgroup_reduction;
-    const bool use_bfloat              = ctx_dev->use_bfloat;
-
-    if (!use_bfloat) {
-        if (op->type == GGML_TYPE_BF16) {
-            return false;
-        }
-
-        for (size_t i = 0, n = 3; i < n; ++i) {
-            if (op->src[i] != NULL && op->src[i]->type == GGML_TYPE_BF16) {
-                return false;
-            }
-        }
-    }
-
-    switch (op->op) {
-        case GGML_OP_UNARY:
-            switch (ggml_get_unary_op(op)) {
-                case GGML_UNARY_OP_TANH:
-                case GGML_UNARY_OP_RELU:
-                case GGML_UNARY_OP_SIGMOID:
-                case GGML_UNARY_OP_GELU:
-                case GGML_UNARY_OP_GELU_ERF:
-                case GGML_UNARY_OP_GELU_QUICK:
-                case GGML_UNARY_OP_SILU:
-                case GGML_UNARY_OP_ELU:
-                case GGML_UNARY_OP_NEG:
-                case GGML_UNARY_OP_ABS:
-                case GGML_UNARY_OP_SGN:
-                case GGML_UNARY_OP_STEP:
-                case GGML_UNARY_OP_HARDSWISH:
-                case GGML_UNARY_OP_HARDSIGMOID:
-                case GGML_UNARY_OP_EXP:
-                    return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
-                default:
-                    return false;
-            }
-        case GGML_OP_GLU:
-            switch (ggml_get_glu_op(op)) {
-                case GGML_GLU_OP_REGLU:
-                case GGML_GLU_OP_GEGLU:
-                case GGML_GLU_OP_SWIGLU:
-                case GGML_GLU_OP_SWIGLU_OAI:
-                case GGML_GLU_OP_GEGLU_ERF:
-                case GGML_GLU_OP_GEGLU_QUICK:
-                    return ggml_is_contiguous_1(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
-                default:
-                    return false;
-            }
-        case GGML_OP_NONE:
-        case GGML_OP_RESHAPE:
-        case GGML_OP_VIEW:
-        case GGML_OP_TRANSPOSE:
-        case GGML_OP_PERMUTE:
-        case GGML_OP_CONCAT:
-            return true;
-        case GGML_OP_ADD:
-        case GGML_OP_SUB:
-        case GGML_OP_MUL:
-        case GGML_OP_DIV:
-        case GGML_OP_ADD_ID:
-            return op->src[0]->type == GGML_TYPE_F32;
-        case GGML_OP_ACC:
-        case GGML_OP_REPEAT:
-        case GGML_OP_SCALE:
-        case GGML_OP_CONV_TRANSPOSE_1D:
-            return true;
-        case GGML_OP_CLAMP:
-            return op->src[0]->type == GGML_TYPE_F32;
-        case GGML_OP_SQR:
-        case GGML_OP_SQRT:
-        case GGML_OP_SIN:
-        case GGML_OP_COS:
-            return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
-        case GGML_OP_LOG:
-            return false; // TODO: implement
-        case GGML_OP_SUM_ROWS:
-        case GGML_OP_MEAN:
-        case GGML_OP_SOFT_MAX:
-        case GGML_OP_GROUP_NORM:
-            return has_simdgroup_reduction && ggml_is_contiguous_rows(op->src[0]);
-        case GGML_OP_RMS_NORM:
-        case GGML_OP_L2_NORM:
-            return has_simdgroup_reduction && (op->ne[0] % 4 == 0 && ggml_is_contiguous_1(op->src[0]));
-        case GGML_OP_ARGMAX:
-            return has_simdgroup_reduction;
-        case GGML_OP_NORM:
-            return has_simdgroup_reduction && (op->ne[0] % 4 == 0 && ggml_is_contiguous_1(op->src[0]));
-        case GGML_OP_ROPE:
-            return true;
-        case GGML_OP_IM2COL:
-            return ggml_is_contiguous(op->src[1]) && op->src[1]->type == GGML_TYPE_F32 && (op->type == GGML_TYPE_F16 || op->type == GGML_TYPE_F32);
-        case GGML_OP_POOL_1D:
-            return false;
-        case GGML_OP_UPSCALE:
-            return op->src[0]->type == GGML_TYPE_F32 && op->op_params[0] == GGML_SCALE_MODE_NEAREST;
-        case GGML_OP_POOL_2D:
-            return op->src[0]->type == GGML_TYPE_F32;
-        case GGML_OP_PAD:
-            return (ggml_get_op_params_i32(op, 0) == 0) && (ggml_get_op_params_i32(op, 2) == 0) &&
-                   (ggml_get_op_params_i32(op, 4) == 0) && (ggml_get_op_params_i32(op, 6) == 0);
-        case GGML_OP_PAD_REFLECT_1D:
-        case GGML_OP_TIMESTEP_EMBEDDING:
-        case GGML_OP_ARGSORT:
-        case GGML_OP_LEAKY_RELU:
-            return op->src[0]->type == GGML_TYPE_F32;
-        case GGML_OP_ARANGE:
-            return true;
-        case GGML_OP_FLASH_ATTN_EXT:
-            // for new head sizes, add checks here
-            if (op->src[0]->ne[0] != 40 &&
-                op->src[0]->ne[0] != 64 &&
-                op->src[0]->ne[0] != 80 &&
-                op->src[0]->ne[0] != 96 &&
-                op->src[0]->ne[0] != 112 &&
-                op->src[0]->ne[0] != 128 &&
-                op->src[0]->ne[0] != 192 &&
-                op->src[0]->ne[0] != 256) {
-                return false;
-            }
-            if (op->src[0]->ne[0] == 576) {
-                // DeepSeek sizes
-                // TODO: disabled for now, until optimized
-                return false;
-            }
-            if (op->src[1]->type != op->src[2]->type) {
-                return false;
-            }
-            return has_simdgroup_mm; // TODO: over-restricted for vec-kernels
-        case GGML_OP_SSM_CONV:
-        case GGML_OP_SSM_SCAN:
-            return has_simdgroup_reduction;
-        case GGML_OP_RWKV_WKV6:
-        case GGML_OP_RWKV_WKV7:
-            return true;
-        case GGML_OP_MUL_MAT:
-        case GGML_OP_MUL_MAT_ID:
-            return has_simdgroup_reduction &&
-                (op->src[0]->type != GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F32);
-        case GGML_OP_CPY:
-        case GGML_OP_DUP:
-        case GGML_OP_CONT:
-            {
-                switch (op->src[0]->type) {
-                    case GGML_TYPE_F32:
-                        switch (op->type) {
-                           case GGML_TYPE_F32:
-                           case GGML_TYPE_F16:
-                           case GGML_TYPE_BF16:
-                           case GGML_TYPE_Q8_0:
-                           case GGML_TYPE_Q4_0:
-                           case GGML_TYPE_Q4_1:
-                           case GGML_TYPE_Q5_0:
-                           case GGML_TYPE_Q5_1:
-                           case GGML_TYPE_IQ4_NL:
-                           case GGML_TYPE_I32:
-                                return true;
-                           default:
-                                return false;
-                        }
-                    case GGML_TYPE_F16:
-                        switch (op->type) {
-                            case GGML_TYPE_F32:
-                            case GGML_TYPE_F16:
-                                return true;
-                            default:
-                                return false;
-                        }
-                    case GGML_TYPE_BF16:
-                        switch (op->type) {
-                            case GGML_TYPE_F32:
-                            case GGML_TYPE_BF16:
-                                return true;
-                            default:
-                                return false;
-                        }
-                    case GGML_TYPE_Q4_0:
-                    case GGML_TYPE_Q4_1:
-                    case GGML_TYPE_Q5_0:
-                    case GGML_TYPE_Q5_1:
-                    case GGML_TYPE_Q8_0:
-                        switch (op->type) {
-                            case GGML_TYPE_F32:
-                            case GGML_TYPE_F16:
-                                return true;
-                            default:
-                                return false;
-                        }
-                    case GGML_TYPE_I32:
-                        return op->type == GGML_TYPE_F32;
-                    default:
-                        return false;
-                };
-            }
-        case GGML_OP_DIAG_MASK_INF:
-        case GGML_OP_GET_ROWS:
-            {
-                return op->ne[3] == 1;
-            }
-        case GGML_OP_SET_ROWS:
-            {
-                if (op->src[0]->type != GGML_TYPE_F32) {
-                    return false;
-                }
-
-                switch (op->type) {
-                    case GGML_TYPE_F32:
-                    case GGML_TYPE_F16:
-                    case GGML_TYPE_BF16:
-                    case GGML_TYPE_Q8_0:
-                    case GGML_TYPE_Q4_0:
-                    case GGML_TYPE_Q4_1:
-                    case GGML_TYPE_Q5_0:
-                    case GGML_TYPE_Q5_1:
-                    case GGML_TYPE_IQ4_NL:
-                        return true;
-                    default:
-                        return false;
-                };
-            }
-        default:
-            return false;
-    }
-}
-
-struct ggml_metal_encode_context {
-    ggml_backend_t backend;
-
-    id<MTLComputeCommandEncoder> encoder;
-
-    struct ggml_mem_ranges * mem_ranges;
-};
-
-static bool ggml_metal_encode_concurrency_reset(struct ggml_metal_encode_context * ctx) {
-    if (!ctx->mem_ranges) {
-        return true;
-    }
-
-    [ctx->encoder memoryBarrierWithScope:MTLBarrierScopeBuffers];
-
-    ggml_mem_ranges_reset(ctx->mem_ranges);
-
-    return true;
-}
-
-static bool ggml_metal_encode_concurrency_check(struct ggml_metal_encode_context * ctx, const struct ggml_tensor * node) {
-    if (!ctx->mem_ranges) {
-        return false;
-    }
-
-    return ggml_mem_ranges_check(ctx->mem_ranges, node);
-}
-
-static bool ggml_metal_encode_concurrency_add(struct ggml_metal_encode_context * ctx, const struct ggml_tensor * node) {
-    if (!ctx->mem_ranges) {
-        return true;
-    }
-
-    return ggml_mem_ranges_add(ctx->mem_ranges, node);
-}
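-
-// typical flow per node: check whether the node's ranges conflict with the ranges
-// recorded so far; if so, reset (memory barrier + clear); encode the node; then add
-// its ranges so that later nodes can be checked against it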
-
-static int ggml_metal_encode_node(struct ggml_metal_encode_context * ctx_enc, int idx, int idx_end) {
-    ggml_backend_t backend = ctx_enc->backend;
-
-    id<MTLComputeCommandEncoder> encoder = ctx_enc->encoder;
-
-    struct ggml_backend_metal_context        * ctx     = backend->context;
-    struct ggml_backend_metal_device_context * ctx_dev = backend->device->context;
-
-    struct ggml_cgraph * gf = ctx->gf;
-
-    enum ggml_op ops[8];
-
-    struct ggml_tensor ** nodes = ggml_graph_nodes(gf) + idx;
-    struct ggml_tensor *  node  = nodes[0];
-
-    //GGML_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, idx, ggml_op_name(node->op));
-
-    struct ggml_tensor * src0 = node->src[0];
-    struct ggml_tensor * src1 = node->src[1];
-    struct ggml_tensor * src2 = node->src[2];
-    struct ggml_tensor * dst  = node;
-
-    if (ggml_is_empty(dst)) {
-        return 1;
-    }
-
-    switch (dst->op) {
-        case GGML_OP_NONE:
-        case GGML_OP_RESHAPE:
-        case GGML_OP_VIEW:
-        case GGML_OP_TRANSPOSE:
-        case GGML_OP_PERMUTE:
-            {
-                // noop -> next node
-            } return 1;
-        default:
-            {
-            } break;
-    }
-
-    if (!ggml_metal_supports_op(ctx_dev, dst)) {
-        GGML_LOG_ERROR("%s: error: unsupported op '%s'\n", __func__, ggml_op_desc(dst));
-        GGML_ABORT("unsupported op");
-    }
-
-    const int64_t  ne00 = src0 ? src0->ne[0] : 0;
-    const int64_t  ne01 = src0 ? src0->ne[1] : 0;
-    const int64_t  ne02 = src0 ? src0->ne[2] : 0;
-    const int64_t  ne03 = src0 ? src0->ne[3] : 0;
-
-    const uint64_t nb00 = src0 ? src0->nb[0] : 0;
-    const uint64_t nb01 = src0 ? src0->nb[1] : 0;
-    const uint64_t nb02 = src0 ? src0->nb[2] : 0;
-    const uint64_t nb03 = src0 ? src0->nb[3] : 0;
-
-    const int64_t  ne10 = src1 ? src1->ne[0] : 0;
-    const int64_t  ne11 = src1 ? src1->ne[1] : 0;
-    const int64_t  ne12 = src1 ? src1->ne[2] : 0;
-    const int64_t  ne13 = src1 ? src1->ne[3] : 0;
-
-    const uint64_t nb10 = src1 ? src1->nb[0] : 0;
-    const uint64_t nb11 = src1 ? src1->nb[1] : 0;
-    const uint64_t nb12 = src1 ? src1->nb[2] : 0;
-    const uint64_t nb13 = src1 ? src1->nb[3] : 0;
-
-    const int64_t  ne20 = src2 ? src2->ne[0] : 0;
-    const int64_t  ne21 = src2 ? src2->ne[1] : 0;
-    const int64_t  ne22 = src2 ? src2->ne[2] : 0; GGML_UNUSED(ne22);
-    const int64_t  ne23 = src2 ? src2->ne[3] : 0; GGML_UNUSED(ne23);
-
-    const uint64_t nb20 = src2 ? src2->nb[0] : 0; GGML_UNUSED(nb20);
-    const uint64_t nb21 = src2 ? src2->nb[1] : 0;
-    const uint64_t nb22 = src2 ? src2->nb[2] : 0;
-    const uint64_t nb23 = src2 ? src2->nb[3] : 0; GGML_UNUSED(nb23);
-
-    const int64_t  ne0  =  dst ?  dst->ne[0] : 0;
-    const int64_t  ne1  =  dst ?  dst->ne[1] : 0;
-    const int64_t  ne2  =  dst ?  dst->ne[2] : 0;
-    const int64_t  ne3  =  dst ?  dst->ne[3] : 0;
-
-    const uint64_t nb0  =  dst ?  dst->nb[0] : 0;
-    const uint64_t nb1  =  dst ?  dst->nb[1] : 0;
-    const uint64_t nb2  =  dst ?  dst->nb[2] : 0;
-    const uint64_t nb3  =  dst ?  dst->nb[3] : 0;
-
-    size_t offs_src[GGML_MAX_SRC];
-
-    id<MTLBuffer> id_src[GGML_MAX_SRC];
-
-    enum ggml_type srct[GGML_MAX_SRC];
-
-    for (int i = 0; i < GGML_MAX_SRC; i++) {
-        offs_src[i] = 0;
-        id_src[i] = node->src[i] ? ggml_metal_get_buffer(node->src[i], &offs_src[i]) : nil;
-        srct[i]   = node->src[i] ? node->src[i]->type : GGML_TYPE_COUNT;
-    }
-
-    // TODO: tmp shorthands - remove
-    size_t offs_src0 = offs_src[0];
-    size_t offs_src1 = offs_src[1];
-    size_t offs_src2 = offs_src[2];
-
-    id<MTLBuffer> id_src0 = id_src[0];
-    id<MTLBuffer> id_src1 = id_src[1];
-    id<MTLBuffer> id_src2 = id_src[2];
-
-    const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT;
-    const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT;
-    const enum ggml_type src2t = src2 ? src2->type : GGML_TYPE_COUNT;
-    const enum ggml_type dstt  = dst  ? dst->type  : GGML_TYPE_COUNT;
-
-    size_t offs_dst = 0;
-
-    id<MTLBuffer> id_dst = dst ? ggml_metal_get_buffer(dst, &offs_dst) : nil;
-
-    int n_fuse = 1;
-
-    // check if the current node can run concurrently with other nodes before it
-    // the condition is that:
-    //  - the current node cannot write to any previous src or dst ranges
-    //  - the current node cannot read from any previous dst ranges
-    //
-    // if the condition is not satisfied, we put a memory barrier and clear all ranges
-    // otherwise, we add the new ranges to the encoding context and process the node concurrently
-    //
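-    // for example, two consecutive nodes that only read the same src tensor but write
-    // to disjoint dst buffers can be encoded back-to-back, while a node that reads the
-    // dst of the previous node triggers memoryBarrierWithScope:MTLBarrierScopeBuffers
-    // and a reset of the tracked ranges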
-    {
-        const bool is_concurrent = ggml_metal_encode_concurrency_check(ctx_enc, node);
-
-        if (!is_concurrent) {
-            ggml_metal_encode_concurrency_reset(ctx_enc);
-        }
-
-        if (ctx_dev->debug_graph > 0) {
-            GGML_LOG_DEBUG("%s: node[%5d] - %-12s %s\n", __func__, idx, ggml_op_name(dst->op), is_concurrent ? "(concurrent)" : "");
-        }
-        if (ctx_dev->debug_graph > 1) {
-            if (src0) {
-                GGML_LOG_DEBUG("%s: src0 - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02, ne03, nb00, nb01, nb02, nb03,
-                        ggml_is_contiguous(src0), src0->name);
-            }
-            if (src1) {
-                GGML_LOG_DEBUG("%s: src1 - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12, ne13, nb10, nb11, nb12, nb13,
-                        ggml_is_contiguous(src1), src1->name);
-            }
-            if (dst) {
-                GGML_LOG_DEBUG("%s: dst  - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(dstt), ne0, ne1, ne2, ne3, nb0, nb1, nb2, nb3,
-                        dst->name);
-            }
-        }
-    }
-
-    id<MTLDevice> device = ctx_dev->mtl_device;
-
-    switch (dst->op) {
-        case GGML_OP_CONCAT:
-            {
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CONCAT].pipeline;
-
-                const int32_t dim = ((const int32_t *) dst->op_params)[0];
-
-                ggml_metal_kargs_concat args = {
-                    /*.ne00 =*/ ne00,
-                    /*.ne01 =*/ ne01,
-                    /*.ne02 =*/ ne02,
-                    /*.ne03 =*/ ne03,
-                    /*.nb00 =*/ nb00,
-                    /*.nb01 =*/ nb01,
-                    /*.nb02 =*/ nb02,
-                    /*.nb03 =*/ nb03,
-                    /*.ne10 =*/ ne10,
-                    /*.ne11 =*/ ne11,
-                    /*.ne12 =*/ ne12,
-                    /*.ne13 =*/ ne13,
-                    /*.nb10 =*/ nb10,
-                    /*.nb11 =*/ nb11,
-                    /*.nb12 =*/ nb12,
-                    /*.nb13 =*/ nb13,
-                    /*.ne0  =*/ ne0,
-                    /*.ne1  =*/ ne1,
-                    /*.ne2  =*/ ne2,
-                    /*.ne3  =*/ ne3,
-                    /*.nb0  =*/ nb0,
-                    /*.nb1  =*/ nb1,
-                    /*.nb2  =*/ nb2,
-                    /*.nb3  =*/ nb3,
-                    /*.dim  =*/ dim,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:3];
-
-                const int nth = MIN(1024, ne0);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_ADD:
-        case GGML_OP_SUB:
-        case GGML_OP_MUL:
-        case GGML_OP_DIV:
-            {
-                GGML_ASSERT(src0t == GGML_TYPE_F32);
-                GGML_ASSERT(src1t == GGML_TYPE_F32);
-
-                GGML_ASSERT(ggml_is_contiguous_rows(src0));
-                GGML_ASSERT(ggml_is_contiguous_rows(src1));
-
-                const size_t offs = 0;
-
-                bool bcast_row = false;
-
-                ggml_metal_kargs_bin args = {
-                    /*.ne00 =*/ ne00,
-                    /*.ne01 =*/ ne01,
-                    /*.ne02 =*/ ne02,
-                    /*.ne03 =*/ ne03,
-                    /*.nb00 =*/ nb00,
-                    /*.nb01 =*/ nb01,
-                    /*.nb02 =*/ nb02,
-                    /*.nb03 =*/ nb03,
-                    /*.ne10 =*/ ne10,
-                    /*.ne11 =*/ ne11,
-                    /*.ne12 =*/ ne12,
-                    /*.ne13 =*/ ne13,
-                    /*.nb10 =*/ nb10,
-                    /*.nb11 =*/ nb11,
-                    /*.nb12 =*/ nb12,
-                    /*.nb13 =*/ nb13,
-                    /*.ne0  =*/ ne0,
-                    /*.ne1  =*/ ne1,
-                    /*.ne2  =*/ ne2,
-                    /*.ne3  =*/ ne3,
-                    /*.nb0  =*/ nb0,
-                    /*.nb1  =*/ nb1,
-                    /*.nb2  =*/ nb2,
-                    /*.nb3  =*/ nb3,
-                    /*.offs =*/ offs,
-                    /*.o1   =*/ { offs_src1 },
-                };
-
-                // c[0] = add(a,    b[0])
-                // c[1] = add(c[0], b[1])
-                // c[2] = add(c[1], b[2])
-                // ...
-                if (ctx_dev->use_fusion) {
-                    ops[0] = GGML_OP_ADD;
-                    ops[1] = GGML_OP_ADD;
-                    ops[2] = GGML_OP_ADD;
-                    ops[3] = GGML_OP_ADD;
-                    ops[4] = GGML_OP_ADD;
-                    ops[5] = GGML_OP_ADD;
-                    ops[6] = GGML_OP_ADD;
-                    ops[7] = GGML_OP_ADD;
-
-                    size_t offs_fuse;
-                    id<MTLBuffer> id_fuse;
-
-                    // note: in metal, we sometimes encode the graph in parallel so we have to avoid fusing nodes
-                    //       across splits. idx_end indicates the last node in the current split
-                    for (n_fuse = 0; n_fuse <= 6 && idx + n_fuse + 1 < idx_end; ++n_fuse) {
-                        if (!ggml_can_fuse(gf, idx + n_fuse, ops + n_fuse, 2)) {
-                            break;
-                        }
-
-                        if (nodes[n_fuse] != nodes[n_fuse + 1]->src[0]) {
-                            break;
-                        }
-
-                        // b[0] === b[1] === ...
-                        if (!ggml_are_same_layout(nodes[n_fuse]->src[1], nodes[n_fuse + 1]->src[1])) {
-                            break;
-                        }
-
-                        // only fuse nodes if src1 is in the same Metal buffer
-                        id_fuse = ggml_metal_get_buffer(nodes[n_fuse + 1]->src[1], &offs_fuse);
-                        if (id_fuse != id_src1) {
-                            break;
-                        }
-
-                        ctx_dev->fuse_cnt[nodes[n_fuse + 1]->op]++;
-
-                        args.o1[n_fuse + 1] = offs_fuse;
-                    }
-
-                    ++n_fuse;
-
-                    if (ctx_dev->debug_fusion > 1 && n_fuse > 1) {
-                        GGML_LOG_DEBUG("%s: fuse: ADD x %d\n", __func__, n_fuse);
-                    }
-                }
-
-                id<MTLComputePipelineState> pipeline = nil;
-
-                if (ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0) {
-                    GGML_ASSERT(ggml_is_contiguous(src0));
-
-                    // src1 is a row
-                    GGML_ASSERT(ne11 == 1);
-
-                    pipeline = ggml_metal_get_pipeline_bin(backend, dst->op, n_fuse, true);
-
-                    bcast_row = true;
-                } else {
-                    pipeline = ggml_metal_get_pipeline_bin(backend, dst->op, n_fuse, false);
-                }
-
-                if (n_fuse > 1) {
-                    id_dst = ggml_metal_get_buffer(nodes[n_fuse - 1], &offs_dst);
-
-                    for (int i = 1; i < n_fuse; ++i) {
-                        if (!ggml_metal_encode_concurrency_check(ctx_enc, nodes[i])) {
-                            ggml_metal_encode_concurrency_reset(ctx_enc);
-
-                            break;
-                        }
-                    }
-                }
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                [encoder setBuffer:id_src1 offset:0         atIndex:2];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:3];
-
-                if (bcast_row) {
-                    const int64_t n = ggml_nelements(dst)/4;
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } else {
-                    int nth = 32;
-
-                    while (16*nth < ne0 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) {
-                        nth *= 2;
-                    }
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-                }
-            } break;
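For reference, the fusion scan above only ever extends a chain of ADDs in which each node consumes the previous result and all src1 operands share one layout (and one Metal buffer). A minimal sketch of the per-step condition, where the helper name add_chain_step_ok is illustrative and not part of the ggml API (the real loop additionally calls ggml_can_fuse() and checks the Metal buffer):

// illustrative sketch of the per-step check performed by the fusion loop above
static bool add_chain_step_ok(const struct ggml_tensor * cur, const struct ggml_tensor * nxt) {
    return nxt->op     == GGML_OP_ADD &&                   // the next node is also an ADD ...
           nxt->src[0] == cur         &&                   // ... consuming the current result, and
           ggml_are_same_layout(cur->src[1], nxt->src[1]); // b[0] === b[1] === ...
}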
-        case GGML_OP_ADD_ID:
-            {
-                GGML_ASSERT(src0t == GGML_TYPE_F32);
-                GGML_ASSERT(src1t == GGML_TYPE_F32);
-                GGML_ASSERT(src2t == GGML_TYPE_I32);
-                GGML_ASSERT(dstt  == GGML_TYPE_F32);
-
-                GGML_ASSERT(ggml_is_contiguous_rows(src0));
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ID].pipeline;
-
-                ggml_metal_kargs_add_id args = {
-                    /*.ne0  =*/ ne0,
-                    /*.ne1  =*/ ne1,
-                    /*.nb01 =*/ nb01,
-                    /*.nb02 =*/ nb02,
-                    /*.nb11 =*/ nb11,
-                    /*.nb21 =*/ nb21,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2];
-                [encoder setBuffer:id_src2 offset:offs_src2 atIndex:3];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:4];
-
-                const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_REPEAT:
-            {
-                id<MTLComputePipelineState> pipeline;
-
-                switch (src0t) {
-                    case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_REPEAT_F32].pipeline; break;
-                    case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_REPEAT_F16].pipeline; break;
-                    case GGML_TYPE_I32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_REPEAT_I32].pipeline; break;
-                    case GGML_TYPE_I16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_REPEAT_I16].pipeline; break;
-                    default: GGML_ABORT("fatal error");
-                }
-
-                ggml_metal_kargs_repeat args = {
-                    /*.ne00 =*/ ne00,
-                    /*.ne01 =*/ ne01,
-                    /*.ne02 =*/ ne02,
-                    /*.ne03 =*/ ne03,
-                    /*.nb00 =*/ nb00,
-                    /*.nb01 =*/ nb01,
-                    /*.nb02 =*/ nb02,
-                    /*.nb03 =*/ nb03,
-                    /*.ne0  =*/ ne0,
-                    /*.ne1  =*/ ne1,
-                    /*.ne2  =*/ ne2,
-                    /*.ne3  =*/ ne3,
-                    /*.nb0  =*/ nb0,
-                    /*.nb1  =*/ nb1,
-                    /*.nb2  =*/ nb2,
-                    /*.nb3  =*/ nb3,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
-
-                const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_ACC:
-            {
-                GGML_ASSERT(src0t == GGML_TYPE_F32);
-                GGML_ASSERT(src1t == GGML_TYPE_F32);
-                GGML_ASSERT(dstt  == GGML_TYPE_F32);
-
-                GGML_ASSERT(ggml_is_contiguous(src0));
-                GGML_ASSERT(ggml_is_contiguous(src1));
-
-                const size_t pnb1 = ((const int32_t *) dst->op_params)[0];
-                const size_t pnb2 = ((const int32_t *) dst->op_params)[1];
-                const size_t pnb3 = ((const int32_t *) dst->op_params)[2];
-                const size_t offs = ((const int32_t *) dst->op_params)[3];
-
-                const bool inplace = (bool) ((const int32_t *) dst->op_params)[4];
-
-                if (!inplace) {
-                    // run a separate kernel to copy src->dst
-                    // not sure how to avoid this
-                    // TODO: make a simpler cpy_bytes kernel
-
-                    const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F32].pipeline;
-
-                    ggml_metal_kargs_cpy args = {
-                        /*.ne00 =*/ ne00,
-                        /*.ne01 =*/ ne01,
-                        /*.ne02 =*/ ne02,
-                        /*.ne03 =*/ ne03,
-                        /*.nb00 =*/ nb00,
-                        /*.nb01 =*/ nb01,
-                        /*.nb02 =*/ nb02,
-                        /*.nb03 =*/ nb03,
-                        /*.ne0  =*/ ne0,
-                        /*.ne1  =*/ ne1,
-                        /*.ne2  =*/ ne2,
-                        /*.ne3  =*/ ne3,
-                        /*.nb0  =*/ nb0,
-                        /*.nb1  =*/ nb1,
-                        /*.nb2  =*/ nb2,
-                        /*.nb3  =*/ nb3,
-                    };
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
-
-                    const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-
-                    ggml_metal_encode_concurrency_reset(ctx_enc);
-                }
-
-                ggml_metal_kargs_bin args = {
-                    /*.ne00 =*/ ne00,
-                    /*.ne01 =*/ ne01,
-                    /*.ne02 =*/ ne02,
-                    /*.ne03 =*/ ne03,
-                    /*.nb00 =*/ nb00,
-                    /*.nb01 =*/ pnb1,
-                    /*.nb02 =*/ pnb2,
-                    /*.nb03 =*/ pnb3,
-                    /*.ne10 =*/ ne10,
-                    /*.ne11 =*/ ne11,
-                    /*.ne12 =*/ ne12,
-                    /*.ne13 =*/ ne13,
-                    /*.nb10 =*/ nb10,
-                    /*.nb11 =*/ nb11,
-                    /*.nb12 =*/ nb12,
-                    /*.nb13 =*/ nb13,
-                    /*.ne0  =*/ ne0,
-                    /*.ne1  =*/ ne1,
-                    /*.ne2  =*/ ne2,
-                    /*.ne3  =*/ ne3,
-                    /*.nb0  =*/ nb0,
-                    /*.nb1  =*/ pnb1,
-                    /*.nb2  =*/ pnb2,
-                    /*.nb3  =*/ pnb3,
-                    /*.offs =*/ offs,
-                    /*.o1   =*/ { offs_src1 },
-                };
-
-                //const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD].pipeline;
-                const id<MTLComputePipelineState> pipeline = ggml_metal_get_pipeline_bin(backend, GGML_OP_ADD, 1, false);
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                [encoder setBuffer:id_src1 offset:0         atIndex:2];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:3];
-
-                const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne11, ne12, ne13) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
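In other words, the ACC encoding above first copies src0 into dst (skipped when the op is marked in-place) and then adds src1 into a strided view of dst described by pnb1/pnb2/pnb3 and offs from dst->op_params. A small sketch of the implied addressing, with an illustrative helper name:

// illustrative: byte offset of the dst element that src1[i0, i1, i2, i3] is accumulated into;
// this is why the grid covers src1's extent (ne11, ne12, ne13) and why the preceding copy
// must finish before the add (hence the concurrency reset above)
static size_t acc_dst_offset(size_t offs, size_t nb0, size_t pnb1, size_t pnb2, size_t pnb3,
                             int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
    return offs + i0*nb0 + i1*pnb1 + i2*pnb2 + i3*pnb3;
}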
-        case GGML_OP_SCALE:
-            {
-                GGML_ASSERT(ggml_is_contiguous(src0));
-
-                float scale;
-                float bias;
-                memcpy(&scale, ((const int32_t *) dst->op_params) + 0, sizeof(float));
-                memcpy(&bias,  ((const int32_t *) dst->op_params) + 1, sizeof(float));
-
-                int64_t n = ggml_nelements(dst);
-
-                id<MTLComputePipelineState> pipeline = nil;
-
-                if (n % 4 == 0) {
-                    n /= 4;
-                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SCALE_4].pipeline;
-                } else {
-                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SCALE].pipeline;
-                }
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0   offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst    offset:offs_dst  atIndex:1];
-                [encoder setBytes:&scale length:sizeof(scale) atIndex:2];
-                [encoder setBytes:&bias  length:sizeof(bias)  atIndex:3];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-            } break;
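The memcpy pattern above (also used by CLAMP and SOFT_MAX below) is how float parameters are round-tripped through the tensor's raw int32_t op_params words rather than through a value-converting cast. A minimal sketch with illustrative values; the producer side is simplified (real graphs set these via ggml's op-construction helpers):

// illustrative: float op params are stored and read back bit-for-bit via memcpy
float scale_in = 0.125f;
float bias_in  = 0.0f;
memcpy((int32_t *) dst->op_params + 0, &scale_in, sizeof(scale_in)); // producer side
memcpy((int32_t *) dst->op_params + 1, &bias_in,  sizeof(bias_in));

float scale_out, bias_out;                                           // consumer side (this backend)
memcpy(&scale_out, (const int32_t *) dst->op_params + 0, sizeof(scale_out));
memcpy(&bias_out,  (const int32_t *) dst->op_params + 1, sizeof(bias_out));
// the SCALE kernel then computes dst[i] = scale_out*src0[i] + bias_out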
-        case GGML_OP_CLAMP:
-            {
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CLAMP].pipeline;
-
-                float min;
-                float max;
-                memcpy(&min, ((const int32_t *) dst->op_params) + 0, sizeof(float));
-                memcpy(&max, ((const int32_t *) dst->op_params) + 1, sizeof(float));
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-                [encoder setBytes:&min   length:sizeof(min) atIndex:2];
-                [encoder setBytes:&max   length:sizeof(max) atIndex:3];
-
-                const int64_t n = ggml_nelements(dst);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-            } break;
-        case GGML_OP_UNARY:
-            // we are not taking into account the strides, so for now require contiguous tensors
-            GGML_ASSERT(ggml_is_contiguous(src0));
-
-            switch (ggml_get_unary_op(node)) {
-                case GGML_UNARY_OP_TANH:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_TANH].pipeline;
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    const int64_t n = ggml_nelements(dst);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_RELU:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RELU].pipeline;
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    const int64_t n = ggml_nelements(dst);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_SIGMOID:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SIGMOID].pipeline;
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    const int64_t n = ggml_nelements(dst);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_GELU:
-                {
-                    int64_t n = ggml_nelements(dst);
-
-                    id<MTLComputePipelineState> pipeline = nil;
-
-                    if (n % 4 == 0) {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_4].pipeline;
-                        n /= 4;
-                    } else {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU].pipeline;
-                    }
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_GELU_ERF:
-                {
-                    int64_t n = ggml_nelements(dst);
-
-                    id<MTLComputePipelineState> pipeline = nil;
-
-                    if (n % 4 == 0) {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_ERF_4].pipeline;
-                        n /= 4;
-                    } else {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_ERF].pipeline;
-                    }
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_GELU_QUICK:
-                {
-                    int64_t n = ggml_nelements(dst);
-
-                    id<MTLComputePipelineState> pipeline = nil;
-
-                    if (n % 4 == 0) {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_QUICK_4].pipeline;
-                        n /= 4;
-                    } else {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_QUICK].pipeline;
-                    }
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_SILU:
-                {
-                    int64_t n = ggml_nelements(dst);
-
-                    id<MTLComputePipelineState> pipeline = nil;
-
-                    if (n % 4 == 0) {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SILU_4].pipeline;
-                        n /= 4;
-                    } else {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SILU].pipeline;
-                    }
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_ELU:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ELU].pipeline;
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    const int64_t n = ggml_nelements(dst);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_NEG:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_NEG].pipeline;
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    const int64_t n = ggml_nelements(dst);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_ABS:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ABS].pipeline;
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    const int64_t n = ggml_nelements(dst);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_SGN:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SGN].pipeline;
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    const int64_t n = ggml_nelements(dst);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_STEP:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_STEP].pipeline;
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    const int64_t n = ggml_nelements(dst);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_HARDSWISH:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_HARDSWISH].pipeline;
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    const int64_t n = ggml_nelements(dst);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_HARDSIGMOID:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_HARDSIGMOID].pipeline;
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    const int64_t n = ggml_nelements(dst);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                case GGML_UNARY_OP_EXP:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_EXP].pipeline;
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-
-                    const int64_t n = ggml_nelements(dst);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
-                default:
-                {
-                    GGML_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, idx, ggml_op_name(dst->op));
-                    GGML_ABORT("fatal error");
-                }
-            } break;
-        case GGML_OP_GLU:
-            {
-                GGML_ASSERT(ggml_is_contiguous_1(src0));
-
-                if (src1) {
-                    GGML_ASSERT(ggml_are_same_shape(src0, src1));
-                }
-
-                id<MTLComputePipelineState> pipeline = nil;
-
-                switch (ggml_get_glu_op(node)) {
-                    case GGML_GLU_OP_REGLU:
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_REGLU].pipeline;
-                        break;
-                    case GGML_GLU_OP_GEGLU:
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GEGLU].pipeline;
-                        break;
-                    case GGML_GLU_OP_SWIGLU:
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SWIGLU].pipeline;
-                        break;
-                    case GGML_GLU_OP_SWIGLU_OAI:
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SWIGLU_OAI].pipeline;
-                        break;
-                    case GGML_GLU_OP_GEGLU_ERF:
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GEGLU_ERF].pipeline;
-                        break;
-                    case GGML_GLU_OP_GEGLU_QUICK:
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GEGLU_QUICK].pipeline;
-                        break;
-                    default:
-                        GGML_ABORT("fatal error");
-                }
-
-                const int32_t swp = ggml_get_op_params_i32(dst, 1);
-                const float alpha = ggml_get_op_params_f32(dst, 2);
-                const float limit = ggml_get_op_params_f32(dst, 3);
-
-                const int32_t i00 = swp ? ne0 : 0;
-                const int32_t i10 = swp ? 0 : ne0;
-
-                ggml_metal_kargs_glu args = {
-                    /*.ne00 =*/ ne00,
-                    /*.nb01 =*/ nb01,
-                    /*.ne10 =*/ src1 ? ne10 : ne00,
-                    /*.nb11 =*/ src1 ? nb11 : nb01,
-                    /*.ne0  =*/ ne0,
-                    /*.nb1  =*/ nb1,
-                    /*.i00  =*/ src1 ? 0 : i00,
-                    /*.i10  =*/ src1 ? 0 : i10,
-                    /*.alpha=*/ alpha,
-                    /*.limit=*/ limit
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                if (src1) {
-                    [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
-                } else {
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                }
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
-                [encoder setBytes:&args length:sizeof(args) atIndex:3];
-
-                const int64_t nrows = ggml_nrows(src0);
-
-                const int32_t nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00/2);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
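When src1 is absent, the gate lives in the other half of each src0 row and the swapped op param only selects which half is which. A sketch of the column offsets that end up in args.i00 / args.i10 above (the local is named swapped here for clarity; the code above calls it swp):

// illustrative: single-tensor GLU, where each src0 row packs [value | gate], so ne00 == 2*ne0
// swapped == 0: value half starts at column 0,   gate half at column ne0
// swapped == 1: value half starts at column ne0, gate half at column 0
const int32_t i00 = swapped ? ne0 : 0;   // value-column offset
const int32_t i10 = swapped ? 0   : ne0; // gate-column offset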
-        case GGML_OP_SQR:
-            {
-                GGML_ASSERT(ggml_is_contiguous(src0));
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SQR].pipeline;
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst atIndex:1];
-
-                const int64_t n = ggml_nelements(dst);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-            } break;
-        case GGML_OP_SQRT:
-            {
-                GGML_ASSERT(ggml_is_contiguous(src0));
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SQRT].pipeline;
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst atIndex:1];
-
-                const int64_t n = ggml_nelements(dst);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-            } break;
-        case GGML_OP_SIN:
-            {
-                GGML_ASSERT(ggml_is_contiguous(src0));
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SIN].pipeline;
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst atIndex:1];
-
-                const int64_t n = ggml_nelements(dst);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-            } break;
-        case GGML_OP_COS:
-            {
-                GGML_ASSERT(ggml_is_contiguous(src0));
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_COS].pipeline;
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst atIndex:1];
-
-                const int64_t n = ggml_nelements(dst);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-            } break;
-        case GGML_OP_SUM_ROWS:
-        case GGML_OP_MEAN:
-            {
-                GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type));
-
-                id<MTLComputePipelineState> pipeline = nil;
-
-                switch (dst->op) {
-                    case GGML_OP_SUM_ROWS:
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline;
-                        break;
-                    case GGML_OP_MEAN:
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MEAN].pipeline;
-                        break;
-                    default:
-                        GGML_ABORT("fatal error");
-                }
-
-                int nth = 32; // SIMD width
-
-                while (nth < ne00 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) {
-                    nth *= 2;
-                }
-
-                nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup);
-                nth = MIN(nth, ne00);
-
-                ggml_metal_kargs_sum_rows args = {
-                   /*.ne00 =*/ ne00,
-                   /*.ne01 =*/ ne01,
-                   /*.ne02 =*/ ne02,
-                   /*.ne03 =*/ ne03,
-                   /*.nb00 =*/ nb00,
-                   /*.nb01 =*/ nb01,
-                   /*.nb02 =*/ nb02,
-                   /*.nb03 =*/ nb03,
-                   /*.ne10 =*/ ne10,
-                   /*.ne11 =*/ ne11,
-                   /*.ne12 =*/ ne12,
-                   /*.ne13 =*/ ne13,
-                   /*.nb10 =*/ nb10,
-                   /*.nb11 =*/ nb11,
-                   /*.nb12 =*/ nb12,
-                   /*.nb13 =*/ nb13,
-                   /*.ne0  =*/ ne0,
-                   /*.ne1  =*/ ne1,
-                   /*.ne2  =*/ ne2,
-                   /*.ne3  =*/ ne3,
-                   /*.nb0  =*/ nb0,
-                   /*.nb1  =*/ nb1,
-                   /*.nb2  =*/ nb2,
-                   /*.nb3  =*/ nb3,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
-                [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_SOFT_MAX:
-            {
-                GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F16 || src1->type == GGML_TYPE_F32);
-
-                int nth = 32; // SIMD width
-
-                id<MTLComputePipelineState> pipeline = nil;
-
-                const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16);
-
-                if (ne00%4 == 0) {
-                    while (nth < ne00/4 && nth*ne01*ne02*ne03 < 256) {
-                        nth *= 2;
-                    }
-                    if (use_f16) {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4].pipeline;
-                    } else {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32_4].pipeline;
-                    }
-                } else {
-                    while (nth < ne00 && nth*ne01*ne02*ne03 < 256) {
-                        nth *= 2;
-                    }
-                    if (use_f16) {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16].pipeline;
-                    } else {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32].pipeline;
-                    }
-                }
-
-                float scale;
-                float max_bias;
-
-                memcpy(&scale,    ((const int32_t *) dst->op_params) + 0, sizeof(scale));
-                memcpy(&max_bias, ((const int32_t *) dst->op_params) + 1, sizeof(max_bias));
-
-                const uint32_t n_head      = src0->ne[2];
-                const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
-
-                const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
-                const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
-
-                id<MTLBuffer> h_src0 = id_src0;
-
-                // softmax
-
-                ggml_metal_kargs_soft_max args = {
-                    /*.ne00        =*/ ne00,
-                    /*.ne01        =*/ ne01,
-                    /*.ne02        =*/ ne02,
-                    /*.nb01        =*/ nb01,
-                    /*.nb02        =*/ nb02,
-                    /*.nb03        =*/ nb03,
-                    /*.ne11        =*/ ne11,
-                    /*.ne12        =*/ ne12,
-                    /*.ne13        =*/ ne13,
-                    /*.nb11        =*/ nb11,
-                    /*.nb12        =*/ nb12,
-                    /*.nb13        =*/ nb13,
-                    /*.nb1         =*/ nb1,
-                    /*.nb2         =*/ nb2,
-                    /*.nb3         =*/ nb3,
-                    /*.scale       =*/ scale,
-                    /*.max_bias    =*/ max_bias,
-                    /*.m0          =*/ m0,
-                    /*.m1          =*/ m1,
-                    /*.n_head_log2 =*/ n_head_log2,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:h_src0 offset:offs_src0      atIndex:0];
-                if (id_src1) {
-                    [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
-                } else {
-                    [encoder setBuffer:h_src0 offset:offs_src0  atIndex:1];
-                }
-                if (id_src2) {
-                    [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2];
-                } else {
-                    [encoder setBuffer:h_src0 offset:offs_src0  atIndex:2];
-                }
-                [encoder setBuffer:id_dst offset:offs_dst       atIndex:3];
-                [encoder setBytes:&args   length:sizeof(args)   atIndex:4];
-
-                [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
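For context, m0, m1 and n_head_log2 above encode the usual ALiBi slope schedule. A sketch of the per-head slope that the soft_max kernels reconstruct from these constants (the helper name is illustrative):

#include <math.h>
#include <stdint.h>

// illustrative: per-head ALiBi slope derived from the precomputed constants above
static float alibi_slope(float max_bias, float m0, float m1, uint32_t n_head_log2, uint32_t h) {
    if (max_bias <= 0.0f) {
        return 1.0f; // ALiBi disabled
    }
    // the first n_head_log2 heads follow powers of m0, the remaining heads powers of m1
    return h < n_head_log2 ? powf(m0, (float) (h + 1))
                           : powf(m1, (float) (2*(h - n_head_log2) + 1));
}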
-        case GGML_OP_DIAG_MASK_INF:
-            {
-                const int n_past = ((const int32_t *)(dst->op_params))[0];
-
-                id<MTLComputePipelineState> pipeline = nil;
-
-                if (ne00%8 == 0) {
-                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8].pipeline;
-                } else {
-                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF].pipeline;
-                }
-
-                ggml_metal_kargs_diag_mask_inf args = {
-                    /*.ne00 =*/ ne00,
-                    /*.ne01 =*/ ne01,
-                    /*.n_past =*/ n_past,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-                [encoder setBytes:&args  length:sizeof(args) atIndex:2];
-
-                if (ne00%8 == 0) {
-                    [encoder dispatchThreadgroups:MTLSizeMake(ne00*ne01*ne02/8, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } else {
-                    [encoder dispatchThreadgroups:MTLSizeMake(ne00, ne01, ne02) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                }
-            } break;
-        case GGML_OP_SSM_CONV:
-            {
-                GGML_ASSERT(src0t == GGML_TYPE_F32);
-                GGML_ASSERT(src1t == GGML_TYPE_F32);
-
-                GGML_ASSERT(ggml_is_contiguous(src0));
-                GGML_ASSERT(ggml_is_contiguous(src1));
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SSM_CONV_F32].pipeline;
-
-                ggml_metal_kargs_ssm_conv args = {
-                    /*.ne00 =*/ ne00,
-                    /*.ne01 =*/ ne01,
-                    /*.ne02 =*/ ne02,
-                    /*.nb00 =*/ nb00,
-                    /*.nb01 =*/ nb01,
-                    /*.nb02 =*/ nb02,
-                    /*.ne10 =*/ ne10,
-                    /*.ne11 =*/ ne11,
-                    /*.nb10 =*/ nb10,
-                    /*.nb11 =*/ nb11,
-                    /*.ne0  =*/ ne0,
-                    /*.ne1  =*/ ne1,
-                    /*.ne2  =*/ ne2,
-                    /*.nb0  =*/ nb0,
-                    /*.nb1  =*/ nb1,
-                    /*.nb2  =*/ nb2,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0    atIndex:0];
-                [encoder setBuffer:id_src1 offset:offs_src1    atIndex:1];
-                [encoder setBuffer:id_dst  offset:offs_dst     atIndex:2];
-                [encoder setBytes:&args    length:sizeof(args) atIndex:3];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne1, ne02) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-            } break;
-        case GGML_OP_SSM_SCAN:
-            {
-                struct ggml_tensor * src3 = node->src[3];
-                struct ggml_tensor * src4 = node->src[4];
-                struct ggml_tensor * src5 = node->src[5];
-                struct ggml_tensor * src6 = node->src[6];
-
-                GGML_ASSERT(src3);
-                GGML_ASSERT(src4);
-                GGML_ASSERT(src5);
-                GGML_ASSERT(src6);
-
-                size_t offs_src3 = 0;
-                size_t offs_src4 = 0;
-                size_t offs_src5 = 0;
-                size_t offs_src6 = 0;
-
-                id<MTLBuffer> id_src3 = src3 ? ggml_metal_get_buffer(src3, &offs_src3) : nil;
-                id<MTLBuffer> id_src4 = src4 ? ggml_metal_get_buffer(src4, &offs_src4) : nil;
-                id<MTLBuffer> id_src5 = src5 ? ggml_metal_get_buffer(src5, &offs_src5) : nil;
-                id<MTLBuffer> id_src6 = src6 ? ggml_metal_get_buffer(src6, &offs_src6) : nil;
-
-                const int64_t  ne30 = src3->ne[0];
-                const int64_t  ne31 = src3->ne[1]; GGML_UNUSED(ne31);
-
-                const uint64_t nb30 = src3->nb[0]; GGML_UNUSED(nb30);
-                const uint64_t nb31 = src3->nb[1];
-
-                const int64_t  ne40 = src4->ne[0]; GGML_UNUSED(ne40);
-                const int64_t  ne41 = src4->ne[1];
-                const int64_t  ne42 = src4->ne[2]; GGML_UNUSED(ne42);
-                const int64_t  ne43 = src4->ne[3]; GGML_UNUSED(ne43);
-
-                const uint64_t nb40 = src4->nb[0]; GGML_UNUSED(nb40);
-                const uint64_t nb41 = src4->nb[1];
-                const uint64_t nb42 = src4->nb[2];
-                const uint64_t nb43 = src4->nb[3];
-
-                const int64_t  ne50 = src5->ne[0]; GGML_UNUSED(ne50);
-                const int64_t  ne51 = src5->ne[1]; GGML_UNUSED(ne51);
-                const int64_t  ne52 = src5->ne[2]; GGML_UNUSED(ne52);
-                const int64_t  ne53 = src5->ne[3]; GGML_UNUSED(ne53);
-
-                const uint64_t nb50 = src5->nb[0]; GGML_UNUSED(nb50);
-                const uint64_t nb51 = src5->nb[1];
-                const uint64_t nb52 = src5->nb[2];
-                const uint64_t nb53 = src5->nb[3];
-
-                const int64_t  ne60 = src6->ne[0]; GGML_UNUSED(ne60);
-
-                const uint64_t nb60 = src6->nb[0]; GGML_UNUSED(nb60);
-
-                const int64_t d_state      = ne00;
-                const int64_t d_inner      = ne01;
-                const int64_t n_head       = ne02;
-                const int64_t n_group      = ne41;
-                const int64_t n_seq_tokens = ne12;
-                const int64_t n_seqs       = ne13;
-
-                id<MTLComputePipelineState> pipeline = nil;
-
-                if (ne30 == 1) {
-                    // Mamba-2
-                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32_GROUP].pipeline;
-                } else {
-                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32].pipeline;
-                }
-
-                ggml_metal_kargs_ssm_scan args = {
-                    /*.d_state      =*/ d_state,
-                    /*.d_inner      =*/ d_inner,
-                    /*.n_head       =*/ n_head,
-                    /*.n_group      =*/ n_group,
-                    /*.n_seq_tokens =*/ n_seq_tokens,
-                    /*.n_seqs       =*/ n_seqs,
-                    /*.s_off        =*/ ggml_nelements(src1) * sizeof(float),
-                    /*.nb01         =*/ nb01,
-                    /*.nb02         =*/ nb02,
-                    /*.nb03         =*/ nb03,
-                    /*.nb11         =*/ nb11,
-                    /*.nb12         =*/ nb12,
-                    /*.nb13         =*/ nb13,
-                    /*.nb21         =*/ nb21,
-                    /*.nb22         =*/ nb22,
-                    /*.nb31         =*/ nb31,
-                    /*.nb41         =*/ nb41,
-                    /*.nb42         =*/ nb42,
-                    /*.nb43         =*/ nb43,
-                    /*.nb51         =*/ nb51,
-                    /*.nb52         =*/ nb52,
-                    /*.nb53         =*/ nb53,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
-                [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2];
-                [encoder setBuffer:id_src3 offset:offs_src3 atIndex:3];
-                [encoder setBuffer:id_src4 offset:offs_src4 atIndex:4];
-                [encoder setBuffer:id_src5 offset:offs_src5 atIndex:5];
-                [encoder setBuffer:id_src6 offset:offs_src6 atIndex:6];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:7];
-                [encoder setBytes:&args    length:sizeof(args) atIndex:8];
-
-                // One shared memory bucket for each simd group in the threadgroup
-                // NOTE: Metal kernels require the buffer size to be a multiple of 16 bytes
-                //  https://developer.apple.com/documentation/metal/mtlcomputecommandencoder/1443142-setthreadgroupmemorylength
-                if (d_state >= 32) {
-                    GGML_ASSERT((int64_t)(d_state / 32) <= 32);
-                    const int64_t shmem_size = 32;
-                    GGML_ASSERT(d_state <= (int64_t)pipeline.maxTotalThreadsPerThreadgroup);
-                    [encoder setThreadgroupMemoryLength:(shmem_size)*sizeof(float) atIndex:0];
-                }
-
-                if (ne30 == 1) {
-                    // Mamba-2
-                    [encoder dispatchThreadgroups:MTLSizeMake(d_inner, n_head, n_seqs) threadsPerThreadgroup:MTLSizeMake(d_state, 1, 1)];
-                } else {
-                    GGML_ASSERT(d_inner == 1);
-                    [encoder dispatchThreadgroups:MTLSizeMake(n_head, n_seqs, 1) threadsPerThreadgroup:MTLSizeMake(d_state, 1, 1)];
-                }
-            } break;
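A quick sanity check of the threadgroup-memory sizing above, assuming for example d_state = 128:

// illustrative: one float of scratch per simdgroup, padded to a fixed 32 floats
const int64_t n_simdgroups = d_state / 32;         // 128/32 = 4 simdgroups, asserted <= 32 above
const size_t  shmem_bytes  = 32 * sizeof(float);   // 128 bytes, a multiple of 16 as Metal requires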
-        case GGML_OP_RWKV_WKV6:
-            {
-                const int64_t B = dst->src[5]->ne[1];
-                const int64_t T = dst->src[0]->ne[2];
-                const int64_t C = dst->ne[0];
-                const int64_t H = dst->src[0]->ne[1];
-
-                GGML_ASSERT(dst->src[5]->type == GGML_TYPE_F32);
-                GGML_ASSERT(C % H == 0);
-                GGML_ASSERT(C / H == 64);
-
-                size_t offs_src3 = 0;
-                size_t offs_src4 = 0;
-                size_t offs_src5 = 0;
-
-                id<MTLBuffer> id_src3 = dst->src[3] ? ggml_metal_get_buffer(dst->src[3], &offs_src3) : nil;
-                id<MTLBuffer> id_src4 = dst->src[4] ? ggml_metal_get_buffer(dst->src[4], &offs_src4) : nil;
-                id<MTLBuffer> id_src5 = dst->src[5] ? ggml_metal_get_buffer(dst->src[5], &offs_src5) : nil;
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RWKV_WKV6_F32].pipeline;
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
-                [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2];
-                [encoder setBuffer:id_src3 offset:offs_src3 atIndex:3];
-                [encoder setBuffer:id_src4 offset:offs_src4 atIndex:4];
-                [encoder setBuffer:id_src5 offset:offs_src5 atIndex:5];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:6];
-
-                [encoder setBytes:&B length:sizeof(B) atIndex:7];
-                [encoder setBytes:&T length:sizeof(T) atIndex:8];
-                [encoder setBytes:&C length:sizeof(C) atIndex:9];
-                [encoder setBytes:&H length:sizeof(H) atIndex:10];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(B * H, 1, 1) threadsPerThreadgroup:MTLSizeMake(C / H, 1, 1)];
-            } break;
-        case GGML_OP_RWKV_WKV7:
-            {
-                const int64_t B = dst->src[6]->ne[1];
-                const int64_t T = dst->src[0]->ne[2];
-                const int64_t C = dst->ne[0];
-                const int64_t H = dst->src[0]->ne[1];
-
-                GGML_ASSERT(dst->src[6]->type == GGML_TYPE_F32);
-                GGML_ASSERT(C % H == 0);
-                GGML_ASSERT(C / H == 64);
-
-                size_t offs_src3 = 0;
-                size_t offs_src4 = 0;
-                size_t offs_src5 = 0;
-                size_t offs_src6 = 0;
-
-                id<MTLBuffer> id_src3 = dst->src[3] ? ggml_metal_get_buffer(dst->src[3], &offs_src3) : nil;
-                id<MTLBuffer> id_src4 = dst->src[4] ? ggml_metal_get_buffer(dst->src[4], &offs_src4) : nil;
-                id<MTLBuffer> id_src5 = dst->src[5] ? ggml_metal_get_buffer(dst->src[5], &offs_src5) : nil;
-                id<MTLBuffer> id_src6 = dst->src[6] ? ggml_metal_get_buffer(dst->src[6], &offs_src6) : nil;
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RWKV_WKV7_F32].pipeline;
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
-                [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2];
-                [encoder setBuffer:id_src3 offset:offs_src3 atIndex:3];
-                [encoder setBuffer:id_src4 offset:offs_src4 atIndex:4];
-                [encoder setBuffer:id_src5 offset:offs_src5 atIndex:5];
-                [encoder setBuffer:id_src6 offset:offs_src6 atIndex:6];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:7];
-
-                [encoder setBytes:&B length:sizeof(B) atIndex:8];
-                [encoder setBytes:&T length:sizeof(T) atIndex:9];
-                [encoder setBytes:&C length:sizeof(C) atIndex:10];
-                [encoder setBytes:&H length:sizeof(H) atIndex:11];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(B * H, 1, 1) threadsPerThreadgroup:MTLSizeMake(C / H, 1, 1)];
-            } break;
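The WKV6 and WKV7 dispatches above share the same geometry, which follows directly from the asserted head size:

// illustrative: every (sequence, head) pair gets its own threadgroup of head_size threads
const int64_t head_size      = C / H;   // asserted to be 64 above
const int64_t n_threadgroups = B * H;   // grid (B*H, 1, 1), threadgroup (head_size, 1, 1)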
-        case GGML_OP_MUL_MAT:
-            {
-                GGML_ASSERT(ne00 == ne10);
-
-                GGML_ASSERT(ne12 % ne02 == 0);
-                GGML_ASSERT(ne13 % ne03 == 0);
-
-                const uint32_t r2 = ne12/ne02;
-                const uint32_t r3 = ne13/ne03;
-
-                // find the break-even point where the matrix-matrix kernel becomes more efficient compared
-                // to the matrix-vector kernel
-                const int ne11_mm_min = 8;
-
-                // first try to use small-batch mat-mv kernels
-                // these should be efficient for BS [2, ~8]
-                if (src1t == GGML_TYPE_F32 && (ne00%128 == 0) &&
-                    (
-                     (
-                      (
-                       src0t == GGML_TYPE_F32  || // TODO: helper function
-                       src0t == GGML_TYPE_F16  ||
-                       src0t == GGML_TYPE_Q4_0 ||
-                       src0t == GGML_TYPE_Q4_1 ||
-                       src0t == GGML_TYPE_Q5_0 ||
-                       src0t == GGML_TYPE_Q5_1 ||
-                       src0t == GGML_TYPE_Q8_0 ||
-                       src0t == GGML_TYPE_MXFP4 ||
-                       src0t == GGML_TYPE_IQ4_NL ||
-                       false) && (ne11 >= 2 && ne11 <= 8)
-                     ) ||
-                     (
-                      (
-                       src0t == GGML_TYPE_Q4_K ||
-                       src0t == GGML_TYPE_Q5_K ||
-                       src0t == GGML_TYPE_Q6_K ||
-                       false) && (ne11 >= 4 && ne11 <= 8)
-                     )
-                    )
-                   ) {
-                    // TODO: determine the optimal parameters based on grid utilization
-                    //       I still don't know why we should not always use the maximum available threads:
-                    //
-                    //       nsg = pipeline.maxTotalThreadsPerThreadgroup / 32
-                    //
-                    //       my current hypothesis is that the work grid is not evenly divisible for different nsg
-                    //       values and there can be some tail effects when nsg is high. need to confirm this
-                    //
-                    const int nsg    = 2;                 // num simdgroups per threadgroup
-
-                    // num threads along row per simdgroup
-                    int nxpsg = 0;
-                    if (ne00 % 256 == 0 && ne11 < 3) {
-                        nxpsg = 16;
-                    } else if (ne00 % 128 == 0) {
-                        nxpsg = 8;
-                    } else {
-                        nxpsg = 4;
-                    }
-
-                    const int nypsg  = 32/nxpsg;          // num threads along col per simdgroup (i.e. a simdgroup processes that many src0 rows at a time)
-                    const int r0ptg  = nypsg*nsg;         // num src0 rows per threadgroup
-                          int r1ptg  = 4;                 // num src1 rows per threadgroup
-
-                    // note: not sure how optimal these are across different hardware. there might be something cleverer
-                    switch (ne11) {
-                        case 2:
-                            r1ptg = 2; break;
-                        case 3:
-                        case 6:
-                            r1ptg = 3; break;
-                        case 4:
-                        case 7:
-                        case 8:
-                            r1ptg = 4; break;
-                        case 5:
-                            r1ptg = 5; break;
-                    };
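As a worked example of the parameter arithmetic above, assuming ne00 % 128 == 0 (so nxpsg = 8) and ne11 = 8:

// illustrative: resulting threadgroup geometry for the small-batch mat-vec path
//   nxpsg = 8              threads along a row per simdgroup
//   nypsg = 32/8  = 4      src0 rows per simdgroup
//   r0ptg = 4*nsg = 8      src0 rows per threadgroup (nsg = 2 simdgroups = 64 threads)
//   r1ptg = 4              src1 rows per threadgroup (from the switch on ne11)
// i.e. each threadgroup of 64 threads produces an 8x4 tile of the output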
-
-                    id<MTLComputePipelineState> pipeline = nil;
-
-                    switch (src0->type) {
-                        case GGML_TYPE_F32:
-                            switch (r1ptg) {
-                                case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F32_F32_R1_2].pipeline; break;
-                                case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F32_F32_R1_3].pipeline; break;
-                                case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F32_F32_R1_4].pipeline; break;
-                                case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F32_F32_R1_5].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            } break;
-                        case GGML_TYPE_F16:
-                            switch (r1ptg) {
-                                case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_2].pipeline; break;
-                                case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_3].pipeline; break;
-                                case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_4].pipeline; break;
-                                case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_5].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            } break;
-                        case GGML_TYPE_Q4_0:
-                            switch (r1ptg) {
-                                case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_2].pipeline; break;
-                                case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_3].pipeline; break;
-                                case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_4].pipeline; break;
-                                case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_5].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            } break;
-                        case GGML_TYPE_Q4_1:
-                            switch (r1ptg) {
-                                case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_2].pipeline; break;
-                                case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_3].pipeline; break;
-                                case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_4].pipeline; break;
-                                case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_5].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            } break;
-                        case GGML_TYPE_Q5_0:
-                            switch (r1ptg) {
-                                case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_2].pipeline; break;
-                                case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_3].pipeline; break;
-                                case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_4].pipeline; break;
-                                case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_5].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            } break;
-                        case GGML_TYPE_Q5_1:
-                            switch (r1ptg) {
-                                case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_2].pipeline; break;
-                                case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_3].pipeline; break;
-                                case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_4].pipeline; break;
-                                case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_5].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            } break;
-                        case GGML_TYPE_Q8_0:
-                            switch (r1ptg) {
-                                case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_2].pipeline; break;
-                                case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_3].pipeline; break;
-                                case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_4].pipeline; break;
-                                case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_5].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            } break;
-                        case GGML_TYPE_MXFP4:
-                            switch (r1ptg) {
-                                case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_2].pipeline; break;
-                                case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_3].pipeline; break;
-                                case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_4].pipeline; break;
-                                case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_5].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            } break;
-                        case GGML_TYPE_Q4_K:
-                            switch (r1ptg) {
-                                case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_2].pipeline; break;
-                                case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_3].pipeline; break;
-                                case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_4].pipeline; break;
-                                case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_5].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            } break;
-                        case GGML_TYPE_Q5_K:
-                            switch (r1ptg) {
-                                case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_2].pipeline; break;
-                                case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_3].pipeline; break;
-                                case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_4].pipeline; break;
-                                case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_5].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            } break;
-                        case GGML_TYPE_Q6_K:
-                            switch (r1ptg) {
-                                case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_2].pipeline; break;
-                                case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_3].pipeline; break;
-                                case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_4].pipeline; break;
-                                case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_5].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            } break;
-                        case GGML_TYPE_IQ4_NL:
-                            switch (r1ptg) {
-                                case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_2].pipeline; break;
-                                case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_3].pipeline; break;
-                                case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_4].pipeline; break;
-                                case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_5].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            } break;
-                        default: GGML_ABORT("not implemented");
-                    }
-
-                    ggml_metal_kargs_mul_mv_ext args = {
-                        /*.ne00  =*/ ne00,
-                        /*.ne01  =*/ ne01,
-                        /*.ne02  =*/ ne02,
-                        /*.nb00  =*/ nb00,
-                        /*.nb01  =*/ nb01,
-                        /*.nb02  =*/ nb02,
-                        /*.nb03  =*/ nb03,
-                        /*.ne10  =*/ ne10,
-                        /*.ne11  =*/ ne11,
-                        /*.ne12  =*/ ne12,
-                        /*.nb10  =*/ nb10,
-                        /*.nb11  =*/ nb11,
-                        /*.nb12  =*/ nb12,
-                        /*.nb13  =*/ nb13,
-                        /*.ne0   =*/ ne0,
-                        /*.ne1   =*/ ne1,
-                        /*.r2    =*/ r2,
-                        /*.r3    =*/ r3,
-                        /*.nsg   =*/ nsg,
-                        /*.nxpsg =*/ nxpsg,
-                        /*.r1ptg =*/ r1ptg,
-                    };
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                    [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:3];
-
-                    //printf("ne01 = %lld r0ptg = %d\n", ne01, r0ptg);
-                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + r0ptg - 1)/r0ptg, (ne11 + r1ptg - 1)/r1ptg, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)];
-                } else
-                // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
-                // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
-                if ([device supportsFamily:MTLGPUFamilyApple7] &&
-                        !ggml_is_transposed(src0) &&
-                        !ggml_is_transposed(src1) &&
-                        src1t == GGML_TYPE_F32 &&
-                        ne00 % 32 == 0 && ne00 >= 64 &&
-                        (ne11 > ne11_mm_min || (ggml_is_quantized(src0t) && ne12 > 1))) {
-                    //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
-
-                    // some Metal matrix data types require aligned pointers
-                    // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5)
-                    switch (src0->type) {
-                        case GGML_TYPE_F32:  GGML_ASSERT(nb01 % 16 == 0); break;
-                        case GGML_TYPE_F16:  GGML_ASSERT(nb01 % 8  == 0); break;
-                        case GGML_TYPE_BF16: GGML_ASSERT(nb01 % 8  == 0); break;
-                        default: break;
-                    }
-
-                    id<MTLComputePipelineState> pipeline = nil;
-
-                    switch (src0->type) {
-                        case GGML_TYPE_F32:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32    ].pipeline; break;
-                        case GGML_TYPE_F16:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32    ].pipeline; break;
-                        case GGML_TYPE_BF16:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_BF16_F32   ].pipeline; break;
-                        case GGML_TYPE_Q4_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32   ].pipeline; break;
-                        case GGML_TYPE_Q4_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32   ].pipeline; break;
-                        case GGML_TYPE_Q5_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32   ].pipeline; break;
-                        case GGML_TYPE_Q5_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32   ].pipeline; break;
-                        case GGML_TYPE_Q8_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32   ].pipeline; break;
-                        case GGML_TYPE_MXFP4:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_MXFP4_F32  ].pipeline; break;
-                        case GGML_TYPE_Q2_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32   ].pipeline; break;
-                        case GGML_TYPE_Q3_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32   ].pipeline; break;
-                        case GGML_TYPE_Q4_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32   ].pipeline; break;
-                        case GGML_TYPE_Q5_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32   ].pipeline; break;
-                        case GGML_TYPE_Q6_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32   ].pipeline; break;
-                        case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32].pipeline; break;
-                        case GGML_TYPE_IQ2_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32 ].pipeline; break;
-                        case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32].pipeline; break;
-                        case GGML_TYPE_IQ3_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32  ].pipeline; break;
-                        case GGML_TYPE_IQ2_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32  ].pipeline; break;
-                        case GGML_TYPE_IQ1_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32  ].pipeline; break;
-                        case GGML_TYPE_IQ1_M:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32  ].pipeline; break;
-                        case GGML_TYPE_IQ4_NL:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32 ].pipeline; break;
-                        case GGML_TYPE_IQ4_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32 ].pipeline; break;
-                        default: GGML_ABORT("MUL MAT-MAT not implemented");
-                    }
-
-                    ggml_metal_kargs_mul_mm args = {
-                        /*.ne00 =*/ ne00,
-                        /*.ne02 =*/ ne02,
-                        /*.nb01 =*/ nb01,
-                        /*.nb02 =*/ nb02,
-                        /*.nb03 =*/ nb03,
-                        /*.ne12 =*/ ne12,
-                        /*.nb10 =*/ nb10,
-                        /*.nb11 =*/ nb11,
-                        /*.nb12 =*/ nb12,
-                        /*.nb13 =*/ nb13,
-                        /*.ne0  =*/ ne0,
-                        /*.ne1  =*/ ne1,
-                        /*.r2   =*/ r2,
-                        /*.r3   =*/ r3,
-                    };
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBytes:&args    length:sizeof(args) atIndex:0];
-                    [encoder setBuffer:id_src0 offset:offs_src0    atIndex:1];
-                    [encoder setBuffer:id_src1 offset:offs_src1    atIndex:2];
-                    [encoder setBuffer:id_dst  offset:offs_dst     atIndex:3];
-
-                    [encoder setThreadgroupMemoryLength:8192 atIndex:0];
-                    [encoder dispatchThreadgroups:MTLSizeMake((ne11 + 31)/32, (ne01 + 63)/64, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
-                } else {
-                    id<MTLComputePipelineState> pipeline = nil;
-
-                    int nsg = 0; // number of simdgroups
-                    int nr0 = 0; // number of src0 rows per simdgroup
-                    int nr1 = 1; // number of src1 rows per threadgroup
-
-                    size_t smem = 0; // shared memory
-
-                    // use custom matrix x vector kernel
-                    switch (src0t) {
-                        case GGML_TYPE_F32:
-                            {
-                                GGML_ASSERT(src1t == GGML_TYPE_F32);
-                                nsg = 1;
-                                nr0 = 1;
-                                nr1 = 4;
-                                if (ne00 == 4) {
-                                    nr0 = 32;
-                                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32_C4].pipeline;
-                                } else {
-                                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32].pipeline;
-                                }
-                            } break;
-                        case GGML_TYPE_F16:
-                            {
-                                nsg = 1;
-                                nr0 = 1;
-                                if (src1t == GGML_TYPE_F32) {
-                                    if (ne00 == 4) {
-                                        nr0 = 32;
-                                        nr1 = 4;
-                                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_C4].pipeline;
-                                    } else if (ne11 * ne12 < 4) {
-                                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW].pipeline;
-                                    } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) {
-                                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4].pipeline;
-                                        nr1 = ne11;
-                                    } else {
-                                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32].pipeline;
-                                        nr1 = 4;
-                                    }
-                                } else {
-                                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16].pipeline;
-                                    nr1 = 4;
-                                }
-                            } break;
-                        case GGML_TYPE_BF16:
-                            {
-                                nsg = 1;
-                                nr0 = 1;
-                                if (src1t == GGML_TYPE_F32) {
-                                    if (ne00 == 4) {
-                                        nr0 = 32;
-                                        nr1 = 4;
-                                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_C4].pipeline;
-                                    } else if (ne11 * ne12 < 4) {
-                                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_1ROW].pipeline;
-                                    } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) {
-                                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_L4].pipeline;
-                                        nr1 = ne11;
-                                    } else {
-                                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32].pipeline;
-                                        nr1 = 4;
-                                    }
-                                } else {
-                                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_BF16].pipeline;
-                                    nr1 = 4;
-                                }
-                            } break;
-                        case GGML_TYPE_Q4_0:
-                            {
-                                nsg = N_SG_Q4_0;
-                                nr0 = N_R0_Q4_0;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q4_1:
-                            {
-                                nsg = N_SG_Q4_1;
-                                nr0 = N_R0_Q4_1;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q5_0:
-                            {
-                                nsg = N_SG_Q5_0;
-                                nr0 = N_R0_Q5_0;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q5_1:
-                            {
-                                nsg = N_SG_Q5_1;
-                                nr0 = N_R0_Q5_1;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q8_0:
-                            {
-                                nsg = N_SG_Q8_0;
-                                nr0 = N_R0_Q8_0;
-                                smem = 32*sizeof(float)*N_R0_Q8_0;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_MXFP4:
-                            {
-                                nsg = N_SG_MXFP4;
-                                nr0 = N_R0_MXFP4;
-                                smem = 32*sizeof(float);
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_MXFP4_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q2_K:
-                            {
-                                nsg = N_SG_Q2_K;
-                                nr0 = N_R0_Q2_K;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q3_K:
-                            {
-                                nsg = N_SG_Q3_K;
-                                nr0 = N_R0_Q3_K;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q4_K:
-                            {
-                                nsg = N_SG_Q4_K;
-                                nr0 = N_R0_Q4_K;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q5_K:
-                            {
-                                nsg = N_SG_Q5_K;
-                                nr0 = N_R0_Q5_K;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q6_K:
-                            {
-                                nsg = N_SG_Q6_K;
-                                nr0 = N_R0_Q6_K;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ2_XXS:
-                            {
-                                nsg = N_SG_IQ2_XXS;
-                                nr0 = N_R0_IQ2_XXS;
-                                smem = 256*8+128;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ2_XS:
-                            {
-                                nsg = N_SG_IQ2_XS;
-                                nr0 = N_R0_IQ2_XS;
-                                smem = 512*8+128;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ3_XXS:
-                            {
-                                nsg = N_SG_IQ3_XXS;
-                                nr0 = N_R0_IQ3_XXS;
-                                smem = 256*4+128;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ3_S:
-                            {
-                                nsg = N_SG_IQ3_S;
-                                nr0 = N_R0_IQ3_S;
-                                smem = 512*4;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ2_S:
-                            {
-                                nsg = N_SG_IQ2_S;
-                                nr0 = N_R0_IQ2_S;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ1_S:
-                            {
-                                nsg = N_SG_IQ1_S;
-                                nr0 = N_R0_IQ1_S;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ1_M:
-                            {
-                                nsg = N_SG_IQ1_M;
-                                nr0 = N_R0_IQ1_M;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ4_NL:
-                            {
-                                nsg = N_SG_IQ4_NL;
-                                nr0 = N_R0_IQ4_NL;
-                                smem = 32*sizeof(float);
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ4_XS:
-                            {
-                                nsg = N_SG_IQ4_XS;
-                                nr0 = N_R0_IQ4_XS;
-                                smem = 32*sizeof(float);
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32].pipeline;
-                            } break;
-                        default:
-                            {
-                                GGML_LOG_ERROR("Asserting on type %d\n", (int)src0t);
-                                GGML_ABORT("not implemented");
-                            }
-                    }
-
-                    ggml_metal_kargs_mul_mv args = {
-                        /*.ne00 =*/ ne00,
-                        /*.ne01 =*/ ne01,
-                        /*.ne02 =*/ ne02,
-                        /*.nb00 =*/ nb00,
-                        /*.nb01 =*/ nb01,
-                        /*.nb02 =*/ nb02,
-                        /*.nb03 =*/ nb03,
-                        /*.ne10 =*/ ne10,
-                        /*.ne11 =*/ ne11,
-                        /*.ne12 =*/ ne12,
-                        /*.nb10 =*/ nb10,
-                        /*.nb11 =*/ nb11,
-                        /*.nb12 =*/ nb12,
-                        /*.nb13 =*/ nb13,
-                        /*.ne0  =*/ ne0,
-                        /*.ne1  =*/ ne1,
-                        /*.r2   =*/ r2,
-                        /*.r3   =*/ r3,
-                    };
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                    [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:3];
-
-                    if (smem > 0) {
-                        [encoder setThreadgroupMemoryLength:smem atIndex:0];
-                    }
-
-                    if (src0t == GGML_TYPE_Q8_0) {
-                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nr0 - 1)/(nr0), (ne11 + nr1 - 1)/nr1, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)];
-                    } else {
-                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nr0*nsg - 1)/(nr0*nsg), (ne11 + nr1 - 1)/nr1, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)];
-                    }
-                }
-            } break;
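
The GGML_OP_MUL_MAT branch above chooses between three paths: a small-batch "extended" matrix-vector kernel specialized per r1ptg (2..5 src1 rows per threadgroup), a simdgroup matrix-matrix kernel on Apple7+ GPUs once ne11 crosses the break-even point, and a generic matrix-vector kernel otherwise (with a special grid for Q8_0 that does not scale by nsg). In every path the grid is sized with ceiling division so partial tiles at the matrix edges still get a threadgroup. A minimal sketch of that sizing, using a hypothetical ceil_div helper rather than the actual Metal dispatch calls:

    #include <stdint.h>

    // number of tiles of size b needed to cover a elements
    static inline int64_t ceil_div(int64_t a, int64_t b) {
        return (a + b - 1)/b;
    }

    // grid for the mat-vec "ext" path: r0ptg rows of src0 and r1ptg rows of src1
    // per threadgroup, batched over the ne12*ne13 broadcast dimensions
    static void mul_mv_ext_grid(int64_t ne01, int64_t ne11, int64_t ne12, int64_t ne13,
                                int r0ptg, int r1ptg, int64_t grid[3]) {
        grid[0] = ceil_div(ne01, r0ptg);
        grid[1] = ceil_div(ne11, r1ptg);
        grid[2] = ne12*ne13;
    }

    // grid for the simdgroup mat-mat path: 32x64 output tiles per threadgroup
    static void mul_mm_grid(int64_t ne01, int64_t ne11, int64_t ne12, int64_t ne13,
                            int64_t grid[3]) {
        grid[0] = ceil_div(ne11, 32);
        grid[1] = ceil_div(ne01, 64);
        grid[2] = ne12*ne13;
    }
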
-        case GGML_OP_MUL_MAT_ID:
-            {
-                // src2 = ids
-                GGML_ASSERT(src2t == GGML_TYPE_I32);
-
-                GGML_ASSERT(!ggml_is_transposed(src0));
-                GGML_ASSERT(!ggml_is_transposed(src1));
-
-                GGML_ASSERT(src1t == GGML_TYPE_F32);
-
-                GGML_ASSERT(ne03 == 1);
-                GGML_ASSERT(ne13 == 1);
-
-                const uint32_t r2 = 1;
-                const uint32_t r3 = 1;
-
-                // find the break-even point where the matrix-matrix kernel becomes more efficient compared
-                // to the matrix-vector kernel
-                // ne20 = n_used_experts
-                // ne21 = n_rows (batch size)
-                const int ne21_mm_id_min = 32;
-
-                // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
-                // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
-                if ([device supportsFamily:MTLGPUFamilyApple7] &&
-                        ne00 % 32 == 0 && ne00 >= 64 &&
-                        (ne21 >= ne21_mm_id_min)) {
-                    GGML_ASSERT(ne00 % 4 == 0);
-
-                    // some Metal matrix data types require aligned pointers
-                    // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5)
-                    switch (src0->type) {
-                        case GGML_TYPE_F32:  GGML_ASSERT(nb01 % 16 == 0); break;
-                        case GGML_TYPE_F16:  GGML_ASSERT(nb01 % 8  == 0); break;
-                        case GGML_TYPE_BF16: GGML_ASSERT(nb01 % 8  == 0); break;
-                        default: break;
-                    }
-
-                    // extra buffers for intermediate id mapping
-                    size_t offs_tpe = offs_dst + ggml_nbytes(dst);
-                    size_t offs_ids = offs_tpe + ggml_metal_mul_mat_id_extra_tpe(dst);
-
-                    {
-                        ggml_metal_kargs_mul_mm_id_map0 args = {
-                            ne02,
-                            ne10,
-                            ne11, // n_expert_used (bcast)
-                            nb11,
-                            nb12,
-                            ne21, // n_tokens
-                            ne20, // n_expert_used
-                            nb21,
-                        };
-
-                        id<MTLComputePipelineState> pipeline = nil;
-
-                        switch (ne20) {
-                            case 1:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_1 ].pipeline; break;
-                            case 2:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_2 ].pipeline; break;
-                            case 4:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_4 ].pipeline; break;
-                            case 6:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_6 ].pipeline; break;
-                            case 8:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_8 ].pipeline; break;
-                            case 10: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_10].pipeline; break;
-                            case 16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16_NE20_16].pipeline; break;
-                            default: GGML_ABORT("missing specialization for ne20 = %d", (int) ne20);
-                        }
-
-                        GGML_ASSERT(ne02 <= (int) pipeline.maxTotalThreadsPerThreadgroup);
-
-                        const size_t smem = ne02*ne20*sizeof(uint16_t);
-
-                        GGML_ASSERT(smem <= device.maxThreadgroupMemoryLength);
-
-                        [encoder setComputePipelineState:pipeline];
-                        [encoder setBytes:&args    length:sizeof(args) atIndex:0];
-                        [encoder setBuffer:id_src2 offset:offs_src2    atIndex:1];
-                        [encoder setBuffer:id_dst  offset:offs_tpe     atIndex:2];
-                        [encoder setBuffer:id_dst  offset:offs_ids     atIndex:3];
-                        [encoder setThreadgroupMemoryLength:smem atIndex:0];
-
-                        [encoder dispatchThreadgroups:MTLSizeMake(1, 1, 1) threadsPerThreadgroup:MTLSizeMake(ne02, 1, 1)];
-                    }
-
-                    // this barrier is always needed because the next kernel has to wait for the id maps to be computed
-                    ggml_metal_encode_concurrency_reset(ctx_enc);
-
-                    {
-                        id<MTLComputePipelineState> pipeline = nil;
-
-                        switch (src0->type) {
-                            case GGML_TYPE_F32:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F16    ].pipeline; break;
-                            case GGML_TYPE_F16:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F16    ].pipeline; break;
-                            case GGML_TYPE_BF16:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_BF16_F16   ].pipeline; break;
-                            case GGML_TYPE_Q4_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F16   ].pipeline; break;
-                            case GGML_TYPE_Q4_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F16   ].pipeline; break;
-                            case GGML_TYPE_Q5_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F16   ].pipeline; break;
-                            case GGML_TYPE_Q5_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F16   ].pipeline; break;
-                            case GGML_TYPE_Q8_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F16   ].pipeline; break;
-                            case GGML_TYPE_MXFP4:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MXFP4_F16  ].pipeline; break;
-                            case GGML_TYPE_Q2_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F16   ].pipeline; break;
-                            case GGML_TYPE_Q3_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F16   ].pipeline; break;
-                            case GGML_TYPE_Q4_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F16   ].pipeline; break;
-                            case GGML_TYPE_Q5_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F16   ].pipeline; break;
-                            case GGML_TYPE_Q6_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F16   ].pipeline; break;
-                            case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F16].pipeline; break;
-                            case GGML_TYPE_IQ2_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F16 ].pipeline; break;
-                            case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F16].pipeline; break;
-                            case GGML_TYPE_IQ3_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F16  ].pipeline; break;
-                            case GGML_TYPE_IQ2_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F16  ].pipeline; break;
-                            case GGML_TYPE_IQ1_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F16  ].pipeline; break;
-                            case GGML_TYPE_IQ1_M:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F16  ].pipeline; break;
-                            case GGML_TYPE_IQ4_NL:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F16 ].pipeline; break;
-                            case GGML_TYPE_IQ4_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F16 ].pipeline; break;
-                            default: GGML_ABORT("MUL_MAT_ID not implemented");
-                        }
-
-                        ggml_metal_kargs_mul_mm_id args = {
-                            /*.ne00  =*/ ne00,
-                            /*.ne02  =*/ ne02,
-                            /*.nb01  =*/ nb01,
-                            /*.nb02  =*/ nb02,
-                            /*.nb03  =*/ nb03,
-                            /*.ne11  =*/ ne11, // n_expert_used (bcast)
-                            /*.nb10  =*/ nb10,
-                            /*.nb11  =*/ nb11,
-                            /*.nb12  =*/ nb12,
-                            /*.nb13  =*/ nb13,
-                            /*.ne20  =*/ ne20, // n_expert_used
-                            /*.ne21  =*/ ne21, // n_tokens
-                            /*.ne0   =*/ ne0,
-                            /*.ne1   =*/ ne1,
-                            /*.r2    =*/ r2,
-                            /*.r3    =*/ r3,
-                        };
-
-                        [encoder setComputePipelineState:pipeline];
-                        [encoder setBytes:&args    length:sizeof(args) atIndex:0];
-                        [encoder setBuffer:id_src0 offset:offs_src0    atIndex:1];
-                        [encoder setBuffer:id_src1 offset:offs_src1    atIndex:2];
-                        [encoder setBuffer:id_dst  offset:offs_tpe     atIndex:3];
-                        [encoder setBuffer:id_dst  offset:offs_ids     atIndex:4];
-                        [encoder setBuffer:id_dst  offset:offs_dst     atIndex:5];
-
-                        [encoder setThreadgroupMemoryLength:8192 atIndex:0];
-                        [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 31)/32, (ne01 + 63)/64, ne02) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
-                    }
-                } else {
-                    id<MTLComputePipelineState> pipeline = nil;
-
-                    int nsg = 0; // number of simdgroups
-                    int nr0 = 0; // number of src0 rows per simdgroup
-                    int nr1 = 1; // number of src1 rows per threadgroup
-
-                    size_t smem = 0; // shared memory
-
-                    // use custom matrix x vector kernel
-                    switch (src0t) {
-                        case GGML_TYPE_F32:
-                            {
-                                GGML_ASSERT(src1t == GGML_TYPE_F32);
-                                nsg = 1;
-                                nr0 = 1;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_F16:
-                            {
-                                GGML_ASSERT(src1t == GGML_TYPE_F32);
-                                nsg = 1;
-                                nr0 = 1;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_BF16:
-                            {
-                                GGML_ASSERT(src1t == GGML_TYPE_F32);
-                                nsg = 1;
-                                nr0 = 1;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_BF16_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q4_0:
-                            {
-                                nsg = N_SG_Q4_0;
-                                nr0 = N_R0_Q4_0;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q4_1:
-                            {
-                                nsg = N_SG_Q4_1;
-                                nr0 = N_R0_Q4_1;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q5_0:
-                            {
-                                nsg = N_SG_Q5_0;
-                                nr0 = N_R0_Q5_0;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q5_1:
-                            {
-                                nsg = N_SG_Q5_1;
-                                nr0 = N_R0_Q5_1;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q8_0:
-                            {
-                                nsg = N_SG_Q8_0;
-                                nr0 = N_R0_Q8_0;
-                                smem = 32*sizeof(float)*N_R0_Q8_0;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_MXFP4:
-                            {
-                                nsg = N_SG_MXFP4;
-                                nr0 = N_R0_MXFP4;
-                                smem = 32*sizeof(float);
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_MXFP4_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q2_K:
-                            {
-                                nsg = N_SG_Q2_K;
-                                nr0 = N_R0_Q2_K;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q3_K:
-                            {
-                                nsg = N_SG_Q3_K;
-                                nr0 = N_R0_Q3_K;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q4_K:
-                            {
-                                nsg = N_SG_Q4_K;
-                                nr0 = N_R0_Q4_K;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q5_K:
-                            {
-                                nsg = N_SG_Q5_K;
-                                nr0 = N_R0_Q5_K;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_Q6_K:
-                            {
-                                nsg = N_SG_Q6_K;
-                                nr0 = N_R0_Q6_K;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ2_XXS:
-                            {
-                                nsg = N_SG_IQ2_XXS;
-                                nr0 = N_R0_IQ2_XXS;
-                                smem = 256*8+128;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ2_XS:
-                            {
-                                nsg = N_SG_IQ2_XS;
-                                nr0 = N_R0_IQ2_XS;
-                                smem = 512*8+128;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ3_XXS:
-                            {
-                                nsg = N_SG_IQ3_XXS;
-                                nr0 = N_R0_IQ3_XXS;
-                                smem = 256*4+128;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ3_S:
-                            {
-                                nsg = N_SG_IQ3_S;
-                                nr0 = N_R0_IQ3_S;
-                                smem = 512*4;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ2_S:
-                            {
-                                nsg = N_SG_IQ2_S;
-                                nr0 = N_R0_IQ2_S;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ1_S:
-                            {
-                                nsg = N_SG_IQ1_S;
-                                nr0 = N_R0_IQ1_S;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ1_M:
-                            {
-                                nsg = N_SG_IQ1_M;
-                                nr0 = N_R0_IQ1_M;
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ4_NL:
-                            {
-                                nsg = N_SG_IQ4_NL;
-                                nr0 = N_R0_IQ4_NL;
-                                smem = 32*sizeof(float);
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32].pipeline;
-                            } break;
-                        case GGML_TYPE_IQ4_XS:
-                            {
-                                nsg = N_SG_IQ4_XS;
-                                nr0 = N_R0_IQ4_XS;
-                                smem = 32*sizeof(float);
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32].pipeline;
-                            } break;
-                        default:
-                            {
-                                GGML_LOG_ERROR("Asserting on type %d\n", (int)src0t);
-                                GGML_ABORT("not implemented");
-                            }
-                    }
-
-                    if (ggml_is_quantized(src0t)) {
-                        GGML_ASSERT(ne00 >= nsg*nr0);
-                    }
-
-                    ggml_metal_kargs_mul_mv_id args = {
-                        /*.nei0 =*/ ne20,
-                        /*.nei1 =*/ ne21,
-                        /*.nbi1 =*/ nb21,
-                        /*.ne00 =*/ ne00,
-                        /*.ne01 =*/ ne01,
-                        /*.ne02 =*/ ne02,
-                        /*.nb00 =*/ nb00,
-                        /*.nb01 =*/ nb01,
-                        /*.nb02 =*/ nb02,
-                        /*.ne10 =*/ ne10,
-                        /*.ne11 =*/ ne11,
-                        /*.ne12 =*/ ne12,
-                        /*.ne13 =*/ ne13,
-                        /*.nb10 =*/ nb10,
-                        /*.nb11 =*/ nb11,
-                        /*.nb12 =*/ nb12,
-                        /*.ne0  =*/ ne0,
-                        /*.ne1  =*/ ne1,
-                        /*.nb1  =*/ nb1,
-                    };
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                    [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2];
-                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:3];
-                    [encoder setBuffer:id_src2 offset:offs_src2 atIndex:4];
-
-                    const int64_t _ne1 = 1;
-                    const int64_t ne123 = ne20*ne21;
-
-                    if (smem > 0) {
-                        [encoder setThreadgroupMemoryLength:smem atIndex:0];
-                    }
-
-                    if (src0t == GGML_TYPE_Q8_0) {
-                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nr0 - 1)/(nr0), (_ne1 + nr1 - 1)/nr1, ne123) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)];
-                    } else {
-                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nr0*nsg - 1)/(nr0*nsg), (_ne1 + nr1 - 1)/nr1, ne123) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)];
-                    }
-                }
-            } break;
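
The GGML_OP_MUL_MAT_ID matrix-matrix path above runs in two stages: a mapping kernel first bins tokens per expert, writing its results into scratch space placed directly after the dst tensor in the same Metal buffer (offs_tpe for the per-expert counts, offs_ids for the token id map), and after a concurrency reset acting as a barrier the per-expert matrix-matrix kernel consumes those maps. A rough sketch of how the scratch offsets are carved out, with the two region sizes passed in as plain values (the real code derives them from dst via ggml_nbytes and the ggml_metal_mul_mat_id_extra_* helpers):

    #include <stddef.h>

    typedef struct {
        size_t offs_dst; // start of the dst tensor data
        size_t offs_tpe; // per-expert token counts, right after dst
        size_t offs_ids; // expert -> token id map, right after the counts
    } mm_id_scratch;

    static mm_id_scratch mm_id_scratch_layout(size_t offs_dst, size_t nbytes_dst, size_t nbytes_tpe) {
        mm_id_scratch s;
        s.offs_dst = offs_dst;
        s.offs_tpe = offs_dst   + nbytes_dst;
        s.offs_ids = s.offs_tpe + nbytes_tpe;
        return s;
    }
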
-        case GGML_OP_GET_ROWS:
-            {
-                id<MTLComputePipelineState> pipeline = nil;
-
-                switch (src0->type) {
-                    case GGML_TYPE_F32:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_F32    ].pipeline; break;
-                    case GGML_TYPE_F16:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_F16    ].pipeline; break;
-                    case GGML_TYPE_BF16:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_BF16   ].pipeline; break;
-                    case GGML_TYPE_Q4_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0   ].pipeline; break;
-                    case GGML_TYPE_Q4_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1   ].pipeline; break;
-                    case GGML_TYPE_Q5_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0   ].pipeline; break;
-                    case GGML_TYPE_Q5_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1   ].pipeline; break;
-                    case GGML_TYPE_Q8_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0   ].pipeline; break;
-                    case GGML_TYPE_MXFP4:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_MXFP4  ].pipeline; break;
-                    case GGML_TYPE_Q2_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K   ].pipeline; break;
-                    case GGML_TYPE_Q3_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K   ].pipeline; break;
-                    case GGML_TYPE_Q4_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K   ].pipeline; break;
-                    case GGML_TYPE_Q5_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K   ].pipeline; break;
-                    case GGML_TYPE_Q6_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K   ].pipeline; break;
-                    case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS].pipeline; break;
-                    case GGML_TYPE_IQ2_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS ].pipeline; break;
-                    case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS].pipeline; break;
-                    case GGML_TYPE_IQ3_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S  ].pipeline; break;
-                    case GGML_TYPE_IQ2_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S  ].pipeline; break;
-                    case GGML_TYPE_IQ1_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S  ].pipeline; break;
-                    case GGML_TYPE_IQ1_M:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_M  ].pipeline; break;
-                    case GGML_TYPE_IQ4_NL:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL ].pipeline; break;
-                    case GGML_TYPE_IQ4_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS ].pipeline; break;
-                    case GGML_TYPE_I32:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_I32    ].pipeline; break;
-                    default: GGML_ABORT("not implemented");
-                }
-
-                ggml_metal_kargs_get_rows args = {
-                    /*.ne00 =*/ ne00,
-                    /*.nb01 =*/ nb01,
-                    /*.nb02 =*/ nb02,
-                    /*.ne10 =*/ ne10,
-                    /*.nb10 =*/ nb10,
-                    /*.nb11 =*/ nb11,
-                    /*.nb1 =*/ nb1,
-                    /*.nb2 =*/ nb2,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args    length:sizeof(args) atIndex:0];
-                [encoder setBuffer:id_src0 offset:offs_src0    atIndex:1];
-                [encoder setBuffer:id_src1 offset:offs_src1    atIndex:2];
-                [encoder setBuffer:id_dst  offset:offs_dst     atIndex:3];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne10, ne11, 1) threadsPerThreadgroup:MTLSizeMake(32, 1, 1)];
-            } break;
-        case GGML_OP_SET_ROWS:
-            {
-                id<MTLComputePipelineState> pipeline = nil;
-
-                switch (dst->type) {
-                    case GGML_TYPE_F32:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_F32   ].pipeline; break;
-                    case GGML_TYPE_F16:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_F16   ].pipeline; break;
-                    case GGML_TYPE_BF16:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_BF16  ].pipeline; break;
-                    case GGML_TYPE_Q8_0:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q8_0  ].pipeline; break;
-                    case GGML_TYPE_Q4_0:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_0  ].pipeline; break;
-                    case GGML_TYPE_Q4_1:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_1  ].pipeline; break;
-                    case GGML_TYPE_Q5_0:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_0  ].pipeline; break;
-                    case GGML_TYPE_Q5_1:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_1  ].pipeline; break;
-                    case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_IQ4_NL].pipeline; break;
-                    default: GGML_ABORT("not implemented");
-                }
-
-                const int32_t nk0 = ne0/ggml_blck_size(dst->type);
-
-                int nth = 32; // SIMD width
-
-                while (nth < nk0 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) {
-                    nth *= 2;
-                }
-
-                int nrptg = 1;
-                if (nth > nk0) {
-                    nrptg = (nth + nk0 - 1)/nk0;
-                    nth   = nk0;
-
-                    if (nrptg*nth > (int) pipeline.maxTotalThreadsPerThreadgroup) {
-                        nrptg--;
-                    }
-                }
-
-                nth = MIN(nth, nk0);
-
-                ggml_metal_kargs_set_rows args = {
-                    /*.nk0  =*/ nk0,
-                    /*.ne01 =*/ ne01,
-                    /*.nb01 =*/ nb01,
-                    /*.nb02 =*/ nb02,
-                    /*.nb03 =*/ nb03,
-                    /*.ne11 =*/ ne11,
-                    /*.ne12 =*/ ne12,
-                    /*.nb10 =*/ nb10,
-                    /*.nb11 =*/ nb11,
-                    /*.nb12 =*/ nb12,
-                    /*.nb1  =*/ nb1,
-                    /*.nb2  =*/ nb2,
-                    /*.nb3  =*/ nb3,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args    length:sizeof(args) atIndex:0];
-                [encoder setBuffer:id_src0 offset:offs_src0    atIndex:1];
-                [encoder setBuffer:id_src1 offset:offs_src1    atIndex:2];
-                [encoder setBuffer:id_dst  offset:offs_dst     atIndex:3];
-
-                [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nrptg - 1)/nrptg, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, nrptg, 1)];
-            } break;
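
SET_ROWS above sizes its threadgroups in two steps: it grows the per-row thread count nth in powers of two from the SIMD width up to the pipeline's maxTotalThreadsPerThreadgroup, and if a single row needs fewer threads than that it packs several rows per threadgroup (nrptg) without exceeding the limit. A small sketch of the same sizing logic, with the pipeline limit passed in as an integer:

    // nth: threads per row, nrptg: rows per threadgroup
    static void set_rows_threadgroup(int nk0, int max_threads, int * nth, int * nrptg) {
        int t = 32; // SIMD width
        while (t < nk0 && t < max_threads) {
            t *= 2;
        }

        int r = 1;
        if (t > nk0) {
            r = (t + nk0 - 1)/nk0; // pack multiple short rows into one threadgroup
            t = nk0;
            if (r*t > max_threads) {
                r--;
            }
        }

        *nth   = t < nk0 ? t : nk0;
        *nrptg = r;
    }
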
-        case GGML_OP_RMS_NORM:
-            {
-                GGML_ASSERT(ne00 % 4 == 0);
-                GGML_ASSERT(ggml_is_contiguous_rows(src0));
-
-                float eps;
-                memcpy(&eps, dst->op_params, sizeof(float));
-
-                ggml_metal_kargs_rms_norm args = {
-                    /*.ne00   =*/ ne00,
-                    /*.ne00_4 =*/ ne00/4,
-                    /*.nb1    =*/ nb1,
-                    /*.nb2    =*/ nb2,
-                    /*.nb3    =*/ nb3,
-                    /*.eps    =*/ eps,
-                    /*.nef1   =*/ { ne01 },
-                    /*.nef2   =*/ { ne02 },
-                    /*.nef3   =*/ { ne03 },
-                    /*.nbf1   =*/ { nb01 },
-                    /*.nbf2   =*/ { nb02 },
-                    /*.nbf3   =*/ { nb03 },
-                };
-
-                size_t offs_fuse[2] = { 0, 0 };
-                id<MTLBuffer> id_fuse[2] = { id_src0, id_src0 };
-
-                // d[0] = rms_norm(a)
-                // d[1] = mul(d[0], b)
-                // d[2] = add(d[1], c)
-                if (ctx_dev->use_fusion) {
-                    ops[0] = GGML_OP_RMS_NORM;
-                    ops[1] = GGML_OP_MUL;
-                    ops[2] = GGML_OP_ADD;
-
-                    for (n_fuse = 0; n_fuse <= 1 && idx + n_fuse + 1 < idx_end; ++n_fuse) {
-                        if (!ggml_can_fuse(gf, idx + n_fuse, ops + n_fuse, 2)) {
-                            break;
-                        }
-
-                        if (nodes[n_fuse] != nodes[n_fuse + 1]->src[0]) {
-                            break;
-                        }
-
-                        if (nodes[n_fuse + 1]->src[1]->ne[0] != node->ne[0]) {
-                            break;
-                        }
-
-                        if (!ggml_is_contiguous_rows(nodes[n_fuse + 1]->src[1])) {
-                            break;
-                        }
-
-                        if (nodes[n_fuse + 1]->type != GGML_TYPE_F32) {
-                            break;
-                        }
-
-                        ctx_dev->fuse_cnt[nodes[n_fuse + 1]->op]++;
-
-                        id_fuse[n_fuse] = ggml_metal_get_buffer(nodes[n_fuse + 1]->src[1], &offs_fuse[n_fuse]);
-
-                        args.nef1[n_fuse + 1] = nodes[n_fuse + 1]->src[1]->ne[1];
-                        args.nef2[n_fuse + 1] = nodes[n_fuse + 1]->src[1]->ne[2];
-                        args.nef3[n_fuse + 1] = nodes[n_fuse + 1]->src[1]->ne[3];
-
-                        args.nbf1[n_fuse + 1] = nodes[n_fuse + 1]->src[1]->nb[1];
-                        args.nbf2[n_fuse + 1] = nodes[n_fuse + 1]->src[1]->nb[2];
-                        args.nbf3[n_fuse + 1] = nodes[n_fuse + 1]->src[1]->nb[3];
-                    }
-
-                    ++n_fuse;
-
-                    if (ctx_dev->debug_fusion > 1 && n_fuse > 1) {
-                        if (n_fuse == 2) {
-                            GGML_LOG_DEBUG("%s: fuse: RMS_NORM + MUL\n", __func__);
-                        }
-                        if (n_fuse == 3) {
-                            GGML_LOG_DEBUG("%s: fuse: RMS_NORM + MUL + ADD\n", __func__);
-                        }
-                    }
-                }
-
-                if (n_fuse > 1) {
-                    id_dst = ggml_metal_get_buffer(nodes[n_fuse - 1], &offs_dst);
-
-                    for (int i = 1; i < n_fuse; ++i) {
-                        if (!ggml_metal_encode_concurrency_check(ctx_enc, nodes[i])) {
-                            ggml_metal_encode_concurrency_reset(ctx_enc);
-
-                            break;
-                        }
-                    }
-                }
-
-                const id<MTLComputePipelineState> pipeline = ggml_metal_get_pipeline_rms_norm(backend, node, n_fuse);
-
-                int nth = 32; // SIMD width
-
-                while (nth < ne00/4 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) {
-                    nth *= 2;
-                }
-
-                nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup);
-                nth = MIN(nth, ne00/4);
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args length:sizeof(args)       atIndex:0];
-                [encoder setBuffer:id_src0    offset:offs_src0    atIndex:1];
-                [encoder setBuffer:id_fuse[0] offset:offs_fuse[0] atIndex:2];
-                [encoder setBuffer:id_fuse[1] offset:offs_fuse[1] atIndex:3];
-                [encoder setBuffer:id_dst     offset:offs_dst     atIndex:4];
-
-                [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
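
The fusion logic above lets a single kernel cover the common d = add(mul(rms_norm(a), b), c) chain: starting from the RMS_NORM node it inspects at most two following graph nodes, requiring each to be the expected op (along with the graph-level checks done by ggml_can_fuse), to consume the previous node's result as its first source, to produce an F32 result with the same row length as the norm and contiguous rows, and records the extra src[1] buffers and strides in the nef*/nbf* slots of the kernel arguments. A condensed sketch of that eligibility walk, using hypothetical node fields in place of the ggml_tensor/ggml_cgraph structs:

    #include <stdbool.h>

    enum op_kind { OP_RMS_NORM, OP_MUL, OP_ADD, OP_OTHER };

    typedef struct node {
        enum op_kind  op;
        struct node * src0;      // first operand
        struct node * src1;      // second operand
        long          ne0;       // row length
        bool          f32;       // result is F32
        bool          cont_rows; // rows are contiguous
    } node;

    // how many consecutive ops starting at nodes[0] (the RMS_NORM) can be fused: 1, 2 or 3
    static int rms_norm_fuse_count(node * const * nodes, int n_avail) {
        static const enum op_kind want[3] = { OP_RMS_NORM, OP_MUL, OP_ADD };

        int n_fuse = 1;
        for (int i = 0; i < 2 && i + 1 < n_avail; ++i) {
            const node * cur = nodes[i];
            const node * nxt = nodes[i + 1];

            if (nxt->op   != want[i + 1])        break; // MUL after RMS_NORM, then ADD
            if (nxt->src0 != cur)                break; // must consume the previous result
            if (nxt->src1->ne0 != nodes[0]->ne0) break; // same row length as the norm
            if (!nxt->src1->cont_rows)           break; // extra operand needs contiguous rows
            if (!nxt->f32)                       break; // fused result must be F32
            n_fuse++;
        }

        return n_fuse;
    }
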
-        case GGML_OP_L2_NORM:
-            {
-                GGML_ASSERT(ne00 % 4 == 0);
-                GGML_ASSERT(ggml_is_contiguous_1(src0));
-
-                float eps;
-                memcpy(&eps, dst->op_params, sizeof(float));
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_L2_NORM].pipeline;
-
-                int nth = 32; // SIMD width
-
-                while (nth < ne00/4 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) {
-                    nth *= 2;
-                }
-
-                nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup);
-                nth = MIN(nth, ne00/4);
-
-                ggml_metal_kargs_l2_norm args = {
-                    /*.ne00   =*/ ne00,
-                    /*.ne00_4 =*/ ne00/4,
-                    /*.nb01   =*/ nb01,
-                    /*.eps    =*/ eps,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
-
-                [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
-
-                const int64_t nrows = ggml_nrows(src0);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_GROUP_NORM:
-            {
-                GGML_ASSERT(ggml_is_contiguous(src0));
-
-                float eps;
-                memcpy(&eps, dst->op_params + 1, sizeof(float));
-
-                const int32_t n_groups = ((const int32_t *) dst->op_params)[0];
-
-                int nth = 32; // SIMD width
-
-                //while (nth < ne00/4 && nth < 1024) {
-                //    nth *= 2;
-                //}
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GROUP_NORM].pipeline;
-
-                ggml_metal_kargs_group_norm args = {
-                    /*.ne00 =*/ ne00,
-                    /*.ne01 =*/ ne01,
-                    /*.ne02 =*/ ne02,
-                    /*.nb00 =*/ nb00,
-                    /*.nb01 =*/ nb01,
-                    /*.nb02 =*/ nb02,
-                    /*.n_groups =*/ n_groups,
-                    /*.eps =*/ eps,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0  offset:offs_src0        atIndex:0];
-                [encoder setBuffer:id_dst   offset:offs_dst         atIndex:1];
-                [encoder setBytes:&args     length:sizeof(args)     atIndex:2];
-                [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(n_groups, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_NORM:
-            {
-                GGML_ASSERT(ne00 % 4 == 0);
-                GGML_ASSERT(ggml_is_contiguous_1(src0));
-
-                float eps;
-                memcpy(&eps, dst->op_params, sizeof(float));
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_NORM].pipeline;
-
-                int nth = 32; // SIMD width
-
-                while (nth < ne00/4 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) {
-                    nth *= 2;
-                }
-
-                nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup);
-                nth = MIN(nth, ne00/4);
-
-                ggml_metal_kargs_norm args = {
-                    /*.ne00   =*/ ne00,
-                    /*.ne00_4 =*/ ne00/4,
-                    /*.nb01   =*/ nb01,
-                    /*.eps    =*/ eps,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
-
-                [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
-
-                const int64_t nrows = ggml_nrows(src0);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_ROPE:
-            {
-                // make sure we have one or more position ids (ne10) per token (ne02)
-                GGML_ASSERT(ne10 % ne02 == 0);
-                GGML_ASSERT(ne10 >= ne02);
-
-                const int nth = MIN(1024, ne00);
-
-                const int n_past     = ((const int32_t *) dst->op_params)[0];
-                const int n_dims     = ((const int32_t *) dst->op_params)[1];
-                const int mode       = ((const int32_t *) dst->op_params)[2];
-                // skip op_params[3] (n_ctx), used in GLM RoPE, not implemented in Metal
-                const int n_ctx_orig = ((const int32_t *) dst->op_params)[4];
-
-                float freq_base;
-                float freq_scale;
-                float ext_factor;
-                float attn_factor;
-                float beta_fast;
-                float beta_slow;
-
-                memcpy(&freq_base,   (const int32_t *) dst->op_params +  5, sizeof(float));
-                memcpy(&freq_scale,  (const int32_t *) dst->op_params +  6, sizeof(float));
-                memcpy(&ext_factor,  (const int32_t *) dst->op_params +  7, sizeof(float));
-                memcpy(&attn_factor, (const int32_t *) dst->op_params +  8, sizeof(float));
-                memcpy(&beta_fast,   (const int32_t *) dst->op_params +  9, sizeof(float));
-                memcpy(&beta_slow,   (const int32_t *) dst->op_params + 10, sizeof(float));
-
-                const bool is_neox   = mode & GGML_ROPE_TYPE_NEOX;
-                const bool is_mrope  = mode & GGML_ROPE_TYPE_MROPE;
-                const bool is_vision = mode == GGML_ROPE_TYPE_VISION;
-
-                // mrope
-                const int sect_0 = ((const int32_t *) dst->op_params)[11];
-                const int sect_1 = ((const int32_t *) dst->op_params)[12];
-                const int sect_2 = ((const int32_t *) dst->op_params)[13];
-                const int sect_3 = ((const int32_t *) dst->op_params)[14];
-
-                id<MTLComputePipelineState> pipeline = nil;
-
-                if (is_neox) {
-                    switch (src0->type) {
-                        case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_NEOX_F32].pipeline; break;
-                        case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_NEOX_F16].pipeline; break;
-                        default: GGML_ABORT("fatal error");
-                    };
-                } else if (is_mrope && !is_vision) {
-                    GGML_ASSERT(ne10*4 >= ne02); // need at least 4 pos per token
-                    switch (src0->type) {
-                        case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_MULTI_F32].pipeline; break;
-                        case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_MULTI_F16].pipeline; break;
-                        default: GGML_ABORT("fatal error");
-                    };
-                } else if (is_vision) {
-                    GGML_ASSERT(ne10*4 >= ne02); // need at least 4 pos per token
-                    switch (src0->type) {
-                        case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_VISION_F32].pipeline; break;
-                        case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_VISION_F16].pipeline; break;
-                        default: GGML_ABORT("fatal error");
-                    };
-                } else {
-                    switch (src0->type) {
-                        case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_NORM_F32].pipeline; break;
-                        case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_NORM_F16].pipeline; break;
-                        default: GGML_ABORT("fatal error");
-                    };
-                }
-
-                ggml_metal_kargs_rope args = {
-                    /*.ne00        =*/ ne00,
-                    /*.ne01        =*/ ne01,
-                    /*.ne02        =*/ ne02,
-                    /*.ne03        =*/ ne03,
-                    /*.nb00        =*/ nb00,
-                    /*.nb01        =*/ nb01,
-                    /*.nb02        =*/ nb02,
-                    /*.nb03        =*/ nb03,
-                    /*.ne0         =*/ ne0,
-                    /*.ne1         =*/ ne1,
-                    /*.ne2         =*/ ne2,
-                    /*.ne3         =*/ ne3,
-                    /*.nb0         =*/ nb0,
-                    /*.nb1         =*/ nb1,
-                    /*.nb2         =*/ nb2,
-                    /*.nb3         =*/ nb3,
-                    /*.n_past      =*/ n_past,
-                    /*.n_dims      =*/ n_dims,
-                    /*.n_ctx_orig  =*/ n_ctx_orig,
-                    /*.freq_base   =*/ freq_base,
-                    /*.freq_scale  =*/ freq_scale,
-                    /*.ext_factor  =*/ ext_factor,
-                    /*.attn_factor =*/ attn_factor,
-                    /*.beta_fast   =*/ beta_fast,
-                    /*.beta_slow   =*/ beta_slow,
-                    /*.sect_0      =*/ sect_0,
-                    /*.sect_1      =*/ sect_1,
-                    /*.sect_2      =*/ sect_2,
-                    /*.sect_3      =*/ sect_3,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args length:sizeof(args)     atIndex:0];
-                [encoder setBuffer:id_src0 offset:offs_src0     atIndex:1];
-                [encoder setBuffer:id_src1 offset:offs_src1     atIndex:2];
-                if (id_src2 != nil) {
-                    [encoder setBuffer:id_src2 offset:offs_src2 atIndex:3];
-                } else {
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:3];
-                }
-                [encoder setBuffer:id_dst  offset:offs_dst      atIndex:4];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_IM2COL:
-            {
-                GGML_ASSERT(ggml_is_contiguous(src1));
-                GGML_ASSERT(src1->type == GGML_TYPE_F32);
-                GGML_ASSERT( dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32);
-
-                const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
-                const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
-                const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
-                const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
-                const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
-                const int32_t d1 = ((const int32_t *)(dst->op_params))[5];
-
-                const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;
-
-                const int32_t N  = src1->ne[is_2D ? 3 : 2];
-                const int32_t IC = src1->ne[is_2D ? 2 : 1];
-                const int32_t IH = is_2D ? src1->ne[1] : 1;
-                const int32_t IW =         src1->ne[0];
-
-                const int32_t KH = is_2D ? src0->ne[1] : 1;
-                const int32_t KW =         src0->ne[0];
-
-                const int32_t OH = is_2D ? dst->ne[2] : 1;
-                const int32_t OW =         dst->ne[1];
-
-                const int32_t CHW = IC * KH * KW;
-
-                const uint64_t ofs0 = src1->nb[is_2D ? 3 : 2] / 4;
-                const uint64_t ofs1 = src1->nb[is_2D ? 2 : 1] / 4;
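-                // note: nb[] are byte strides; src1 is F32 (asserted above), so dividing by 4
-                // converts the batch/channel strides into float-element offsets for the kernel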
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_F32].pipeline;
-
-                const bool is_gt_mttpt = ((size_t)(N * KH * KW)) > pipeline.maxTotalThreadsPerThreadgroup;
-
-                switch (dst->type) {
-                    case GGML_TYPE_F32: {
-                        pipeline = (is_gt_mttpt ?
-                                    ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_EXT_F32].pipeline
-                                    :
-                                    ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_F32].pipeline);
-                    } break;
-                    case GGML_TYPE_F16: {
-                        pipeline = (is_gt_mttpt ?
-                                    ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_EXT_F16].pipeline
-                                    :
-                                    ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_F16].pipeline);
-                    } break;
-                    default: GGML_ABORT("fatal error");
-                };
-
-                ggml_metal_kargs_im2col args = {
-                    /*.ofs0 =*/ ofs0,
-                    /*.ofs1 =*/ ofs1,
-                    /*.IW   =*/ IW,
-                    /*.IH   =*/ IH,
-                    /*.CHW  =*/ CHW,
-                    /*.s0   =*/ s0,
-                    /*.s1   =*/ s1,
-                    /*.p0   =*/ p0,
-                    /*.p1   =*/ p1,
-                    /*.d0   =*/ d0,
-                    /*.d1   =*/ d1,
-                    /*.N    =*/ N,
-                    /*.KH   =*/ KH,
-                    /*.KW   =*/ KW,
-                    /*.KHW  =*/ KH * KW,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src1 offset:offs_src1       atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst        atIndex:1];
-                [encoder setBytes:&args length:sizeof(args)       atIndex:2];
-
-                if (is_gt_mttpt) {
-                    const uint64_t n_threads = MIN(pipeline.maxTotalThreadsPerThreadgroup, (uint64_t)N);
-
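-                    // quotient = ceil(N / n_threads): threadgroups needed along the batch dimension
-                    // (e.g. N = 100, n_threads = 32 -> quotient = 4, illustrative values)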
-                    const int64_t  quotient  = N / n_threads + (N % n_threads > 0 ? 1 : 0);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(quotient * CHW, OH, OW) threadsPerThreadgroup:MTLSizeMake(n_threads, 1, 1)];
-                } else {
-                    [encoder dispatchThreadgroups:MTLSizeMake(IC, OH, OW) threadsPerThreadgroup:MTLSizeMake(N, KH, KW)];
-                }
-            } break;
-        case GGML_OP_CONV_TRANSPOSE_1D:
-            {
-                GGML_ASSERT(ggml_is_contiguous(src0));
-                GGML_ASSERT(ggml_is_contiguous(src1));
-                GGML_ASSERT(src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_F32);
-                GGML_ASSERT(src1->type == GGML_TYPE_F32);
-                GGML_ASSERT( dst->type == GGML_TYPE_F32);
-
-                const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
-
-                const int32_t IC = src1->ne[1];
-                const int32_t IL = src1->ne[0];
-
-                const int32_t K  = src0->ne[0];
-
-                const int32_t OL = dst->ne[0];
-                const int32_t OC = dst->ne[1];
-
-                id<MTLComputePipelineState> pipeline;
-
-                switch (src0->type) {
-                    case GGML_TYPE_F32: {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CONV_TRANSPOSE_1D_F32_F32].pipeline;
-                    } break;
-                    case GGML_TYPE_F16: {
-                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CONV_TRANSPOSE_1D_F16_F32].pipeline;
-                    } break;
-                    default: GGML_ABORT("fatal error");
-                };
-
-                ggml_metal_kargs_conv_transpose_1d args = {
-                    /*.IC =*/ IC,
-                    /*.IL =*/ IL,
-                    /*.K  =*/ K,
-                    /*.s0 =*/ s0,
-                    /*.nb0 =*/ nb0,
-                    /*.nb1 =*/ nb1,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0         atIndex:0];
-                [encoder setBuffer:id_src1 offset:offs_src1         atIndex:1];
-                [encoder setBuffer:id_dst  offset:offs_dst          atIndex:2];
-                [encoder setBytes:&args    length:sizeof(args)       atIndex:3];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(OL, OC, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-            } break;
-        case GGML_OP_UPSCALE:
-            {
-                GGML_ASSERT(src0->type == GGML_TYPE_F32);
-
-                const float sf0 = (float)ne0/src0->ne[0];
-                const float sf1 = (float)ne1/src0->ne[1];
-                const float sf2 = (float)ne2/src0->ne[2];
-                const float sf3 = (float)ne3/src0->ne[3];
-
-                const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_UPSCALE_F32].pipeline;
-
-                ggml_metal_kargs_upscale args = {
-                    /*.ne00 =*/ ne00,
-                    /*.ne01 =*/ ne01,
-                    /*.ne02 =*/ ne02,
-                    /*.ne03 =*/ ne03,
-                    /*.nb00 =*/ nb00,
-                    /*.nb01 =*/ nb01,
-                    /*.nb02 =*/ nb02,
-                    /*.nb03 =*/ nb03,
-                    /*.ne0 =*/ ne0,
-                    /*.ne1 =*/ ne1,
-                    /*.ne2 =*/ ne2,
-                    /*.ne3 =*/ ne3,
-                    /*.nb0 =*/ nb0,
-                    /*.nb1 =*/ nb1,
-                    /*.nb2 =*/ nb2,
-                    /*.nb3 =*/ nb3,
-                    /*.sf0 =*/ sf0,
-                    /*.sf1 =*/ sf1,
-                    /*.sf2 =*/ sf2,
-                    /*.sf3 =*/ sf3
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-                [encoder setBytes:&args length:sizeof(args) atIndex:2];
-
-                const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_PAD:
-            {
-                GGML_ASSERT(src0->type == GGML_TYPE_F32);
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_PAD_F32].pipeline;
-
-                ggml_metal_kargs_pad args = {
-                    /*.ne00 =*/ ne00,
-                    /*.ne01 =*/ ne01,
-                    /*.ne02 =*/ ne02,
-                    /*.ne03 =*/ ne03,
-                    /*.nb00 =*/ nb00,
-                    /*.nb01 =*/ nb01,
-                    /*.nb02 =*/ nb02,
-                    /*.nb03 =*/ nb03,
-                    /*.ne0 =*/ ne0,
-                    /*.ne1 =*/ ne1,
-                    /*.ne2 =*/ ne2,
-                    /*.ne3 =*/ ne3,
-                    /*.nb0 =*/ nb0,
-                    /*.nb1 =*/ nb1,
-                    /*.nb2 =*/ nb2,
-                    /*.nb3 =*/ nb3
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-                [encoder setBytes:&args length:sizeof(args) atIndex:2];
-
-                const int nth = MIN(1024, ne0);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_PAD_REFLECT_1D:
-            {
-                GGML_ASSERT(src0->type == GGML_TYPE_F32);
-
-                const int32_t p0 = ((const int32_t *)(dst->op_params))[0];
-                const int32_t p1 = ((const int32_t *)(dst->op_params))[1];
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_PAD_REFLECT_1D_F32].pipeline;
-
-                ggml_metal_kargs_pad_reflect_1d args = {
-                    /*.ne00 =*/ ne00,
-                    /*.ne01 =*/ ne01,
-                    /*.ne02 =*/ ne02,
-                    /*.ne03 =*/ ne03,
-                    /*.nb00 =*/ nb00,
-                    /*.nb01 =*/ nb01,
-                    /*.nb02 =*/ nb02,
-                    /*.nb03 =*/ nb03,
-                    /*.ne0 =*/ ne0,
-                    /*.ne1 =*/ ne1,
-                    /*.ne2 =*/ ne2,
-                    /*.ne3 =*/ ne3,
-                    /*.nb0 =*/ nb0,
-                    /*.nb1 =*/ nb1,
-                    /*.nb2 =*/ nb2,
-                    /*.nb3 =*/ nb3,
-                    /*.p0 =*/ p0,
-                    /*.p1 =*/ p1
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-                [encoder setBytes:&args length:sizeof(args) atIndex:2];
-
-                const int nth = MIN(1024, ne0);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_ARANGE:
-            {
-                GGML_ASSERT(dst->type == GGML_TYPE_F32);
-
-                float start;
-                float step;
-
-                memcpy(&start, ((const int32_t *) dst->op_params) + 0, sizeof(float));
-                memcpy(&step,  ((const int32_t *) dst->op_params) + 2, sizeof(float));
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARANGE_F32].pipeline;
-
-                ggml_metal_kargs_arange args = {
-                    /*.ne0 =*/ ne0,
-                    /*.start =*/ start,
-                    /*.step =*/ step
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:0];
-                [encoder setBytes:&args length:sizeof(args) atIndex:1];
-
-                const int nth = MIN(1024, ne0);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(1, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_TIMESTEP_EMBEDDING:
-            {
-                GGML_ASSERT(src0->type == GGML_TYPE_F32);
-
-                const int dim        = dst->op_params[0];
-                const int max_period = dst->op_params[1];
-
-                const int half = dim / 2;
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32].pipeline;
-
-                ggml_metal_kargs_timestep_embedding args = {
-                    /*.nb1 =*/ nb1,
-                    /*.dim =*/ dim,
-                    /*.max_period =*/ max_period
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-                [encoder setBytes:&args length:sizeof(args) atIndex:2];
-
-                const int nth = MIN(1024, half);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(ne00, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        case GGML_OP_ARGSORT:
-            {
-                GGML_ASSERT(src0->type == GGML_TYPE_F32);
-                GGML_ASSERT( dst->type == GGML_TYPE_I32);
-
-                const int nrows = ggml_nrows(src0);
-
-                enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0];
-
-                // bitonic sort requires the number of elements to be a power of 2
-                int64_t ne00_padded = 1;
-                while (ne00_padded < ne00) {
-                    ne00_padded *= 2;
-                }
-
-                // Metal kernels require the buffer size to be a multiple of 16 bytes
-                // https://developer.apple.com/documentation/metal/mtlcomputecommandencoder/1443142-setthreadgroupmemorylength
-                const int mem_size = GGML_PAD(ne00_padded*sizeof(int32_t), 16);
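-                // e.g. (illustrative values) ne00 = 100 -> ne00_padded = 128 and
-                // mem_size = GGML_PAD(128*sizeof(int32_t), 16) = 512 bytes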
-
-                id<MTLComputePipelineState> pipeline = nil;
-
-                switch (order) {
-                    case GGML_SORT_ORDER_ASC:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC].pipeline;  break;
-                    case GGML_SORT_ORDER_DESC: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC].pipeline; break;
-                    default: GGML_ABORT("fatal error");
-                };
-
-                ggml_metal_kargs_argsort args = {
-                    /*.ncols =*/ ne00,
-                    /*.ncols_pad =*/ ne00_padded
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-                [encoder setBytes:&args length:sizeof(args) atIndex:2];
-                [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(1, nrows, 1) threadsPerThreadgroup:MTLSizeMake(ne00_padded, 1, 1)];
-            } break;
-        case GGML_OP_LEAKY_RELU:
-            {
-                GGML_ASSERT(src0->type == GGML_TYPE_F32);
-
-                float slope;
-                memcpy(&slope, dst->op_params, sizeof(float));
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32].pipeline;
-
-                ggml_metal_kargs_leaky_relu args = {
-                    /*.slope =*/ slope
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0   atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst    atIndex:1];
-                [encoder setBytes:&args length:sizeof(args)   atIndex:2];
-
-                const int64_t n = ggml_nelements(dst);
-
-                [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-            } break;
-        case GGML_OP_FLASH_ATTN_EXT:
-            {
-                GGML_ASSERT(ne00 % 4  == 0);
-                GGML_ASSERT(ne11 % 32 == 0);
-
-                GGML_ASSERT(src0->type == GGML_TYPE_F32);
-                GGML_ASSERT(src1->type == src2->type);
-
-                //GGML_ASSERT(ggml_are_same_shape (src1, src2));
-                GGML_ASSERT(ne11 == ne21);
-                GGML_ASSERT(ne12 == ne22);
-
-                struct ggml_tensor * src3 = node->src[3]; // mask
-                struct ggml_tensor * src4 = node->src[4]; // sinks
-
-                size_t offs_src3 = 0;
-                size_t offs_src4 = 0;
-
-                id<MTLBuffer> id_src3 = src3 ? ggml_metal_get_buffer(src3, &offs_src3) : nil;
-                id<MTLBuffer> id_src4 = src4 ? ggml_metal_get_buffer(src4, &offs_src4) : nil;
-
-                GGML_ASSERT(!src3 || src3->type == GGML_TYPE_F16);
-                GGML_ASSERT(!src3 || src3->ne[1] >= GGML_PAD(src0->ne[1], 8) &&
-                        "the Flash-Attention Metal kernel requires the mask to be padded to a multiple of 8 and to be at least n_queries rows");
-
-                const int64_t  ne30 = src3 ? src3->ne[0] : 0; GGML_UNUSED(ne30);
-                //const int64_t  ne31 = src3 ? src3->ne[1] : 0;
-                const int64_t  ne32 = src3 ? src3->ne[2] : 0; GGML_UNUSED(ne32);
-                const int64_t  ne33 = src3 ? src3->ne[3] : 0; GGML_UNUSED(ne33);
-
-                const uint64_t nb30 = src3 ? src3->nb[0] : 0; GGML_UNUSED(nb30);
-                const uint64_t nb31 = src3 ? src3->nb[1] : 0;
-                const uint64_t nb32 = src3 ? src3->nb[2] : 0; GGML_UNUSED(nb32);
-                const uint64_t nb33 = src3 ? src3->nb[3] : 0; GGML_UNUSED(nb33);
-
-                float scale;
-                float max_bias;
-                float logit_softcap;
-
-                memcpy(&scale,         ((const int32_t *) dst->op_params) + 0, sizeof(scale));
-                memcpy(&max_bias,      ((const int32_t *) dst->op_params) + 1, sizeof(max_bias));
-                memcpy(&logit_softcap, ((const int32_t *) dst->op_params) + 2, sizeof(logit_softcap));
-
-                if (logit_softcap != 0.0f) {
-                    scale /= logit_softcap;
-                }
-
-                const bool has_mask  = src3 != NULL;
-                const bool has_sinks = src4 != NULL;
-                const bool has_bias  = max_bias != 0.0f;
-                const bool has_scap  = logit_softcap != 0.0f;
-
-                const uint32_t n_head      = src0->ne[2];
-                const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
-
-                const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
-                const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
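-                // m0/m1 are the ALiBi slope bases; e.g. (illustrative values) n_head = 12 -> n_head_log2 = 8,
-                // and with max_bias = 8.0f: m0 = 2^-1 = 0.5, m1 = 2^-0.5 ~= 0.707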
-
-                GGML_ASSERT(ne01 < 65536);
-
-                if (!ggml_metal_flash_attn_ext_use_vec(dst)) {
-                    // half8x8 kernel
-                    const int64_t nqptg = 8;  // queries per threadgroup    !! sync with kernel template arguments !!
-                    const int64_t ncpsg = 64; // cache values per simdgroup !! sync with kernel template arguments !!
-
-                    GGML_ASSERT(nqptg <= 32);
-                    GGML_ASSERT(nqptg  % 8  == 0);
-                    GGML_ASSERT(ncpsg  % 32 == 0);
-
-                    const int is_q = ggml_is_quantized(src1->type) ? 1 : 0;
-
-                    // 2*(2*ncpsg)
-                    // ncpsg soft_max values + ncpsg mask values
-                    //
-                    // 16*32*(nsg)
-                    // the shared memory needed for the simdgroups to load the KV cache
-                    // each thread loads (dequantizes) 16 head elements; there are 32 threads in the SG
-                    //
-#define FATTN_SMEM(nsg) (GGML_PAD((nqptg*(ne00 + 2*GGML_PAD(ne20, 64) + 2*(2*ncpsg)) + is_q*(16*32*(nsg)))*(sizeof(float)/2), 16))
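-                    // e.g. (illustrative values) ne00 = ne20 = 128, f16 K/V (is_q = 0), nsg = 4:
-                    //   FATTN_SMEM(4) = GGML_PAD((8*(128 + 2*128 + 2*128))*2, 16) = 10240 bytes (10 KiB)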
-
-                    //int64_t nsgmax = 4;
-                    //
-                    //if (is_q) {
-                    //    nsgmax = 2;
-                    //    while (true) {
-                    //        const size_t smem = FATTN_SMEM(nsgmax);
-                    //        if (smem > device.maxThreadgroupMemoryLength/2) {
-                    //            break;
-                    //        }
-                    //        nsgmax *= 2;
-                    //    }
-                    //    nsgmax /= 2;
-                    //}
-
-                    // simdgroups per threadgroup (a.k.a. warps)
-                    //nsg = ne01 <= nqptg ? MAX(4, MIN(nsgmax, MIN(ne11/ncpsg, (int64_t) pipeline.maxTotalThreadsPerThreadgroup/32))) : 4;
-                    int32_t nsg = 4;
-
-                    const size_t smem = FATTN_SMEM(nsg);
-
-                    ggml_metal_kargs_flash_attn_ext args = {
-                        /*.ne01          =*/ ne01,
-                        /*.ne02          =*/ ne02,
-                        /*.ne03          =*/ ne03,
-                        /*.nb01          =*/ nb01,
-                        /*.nb02          =*/ nb02,
-                        /*.nb03          =*/ nb03,
-                        /*.ne11          =*/ ne11,
-                        /*.ne_12_2       =*/ ne12,
-                        /*.ne_12_3       =*/ ne13,
-                        /*.ns10          =*/ nb11/nb10,
-                        /*.nb11          =*/ nb11,
-                        /*.nb12          =*/ nb12,
-                        /*.nb13          =*/ nb13,
-                        /*.ns20          =*/ nb21/nb20,
-                        /*.nb21          =*/ nb21,
-                        /*.nb22          =*/ nb22,
-                        /*.nb23          =*/ nb23,
-                        /*.ne32          =*/ ne32,
-                        /*.ne33          =*/ ne33,
-                        /*.nb31          =*/ nb31,
-                        /*.nb32          =*/ nb32,
-                        /*.nb33          =*/ nb33,
-                        /*.ne1           =*/ ne1,
-                        /*.ne2           =*/ ne2,
-                        /*.ne3           =*/ ne3,
-                        /*.scale         =*/ scale,
-                        /*.max_bias      =*/ max_bias,
-                        /*.m0            =*/ m0,
-                        /*.m1            =*/ m1,
-                        /*.n_head_log2   =*/ n_head_log2,
-                        /*.logit_softcap =*/ logit_softcap,
-                    };
-
-                    id<MTLComputePipelineState> pipeline = ggml_metal_get_pipeline_flash_attn_ext(backend, node, has_mask, has_sinks, has_bias, has_scap, nsg);
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBytes:&args length:sizeof(args)     atIndex:0];
-                    [encoder setBuffer:id_src0 offset:offs_src0     atIndex:1];
-                    [encoder setBuffer:id_src1 offset:offs_src1     atIndex:2];
-                    [encoder setBuffer:id_src2 offset:offs_src2     atIndex:3];
-                    if (id_src3) {
-                        [encoder setBuffer:id_src3 offset:offs_src3 atIndex:4];
-                    } else {
-                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:4];
-                    }
-                    if (id_src4) {
-                        [encoder setBuffer:id_src4 offset:offs_src4 atIndex:5];
-                    } else {
-                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:5];
-                    }
-
-                    [encoder setBuffer:id_dst offset:offs_dst atIndex:6];
-
-                    //printf("smem: %zu, max: %zu, nsg = %d, ne02 = %d, ne12 = %d\n", smem, device.maxThreadgroupMemoryLength, (int) nsg, ne02, ne12);
-                    GGML_ASSERT(smem <= device.maxThreadgroupMemoryLength);
-                    [encoder setThreadgroupMemoryLength:smem atIndex:0];
-                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nqptg - 1)/nqptg, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)];
-#undef FATTN_SMEM
-                } else {
-                    // half4x4 kernel
-                    const int64_t nqptg = 1;  // queries per threadgroup    !! sync with kernel template arguments !!
-                    const int64_t ncpsg = 32; // cache values per simdgroup !! sync with kernel template arguments !!
-                    const int64_t nkpsg = 1*ncpsg;
-
-                    GGML_ASSERT(nqptg <= 32);
-                    GGML_ASSERT(nqptg  % 1  == 0);
-                    GGML_ASSERT(ncpsg  % 32 == 0);
-
-                    // ne00 + 2*ncpsg*(nsg)
-                    // for each query, we load it as f16 in shared memory (ne00)
-                    // and store the soft_max values and the mask
-                    //
-                    // ne20*(nsg)
-                    // each simdgroup has a full f32 head vector in shared mem to accumulate results
-                    //
-#define FATTN_SMEM(nsg) (GGML_PAD((nqptg*(GGML_PAD(ne00, 128) + 4*ncpsg*(nsg)) + 2*GGML_PAD(ne20, 128)*(nsg))*(sizeof(float)/2), 16))
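-                    // e.g. (illustrative values) ne00 = ne20 = 128, ncpsg = 32, nsg = 2:
-                    //   FATTN_SMEM(2) = GGML_PAD((1*(128 + 4*32*2) + 2*128*2)*2, 16) = 1792 bytes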
-
-                    int64_t nsgmax = 2;
-                    while (true) {
-                        const size_t smem = FATTN_SMEM(nsgmax);
-                        // avoid using more than half of the threadgroup memory - can cause slowdowns, especially for large head sizes
-                        if (smem > device.maxThreadgroupMemoryLength/2) {
-                            break;
-                        }
-                        nsgmax *= 2;
-                    }
-                    nsgmax /= 2;
-
-                    // simdgroups per threadgroup (a.k.a. warps)
-                    //const int64_t nsgt = MAX(2, MIN(nsgmax, MIN((ne11 + nkpsg - 1)/(nkpsg), (int64_t) pipeline.maxTotalThreadsPerThreadgroup/32)));
-                    const int64_t nsgt = MAX(2, MIN(nsgmax, MIN((ne11 + nkpsg - 1)/(nkpsg), (int64_t) 1024/32)));
-
-                    int64_t nsg = 1;
-                    while (nsg <= nsgt) {
-                        nsg *= 2;
-                    }
-                    nsg /= 2;
-
-                    // workgroups
-                    // each workgroup handles nsg*nkpsg cache values
-                    int32_t nwg = 1;
-                    if (false) {
-                        // for small KV caches, we could launch a single workgroup and write the results directly to dst
-                        // however, this does not lead to significant improvement, so disabled
-                        nwg = 1;
-                        nsg = 4;
-                    } else {
-                        nwg = 32;
-                        nsg = 1;
-                        while (2*nwg*nsg*nkpsg < ne11 && nsg < 4) {
-                            nsg *= 2;
-                        }
-                    }
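-                    // e.g. (illustrative values) ne11 = 4096, nkpsg = 32: with nwg = 32 the loop stops at nsg = 2
-                    // (2*32*2*32 = 4096 is not < ne11), so 32 workgroups of 2 simdgroups cover the KV cache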
-
-                    ggml_metal_kargs_flash_attn_ext_vec args = {
-                        /*.ne01          =*/ ne01,
-                        /*.ne02          =*/ ne02,
-                        /*.ne03          =*/ ne03,
-                        /*.nb01          =*/ nb01,
-                        /*.nb02          =*/ nb02,
-                        /*.nb03          =*/ nb03,
-                        /*.ne11          =*/ ne11,
-                        /*.ne_12_2       =*/ ne12,
-                        /*.ne_12_3       =*/ ne13,
-                        /*.ns10          =*/ nb11/nb10,
-                        /*.nb11          =*/ nb11,
-                        /*.nb12          =*/ nb12,
-                        /*.nb13          =*/ nb13,
-                        /*.ns20          =*/ nb21/nb20,
-                        /*.nb21          =*/ nb21,
-                        /*.nb22          =*/ nb22,
-                        /*.nb23          =*/ nb23,
-                        /*.ne32          =*/ ne32,
-                        /*.ne33          =*/ ne33,
-                        /*.nb31          =*/ nb31,
-                        /*.nb32          =*/ nb32,
-                        /*.nb33          =*/ nb33,
-                        /*.ne1           =*/ ne1,
-                        /*.ne2           =*/ ne2,
-                        /*.ne3           =*/ ne3,
-                        /*.scale         =*/ scale,
-                        /*.max_bias      =*/ max_bias,
-                        /*.m0            =*/ m0,
-                        /*.m1            =*/ m1,
-                        /*.n_head_log2   =*/ n_head_log2,
-                        /*.logit_softcap =*/ logit_softcap,
-                    };
-
-                    id<MTLComputePipelineState> pipeline = ggml_metal_get_pipeline_flash_attn_ext_vec(backend, node, has_mask, has_sinks, has_bias, has_scap, nsg, nwg);
-
-                    GGML_ASSERT(nsg*32 <= (int) pipeline.maxTotalThreadsPerThreadgroup);
-
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBytes:&args length:sizeof(args)     atIndex:0];
-                    [encoder setBuffer:id_src0 offset:offs_src0     atIndex:1];
-                    [encoder setBuffer:id_src1 offset:offs_src1     atIndex:2];
-                    [encoder setBuffer:id_src2 offset:offs_src2     atIndex:3];
-                    if (id_src3) {
-                        [encoder setBuffer:id_src3 offset:offs_src3 atIndex:4];
-                    } else {
-                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:4];
-                    }
-                    if (id_src4) {
-                        [encoder setBuffer:id_src4 offset:offs_src4 atIndex:5];
-                    } else {
-                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:5];
-                    }
-
-                    const size_t smem = FATTN_SMEM(nsg);
-
-                    //printf("smem: %zu, max: %zu, nsg = %d, nsgmax = %d\n", smem, device.maxThreadgroupMemoryLength, (int) nsg, (int) nsgmax);
-                    GGML_ASSERT(smem <= device.maxThreadgroupMemoryLength);
-
-                    if (nwg == 1) {
-                        // using 1 workgroup -> write the result directly into dst
-                        [encoder setBuffer:id_dst offset:offs_dst atIndex:6];
-
-                        [encoder setThreadgroupMemoryLength:smem atIndex:0];
-                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nqptg - 1)/nqptg, ne02, ne03*nwg) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)];
-                    } else {
-                        // sanity checks
-                        GGML_ASSERT(ne01*ne02*ne03 == ne1*ne2*ne3);
-                        GGML_ASSERT(ne1*ne2*ne3 <= (1u << 31));
-
-                        // write the results from each workgroup into a temp buffer
-                        const size_t offs_tmp = offs_dst + ggml_nbytes(dst);
-                        [encoder setBuffer:id_dst offset:offs_tmp atIndex:6];
-
-                        [encoder setThreadgroupMemoryLength:smem atIndex:0];
-                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nqptg - 1)/nqptg, ne02, ne03*nwg) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)];
-
-                        // sync the 2 kernels
-                        ggml_metal_encode_concurrency_reset(ctx_enc);
-
-                        // reduce the results from the workgroups
-                        {
-                            const int32_t nrows = ne1*ne2*ne3;
-
-                            ggml_metal_kargs_flash_attn_ext_vec_reduce args0 = {
-                                nrows,
-                            };
-
-                            id<MTLComputePipelineState> pipeline0 = ggml_metal_get_pipeline_flash_attn_ext_vec_reduce(backend, node, ne20, nwg);
-
-                            [encoder setComputePipelineState:pipeline0];
-                            [encoder setBytes:&args0   length:sizeof(args0) atIndex:0];
-                            [encoder setBuffer:id_dst  offset:offs_tmp      atIndex:1];
-                            [encoder setBuffer:id_dst  offset:offs_dst      atIndex:2];
-
-                            //printf("ne1 = %d, ne2 = %d, ne3 = %d, ne20 = %d\n", ne1, ne2, ne3, ne20);
-                            [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(32*nwg, 1, 1)];
-                        }
-                    }
-#undef FATTN_SMEM
-                }
-            } break;
-        case GGML_OP_DUP:
-        case GGML_OP_CPY:
-        case GGML_OP_CONT:
-            {
-                id<MTLComputePipelineState> pipeline = nil;
-
-                switch (src0t) {
-                    case GGML_TYPE_F32:
-                        {
-                            GGML_ASSERT(ne0 % ggml_blck_size(dst->type) == 0);
-
-                            switch (dstt) {
-                                case GGML_TYPE_F32:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F32].pipeline; break;
-                                case GGML_TYPE_I32:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_I32].pipeline; break;
-                                case GGML_TYPE_F16:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F16].pipeline; break;
-                                case GGML_TYPE_BF16:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_BF16].pipeline; break;
-                                case GGML_TYPE_Q8_0:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0].pipeline; break;
-                                case GGML_TYPE_Q4_0:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0].pipeline; break;
-                                case GGML_TYPE_Q4_1:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1].pipeline; break;
-                                case GGML_TYPE_Q5_0:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0].pipeline; break;
-                                case GGML_TYPE_Q5_1:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1].pipeline; break;
-                                case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            };
-                        } break;
-                    case GGML_TYPE_I32:
-                        {
-                            switch (dstt) {
-                                case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_I32_F32].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            };
-                        } break;
-                    case GGML_TYPE_F16:
-                        {
-                            switch (dstt) {
-                                case GGML_TYPE_F32:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F16_F32].pipeline; break;
-                                case GGML_TYPE_F16:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F16_F16].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            };
-                        } break;
-                    case GGML_TYPE_BF16:
-                        {
-                            switch (dstt) {
-                                case GGML_TYPE_F32:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_BF16_F32].pipeline; break;
-                                case GGML_TYPE_BF16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_BF16_BF16].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            };
-                        } break;
-                    case GGML_TYPE_Q4_0:
-                        {
-                            switch (dstt) {
-                                case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_Q4_0_F32].pipeline; break;
-                                case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_Q4_0_F16].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            };
-                        } break;
-                    case GGML_TYPE_Q4_1:
-                        {
-                            switch (dstt) {
-                                case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_Q4_1_F32].pipeline; break;
-                                case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_Q4_1_F16].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            };
-                        } break;
-                    case GGML_TYPE_Q5_0:
-                        {
-                            switch (dstt) {
-                                case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_Q5_0_F32].pipeline; break;
-                                case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_Q5_0_F16].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            };
-                        } break;
-                    case GGML_TYPE_Q5_1:
-                        {
-                            switch (dstt) {
-                                case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_Q5_1_F32].pipeline; break;
-                                case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_Q5_1_F16].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            };
-                        } break;
-                    case GGML_TYPE_Q8_0:
-                        {
-                            switch (dstt) {
-                                case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_Q8_0_F32].pipeline; break;
-                                case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_Q8_0_F16].pipeline; break;
-                                default: GGML_ABORT("not implemented");
-                            };
-                        } break;
-                    default: GGML_ABORT("not implemented");
-                }
-
-                GGML_ASSERT(ne00 % ggml_blck_size(src0->type) == 0);
-
-                // TODO: support
-                //const int32_t nk00 = ne00/ggml_blck_size(dst->type);
-                const int32_t nk00 = ne00;
-
-                int nth = 32; // SIMD width
-
-                while (nth < nk00 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) {
-                    nth *= 2;
-                }
-
-                nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup);
-
-                // when rows are small, we can batch them together in a single threadgroup
-                int nrptg = 1;
-
-                // TODO: relax this constraint in the future
-                if (ggml_blck_size(src0->type) == 1 && ggml_blck_size(dst->type) == 1) {
-                    if (nth > nk00) {
-                        nrptg = (nth + nk00 - 1)/nk00;
-                        nth   = nk00;
-
-                        if (nrptg*nth > (int) pipeline.maxTotalThreadsPerThreadgroup) {
-                            nrptg--;
-                        }
-                    }
-                }
-
-                nth = MIN(nth, nk00);
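-                // e.g. (illustrative values) nk00 = 16 with a 1024-thread limit: nth starts at 32, is reduced to 16
-                // and nrptg = 2, so each threadgroup copies 2 rows of 16 elements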
-
-                ggml_metal_kargs_cpy args = {
-                    /*.ne00 =*/ nk00,
-                    /*.ne01 =*/ ne01,
-                    /*.ne02 =*/ ne02,
-                    /*.ne03 =*/ ne03,
-                    /*.nb00 =*/ nb00,
-                    /*.nb01 =*/ nb01,
-                    /*.nb02 =*/ nb02,
-                    /*.nb03 =*/ nb03,
-                    /*.ne0  =*/ ne0,
-                    /*.ne1  =*/ ne1,
-                    /*.ne2  =*/ ne2,
-                    /*.ne3  =*/ ne3,
-                    /*.nb0  =*/ nb0,
-                    /*.nb1  =*/ nb1,
-                    /*.nb2  =*/ nb2,
-                    /*.nb3  =*/ nb3,
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBytes:&args length:sizeof(args) atIndex:0];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
-
-                [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nrptg - 1)/nrptg, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, nrptg, 1)];
-            } break;
-        case GGML_OP_POOL_2D:
-            {
-                GGML_ASSERT(ggml_is_contiguous(src0));
-                GGML_ASSERT(src0t == GGML_TYPE_F32 && src0t == dstt);
-
-                const int32_t * opts = dst->op_params;
-                enum ggml_op_pool op = opts[0];
-
-                id<MTLComputePipelineState> pipeline = nil;
-                switch (src0t) {
-                    case GGML_TYPE_F32: {
-                        switch(op) {
-                            case GGML_OP_POOL_AVG:
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32].pipeline; break;
-                            case GGML_OP_POOL_MAX:
-                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32].pipeline; break;
-                            default: GGML_ASSERT(false && "not implemented");
-                        }
-                    } break;
-                    default: GGML_ASSERT(false && "not implemented");
-                }
-
-                const int32_t k0 = opts[1];
-                const int32_t k1 = opts[2];
-                const int32_t s0 = opts[3];
-                const int32_t s1 = opts[4];
-                const int32_t p0 = opts[5];
-                const int32_t p1 = opts[6];
-
-                const int64_t IH = src0->ne[1];
-                const int64_t IW = src0->ne[0];
-
-                const int64_t N  = dst->ne[3];
-                const int64_t OC = dst->ne[2];
-                const int64_t OH = dst->ne[1];
-                const int64_t OW = dst->ne[0];
-
-                const int64_t parallel_elements = N * OC * OH * OW;
-                const int64_t n_threads = MIN((int64_t)[pipeline maxTotalThreadsPerThreadgroup], parallel_elements);
-                const int64_t n_tg = (parallel_elements + n_threads - 1) / n_threads;
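-                // e.g. (illustrative values) N*OC*OH*OW = 100000 elements with a 1024-thread limit:
-                // n_threads = 1024 and n_tg = ceil(100000/1024) = 98 threadgroups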
-
-                ggml_metal_kargs_pool_2d args_pool_2d = {
-                    /* .k0 = */ k0,
-                    /* .k1 = */ k1,
-                    /* .s0 = */ s0,
-                    /* .s1 = */ s1,
-                    /* .p0 = */ p0,
-                    /* .p1 = */ p1,
-                    /* .IH = */ IH,
-                    /* .IW = */ IW,
-                    /* .OH = */ OH,
-                    /* .OW = */ OW,
-                    /* .parallel_elements = */ parallel_elements
-                };
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
-                [encoder setBytes:&args_pool_2d length:sizeof(args_pool_2d) atIndex:2];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(n_tg, 1, 1) threadsPerThreadgroup:MTLSizeMake(n_threads, 1, 1)];
-            } break;
-        case GGML_OP_ARGMAX:
-            {
-                GGML_ASSERT(src0->type == GGML_TYPE_F32);
-                GGML_ASSERT(ggml_is_contiguous_1(src0));
-                GGML_ASSERT(nb00 == ggml_type_size(src0->type));
-
-                const int64_t nrows = ggml_nrows(src0);
-
-                int nth = 32; // SIMD width
-                while (nth < ne00 && nth*ne01*ne02*ne03 < 256) {
-                    nth *= 2;
-                }
-
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARGMAX].pipeline;
-
-                [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0        atIndex:0];
-                [encoder setBuffer:id_dst  offset:offs_dst         atIndex:1];
-                [encoder setBytes:&ne00    length:sizeof( int64_t) atIndex:2];
-                [encoder setBytes:&nb01    length:sizeof(uint64_t) atIndex:3];
-                [encoder setThreadgroupMemoryLength:32*sizeof(float)   atIndex:0];
-                [encoder setThreadgroupMemoryLength:32*sizeof(int32_t) atIndex:1];
-
-                [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-            } break;
-        default:
-            {
-                GGML_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, idx, ggml_op_name(dst->op));
-                GGML_ABORT("fatal error");
-            }
-    }
-
-    if (ctx_dev->debug_graph > 0) {
-        if (n_fuse > 1) {
-            GGML_LOG_DEBUG("%s:               fuse %d ops\n", __func__, n_fuse);
-        }
-    }
-
-    // update the mem ranges in the encoding context
-    for (int i = 0; i < n_fuse; ++i) {
-        if (!ggml_metal_encode_concurrency_add(ctx_enc, nodes[i])) {
-            ggml_metal_encode_concurrency_reset(ctx_enc);
-        }
-    }
-
-    return n_fuse;
-}
-
-static enum ggml_status ggml_metal_graph_compute(
-            ggml_backend_t   backend,
-        struct ggml_cgraph * gf) {
-    struct ggml_backend_metal_context        * ctx     = backend->context;
-    struct ggml_backend_metal_device_context * ctx_dev = backend->device->context;
-
-    // number of nodes encoded by the main thread (empirically determined)
-    const int n_main = 64;
-
-    // number of threads in addition to the main thread
-    const int n_cb = ctx->n_cb;
-
-    // submit the ggml compute graph to the GPU by creating command buffers and encoding the ops in them
-    // the first n_nodes_0 are encoded and submitted for processing directly by the calling thread
-    // while these nodes are processing, we start n_cb threads to enqueue the rest of the nodes
-    // each thread creates its own command buffer and enqueues the ops in parallel
-    //
-    // tests on M1 Pro and M2 Ultra using LLaMA models show that the optimal values for n_cb are 1 or 2
-
-    @autoreleasepool {
-        ctx->gf = gf;
-
-        ctx->n_nodes_0 = MIN(n_main, gf->n_nodes);
-        ctx->n_nodes_1 = gf->n_nodes - ctx->n_nodes_0;
-
-        ctx->n_nodes_per_cb = (ctx->n_nodes_1 + ctx->n_cb - 1) / ctx->n_cb;
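-        // e.g. (illustrative values) gf->n_nodes = 1000 with n_cb = 2: the main thread encodes the first 64 nodes,
-        // and the remaining 936 are split into ceil(936/2) = 468 nodes per extra command buffer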
-
-        const bool should_capture = ctx->capture_next_compute;
-        if (should_capture) {
-            ctx->capture_next_compute = false;
-
-            // make sure all previous computations have finished before starting the capture
-            if (ctx->cmd_buf_last) {
-                [ctx->cmd_buf_last waitUntilCompleted];
-                ctx->cmd_buf_last = nil;
-            }
-
-            if (!ctx->capture_started) {
-                // create capture scope
-                ctx->capture_scope = [[MTLCaptureManager sharedCaptureManager] newCaptureScopeWithDevice:ctx_dev->mtl_device];
-
-                MTLCaptureDescriptor * descriptor = [MTLCaptureDescriptor new];
-                descriptor.captureObject = ctx->capture_scope;
-                descriptor.destination = MTLCaptureDestinationGPUTraceDocument;
-                descriptor.outputURL = [NSURL fileURLWithPath:[NSString stringWithFormat:@"/tmp/perf-metal.gputrace"]];
-
-                NSError * error = nil;
-                if (![[MTLCaptureManager sharedCaptureManager] startCaptureWithDescriptor:descriptor error:&error]) {
-                    GGML_LOG_ERROR("%s: error: unable to start capture '%s'\n", __func__, [[error localizedDescription] UTF8String]);
-                } else {
-                    [ctx->capture_scope beginScope];
-                    ctx->capture_started = true;
-                }
-            }
-        }
-
-        // the main thread commits the first few commands immediately
-        // cmd_buf[n_cb]
-        {
-            id<MTLCommandBuffer> cmd_buf = [ctx->queue commandBufferWithUnretainedReferences];
-            [cmd_buf retain];
-
-            if (ctx->cmd_bufs[n_cb].obj) {
-                [ctx->cmd_bufs[n_cb].obj release];
-            }
-            ctx->cmd_bufs[n_cb].obj = cmd_buf;
-
-            [cmd_buf enqueue];
-
-            ctx->encode_async(n_cb);
-        }
-
-        // remember the command buffer for the next iteration
-        ctx->cmd_buf_last = ctx->cmd_bufs[n_cb].obj;
-
-        // prepare the rest of the command buffers asynchronously (optional)
-        // cmd_buf[0.. n_cb)
-        for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
-            id<MTLCommandBuffer> cmd_buf = [ctx->queue commandBufferWithUnretainedReferences];
-            [cmd_buf retain];
-
-            if (ctx->cmd_bufs[cb_idx].obj) {
-                [ctx->cmd_bufs[cb_idx].obj release];
-            }
-            ctx->cmd_bufs[cb_idx].obj = cmd_buf;
-
-            // always enqueue the first two command buffers
-            // enqueue all of the command buffers if we don't need to abort
-            if (cb_idx < 2 || ctx->abort_callback == NULL) {
-                [cmd_buf enqueue];
-
-                // update the pointer to the last queued command buffer
-                // this is needed to implement synchronize()
-                ctx->cmd_buf_last = cmd_buf;
-            }
-        }
-
-        dispatch_apply(n_cb, ctx->d_queue, ctx->encode_async);
-
-        // for debugging: block until graph is computed
-        //[ctx->cmd_buf_last waitUntilCompleted];
-
-        // enter here only when capturing in order to wait for all computation to finish
-        // otherwise, we leave the graph to compute asynchronously
-        if (!should_capture && ctx->capture_started) {
-            // wait for completion and check status of each command buffer
-            // needed to detect if the device ran out-of-memory for example (#1881)
-            {
-                id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[n_cb].obj;
-                [cmd_buf waitUntilCompleted];
-
-                MTLCommandBufferStatus status = [cmd_buf status];
-                if (status != MTLCommandBufferStatusCompleted) {
-                    GGML_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, n_cb, status);
-                    if (status == MTLCommandBufferStatusError) {
-                        GGML_LOG_INFO("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
-                    }
-
-                    return GGML_STATUS_FAILED;
-                }
-            }
-
-            for (int i = 0; i < n_cb; ++i) {
-                id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[i].obj;
-                [cmd_buf waitUntilCompleted];
-
-                MTLCommandBufferStatus status = [cmd_buf status];
-                if (status != MTLCommandBufferStatusCompleted) {
-                    GGML_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status);
-                    if (status == MTLCommandBufferStatusError) {
-                        GGML_LOG_INFO("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
-                    }
-
-                    return GGML_STATUS_FAILED;
-                }
-
-                id<MTLCommandBuffer> next_buffer = (i + 1 < n_cb ? ctx->cmd_bufs[i + 1].obj : nil);
-                if (!next_buffer) {
-                    continue;
-                }
-
-                const bool next_queued = ([next_buffer status] != MTLCommandBufferStatusNotEnqueued);
-                if (next_queued) {
-                    continue;
-                }
-
-                if (ctx->abort_callback && ctx->abort_callback(ctx->abort_callback_data)) {
-                    GGML_LOG_INFO("%s: command buffer %d aborted\n", __func__, i);
-                    return GGML_STATUS_ABORTED;
-                }
-
-                [next_buffer commit];
-            }
-
-            [ctx->capture_scope endScope];
-            [[MTLCaptureManager sharedCaptureManager] stopCapture];
-        }
-    }
-
-    return GGML_STATUS_SUCCESS;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// backend interface
-////////////////////////////////////////////////////////////////////////////////
-
-// shared buffer
-
-static void ggml_backend_metal_buffer_shared_free_buffer(ggml_backend_buffer_t buffer) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
-
-    for (int i = 0; i < ctx->n_buffers; i++) {
-        [ctx->buffers[i].metal release];
-    }
-
-    ggml_backend_metal_buffer_rset_free(ctx);
-
-    GGML_ASSERT(ctx->is_shared);
-
-    {
-#if TARGET_OS_OSX
-        vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)ctx->all_data, ctx->all_size);
-#else
-        free(ctx->all_data);
-#endif
-    }
-
-    free(ctx);
-}
-
-static void * ggml_backend_metal_buffer_shared_get_base(ggml_backend_buffer_t buffer) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
-
-    return ctx->all_data;
-}
-
-static void ggml_backend_metal_buffer_shared_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
-
-    GGML_ASSERT(ctx->is_shared);
-
-    memset((char *)tensor->data + offset, value, size);
-}
-
-static void ggml_backend_metal_buffer_shared_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
-
-    GGML_ASSERT(ctx->is_shared);
-
-    memcpy((char *)tensor->data + offset, data, size);
-}
-
-static void ggml_backend_metal_buffer_shared_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
-
-    GGML_ASSERT(ctx->is_shared);
-
-    memcpy(data, (const char *)tensor->data + offset, size);
-}
-
-static bool ggml_backend_metal_buffer_shared_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
-    GGML_UNUSED(buffer);
-    GGML_UNUSED(src);
-    GGML_UNUSED(dst);
-
-    return false;
-}
-
-static void ggml_backend_metal_buffer_shared_clear(ggml_backend_buffer_t buffer, uint8_t value) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
-
-    GGML_ASSERT(ctx->is_shared);
-
-    memset(ctx->all_data, value, ctx->all_size);
-}
-
-static struct ggml_backend_buffer_i ggml_backend_metal_buffer_shared_i = {
-    /* .free_buffer     = */ ggml_backend_metal_buffer_shared_free_buffer,
-    /* .get_base        = */ ggml_backend_metal_buffer_shared_get_base,
-    /* .init_tensor     = */ NULL,
-    /* .memset_tensor   = */ ggml_backend_metal_buffer_shared_memset_tensor,
-    /* .set_tensor      = */ ggml_backend_metal_buffer_shared_set_tensor,
-    /* .get_tensor      = */ ggml_backend_metal_buffer_shared_get_tensor,
-    /* .cpy_tensor      = */ ggml_backend_metal_buffer_shared_cpy_tensor,
-    /* .clear           = */ ggml_backend_metal_buffer_shared_clear,
-    /* .reset           = */ NULL,
-};
-
-// private buffer
-
-static void ggml_backend_metal_buffer_private_free_buffer(ggml_backend_buffer_t buffer) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
-
-    for (int i = 0; i < ctx->n_buffers; i++) {
-        [ctx->buffers[i].metal release];
-    }
-
-    ggml_backend_metal_buffer_rset_free(ctx);
-
-    GGML_ASSERT(!ctx->is_shared);
-
-    free(ctx);
-}
-
-static void * ggml_backend_metal_buffer_private_get_base(ggml_backend_buffer_t buffer) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
-
-    return ctx->all_data;
-}
-
-static void ggml_backend_metal_buffer_private_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
-
-    GGML_ASSERT(!ctx->is_shared);
-
-    @autoreleasepool {
-        // dst
-        size_t buf_dst_offset = 0;
-        id<MTLBuffer> buf_dst = ggml_metal_get_buffer(tensor, &buf_dst_offset);
-
-        buf_dst_offset += offset;
-
-        id<MTLCommandQueue>  queue   = ctx->queue;
-        id<MTLCommandBuffer> cmd_buf = [queue commandBufferWithUnretainedReferences];
-
-        {
-            id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
-
-            [encoder fillBuffer:buf_dst
-                          range:NSMakeRange(buf_dst_offset, size)
-                          value:value];
-
-            [encoder endEncoding];
-        }
-
-        [cmd_buf commit];
-        [cmd_buf waitUntilCompleted];
-    }
-}
-
-static void ggml_backend_metal_buffer_private_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
-
-    GGML_ASSERT(!ctx->is_shared);
-
-    @autoreleasepool {
-        // src
-        void * data_ptr = (void *)(uintptr_t) data; // "const cast" the src data
-        id<MTLBuffer> buf_src = [ctx->device newBufferWithBytesNoCopy:data_ptr
-                                                               length:size
-                                                              options:MTLResourceStorageModeShared
-                                                          deallocator:nil];
-
-        // dst
-        size_t buf_dst_offset = 0;
-        id<MTLBuffer> buf_dst = ggml_metal_get_buffer(tensor, &buf_dst_offset);
-
-        buf_dst_offset += offset;
-
-        // note: for experimentation purposes, here we use a semaphore to wait for the copy to complete
-        //       this is an alternative to waitUntilCompleted, which should be faster, but doesn't seem to make much difference
-        dispatch_semaphore_t completion_semaphore = dispatch_semaphore_create(0);
-
-        id<MTLCommandQueue>  queue   = ctx->queue;
-        id<MTLCommandBuffer> cmd_buf = [queue commandBufferWithUnretainedReferences];
-
-        {
-            id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
-
-            [encoder copyFromBuffer:buf_src
-                       sourceOffset:0
-                           toBuffer:buf_dst
-                  destinationOffset:buf_dst_offset
-                               size:size];
-
-            [encoder endEncoding];
-        }
-
-        [cmd_buf addCompletedHandler:^(id<MTLCommandBuffer> cb) {
-            // TODO: can check for errors here
-            GGML_UNUSED(cb);
-
-            dispatch_semaphore_signal(completion_semaphore);
-        }];
-
-        [cmd_buf commit];
-
-        dispatch_semaphore_wait(completion_semaphore, DISPATCH_TIME_FOREVER);
-        //[cmd_buf waitUntilCompleted];
-    }
-}
-
-static void ggml_backend_metal_buffer_private_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
-
-    GGML_ASSERT(!ctx->is_shared);
-
-    @autoreleasepool {
-        // src
-        size_t buf_src_offset = 0;
-        id<MTLBuffer> buf_src = ggml_metal_get_buffer(tensor, &buf_src_offset);
-
-        buf_src_offset += offset;
-
-        // dst
-        id<MTLBuffer> buf_dst = [ctx->device newBufferWithBytesNoCopy:data
-                                                               length:size
-                                                              options:MTLResourceStorageModeShared
-                                                          deallocator:nil];
-
-        id<MTLCommandQueue>  queue   = ctx->queue;
-        id<MTLCommandBuffer> cmd_buf = [queue commandBufferWithUnretainedReferences];
-
-        {
-            id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
-
-            [encoder copyFromBuffer:buf_src
-                       sourceOffset:buf_src_offset
-                           toBuffer:buf_dst
-                  destinationOffset:0
-                               size:size];
-
-            [encoder endEncoding];
-        }
-
-        [cmd_buf commit];
-        [cmd_buf waitUntilCompleted];
-    }
-}
-
-static bool ggml_backend_metal_buffer_private_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
-    GGML_UNUSED(buffer);
-    GGML_UNUSED(src);
-    GGML_UNUSED(dst);
-
-    return false;
-}
-
-static void ggml_backend_metal_buffer_private_clear(ggml_backend_buffer_t buffer, uint8_t value) {
-    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
-
-    GGML_ASSERT(!ctx->is_shared);
-
-    @autoreleasepool {
-        id<MTLCommandQueue>  queue   = ctx->queue;
-        id<MTLCommandBuffer> cmd_buf = [queue commandBufferWithUnretainedReferences];
-
-        {
-            id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
-
-            [encoder fillBuffer:ctx->buffers[0].metal
-                          range:NSMakeRange(0, ctx->buffers[0].size)
-                          value:value];
-
-            [encoder endEncoding];
-        }
-
-        [cmd_buf commit];
-        [cmd_buf waitUntilCompleted];
-    }
-}
-
-static struct ggml_backend_buffer_i ggml_backend_metal_buffer_private_i = {
-    /* .free_buffer     = */ ggml_backend_metal_buffer_private_free_buffer,
-    /* .get_base        = */ ggml_backend_metal_buffer_private_get_base,
-    /* .init_tensor     = */ NULL,
-    /* .memset_tensor   = */ ggml_backend_metal_buffer_private_memset_tensor,
-    /* .set_tensor      = */ ggml_backend_metal_buffer_private_set_tensor,
-    /* .get_tensor      = */ ggml_backend_metal_buffer_private_get_tensor,
-    /* .cpy_tensor      = */ ggml_backend_metal_buffer_private_cpy_tensor,
-    /* .clear           = */ ggml_backend_metal_buffer_private_clear,
-    /* .reset           = */ NULL,
-};
-
-//
-// buffer types
-//
-
-static void ggml_backend_metal_log_allocated_size(id<MTLDevice> device, size_t size_aligned) {
-#ifndef GGML_METAL_NDEBUG
-#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
-    if (@available(macOS 10.12, iOS 16.0, *)) {
-        GGML_LOG_DEBUG("%s: allocated buffer, size = %8.2f MiB, (%8.2f / %8.2f)\n",
-                __func__,
-                size_aligned / 1024.0 / 1024.0,
-                device.currentAllocatedSize / 1024.0 / 1024.0,
-                device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
-
-        if (device.currentAllocatedSize > device.recommendedMaxWorkingSetSize) {
-            GGML_LOG_WARN("%s: warning: current allocated size is greater than the recommended max working set size\n", __func__);
-        }
-    } else {
-        GGML_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, (%8.2f)\n",
-                __func__,
-                size_aligned / 1024.0 / 1024.0,
-                device.currentAllocatedSize / 1024.0 / 1024.0);
-    }
-#endif
-#endif
-    GGML_UNUSED(device);
-    GGML_UNUSED(size_aligned);
-}
-
-// common method for allocating shared or private Metal buffers
-static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size, bool shared) {
-    struct ggml_backend_metal_buffer_context * ctx = calloc(1, sizeof(struct ggml_backend_metal_buffer_context));
-
-    const size_t size_page = sysconf(_SC_PAGESIZE);
-
-    size_t size_aligned = size;
-    if ((size_aligned % size_page) != 0) {
-        size_aligned += (size_page - (size_aligned % size_page));
-    }
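-    // for example, with a hypothetical 16 KiB page size, a request of 1000000 bytes is rounded up to
-    // 1015808 bytes (62 pages), so the allocation spans a whole number of pages as needed when wrapping
-    // host memory with newBufferWithBytesNoCopy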
-
-    struct ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)buft->device->context;
-
-    GGML_ASSERT(ctx_dev->mtl_device != nil);
-
-    id<MTLDevice> device = ctx_dev->mtl_device;
-
-    // allocate shared buffer if the device supports it and it is required by the buffer type
-    if (ctx_dev->use_shared_buffers && shared) {
-        ctx->all_data = ggml_metal_host_malloc(size_aligned);
-        ctx->is_shared = true;
-    } else {
-        // dummy, non-NULL value - we'll populate this after creating the Metal buffer below
-        ctx->all_data = (void *) 0x000000400ULL;
-        ctx->is_shared = false;
-    }
-    ctx->all_size = size_aligned;
-
-    ctx->device = device;
-    ctx->queue = ctx_dev->mtl_queue;
-
-    ctx->n_buffers = 1;
-
-    if (ctx->all_data != NULL) {
-        ctx->buffers[0].size  = size;
-        ctx->buffers[0].metal = nil;
-
-        if (size_aligned > 0) {
-            if (ctx->is_shared) {
-                ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data
-                                                                  length:size_aligned
-                                                                 options:MTLResourceStorageModeShared
-                                                             deallocator:nil];
-            } else {
-                ctx->buffers[0].metal = [device newBufferWithLength:size_aligned options:MTLResourceStorageModePrivate];
-
-                ctx->all_data = (void *) (ctx->buffers[0].metal.gpuAddress);
-            }
-        }
-
-        ctx->buffers[0].data = ctx->all_data;
-    }
-
-    if (size_aligned > 0 && (ctx->all_data == NULL || ctx->buffers[0].metal == nil)) {
-        GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
-        free(ctx);
-        return NULL;
-    }
-
-    if (!ggml_backend_metal_buffer_rset_init(ctx, ctx_dev, device)) {
-        GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__);
-        free(ctx);
-        return NULL;
-    }
-
-    //ggml_backend_metal_log_allocated_size(device, size_aligned);
-
-    struct ggml_backend_buffer_i buf_i = ctx->is_shared ? ggml_backend_metal_buffer_shared_i : ggml_backend_metal_buffer_private_i;
-
-    return ggml_backend_buffer_init(buft, buf_i, ctx, size);
-}
-
-static size_t ggml_backend_metal_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) {
-    size_t res = ggml_nbytes(tensor);
-
-    // some operations require additional memory for fleeting data:
-    switch (tensor->op) {
-        case GGML_OP_MUL_MAT_ID:
-            {
-                res += ggml_metal_mul_mat_id_extra_tpe(tensor);
-                res += ggml_metal_mul_mat_id_extra_ids(tensor);
-            } break;
-        case GGML_OP_FLASH_ATTN_EXT:
-            {
-                if (ggml_metal_flash_attn_ext_use_vec(tensor)) {
-                    res += ggml_metal_flash_attn_ext_extra_tmp(tensor);
-                }
-            } break;
-        default:
-            break;
-    }
-
-    return res;
-
-    GGML_UNUSED(buft);
-}
-
-// default (shared) buffer type
-
-static const char * ggml_backend_metal_buffer_type_shared_get_name(ggml_backend_buffer_type_t buft) {
-    return "Metal";
-
-    GGML_UNUSED(buft);
-}
-
-static ggml_backend_buffer_t ggml_backend_metal_buffer_type_shared_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
-    return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, true);
-}
-
-static size_t ggml_backend_metal_buffer_type_shared_get_alignment(ggml_backend_buffer_type_t buft) {
-    return 32;
-
-    GGML_UNUSED(buft);
-}
-
-static size_t ggml_backend_metal_buffer_type_shared_get_max_size(ggml_backend_buffer_type_t buft) {
-    const size_t max_size = ((struct ggml_backend_metal_device_context *)buft->device->context)->max_size;
-
-    return max_size;
-}
-
-static size_t ggml_backend_metal_buffer_type_shared_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) {
-    return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor);
-}
-
-static bool ggml_backend_metal_buffer_type_shared_is_host(ggml_backend_buffer_type_t buft) {
-    return false;
-
-    GGML_UNUSED(buft);
-}
-
-static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_shared(void) {
-    static struct ggml_backend_buffer_type ggml_backend_buffer_type_metal = {
-        /* .iface = */ {
-            /* .get_name         = */ ggml_backend_metal_buffer_type_shared_get_name,
-            /* .alloc_buffer     = */ ggml_backend_metal_buffer_type_shared_alloc_buffer,
-            /* .get_alignment    = */ ggml_backend_metal_buffer_type_shared_get_alignment,
-            /* .get_max_size     = */ ggml_backend_metal_buffer_type_shared_get_max_size,
-            /* .get_alloc_size   = */ ggml_backend_metal_buffer_type_shared_get_alloc_size,
-            /* .is_host          = */ ggml_backend_metal_buffer_type_shared_is_host,
-        },
-        /* .device  = */ &g_ggml_backend_metal_device,
-        /* .context = */ NULL,
-    };
-
-    return &ggml_backend_buffer_type_metal;
-}
-
-// default (private) buffer type
-
-static const char * ggml_backend_metal_buffer_type_private_get_name(ggml_backend_buffer_type_t buft) {
-    return "Metal_Private";
-
-    GGML_UNUSED(buft);
-}
-
-static ggml_backend_buffer_t ggml_backend_metal_buffer_type_private_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
-    return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, false);
-}
-
-static size_t ggml_backend_metal_buffer_type_private_get_alignment(ggml_backend_buffer_type_t buft) {
-    return 32;
-
-    GGML_UNUSED(buft);
-}
-
-static size_t ggml_backend_metal_buffer_type_private_get_max_size(ggml_backend_buffer_type_t buft) {
-    const size_t max_size = ((struct ggml_backend_metal_device_context *)buft->device->context)->max_size;
-
-    return max_size;
-}
-
-static size_t ggml_backend_metal_buffer_type_private_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) {
-    return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor);
-}
-
-static bool ggml_backend_metal_buffer_type_private_is_host(ggml_backend_buffer_type_t buft) {
-    return false;
-
-    GGML_UNUSED(buft);
-}
-
-static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_private(void) {
-    static struct ggml_backend_buffer_type ggml_backend_buffer_type_metal = {
-        /* .iface = */ {
-            /* .get_name         = */ ggml_backend_metal_buffer_type_private_get_name,
-            /* .alloc_buffer     = */ ggml_backend_metal_buffer_type_private_alloc_buffer,
-            /* .get_alignment    = */ ggml_backend_metal_buffer_type_private_get_alignment,
-            /* .get_max_size     = */ ggml_backend_metal_buffer_type_private_get_max_size,
-            /* .get_alloc_size   = */ ggml_backend_metal_buffer_type_private_get_alloc_size,
-            /* .is_host          = */ ggml_backend_metal_buffer_type_private_is_host,
-        },
-        /* .device  = */ &g_ggml_backend_metal_device,
-        /* .context = */ NULL,
-    };
-
-    return &ggml_backend_buffer_type_metal;
-}
-
-// mapped buffer type
-
-static const char * ggml_backend_metal_buffer_type_mapped_get_name(ggml_backend_buffer_type_t buft) {
-    return "Metal_Mapped";
-
-    GGML_UNUSED(buft);
-}
-
-static ggml_backend_buffer_t ggml_backend_metal_buffer_type_mapped_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
-    // for mapped buffers, prefer shared memory
-    return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, true);
-}
-
-static size_t ggml_backend_metal_buffer_type_mapped_get_alignment(ggml_backend_buffer_type_t buft) {
-    return 32;
-
-    GGML_UNUSED(buft);
-}
-
-static size_t ggml_backend_metal_buffer_type_mapped_get_max_size(ggml_backend_buffer_type_t buft) {
-    const size_t max_size = ((struct ggml_backend_metal_device_context *)buft->device->context)->max_size;
-
-    return max_size;
-}
-
-static size_t ggml_backend_metal_buffer_type_mapped_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) {
-    return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor);
-}
-
-static bool ggml_backend_metal_buffer_type_mapped_is_host(ggml_backend_buffer_type_t buft) {
-    return false;
-
-    GGML_UNUSED(buft);
-}
-
-static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_mapped(void) {
-    // note: not obvious, but this buffer type still needs to implement .alloc_buffer:
-    //       https://github.com/ggml-org/llama.cpp/pull/15832#discussion_r2333177099
-    static struct ggml_backend_buffer_type ggml_backend_buffer_type_mapped_metal = {
-        /* .iface = */ {
-            /* .get_name         = */ ggml_backend_metal_buffer_type_mapped_get_name,
-            /* .alloc_buffer     = */ ggml_backend_metal_buffer_type_mapped_alloc_buffer,
-            /* .get_alignment    = */ ggml_backend_metal_buffer_type_mapped_get_alignment,
-            /* .get_max_size     = */ ggml_backend_metal_buffer_type_mapped_get_max_size,
-            /* .get_alloc_size   = */ ggml_backend_metal_buffer_type_mapped_get_alloc_size,
-            /* .is_host          = */ ggml_backend_metal_buffer_type_mapped_is_host,
-        },
-        /* .device  = */ &g_ggml_backend_metal_device,
-        /* .context = */ NULL,
-    };
-
-    return &ggml_backend_buffer_type_mapped_metal;
-}
-
-// backend
-
-static const char * ggml_backend_metal_name(ggml_backend_t backend) {
-    return "Metal";
-
-    GGML_UNUSED(backend);
-}
-
-static void ggml_backend_metal_free(ggml_backend_t backend) {
-    struct ggml_backend_metal_context * ctx = backend->context;
-
-    ggml_metal_free(ctx);
-
-    free(backend);
-}
-
-static void ggml_backend_metal_synchronize(ggml_backend_t backend) {
-    struct ggml_backend_metal_context * ctx = backend->context;
-
-    // wait for any backend operations to finish
-    if (ctx->cmd_buf_last) {
-        [ctx->cmd_buf_last waitUntilCompleted];
-        ctx->cmd_buf_last = nil;
-    }
-
-    // release any completed command buffers
-    if (ctx->cmd_bufs_ext.count > 0) {
-        for (size_t i = 0; i < ctx->cmd_bufs_ext.count; ++i) {
-            id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs_ext[i];
-
-            MTLCommandBufferStatus status = [cmd_buf status];
-            if (status != MTLCommandBufferStatusCompleted) {
-                GGML_LOG_ERROR("%s: error: command buffer %d failed with status %d\n", __func__, (int) i, (int) status);
-                if (status == MTLCommandBufferStatusError) {
-                    GGML_LOG_ERROR("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
-                }
-                GGML_ABORT("fatal error");
-            }
-
-            [cmd_buf release];
-        }
-
-        [ctx->cmd_bufs_ext removeAllObjects];
-    }
-}
-
-static void ggml_backend_metal_set_tensor_async(ggml_backend_t backend,       struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
-    struct ggml_backend_metal_context        * ctx     = backend->context;
-    struct ggml_backend_metal_device_context * ctx_dev = backend->device->context;
-
-    @autoreleasepool {
-        id<MTLDevice> device = ctx_dev->mtl_device;
-
-        // wrap the source data into a Metal buffer
-        id<MTLBuffer> buf_src = [device newBufferWithBytes:data
-                                                    length:size
-                                                   options:MTLResourceStorageModeShared];
-
-        size_t buf_dst_offset = 0;
-        id<MTLBuffer> buf_dst = ggml_metal_get_buffer(tensor, &buf_dst_offset);
-
-        if (buf_dst == nil) {
-            GGML_ABORT("%s: failed to find buffer for tensor '%s'\n", __func__, tensor->name);
-        }
-
-        buf_dst_offset += offset;
-
-        // queue the copy operation into the queue of the Metal context
-        // this will be queued at the end, after any currently ongoing GPU operations
-        id<MTLCommandBuffer> cmd_buf = [ctx->queue commandBufferWithUnretainedReferences];
-        id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
-
-        [encoder copyFromBuffer:buf_src
-                   sourceOffset:0
-                       toBuffer:buf_dst
-              destinationOffset:buf_dst_offset
-                           size:size];
-
-        [encoder endEncoding];
-        [cmd_buf commit];
-
-        // do not wait here for completion
-        //[cmd_buf waitUntilCompleted];
-
-        // instead, remember a reference to the command buffer and wait for it later if needed
-        [ctx->cmd_bufs_ext addObject:cmd_buf];
-        ctx->cmd_buf_last = cmd_buf;
-
-        [cmd_buf retain];
-    }
-}
-
-static void ggml_backend_metal_get_tensor_async(ggml_backend_t backend, const struct ggml_tensor * tensor,       void * data, size_t offset, size_t size) {
-    struct ggml_backend_metal_context        * ctx     = backend->context;
-    struct ggml_backend_metal_device_context * ctx_dev = backend->device->context;
-
-    @autoreleasepool {
-        id<MTLDevice> device = ctx_dev->mtl_device;
-
-        id<MTLBuffer> buf_dst = [device newBufferWithBytesNoCopy:data
-                                                          length:size
-                                                         options:MTLResourceStorageModeShared
-                                                     deallocator:nil];
-
-        size_t buf_src_offset = 0;
-        id<MTLBuffer> buf_src = ggml_metal_get_buffer(tensor, &buf_src_offset);
-
-        if (buf_src == nil) {
-            GGML_ABORT("%s: failed to find buffer for tensor '%s'\n", __func__, tensor->name);
-        }
-
-        buf_src_offset += offset;
-
-        // queue the copy operation into the queue of the Metal context
-        // this will be queued at the end, after any currently ongoing GPU operations
-        id<MTLCommandBuffer> cmd_buf = [ctx->queue commandBufferWithUnretainedReferences];
-        id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
-
-        [encoder copyFromBuffer:buf_src
-                   sourceOffset:buf_src_offset
-                       toBuffer:buf_dst
-              destinationOffset:0
-                           size:size];
-
-        [encoder endEncoding];
-        [cmd_buf commit];
-
-        // do not wait here for completion
-        //[cmd_buf waitUntilCompleted];
-
-        // instead, remember a reference to the command buffer and wait for it later if needed
-        [ctx->cmd_bufs_ext addObject:cmd_buf];
-        ctx->cmd_buf_last = cmd_buf;
-
-        [cmd_buf retain];
-    }
-}
-
-static bool ggml_backend_metal_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const struct ggml_tensor * src, struct ggml_tensor * dst) {
-    return false;
-
-    GGML_UNUSED(backend_src);
-    GGML_UNUSED(backend_dst);
-    GGML_UNUSED(src);
-    GGML_UNUSED(dst);
-}
-
-static enum ggml_status ggml_backend_metal_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
-    return ggml_metal_graph_compute(backend, cgraph);
-}
-
-static void ggml_backend_metal_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
-    struct ggml_backend_metal_device_context * ctx_dev = backend->device->context;
-
-    //const int64_t t_start = ggml_time_us();
-
-    if (ctx_dev->use_graph_optimize) {
-        ggml_metal_graph_optimize(cgraph);
-    }
-
-    //printf("%s: graph optimize took %.3f ms\n", __func__, (ggml_time_us() - t_start) / 1000.0);
-}
-
-static void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) {
-    GGML_ASSERT(ggml_backend_is_metal(backend));
-
-    struct ggml_backend_metal_context * ctx = (struct ggml_backend_metal_context *)backend->context;
-
-    if (ctx->n_cb != n_cb) {
-        ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_COMMAND_BUFFERS);
-
-        if (ctx->n_cb > 2) {
-            GGML_LOG_WARN("%s: n_cb = %d, using n_cb > 2 is not recommended and can degrade the performance in some cases\n", __func__, n_cb);
-        }
-    }
-
-    if (ctx->encode_async) {
-        Block_release(ctx->encode_async);
-    }
-
-    ctx->encode_async = Block_copy(^(size_t iter) {
-        const int cb_idx = iter;
-        const int n_cb_l = ctx->n_cb;
-
-        const int n_nodes_0 = ctx->n_nodes_0;
-        const int n_nodes_1 = ctx->n_nodes_1;
-
-        const int n_nodes_per_cb = ctx->n_nodes_per_cb;
-
-        id<MTLCommandBuffer>         cmd_buf    = ctx->cmd_bufs[cb_idx].obj;
-        struct ggml_mem_ranges     * mem_ranges = ctx->cmd_bufs[cb_idx].mem_ranges;
-
-        if (mem_ranges) {
-            ggml_mem_ranges_reset(mem_ranges);
-        }
-
-        id<MTLComputeCommandEncoder> encoder;
-
-        struct ggml_backend_metal_device_context * ctx_dev = backend->device->context;
-
-        if (ctx_dev->use_concurrency) {
-            encoder = [cmd_buf computeCommandEncoderWithDispatchType: MTLDispatchTypeConcurrent];
-        } else {
-            encoder = [cmd_buf computeCommandEncoder];
-        }
-
-        int node_start = 0;
-        int node_end   = n_nodes_0;
-
-        if (cb_idx < n_cb_l) {
-            node_start = n_nodes_0 + (                                         (cb_idx + 0) * n_nodes_per_cb);
-            node_end   = n_nodes_0 + (MIN((cb_idx == n_cb_l - 1) ? n_nodes_1 : (cb_idx + 1) * n_nodes_per_cb, n_nodes_1));
-        }
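-        // for example, with hypothetical sizes n_nodes_0 = 64, n_nodes_1 = 136, n_nodes_per_cb = 68 and
-        // n_cb = 2: cb_idx == n_cb encodes nodes [0, 64), cb_idx == 0 encodes [64, 132) and
-        // cb_idx == 1 encodes [132, 200)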
-
-        const bool should_capture = ctx->capture_next_compute;
-
-        struct ggml_metal_encode_context ctx_enc = {
-            /*.backend    =*/ backend,
-            /*.encoder    =*/ encoder,
-            /*.mem_ranges =*/ mem_ranges,
-        };
-
-        for (int idx = node_start; idx < node_end;) {
-            if (should_capture) {
-                [encoder pushDebugGroup:[NSString stringWithCString:ggml_op_desc(ggml_graph_node(ctx->gf, idx)) encoding:NSUTF8StringEncoding]];
-            }
-
-            const int res = ggml_metal_encode_node(&ctx_enc, idx, node_end);
-            if (idx + res > node_end) {
-                GGML_ABORT("fusion error: nodes spanning multiple encoders have been fused. this indicates a bug in the fusion logic %s",
-                        "https://github.com/ggml-org/llama.cpp/pull/14849");
-            }
-
-            if (should_capture) {
-                [encoder popDebugGroup];
-            }
-
-            if (res == 0) {
-                break;
-            }
-
-            idx += res;
-        }
-
-        [encoder endEncoding];
-
-        if (cb_idx < 2 || ctx->abort_callback == NULL) {
-            [cmd_buf commit];
-        }
-    });
-}
-
-static struct ggml_backend_i ggml_backend_metal_i = {
-    /* .get_name                = */ ggml_backend_metal_name,
-    /* .free                    = */ ggml_backend_metal_free,
-    /* .set_tensor_async        = */ ggml_backend_metal_set_tensor_async,
-    /* .get_tensor_async        = */ ggml_backend_metal_get_tensor_async,
-    /* .cpy_tensor_async        = */ ggml_backend_metal_cpy_tensor_async, // only needed for multi-GPU setups
-    /* .synchronize             = */ ggml_backend_metal_synchronize,
-    /* .graph_plan_create       = */ NULL,
-    /* .graph_plan_free         = */ NULL,
-    /* .graph_plan_update       = */ NULL,
-    /* .graph_plan_compute      = */ NULL,
-    /* .graph_compute           = */ ggml_backend_metal_graph_compute,
-
-    // the events API is needed only for multi-GPU setups, so likely no need to implement it for Metal
-    // in any case, these docs seem relevant if we ever decide to implement it:
-    // https://developer.apple.com/documentation/metal/mtlcommandbuffer#Synchronizing-Passes-with-Events
-    /* .event_record            = */ NULL,
-    /* .event_wait              = */ NULL,
-    /* .optimize_graph          = */ ggml_backend_metal_graph_optimize,
-};
-
-static ggml_guid_t ggml_backend_metal_guid(void) {
-    static ggml_guid guid = { 0x81, 0xa1, 0x8b, 0x1e, 0x71, 0xec, 0x79, 0xed, 0x2b, 0x85, 0xdc, 0x8a, 0x61, 0x98, 0x30, 0xe6 };
-    return &guid;
-}
-
-// TODO: remove in the future
-ggml_backend_t ggml_backend_metal_init(void) {
-    ggml_backend_dev_t dev = ggml_backend_reg_dev_get(ggml_backend_metal_reg(), 0);
-
-    struct ggml_backend_metal_context * ctx = ggml_metal_init(dev);
-    if (ctx == NULL) {
-        GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
-        return NULL;
-    }
-
-    ggml_backend_t backend = malloc(sizeof(struct ggml_backend));
-
-    *backend = (struct ggml_backend) {
-        /* .guid      = */ ggml_backend_metal_guid(),
-        /* .interface = */ ggml_backend_metal_i,
-        /* .device    = */ dev,
-        /* .context   = */ ctx,
-    };
-
-    ggml_backend_metal_set_n_cb(backend, 1);
-
-    return backend;
-}
-
-bool ggml_backend_is_metal(ggml_backend_t backend) {
-    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_metal_guid());
-}
-
-void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data) {
-    GGML_ASSERT(ggml_backend_is_metal(backend));
-
-    struct ggml_backend_metal_context * ctx = (struct ggml_backend_metal_context *)backend->context;
-
-    ctx->abort_callback = abort_callback;
-    ctx->abort_callback_data = user_data;
-}
-
-bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family) {
-    GGML_ASSERT(ggml_backend_is_metal(backend));
-
-    struct ggml_backend_metal_device_context * ctx_dev = backend->device->context;
-
-    GGML_ASSERT(ctx_dev->mtl_device != nil);
-
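-    // the MTLGPUFamilyApple* values are consecutive, so for example family == 7 maps to MTLGPUFamilyApple7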
-    return [ctx_dev->mtl_device supportsFamily:(MTLGPUFamilyApple1 + family - 1)];
-}
-
-void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) {
-    GGML_ASSERT(ggml_backend_is_metal(backend));
-
-    struct ggml_backend_metal_context * ctx = (struct ggml_backend_metal_context *)backend->context;
-    ctx->capture_next_compute = true;
-}
-
-// backend device
-
-static const char * ggml_backend_metal_device_get_name(ggml_backend_dev_t dev) {
-    return "Metal";
-
-    GGML_UNUSED(dev);
-}
-
-static const char * ggml_backend_metal_device_get_description(ggml_backend_dev_t dev) {
-    struct ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)dev->context;
-
-    return ctx_dev->name;
-}
-
-static void ggml_backend_metal_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
-    if (@available(macOS 10.12, iOS 16.0, *)) {
-        struct ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)dev->context;
-        id<MTLDevice> device = ctx_dev->mtl_device;
-
-        *total = device.recommendedMaxWorkingSetSize;
-        *free  = *total - device.currentAllocatedSize;
-    } else {
-        *free = 1;
-        *total = 1;
-    }
-}
-
-static enum ggml_backend_dev_type ggml_backend_metal_device_get_type(ggml_backend_dev_t dev) {
-    return GGML_BACKEND_DEVICE_TYPE_GPU;
-
-    GGML_UNUSED(dev);
-}
-
-static void ggml_backend_metal_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
-    props->name        = ggml_backend_metal_device_get_name(dev);
-    props->description = ggml_backend_metal_device_get_description(dev);
-    props->type        = ggml_backend_metal_device_get_type(dev);
-    ggml_backend_metal_device_get_memory(dev, &props->memory_free, &props->memory_total);
-    props->caps = (struct ggml_backend_dev_caps) {
-        /* .async                 = */ true,
-        /* .host_buffer           = */ false,
-        /* .buffer_from_host_ptr  = */ true,
-        /* .events                = */ false,
-    };
-}
-
-static ggml_backend_t ggml_backend_metal_device_init(ggml_backend_dev_t dev, const char * params) {
-    struct ggml_backend_metal_context * ctx = ggml_metal_init(dev);
-    if (ctx == NULL) {
-        GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
-        return NULL;
-    }
-
-    ggml_backend_t backend = malloc(sizeof(struct ggml_backend));
-
-    *backend = (struct ggml_backend) {
-        /* .guid      = */ ggml_backend_metal_guid(),
-        /* .interface = */ ggml_backend_metal_i,
-        /* .device    = */ dev,
-        /* .context   = */ ctx,
-    };
-
-    ggml_backend_metal_set_n_cb(backend, 1);
-
-    return backend;
-
-    GGML_UNUSED(params);
-}
-
-static ggml_backend_buffer_type_t ggml_backend_metal_device_get_buffer_type(ggml_backend_dev_t dev) {
-    struct ggml_backend_metal_device_context * ctx_dev = dev->context;
-
-    return ctx_dev->use_shared_buffers ? ggml_backend_metal_buffer_type_shared() : ggml_backend_metal_buffer_type_private();
-}
-
-static ggml_backend_buffer_t ggml_backend_metal_device_buffer_mapped(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) {
-    struct ggml_backend_metal_buffer_context * ctx = calloc(1, sizeof(struct ggml_backend_metal_buffer_context));
-
-    ctx->all_data = ptr;
-    ctx->all_size = size;
-
-    ctx->is_shared = true;
-
-    ctx->n_buffers = 0;
-
-    const size_t size_page = sysconf(_SC_PAGESIZE);
-
-    // page-align the data ptr
-    {
-        const uintptr_t offs = (uintptr_t) ptr % size_page;
-        ptr  = (void *) ((char *) ptr - offs);
-        size += offs;
-    }
-
-    size_t size_aligned = size;
-    if ((size_aligned % size_page) != 0) {
-        size_aligned += (size_page - (size_aligned % size_page));
-    }
-
-    struct ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)dev->context;
-
-    GGML_ASSERT(ctx_dev->mtl_device != nil);
-
-    id<MTLDevice> device = ctx_dev->mtl_device;
-
-    ctx->device = device;
-    ctx->queue = ctx_dev->mtl_queue;
-
-    // the buffer fits into the max buffer size allowed by the device
-    if (size_aligned <= device.maxBufferLength) {
-        ctx->buffers[ctx->n_buffers].data  = ptr;
-        ctx->buffers[ctx->n_buffers].size  = size;
-        ctx->buffers[ctx->n_buffers].metal = nil;
-
-        if (size_aligned > 0) {
-            ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:ptr length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];
-
-            if (ctx->buffers[ctx->n_buffers].metal == nil) {
-                GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
-                return NULL;
-            }
-        }
-
-        ggml_backend_metal_log_allocated_size(device, size_aligned);
-
-        ++ctx->n_buffers;
-    } else {
-        // this overlap between the views will guarantee that the tensor with the maximum size will fully fit into
-        // one of the views
-        const size_t size_ovlp = ((max_tensor_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case
-        const size_t size_step = device.maxBufferLength - size_ovlp;
-        const size_t size_view = device.maxBufferLength;
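-        // for example, with hypothetical sizes maxBufferLength = 1 GiB and max_tensor_size = 100 MiB, each
-        // view spans 1 GiB and the step between views is ~924 MiB, so consecutive views overlap by ~100 MiB
-        // and any single tensor fits entirely inside at least one view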
-
-        for (size_t i = 0; i < size; i += size_step) {
-            const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i);
-
-            ctx->buffers[ctx->n_buffers].data  = (void *) ((uint8_t *) ptr + i);
-            ctx->buffers[ctx->n_buffers].size  = size_step_aligned;
-            ctx->buffers[ctx->n_buffers].metal = nil;
-
-            if (size_step_aligned > 0) {
-                ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:(void *) ((uint8_t *) ptr + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];
-
-                if (ctx->buffers[ctx->n_buffers].metal == nil) {
-                    GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_step_aligned / 1024.0 / 1024.0);
-                    return NULL;
-                }
-            }
-
-            ggml_backend_metal_log_allocated_size(device, size_step_aligned);
-
-            if (i + size_step < size) {
-                GGML_LOG_INFO("\n");
-            }
-
-            ++ctx->n_buffers;
-        }
-    }
-
-    if (!ggml_backend_metal_buffer_rset_init(ctx, ctx_dev, device)) {
-        GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__);
-        free(ctx);
-        return NULL;
-    }
-
-    return ggml_backend_buffer_init(ggml_backend_metal_buffer_type_mapped(), ggml_backend_metal_buffer_shared_i, ctx, size);
-}
-
-static bool ggml_backend_metal_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
-    struct ggml_backend_metal_device_context * ctx_dev = dev->context;
-
-    return ggml_metal_supports_op(ctx_dev, op);
-}
-
-static bool ggml_backend_metal_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
-    return
-        buft->iface.get_name == ggml_backend_metal_buffer_type_shared_get_name ||
-        buft->iface.get_name == ggml_backend_metal_buffer_type_private_get_name ||
-        buft->iface.get_name == ggml_backend_metal_buffer_type_mapped_get_name;
-
-    GGML_UNUSED(dev);
-}
-
-static int64_t get_op_batch_size(const struct ggml_tensor * op) {
-    switch (op->op) {
-        case GGML_OP_MUL_MAT:
-            return op->ne[1];
-        case GGML_OP_MUL_MAT_ID:
-            return op->ne[2];
-        default:
-            return ggml_nrows(op);
-    }
-}
-
-static bool ggml_backend_metal_device_offload_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
-    const int min_batch_size = 32;
-
-    return (op->op == GGML_OP_MUL_MAT ||
-            op->op == GGML_OP_MUL_MAT_ID) &&
-            get_op_batch_size(op) >= min_batch_size;
-
-    GGML_UNUSED(dev);
-    GGML_UNUSED(op);
-}
-
-static struct ggml_backend_device_i ggml_backend_metal_device_i = {
-    /* .get_name             = */ ggml_backend_metal_device_get_name,
-    /* .get_description      = */ ggml_backend_metal_device_get_description,
-    /* .get_memory           = */ ggml_backend_metal_device_get_memory,
-    /* .get_type             = */ ggml_backend_metal_device_get_type,
-    /* .get_props            = */ ggml_backend_metal_device_get_props,
-    /* .init_backend         = */ ggml_backend_metal_device_init,
-    /* .get_buffer_type      = */ ggml_backend_metal_device_get_buffer_type,
-    /* .get_host_buffer_type = */ NULL,
-    /* .buffer_from_host_ptr = */ ggml_backend_metal_device_buffer_mapped,
-    /* .supports_op          = */ ggml_backend_metal_device_supports_op,
-    /* .supports_buft        = */ ggml_backend_metal_device_supports_buft,
-    /* .offload_op           = */ ggml_backend_metal_device_offload_op,
-    /* .event_new            = */ NULL,
-    /* .event_free           = */ NULL,
-    /* .event_synchronize    = */ NULL,
-};
-
-// backend registry
-
-static const char * ggml_backend_metal_reg_get_name(ggml_backend_reg_t reg) {
-    return "Metal";
-
-    GGML_UNUSED(reg);
-}
-
-static size_t ggml_backend_metal_reg_device_count(ggml_backend_reg_t reg) {
-    return 1;
-
-    GGML_UNUSED(reg);
-}
-
-static ggml_backend_dev_t ggml_backend_metal_reg_device_get(ggml_backend_reg_t reg, size_t index) {
-    GGML_ASSERT(index == 0);
-
-    return &g_ggml_backend_metal_device;
-
-    GGML_UNUSED(reg);
-    GGML_UNUSED(index);
-}
-
-static struct ggml_backend_feature g_ggml_backend_metal_features[] = {
-#if defined(GGML_METAL_EMBED_LIBRARY)
-    { "EMBED_LIBRARY", "1" },
-#endif
-#if defined(GGML_METAL_USE_BF16)
-    { "BF16", "1" },
-#endif
-    { nil, nil },
-};
-
-static struct ggml_backend_feature * ggml_backend_metal_get_features(ggml_backend_reg_t reg) {
-    return g_ggml_backend_metal_features;
-
-    GGML_UNUSED(reg);
-}
-
-static void * ggml_backend_metal_get_proc_address(ggml_backend_reg_t reg, const char * name) {
-    if (strcmp(name, "ggml_backend_get_features") == 0) {
-        return (void *)ggml_backend_metal_get_features;
-    }
-
-    return NULL;
-
-    GGML_UNUSED(reg);
-}
-
-static struct ggml_backend_reg_i ggml_backend_metal_reg_i = {
-    /* .get_name         = */ ggml_backend_metal_reg_get_name,
-    /* .device_count     = */ ggml_backend_metal_reg_device_count,
-    /* .device_get       = */ ggml_backend_metal_reg_device_get,
-    /* .get_proc_address = */ ggml_backend_metal_get_proc_address,
-};
-
-// called upon program exit
-static void ggml_metal_cleanup(void) {
-    ggml_backend_metal_device_rel(&g_ggml_ctx_dev_main);
-}
-
-// TODO: make thread-safe
-ggml_backend_reg_t ggml_backend_metal_reg(void) {
-    ggml_backend_metal_device_acq(&g_ggml_ctx_dev_main);
-
-    // register cleanup callback
-    // TODO: not ideal, but not sure if there is a better way to do this in Objective-C
-    atexit(ggml_metal_cleanup);
-
-    {
-        g_ggml_backend_metal_reg = (struct ggml_backend_reg) {
-            /* .api_version = */ GGML_BACKEND_API_VERSION,
-            /* .iface       = */ ggml_backend_metal_reg_i,
-            /* .context     = */ NULL,
-        };
-
-        g_ggml_backend_metal_device = (struct ggml_backend_device) {
-            /* .iface   = */ ggml_backend_metal_device_i,
-            /* .reg     = */ &g_ggml_backend_metal_reg,
-            /* .context = */ &g_ggml_ctx_dev_main,
-        };
-    }
-
-    return &g_ggml_backend_metal_reg;
-}
-
-GGML_BACKEND_DL_IMPL(ggml_backend_metal_reg)
index 5057e264f6090b9a409bd10163c99099b89ee251..f34b89e590b791c46738ccf0e5f44b2b45381fca 100644 (file)
@@ -27,11 +27,11 @@ using namespace metal;
 //   .../usr/bin/metal -dM -E -c                             ggml/src/ggml-metal/ggml-metal.metal
 //   .../usr/bin/metal -dM -E -c -target air64-apple-ios14.0 ggml/src/ggml-metal/ggml-metal.metal
 //
-#if __METAL_VERSION__ < 310 && defined(GGML_METAL_USE_BF16)
-#undef GGML_METAL_USE_BF16
+#if __METAL_VERSION__ < 310 && defined(GGML_METAL_HAS_BF16)
+#undef GGML_METAL_HAS_BF16
 #endif
 
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 typedef matrix<bfloat, 4, 4> bfloat4x4;
 #endif
 
@@ -87,7 +87,7 @@ void dequantize_f16_t4(device const half4 * src, short il, thread type4 & reg) {
     reg = (type4)(*(src));
 }
 
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template <typename type4x4>
 void dequantize_bf16(device const bfloat4x4 * src, short il, thread type4x4 & reg) {
     reg = (type4x4)(*src);
@@ -1222,53 +1222,78 @@ typedef decltype(kernel_div_row_c4_fuse_impl<1>) kernel_div_row_c4_fuse_t;
 
 template [[host_name("kernel_div_row_c4_fuse_1")]] kernel kernel_div_row_c4_fuse_t kernel_div_row_c4_fuse_impl<1>;
 
-kernel void kernel_scale(
+kernel void kernel_scale_f32(
+        constant ggml_metal_kargs_scale & args,
         device const float * src0,
         device       float * dst,
-        constant     float & scale,
-        constant     float & bias,
         uint tpig[[thread_position_in_grid]]) {
-    dst[tpig] = src0[tpig] * scale + bias;
+    dst[tpig] = src0[tpig] * args.scale + args.bias;
 }
 
-kernel void kernel_scale_4(
+kernel void kernel_scale_f32_4(
+        constant ggml_metal_kargs_scale & args,
         device const float4 * src0,
         device       float4 * dst,
-        constant     float  & scale,
-        constant     float  & bias,
         uint tpig[[thread_position_in_grid]]) {
-    dst[tpig] = src0[tpig] * scale + bias;
+    dst[tpig] = src0[tpig] * args.scale + args.bias;
 }
 
-kernel void kernel_clamp(
+kernel void kernel_clamp_f32(
+        constant ggml_metal_kargs_clamp & args,
         device const float * src0,
         device       float * dst,
-        constant     float & min,
-        constant     float & max,
         uint tpig[[thread_position_in_grid]]) {
-    dst[tpig] = src0[tpig] < min ? min : (src0[tpig] > max ? max : src0[tpig]);
+    dst[tpig] = clamp(src0[tpig], args.min, args.max);
 }
 
-kernel void kernel_relu(
+kernel void kernel_clamp_f32_4(
+        constant ggml_metal_kargs_clamp & args,
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = clamp(src0[tpig], args.min, args.max);
+}
+
+kernel void kernel_relu_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
     dst[tpig] = max(0.0f, src0[tpig]);
 }
 
-kernel void kernel_sigmoid(
+kernel void kernel_relu_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = max(0.0f, src0[tpig]);
+}
+
+kernel void kernel_sigmoid_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
     dst[tpig] = 1.0f / (1.0f + exp(-src0[tpig]));
 }
 
-kernel void kernel_tanh(
+kernel void kernel_sigmoid_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = 1.0f / (1.0f + exp(-src0[tpig]));
+}
+
+kernel void kernel_tanh_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
-    device const float & x = src0[tpig];
-    dst[tpig] = precise::tanh(x);
+    dst[tpig] = precise::tanh(src0[tpig]);
+}
+
+kernel void kernel_tanh_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = precise::tanh(src0[tpig]);
 }
 
 constant float GELU_COEF_A     = 0.044715f;
@@ -1276,7 +1301,7 @@ constant float GELU_QUICK_COEF = -1.702f;
 constant float SQRT_2_OVER_PI  = 0.79788456080286535587989211986876f;
 constant float SQRT_2_INV      = 0.70710678118654752440084436210484f;
 
-kernel void kernel_gelu(
+kernel void kernel_gelu_f32(
     device const float * src0,
     device       float * dst,
     uint tpig[[thread_position_in_grid]]) {
@@ -1285,7 +1310,7 @@ kernel void kernel_gelu(
     dst[tpig] = 0.5f*x*(1.0f + precise::tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
 }
 
-kernel void kernel_gelu_4(
+kernel void kernel_gelu_f32_4(
     device const float4 * src0,
     device       float4 * dst,
     uint tpig[[thread_position_in_grid]]) {
@@ -1298,7 +1323,7 @@ kernel void kernel_gelu_4(
     dst[tpig] = 0.5f*x*(1.0f + precise::tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
 }
 
-kernel void kernel_gelu_quick(
+kernel void kernel_gelu_quick_f32(
     device const float * src0,
     device       float * dst,
     uint tpig[[thread_position_in_grid]]) {
@@ -1307,7 +1332,7 @@ kernel void kernel_gelu_quick(
     dst[tpig] = x*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x)));
 }
 
-kernel void kernel_gelu_quick_4(
+kernel void kernel_gelu_quick_f32_4(
     device const float4 * src0,
     device       float4 * dst,
     uint tpig[[thread_position_in_grid]]) {
@@ -1334,7 +1359,7 @@ T erf_approx(T x) {
     return sign_x * y;
 }
 
-kernel void kernel_gelu_erf(
+kernel void kernel_gelu_erf_f32(
     device const float * src0,
     device       float * dst,
     uint tpig[[thread_position_in_grid]]) {
@@ -1343,7 +1368,7 @@ kernel void kernel_gelu_erf(
     dst[tpig] = 0.5f*x*(1.0f+erf_approx<float>(x*SQRT_2_INV));
 }
 
-kernel void kernel_gelu_erf_4(
+kernel void kernel_gelu_erf_f32_4(
     device const float4 * src0,
     device       float4 * dst,
     uint tpig[[thread_position_in_grid]]) {
@@ -1352,7 +1377,7 @@ kernel void kernel_gelu_erf_4(
     dst[tpig] = 0.5f*x*(1.0f+erf_approx<float4>(x*SQRT_2_INV));
 }
 
-kernel void kernel_silu(
+kernel void kernel_silu_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
@@ -1360,7 +1385,7 @@ kernel void kernel_silu(
     dst[tpig] = x / (1.0f + exp(-x));
 }
 
-kernel void kernel_silu_4(
+kernel void kernel_silu_f32_4(
         device const float4 * src0,
         device       float4 * dst,
         uint tpig[[thread_position_in_grid]]) {
@@ -1368,99 +1393,202 @@ kernel void kernel_silu_4(
     dst[tpig] = x / (1.0f + exp(-x));
 }
 
-kernel void kernel_elu(
+kernel void kernel_elu_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
-    device const float & x = src0[tpig];
+    const float x = src0[tpig];
     dst[tpig] = (x > 0.0f) ? x : (exp(x) - 1.0f);
 }
 
-kernel void kernel_sqr(
+kernel void kernel_elu_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    const float4 x = src0[tpig];
+    dst[tpig][0] = (x[0] > 0.0f) ? x[0] : (exp(x[0]) - 1.0f);
+    dst[tpig][1] = (x[1] > 0.0f) ? x[1] : (exp(x[1]) - 1.0f);
+    dst[tpig][2] = (x[2] > 0.0f) ? x[2] : (exp(x[2]) - 1.0f);
+    dst[tpig][3] = (x[3] > 0.0f) ? x[3] : (exp(x[3]) - 1.0f);
+}
+
+kernel void kernel_sqr_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
     dst[tpig] = src0[tpig] * src0[tpig];
 }
 
-kernel void kernel_sqrt(
+kernel void kernel_sqr_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = src0[tpig] * src0[tpig];
+}
+
+kernel void kernel_sqrt_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
     dst[tpig] = sqrt(src0[tpig]);
 }
 
-kernel void kernel_sin(
+kernel void kernel_sqrt_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = sqrt(src0[tpig]);
+}
+
+kernel void kernel_sin_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
     dst[tpig] = sin(src0[tpig]);
 }
 
-kernel void kernel_cos(
+kernel void kernel_sin_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = sin(src0[tpig]);
+}
+
+kernel void kernel_cos_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
     dst[tpig] = cos(src0[tpig]);
 }
 
-kernel void kernel_neg(
+kernel void kernel_cos_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = cos(src0[tpig]);
+}
+
+kernel void kernel_log_f32(
+        device const float * src0,
+        device       float * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = log(src0[tpig]);
+}
+
+kernel void kernel_log_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = log(src0[tpig]);
+}
+
+kernel void kernel_neg_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
     dst[tpig] = -src0[tpig];
 }
 
-kernel void kernel_abs(
+kernel void kernel_neg_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = -src0[tpig];
+}
+
+kernel void kernel_abs_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
     dst[tpig] = fabs(src0[tpig]);
 }
 
-kernel void kernel_sgn(
+kernel void kernel_abs_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = fabs(src0[tpig]);
+}
+
+kernel void kernel_sgn_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
-    device const float & x = src0[tpig];
-    dst[tpig] = (x > 0.0f) ? 1.0f : ((x < 0.0f) ? -1.0f : 0.0f);
+    dst[tpig] = sign(src0[tpig]);
+}
+
+kernel void kernel_sgn_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = sign(src0[tpig]);
 }
 
-kernel void kernel_step(
+kernel void kernel_step_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
-    dst[tpig] = src0[tpig] > 0.0f ? 1.0f : 0.0f;
+    dst[tpig] = step(0.0f, src0[tpig]);
+}
+
+kernel void kernel_step_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = step(0.0f, src0[tpig]);
 }
 
-kernel void kernel_hardswish(
+kernel void kernel_hardswish_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
-    device const float & x = src0[tpig];
+    const float x = src0[tpig];
+    dst[tpig] = x * fmin(1.0f, fmax(0.0f, (x + 3.0f) / 6.0f));
+}
+
+kernel void kernel_hardswish_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    const float4 x = src0[tpig];
     dst[tpig] = x * fmin(1.0f, fmax(0.0f, (x + 3.0f) / 6.0f));
 }
 
-kernel void kernel_hardsigmoid(
+kernel void kernel_hardsigmoid_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
-    device const float & x = src0[tpig];
+    const float x = src0[tpig];
+    dst[tpig] = fmin(1.0f, fmax(0.0f, (x + 3.0f) / 6.0f));
+}
+
+kernel void kernel_hardsigmoid_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    const float4 x = src0[tpig];
     dst[tpig] = fmin(1.0f, fmax(0.0f, (x + 3.0f) / 6.0f));
 }
 
-kernel void kernel_exp(
+kernel void kernel_exp_f32(
         device const float * src0,
         device       float * dst,
         uint tpig[[thread_position_in_grid]]) {
     dst[tpig] = exp(src0[tpig]);
 }
 
-kernel void kernel_reglu(
+kernel void kernel_exp_f32_4(
+        device const float4 * src0,
+        device       float4 * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = exp(src0[tpig]);
+}
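
The element-wise kernels above now come in scalar and float4 flavors; a minimal host-side sketch of how a dispatcher might choose between them, assuming an n % 4 == 0 heuristic (the helper name and the heuristic are assumptions for illustration, not taken from this diff):

    #include <cstdint>
    #include <string>
    #include <utility>

    // Pick the kernel name and thread count for an element-wise op over n
    // values: the *_4 variant runs one thread per float4 when n divides by 4,
    // otherwise the scalar kernel runs one thread per element.
    static std::pair<std::string, int64_t> pick_unary_pipeline(const std::string & base, int64_t n) {
        if (n % 4 == 0) {
            return { base + "_4", n/4 };
        }
        return { base, n };
    }

For example, pick_unary_pipeline("kernel_exp_f32", n) would select kernel_exp_f32_4 with n/4 threads whenever n is a multiple of 4.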
+
+kernel void kernel_reglu_f32(
+        constant ggml_metal_kargs_glu & args,
         device const char * src0,
         device const char * src1,
         device       char * dst,
-        constant ggml_metal_kargs_glu & args,
         uint tgpig[[threadgroup_position_in_grid]],
         uint tpitg[[thread_position_in_threadgroup]],
         uint   ntg[[threads_per_threadgroup]]) {
@@ -1476,11 +1604,11 @@ kernel void kernel_reglu(
     }
 }
 
-kernel void kernel_geglu(
+kernel void kernel_geglu_f32(
+        constant ggml_metal_kargs_glu & args,
         device const char * src0,
         device const char * src1,
         device       char * dst,
-        constant ggml_metal_kargs_glu & args,
         uint tgpig[[threadgroup_position_in_grid]],
         uint tpitg[[thread_position_in_threadgroup]],
         uint   ntg[[threads_per_threadgroup]]) {
@@ -1498,11 +1626,11 @@ kernel void kernel_geglu(
     }
 }
 
-kernel void kernel_swiglu(
+kernel void kernel_swiglu_f32(
+        constant ggml_metal_kargs_glu & args,
         device const char * src0,
         device const char * src1,
         device       char * dst,
-        constant ggml_metal_kargs_glu & args,
         uint tgpig[[threadgroup_position_in_grid]],
         uint tpitg[[thread_position_in_threadgroup]],
         uint   ntg[[threads_per_threadgroup]]) {
@@ -1520,11 +1648,11 @@ kernel void kernel_swiglu(
     }
 }
 
-kernel void kernel_swiglu_oai(
+kernel void kernel_swiglu_oai_f32(
+        constant ggml_metal_kargs_glu & args,
         device const char * src0,
         device const char * src1,
         device       char * dst,
-        constant ggml_metal_kargs_glu & args,
         uint tgpig[[threadgroup_position_in_grid]],
         uint tpitg[[thread_position_in_threadgroup]],
         uint   ntg[[threads_per_threadgroup]]) {
@@ -1546,11 +1674,11 @@ kernel void kernel_swiglu_oai(
     }
 }
 
-kernel void kernel_geglu_erf(
+kernel void kernel_geglu_erf_f32(
+        constant ggml_metal_kargs_glu & args,
         device const char * src0,
         device const char * src1,
         device       char * dst,
-        constant ggml_metal_kargs_glu & args,
         uint tgpig[[threadgroup_position_in_grid]],
         uint tpitg[[thread_position_in_threadgroup]],
         uint   ntg[[threads_per_threadgroup]]) {
@@ -1568,11 +1696,11 @@ kernel void kernel_geglu_erf(
     }
 }
 
-kernel void kernel_geglu_quick(
+kernel void kernel_geglu_quick_f32(
+        constant ggml_metal_kargs_glu & args,
         device const char * src0,
         device const char * src1,
         device       char * dst,
-        constant ggml_metal_kargs_glu & args,
         uint tgpig[[threadgroup_position_in_grid]],
         uint tpitg[[thread_position_in_threadgroup]],
         uint   ntg[[threads_per_threadgroup]]) {
@@ -1642,16 +1770,16 @@ kernel void kernel_sum_rows(
 
 typedef decltype(kernel_sum_rows<false>) kernel_sum_rows_t;
 
-template [[host_name("kernel_sum_rows")]] kernel kernel_sum_rows_t kernel_sum_rows<false>;
-template [[host_name("kernel_mean")]]     kernel kernel_sum_rows_t kernel_sum_rows<true>;
+template [[host_name("kernel_sum_rows_f32")]] kernel kernel_sum_rows_t kernel_sum_rows<false>;
+template [[host_name("kernel_mean_f32")]]     kernel kernel_sum_rows_t kernel_sum_rows<true>;
 
 template<typename T>
 kernel void kernel_soft_max(
+        constant ggml_metal_kargs_soft_max & args,
         device const  char * src0,
         device const  char * src1,
         device const  char * src2,
         device        char * dst,
-        constant ggml_metal_kargs_soft_max & args,
         threadgroup  float * buf [[threadgroup(0)]],
         uint3 tgpig[[threadgroup_position_in_grid]],
         uint3 tpitg[[thread_position_in_threadgroup]],
@@ -1753,11 +1881,11 @@ kernel void kernel_soft_max(
 
 template<typename T>
 kernel void kernel_soft_max_4(
+        constant ggml_metal_kargs_soft_max & args,
         device const  char * src0,
         device const  char * src1,
         device const  char * src2,
         device        char * dst,
-        constant ggml_metal_kargs_soft_max & args,
         threadgroup  float * buf [[threadgroup(0)]],
         uint3 tgpig[[threadgroup_position_in_grid]],
         uint3 tpitg[[thread_position_in_threadgroup]],
@@ -1867,53 +1995,12 @@ template [[host_name("kernel_soft_max_f32")]]   kernel kernel_soft_max_t   kerne
 template [[host_name("kernel_soft_max_f16_4")]] kernel kernel_soft_max_4_t kernel_soft_max_4<half4>;
 template [[host_name("kernel_soft_max_f32_4")]] kernel kernel_soft_max_4_t kernel_soft_max_4<float4>;
 
-kernel void kernel_diag_mask_inf(
-        device const float * src0,
-        device       float * dst,
-        constant ggml_metal_kargs_diag_mask_inf & args,
-        uint3 tpig[[thread_position_in_grid]]) {
-    const int64_t i02 = tpig[2];
-    const int64_t i01 = tpig[1];
-    const int64_t i00 = tpig[0];
-
-    if (i00 > args.n_past + i01) {
-        dst[i02*args.ne01*args.ne00 + i01*args.ne00 + i00] = -INFINITY;
-    } else {
-        dst[i02*args.ne01*args.ne00 + i01*args.ne00 + i00] = src0[i02*args.ne01*args.ne00 + i01*args.ne00 + i00];
-    }
-}
-
-kernel void kernel_diag_mask_inf_8(
-        device const float4 * src0,
-        device       float4 * dst,
-        constant ggml_metal_kargs_diag_mask_inf & args,
-        uint3 tpig[[thread_position_in_grid]]) {
-
-    const int64_t i = 2*tpig[0];
-
-    dst[i+0] = src0[i+0];
-    dst[i+1] = src0[i+1];
-    int64_t i4 = 4*i;
-    const int64_t i02 = i4/(args.ne00*args.ne01); i4 -= i02*args.ne00*args.ne01;
-    const int64_t i01 = i4/(args.ne00);      i4 -= i01*args.ne00;
-    const int64_t i00 = i4;
-    for (int k = 3; k >= 0; --k) {
-        if (i00 + 4 + k <= args.n_past + i01) {
-            break;
-        }
-        dst[i+1][k] = -INFINITY;
-        if (i00 + k > args.n_past + i01) {
-            dst[i][k] = -INFINITY;
-        }
-    }
-}
-
 // ref: ggml.c:ggml_compute_forward_ssm_conv_f32
-kernel void kernel_ssm_conv_f32(
+kernel void kernel_ssm_conv_f32_f32(
+        constant ggml_metal_kargs_ssm_conv & args,
         device const  void * src0,
         device const  void * src1,
         device       float * dst,
-        constant ggml_metal_kargs_ssm_conv & args,
         uint3 tgpig[[threadgroup_position_in_grid]],
         uint3 tpitg[[thread_position_in_threadgroup]],
         uint3   ntg[[threads_per_threadgroup]]) {
@@ -1942,6 +2029,7 @@ kernel void kernel_ssm_conv_f32(
 
 // ref: ggml.c:ggml_compute_forward_ssm_scan_f32, Mamba-1 part
 kernel void kernel_ssm_scan_f32(
+        constant ggml_metal_kargs_ssm_scan & args,
         device const void * src0,
         device const void * src1,
         device const void * src2,
@@ -1951,7 +2039,6 @@ kernel void kernel_ssm_scan_f32(
         device const void * src6,
         device      float * dst,
         threadgroup float * shared [[threadgroup(0)]],
-        constant ggml_metal_kargs_ssm_scan & args,
         uint3  tgpig[[threadgroup_position_in_grid]],
         uint3  tpitg[[thread_position_in_threadgroup]],
         ushort sgitg[[simdgroup_index_in_threadgroup]],
@@ -2057,7 +2144,8 @@ kernel void kernel_ssm_scan_f32(
 }
 
 // ref: ggml.c:ggml_compute_forward_ssm_scan_f32, Mamba-2 part
-kernel void kernel_ssm_scan_f32_group(
+kernel void kernel_ssm_scan_group_f32(
+        constant ggml_metal_kargs_ssm_scan & args,
         device const void * src0,
         device const void * src1,
         device const void * src2,
@@ -2067,7 +2155,6 @@ kernel void kernel_ssm_scan_f32_group(
         device const void * src6,
         device      float * dst,
         threadgroup float * shared [[threadgroup(0)]],
-        constant ggml_metal_kargs_ssm_scan & args,
         uint3  tgpig[[threadgroup_position_in_grid]],
         uint3  tpitg[[thread_position_in_threadgroup]],
         ushort sgitg[[simdgroup_index_in_threadgroup]],
@@ -2346,24 +2433,22 @@ kernel void kernel_rwkv_wkv7_f32(
     }
 }
 
-kernel void kernel_argmax(
-        device   const void * x,
-        device      int32_t * dst,
-        constant    int64_t & ncols,
-        constant   uint64_t & nb01,
-        threadgroup   float * shared_maxval [[threadgroup(0)]],
-        threadgroup int32_t * shared_argmax [[threadgroup(1)]],
+kernel void kernel_argmax_f32(
+        constant ggml_metal_kargs_argmax & args,
+        device   const char * src0,
+        device         char * dst,
+        threadgroup    char * shmem [[threadgroup(0)]],
         uint  tgpig[[threadgroup_position_in_grid]],
         uint  tpitg[[thread_position_in_threadgroup]],
         uint  sgitg[[simdgroup_index_in_threadgroup]],
         uint  tiisg[[thread_index_in_simdgroup]],
         uint    ntg[[threads_per_threadgroup]]) {
-    device const float * x_row = (device const float *) ((device const char *) x + tgpig * nb01);
+    device const float * x_row = (device const float *) ((device const char *) src0 + tgpig * args.nb01);
 
     float   lmax = -INFINITY;
     int32_t larg = -1;
 
-    for (int i00 = tpitg; i00 < ncols; i00 += ntg) {
+    for (int i00 = tpitg; i00 < args.ne00; i00 += ntg) {
         if (x_row[i00] > lmax) {
             lmax = x_row[i00];
             larg = i00;
@@ -2374,6 +2459,11 @@ kernel void kernel_argmax(
     float max_val = simd_max(lmax);
     int32_t arg_val = simd_max(select(-1, larg, lmax == max_val));
 
+    device int32_t * dst_i32 = (device int32_t *) dst;
+
+    threadgroup   float * shared_maxval = (threadgroup   float *) shmem;
+    threadgroup int32_t * shared_argmax = (threadgroup int32_t *) shmem + N_SIMDWIDTH;
+
     if (ntg > N_SIMDWIDTH) {
         if (sgitg == 0) {
             shared_maxval[tiisg] = -INFINITY;
@@ -2395,15 +2485,15 @@ kernel void kernel_argmax(
         float max_val_reduced   = simd_max(max_val);
         int32_t arg_val_reduced = simd_max(select(-1, arg_val, max_val == max_val_reduced));
 
-        dst[tgpig] = arg_val_reduced;
+        dst_i32[tgpig] = arg_val_reduced;
 
         return;
     }
 
-    dst[tgpig] = arg_val;
+    dst_i32[tgpig] = arg_val;
 }
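
kernel_argmax_f32 now receives a single untyped threadgroup blob and carves the two reduction scratch arrays out of it itself; a hedged C++ sketch of the matching host-side allocation (the N_SIMDWIDTH value and helper name are assumptions):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t N_SIMDWIDTH = 32; // assumed simdgroup width on Apple GPUs

    // One float (running max) and one int32 (its index) per simdgroup lane,
    // laid out back to back exactly as the kernel casts shmem above.
    constexpr size_t argmax_shmem_bytes() {
        return N_SIMDWIDTH * (sizeof(float) + sizeof(int32_t));
    }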
 
-kernel void kernel_norm(
+kernel void kernel_norm_f32(
         constant ggml_metal_kargs_norm & args,
         device const char * src0,
         device       char * dst,
@@ -2537,11 +2627,11 @@ kernel void kernel_rms_norm_fuse_impl(
 
 typedef decltype(kernel_rms_norm_fuse_impl<1>) kernel_rms_norm_fuse_t;
 
-template [[host_name("kernel_rms_norm")]]         kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<1>;
-template [[host_name("kernel_rms_norm_mul")]]     kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<2>;
-template [[host_name("kernel_rms_norm_mul_add")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<3>;
+template [[host_name("kernel_rms_norm_f32")]]         kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<1>;
+template [[host_name("kernel_rms_norm_mul_f32")]]     kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<2>;
+template [[host_name("kernel_rms_norm_mul_add_f32")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<3>;
 
-kernel void kernel_l2_norm(
+kernel void kernel_l2_norm_f32(
         constant ggml_metal_kargs_l2_norm & args,
         device const char * src0,
         device       char * dst,
@@ -2584,10 +2674,10 @@ kernel void kernel_l2_norm(
     }
 }
 
-kernel void kernel_group_norm(
+kernel void kernel_group_norm_f32(
+        constant ggml_metal_kargs_group_norm & args,
         device const float * src0,
         device       float * dst,
-        constant ggml_metal_kargs_group_norm & args,
         threadgroup float  * buf [[threadgroup(0)]],
         uint tgpig[[threadgroup_position_in_grid]],
         uint tpitg[[thread_position_in_threadgroup]],
@@ -2595,7 +2685,7 @@ kernel void kernel_group_norm(
         uint tiisg[[thread_index_in_simdgroup]],
         uint   ntg[[threads_per_threadgroup]]) {
     const int64_t ne = args.ne00*args.ne01*args.ne02;
-    const int64_t gs = args.ne00*args.ne01*((args.ne02 + args.n_groups - 1) / args.n_groups);
+    const int64_t gs = args.ne00*args.ne01*((args.ne02 + args.ngrp - 1) / args.ngrp);
 
     int start = tgpig * gs;
     int end   = start + gs;
@@ -3407,7 +3497,7 @@ typedef decltype(kernel_mul_mv<half, half4, half, half4>) mul_mv_t;
 template [[host_name("kernel_mul_mv_f32_f32")]]   kernel mul_mv_t kernel_mul_mv<float,  float4,  float,  float4>;
 template [[host_name("kernel_mul_mv_f16_f32")]]   kernel mul_mv_t kernel_mul_mv<half,   half4,   float,  float4>;
 template [[host_name("kernel_mul_mv_f16_f16")]]   kernel mul_mv_t kernel_mul_mv<half,   half4,   half,   half4>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_mul_mv_bf16_f32")]]  kernel mul_mv_t kernel_mul_mv<bfloat, bfloat4, float,  float4>;
 template [[host_name("kernel_mul_mv_bf16_bf16")]] kernel mul_mv_t kernel_mul_mv<bfloat, bfloat4, bfloat, bfloat4>;
 #endif
@@ -3472,7 +3562,7 @@ typedef decltype(kernel_mul_mv_c4<half4, half4>) mul_mv_c4_t;
 
 template [[host_name("kernel_mul_mv_f32_f32_c4")]]  kernel mul_mv_c4_t kernel_mul_mv_c4<float4,  float4>;
 template [[host_name("kernel_mul_mv_f16_f32_c4")]]  kernel mul_mv_c4_t kernel_mul_mv_c4<half4,   float4>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_mul_mv_bf16_f32_c4")]] kernel mul_mv_c4_t kernel_mul_mv_c4<bfloat4, float4>;
 #endif
 
@@ -3529,7 +3619,7 @@ kernel void kernel_mul_mv_1row(
 typedef decltype(kernel_mul_mv_1row<half, half4>) mul_mv_1row_t;
 
 template [[host_name("kernel_mul_mv_f16_f32_1row")]]  kernel mul_mv_1row_t kernel_mul_mv_1row<half,   half4>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_mul_mv_bf16_f32_1row")]] kernel mul_mv_1row_t kernel_mul_mv_1row<bfloat, bfloat4>;
 #endif
 
@@ -3576,7 +3666,7 @@ kernel void kernel_mul_mv_l4(
 typedef decltype(kernel_mul_mv_l4<half, half4>) mul_mv_l4_t;
 
 template [[host_name("kernel_mul_mv_f16_f32_l4")]]  kernel mul_mv_l4_t kernel_mul_mv_l4<half, half4>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_mul_mv_bf16_f32_l4")]] kernel mul_mv_l4_t kernel_mul_mv_l4<bfloat, bfloat4>;
 #endif
 
@@ -3879,62 +3969,63 @@ template [[host_name("kernel_rope_multi_f16")]] kernel kernel_rope_multi_t kerne
 template [[host_name("kernel_rope_vision_f32")]] kernel kernel_rope_vision_t kernel_rope_vision<float>;
 template [[host_name("kernel_rope_vision_f16")]] kernel kernel_rope_vision_t kernel_rope_vision<half>;
 
-typedef void (im2col_t)(
-        device const float * x,
-        device        char * dst,
-        constant ggml_metal_kargs_im2col & args,
-        uint3 tgpig[[threadgroup_position_in_grid]],
-        uint3  tgpg[[threadgroups_per_grid]],
-        uint3 tpitg[[thread_position_in_threadgroup]],
-        uint3   ntg[[threads_per_threadgroup]]);
-
-template <typename T>
-kernel void kernel_im2col(
-        device const float * x,
-        device        char * dst,
-        constant ggml_metal_kargs_im2col & args,
-        uint3 tgpig[[threadgroup_position_in_grid]],
-        uint3  tgpg[[threadgroups_per_grid]],
-        uint3 tpitg[[thread_position_in_threadgroup]],
-        uint3   ntg[[threads_per_threadgroup]]) {
-//    const int64_t IC = tgpg[0];
-    const int64_t OH = tgpg[1];
-    const int64_t OW = tgpg[2];
-
-//    const int64_t N  = ntg[0];
-    const int64_t KH = ntg[1];
-    const int64_t KW = ntg[2];
-
-    const int64_t in  = tpitg[0];
-    const int64_t ikh = tpitg[1];
-    const int64_t ikw = tpitg[2];
-
-    const int64_t iic = tgpig[0];
-    const int64_t ioh = tgpig[1];
-    const int64_t iow = tgpig[2];
-
-    const int64_t iiw = iow*args.s0 + ikw*args.d0 - args.p0;
-    const int64_t iih = ioh*args.s1 + ikh*args.d1 - args.p1;
-
-    const int64_t offset_dst = (in*OH*OW + ioh*OW + iow)*args.CHW + (iic*(KH*KW) + ikh*KW + ikw);
-
-    device T * pdst = (device T *) (dst);
-
-    if (iih < 0 || iih >= args.IH || iiw < 0 || iiw >= args.IW) {
-        pdst[offset_dst] = 0.0f;
-    } else {
-        const int64_t offset_src = in*args.ofs0 + iic*args.ofs1 + iih*args.IW + iiw;
-        pdst[offset_dst] = x[offset_src];
-    }
-}
-
-template [[host_name("kernel_im2col_f32")]] kernel im2col_t kernel_im2col<float>;
-template [[host_name("kernel_im2col_f16")]] kernel im2col_t kernel_im2col<half>;
+// TODO: obsolete -- remove
+//typedef void (im2col_t)(
+//        constant ggml_metal_kargs_im2col & args,
+//        device const float * x,
+//        device        char * dst,
+//        uint3 tgpig[[threadgroup_position_in_grid]],
+//        uint3  tgpg[[threadgroups_per_grid]],
+//        uint3 tpitg[[thread_position_in_threadgroup]],
+//        uint3   ntg[[threads_per_threadgroup]]);
+//
+//template <typename T>
+//kernel void kernel_im2col(
+//        constant ggml_metal_kargs_im2col & args,
+//        device const float * x,
+//        device        char * dst,
+//        uint3 tgpig[[threadgroup_position_in_grid]],
+//        uint3  tgpg[[threadgroups_per_grid]],
+//        uint3 tpitg[[thread_position_in_threadgroup]],
+//        uint3   ntg[[threads_per_threadgroup]]) {
+////    const int64_t IC = tgpg[0];
+//    const int64_t OH = tgpg[1];
+//    const int64_t OW = tgpg[2];
+//
+////    const int64_t N  = ntg[0];
+//    const int64_t KH = ntg[1];
+//    const int64_t KW = ntg[2];
+//
+//    const int64_t in  = tpitg[0];
+//    const int64_t ikh = tpitg[1];
+//    const int64_t ikw = tpitg[2];
+//
+//    const int64_t iic = tgpig[0];
+//    const int64_t ioh = tgpig[1];
+//    const int64_t iow = tgpig[2];
+//
+//    const int64_t iiw = iow*args.s0 + ikw*args.d0 - args.p0;
+//    const int64_t iih = ioh*args.s1 + ikh*args.d1 - args.p1;
+//
+//    const int64_t offset_dst = (in*OH*OW + ioh*OW + iow)*args.CHW + (iic*(KH*KW) + ikh*KW + ikw);
+//
+//    device T * pdst = (device T *) (dst);
+//
+//    if (iih < 0 || iih >= args.IH || iiw < 0 || iiw >= args.IW) {
+//        pdst[offset_dst] = 0.0f;
+//    } else {
+//        const int64_t offset_src = in*args.ofs0 + iic*args.ofs1 + iih*args.IW + iiw;
+//        pdst[offset_dst] = x[offset_src];
+//    }
+//}
+//
+//template [[host_name("kernel_im2col_f32")]] kernel im2col_t kernel_im2col<float>;
+//template [[host_name("kernel_im2col_f16")]] kernel im2col_t kernel_im2col<half>;
 
 typedef void (im2col_ext_t)(
+        constant ggml_metal_kargs_im2col & args,
         device const float * x,
         device        char * dst,
-        constant ggml_metal_kargs_im2col & args,
         uint3 tgpig[[threadgroup_position_in_grid]],
         uint3  tgpg[[threadgroups_per_grid]],
         uint3 tpitg[[thread_position_in_threadgroup]],
@@ -3942,16 +4033,16 @@ typedef void (im2col_ext_t)(
 
 template <typename T>
 kernel void kernel_im2col_ext(
+        constant ggml_metal_kargs_im2col & args,
         device const float * x,
         device        char * dst,
-        constant ggml_metal_kargs_im2col & args,
         uint3 tgpig[[threadgroup_position_in_grid]],
         uint3  tgpg[[threadgroups_per_grid]],      // tgpg[0] = D x IC x KH x KW, CHW = IC x KH x KW
         uint3 tpitg[[thread_position_in_threadgroup]],
         uint3   ntg[[threads_per_threadgroup]]) {  // [M, 1, 1]
     const int64_t KHW = (int64_t)args.KHW;
 
-    const int64_t d = tgpig[0] / args.CHW;
+    const int64_t d   = tgpig[0] / args.CHW;
     const int64_t chw = tgpig[0] % args.CHW;
     const int64_t tgpig_0 = chw / KHW;  // 0 ~ (IC - 1)
     const int64_t HW = tgpig[0] % KHW;
@@ -3985,19 +4076,19 @@ template [[host_name("kernel_im2col_ext_f32")]] kernel im2col_ext_t kernel_im2co
 template [[host_name("kernel_im2col_ext_f16")]] kernel im2col_ext_t kernel_im2col_ext<half>;
 
 typedef void (conv_transpose_1d_t)(
+        constant ggml_metal_kargs_conv_transpose_1d & args,
         device const float * src0,
         device const float * src1,
         device        char * dst,
-        constant ggml_metal_kargs_conv_transpose_1d & args,
         uint3   tgpig[[threadgroup_position_in_grid]],
         uint3    tgpg[[threadgroups_per_grid]]);
 
 template <typename T>
 kernel void kernel_conv_transpose_1d(
+        constant ggml_metal_kargs_conv_transpose_1d & args,
         device const     T * src0,
         device const float * src1,
         device        char * dst,
-        constant ggml_metal_kargs_conv_transpose_1d & args,
         uint3   tgpig[[threadgroup_position_in_grid]],
         uint3   tgpg[[threadgroups_per_grid]]) {
 
@@ -4021,26 +4112,26 @@ kernel void kernel_conv_transpose_1d(
 
 template [[host_name("kernel_conv_transpose_1d_f32_f32")]]
 kernel void kernel_conv_transpose_1d<float>(
+    constant ggml_metal_kargs_conv_transpose_1d & args,
     device const float * src0,
     device const float * src1,
     device        char * dst,
-    constant ggml_metal_kargs_conv_transpose_1d & args,
     uint3   tgpig[[threadgroup_position_in_grid]],
     uint3    tgpg[[threadgroups_per_grid]]);
 
 template [[host_name("kernel_conv_transpose_1d_f16_f32")]]
 kernel void kernel_conv_transpose_1d<half>(
+    constant ggml_metal_kargs_conv_transpose_1d & args,
     device const half  * src0,
     device const float * src1,
     device        char * dst,
-    constant ggml_metal_kargs_conv_transpose_1d & args,
     uint3   tgpig[[threadgroup_position_in_grid]],
     uint3    tgpg[[threadgroups_per_grid]]);
 
 kernel void kernel_upscale_f32(
+    constant ggml_metal_kargs_upscale & args,
     device  const char * src0,
     device        char * dst,
-    constant ggml_metal_kargs_upscale & args,
     uint3 tgpig[[threadgroup_position_in_grid]],
     uint3 tpitg[[thread_position_in_threadgroup]],
     uint3   ntg[[threads_per_threadgroup]]) {
@@ -4064,9 +4155,9 @@ kernel void kernel_upscale_f32(
 }
 
 kernel void kernel_pad_f32(
+    constant ggml_metal_kargs_pad & args,
     device  const char * src0,
     device        char * dst,
-    constant ggml_metal_kargs_pad & args,
     uint3 tgpig[[threadgroup_position_in_grid]],
     uint3 tpitg[[thread_position_in_threadgroup]],
     uint3   ntg[[threads_per_threadgroup]]) {
@@ -4100,9 +4191,9 @@ kernel void kernel_pad_f32(
 }
 
 kernel void kernel_pad_reflect_1d_f32(
+    constant   ggml_metal_kargs_pad_reflect_1d & args,
     device  const char * src0,
     device        char * dst,
-    constant   ggml_metal_kargs_pad_reflect_1d & args,
     uint3 tgpig[[threadgroup_position_in_grid]],
     uint3  tgpg[[threadgroups_per_grid]],
     uint3 tpitg[[thread_position_in_threadgroup]],
@@ -4133,8 +4224,8 @@ kernel void kernel_pad_reflect_1d_f32(
 }
 
 kernel void kernel_arange_f32(
-    device        char * dst,
     constant   ggml_metal_kargs_arange & args,
+    device        char * dst,
     uint3 tgpig[[threadgroup_position_in_grid]],
     uint3 tpitg[[thread_position_in_threadgroup]],
     uint3   ntg[[threads_per_threadgroup]]) {
@@ -4147,9 +4238,9 @@ kernel void kernel_arange_f32(
 }
 
 kernel void kernel_timestep_embedding_f32(
+    constant  ggml_metal_kargs_timestep_embedding & args,
     device  const char * src0,
     device        char * dst,
-    constant  ggml_metal_kargs_timestep_embedding & args,
     uint3 tgpig[[threadgroup_position_in_grid]],
     uint3 tpitg[[thread_position_in_threadgroup]],
     uint3   ntg[[threads_per_threadgroup]]) {
@@ -4173,19 +4264,19 @@ kernel void kernel_timestep_embedding_f32(
 
 // bitonic sort implementation following the CUDA kernels as reference
 typedef void (argsort_t)(
-        device const float  * x,
-        device     int32_t  * dst,
         constant   ggml_metal_kargs_argsort & args,
+        device  const float * x,
+        device      int32_t * dst,
         threadgroup int32_t * shared_values [[threadgroup(0)]],
         uint3 tgpig[[threadgroup_position_in_grid]],
         uint3 tpitg[[thread_position_in_threadgroup]]);
 
 template<ggml_sort_order order>
 kernel void kernel_argsort_f32_i32(
-        device const float   * x,
-        device       int32_t * dst,
         constant   ggml_metal_kargs_argsort & args,
-        threadgroup int32_t  * shared_values [[threadgroup(0)]],
+        device const float  * x,
+        device      int32_t * dst,
+        threadgroup int32_t * shared_values [[threadgroup(0)]],
         uint3 tgpig[[threadgroup_position_in_grid]],
         uint3 tpitg[[thread_position_in_threadgroup]]) {
     // bitonic sort
@@ -4238,11 +4329,21 @@ template [[host_name("kernel_argsort_f32_i32_asc")]]  kernel argsort_t kernel_ar
 template [[host_name("kernel_argsort_f32_i32_desc")]] kernel argsort_t kernel_argsort_f32_i32<GGML_SORT_ORDER_DESC>;
 
 kernel void kernel_leaky_relu_f32(
+        constant     ggml_metal_kargs_leaky_relu & args,
         device const float * src0,
         device       float * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    const float x = src0[tpig];
+    dst[tpig] = x > 0.0f ? x : x * args.slope;
+}
+
+kernel void kernel_leaky_relu_f32_4(
         constant     ggml_metal_kargs_leaky_relu & args,
+        device const float4 * src0,
+        device       float4 * dst,
         uint tpig[[thread_position_in_grid]]) {
-    dst[tpig] = src0[tpig] > 0.0f ? src0[tpig] : src0[tpig] * args.slope;
+    const float4 x = src0[tpig];
+    dst[tpig] = float4(x > 0.0f)*x + float4(x <= 0.0f)*(x * args.slope);
 }
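
In the float4 variant above, the comparisons yield per-lane boolean vectors that convert to 1.0/0.0 masks, so the sum keeps x on positive lanes and x*slope on the rest; a scalar C++ reference for the value each lane ends up with, illustrative only:

    // Plain ternary form of leaky relu...
    static float leaky_relu_ref(float x, float slope) {
        return x > 0.0f ? x : slope * x;
    }

    // ...and the same value written with the masking trick the kernel uses.
    static float leaky_relu_masked(float x, float slope) {
        return (float)(x > 0.0f)*x + (float)(x <= 0.0f)*(slope*x);
    }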
 
 constant bool FC_flash_attn_ext_has_mask  [[function_constant(FC_FLASH_ATTN_EXT + 0)]];
@@ -4884,7 +4985,7 @@ template [[host_name("kernel_flash_attn_ext_f16_dk192_dv128")]]  kernel flash_at
 template [[host_name("kernel_flash_attn_ext_f16_dk256_dv256")]]  kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES,    half4x4,    1, dequantize_f16,  half4x4,    1, dequantize_f16,  256, 256>;
 template [[host_name("kernel_flash_attn_ext_f16_dk576_dv512")]]  kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES,    half4x4,    1, dequantize_f16,  half4x4,    1, dequantize_f16,  576, 512>;
 
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_flash_attn_ext_bf16_dk40_dv40"  )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4,  1, dequantize_bf16, bfloat4x4,  1, dequantize_bf16, 40,  40>;
 template [[host_name("kernel_flash_attn_ext_bf16_dk64_dv64"  )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4,  1, dequantize_bf16, bfloat4x4,  1, dequantize_bf16, 64,  64>;
 template [[host_name("kernel_flash_attn_ext_bf16_dk80_dv80"  )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4,  1, dequantize_bf16, bfloat4x4,  1, dequantize_bf16, 80,  80>;
@@ -5450,7 +5551,7 @@ kernel void kernel_flash_attn_ext_vec(
 typedef decltype(kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 128, 128, 4>) flash_attn_ext_vec_t;
 
 template [[host_name("kernel_flash_attn_ext_vec_f16_dk64_dv64")]]    kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4,      1, dequantize_f16_t4,  half4,       1, dequantize_f16_t4,  64, 64, 2>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_flash_attn_ext_vec_bf16_dk64_dv64")]]   kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4,    1, dequantize_bf16_t4, bfloat4,     1, dequantize_bf16_t4, 64, 64, 2>;
 #endif
 template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk64_dv64")]]   kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0,  8, dequantize_q4_0_t4, 64, 64, 2>;
@@ -5460,7 +5561,7 @@ template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk64_dv64")]]   kernel flas
 template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk64_dv64")]]   kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0,  8, dequantize_q8_0_t4, 64, 64, 2>;
 
 template [[host_name("kernel_flash_attn_ext_vec_f16_dk96_dv96")]]    kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4,      1, dequantize_f16_t4,  half4,       1, dequantize_f16_t4,  96, 96, 4>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_flash_attn_ext_vec_bf16_dk96_dv96")]]   kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4,    1, dequantize_bf16_t4, bfloat4,     1, dequantize_bf16_t4, 96, 96, 4>;
 #endif
 template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk96_dv96")]]   kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0,  8, dequantize_q4_0_t4, 96, 96, 4>;
@@ -5470,7 +5571,7 @@ template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk96_dv96")]]   kernel flas
 template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk96_dv96")]]   kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0,  8, dequantize_q8_0_t4, 96, 96, 4>;
 
 template [[host_name("kernel_flash_attn_ext_vec_f16_dk128_dv128")]]  kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4,      1, dequantize_f16_t4,  half4,       1, dequantize_f16_t4,  128, 128, 1>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_flash_attn_ext_vec_bf16_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4,    1, dequantize_bf16_t4, bfloat4,     1, dequantize_bf16_t4, 128, 128, 1>;
 #endif
 template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0,  8, dequantize_q4_0_t4, 128, 128, 1>;
@@ -5480,7 +5581,7 @@ template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk128_dv128")]] kernel flas
 template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0,  8, dequantize_q8_0_t4, 128, 128, 1>;
 
 template [[host_name("kernel_flash_attn_ext_vec_f16_dk192_dv192")]]  kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4,      1, dequantize_f16_t4,  half4,       1, dequantize_f16_t4,  192, 192, 2>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_flash_attn_ext_vec_bf16_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4,    1, dequantize_bf16_t4, bfloat4,     1, dequantize_bf16_t4, 192, 192, 2>;
 #endif
 template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0,  8, dequantize_q4_0_t4, 192, 192, 2>;
@@ -5490,7 +5591,7 @@ template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk192_dv192")]] kernel flas
 template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0,  8, dequantize_q8_0_t4, 192, 192, 2>;
 
 template [[host_name("kernel_flash_attn_ext_vec_f16_dk192_dv128")]]  kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4,      1, dequantize_f16_t4,  half4,       1, dequantize_f16_t4,  192, 128, 2>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_flash_attn_ext_vec_bf16_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4,    1, dequantize_bf16_t4, bfloat4,     1, dequantize_bf16_t4, 192, 128, 2>;
 #endif
 template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0,  8, dequantize_q4_0_t4, 192, 128, 2>;
@@ -5500,7 +5601,7 @@ template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk192_dv128")]] kernel flas
 template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0,  8, dequantize_q8_0_t4, 192, 128, 2>;
 
 template [[host_name("kernel_flash_attn_ext_vec_f16_dk256_dv256")]]  kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4,      1, dequantize_f16_t4,  half4,       1, dequantize_f16_t4,  256, 256, 1>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_flash_attn_ext_vec_bf16_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4,    1, dequantize_bf16_t4, bfloat4,     1, dequantize_bf16_t4, 256, 256, 1>;
 #endif
 template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0,  8, dequantize_q4_0_t4, 256, 256, 1>;
@@ -5510,7 +5611,7 @@ template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk256_dv256")]] kernel flas
 template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0,  8, dequantize_q8_0_t4, 256, 256, 1>;
 
 template [[host_name("kernel_flash_attn_ext_vec_f16_dk576_dv512")]]  kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4,      1, dequantize_f16_t4,  half4,       1, dequantize_f16_t4,  576, 512, 2>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_flash_attn_ext_vec_bf16_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4,    1, dequantize_bf16_t4, bfloat4,     1, dequantize_bf16_t4, 576, 512, 2>;
 #endif
 template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0,  8, dequantize_q4_0_t4, 576, 512, 2>;
@@ -5603,12 +5704,12 @@ template [[host_name("kernel_cpy_f32_f32")]]   kernel kernel_cpy_t kernel_cpy<fl
 template [[host_name("kernel_cpy_f32_f16")]]   kernel kernel_cpy_t kernel_cpy<float,  half>;
 template [[host_name("kernel_cpy_f32_i32")]]   kernel kernel_cpy_t kernel_cpy<float,  int32_t>;
 template [[host_name("kernel_cpy_i32_f32")]]   kernel kernel_cpy_t kernel_cpy<int32_t, float>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_cpy_f32_bf16")]]  kernel kernel_cpy_t kernel_cpy<float,  bfloat>;
 #endif
 template [[host_name("kernel_cpy_f16_f32")]]   kernel kernel_cpy_t kernel_cpy<half,   float>;
 template [[host_name("kernel_cpy_f16_f16")]]   kernel kernel_cpy_t kernel_cpy<half,   half>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_cpy_bf16_f32")]]  kernel kernel_cpy_t kernel_cpy<bfloat, float>;
 template [[host_name("kernel_cpy_bf16_bf16")]] kernel kernel_cpy_t kernel_cpy<bfloat, bfloat>;
 #endif
@@ -7880,13 +7981,13 @@ kernel void kernel_mul_mm_id_map0(
 
 typedef decltype(kernel_mul_mm_id_map0<1>) kernel_mul_mm_id_map0_t;
 
-template [[host_name("kernel_mul_mm_id_map0_f16_ne20_1" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<1>;
-template [[host_name("kernel_mul_mm_id_map0_f16_ne20_2" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<2>;
-template [[host_name("kernel_mul_mm_id_map0_f16_ne20_4" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<4>;
-template [[host_name("kernel_mul_mm_id_map0_f16_ne20_6" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<6>;
-template [[host_name("kernel_mul_mm_id_map0_f16_ne20_8" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<8>;
-template [[host_name("kernel_mul_mm_id_map0_f16_ne20_10")]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<10>;
-template [[host_name("kernel_mul_mm_id_map0_f16_ne20_16")]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<16>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_1" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<1>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_2" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<2>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_4" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<4>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_6" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<6>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_8" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<8>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_10")]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<10>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_16")]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<16>;
 
 template<typename T, typename T4x4, typename simdgroup_T8x8, typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread T4x4 &)>
 kernel void kernel_mul_mm_id(
@@ -8050,7 +8151,7 @@ typedef decltype(kernel_get_rows_f<float>) get_rows_f_t;
 
 template [[host_name("kernel_get_rows_f32")]]  kernel get_rows_f_t kernel_get_rows_f<float>;
 template [[host_name("kernel_get_rows_f16")]]  kernel get_rows_f_t kernel_get_rows_f<half>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_get_rows_bf16")]] kernel get_rows_f_t kernel_get_rows_f<bfloat>;
 #endif
 
@@ -8085,7 +8186,7 @@ typedef decltype(kernel_set_rows_f<float>) set_rows_f_t;
 
 template [[host_name("kernel_set_rows_f32")]]  kernel set_rows_f_t kernel_set_rows_f<float>;
 template [[host_name("kernel_set_rows_f16")]]  kernel set_rows_f_t kernel_set_rows_f<half>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_set_rows_bf16")]] kernel set_rows_f_t kernel_set_rows_f<bfloat>;
 #endif
 
@@ -8106,7 +8207,7 @@ typedef decltype(kernel_mul_mm<half, half4x4, simdgroup_half8x8, float4x4, 1, de
 
 template [[host_name("kernel_mul_mm_f32_f32")]]     kernel mul_mm_t kernel_mul_mm<half,   half4x4,   simdgroup_half8x8,   float4x4,      1,     dequantize_f32>;
 template [[host_name("kernel_mul_mm_f16_f32")]]     kernel mul_mm_t kernel_mul_mm<half,   half4x4,   simdgroup_half8x8,   half4x4,       1,     dequantize_f16>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_mul_mm_bf16_f32")]]    kernel mul_mm_t kernel_mul_mm<bfloat, bfloat4x4, simdgroup_bfloat8x8, bfloat4x4,     1,     dequantize_bf16>;
 #endif
 template [[host_name("kernel_mul_mm_q4_0_f32")]]    kernel mul_mm_t kernel_mul_mm<half,   half4x4,   simdgroup_half8x8,   block_q4_0,    2,     dequantize_q4_0>;
@@ -8138,7 +8239,7 @@ typedef decltype(kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, float4x4, 1,
 
 template [[host_name("kernel_mul_mm_id_f32_f16")]]     kernel mul_mm_id kernel_mul_mm_id<half,   half4x4,   simdgroup_half8x8,   float4x4,      1,     dequantize_f32>;
 template [[host_name("kernel_mul_mm_id_f16_f16")]]     kernel mul_mm_id kernel_mul_mm_id<half,   half4x4,   simdgroup_half8x8,   half4x4,       1,     dequantize_f16>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_mul_mm_id_bf16_f16")]]    kernel mul_mm_id kernel_mul_mm_id<bfloat, bfloat4x4, simdgroup_bfloat8x8, bfloat4x4,     1,     dequantize_bf16>;
 #endif
 template [[host_name("kernel_mul_mm_id_q4_0_f16")]]    kernel mul_mm_id kernel_mul_mm_id<half,   half4x4,   simdgroup_half8x8,   block_q4_0,    2,     dequantize_q4_0>;
@@ -8282,7 +8383,7 @@ typedef decltype(kernel_mul_mv_id<mmv_fn<kernel_mul_mv_impl<float, float4, float
 
 template [[host_name("kernel_mul_mv_id_f32_f32")]]     kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_impl<float, float4, float, float4>>>;
 template [[host_name("kernel_mul_mv_id_f16_f32")]]     kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_impl<half, half4, float, float4>>>;
-#if defined(GGML_METAL_USE_BF16)
+#if defined(GGML_METAL_HAS_BF16)
 template [[host_name("kernel_mul_mv_id_bf16_f32")]]    kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_impl<bfloat, bfloat4, float, float4>>>;
 #endif
 template [[host_name("kernel_mul_mv_id_q8_0_f32")]]    kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_q8_0_f32_impl<N_R0_Q8_0, N_SG_Q8_0, N_SIMDWIDTH>>>;
@@ -8310,12 +8411,12 @@ template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]]  kernel kernel_mul_mv_id_t
 template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]]  kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_xs_f32_impl <N_R0_IQ4_XS,  N_SG_IQ4_XS,  N_SIMDWIDTH>>>;
 
 kernel void kernel_pool_2d_max_f32(
+        constant    ggml_metal_kargs_pool_2d & args,
         device  const float * src0,
         device        float * dst,
-        constant    ggml_metal_kargs_pool_2d & args,
         uint        gid[[thread_position_in_grid]]) {
 
-    if (gid >= args.parallel_elements) {
+    if (gid >= args.np) {
         return;
     }
 
@@ -8348,12 +8449,12 @@ kernel void kernel_pool_2d_max_f32(
 }
 
 kernel void kernel_pool_2d_avg_f32(
+        constant    ggml_metal_kargs_pool_2d & args,
         device  const float * src0,
         device        float * dst,
-        constant    ggml_metal_kargs_pool_2d & args,
         uint        gid[[thread_position_in_grid]]) {
 
-    if (gid >= args.parallel_elements) {
+    if (gid >= args.np) {
         return;
     }
 
index b54a1a4e823f97056b242deb9942f1022726bb1d..ce4a88761c87e517659fdab0771928dd48967db2 100644 (file)
@@ -6325,12 +6325,20 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
     }
 
     for (ggml_type type : {GGML_TYPE_F16, GGML_TYPE_F32}) {
-        test_cases.emplace_back(new test_sqr(type));
-        test_cases.emplace_back(new test_sqrt(type));
-        test_cases.emplace_back(new test_log(type));
-        test_cases.emplace_back(new test_sin(type));
-        test_cases.emplace_back(new test_cos(type));
-        test_cases.emplace_back(new test_clamp(type));
+        test_cases.emplace_back(new test_sqr       (type));
+        test_cases.emplace_back(new test_sqrt      (type));
+        test_cases.emplace_back(new test_log       (type));
+        test_cases.emplace_back(new test_sin       (type));
+        test_cases.emplace_back(new test_cos       (type));
+        test_cases.emplace_back(new test_clamp     (type));
+        test_cases.emplace_back(new test_leaky_relu(type));
+        test_cases.emplace_back(new test_sqr       (type, {7, 1, 5, 3}));
+        test_cases.emplace_back(new test_sqrt      (type, {7, 1, 5, 3}));
+        test_cases.emplace_back(new test_log       (type, {7, 1, 5, 3}));
+        test_cases.emplace_back(new test_sin       (type, {7, 1, 5, 3}));
+        test_cases.emplace_back(new test_cos       (type, {7, 1, 5, 3}));
+        test_cases.emplace_back(new test_clamp     (type, {7, 1, 5, 3}));
+        test_cases.emplace_back(new test_leaky_relu(type, {7, 1, 5, 3}));
     }
 
     test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 1, 1}, 5));