From: Jeff Bolz
Date: Wed, 26 Nov 2025 15:46:33 +0000 (-0600)
Subject: vulkan: allow graph_optimize for prompt processing workloads (llama/17475)
X-Git-Tag: upstream/0.9.4.395~114
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=b2253ef2ad428a5eff45de5bdeaf3e70acfbf539;p=pkg%2Fggml%2Fsources%2Fggml

vulkan: allow graph_optimize for prompt processing workloads (llama/17475)
---

diff --git a/src/ggml-vulkan/ggml-vulkan.cpp b/src/ggml-vulkan/ggml-vulkan.cpp
index 9c97f0a6..7f2cf795 100644
--- a/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/src/ggml-vulkan/ggml-vulkan.cpp
@@ -13158,24 +13158,6 @@ static void ggml_vk_graph_optimize(ggml_backend_t backend, struct ggml_cgraph *
         return false;
     };
 
-    // This function tries to reorder the graph to allow nodes to run in parallel.
-    // This helps with small batches, but for large batches its a slowdown, probably
-    // due to cache contention. So only reorder if the majority of nodes have few rows.
-    int num_small_nodes = 0;
-    int num_counted_nodes = 0;
-    for (int i = 0; i < graph->n_nodes; ++i) {
-        if (!is_empty(graph->nodes[i]) &&
-            graph->nodes[i]->op != GGML_OP_SET_ROWS) {
-            if (ggml_nrows(graph->nodes[i]) <= 8) {
-                num_small_nodes++;
-            }
-            num_counted_nodes++;
-        }
-    }
-    if (num_small_nodes < num_counted_nodes / 2) {
-        return;
-    }
-
     std::vector new_order;
     std::vector used(graph->n_nodes, false);
     std::set used_node_set;
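
For reference, the sketch below (not part of the patch, and not the ggml API: the NodeInfo type and should_reorder helper are hypothetical stand-ins) restates the heuristic that this commit deletes. Before the change, ggml_vk_graph_optimize bailed out of reordering unless a majority of the counted nodes were "small" (at most 8 rows), which effectively disabled the pass for large-batch prompt processing graphs.

    // Standalone illustration of the removed heuristic; hypothetical types, not the ggml API.
    #include <cstdint>
    #include <vector>

    // Stand-in for the per-node information the real code reads from the cgraph
    // (row count via ggml_nrows(), and whether the node is empty or a SET_ROWS op).
    struct NodeInfo {
        int64_t n_rows;   // number of rows the node produces
        bool    counted;  // false for empty nodes and GGML_OP_SET_ROWS, which were excluded
    };

    // Returns true when the (now removed) heuristic would have allowed reordering:
    // at least half of the counted nodes have few rows, i.e. a small-batch workload.
    static bool should_reorder(const std::vector<NodeInfo> &nodes) {
        int num_small_nodes   = 0;
        int num_counted_nodes = 0;
        for (const NodeInfo &n : nodes) {
            if (!n.counted) {
                continue;
            }
            if (n.n_rows <= 8) {
                num_small_nodes++;
            }
            num_counted_nodes++;
        }
        // Prompt processing graphs are dominated by many-row nodes, so this returned
        // false and the graph was left in its original order.
        return num_small_nodes >= num_counted_nodes / 2;
    }

With that early return removed, the reordering pass in ggml_vk_graph_optimize now also runs for prompt processing (large-batch) workloads rather than only for graphs where most nodes have few rows.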