From: Georgi Gerganov
Date: Mon, 25 Aug 2025 10:56:43 +0000 (+0300)
Subject: batched-bench : fix unified KV cache handling + pp timing (#15562)
X-Git-Tag: upstream/0.0.6527~258
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=6b64f74b55628e4193f4fb00313f07dbd8556528;p=pkg%2Fggml%2Fsources%2Fllama.cpp

batched-bench : fix unified KV cache handling + pp timing (#15562)

* batched-bench : fix unified KV cache handling + pp timing

* cont : run dummy token only with split KV cache
---

diff --git a/tools/batched-bench/batched-bench.cpp b/tools/batched-bench/batched-bench.cpp
index c6c601ad..93efad32 100644
--- a/tools/batched-bench/batched-bench.cpp
+++ b/tools/batched-bench/batched-bench.cpp
@@ -124,7 +124,7 @@ int main(int argc, char ** argv) {
                 const int tg = n_tg[i_tg];
                 const int pl = n_pl[i_pl];
 
-                const int n_ctx_req = is_pp_shared ? pp + pl*tg : pl*(pp + tg);
+                const int n_ctx_req = is_pp_shared ? (params.kv_unified ? pp : pl*pp) + pl*tg : pl*(pp + tg);
 
                 if (n_ctx_req > n_kv_max) {
                     continue;
@@ -147,13 +147,24 @@ int main(int argc, char ** argv) {
                     return 1;
                 }
 
+                const auto t_pp_end = ggml_time_us();
+
                 if (is_pp_shared) {
                     for (int32_t i = 1; i < pl; ++i) {
                         llama_memory_seq_cp(mem, 0, i, -1, -1);
                     }
-                }
 
-                const auto t_pp_end = ggml_time_us();
+                    if (!params.kv_unified) {
+                        // run one dummy token to apply the memory copy
+                        common_batch_clear(batch);
+                        common_batch_add(batch, get_token_rand(), pp + 0, { 0 }, true);
+                        if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
+                            LOG_ERR("%s: llama_decode() failed\n", __func__);
+                            return 1;
+                        }
+                        llama_memory_seq_rm(mem, 0, pp, -1);
+                    }
+                }
 
                 const auto t_tg_start = ggml_time_us();
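
A minimal standalone sketch (not part of the commit) of the new n_ctx_req formula from the first hunk: pp (prompt tokens), tg (generated tokens per sequence), pl (parallel sequences) and the two flags carry made-up values here. With a unified KV cache a shared prompt occupies pp cells once; with a split, per-sequence KV cache each of the pl sequences keeps its own copy of the prompt, hence pl*pp.

// sketch only: hypothetical values, mirrors the ternary expression added in the diff
#include <cstdio>

int main() {
    const int pp = 512;               // prompt tokens
    const int tg = 128;               // generated tokens per sequence
    const int pl = 4;                 // parallel sequences

    const bool is_pp_shared = true;   // prompt shared across sequences
    const bool kv_unified   = false;  // unified vs split (per-sequence) KV cache

    // same expression as the patched line in batched-bench.cpp
    const int n_ctx_req = is_pp_shared ? (kv_unified ? pp : pl*pp) + pl*tg
                                       : pl*(pp + tg);

    printf("required KV cells: %d\n", n_ctx_req);  // 4*512 + 4*128 = 2560 with these values
    return 0;
}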