From: Georgi Gerganov
Date: Tue, 16 Jan 2024 17:34:54 +0000 (+0200)
Subject: perplexity : fix kv cache handling for hellaswag (#4981)
X-Git-Tag: upstream/0.0.4488~2598
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=959ef0c0df725c013c7f712eaa7790b8e38a8e20;p=pkg%2Fggml%2Fsources%2Fllama.cpp

perplexity : fix kv cache handling for hellaswag (#4981)

ggml-ci
---

diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index 9a77beca..b4fedf80 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -428,6 +428,7 @@ static std::vector<float> hellaswag_evaluate_tokens(
     for (size_t i_chunk = 0; i_chunk < n_chunk; ++i_chunk) {
         size_t n_tokens = tokens.size() - i_chunk * n_batch;
         n_tokens = std::min(n_tokens, size_t(n_batch));
+        llama_kv_cache_seq_rm(ctx, 0, n_past, -1);
         if (llama_decode(ctx, llama_batch_get_one(tokens.data() + i_chunk * n_batch, n_tokens, n_past, 0))) {
             fprintf(stderr, "%s : failed to eval\n", __func__);
             return {};
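
For context (not part of the patch): the added `llama_kv_cache_seq_rm(ctx, 0, n_past, -1)` call removes all KV cache entries at positions >= n_past on sequence 0 before each decode, so entries left over from a previously scored (and possibly longer) HellaSwag ending cannot be attended to when the next ending is evaluated from the same shared prefix. Below is a minimal sketch of that pattern against the llama.cpp C API of this era; the function name `eval_ending`, its parameters, and the surrounding setup are hypothetical and are not taken from the patch.

```cpp
#include "llama.h"

#include <algorithm>
#include <cstdio>
#include <vector>

// Re-evaluate one candidate ending that continues a shared prefix of length
// n_past on sequence 0, clearing stale KV cache entries before each decode.
static bool eval_ending(llama_context * ctx, std::vector<llama_token> & ending, int n_past, int n_batch) {
    for (size_t i = 0; i < ending.size(); i += n_batch) {
        const int n_eval = std::min((int) (ending.size() - i), n_batch);

        // Drop cached entries at positions >= n_past for sequence 0; without
        // this, KV entries from a previously evaluated ending would still be
        // visible to the attention of the new tokens.
        llama_kv_cache_seq_rm(ctx, 0, n_past, -1);

        if (llama_decode(ctx, llama_batch_get_one(ending.data() + i, n_eval, n_past, 0))) {
            fprintf(stderr, "%s : failed to eval\n", __func__);
            return false;
        }

        n_past += n_eval;
    }
    return true;
}
```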