From: Georgi Gerganov
Date: Mon, 26 May 2025 09:57:50 +0000 (+0300)
Subject: llama : clarify deprecation message (#13794)
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=22229314fc46b2f741bb21b12cde71f6c6a60b52;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : clarify deprecation message (#13794)
---

diff --git a/include/llama.h b/include/llama.h
index eafab732..01762bea 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -612,11 +612,11 @@ extern "C" {
     // Returns the number of tokens in the KV cache (slow, use only for debug)
    // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
     DEPRECATED(LLAMA_API int32_t llama_kv_self_n_tokens(const struct llama_context * ctx),
-            "Use llama_kv_self_seq_pos_max() instead");
+            "Use llama_kv_self_seq_pos_max() and llama_kv_self_seq_pos_min() instead (https://github.com/ggml-org/llama.cpp/issues/13793)");

     // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
     DEPRECATED(LLAMA_API int32_t llama_kv_self_used_cells(const struct llama_context * ctx),
-            "Use llama_kv_self_seq_pos_max() instead");
+            "Use llama_kv_self_seq_pos_max() and llama_kv_self_seq_pos_min() instead (https://github.com/ggml-org/llama.cpp/issues/13793)");

     // Clear the KV cache - both cell info is erased and KV data is zeroed
     LLAMA_API void llama_kv_self_clear(
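
For reference, a minimal migration sketch (not part of this commit), showing how a caller might approximate the old per-sequence token count using the replacement pair named in the new deprecation message. It assumes cached positions for the sequence are contiguous and that both functions return a negative value when the sequence has no cells in the cache; the helper name kv_seq_token_count is hypothetical:

    #include "llama.h"

    // Approximate token count for one sequence via the replacement API.
    // Assumes contiguous positions; returns 0 for an empty sequence.
    static int32_t kv_seq_token_count(struct llama_context * ctx, llama_seq_id seq_id) {
        const llama_pos p_min = llama_kv_self_seq_pos_min(ctx, seq_id);
        const llama_pos p_max = llama_kv_self_seq_pos_max(ctx, seq_id);
        if (p_min < 0 || p_max < 0) {
            return 0; // no cells assigned to this sequence
        }
        return (int32_t) (p_max - p_min + 1);
    }

Unlike the deprecated llama_kv_self_n_tokens(), which counted a multi-sequence cell once per assigned sequence, this is computed per sequence, which is why the message points to both the _max and _min variants (see the linked issue #13793 for the rationale).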