From: alwqx
Date: Thu, 2 May 2024 15:56:41 +0000 (+0800)
Subject: chore: fix typo in llama.cpp (#7032)
X-Git-Tag: upstream/0.0.4488~1706
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=6ecf3189e00a1e8e737a78b6d10e1d7006e050a2;p=pkg%2Fggml%2Fsources%2Fllama.cpp

chore: fix typo in llama.cpp (#7032)

Co-authored-by: Jared Van Bortel
---

diff --git a/llama.cpp b/llama.cpp
index 18d6297c..18b49ec2 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2359,7 +2359,7 @@ static bool llama_kv_cache_init(
     cache.recurrent = model.arch == LLM_ARCH_MAMBA;
     cache.v_trans = !cparams.flash_attn;
 
-    // TODO: support mixed reccurent Transformer architectues
+    // TODO: support mixed recurrent Transformer architectures
     // NOTE: (!a || b) is a logical implication (a -> b)
     GGML_ASSERT(!cache.recurrent || n_embd_k_gqa == hparams.n_embd_k_s());
     GGML_ASSERT(!cache.recurrent || n_embd_v_gqa == hparams.n_embd_v_s());
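
Note on the surrounding context lines: the GGML_ASSERT pattern (!a || b) is the usual boolean encoding of the implication a -> b, i.e. "if the cache is recurrent, then the K/V embedding sizes must equal the recurrent state sizes". A minimal standalone sketch of that pattern is below; the variable names and values are illustrative placeholders, not taken from llama.cpp:

#include <cassert>

int main() {
    // Illustrative values; in llama.cpp these come from the model/context params.
    bool recurrent     = true;  // stands in for cache.recurrent
    int  n_embd_k_gqa  = 512;   // per-layer K embedding size (placeholder)
    int  n_embd_k_s    = 512;   // recurrent K state size (placeholder)

    // (!a || b) fails only when a is true and b is false,
    // which is exactly the truth table of the implication a -> b.
    assert(!recurrent || n_embd_k_gqa == n_embd_k_s);
    return 0;
}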