From: compilade
Date: Thu, 28 Mar 2024 12:05:54 +0000 (-0400)
Subject: llama : fix command-r inference when omitting outputs (#6367)
X-Git-Tag: upstream/0.0.4488~1921
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=0308f5e3d7bf9879f818b1a4ae589ff36b242af5;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : fix command-r inference when omitting outputs (#6367)
---

diff --git a/llama.cpp b/llama.cpp
index 892d46fb..77ec9b7a 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -9152,8 +9152,9 @@ struct llm_build_context {
             if (il == n_layer - 1) {
                 // skip computing output for unused tokens
                 struct ggml_tensor * inp_out_ids = build_inp_out_ids();
-                cur  = ggml_get_rows(ctx0,  cur, inp_out_ids);
-                inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
+                cur     = ggml_get_rows(ctx0,     cur, inp_out_ids);
+                inpL    = ggml_get_rows(ctx0,    inpL, inp_out_ids);
+                ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
             }

             struct ggml_tensor * attn_out = cur;
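
In command-r's parallel attention/FFN block, ffn_inp feeds the feed-forward network whose output is later added to the already-filtered cur (attention output) and inpL (residual), so when outputs are omitted for unused tokens it has to be reduced to the same set of rows as well; otherwise the final residual sum combines tensors with different token counts. Below is a minimal, self-contained sketch of that constraint using the public ggml API — it is not the llama.cpp graph itself, and the sizes (n_embd, n_tokens, n_outputs) and the inline FFN stand-in are made up for illustration.

// Minimal sketch (not llama.cpp code): every tensor that later feeds the same
// residual ggml_add must be filtered to the same rows with ggml_get_rows.
#include <assert.h>
#include <stdio.h>
#include "ggml.h"

int main(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16 * 1024 * 1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    const int n_embd    = 4; // hypothetical hidden size
    const int n_tokens  = 5; // tokens in the batch
    const int n_outputs = 2; // tokens whose outputs are actually needed

    // stand-ins for cur (attention output), inpL (residual) and ffn_inp
    struct ggml_tensor * cur     = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_tokens);
    struct ggml_tensor * inpL    = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_tokens);
    struct ggml_tensor * ffn_inp = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_tokens);

    // indices of the rows (tokens) to keep, analogous to inp_out_ids
    struct ggml_tensor * inp_out_ids = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_outputs);
    ggml_set_i32_1d(inp_out_ids, 0, 1);
    ggml_set_i32_1d(inp_out_ids, 1, 4);

    // the point of the fix: filter all three tensors, not just cur and inpL
    cur     = ggml_get_rows(ctx, cur,     inp_out_ids);
    inpL    = ggml_get_rows(ctx, inpL,    inp_out_ids);
    ffn_inp = ggml_get_rows(ctx, ffn_inp, inp_out_ids);

    // pretend ffn_inp went through the FFN; the residual sum now sees
    // matching shapes (n_embd x n_outputs) on every operand
    struct ggml_tensor * out = ggml_add(ctx, ggml_add(ctx, ffn_inp, cur), inpL);

    assert(out->ne[0] == n_embd);
    assert(out->ne[1] == n_outputs);
    printf("residual sum shape: %lld x %lld\n", (long long) out->ne[0], (long long) out->ne[1]);

    ggml_free(ctx);
    return 0;
}

Had ffn_inp been left unfiltered, the feed-forward output would keep n_tokens rows while the other operands keep n_outputs rows, and the residual addition could no longer be formed consistently.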