From: Xuan-Son Nguyen Date: Tue, 29 Apr 2025 06:45:49 +0000 (+0200) Subject: llama-graph : fix text position for mrope (#13159) X-Git-Tag: upstream/0.0.5318~102 X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=b6ce7430b7eb51f032152316880204e0a9c0470e;p=pkg%2Fggml%2Fsources%2Fllama.cpp llama-graph : fix text position for mrope (#13159) * llama-graph : fix text position for mrope * fix typo * explicitly set 4th dim in the loop --- diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 2706ea26..fabb9ca2 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -55,13 +55,16 @@ void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) { if (ubatch->pos && pos) { const int64_t n_tokens = ubatch->n_tokens; - if (ubatch->token && n_pos_per_embd > 1) { + if (ubatch->token && n_pos_per_embd == 4) { // in case we're using M-RoPE with text tokens, convert the 1D positions to 4D - // the other dimensions are all 0, they are unused for text tokens - std::vector<llama_pos> pos_data(n_tokens*n_pos_per_embd, 0); + // the 3 first dims are the same, and 4th dim is all 0 + std::vector<llama_pos> pos_data(n_tokens*n_pos_per_embd); // copy the first dimension for (int i = 0; i < n_tokens; ++i) { - pos_data[i] = ubatch->pos[i]; + pos_data[ i] = ubatch->pos[i]; + pos_data[ n_tokens + i] = ubatch->pos[i]; + pos_data[2 * n_tokens + i] = ubatch->pos[i]; + pos_data[3 * n_tokens + i] = 0; // 4th dim is 0 } ggml_backend_tensor_set(pos, pos_data.data(), 0, pos_data.size()*ggml_element_size(pos)); } else {