llama-io.cpp
llama-kv-cache-unified.cpp
llama-kv-cache-unified-iswa.cpp
- llama-kv-cache-recurrent.cpp
+ llama-memory-recurrent.cpp
+ llama-memory-hybrid.cpp
llama-memory.cpp
llama-mmap.cpp
llama-model-loader.cpp
{ LLM_KV_ATTENTION_SCALE, "%s.attention.scale" },
{ LLM_KV_ATTENTION_KEY_LENGTH_MLA, "%s.attention.key_length_mla" },
{ LLM_KV_ATTENTION_VALUE_LENGTH_MLA, "%s.attention.value_length_mla" },
+ { LLM_KV_ATTENTION_LAYER_INDICES, "%s.attention.layer_indices" },
{ LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
{ LLM_KV_ROPE_DIMENSION_SECTIONS, "%s.rope.dimension_sections" },
{ LLM_KV_TOKENIZER_MASK_ID, "tokenizer.ggml.mask_token_id" },
{ LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" },
{ LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" },
+ { LLM_KV_TOKENIZER_ADD_SEP, "tokenizer.ggml.add_sep_token" },
{ LLM_KV_TOKENIZER_ADD_PREFIX, "tokenizer.ggml.add_space_prefix" },
{ LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, "tokenizer.ggml.remove_extra_whitespaces" },
{ LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap" },
const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) {
return LLM_TENSOR_INFOS.at(tensor);
}
+
+bool llm_arch_is_recurrent(const llm_arch & arch) {
+ switch (arch) {
+ case LLM_ARCH_MAMBA:
+ case LLM_ARCH_RWKV6:
+ case LLM_ARCH_RWKV6QWEN2:
+ case LLM_ARCH_RWKV7:
+ case LLM_ARCH_ARWKV7:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool llm_arch_is_hybrid(const llm_arch & arch) {
+ // TODO: There are currently no hybrid models! Once there are, this will be
+ // the place to identify them
+ switch (arch) {
+ default:
+ return false;
+ }
+}
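For context, a minimal sketch of how these two predicates are meant to be consumed when selecting the memory module for a model. The memory_kind enum and memory_kind_for helper below are illustrative only and not part of this change; the actual selection logic lives in the model/context setup code.

enum class memory_kind { unified, recurrent, hybrid };

// illustrative helper: map an architecture to the kind of memory module it needs
static memory_kind memory_kind_for(const llm_arch & arch) {
    if (llm_arch_is_hybrid(arch)) {
        return memory_kind::hybrid;    // attention + recurrent layers -> llama-memory-hybrid.cpp
    }
    if (llm_arch_is_recurrent(arch)) {
        return memory_kind::recurrent; // Mamba/RWKV-style models -> llama-memory-recurrent.cpp
    }
    return memory_kind::unified;       // default KV cache -> llama-kv-cache-unified.cpp
}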
LLM_KV_ATTENTION_SCALE,
LLM_KV_ATTENTION_KEY_LENGTH_MLA,
LLM_KV_ATTENTION_VALUE_LENGTH_MLA,
+ LLM_KV_ATTENTION_LAYER_INDICES,
LLM_KV_ROPE_DIMENSION_COUNT,
LLM_KV_ROPE_DIMENSION_SECTIONS,
LLM_KV_TOKENIZER_MASK_ID,
LLM_KV_TOKENIZER_ADD_BOS,
LLM_KV_TOKENIZER_ADD_EOS,
+ LLM_KV_TOKENIZER_ADD_SEP,
LLM_KV_TOKENIZER_ADD_PREFIX,
LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,
LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP,
llm_arch llm_arch_from_string(const std::string & name);
const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor);
+
+bool llm_arch_is_recurrent(const llm_arch & arch);
+bool llm_arch_is_hybrid (const llm_arch & arch);
#include "llama-batch.h"
#include "llama-impl.h"
-#include "llama-cparams.h"
#include "llama-vocab.h"
#include "llama-memory.h"
#include <algorithm>
#include <sstream>
-llama_ubatch llama_sbatch::reserve_ubatch(size_t n_ubatch, bool has_embd) {
- // clear empty sequences
- // the previous ubatch is assumed to be gone,
- // so nothing should refer to values in these sequences anymore.
- for (size_t i = seq.size(); i-- > 0;) {
- if (seq[i].length == 0) {
- seq.pop_back();
- } else {
- break;
- }
- }
-
- udatas.push_back({});
-
- auto & udata = udatas.back();
-
- udata.token.resize(!has_embd ? n_ubatch : 0);
- udata.embd.resize(has_embd ? n_embd * n_ubatch : 0);
- udata.pos.resize(n_ubatch);
- udata.n_seq_id.resize(n_ubatch);
- udata.seq_id.resize(n_ubatch);
- udata.output.resize(n_ubatch);
-
- llama_ubatch ubatch = {
- /*equal_seqs =*/ true,
- /*n_tokens =*/ 0,
- /*n_seq_tokens =*/ 0,
- /*n_seqs =*/ 0,
- /*token =*/ !has_embd ? udata.token.data() : nullptr,
- /*embd =*/ has_embd ? udata.embd.data() : nullptr,
- /*pos =*/ udata.pos.data(),
- /*n_seq_id =*/ udata.n_seq_id.data(),
- /*seq_id =*/ udata.seq_id.data(),
- /*output =*/ udata.output.data(),
- };
-
- return ubatch;
-}
-
-void llama_sbatch::add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length) {
- GGML_ASSERT(batch != nullptr);
- GGML_ASSERT(length <= seq.length);
- // Can only add sequences of equal lengths to a batch,
- // otherwise it isn't clear to which sequence a token belongs
- GGML_ASSERT(seq.n_seq_id == 0 || ubatch.n_seqs == 0 || length == (size_t) ubatch.n_tokens / ubatch.n_seqs);
- GGML_ASSERT((seq.n_seq_id != 0) == ubatch.equal_seqs);
- // NOTE: loops are separated for cache-friendliness
- if (batch->token) {
- if (ubatch.equal_seqs) {
- for (size_t i = 0; i < length; ++i) {
- ubatch.token[ubatch.n_tokens + i] = batch->token[ids[seq.offset + i]];
- }
- } else {
- // simple split
- ubatch.token = batch->token + seq.offset;
- }
- } else {
- ubatch.token = nullptr;
- }
- if (batch->embd) {
- if (ubatch.equal_seqs) {
- for (size_t i = 0; i < length; ++i) {
- memcpy(
- ubatch.embd + (n_embd * (ubatch.n_tokens + i)),
- batch->embd + (n_embd * ids[seq.offset + i]),
- n_embd * sizeof(float)
- );
- }
- } else {
- // simple split
- ubatch.embd = batch->embd + (n_embd * seq.offset);
- }
- } else {
- ubatch.embd = nullptr;
- }
- if (ubatch.equal_seqs) {
- for (size_t i = 0; i < length; ++i) {
- ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]];
- }
- } else {
- // simple split
- ubatch.pos = batch->pos + seq.offset;
- }
- if (ubatch.equal_seqs) {
- ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id;
- if (seq.seq_id) {
- ubatch.seq_id[ubatch.n_seqs] = seq.seq_id;
- }
- } else {
- // simple split
- if (batch->n_seq_id) {
- ubatch.n_seq_id = batch->n_seq_id + seq.offset;
- } else {
- for (size_t i = 0; i < length; ++i) {
- ubatch.n_seq_id[ubatch.n_seqs + i] = 1;
- }
- }
- if (batch->seq_id) {
- ubatch.seq_id = batch->seq_id + seq.offset;
- }
- }
- if (batch->logits) {
- if (ubatch.equal_seqs) {
- for (size_t i = 0; i < length; ++i) {
- size_t id = ids[seq.offset + i];
- int8_t is_output = batch->logits[id];
- ubatch.output[ubatch.n_tokens + i] = is_output;
- if (is_output) { out_ids.push_back(id); }
- }
- } else {
- // simple split
- ubatch.output = batch->logits + seq.offset;
- for (size_t i = 0; i < length; ++i) {
- if (ubatch.output[i] != 0) { out_ids.push_back(seq.offset + i); }
- }
- }
- } else {
- // only get last output
- for (size_t i = 0; i < length; ++i) {
- size_t id = ids[seq.offset + i];
- int8_t is_last = id == ids.size() - 1;
- ubatch.output[ubatch.n_tokens + i] = is_last;
- if (is_last) { out_ids.push_back(id); }
- }
- }
- if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) {
- ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1;
- }
- ubatch.n_tokens += length;
- ubatch.n_seqs += ubatch.equal_seqs ? 1 : length; // virtual sequences for simple splits
- seq.offset += length;
- seq.length -= length;
- n_tokens -= length;
- GGML_ASSERT(ubatch.n_tokens == ubatch.n_seq_tokens * ubatch.n_seqs);
-}
-
-llama_ubatch llama_sbatch::split_simple(size_t n_ubatch) {
- n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
- llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
- ubatch.equal_seqs = false;
- if (!seq.empty()) {
- llama_sbatch_seq & s = seq[0];
- size_t length = s.length < n_ubatch ? s.length : n_ubatch;
- GGML_ASSERT(seq.size() == 1 && s.n_seq_id == 0); // don't mix with other splits
- add_seq_to_ubatch(ubatch, s, length);
- }
- return ubatch;
-}
-
-llama_ubatch llama_sbatch::split_equal(size_t n_ubatch) {
- n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
- llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
- if (!seq.empty()) {
- size_t length = 0;
- size_t n_tokens_in_ubatch = 0;
- GGML_ASSERT(seq[0].n_seq_id > 0); // should not be mixed with simple splits
- // smallest first, because it's easier to split this way;
- // starting from the end to pop in constant time.
- for (size_t i = seq.size(); i-- > 0;) {
- llama_sbatch_seq & s = seq[i];
- GGML_ASSERT(s.length > 0);
- if (length == 0) {
- length = s.length < n_ubatch ? s.length : n_ubatch;
- }
- add_seq_to_ubatch(ubatch, s, length);
- n_tokens_in_ubatch += length;
- // shared prompts can't be mixed with any of their sequences,
- // so it's safer to compute them in their own ubatch
- if (s.n_seq_id > 1) { break; }
- // stop when there isn't enough space for another sequence
- if (length + n_tokens_in_ubatch > n_ubatch) { break; }
- }
- }
- return ubatch;
-}
-
-llama_ubatch llama_sbatch::split_seq(size_t n_ubatch) {
- n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
- llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
- if (!seq.empty()) {
- llama_sbatch_seq & s = seq[seq.size() - 1];
- size_t length = s.length < n_ubatch ? s.length : n_ubatch;
- GGML_ASSERT(s.n_seq_id > 0); // should not be mixed with simple splits
- add_seq_to_ubatch(ubatch, s, length);
- }
- return ubatch;
-}
-
-llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split) {
- GGML_ASSERT(batch.n_tokens >= 0);
- this->batch = &batch;
- this->n_embd = n_embd;
-
- n_tokens = batch.n_tokens;
- ids.resize(n_tokens);
- out_ids.clear();
- // TODO: reserve out_ids and seq
-
- for (size_t i = 0; i < n_tokens; ++i) {
- ids[i] = i;
- }
-
- if (simple_split) {
- seq.resize(1);
- llama_sbatch_seq & s = seq[0];
- s.n_seq_id = 0;
- s.seq_id = nullptr;
- s.offset = 0;
- s.length = n_tokens;
- return;
- }
-
- std::sort(ids.begin(), ids.end(),
- [&batch](size_t a, size_t b) {
- int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1;
- int32_t n_seq_b = batch.n_seq_id ? batch.n_seq_id[b] : 1;
- // sort by seq_id, then by pos
- if (n_seq_a == n_seq_b) {
- if (batch.seq_id) {
- for (int32_t i = 0; i < n_seq_a; ++i) {
- llama_seq_id seq_id_a = batch.seq_id[a][i];
- llama_seq_id seq_id_b = batch.seq_id[b][i];
- // smaller seq_ids go first
- if (seq_id_a != seq_id_b) {
- return seq_id_a < seq_id_b;
- }
- }
- }
- // when all else is equal, sort by pos
- if (batch.pos) {
- return batch.pos[a] < batch.pos[b];
- }
- // no pos, sort by id
- return a < b;
- }
- // shared prompts go first
- return n_seq_a > n_seq_b;
- }
- );
-
- // init seq
- llama_sbatch_seq * last_seq = nullptr;
-
- for (size_t i = 0; i < n_tokens; ++i) {
- const size_t bi = ids[i];
- const int32_t n_seqs = batch.n_seq_id[bi];
- llama_seq_id * seq_ids = batch.seq_id[bi];
- if (last_seq != nullptr) {
- bool same = n_seqs == last_seq->n_seq_id;
- for (int32_t j = 0; same && j < n_seqs; ++j) {
- if (seq_ids[j] != last_seq->seq_id[j]) {
- same = false;
- }
- }
- if (same) {
- last_seq->length += 1;
- continue;
- }
- }
- llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1};
- seq.push_back(new_seq);
- last_seq = &seq.back();
- }
-
- // keep shared prompts first at the end, then sort by length descending.
- std::sort(seq.begin(), seq.end(),
- [](llama_sbatch_seq & a, llama_sbatch_seq & b) {
- if (a.n_seq_id == b.n_seq_id) {
- return a.length > b.length;
- }
- return a.n_seq_id < b.n_seq_id;
- }
- );
-}
-
-llama_batch_allocr::llama_batch_allocr() {
+llama_batch_allocr::llama_batch_allocr(uint32_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {
const char * LLAMA_BATCH_DEBUG = getenv("LLAMA_BATCH_DEBUG");
debug = LLAMA_BATCH_DEBUG ? atoi(LLAMA_BATCH_DEBUG) : 0;
for (auto & cur : seq_cpl) {
cur.resize(LLAMA_MAX_SEQ);
}
+
+ seq_idx.resize(LLAMA_MAX_SEQ, -1);
}
bool llama_batch_allocr::init(
const llama_batch & batch_inp,
const llama_vocab & vocab,
const llama_memory_i * memory,
- bool embd_all) {
+ uint32_t n_embd,
+ bool output_all) {
clear();
batch = batch_inp;
+ this->vocab = &vocab;
+
GGML_ASSERT(batch.n_tokens > 0);
//
llama_pos p0[LLAMA_MAX_SEQ];
for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
if (!memory) {
+ // if no memory -> start from 0
p0[s] = 0;
} else {
p0[s] = memory->seq_pos_max(s) + 1;
pos[i] = p0[seq_id];
+ // update the starting position for all sequences that are assigned to this token
for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
- p0[batch.seq_id[i][s]] = pos[i] + 1;
+ const llama_seq_id seq_id = batch.seq_id[i][s];
+
+ p0[seq_id] = pos[i] + 1;
}
}
}
if (!batch.logits) {
- if (embd_all) {
+ if (output_all) {
// return the output for all tokens
output.resize(batch.n_tokens, true);
} else {
}
batch.logits = output.data();
- } else if (embd_all) {
+ } else if (output_all) {
bool warn = false;
for (int32_t i = 0; i < batch.n_tokens; ++i) {
// compute stats
//
+ this->n_embd = n_embd;
+
+ // count the outputs in this batch
for (int32_t i = 0; i < batch.n_tokens; ++i) {
n_outputs += batch.logits[i] != 0;
}
// determine coupled sequences
// these are pairs of sequences that have at least one token in the input batch that is assigned to both of them
for (int32_t i = 0; i < batch.n_tokens; ++i) {
+ const llama_seq_id s0 = batch.seq_id[i][0];
+
for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
- seq_pos[batch.seq_id[i][s]].insert(batch.pos[i]);
+ const llama_seq_id s1 = batch.seq_id[i][s];
- if (s > 0) {
- const llama_seq_id s0 = batch.seq_id[i][0];
- const llama_seq_id s1 = batch.seq_id[i][s];
+ seq_pos[s1].insert(batch.pos[i]);
+ if (s > 0) {
// mark that sequence s1 is coupled to s0
seq_cpl[s1][s0] = true;
- // note: the other way around is not necessary for now
+ // note: tracking the other way around is not necessary for now
//seq_cpl[s0][s1] = true;
}
}
}
- if (debug > 0) {
- LLAMA_LOG_DEBUG("%s: input batch info:\n", __func__);
- LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, batch.n_tokens);
- LLAMA_LOG_DEBUG("%s: token = %p\n", __func__, (void *) batch.token);
- LLAMA_LOG_DEBUG("%s: embd = %p\n", __func__, (void *) batch.embd);
- LLAMA_LOG_DEBUG("%s: pos = %p\n", __func__, (void *) batch.pos);
- LLAMA_LOG_DEBUG("%s: n_seq_id = %p\n", __func__, (void *) batch.n_seq_id);
- LLAMA_LOG_DEBUG("%s: seq_id = %p\n", __func__, (void *) batch.seq_id);
- LLAMA_LOG_DEBUG("%s: logits = %p\n", __func__, (void *) batch.logits);
- LLAMA_LOG_DEBUG("%s: n_outputs = %d\n", __func__, n_outputs);
+ // precompute the sequence sets for each token and determine the unique sequence ids that participate in the batch
+ {
+ seq_set_t seq_set_unq;
- if (debug > 1) {
- int seq_id_max = 0;
- for (int32_t i = 0; i < batch.n_tokens; ++i) {
- for (int s = 0; s < batch.n_seq_id[i]; ++s) {
- for (int s = 0; s < batch.n_seq_id[i]; ++s) {
- seq_id_max = std::max(seq_id_max, batch.seq_id[i][s]);
- }
- }
+ for (int32_t i = 0; i < batch.n_tokens; ++i) {
+ seq_set_t cur;
+ for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
+ const llama_seq_id seq_id = batch.seq_id[i][s];
+
+ cur .set(seq_id);
+ seq_set_unq.set(seq_id);
}
- ++seq_id_max;
- LLAMA_LOG_DEBUG("%s: token = [\n", __func__);
- for (int32_t i = 0; i < batch.n_tokens; ++i) {
- std::vector<int8_t> seq_id(seq_id_max);
+ seq_set.push_back(cur);
+ seq_set_map[cur].push_back(i);
+ }
- for (int s = 0; s < batch.n_seq_id[i]; ++s) {
- seq_id[batch.seq_id[i][s]] = 1;
- }
+ for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
+ if (seq_set_unq.test(s)) {
+ seq_idx[s] = seq_id_unq.size();
+ seq_id_unq.push_back(s);
+ }
+ }
+ }
- std::stringstream ss;
- for (int s = 0; s < seq_id_max; ++s) {
- if (seq_id[s]) {
- ss << s%10;
- } else {
- ss << ".";
- }
- }
+ if (debug > 0) {
+ LLAMA_LOG_DEBUG("%s: input batch info:\n", __func__);
- LLAMA_LOG_DEBUG("%s: %4d: id = %6d (%16s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
- __func__, i, batch.token[i], vocab.token_to_piece(batch.token[i]).c_str(),
- batch.pos[i], batch.n_seq_id[i], ss.str().c_str(), batch.logits[i]);
+ llama_ubatch ubatch {
+ /*.equal_seqs =*/ false,
+ /*.n_tokens =*/ (uint32_t) batch.n_tokens,
+ /*.n_seq_tokens =*/ (uint32_t) 1,
+ /*.n_seqs =*/ (uint32_t) batch.n_tokens,
+ /*.n_seqs_unq =*/ (uint32_t) this->seq_id_unq.size(),
+ /*.token =*/ batch.token,
+ /*.embd =*/ batch.embd,
+ /*.pos =*/ batch.pos,
+ /*.n_seq_id =*/ batch.n_seq_id,
+ /*.seq_id =*/ batch.seq_id,
+ /*.seq_id_unq =*/ this->seq_id_unq.data(),
+ /*.seq_idx =*/ this->seq_idx.data(),
+ /*.output =*/ batch.logits,
+ };
+
+ ubatch_print(ubatch, debug);
+
+ LLAMA_LOG_DEBUG("%s: seq = [\n", __func__);
+ for (int s0 = 0; s0 < (int) seq_pos.size(); ++s0) {
+ if (seq_pos[s0].empty()) {
+ continue;
}
- LLAMA_LOG_DEBUG("%s: ]\n", __func__);
-
- LLAMA_LOG_DEBUG("%s: seq = [\n", __func__);
- for (int s0 = 0; s0 < (int) seq_pos.size(); ++s0) {
- if (seq_pos[s0].empty()) {
- continue;
- }
- std::stringstream ss;
- for (int s1 = 0; s1 < (int) seq_cpl[s0].size(); ++s1) {
- if (seq_cpl[s0][s1]) {
- ss << s1 << " ";
- }
+ std::stringstream ss;
+ for (int s1 = 0; s1 < (int) seq_cpl[s0].size(); ++s1) {
+ if (seq_cpl[s0][s1]) {
+ ss << s1 << " ";
}
-
- LLAMA_LOG_DEBUG("%s: %4d: pos = [%4d, %4d], cpl = %s\n",
- __func__, s0, seq_pos_min(s0), seq_pos_max(s0), ss.str().empty() ? "-" : ss.str().c_str());
}
- LLAMA_LOG_DEBUG("%s: ]\n", __func__);
+
+ LLAMA_LOG_DEBUG("%s: %4d: pos = [%4d, %4d], cpl = %s\n",
+ __func__, s0, seq_pos_min(s0), seq_pos_max(s0), ss.str().empty() ? "-" : ss.str().c_str());
}
+ LLAMA_LOG_DEBUG("%s: ]\n", __func__);
}
//
continue;
}
- if (memory && seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
- LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s);
- return false;
+ if (memory) {
+ if (batch.token) {
+ if (seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
+ LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s);
+ return false;
+ }
+ } else {
+ assert(batch.embd);
+
+ // for embeddings (typically used as vision input), we allow them to have repeating positions
+ // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762
+ if (seq_pos_min(s) != memory->seq_pos_max(s) && seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
+ LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s);
+ return false;
+ }
+ }
}
if (seq_pos_max(s) - seq_pos_min(s) + 1 > (int) seq_pos[s].size()) {
}
}
+ // disallow partial sequence sub-sets:
+ //
+ // invalid: x
+ // i: 0 1 2 ...
+ // ---------------------------------------
+ // seq_id[i][0]: 0 0 1
+ // seq_id[i][1]: 1 1 2
+ // seq_id[i][2]: 2
+ //
+ // disallow decreasing sequence positions:
+ //
+ // invalid: x
+ // i: 0 1 2 3 4 5 6 ...
+ // ---------------------------------------
+ // pos[i]: 4 5 0 1 6 2 3
+ // seq_id[i][0]: 0 0 1 1 0 1 0
+ //
+ {
+ seq_set_t cur_seq_set[LLAMA_MAX_SEQ];
+ for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
+ cur_seq_set[s].set();
+ }
+
+ llama_pos cur_seq_pos[LLAMA_MAX_SEQ];
+ for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
+ cur_seq_pos[s] = -1;
+ }
+
+ for (int32_t i = 0; i < batch.n_tokens; ++i) {
+ const llama_pos pos = batch.pos[i];
+
+ for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
+ const llama_seq_id seq_id = batch.seq_id[i][s];
+
+ cur_seq_set[seq_id] &= seq_set[i];
+
+ if (cur_seq_set[seq_id].none()) {
+ LLAMA_LOG_ERROR("%s: sequence %d belongs to incompatible sequence sets (not allowed)\n", __func__, seq_id);
+ return false;
+ }
+
+ if (pos < cur_seq_pos[seq_id]) {
+ LLAMA_LOG_ERROR("%s: sequence %d positions are decreasing (not allowed)\n", __func__, seq_id);
+ return false;
+ }
+ }
+ }
+ }
+
+ split_reset();
+
return true;
}
+llama_ubatch llama_batch_allocr::ubatch_reserve(uint32_t n_seq_tokens, uint32_t n_seqs) {
+ const uint32_t n_tokens = n_seq_tokens*n_seqs;
+
+ clear();
+ split_reset();
+
+ ubatches.emplace_back();
+
+ auto & ubatch = ubatches.back();
+
+ ubatch.token .resize(n_tokens);
+ ubatch.embd .clear();
+ ubatch.pos .resize(n_tokens);
+ ubatch.n_seq_id .resize(n_tokens);
+ ubatch.seq_id .resize(n_tokens);
+ ubatch.seq_id_unq.resize(0);
+ ubatch.seq_idx .resize(LLAMA_MAX_SEQ, -1);
+ ubatch.output .resize(n_tokens);
+
+ for (uint32_t s = 0; s < n_seqs; ++s) {
+ ubatch.seq_idx[s] = s;
+ ubatch.seq_id_unq.push_back(s);
+ }
+
+ llama_ubatch res {
+ /*.equal_seqs =*/ true,
+ /*.n_tokens =*/ n_tokens,
+ /*.n_seq_tokens =*/ n_seq_tokens,
+ /*.n_seqs =*/ n_seqs,
+ /*.n_seqs_unq =*/ n_seqs,
+
+ /*.token =*/ ubatch.token.data(),
+ /*.embd =*/ nullptr,
+ /*.pos =*/ ubatch.pos.data(),
+ /*.n_seq_id =*/ ubatch.n_seq_id.data(),
+ /*.seq_id =*/ ubatch.seq_id.data(),
+ /*.seq_id_unq =*/ ubatch.seq_id_unq.data(),
+ /*.seq_idx =*/ ubatch.seq_idx.data(),
+ /*.output =*/ ubatch.output.data(),
+ };
+
+ return res;
+}
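ubatch_reserve() only needs to produce correctly sized buffers, not real token data: it exists so a worst-case compute graph can be built ahead of time. Its use in the graph-reservation path further down in this change looks like:

llama_batch_allocr balloc(model.hparams.n_pos_per_embd());
llama_ubatch ubatch = balloc.ubatch_reserve(n_tokens/n_seqs, n_seqs);

auto * gf  = graph_init();
auto   res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, mstate);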
+
const llama_batch & llama_batch_allocr::get_batch() const {
return batch;
}
+uint32_t llama_batch_allocr::get_n_tokens() const {
+ return batch.n_tokens;
+}
+
uint32_t llama_batch_allocr::get_n_outputs() const {
return n_outputs;
}
+std::vector<int32_t> & llama_batch_allocr::get_out_ids() {
+ return out_ids;
+}
+
llama_pos llama_batch_allocr::seq_pos_min(llama_seq_id seq_id) const {
return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].begin();
}
return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].rbegin();
}
+void llama_batch_allocr::split_reset() {
+ out_ids.clear();
+
+ used.clear();
+ used.resize(get_n_tokens(), false);
+
+ ubatches.clear();
+}
+
+llama_ubatch llama_batch_allocr::split_simple(uint32_t n_ubatch) {
+ // find the first unused token
+ uint32_t cur_idx = 0;
+ while (cur_idx < used.size() && used[cur_idx]) {
+ ++cur_idx;
+ }
+
+ // we are done
+ if (cur_idx >= used.size()) {
+ return {};
+ }
+
+ std::vector<int32_t> idxs;
+
+ while (true) {
+ idxs.push_back(cur_idx);
+
+ used[cur_idx] = true;
+
+ ++cur_idx;
+
+ if (cur_idx >= used.size()) {
+ break;
+ }
+
+ if (idxs.size() >= n_ubatch) {
+ break;
+ }
+ }
+
+ return ubatch_add(idxs, idxs.size(), false);
+}
+
+llama_ubatch llama_batch_allocr::split_equal(uint32_t n_ubatch) {
+ std::vector<seq_set_t> cur_seq_set;
+
+ // determine the non-overlapping sequence sets participating in this ubatch
+ for (int32_t i = 0; i < batch.n_tokens; ++i) {
+ if (used[i]) {
+ continue;
+ }
+
+ bool add = true;
+
+ for (uint32_t s = 0; s < cur_seq_set.size(); ++s) {
+ // the token cannot be added if its sequence set overlaps one of the already selected sets
+ if (!(cur_seq_set[s] & seq_set[i]).none()) {
+ add = false;
+ break;
+ }
+ }
+
+ if (add) {
+ cur_seq_set.push_back(seq_set[i]);
+
+ if (cur_seq_set.size() > n_ubatch) {
+ break;
+ }
+ }
+ }
+
+ const uint32_t n_seqs = cur_seq_set.size();
+
+ // we are done
+ if (n_seqs == 0) {
+ return {};
+ }
+
+ // the current batch index of each sequence set
+ std::vector<int32_t> cur_idx(n_seqs, 0);
+
+ for (uint32_t s = 0; s < n_seqs; ++s) {
+ while (used[seq_set_map[cur_seq_set[s]][cur_idx[s]]]) {
+ ++cur_idx[s];
+ }
+ }
+
+ // the list of batch indices for each sequence set
+ // at the end we will concat these to get the final ubatch
+ std::vector<idx_vec_t> idxs_per_seq(n_seqs);
+
+ while (true) {
+ // we can only add new n_seq_tokens tokens if all the sequence sets have at least one more unused token and
+ // if we haven't reached n_ubatch
+ bool can_expand = true;
+
+ for (uint32_t s = 0; s < n_seqs; ++s) {
+ if (cur_idx[s] >= (int32_t) seq_set_map[cur_seq_set[s]].size()) {
+ can_expand = false;
+ break;
+ }
+ }
+
+ if (!can_expand) {
+ break;
+ }
+
+ for (uint32_t s = 0; s < n_seqs; ++s) {
+ const int32_t idx = seq_set_map[cur_seq_set[s]][cur_idx[s]];
+
+ idxs_per_seq[s].push_back(idx);
+
+ used[idx] = true;
+
+ ++cur_idx[s];
+ }
+
+ if ((idxs_per_seq[0].size() + 1)*n_seqs > n_ubatch) {
+ break;
+ }
+ }
+
+ // concat the per-sequence-set lists
+ std::vector<int32_t> idxs;
+
+ for (uint32_t s = 0; s < n_seqs; ++s) {
+ idxs.insert(idxs.end(), idxs_per_seq[s].begin(), idxs_per_seq[s].end());
+ }
+
+ return ubatch_add(idxs, n_seqs, true);
+}
+
+llama_ubatch llama_batch_allocr::split_seq(uint32_t n_ubatch) {
+ // find the first unused token
+ uint32_t cur_idx = 0;
+ while (cur_idx < used.size() && used[cur_idx]) {
+ ++cur_idx;
+ }
+
+ // we are done
+ if (cur_idx >= used.size()) {
+ return {};
+ }
+
+ // this is the starting sequence set
+ // we allow adding tokens only if their sequence set is a subset of the current sequence set
+ auto cur_seq_set = seq_set[cur_idx];
+
+ std::vector<int32_t> idxs;
+
+ while (true) {
+ idxs.push_back(cur_idx);
+
+ used[cur_idx] = true;
+
+ if (idxs.size() >= n_ubatch) {
+ break;
+ }
+
+ do {
+ ++cur_idx;
+ } while (cur_idx < get_n_tokens() && (used[cur_idx] || ((cur_seq_set & seq_set[cur_idx]) != seq_set[cur_idx])));
+
+ if (cur_idx == get_n_tokens()) {
+ break;
+ }
+
+ cur_seq_set = seq_set[cur_idx];
+ }
+
+ return ubatch_add(idxs, 1, true);
+}
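Taken together, split_reset() plus repeated split_*() calls replace the old llama_sbatch flow. A condensed sketch of how a memory module is expected to drive this API once init() has succeeded (split_simple() is just an example here; which split_*() variant is used depends on the memory module, and error handling is omitted):

balloc.split_reset();

std::vector<llama_ubatch> ubatches;
while (true) {
    llama_ubatch ubatch = balloc.split_simple(n_ubatch);
    if (ubatch.n_tokens == 0) {
        break; // the entire batch has been consumed
    }
    ubatches.push_back(ubatch);
}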
+
void llama_batch_allocr::clear() {
n_outputs = 0;
batch = {};
- pos.clear();
- n_seq_id.clear();
- seq_id.clear();
- output.clear();
+
+ pos .clear();
+ n_seq_id .clear();
+ seq_id .clear();
+ seq_id_unq.clear();
+ output .clear();
for (auto & cur : seq_pos) {
cur.clear();
for (auto & cur : seq_cpl) {
std::fill(cur.begin(), cur.end(), false);
}
+
+ seq_set.clear();
+
+ seq_set_map.clear();
+
+ std::fill(seq_idx.begin(), seq_idx.end(), -1);
+}
+
+llama_ubatch llama_batch_allocr::ubatch_add(const std::vector<int32_t> & idxs, uint32_t n_seqs, bool equal_seqs) {
+ const uint32_t n_tokens = idxs.size();
+
+ assert(n_tokens%n_seqs == 0);
+
+ ubatches.emplace_back();
+
+ auto & ubatch = ubatches.back();
+
+ const int32_t n_pos_cur = batch.embd ? n_pos_per_embd : 1;
+
+ const int64_t n_embd_all = batch.embd ? (int64_t) n_tokens*n_embd : 0;
+ const int64_t n_pos_all = (int64_t) n_tokens*n_pos_cur;
+
+ ubatch.token .resize(n_tokens);
+ ubatch.embd .resize(n_embd_all);
+ ubatch.pos .resize(n_pos_all);
+ ubatch.n_seq_id .resize(n_tokens);
+ ubatch.seq_id .resize(n_tokens);
+ ubatch.seq_id_unq.resize(0);
+ ubatch.seq_idx .resize(LLAMA_MAX_SEQ, -1);
+ ubatch.output .resize(n_tokens);
+
+ seq_set_t seq_set_unq;
+
+ for (size_t i = 0; i < idxs.size(); ++i) {
+ if (batch.token) {
+ ubatch.token[i] = batch.token[idxs[i]];
+ }
+
+ if (batch.embd) {
+ memcpy(ubatch.embd.data() + i*n_embd, batch.embd + (int64_t) idxs[i]*n_embd, n_embd*sizeof(float));
+ }
+
+ for (int j = 0; j < n_pos_cur; ++j) {
+ ubatch.pos[j*n_tokens + i] = batch.pos[j*batch.n_tokens + idxs[i]];
+ }
+
+ ubatch.n_seq_id[i] = batch.n_seq_id[idxs[i]];
+ ubatch.seq_id[i] = batch.seq_id[idxs[i]];
+ ubatch.output[i] = batch.logits[idxs[i]];
+
+ for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
+ seq_set_unq.set(ubatch.seq_id[i][s]);
+ }
+
+ if (ubatch.output[i]) {
+ out_ids.push_back(idxs[i]);
+ }
+ }
+
+ for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
+ if (seq_set_unq.test(s)) {
+ ubatch.seq_idx[s] = ubatch.seq_id_unq.size();
+ ubatch.seq_id_unq.push_back(s);
+ }
+ }
+
+ llama_ubatch res {
+ /*.equal_seqs =*/ equal_seqs,
+ /*.n_tokens =*/ n_tokens,
+ /*.n_seq_tokens =*/ n_tokens/n_seqs,
+ /*.n_seqs =*/ n_seqs,
+ /*.n_seqs_unq =*/ (uint32_t) ubatch.seq_id_unq.size(),
+
+ /*.token =*/ batch.token ? ubatch.token.data() : nullptr,
+ /*.embd =*/ batch.embd ? ubatch.embd.data() : nullptr,
+ /*.pos =*/ ubatch.pos.data(),
+ /*.n_seq_id =*/ ubatch.n_seq_id.data(),
+ /*.seq_id =*/ ubatch.seq_id.data(),
+ /*.seq_id_unq =*/ ubatch.seq_id_unq.data(),
+ /*.seq_idx =*/ ubatch.seq_idx.data(),
+ /*.output =*/ ubatch.output.data(),
+ };
+
+ if (debug > 0) {
+ LLAMA_LOG_DEBUG("%s: added ubatch %d to split:\n", __func__, (int) ubatches.size() - 1);
+
+ ubatch_print(res, debug);
+ }
+
+ return res;
+}
+
+void llama_batch_allocr::ubatch_print(const llama_ubatch & ubatch, int debug) {
+ if (debug > 0) {
+ LLAMA_LOG_DEBUG("%s: equal_seqs = %d\n", __func__, ubatch.equal_seqs);
+ LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, ubatch.n_tokens);
+ LLAMA_LOG_DEBUG("%s: n_seq_tokens = %d\n", __func__, ubatch.n_seq_tokens);
+ LLAMA_LOG_DEBUG("%s: n_seqs = %d\n", __func__, ubatch.n_seqs);
+ LLAMA_LOG_DEBUG("%s: n_seqs_unq = %d\n", __func__, ubatch.n_seqs_unq);
+
+ std::stringstream ss_seq_id_unq;
+ std::stringstream ss_seq_idx;
+
+ ss_seq_id_unq << "[ ";
+ ss_seq_idx << "[";
+
+ for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
+ ss_seq_id_unq << ubatch.seq_id_unq[s] << " ";
+ }
+
+ for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
+ if (ubatch.seq_idx[s] >= 0) {
+ ss_seq_idx << ubatch.seq_idx[s]%10;
+ } else {
+ ss_seq_idx << ".";
+ }
+ }
+
+ ss_seq_id_unq << "]";
+ ss_seq_idx << "]";
+
+ LLAMA_LOG_DEBUG("%s: token = %p\n", __func__, (void *) ubatch.token);
+ LLAMA_LOG_DEBUG("%s: embd = %p\n", __func__, (void *) ubatch.embd);
+ LLAMA_LOG_DEBUG("%s: pos = %p\n", __func__, (void *) ubatch.pos);
+ LLAMA_LOG_DEBUG("%s: n_seq_id = %p\n", __func__, (void *) ubatch.n_seq_id);
+ LLAMA_LOG_DEBUG("%s: seq_id = %p\n", __func__, (void *) ubatch.seq_id);
+ LLAMA_LOG_DEBUG("%s: seq_id_unq = %s\n", __func__, ss_seq_id_unq.str().c_str());
+ LLAMA_LOG_DEBUG("%s: seq_idx = %s\n", __func__, ss_seq_idx.str().c_str());
+ LLAMA_LOG_DEBUG("%s: output = %p\n", __func__, (void *) ubatch.output);
+ LLAMA_LOG_DEBUG("%s: n_outputs = %d\n", __func__, n_outputs);
+
+ if (debug > 1) {
+ int seq_id_max = 0;
+ for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
+ for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
+ seq_id_max = std::max(seq_id_max, ubatch.seq_id[i][s]);
+ }
+ }
+ ++seq_id_max;
+
+ LLAMA_LOG_DEBUG("%s: token = [\n", __func__);
+ for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
+ std::vector<int8_t> seq_id(seq_id_max);
+
+ for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
+ seq_id[ubatch.seq_id[i][s]] = 1;
+ }
+
+ std::stringstream ss;
+ for (int s = 0; s < seq_id_max; ++s) {
+ if (seq_id[s]) {
+ ss << s%10;
+ } else {
+ ss << ".";
+ }
+ }
+
+ if (ubatch.token) {
+ LLAMA_LOG_DEBUG("%s: %4d: id = %6d (%16s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
+ __func__, i, ubatch.token[i], vocab->token_to_piece(ubatch.token[i]).c_str(),
+ ubatch.pos[i], ubatch.n_seq_id[i], ss.str().c_str(), ubatch.output[i]);
+ } else {
+ LLAMA_LOG_DEBUG("%s: %4d: [embd], pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
+ __func__, i, ubatch.pos[i], ubatch.n_seq_id[i], ss.str().c_str(), ubatch.output[i]);
+ }
+ }
+ LLAMA_LOG_DEBUG("%s: ]\n", __func__);
+ }
+ }
}
//
llama_token * tokens,
int32_t n_tokens) {
return {
- /*n_tokens =*/ n_tokens,
- /*tokens =*/ tokens,
- /*embd =*/ nullptr,
- /*pos =*/ nullptr,
- /*n_seq_id =*/ nullptr,
- /*seq_id =*/ nullptr,
- /*logits =*/ nullptr,
+ /*n_tokens =*/ n_tokens,
+ /*tokens =*/ tokens,
+ /*embd =*/ nullptr,
+ /*pos =*/ nullptr,
+ /*n_seq_id =*/ nullptr,
+ /*seq_id =*/ nullptr,
+ /*logits =*/ nullptr,
};
}
struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
llama_batch batch = {
- /*n_tokens =*/ 0,
- /*tokens =*/ nullptr,
- /*embd =*/ nullptr,
- /*pos =*/ nullptr,
- /*n_seq_id =*/ nullptr,
- /*seq_id =*/ nullptr,
- /*logits =*/ nullptr,
+ /*n_tokens =*/ 0,
+ /*tokens =*/ nullptr,
+ /*embd =*/ nullptr,
+ /*pos =*/ nullptr,
+ /*n_seq_id =*/ nullptr,
+ /*seq_id =*/ nullptr,
+ /*logits =*/ nullptr,
};
if (embd) {
#include "llama.h"
+#include "llama-cparams.h"
+
#include <array>
#include <vector>
#include <set>
+#include <bitset>
+#include <unordered_map>
-// very similar to llama_batch,
-// but has more metadata about sequences
+// keep this struct lightweight
+// it points to data in `llama_batch_allocr`
struct llama_ubatch {
bool equal_seqs;
// TODO: whole_seqs for embeddings?
uint32_t n_tokens; // total tokens (n_seq_tokens * n_seqs)
- uint32_t n_seq_tokens; // tokens per sequence
- uint32_t n_seqs;
-
- llama_token * token; // [n_tokens]
- float * embd; // [n_embd, n_tokens]
- llama_pos * pos; // [n_tokens]
- int32_t * n_seq_id; // [n_seqs]
- llama_seq_id ** seq_id; // [n_seqs]
- int8_t * output; // [n_tokens]
-};
-
-struct llama_sbatch_seq {
- int32_t n_seq_id;
-
- llama_seq_id * seq_id;
-
- size_t offset;
- size_t length;
-};
-
-// sequence-length-aware batch splitting
-struct llama_sbatch {
- // tokens left in this batch
- size_t n_tokens;
-
- size_t n_embd;
-
- // sorted indices into the batch
- std::vector<int64_t> ids;
- // batch indices of the output
- std::vector<int64_t> out_ids;
- std::vector<llama_sbatch_seq> seq;
-
- const llama_batch * batch = nullptr;
-
- // buffers for the ubatches
- // TODO: very hacky, this needs a complete rework
- struct ubatch_data {
- std::vector<llama_token> token;
- std::vector<float> embd;
- std::vector<llama_pos> pos;
- std::vector<int32_t> n_seq_id;
- std::vector<llama_seq_id *> seq_id;
- std::vector<int8_t> output;
- };
-
- std::vector<ubatch_data> udatas;
-
- llama_ubatch reserve_ubatch(size_t n_ubatch, bool has_embd = false);
-
- void add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length);
-
- // simple split, unknown number of sequences of unequal lengths
- llama_ubatch split_simple(size_t n_ubatch);
-
- // make batches of equal-length sequences
- llama_ubatch split_equal(size_t n_ubatch);
-
- // sequence-wise split
- llama_ubatch split_seq(size_t n_ubatch);
-
- llama_sbatch() = default;
- llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split = false);
+ uint32_t n_seq_tokens; // tokens per sequence set
+ uint32_t n_seqs; // sequence sets in the ubatch
+ uint32_t n_seqs_unq; // unique sequence ids in the ubatch
+
+ // seq_id_unq: unique sequence ids in the ubatch
+ // seq_idx: for each seq_id, its index in seq_id_unq (i.e. in [0, n_seqs_unq)), or -1 if the sequence is not in the ubatch
+ // used for extracting sequence pooled embeddings
+
+ // // size | idx | val
+ llama_token * token; // [n_tokens] | i | id, token
+ float * embd; // [n_embd, n_tokens] | i | embd
+ llama_pos * pos; // [n_tokens] | i | pos
+ int32_t * n_seq_id; // [n_tokens] | i | -
+ llama_seq_id ** seq_id; // [n_tokens] | s | s0, s1, seq_id
+ llama_seq_id * seq_id_unq; // [n_seqs_unq] | s | seq_id
+ int32_t * seq_idx; // [LLAMA_MAX_SEQ] | - | seq_idx
+ int8_t * output; // [n_tokens] | i | -
};
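The new seq_id_unq/seq_idx pair replaces the old "first seq_id of each split" indexing when extracting per-sequence pooled outputs. Condensed from the encode/decode paths elsewhere in this change:

for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
    const llama_seq_id seq_id  = ubatch.seq_id_unq[s];    // the actual sequence id
    const int32_t      seq_idx = ubatch.seq_idx[seq_id];  // its row in the pooled output tensor

    embd_seq_out[seq_id].resize(n_embd);
    ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(),
            (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float));
}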
-// a helper for sanitizing and fulfilling a batch
+// a helper for sanitizing, fulfilling and splitting a batch
class llama_batch_allocr {
public:
- llama_batch_allocr();
+ llama_batch_allocr(uint32_t n_pos_per_embd);
// sanitize and auto-gen missing data in the input batch
// memory is optional. if provided will be used to check for sequence continuity and to determine the positions
const llama_batch & batch_inp,
const llama_vocab & vocab,
const llama_memory_i * memory,
- bool embd_all);
+ uint32_t n_embd,
+ bool output_all);
const llama_batch & get_batch() const;
+ uint32_t get_n_tokens() const;
uint32_t get_n_outputs() const;
+ // the array of output indices in the order they were encountered during the ubatch splitting
+ std::vector<int32_t> & get_out_ids();
+
+ // min/max positions of each sequence in the input batch
llama_pos seq_pos_min(llama_seq_id seq_id) const;
llama_pos seq_pos_max(llama_seq_id seq_id) const;
+ // call once before splitting the batch to reset the internal state
+ void split_reset();
+
+ // simple split, unknown number of sequence sets of unequal lengths
+ llama_ubatch split_simple(uint32_t n_ubatch);
+
+ // make ubatches of equal-length sequence sets
+ llama_ubatch split_equal(uint32_t n_ubatch);
+
+ // sequence-set-wise split - each ubatch contains a single sequence-set
+ llama_ubatch split_seq(uint32_t n_ubatch);
+
+ // a helper method for creating a well-defined ubatch of tokens
+ // TODO: support embeddings if needed in the future
+ llama_ubatch ubatch_reserve(uint32_t n_seq_tokens, uint32_t n_seqs);
+
private:
void clear();
+ // create the next ubatch based on the provided batch indices (idxs) and the number of sequence sets (n_seqs)
+ // note: the split_*() methods above return a ubatch with n_tokens == 0 once the entire batch has been consumed
+ llama_ubatch ubatch_add(const std::vector<int32_t> & idxs, uint32_t n_seqs, bool equal_seqs);
+
+ // for debugging, start with LLAMA_BATCH_DEBUG=2
+ void ubatch_print(const llama_ubatch & ubatch, int debug);
+
llama_batch batch;
+ // only for debugging purposes
+ const llama_vocab * vocab;
+
+ // TODO: this is more of a temporary solution until we have a better way to handle multiple positions per token/embd
+ // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762
+ const uint32_t n_pos_per_embd;
+
+ uint32_t n_embd;
uint32_t n_outputs;
std::array<llama_seq_id, 1> seq_id_0 = { 0 }; // default sequence id
std::vector<llama_pos> pos;
std::vector<int32_t> n_seq_id;
std::vector<llama_seq_id *> seq_id;
+ std::vector<llama_seq_id> seq_id_unq;
+ std::vector<int32_t> seq_idx;
std::vector<int8_t> output;
- std::vector<std::set<llama_pos>> seq_pos; // seq_pos[s]: the set of positions in sequence s
- std::vector<std::vector<bool>> seq_cpl; // seq_cpl[s0][s1]: if sequence s0 is coupled to sequence s1
+ using pos_set_t = std::set<llama_pos>;
+ using seq_cpl_t = std::vector<bool>;
+
+ std::vector<pos_set_t> seq_pos; // seq_pos[s]: the set of positions in sequence s
+ std::vector<seq_cpl_t> seq_cpl; // seq_cpl[s0][s1]: if sequence s0 is coupled to sequence s1
+
+ using idx_vec_t = std::vector<int32_t>;
+ using seq_set_t = std::bitset<LLAMA_MAX_SEQ>;
+
+ std::vector<seq_set_t> seq_set; // seq_set[i]: the sequence set of token i
+
+ std::unordered_map<seq_set_t, idx_vec_t> seq_set_map; // the indices at which the sequence set appears
+
+ // batch indices of the output
+ std::vector<int32_t> out_ids;
+
+ // used[i] indicates if token i has already been used in a previous ubatch
+ std::vector<bool> used;
+
+ // llama_ubatch points to this data:
+ struct ubatch {
+ std::vector<llama_token> token;
+ std::vector<float> embd;
+ std::vector<llama_pos> pos;
+ std::vector<int32_t> n_seq_id;
+ std::vector<llama_seq_id *> seq_id;
+ std::vector<llama_seq_id> seq_id_unq;
+ std::vector<int32_t> seq_idx;
+ std::vector<int8_t> output;
+ };
+
+ // current splitting state:
+ std::vector<ubatch> ubatches;
int debug;
};
std::string role(message->role);
if (role == "system") {
// there is no system message for gemma, but we will merge it with user prompt, so nothing is broken
- system_prompt = trim(message->content);
+ system_prompt += trim(message->content);
continue;
}
// in gemma, "assistant" is "model"
std::string role(message->role);
if (role == "system") {
// there is no system message support, we will merge it with user prompt
- system_prompt = message->content;
+ system_prompt += message->content;
continue;
} else if (role == "user") {
ss << "Human: ";
const llama_model & model,
llama_context_params params) :
model(model),
- batch_allocr(std::make_unique<llama_batch_allocr>()) {
+ balloc(std::make_unique<llama_batch_allocr>(model.hparams.n_pos_per_embd())) {
LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__);
t_start_us = model.t_start_us;
}
int llama_context::encode(const llama_batch & batch_inp) {
+ GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT
+
if (batch_inp.n_tokens == 0) {
LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
return -1;
}
+ const auto & hparams = model.hparams;
+
+ const int64_t n_embd = hparams.n_embd;
+
// note: during encode, we always pass the full sequence starting from pos = 0
- if (!batch_allocr->init(batch_inp, model.vocab, nullptr, true)) {
+ if (!balloc->init(batch_inp, model.vocab, nullptr, n_embd, true)) {
LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
return -1;
}
- const llama_batch & batch = batch_allocr->get_batch();
+ const uint32_t n_tokens = balloc->get_n_tokens();
- const uint32_t n_tokens = batch.n_tokens;
-
- GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
+ const llama_ubatch ubatch = balloc->split_simple(n_tokens);
// micro-batching is not possible for non-causal encoding, so we process the batch in a single shot
GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens");
n_queued_tokens += n_tokens;
- const auto & hparams = model.hparams;
-
- const int64_t n_embd = hparams.n_embd;
-
- llama_sbatch sbatch = llama_sbatch(batch, n_embd, /* simple_split */ true);
-
- const llama_ubatch ubatch = sbatch.split_simple(n_tokens);
-
// reserve output buffer
if (output_reserve(n_tokens) < n_tokens) {
LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens);
{
// extract sequence embeddings
auto & embd_seq_out = embd_seq;
- embd_seq_out.clear();
- GGML_ASSERT(!ubatch.equal_seqs); // TODO: handle equal splits
+ for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
+ const llama_seq_id seq_id = ubatch.seq_id_unq[s];
+ const int32_t seq_idx = ubatch.seq_idx[seq_id];
- // TODO: fix indexing [UBATCH_IDX]
- for (uint32_t i = 0; i < n_tokens; i++) {
- const llama_seq_id seq_id = ubatch.seq_id[i][0];
- if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
- continue;
- }
embd_seq_out[seq_id].resize(n_embd);
- ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float));
+ ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float));
}
} break;
case LLAMA_POOLING_TYPE_RANK:
{
// extract the rerank score - n_cls_out floats per sequence
auto & embd_seq_out = embd_seq;
+
const uint32_t n_cls_out = hparams.n_cls_out;
- // TODO: fix indexing [UBATCH_IDX]
- for (uint32_t s = 0; s < ubatch.n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch.seq_id[s][0];
- if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
- continue;
- }
+ for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
+ const llama_seq_id seq_id = ubatch.seq_id_unq[s];
+ const int32_t seq_idx = ubatch.seq_idx[seq_id];
+
embd_seq_out[seq_id].resize(n_cls_out);
- ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_id)*sizeof(float), n_cls_out*sizeof(float));
+ ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float));
}
} break;
case LLAMA_POOLING_TYPE_UNSPECIFIED:
cross.v_embd.resize(cross.n_embd*cross.n_enc);
memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd));
+ const auto & batch = balloc->get_batch();
+
// remember the sequence ids used during the encoding - needed for cross attention later
cross.seq_ids_enc.resize(n_tokens);
for (uint32_t i = 0; i < n_tokens; i++) {
cross.seq_ids_enc[i].clear();
+
for (int s = 0; s < batch.n_seq_id[i]; s++) {
- llama_seq_id seq_id = batch.seq_id[i][s];
+ const llama_seq_id seq_id = batch.seq_id[i][s];
+
cross.seq_ids_enc[i].insert(seq_id);
}
}
}
int llama_context::decode(const llama_batch & batch_inp) {
+ GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT
+
if (!memory) {
LLAMA_LOG_DEBUG("%s: cannot decode batches with this context (calling encode() instead)\n", __func__);
return encode(batch_inp);
return -1;
}
- // when computing embeddings, all tokens are output
- const bool embd_all = cparams.embeddings;
-
- if (!batch_allocr->init(batch_inp, model.vocab, memory.get(), embd_all)) {
- LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
- return -1;
- }
-
- const llama_batch & batch = batch_allocr->get_batch();
-
const auto & vocab = model.vocab;
const auto & hparams = model.hparams;
const int32_t n_vocab = vocab.n_tokens();
const int64_t n_embd = hparams.n_embd;
- const uint32_t n_tokens_all = batch.n_tokens;
+ // when computing embeddings, all tokens are output
+ const bool output_all = cparams.embeddings;
- GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
+ if (!balloc->init(batch_inp, vocab, memory.get(), n_embd, output_all)) {
+ LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
+ return -1;
+ }
- const uint32_t n_outputs_all = batch_allocr->get_n_outputs();
+ const uint32_t n_tokens_all = balloc->get_n_tokens();
+ const uint32_t n_outputs_all = balloc->get_n_outputs();
- if (embd_all) {
+ if (output_all) {
// require that all tokens are output
if (n_outputs_all != n_tokens_all) {
LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n",
llama_memory_state_ptr mstate;
while (true) {
- mstate = memory->init_batch(batch, cparams.n_ubatch, embd_all);
+ mstate = memory->init_batch(*balloc, cparams.n_ubatch, output_all);
if (!mstate) {
return -2;
}
did_optimize = true;
if (kv_self_update(true)) {
- LLAMA_LOG_DEBUG("%s: retrying batch size %d after cache optimization\n", __func__, batch.n_tokens);
+ LLAMA_LOG_DEBUG("%s: retrying batch size %d after cache optimization\n", __func__, balloc->get_n_tokens());
continue;
}
}
- LLAMA_LOG_WARN("%s: failed to find a memory slot for batch of size %d\n", __func__, batch.n_tokens);
+ LLAMA_LOG_WARN("%s: failed to find a memory slot for batch of size %d\n", __func__, balloc->get_n_tokens());
return 1;
}
case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
{
- LLAMA_LOG_ERROR("%s: compute failed while preparing batch of size %d\n", __func__, batch.n_tokens);
+ LLAMA_LOG_ERROR("%s: compute failed while preparing batch of size %d\n", __func__, balloc->get_n_tokens());
return -2;
}
if (n_outputs_all == n_tokens_all) {
n_outputs_new = ubatch.n_tokens;
} else {
- GGML_ASSERT(ubatch.output);
for (uint32_t i = 0; i < ubatch.n_tokens; i++) {
n_outputs_new += (int32_t) (ubatch.output[i] != 0);
}
// extract sequence embeddings (cleared before processing each batch)
auto & embd_seq_out = embd_seq;
- for (uint32_t s = 0; s < ubatch.n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch.seq_id[s][0];
- if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
- continue;
- }
+ for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
+ const llama_seq_id seq_id = ubatch.seq_id_unq[s];
+ const int32_t seq_idx = ubatch.seq_idx[seq_id];
+
embd_seq_out[seq_id].resize(n_embd);
- ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float));
+ ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float));
}
} break;
case LLAMA_POOLING_TYPE_RANK:
{
- // extract the rerank score - a single float per sequence
+ // extract the rerank score - n_cls_out floats per sequence
auto & embd_seq_out = embd_seq;
- for (uint32_t s = 0; s < ubatch.n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch.seq_id[s][0];
- if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
- continue;
- }
- embd_seq_out[seq_id].resize(1);
- ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (seq_id)*sizeof(float), sizeof(float));
+ const uint32_t n_cls_out = hparams.n_cls_out;
+
+ for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
+ const llama_seq_id seq_id = ubatch.seq_id_unq[s];
+ const int32_t seq_idx = ubatch.seq_idx[seq_id];
+
+ embd_seq_out[seq_id].resize(n_cls_out);
+ ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float));
}
} break;
case LLAMA_POOLING_TYPE_UNSPECIFIED:
if (n_outputs > 0) {
bool sorted_output = true;
- auto & out_ids = mstate->out_ids();
+ auto & out_ids = balloc->get_out_ids();
GGML_ASSERT(out_ids.size() == (size_t) n_outputs);
this->n_outputs = n_outputs;
- llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
- llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr};
+ llama_batch_allocr balloc(model.hparams.n_pos_per_embd());
+ llama_ubatch ubatch = balloc.ubatch_reserve(n_tokens/n_seqs, n_seqs);
auto * gf = graph_init();
auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, mstate);
batch.logits [pos_batch] = true;
}
- const auto n_tokens_all = batch.n_tokens;
+ if (!balloc->init(batch, model.vocab, nullptr, model.hparams.n_embd, true)) {
+ LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
+ return;
+ }
+
+ const uint32_t n_tokens_all = balloc->get_n_tokens();
n_queued_tokens += n_tokens_all;
uint32_t n_outputs_all = n_tokens_all;
- auto mstate = memory->init_batch(batch, cparams.n_ubatch, true);
+ auto mstate = memory->init_batch(*balloc, cparams.n_ubatch, true);
if (!mstate || mstate->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__);
break;
std::map<llama_seq_id, std::vector<float>> embd_seq;
// reuse the batch_allocr to avoid unnecessary memory allocations
- std::unique_ptr<llama_batch_allocr> batch_allocr;
+ std::unique_ptr<llama_batch_allocr> balloc;
uint32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch
#include "llama-kv-cache-unified.h"
#include "llama-kv-cache-unified-iswa.h"
-#include "llama-kv-cache-recurrent.h"
+#include "llama-memory-hybrid.h"
+#include "llama-memory-recurrent.h"
#include <cassert>
#include <cmath>
}
void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) {
- if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) {
- //GGML_ASSERT(out_ids && "every model that can must skip unused outputs");
+ GGML_ASSERT(out_ids);
- if (!out_ids) {
- LLAMA_LOG_WARN("%s: 'out_ids' is not created\n", __func__);
- } else {
- const int64_t n_tokens = ubatch->n_tokens;
+ const int64_t n_tokens = ubatch->n_tokens;
- GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer));
- int32_t * data = (int32_t *) out_ids->data;
+ GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer));
+ int32_t * data = (int32_t *) out_ids->data;
- if (n_outputs == n_tokens) {
- for (int i = 0; i < n_tokens; ++i) {
- data[i] = i;
- }
- } else if (ubatch->output) {
- int32_t n_outputs = 0;
- for (int i = 0; i < n_tokens; ++i) {
- if (ubatch->output[i]) {
- data[n_outputs++] = i;
- }
- }
- // the graph needs to have been passed the correct number of outputs
- GGML_ASSERT(n_outputs == n_outputs);
- } else if (n_outputs == 1) {
- // only keep last output
- data[0] = n_tokens - 1;
- } else {
- GGML_ASSERT(n_outputs == 0);
- }
+ if (n_outputs == n_tokens) {
+ for (int i = 0; i < n_tokens; ++i) {
+ data[i] = i;
+ }
+
+ return;
+ }
+
+ GGML_ASSERT(ubatch->output);
+
+ int n_outputs = 0;
+
+ for (int i = 0; i < n_tokens; ++i) {
+ if (ubatch->output[i]) {
+ data[n_outputs++] = i;
}
}
}
if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
const int64_t n_tokens = ubatch->n_tokens;
const int64_t n_seq_tokens = ubatch->n_seq_tokens;
- const int64_t n_seqs = ubatch->n_seqs;
+ const int64_t n_seqs_unq = ubatch->n_seqs_unq;
GGML_ASSERT(mean);
GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer));
float * data = (float *) mean->data;
- memset(mean->data, 0, n_tokens * n_tokens * ggml_element_size(mean));
+ memset(mean->data, 0, n_tokens*n_seqs_unq*ggml_element_size(mean));
- std::vector<uint64_t> sum(n_tokens, 0);
+ std::vector<uint64_t> sums(n_seqs_unq, 0);
+ for (int i = 0; i < n_tokens; i += n_seq_tokens) {
+ for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
+ const llama_seq_id seq_id = ubatch->seq_id[i][s];
+ const int32_t seq_idx = ubatch->seq_idx[seq_id];
- // TODO: fix indexing [UBATCH_IDX]
- for (int s = 0; s < n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch->seq_id[s][0];
-
- // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true
- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN");
-
- sum[seq_id] += ubatch->n_seq_tokens;
+ sums[seq_idx] += ubatch->n_seq_tokens;
+ }
}
- std::vector<float> div(n_tokens, 0.0f);
- for (int i = 0; i < n_tokens; ++i) {
- const uint64_t s = sum[i];
- if (s > 0) {
- div[i] = 1.0f/float(s);
+ std::vector<float> div(n_seqs_unq, 0.0f);
+ for (int s = 0; s < n_seqs_unq; ++s) {
+ const uint64_t sum = sums[s];
+ if (sum > 0) {
+ div[s] = 1.0f/float(sum);
}
}
- // TODO: fix indexing [UBATCH_IDX]
- for (int s = 0; s < n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch->seq_id[s][0];
+ for (int i = 0; i < n_tokens; i += n_seq_tokens) {
+ for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
+ const llama_seq_id seq_id = ubatch->seq_id[i][s];
+ const int32_t seq_idx = ubatch->seq_idx[seq_id];
- for (int i = 0; i < n_seq_tokens; ++i) {
- data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id];
+ for (int j = 0; j < n_seq_tokens; ++j) {
+ data[seq_idx*n_tokens + i + j] = div[seq_idx];
+ }
}
}
}
}
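A small worked example of the mean matrix built above (illustrative values): with an equal-split ubatch of n_seq_tokens = 2 where tokens 0-1 belong to sequence 0 and tokens 2-3 to sequence 1, we get n_tokens = 4, n_seqs_unq = 2, sums = {2, 2} and div = {0.5, 0.5}, so the [n_tokens x n_seqs_unq] matrix stored in data is

    row seq_idx 0: 0.5 0.5 0.0 0.0
    row seq_idx 1: 0.0 0.0 0.5 0.5

i.e. applying it to the token embeddings yields one averaged embedding per unique sequence.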
void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) {
- if (cparams.embeddings && (
- cparams.pooling_type == LLAMA_POOLING_TYPE_CLS ||
- cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) {
- const int64_t n_tokens = ubatch->n_tokens;
- const int64_t n_seq_tokens = ubatch->n_seq_tokens;
- const int64_t n_seqs = ubatch->n_seqs;
+ const int64_t n_tokens = ubatch->n_tokens;
+ const int64_t n_seq_tokens = ubatch->n_seq_tokens;
+ const int64_t n_seqs_unq = ubatch->n_seqs_unq;
+ if (cparams.embeddings && (
+ cparams.pooling_type == LLAMA_POOLING_TYPE_CLS ||
+ cparams.pooling_type == LLAMA_POOLING_TYPE_RANK
+ )) {
GGML_ASSERT(cls);
GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));
uint32_t * data = (uint32_t *) cls->data;
- memset(cls->data, 0, n_tokens * ggml_element_size(cls));
-
- // TODO: fix indexing [UBATCH_IDX]
- for (int s = 0; s < n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch->seq_id[s][0];
-
- // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true
- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK");
+ memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls));
- for (int i = 0; i < n_seq_tokens; ++i) {
- const llama_pos pos = ubatch->pos[s*n_seq_tokens + i];
+ for (int i = 0; i < n_tokens; i += n_seq_tokens) {
+ for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
+ const llama_seq_id seq_id = ubatch->seq_id[i][s];
+ const int32_t seq_idx = ubatch->seq_idx[seq_id];
- if (pos == 0) {
- data[seq_id] = s*n_seq_tokens + i;
- }
+ data[seq_idx] = i;
}
}
}
if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) {
- const int64_t n_tokens = ubatch->n_tokens;
- const int64_t n_seq_tokens = ubatch->n_seq_tokens;
- const int64_t n_seqs = ubatch->n_seqs;
-
GGML_ASSERT(cls);
GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));
uint32_t * data = (uint32_t *) cls->data;
- memset(cls->data, 0, n_tokens * ggml_element_size(cls));
-
- std::vector<int> last_pos(n_tokens, -1);
- std::vector<int> last_row(n_tokens, -1);
+ memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls));
- // TODO: fix indexing [UBATCH_IDX]
- for (int s = 0; s < n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch->seq_id[s][0];
+ std::vector<int> last_pos(n_seqs_unq, -1);
+ std::vector<int> last_row(n_seqs_unq, -1);
- // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true
- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST");
+ for (int i = 0; i < n_tokens; ++i) {
+ const llama_pos pos = ubatch->pos[i];
- for (int i = 0; i < n_seq_tokens; ++i) {
- const llama_pos pos = ubatch->pos[s*n_seq_tokens + i];
+ for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
+ const llama_seq_id seq_id = ubatch->seq_id[i][s];
+ const int32_t seq_idx = ubatch->seq_idx[seq_id];
- if (pos >= last_pos[seq_id]) {
- last_pos[seq_id] = pos;
- last_row[seq_id] = s*n_seq_tokens + i;
+ if (pos >= last_pos[seq_idx]) {
+ last_pos[seq_idx] = pos;
+ last_row[seq_idx] = i;
}
}
}
- for (int i = 0; i < n_tokens; ++i) {
- if (last_row[i] >= 0) {
- data[i] = last_row[i];
+ for (int s = 0; s < n_seqs_unq; ++s) {
+ if (last_row[s] >= 0) {
+ data[s] = last_row[s];
}
}
}
}
-void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) {
+void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) {
GGML_UNUSED(ubatch);
- const int64_t n_kv = kv_state->get_n_kv();
+ const int64_t n_rs = mem_state->get_n_rs();
if (s_copy) {
GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer));
int32_t * data = (int32_t *) s_copy->data;
// assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
- for (uint32_t i = 0; i < n_kv; ++i) {
- data[i] = kv_state->s_copy(i);
+ for (uint32_t i = 0; i < n_rs; ++i) {
+ data[i] = mem_state->s_copy(i);
}
}
}
}
void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) {
- if (kq_mask) {
- if (cparams.causal_attn) {
- const int64_t n_kv = ubatch->n_tokens;
- const int64_t n_tokens = ubatch->n_tokens;
- const int64_t n_seq_tokens = ubatch->n_seq_tokens;
- const int64_t n_seqs = ubatch->n_seqs;
-
- GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer));
- float * data = (float *) kq_mask->data;
-
- for (int h = 0; h < 1; ++h) {
- for (int s1 = 0; s1 < n_seqs; ++s1) {
- const llama_seq_id seq_id = ubatch->seq_id[s1][0];
-
- for (int j = 0; j < n_seq_tokens; ++j) {
- const int32_t tj = s1*n_seq_tokens + j;
-
- for (int s0 = 0; s0 < n_seqs; ++s0) {
- for (int i = 0; i < n_seq_tokens; ++i) {
- const int32_t ti = s0*n_seq_tokens + i;
- float f = -INFINITY;
-
- // TODO: fix indexing [UBATCH_IDX]
- for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) {
- if (ubatch->seq_id[s0][s] == seq_id && ubatch->pos[ti] <= ubatch->pos[tj]) {
- if (hparams.use_alibi) {
- f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]);
- } else {
- f = 0.0f;
- }
- break;
- }
- }
-
- data[h*(n_kv*n_tokens) + tj*n_kv + ti] = f;
- }
- }
- }
- }
- }
- } else {
- const int64_t n_tokens = ubatch->n_tokens;
- const int64_t n_seq_tokens = ubatch->n_seq_tokens;
- const int64_t n_seqs = ubatch->n_seqs;
- const int64_t n_stride = ubatch->n_tokens;
-
- GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer));
-
- float * data = (float *) kq_mask->data;
-
- for (int h = 0; h < 1; ++h) {
- for (int s1 = 0; s1 < n_seqs; ++s1) {
- const llama_seq_id seq_id = ubatch->seq_id[s1][0];
-
- for (int j = 0; j < n_seq_tokens; ++j) {
- const int32_t tj = s1*n_seq_tokens + j;
-
- for (int s0 = 0; s0 < n_seqs; ++s0) {
- for (int i = 0; i < n_seq_tokens; ++i) {
- const int32_t ti = s0*n_seq_tokens + i;
- float f = -INFINITY;
-
- // TODO: fix indexing [UBATCH_IDX]
- for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) {
- if (ubatch->seq_id[s0][s] == seq_id) {
- if (hparams.use_alibi) {
- f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]);
- } else {
- f = 0.0f;
- }
- break;
- }
- }
-
- data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f;
- }
- }
+ const int64_t n_kv = ubatch->n_tokens;
+ const int64_t n_tokens = ubatch->n_tokens;
+
+ GGML_ASSERT(kq_mask);
+ GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer));
+
+ float * data = (float *) kq_mask->data;
+
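+ // the mask is [n_kv, n_tokens] (here n_kv == n_tokens): entry (i0, i1) is 0.0f, or -|pos[i0] - pos[i1]| with ALiBi,
+ // when token i1 may attend to token i0 (shared sequence and, for causal attention, pos[i0] <= pos[i1]); otherwise -INFINITY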
+ for (int h = 0; h < 1; ++h) {
+ for (int i1 = 0; i1 < n_tokens; ++i1) {
+ const llama_seq_id s1 = ubatch->seq_id[i1][0];
+
+ for (int i0 = 0; i0 < n_tokens; ++i0) {
+ float f = -INFINITY;
+
+ for (int s = 0; s < ubatch->n_seq_id[i0]; ++s) {
+ const llama_seq_id s0 = ubatch->seq_id[i0][s];
- for (int i = n_tokens; i < n_stride; ++i) {
- data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY;
+ // TODO: reimplement this like in llama_kv_cache_unified
+ if (s0 == s1 && (!cparams.causal_attn || ubatch->pos[i0] <= ubatch->pos[i1])) {
+ if (hparams.use_alibi) {
+ f = -std::abs(ubatch->pos[i0] - ubatch->pos[i1]);
+ } else {
+ f = 0.0f;
}
+ break;
}
}
+
+ data[h*(n_kv*n_tokens) + i1*n_kv + i0] = f;
}
}
}
}
void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
- if (cross_kq_mask) {
- const int64_t n_enc = cross_kq_mask->ne[0];
- const int64_t n_tokens = ubatch->n_tokens;
+ GGML_ASSERT(cross_kq_mask);
- GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer));
- GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing
+ const int64_t n_enc = cross_kq_mask->ne[0];
+ const int64_t n_tokens = ubatch->n_tokens;
- float * data = (float *) cross_kq_mask->data;
+ GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer));
+ GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing
- for (int h = 0; h < 1; ++h) {
- for (int j = 0; j < n_tokens; ++j) {
- for (int i = 0; i < n_enc; ++i) {
- float f = -INFINITY;
- // TODO: fix indexing [UBATCH_IDX]
- for (int s = 0; s < ubatch->n_seq_id[j]; ++s) {
- const llama_seq_id seq_id = ubatch->seq_id[j][s];
- if (cross->seq_ids_enc[i].find(seq_id) != cross->seq_ids_enc[i].end()) {
- f = 0.0f;
- }
+ float * data = (float *) cross_kq_mask->data;
+
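+ // cross-attention mask: token i may attend to encoder output j only if one of its sequence ids
+ // is present in the set of sequences that produced that encoder output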
+ for (int h = 0; h < 1; ++h) {
+ for (int i = 0; i < n_tokens; ++i) {
+ for (int j = 0; j < n_enc; ++j) {
+ float f = -INFINITY;
+
+ for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
+ const llama_seq_id seq_id = ubatch->seq_id[i][s];
+
+ if (cross->seq_ids_enc[j].find(seq_id) != cross->seq_ids_enc[j].end()) {
+ f = 0.0f;
}
- data[h*(n_enc*n_tokens) + j*n_enc + i] = f;
}
+
+ data[h*(n_enc*n_tokens) + i*n_enc + j] = f;
}
+ }
- for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
- for (int j = 0; j < n_enc; ++j) {
- data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY;
- }
+ for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
+ for (int j = 0; j < n_enc; ++j) {
+ data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY;
}
}
}
}
+void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) {
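+ // the hybrid memory state exposes both an attention KV state and a recurrent state,
+ // so fill the self-attention KQ mask and the recurrent-state copy indices here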
+ if (self_kq_mask) {
+ mem_state->get_state_attn()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);
+ }
+
+ const int64_t n_rs = mem_state->get_state_recr()->get_n_rs();
+
+ if (s_copy) {
+ GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer));
+ int32_t * data = (int32_t *) s_copy->data;
+
+ // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
+ for (uint32_t i = 0; i < n_rs; ++i) {
+ data[i] = mem_state->get_state_recr()->s_copy(i);
+ }
+ }
+}
+
//
// llm_graph_context
//
res (std::make_unique<llm_graph_result>()) {
}
-int64_t llm_graph_context::n_pos_per_embd() const {
- return hparams.rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1;
-}
-
void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const {
if (cb_func) {
cb_func(ubatch, cur, name, il);
}
ggml_tensor * llm_graph_context::build_inp_pos() const {
- auto inp = std::make_unique<llm_graph_input_pos>(n_pos_per_embd());
+ auto inp = std::make_unique<llm_graph_input_pos>(hparams.n_pos_per_embd());
auto & cur = inp->pos;
- cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens*n_pos_per_embd());
+ cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, (int64_t)n_tokens*hparams.n_pos_per_embd());
ggml_set_input(cur);
res->add_input(std::move(inp));
}
ggml_tensor * llm_graph_context::build_inp_out_ids() const {
+ // note: when all tokens are output, we could skip this optimization to spare the ggml_get_rows() calls,
+ // but this would make the graph topology depend on the number of output tokens, which can interfere with
+ // features that require constant topology such as pipeline parallelism
+ // ref: https://github.com/ggml-org/llama.cpp/pull/14275#issuecomment-2987424471
+ //if (n_outputs < n_tokens) {
+ // return nullptr;
+ //}
+
auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs);
auto & cur = inp->out_ids;
auto & cur = inp->mean;
- cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
+ cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, ubatch.n_seqs_unq);
ggml_set_input(cur);
res->add_input(std::move(inp));
auto & cur = inp->cls;
- cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
- ggml_set_input(cur);
-
- res->add_input(std::move(inp));
-
- return cur;
-}
-
-ggml_tensor * llm_graph_context::build_inp_s_copy() const {
- const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
-
- auto inp = std::make_unique<llm_graph_input_s_copy>(kv_state);
-
- const auto n_kv = kv_state->get_n_kv();
-
- auto & cur = inp->s_copy;
-
- cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_kv);
+ cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_seqs_unq);
ggml_set_input(cur);
res->add_input(std::move(inp));
return pos_bias;
}
+llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const {
+ const auto * mem_state = static_cast<const llama_memory_hybrid_state *>(mstate);
+
+ auto inp = std::make_unique<llm_graph_input_mem_hybrid>(hparams, cparams, mem_state);
+
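+ // create both inputs of the hybrid memory: the KQ mask for the attention layers
+ // and the state-copy indices for the recurrent layers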
+ {
+ GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Hybrid recurrent is not supported with SWA attention layers");
+
+ const auto n_kv = inp->mem_state->get_state_attn()->get_n_kv();
+
+ inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
+ //cb(inp->self_kq_mask, "KQ_mask", -1);
+ ggml_set_input(inp->self_kq_mask);
+
+ inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
+ }
+
+ {
+ const auto n_rs = mem_state->get_state_recr()->get_n_rs();
+
+ inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs);
+ ggml_set_input(inp->s_copy);
+ }
+
+ return (llm_graph_input_mem_hybrid *) res->add_input(std::move(inp));
+}
+
ggml_tensor * llm_graph_context::build_attn_mha(
ggml_cgraph * gf,
ggml_tensor * q,
return cur;
}
-llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const {
- const auto * kv_state = static_cast<const llama_kv_cache_unified_iswa_state *>(mstate);
-
- auto inp = std::make_unique<llm_graph_input_attn_kv_unified_iswa>(hparams, cparams, kv_state);
-
- {
- const auto n_kv = kv_state->get_base()->get_n_kv();
-
- inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
- //cb(inp->self_kq_mask, "KQ_mask", -1);
- ggml_set_input(inp->self_kq_mask);
-
- inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
- }
-
- {
- GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified for non-SWA");
-
- const auto n_kv = kv_state->get_swa()->get_n_kv();
-
- inp->self_kq_mask_swa = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
- //cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1);
- ggml_set_input(inp->self_kq_mask_swa);
-
- inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa;
- }
-
- return (llm_graph_input_attn_kv_unified_iswa *) res->add_input(std::move(inp));
-}
-
ggml_tensor * llm_graph_context::build_attn(
llm_graph_input_attn_kv_unified_iswa * inp,
ggml_cgraph * gf,
return cur;
}
-ggml_tensor * llm_graph_context::build_recurrent_state(
- ggml_cgraph * gf,
- ggml_tensor * s,
- ggml_tensor * state_copy,
- int32_t state_size,
- int32_t n_seqs,
- bool avoid_copies) const {
- const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
-
- const auto n_kv = kv_state->get_n_kv();
- const auto kv_head = kv_state->get_head();
- const auto rs_zero = kv_state->get_rs_z();
+ggml_tensor * llm_graph_context::build_attn(
+ llm_graph_input_mem_hybrid * inp,
+ ggml_cgraph * gf,
+ ggml_tensor * wo,
+ ggml_tensor * wo_b,
+ ggml_tensor * q_cur,
+ ggml_tensor * k_cur,
+ ggml_tensor * v_cur,
+ ggml_tensor * kq_b,
+ ggml_tensor * v_mla,
+ float kq_scale,
+ int il) const {
+ // these nodes are added to the graph together so that they are not reordered
+ // by doing so, the number of splits in the graph is reduced
+ ggml_build_forward_expand(gf, q_cur);
+ ggml_build_forward_expand(gf, k_cur);
+ ggml_build_forward_expand(gf, v_cur);
+
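+ // the attention half of the hybrid memory is used like a regular unified KV cache for this layer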
+ const auto * kv_state = static_cast<const llama_memory_hybrid_state *>(mstate)->get_state_attn();
+
+ // store to KV cache
+ {
+ ggml_build_forward_expand(gf, kv_state->cpy_k(ctx0, k_cur, il));
+ ggml_build_forward_expand(gf, kv_state->cpy_v(ctx0, v_cur, il));
+ }
+
+ const auto & kq_mask = inp->get_kq_mask();
+
+ ggml_tensor * q = q_cur;
+ ggml_tensor * k = kv_state->get_k(ctx0, il);
+ ggml_tensor * v = kv_state->get_v(ctx0, il);
+
+ ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, kq_scale);
+ cb(cur, "kqv_out", il);
+
+ if (wo) {
+ cur = build_lora_mm(wo, cur);
+ if (arch == LLM_ARCH_GLM4) {
+ // GLM4 seems to have numerical issues with half-precision accumulators
+ ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
+ }
+ }
+
+ if (wo_b) {
+ cur = ggml_add(ctx0, cur, wo_b);
+ }
+
+ return cur;
+}
+
+llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const {
+ const auto * kv_state = static_cast<const llama_kv_cache_unified_iswa_state *>(mstate);
+
+ auto inp = std::make_unique<llm_graph_input_attn_kv_unified_iswa>(hparams, cparams, kv_state);
+
+ {
+ const auto n_kv = kv_state->get_base()->get_n_kv();
+
+ inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
+ //cb(inp->self_kq_mask, "KQ_mask", -1);
+ ggml_set_input(inp->self_kq_mask);
+
+ inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
+ }
+
+ {
+ GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified for non-SWA");
- ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, kv_state->get_size());
+ const auto n_kv = kv_state->get_swa()->get_n_kv();
+
+ inp->self_kq_mask_swa = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
+ //cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1);
+ ggml_set_input(inp->self_kq_mask_swa);
+
+ inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa;
+ }
+
+ return (llm_graph_input_attn_kv_unified_iswa *) res->add_input(std::move(inp));
+}
+
+ggml_tensor * llm_graph_context::build_rs(
+ ggml_cgraph * gf,
+ ggml_tensor * s,
+ ggml_tensor * state_copy,
+ int32_t state_size,
+ int32_t n_seqs,
+ uint32_t n_kv,
+ uint32_t kv_head,
+ uint32_t kv_size,
+ int32_t rs_zero,
+ bool avoid_copies) const {
+
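+ // view the flat state tensor as a 2D [state_size, kv_size] tensor: one state of size state_size per cell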
+ ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, kv_size);
// Clear a single state which will then be copied to the other cleared states.
// Note that this is a no-op when the view is zero-sized.
return output_states;
}
+llm_graph_input_rs * llm_graph_context::build_rs_inp() const {
+ const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
+
+ auto inp = std::make_unique<llm_graph_input_rs>(kv_state);
+
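+ // s_copy holds, for each recurrent-state slot used by this ubatch, the index of the cell to copy it from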
+ const auto n_rs = kv_state->get_n_rs();
+
+ inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs);
+ ggml_set_input(inp->s_copy);
+
+ return (llm_graph_input_rs *) res->add_input(std::move(inp));
+}
+
+ggml_tensor * llm_graph_context::build_rs(
+ llm_graph_input_rs * inp,
+ ggml_cgraph * gf,
+ ggml_tensor * s,
+ int32_t state_size,
+ int32_t n_seqs,
+ bool avoid_copies) const {
+ const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
+
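+ // forward to the generic build_rs() using the geometry of the current recurrent memory state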
+ return build_rs(gf, s, inp->s_copy, state_size, n_seqs, kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(), avoid_copies);
+}
+
+ggml_tensor * llm_graph_context::build_rs(
+ llm_graph_input_mem_hybrid * inp,
+ ggml_cgraph * gf,
+ ggml_tensor * s,
+ int32_t state_size,
+ int32_t n_seqs,
+ bool avoid_copies) const {
+ const auto * kv_state = static_cast<const llama_memory_hybrid_state *>(mstate)->get_state_recr();
+
+ return build_rs(gf, s, inp->s_copy, state_size, n_seqs, kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(), avoid_copies);
+}
+
ggml_tensor * llm_graph_context::build_rwkv_token_shift_load(
- ggml_cgraph * gf,
- ggml_tensor * state_copy,
- const llama_ubatch & ubatch,
+ llm_graph_input_rs * inp,
+ ggml_cgraph * gf,
+ const llama_ubatch & ubatch,
int il) const {
- const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
+ const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
const auto token_shift_count = hparams.token_shift_count;
const int64_t n_seqs = ubatch.n_seqs;
- ggml_tensor * token_shift_all = kv_state->get_k_l(il);
+ ggml_tensor * token_shift_all = kv_state->get_r_l(il);
- ggml_tensor * token_shift = build_recurrent_state(
- gf, token_shift_all, state_copy,
- hparams.n_embd_k_s(), n_seqs);
+ ggml_tensor * token_shift = build_rs(
+ inp, gf, token_shift_all,
+ hparams.n_embd_r(), n_seqs);
token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs);
ggml_tensor * token_shift,
const llama_ubatch & ubatch,
int il) const {
- const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
+ const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
const auto token_shift_count = hparams.token_shift_count;
const auto n_embd = hparams.n_embd;
return ggml_cpy(
ctx0,
ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0),
- ggml_view_1d(ctx0, kv_state->get_k_l(il), hparams.n_embd_k_s()*n_seqs, hparams.n_embd_k_s()*kv_head*ggml_element_size(kv_state->get_k_l(il)))
+ ggml_view_1d(ctx0, kv_state->get_r_l(il), hparams.n_embd_r()*n_seqs, hparams.n_embd_r()*kv_head*ggml_element_size(kv_state->get_r_l(il)))
);
}
class llama_kv_cache_unified_state;
class llama_kv_cache_unified_iswa_state;
-class llama_kv_cache_recurrent_state;
+class llama_memory_recurrent_state;
+class llama_memory_hybrid_state;
// certain models (typically multi-modal) can produce different types of graphs
enum llm_graph_type {
class llm_graph_input_pos : public llm_graph_input_i {
public:
- llm_graph_input_pos(int64_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {}
+ llm_graph_input_pos(uint32_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {}
virtual ~llm_graph_input_pos() = default;
void set_input(const llama_ubatch * ubatch) override;
ggml_tensor * pos = nullptr; // I32 [n_batch]
- const int64_t n_pos_per_embd = 1;
+ const uint32_t n_pos_per_embd = 1;
};
// temperature tuning, used by llama4
const llama_cparams & cparams;
};
-class llm_graph_input_s_copy : public llm_graph_input_i {
+class llm_graph_input_rs : public llm_graph_input_i {
public:
- llm_graph_input_s_copy(const llama_kv_cache_recurrent_state * kv_state) : kv_state(kv_state) {}
- virtual ~llm_graph_input_s_copy() = default;
+ llm_graph_input_rs(const llama_memory_recurrent_state * mem_state) : mem_state(mem_state) {}
+ virtual ~llm_graph_input_rs() = default;
void set_input(const llama_ubatch * ubatch) override;
ggml_tensor * s_copy; // I32 [kv_size]
- const llama_kv_cache_recurrent_state * kv_state;
+ const llama_memory_recurrent_state * mem_state;
};
class llm_graph_input_cross_embd : public llm_graph_input_i {
const llama_cross * cross = nullptr;
};
+class llm_graph_input_mem_hybrid : public llm_graph_input_i {
+public:
+ llm_graph_input_mem_hybrid(
+ const llama_hparams & hparams,
+ const llama_cparams & cparams,
+ const llama_memory_hybrid_state * mem_state) :
+ hparams(hparams),
+ cparams(cparams),
+ mem_state(mem_state) {
+ }
+ virtual ~llm_graph_input_mem_hybrid() = default;
+
+ void set_input(const llama_ubatch * ubatch) override;
+
+ ggml_tensor * s_copy; // I32 [kv_size]
+
+ ggml_tensor * get_kq_mask() const { return self_kq_mask_cnv; }
+
+ ggml_tensor * self_kq_mask = nullptr; // F32 [n_kv, n_batch]
+ ggml_tensor * self_kq_mask_cnv = nullptr; // [n_kv, n_batch]
+
+ const llama_hparams & hparams;
+ const llama_cparams & cparams;
+
+ const llama_memory_hybrid_state * mem_state;
+};
+
//
// llm_graph_result
//
llm_graph_context(const llm_graph_params & params);
- int64_t n_pos_per_embd() const;
-
void cb(ggml_tensor * cur, const char * name, int il) const;
//
ggml_tensor * build_inp_out_ids() const;
ggml_tensor * build_inp_mean() const;
ggml_tensor * build_inp_cls() const;
- ggml_tensor * build_inp_s_copy() const;
ggml_tensor * build_inp_cross_embd() const;
ggml_tensor * build_inp_pos_bucket_enc() const;
ggml_tensor * build_inp_pos_bucket_dec() const;
ggml_tensor * build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const;
+ llm_graph_input_mem_hybrid * build_inp_mem_hybrid() const;
+
//
// attention
//
float kq_scale,
int il) const;
+ ggml_tensor * build_attn(
+ llm_graph_input_mem_hybrid * inp,
+ ggml_cgraph * gf,
+ ggml_tensor * wo,
+ ggml_tensor * wo_b,
+ ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+ ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+ ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+ ggml_tensor * kq_b,
+ ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
+ float kq_scale,
+ int il) const;
//
// recurrent
//
- ggml_tensor * build_recurrent_state(
- ggml_cgraph * gf,
- ggml_tensor * s,
- ggml_tensor * state_copy,
- int32_t state_size,
- int32_t n_seqs,
- bool avoid_copies = false) const;
+ // TODO: avoid notion of "kv"
+ // TODO: move this implementation to llama_memory_recurrent.
+ // this is analogous to llama_kv_cache_unified::cpy_k / cpy_v
+ // when moving, avoid passing `ggml_cgraph` - only pass `ggml_context`. would likely need to split the
+ // implementation in 2 separate methods. the goal is to avoid calling `ggml_build_forward_expand` in
+ // `llama_memory_recurrent`
+ ggml_tensor * build_rs(
+ ggml_cgraph * gf,
+ ggml_tensor * s,
+ ggml_tensor * state_copy,
+ int32_t state_size,
+ int32_t n_seqs,
+ uint32_t n_kv,
+ uint32_t kv_head,
+ uint32_t kv_size,
+ int32_t rs_zero,
+ bool avoid_copies = false) const;
+
+ llm_graph_input_rs * build_rs_inp() const;
+
+ ggml_tensor * build_rs(
+ llm_graph_input_rs * inp,
+ ggml_cgraph * gf,
+ ggml_tensor * s,
+ int32_t state_size,
+ int32_t n_seqs,
+ bool avoid_copies = false) const;
+
+ ggml_tensor * build_rs(
+ llm_graph_input_mem_hybrid * inp,
+ ggml_cgraph * gf,
+ ggml_tensor * s,
+ int32_t state_size,
+ int32_t n_seqs,
+ bool avoid_copies = false) const;
ggml_tensor * build_rwkv_token_shift_load(
- ggml_cgraph * gf,
- ggml_tensor * state_copy,
- const llama_ubatch & ubatch,
+ llm_graph_input_rs * inp,
+ ggml_cgraph * gf,
+ const llama_ubatch & ubatch,
int il) const;
ggml_tensor * build_rwkv_token_shift_store(
return n_embd_head_v * n_head_kv;
}
-uint32_t llama_hparams::n_embd_k_s() const {
+uint32_t llama_hparams::n_embd_r() const {
if (wkv_head_size != 0) {
// for RWKV models
return token_shift_count * n_embd;
return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner;
}
-uint32_t llama_hparams::n_embd_v_s() const {
+uint32_t llama_hparams::n_embd_s() const {
if (wkv_head_size != 0) {
// corresponds to RWKV's wkv_states size
return n_embd * wkv_head_size;
return ssm_d_state * ssm_d_inner;
}
+bool llama_hparams::is_recurrent(uint32_t il) const {
+ return recurrent_layer_arr[il];
+}
+
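+// number of position values stored per token: 4 for M-RoPE (multi-dimensional positions), 1 otherwise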
+uint32_t llama_hparams::n_pos_per_embd() const {
+ return rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1;
+}
+
bool llama_hparams::is_swa(uint32_t il) const {
if (il < n_layer) {
return swa_layers[il];
uint32_t ssm_d_state = 0;
uint32_t ssm_dt_rank = 0;
+ // for hybrid state space models
+ std::array<bool, LLAMA_MAX_LAYERS> recurrent_layer_arr;
+
bool ssm_dt_b_c_rms = false;
float f_clamp_kqv = 0.0f;
// dimension of the rolling state embeddings
// corresponds to Mamba's conv_states size or RWKV's token_shift states size
- uint32_t n_embd_k_s() const;
+ uint32_t n_embd_r() const;
// dimension of the recurrent state embeddings
- uint32_t n_embd_v_s() const;
+ uint32_t n_embd_s() const;
+
+ // whether or not the given layer is recurrent (for hybrid models)
+ bool is_recurrent(uint32_t il) const;
+
+ uint32_t n_pos_per_embd() const;
bool is_swa(uint32_t il) const;
};
+++ /dev/null
-#include "llama-kv-cache-recurrent.h"
-
-#include "llama-impl.h"
-#include "llama-io.h"
-#include "llama-batch.h"
-#include "llama-model.h"
-
-#include <algorithm>
-#include <cassert>
-#include <limits>
-#include <map>
-#include <stdexcept>
-
-//
-// llama_kv_cache_recurrent
-//
-
-llama_kv_cache_recurrent::llama_kv_cache_recurrent(
- const llama_model & model,
- ggml_type type_k,
- ggml_type type_v,
- bool offload,
- uint32_t kv_size,
- uint32_t n_seq_max) : hparams(model.hparams), n_seq_max(n_seq_max) {
- const int32_t n_layer = hparams.n_layer;
-
- LLAMA_LOG_INFO("%s: kv_size = %u, n_seq_max = %u, type_k = '%s', type_v = '%s', n_layer = %d\n",
- __func__, kv_size, n_seq_max, ggml_type_name(type_k), ggml_type_name(type_v), n_layer);
-
- head = 0;
- size = kv_size;
- used = 0;
-
- cells.clear();
- cells.resize(kv_size);
-
- // create a context for each buffer type
- std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
- auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
- auto it = ctx_map.find(buft);
- if (it == ctx_map.end()) {
- ggml_init_params params = {
- /*.mem_size =*/ size_t(2u*n_layer*ggml_tensor_overhead()),
- /*.mem_buffer =*/ NULL,
- /*.no_alloc =*/ true,
- };
-
- ggml_context * ctx = ggml_init(params);
- if (!ctx) {
- return nullptr;
- }
-
- ctx_map[buft] = ctx;
- ctxs.emplace_back(ctx);
-
- return ctx;
- }
-
- return it->second;
- };
-
- k_l.reserve(n_layer);
- v_l.reserve(n_layer);
-
- for (int i = 0; i < n_layer; i++) {
- const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
-
- const char * dev_name = "CPU";
-
- ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();
-
- if (offload) {
- auto * dev = model.dev_layer(i);
- buft = ggml_backend_dev_buffer_type(dev);
-
- dev_name = ggml_backend_dev_name(dev);
- }
-
- LLAMA_LOG_DEBUG("%s, layer %3d: dev = %s\n", __func__, i, dev_name);
-
- ggml_context * ctx = ctx_for_buft(buft);
- if (!ctx) {
- throw std::runtime_error("failed to create ggml context for kv cache");
- }
-
- ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
- ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size);
- ggml_format_name(k, "cache_k_l%d", i);
- ggml_format_name(v, "cache_v_l%d", i);
- k_l.push_back(k);
- v_l.push_back(v);
- }
-
- // allocate tensors and initialize the buffers to avoid NaNs in the padding
- for (auto it : ctx_map) {
- auto * buft = it.first;
- auto * ctx = it.second;
-
- ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
- if (!buf) {
- throw std::runtime_error("failed to allocate buffer for kv cache");
- }
- ggml_backend_buffer_clear(buf, 0);
- LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
- bufs.emplace_back(buf);
- }
-
- {
- const size_t memory_size_k = size_k_bytes();
- const size_t memory_size_v = size_v_bytes();
-
- LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
- (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
- ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
- ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
- }
-}
-
-void llama_kv_cache_recurrent::clear(bool data) {
- for (int32_t i = 0; i < (int32_t) size; ++i) {
- cells[i].pos = -1;
- cells[i].seq_id.clear();
- cells[i].src = -1;
- cells[i].tail = -1;
- }
-
- head = 0;
- used = 0;
-
- if (data) {
- for (auto & buf : bufs) {
- ggml_backend_buffer_clear(buf.get(), 0);
- }
- }
-}
-
-bool llama_kv_cache_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
- uint32_t new_head = size;
-
- if (p0 < 0) {
- p0 = 0;
- }
-
- if (p1 < 0) {
- p1 = std::numeric_limits<llama_pos>::max();
- }
-
- // models like Mamba or RWKV can't have a state partially erased
- if (seq_id >= (int64_t) size) {
- // could be fatal
- return false;
- }
- if (0 <= seq_id) {
- int32_t & tail_id = cells[seq_id].tail;
- if (tail_id >= 0) {
- const kv_cell & cell = cells[tail_id];
- // partial intersection is invalid
- if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) {
- return false;
- }
- // invalidate tails which will be cleared
- if (p0 <= cell.pos && cell.pos < p1) {
- tail_id = -1;
- }
- }
- } else {
- // if the seq_id is negative, the range should include everything or nothing
- if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
- return false;
- }
- }
-
- for (uint32_t i = 0; i < size; ++i) {
- if (cells[i].pos >= p0 && cells[i].pos < p1) {
- if (seq_id < 0) {
- cells[i].seq_id.clear();
- } else if (cells[i].has_seq_id(seq_id)) {
- cells[i].seq_id.erase(seq_id);
- } else {
- continue;
- }
- if (cells[i].is_empty()) {
- // keep count of the number of used cells
- if (cells[i].pos >= 0) {
- used--;
- }
- cells[i].pos = -1;
- cells[i].src = -1;
- if (new_head == size) {
- new_head = i;
- }
- }
- }
- }
-
- // If we freed up a slot, set head to it so searching can start there.
- if (new_head != size && new_head < head) {
- head = new_head;
- }
-
- return true;
-}
-
-void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
- if (seq_id_src == seq_id_dst) {
- return;
- }
-
- if (p0 < 0) {
- p0 = 0;
- }
-
- if (p1 < 0) {
- p1 = std::numeric_limits<llama_pos>::max();
- }
-
- if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) {
- kv_cell & tail_src = cells[seq_id_src];
- kv_cell & tail_dst = cells[seq_id_dst];
- if (tail_dst.tail >= 0) {
- // clear destination seq_id if it wasn't empty
- kv_cell & cell_dst = cells[tail_dst.tail];
-
- cell_dst.seq_id.erase(seq_id_dst);
- tail_dst.tail = -1;
- if (cell_dst.seq_id.empty()) {
- cell_dst.pos = -1;
- cell_dst.src = -1;
- used -= 1;
- }
- }
- if (tail_src.tail >= 0) {
- kv_cell & cell_src = cells[tail_src.tail];
-
- cell_src.seq_id.insert(seq_id_dst);
- tail_dst.tail = tail_src.tail;
- }
- }
-}
-
-void llama_kv_cache_recurrent::seq_keep(llama_seq_id seq_id) {
- uint32_t new_head = size;
-
- for (uint32_t i = 0; i < size; ++i) {
- if ((llama_seq_id) i != seq_id) {
- cells[i].tail = -1;
- }
-
- if (!cells[i].has_seq_id(seq_id)) {
- if (cells[i].pos >= 0) {
- used--;
- }
-
- cells[i].pos = -1;
- cells[i].src = -1;
- cells[i].seq_id.clear();
-
- if (new_head == size){
- new_head = i;
- }
- } else {
- cells[i].seq_id.clear();
- cells[i].seq_id.insert(seq_id);
- }
- }
-
- // If we freed up a slot, set head to it so searching can start there.
- if (new_head != size && new_head < head) {
- head = new_head;
- }
-}
-
-void llama_kv_cache_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
- if (shift == 0) {
- return;
- }
-
- if (p0 < 0) {
- p0 = 0;
- }
-
- if (p1 < 0) {
- p1 = std::numeric_limits<llama_pos>::max();
- }
-
- // If there is no range then return early to avoid looping over the cache.
- if (p0 == p1) {
- return;
- }
-
- // for Mamba-like or RWKV models, only the pos needs to be shifted
- if (0 <= seq_id && seq_id < (int64_t) size) {
- const int32_t tail_id = cells[seq_id].tail;
- if (tail_id >= 0) {
- kv_cell & cell = cells[tail_id];
- if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
- cell.pos += shift;
- }
- }
- }
-}
-
-void llama_kv_cache_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
- if (d == 1) {
- return;
- }
-
- if (p0 < 0) {
- p0 = 0;
- }
-
- if (p1 < 0) {
- p1 = std::numeric_limits<llama_pos>::max();
- }
-
- // If there is no range then return early to avoid looping over the cache.
- if (p0 == p1) {
- return;
- }
-
- // for Mamba-like or RWKV models, only the pos needs to be changed
- if (0 <= seq_id && seq_id < (int64_t) size) {
- const int32_t tail_id = cells[seq_id].tail;
- if (tail_id >= 0) {
- kv_cell & cell = cells[tail_id];
- if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
- cell.pos /= d;
- }
- }
- }
-}
-
-llama_pos llama_kv_cache_recurrent::seq_pos_min(llama_seq_id seq_id) const {
- llama_pos result = std::numeric_limits<llama_pos>::max();
-
- for (uint32_t i = 0; i < size; ++i) {
- if (cells[i].has_seq_id(seq_id)) {
- result = std::min(result, cells[i].pos);
- }
- }
-
- if (result == std::numeric_limits<llama_pos>::max()) {
- result = -1;
- }
-
- return result;
-}
-
-llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const {
- llama_pos result = -1;
-
- for (uint32_t i = 0; i < size; ++i) {
- if (cells[i].has_seq_id(seq_id)) {
- result = std::max(result, cells[i].pos);
- }
- }
-
- return result;
-}
-
-llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) {
- auto sbatch = llama_sbatch(batch, hparams.n_embd, false);
-
- std::vector<llama_ubatch> ubatches;
-
- while (sbatch.n_tokens > 0) {
- llama_ubatch ubatch;
-
- if (embd_all) {
- // if all tokens are output, split by sequence
- ubatch = sbatch.split_seq(n_ubatch);
- } else {
- ubatch = sbatch.split_equal(n_ubatch);
- }
-
- ubatches.push_back(ubatch);
- }
-
- if (!prepare(ubatches)) {
- return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
- }
-
- return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_SUCCESS, this, std::move(sbatch), std::move(ubatches));
-}
-
-llama_memory_state_ptr llama_kv_cache_recurrent::init_full() {
- return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_SUCCESS, this);
-}
-
-llama_memory_state_ptr llama_kv_cache_recurrent::init_update(llama_context * lctx, bool optimize) {
- GGML_UNUSED(lctx);
- GGML_UNUSED(optimize);
-
- return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_NO_UPDATE);
-}
-
-bool llama_kv_cache_recurrent::prepare(const std::vector<llama_ubatch> & ubatches) {
- // simply remember the full state because it is very small for this type of cache
- // TODO: optimize
- auto org_cells = cells;
- auto org_used = used;
- auto org_head = head;
-
- bool success = true;
-
- for (const auto & ubatch : ubatches) {
- if (!find_slot(ubatch)) {
- success = false;
- break;
- }
- }
-
- // restore the original state
- cells = std::move(org_cells);
- used = org_used;
- head = org_head;
-
- return success;
-}
-
-bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) {
- const uint32_t n_seqs = ubatch.n_seqs;
-
- const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
-
- // if we have enough unused cells before the current head ->
- // better to start searching from the beginning of the cache, hoping to fill it
- if (head > used + 2*n_seqs) {
- head = 0;
- }
-
- // For recurrent state architectures (like Mamba or RWKV),
- // each cache cell can store the state for a whole sequence.
- // A slot should always be contiguous.
-
- // can only process batches with an equal number of new tokens in each sequence
- GGML_ASSERT(ubatch.equal_seqs);
-
- int32_t min = size - 1;
- int32_t max = 0;
-
- // everything should fit if all seq_ids are smaller than the max
- for (uint32_t s = 0; s < n_seqs; ++s) {
- const uint32_t n_seq_id = ubatch.n_seq_id[s];
- for (uint32_t j = 0; j < n_seq_id; ++j) {
- const llama_seq_id seq_id = ubatch.seq_id[s][j];
-
- if (seq_id < 0 || (uint32_t) seq_id >= size) {
- // too big seq_id
- // TODO: would it be possible to resize the cache instead?
- LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%u Try using a bigger --parallel value\n", __func__, seq_id, n_seq_max);
- return false;
- }
- if (j > 0) {
- kv_cell & seq = cells[seq_id];
- if (seq.tail >= 0) {
- kv_cell & cell = cells[seq.tail];
- // clear cells from seq_ids that become shared
- // (should not normally happen, but let's handle it anyway)
- cell.seq_id.erase(seq_id);
- seq.tail = -1;
- if (cell.seq_id.empty()) {
- cell.pos = -1;
- cell.src = -1;
- used -= 1;
- }
- }
- }
- }
- }
-
-#ifndef NDEBUG
- {
- std::vector<int32_t> tails_verif;
- tails_verif.assign(size, -1);
- for (uint32_t i = 0; i < size; ++i) {
- kv_cell & cell = cells[i];
- for (llama_seq_id seq_id : cell.seq_id) {
- if (tails_verif[seq_id] != -1) {
- LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
- }
- tails_verif[seq_id] = i;
- }
- }
- for (uint32_t i = 0; i < size; ++i) {
- if (tails_verif[i] != cells[i].tail) {
- LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]);
- }
- }
- }
-#endif
-
- // find next empty cell
- uint32_t next_empty_cell = head;
-
- for (uint32_t i = 0; i < size; ++i) {
- if (next_empty_cell >= size) { next_empty_cell -= size; }
- kv_cell & cell = cells[next_empty_cell];
- if (cell.is_empty()) { break; }
- next_empty_cell += 1;
- }
-
- // find usable cell range
- for (uint32_t s = 0; s < n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch.seq_id[s][0];
- kv_cell & seq_meta = cells[seq_id];
- bool has_cell = false;
- if (seq_meta.tail >= 0) {
- kv_cell & cell = cells[seq_meta.tail];
- GGML_ASSERT(cell.has_seq_id(seq_id));
- // does this seq_id "own" the cell?
- if (cell.seq_id.size() == 1) { has_cell = true; }
- }
- if (!has_cell) {
- kv_cell & empty_cell = cells[next_empty_cell];
- GGML_ASSERT(empty_cell.is_empty());
- // copy old tail into the empty cell
- if (seq_meta.tail >= 0) {
- kv_cell & orig_cell = cells[seq_meta.tail];
- empty_cell.pos = orig_cell.pos;
- empty_cell.src = orig_cell.src;
- orig_cell.seq_id.erase(seq_id);
- empty_cell.seq_id.insert(seq_id); // will be overwritten
- GGML_ASSERT(!orig_cell.is_empty()); // has at least one remaining seq_id
- }
- seq_meta.tail = next_empty_cell;
- // find next empty cell
- if (s + 1 < n_seqs) {
- for (uint32_t i = 0; i < size; ++i) {
- next_empty_cell += 1;
- if (next_empty_cell >= size) { next_empty_cell -= size; }
- kv_cell & cell = cells[next_empty_cell];
- if (cell.is_empty()) { break; }
- }
- }
- }
- if (min > seq_meta.tail) { min = seq_meta.tail; }
- if (max < seq_meta.tail) { max = seq_meta.tail; }
- }
-
- // gather and re-order
- for (uint32_t s = 0; s < n_seqs; ++s) {
- const int32_t dst_id = s + min;
- const int32_t src_id = cells[ubatch.seq_id[s][0]].tail;
- if (dst_id != src_id) {
- kv_cell & dst_cell = cells[dst_id];
- kv_cell & src_cell = cells[src_id];
-
- std::swap(dst_cell.pos, src_cell.pos);
- std::swap(dst_cell.src, src_cell.src);
- std::swap(dst_cell.seq_id, src_cell.seq_id);
-
- // swap tails
- for (uint32_t i = 0; i < size; ++i) {
- int32_t & tail = cells[i].tail;
- if (tail == src_id) {
- tail = dst_id;
- } else if (tail == dst_id) {
- tail = src_id;
- }
- }
- }
- }
-
- // update the pos of the used seqs
- for (uint32_t s = 0; s < n_seqs; ++s) {
- const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1];
- const int32_t cell_id = s + min;
- kv_cell & cell = cells[cell_id];
-
- if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
- // What should happen when the pos backtracks or skips a value?
- // Clearing the state mid-batch would require special-casing which isn't done.
- LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
- __func__, last_pos, cell.pos, ubatch.seq_id[s][0], n_seq_tokens);
- }
- cell.pos = last_pos;
- cell.seq_id.clear();
- for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) {
- const llama_seq_id seq_id = ubatch.seq_id[s][j];
- cell.seq_id.insert(seq_id);
- cells[seq_id].tail = cell_id;
- }
- }
-
- // Find first cell without src refs, to use as the zero-ed state
- {
- // TODO: bake-in src refcounts in the cell metadata
- std::vector<int32_t> refcounts(size, 0);
- for (size_t i = 0; i < size; ++i) {
- const int32_t src = cells[i].src;
- if (src >= 0) {
- refcounts[src] += 1;
- }
- }
-
- rs_z = -1;
- for (int i = min; i <= max; ++i) {
- if (refcounts[i] == 0) {
- rs_z = i;
- break;
- }
- }
-
- for (int i = min; i <= max; ++i) {
- if (cells[i].src < 0) {
- GGML_ASSERT(rs_z >= 0);
- cells[i].src0 = rs_z;
- } else {
- // Stage the source ids for all used cells to allow correct seq_* behavior
- // and still make these values available when setting the inputs
- cells[i].src0 = cells[i].src;
- }
- cells[i].src = i; // avoid moving or clearing twice
- }
- }
-
- // allow getting the range of used cells, from head to head + n
- head = min;
- n = max - min + 1;
- used = std::count_if(cells.begin(), cells.end(),
- [](const kv_cell & cell){ return !cell.is_empty(); });
-
- // sanity check
- return n >= n_seqs;
-}
-
-bool llama_kv_cache_recurrent::get_can_shift() const {
- // shifting the pos is trivial for recurrent models
- return true;
-}
-
-size_t llama_kv_cache_recurrent::total_size() const {
- size_t size = 0;
- for (const auto & buf : bufs) {
- size += ggml_backend_buffer_get_size(buf.get());
- }
-
- return size;
-}
-
-size_t llama_kv_cache_recurrent::size_k_bytes() const {
- size_t size_k_bytes = 0;
-
- for (const auto & k : k_l) {
- size_k_bytes += ggml_nbytes(k);
- }
-
- return size_k_bytes;
-}
-
-size_t llama_kv_cache_recurrent::size_v_bytes() const {
- size_t size_v_bytes = 0;
-
- for (const auto & v : v_l) {
- size_v_bytes += ggml_nbytes(v);
- }
-
- return size_v_bytes;
-}
-
-void llama_kv_cache_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
- std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
- uint32_t cell_count = 0;
-
- // Count the number of cells with the specified seq_id
- // Find all the ranges of cells with this seq id (or all, when -1)
- uint32_t cell_range_begin = size;
- for (uint32_t i = 0; i < size; ++i) {
- const auto & cell = cells[i];
- if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) {
- ++cell_count;
- if (cell_range_begin == size) {
- cell_range_begin = i;
- }
- } else {
- if (cell_range_begin != size) {
- cell_ranges.emplace_back(cell_range_begin, i);
- cell_range_begin = size;
- }
- }
- }
- if (cell_range_begin != size) {
- cell_ranges.emplace_back(cell_range_begin, size);
- }
-
- // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
- uint32_t cell_count_check = 0;
- for (const auto & range : cell_ranges) {
- cell_count_check += range.second - range.first;
- }
- GGML_ASSERT(cell_count == cell_count_check);
-
- io.write(&cell_count, sizeof(cell_count));
-
- state_write_meta(io, cell_ranges, seq_id);
- state_write_data(io, cell_ranges);
-}
-
-void llama_kv_cache_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
- uint32_t cell_count;
- io.read_to(&cell_count, sizeof(cell_count));
-
- bool res = true;
-
- res = res && state_read_meta(io, cell_count, seq_id);
- res = res && state_read_data(io, cell_count);
-
- if (!res) {
- if (seq_id == -1) {
- clear(true);
- } else {
- seq_rm(seq_id, -1, -1);
- }
- throw std::runtime_error("failed to restore kv cache");
- }
-}
-
-void llama_kv_cache_recurrent::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const {
- for (const auto & range : cell_ranges) {
- for (uint32_t i = range.first; i < range.second; ++i) {
- const auto & cell = cells[i];
- const llama_pos pos = cell.pos;
- const uint32_t n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0;
-
- io.write(&pos, sizeof(pos));
- io.write(&n_seq_id, sizeof(n_seq_id));
-
- if (n_seq_id) {
- for (auto seq_id : cell.seq_id) {
- io.write(&seq_id, sizeof(seq_id));
- }
- }
- }
- }
-}
-
-void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const {
- const uint32_t v_trans = 0;
- const uint32_t n_layer = hparams.n_layer;
-
- io.write(&v_trans, sizeof(v_trans));
- io.write(&n_layer, sizeof(n_layer));
-
- std::vector<uint8_t> tmp_buf;
-
- // Iterate and write all the keys first, each row is a cell
- // Get whole range at a time
- for (uint32_t il = 0; il < n_layer; ++il) {
- const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
-
- // Write key type
- const int32_t k_type_i = (int32_t)k_l[il]->type;
- io.write(&k_type_i, sizeof(k_type_i));
-
- // Write row size of key
- const uint64_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
- io.write(&k_size_row, sizeof(k_size_row));
-
- // Read each range of cells of k_size length each into tmp_buf and write out
- for (const auto & range : cell_ranges) {
- const size_t range_size = range.second - range.first;
- const size_t buf_size = range_size * k_size_row;
- io.write_tensor(k_l[il], range.first * k_size_row, buf_size);
- }
- }
-
- if (!v_trans) {
- for (uint32_t il = 0; il < n_layer; ++il) {
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
-
- // Write value type
- const int32_t v_type_i = (int32_t)v_l[il]->type;
- io.write(&v_type_i, sizeof(v_type_i));
-
- // Write row size of value
- const uint64_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa);
- io.write(&v_size_row, sizeof(v_size_row));
-
- // Read each range of cells of v_size length each into tmp_buf and write out
- for (const auto & range : cell_ranges) {
- const size_t range_size = range.second - range.first;
- const size_t buf_size = range_size * v_size_row;
- io.write_tensor(v_l[il], range.first * v_size_row, buf_size);
- }
- }
- } else {
- // When v is transposed, we also need the element size and get the element ranges from each row
- const uint32_t kv_size = size;
- for (uint32_t il = 0; il < n_layer; ++il) {
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
-
- // Write value type
- const int32_t v_type_i = (int32_t)v_l[il]->type;
- io.write(&v_type_i, sizeof(v_type_i));
-
- // Write element size
- const uint32_t v_size_el = ggml_type_size(v_l[il]->type);
- io.write(&v_size_el, sizeof(v_size_el));
-
- // Write GQA embedding size
- io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));
-
- // For each row, we get the element values of each cell
- for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
- // Read each range of cells of v_size_el length each into tmp_buf and write out
- for (const auto & range : cell_ranges) {
- const size_t range_size = range.second - range.first;
- const size_t src_offset = (range.first + j * kv_size) * v_size_el;
- const size_t buf_size = range_size * v_size_el;
- io.write_tensor(v_l[il], src_offset, buf_size);
- }
- }
- }
- }
-}
-
-bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) {
- if (dest_seq_id != -1) {
- // single sequence
-
- seq_rm(dest_seq_id, -1, -1);
-
- llama_sbatch sbatch;
- llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false);
-
- batch.n_tokens = cell_count;
- batch.n_seq_tokens = cell_count;
- batch.n_seqs = 1;
-
- for (uint32_t i = 0; i < cell_count; ++i) {
- llama_pos pos;
- uint32_t n_seq_id;
-
- io.read_to(&pos, sizeof(pos));
- io.read_to(&n_seq_id, sizeof(n_seq_id));
-
- if (n_seq_id != 0) {
- LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
- return false;
- }
-
- batch.pos[i] = pos;
- }
- batch.n_seq_id[0] = 1;
- batch.seq_id[0] = &dest_seq_id;
-
- if (!find_slot(batch)) {
- LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
- return false;
- }
-
- // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
- // Assume that this is one contiguous block of cells
- GGML_ASSERT(head + cell_count <= size);
- GGML_ASSERT(cells[head].pos == batch.pos[0]);
- GGML_ASSERT(cells[head + cell_count - 1].pos == batch.pos[cell_count - 1]);
- GGML_ASSERT(cells[head].has_seq_id(dest_seq_id));
- GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id));
- } else {
- // whole KV cache restore
-
- if (cell_count > size) {
- LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
- return false;
- }
-
- clear(true);
-
- for (uint32_t i = 0; i < cell_count; ++i) {
- kv_cell & cell = cells[i];
-
- llama_pos pos;
- uint32_t n_seq_id;
-
- io.read_to(&pos, sizeof(pos));
- io.read_to(&n_seq_id, sizeof(n_seq_id));
-
- cell.pos = pos;
-
- for (uint32_t j = 0; j < n_seq_id; ++j) {
- llama_seq_id seq_id;
- io.read_to(&seq_id, sizeof(seq_id));
-
- // TODO: llama_kv_cache_recurrent should have a notion of max sequences
- //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) {
- if (seq_id < 0) {
- //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx));
- LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, inf)\n", __func__, seq_id);
- return false;
- }
-
- cell.seq_id.insert(seq_id);
-
- int32_t & tail = cells[seq_id].tail;
- if (tail != -1) {
- LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail);
- return false;
- }
- tail = i;
- }
- }
-
- head = 0;
- used = cell_count;
- }
-
- for (uint32_t i = 0; i < cell_count; ++i) {
- uint32_t cell_id = head + i;
- // make sure the recurrent states will keep their restored state
- cells[cell_id].src = cell_id;
- }
-
- return true;
-}
-
-bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) {
- uint32_t v_trans;
- uint32_t n_layer;
- io.read_to(&v_trans, sizeof(v_trans));
- io.read_to(&n_layer, sizeof(n_layer));
-
- if (n_layer != hparams.n_layer) {
- LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer);
- return false;
- }
- if (cell_count > size) {
- LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size);
- return false;
- }
- if (false != (bool) v_trans) {
- LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
- return false;
- }
-
- // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
- for (uint32_t il = 0; il < n_layer; ++il) {
- const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
-
- // Read type of key
- int32_t k_type_i_ref;
- io.read_to(&k_type_i_ref, sizeof(k_type_i_ref));
- const int32_t k_type_i = (int32_t) k_l[il]->type;
- if (k_type_i != k_type_i_ref) {
- LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
- return false;
- }
-
- // Read row size of key
- uint64_t k_size_row_ref;
- io.read_to(&k_size_row_ref, sizeof(k_size_row_ref));
- const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
- if (k_size_row != k_size_row_ref) {
- LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
- return false;
- }
-
- if (cell_count) {
- // Read and set the keys for the whole cell range
- ggml_backend_tensor_set(k_l[il], io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row);
- }
- }
-
- if (!v_trans) {
- for (uint32_t il = 0; il < n_layer; ++il) {
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
-
- // Read type of value
- int32_t v_type_i_ref;
- io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
- const int32_t v_type_i = (int32_t)v_l[il]->type;
- if (v_type_i != v_type_i_ref) {
- LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
- return false;
- }
-
- // Read row size of value
- uint64_t v_size_row_ref;
- io.read_to(&v_size_row_ref, sizeof(v_size_row_ref));
- const size_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa);
- if (v_size_row != v_size_row_ref) {
- LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
- return false;
- }
-
- if (cell_count) {
- // Read and set the values for the whole cell range
- ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row);
- }
- }
- } else {
- // For each layer, read the values for each cell (transposed)
- for (uint32_t il = 0; il < n_layer; ++il) {
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
-
- // Read type of value
- int32_t v_type_i_ref;
- io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
- const int32_t v_type_i = (int32_t)v_l[il]->type;
- if (v_type_i != v_type_i_ref) {
- LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
- return false;
- }
-
- // Read element size of value
- uint32_t v_size_el_ref;
- io.read_to(&v_size_el_ref, sizeof(v_size_el_ref));
- const size_t v_size_el = ggml_type_size(v_l[il]->type);
- if (v_size_el != v_size_el_ref) {
- LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
- return false;
- }
-
- // Read GQA embedding size
- uint32_t n_embd_v_gqa_ref;
- io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
- if (n_embd_v_gqa != n_embd_v_gqa_ref) {
- LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
- return false;
- }
-
- if (cell_count) {
- // For each row in the transposed matrix, read the values for the whole cell range
- for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
- const size_t dst_offset = (head + j * size) * v_size_el;
- ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
- }
- }
- }
- }
-
- return true;
-}
-
-//
-// llama_kv_cache_recurrent_state
-//
-
-llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state(llama_memory_status status) : status(status) {}
-
-llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state(
- llama_memory_status status,
- llama_kv_cache_recurrent * kv) : status(status), kv(kv), is_full(true) {
-}
-
-llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state(
- llama_memory_status status,
- llama_kv_cache_recurrent * kv,
- llama_sbatch sbatch,
- std::vector<llama_ubatch> ubatches) : status(status), kv(kv), sbatch(std::move(sbatch)), ubatches(std::move(ubatches)) {}
-
-llama_kv_cache_recurrent_state::~llama_kv_cache_recurrent_state() = default;
-
-bool llama_kv_cache_recurrent_state::next() {
- assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
-
- if (++i_next >= ubatches.size()) {
- return false;
- }
-
- return true;
-}
-
-bool llama_kv_cache_recurrent_state::apply() {
- assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
-
- kv->find_slot(ubatches[i_next]);
-
- return true;
-}
-
-std::vector<int64_t> & llama_kv_cache_recurrent_state::out_ids() {
- assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
-
- return sbatch.out_ids;
-}
-
-llama_memory_status llama_kv_cache_recurrent_state::get_status() const {
- return status;
-}
-
-const llama_ubatch & llama_kv_cache_recurrent_state::get_ubatch() const {
- assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
-
- return ubatches[i_next];
-}
-
-uint32_t llama_kv_cache_recurrent_state::get_n_kv() const {
- return is_full ? kv->size : kv->n;
-}
-
-uint32_t llama_kv_cache_recurrent_state::get_head() const {
- return is_full ? 0 : kv->head;
-}
-
-int32_t llama_kv_cache_recurrent_state::get_rs_z() const {
- return is_full ? 0 : kv->rs_z;
-}
-
-uint32_t llama_kv_cache_recurrent_state::get_size() const {
- return kv->size;
-}
-
-ggml_tensor * llama_kv_cache_recurrent_state::get_k_l(int32_t il) const {
- return kv->k_l[il];
-}
-
-ggml_tensor * llama_kv_cache_recurrent_state::get_v_l(int32_t il) const {
- return kv->v_l[il];
-}
-
-int32_t llama_kv_cache_recurrent_state::s_copy(int i) const {
- return kv->cells[i + kv->head].src0;
-}
+++ /dev/null
-#pragma once
-
-#include "llama-batch.h"
-#include "llama-graph.h"
-#include "llama-memory.h"
-
-#include <set>
-#include <vector>
-
-//
-// llama_kv_cache_recurrent
-//
-
-// TODO: extract the KV cache state used for graph computation into llama_kv_cache_recurrent_state_i
-// see the implementation of llama_kv_cache_unified_state_i for an example how to do it
-class llama_kv_cache_recurrent : public llama_memory_i {
-public:
- llama_kv_cache_recurrent(
- const llama_model & model,
- ggml_type type_k,
- ggml_type type_v,
- bool offload,
- uint32_t kv_size,
- uint32_t n_seq_max);
-
- ~llama_kv_cache_recurrent() = default;
-
- //
- // llama_memory_i
- //
-
- llama_memory_state_ptr init_batch(
- const llama_batch & batch,
- uint32_t n_ubatch,
- bool embd_all) override;
-
- llama_memory_state_ptr init_full() override;
-
- llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) override;
-
- void clear(bool data) override;
-
- bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
- void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
- void seq_keep(llama_seq_id seq_id) override;
- void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
- void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
-
- llama_pos seq_pos_min(llama_seq_id seq_id) const override;
- llama_pos seq_pos_max(llama_seq_id seq_id) const override;
-
- bool prepare(const std::vector<llama_ubatch> & ubatches);
-
- // find a contiguous slot of kv cells and emplace the ubatch there
- bool find_slot(const llama_ubatch & ubatch);
-
- bool get_can_shift() const override;
-
- // state write/load
-
- void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
- void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
-
- uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
- uint32_t size = 0; // total number of cells, shared across all sequences
- uint32_t used = 0; // used cells (i.e. at least one seq_id)
-
- // computed before each graph build
- uint32_t n = 0;
-
- // first zero-ed state
- int32_t rs_z = -1;
-
- // TODO: optimize for recurrent state needs
- struct kv_cell {
- llama_pos pos = -1;
- int32_t src = -1; // used to know where states should be copied from
- int32_t src0 = -1; // like src, but only used when setting the inputs (allowing to copy once)
- int32_t tail = -1;
-
- std::set<llama_seq_id> seq_id;
-
- bool has_seq_id(const llama_seq_id & id) const {
- return seq_id.find(id) != seq_id.end();
- }
-
- bool is_empty() const {
- return seq_id.empty();
- }
-
- bool is_same_seq(const kv_cell & other) const {
- return seq_id == other.seq_id;
- }
- };
-
- std::vector<kv_cell> cells;
-
- std::vector<ggml_tensor *> k_l; // per layer
- std::vector<ggml_tensor *> v_l;
-
-private:
- //const llama_model & model;
- const llama_hparams & hparams;
-
- const uint32_t n_seq_max = 1;
-
- std::vector<ggml_context_ptr> ctxs;
- std::vector<ggml_backend_buffer_ptr> bufs;
-
- size_t total_size() const;
-
- size_t size_k_bytes() const;
- size_t size_v_bytes() const;
-
- void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
- void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
-
- bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
- bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
-};
-
-class llama_kv_cache_recurrent_state : public llama_memory_state_i {
-public:
- // used for errors
- llama_kv_cache_recurrent_state(llama_memory_status status);
-
- // used to create a full-cache state
- llama_kv_cache_recurrent_state(
- llama_memory_status status,
- llama_kv_cache_recurrent * kv);
-
- // used to create a state from a batch
- llama_kv_cache_recurrent_state(
- llama_memory_status status,
- llama_kv_cache_recurrent * kv,
- llama_sbatch sbatch,
- std::vector<llama_ubatch> ubatches);
-
- virtual ~llama_kv_cache_recurrent_state();
-
- //
- // llama_memory_state_i
- //
-
- bool next() override;
- bool apply() override;
-
- std::vector<int64_t> & out_ids() override;
-
- llama_memory_status get_status() const override;
- const llama_ubatch & get_ubatch() const override;
-
- //
- // llama_kv_cache_recurrent_state specific API
- //
-
- uint32_t get_n_kv() const;
- uint32_t get_head() const;
- int32_t get_rs_z() const;
- uint32_t get_size() const;
-
- ggml_tensor * get_k_l(int32_t il) const;
- ggml_tensor * get_v_l(int32_t il) const;
-
- int32_t s_copy(int i) const;
-
-private:
- const llama_memory_status status;
-
- llama_kv_cache_recurrent * kv;
-
- llama_sbatch sbatch;
-
- size_t i_next = 0;
-
- std::vector<llama_ubatch> ubatches;
-
- //
- // data needed for building the compute graph for the current ubatch:
- // TODO: extract all the state like `head` and `n` here
- //
-
- const bool is_full = false;
-};
return kv_swa->seq_pos_max(seq_id);
}
-llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) {
+llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
GGML_UNUSED(embd_all);
// first try simple split
do {
- auto sbatch = llama_sbatch(batch, hparams.n_embd, true);
+ balloc.split_reset();
std::vector<llama_ubatch> ubatches;
+ while (true) {
+ auto ubatch = balloc.split_simple(n_ubatch);
- while (sbatch.n_tokens > 0) {
- auto ubatch = sbatch.split_simple(n_ubatch);
+ if (ubatch.n_tokens == 0) {
+ break;
+ }
- ubatches.push_back(ubatch);
+ ubatches.push_back(std::move(ubatch)); // NOLINT
}
auto heads_base = kv_base->prepare(ubatches);
assert(heads_base.size() == heads_swa.size());
return std::make_unique<llama_kv_cache_unified_iswa_state>(
- this, std::move(sbatch), std::move(heads_base), std::move(heads_swa), std::move(ubatches));
+ this, std::move(heads_base), std::move(heads_swa), std::move(ubatches));
} while (false);
// if it fails, try equal split
do {
- auto sbatch = llama_sbatch(batch, hparams.n_embd, false);
+ balloc.split_reset();
std::vector<llama_ubatch> ubatches;
+ while (true) {
+ auto ubatch = balloc.split_equal(n_ubatch);
- while (sbatch.n_tokens > 0) {
- auto ubatch = sbatch.split_equal(n_ubatch);
+ if (ubatch.n_tokens == 0) {
+ break;
+ }
- ubatches.push_back(ubatch);
+ ubatches.push_back(std::move(ubatch)); // NOLINT
}
auto heads_base = kv_base->prepare(ubatches);
assert(heads_base.size() == heads_swa.size());
return std::make_unique<llama_kv_cache_unified_iswa_state>(
- this, std::move(sbatch), std::move(heads_base), std::move(heads_swa), std::move(ubatches));
+ this, std::move(heads_base), std::move(heads_swa), std::move(ubatches));
} while (false);
// TODO: if we fail again, we should attempt different splitting strategies
llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(llama_memory_status status) : status(status) {}
llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(
- llama_kv_cache_unified_iswa * kv) : status(LLAMA_MEMORY_STATUS_SUCCESS) {
- state_base = kv->get_base()->init_full();
- state_swa = kv->get_swa ()->init_full();
-
- status = llama_memory_status_combine(state_base->get_status(), state_swa->get_status());
+ llama_kv_cache_unified_iswa * kv) :
+ state_base(kv->get_base()->init_full()),
+ state_swa (kv->get_swa ()->init_full()),
+ status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) {
}
llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(
llama_kv_cache_unified_iswa * kv,
llama_context * lctx,
- bool optimize) : status(LLAMA_MEMORY_STATUS_SUCCESS) {
- state_base = kv->get_base()->init_update(lctx, optimize);
- state_swa = kv->get_swa ()->init_update(lctx, optimize);
-
- status = llama_memory_status_combine(state_base->get_status(), state_swa->get_status());
+ bool optimize) :
+ state_base(kv->get_base()->init_update(lctx, optimize)),
+ state_swa (kv->get_swa ()->init_update(lctx, optimize)),
+ status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) {
}
llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(
llama_kv_cache_unified_iswa * kv,
- llama_sbatch sbatch,
std::vector<uint32_t> heads_base,
std::vector<uint32_t> heads_swa,
- std::vector<llama_ubatch> ubatches)
- : status(LLAMA_MEMORY_STATUS_SUCCESS),
- sbatch(std::move(sbatch)),
- ubatches(std::move(ubatches)) {
+ std::vector<llama_ubatch> ubatches) :
+ ubatches(std::move(ubatches)),
// note: here we copy the ubatches. not sure if this is ideal
- state_base.reset(new llama_kv_cache_unified_state(kv->get_base(), {}, std::move(heads_base), this->ubatches));
- state_swa .reset(new llama_kv_cache_unified_state(kv->get_swa (), {}, std::move(heads_swa), this->ubatches));
-
- status = llama_memory_status_combine(state_base->get_status(), state_swa->get_status());
+ state_base(new llama_kv_cache_unified_state(kv->get_base(), std::move(heads_base), this->ubatches)),
+ state_swa (new llama_kv_cache_unified_state(kv->get_swa (), std::move(heads_swa), this->ubatches)),
+ status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) {
}
llama_kv_cache_unified_iswa_state:: ~llama_kv_cache_unified_iswa_state() = default;
return res;
}
-std::vector<int64_t> & llama_kv_cache_unified_iswa_state::out_ids() {
- assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
-
- return sbatch.out_ids;
-}
-
llama_memory_status llama_kv_cache_unified_iswa_state::get_status() const {
return status;
}
//
llama_memory_state_ptr init_batch(
- const llama_batch & batch,
+ llama_batch_allocr & balloc,
uint32_t n_ubatch,
bool embd_all) override;
// used to create a state from a batch
llama_kv_cache_unified_iswa_state(
llama_kv_cache_unified_iswa * kv,
- llama_sbatch sbatch,
std::vector<uint32_t> heads_base,
std::vector<uint32_t> heads_swa,
std::vector<llama_ubatch> ubatches);
bool next() override;
bool apply() override;
- std::vector<int64_t> & out_ids() override;
-
llama_memory_status get_status() const override;
const llama_ubatch & get_ubatch() const override;
const llama_kv_cache_unified_state * get_swa() const;
private:
- llama_memory_status status;
-
//llama_kv_cache_unified_iswa * kv;
- llama_sbatch sbatch;
-
// the index of the next ubatch to process
size_t i_next = 0;
std::vector<llama_ubatch> ubatches;
- llama_memory_state_ptr state_base;
- llama_memory_state_ptr state_swa;
+ const llama_memory_state_ptr state_base;
+ const llama_memory_state_ptr state_swa;
+
+ const llama_memory_status status;
};
continue;
}
- const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
const char * dev_name = "CPU";
}
llama_memory_state_ptr llama_kv_cache_unified::init_batch(
- const llama_batch & batch,
+ llama_batch_allocr & balloc,
uint32_t n_ubatch,
bool embd_all) {
GGML_UNUSED(embd_all);
do {
- auto sbatch = llama_sbatch(batch, hparams.n_embd, true);
+ balloc.split_reset();
std::vector<llama_ubatch> ubatches;
- while (sbatch.n_tokens > 0) {
- ubatches.push_back(sbatch.split_simple(n_ubatch));
+ while (true) {
+ auto ubatch = balloc.split_simple(n_ubatch);
+
+ if (ubatch.n_tokens == 0) {
+ break;
+ }
+
+ ubatches.push_back(std::move(ubatch)); // NOLINT
}
auto heads = prepare(ubatches);
}
return std::make_unique<llama_kv_cache_unified_state>(
- this, std::move(sbatch), std::move(heads), std::move(ubatches));
+ this, std::move(heads), std::move(ubatches));
} while (false);
return std::make_unique<llama_kv_cache_unified_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
}
void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch & ubatch) {
- if (debug > 0) {
- LLAMA_LOG_DEBUG("%s: ubatch info:\n", __func__);
- LLAMA_LOG_DEBUG("%s: n_tokens = %d, equal_seqs = %d\n", __func__, ubatch.n_tokens, ubatch.equal_seqs);
- LLAMA_LOG_DEBUG("%s: n_seq_tokens = %d, n_seqs = %d\n", __func__, ubatch.n_seq_tokens, ubatch.n_seqs);
- }
-
// keep track of the max sequence position that we would overwrite with this ubatch
// for non-SWA cache, this would be always empty
llama_seq_id seq_pos_max_rm[LLAMA_MAX_SEQ];
seq_pos_max_rm[s] = -1;
}
- for (uint32_t s = 0; s < ubatch.n_seqs; ++s) {
- for (uint32_t j = 0; j < ubatch.n_seq_tokens; ++j) {
- const uint32_t idx = s*ubatch.n_seq_tokens + j;
-
- if (!cells.is_empty(head_cur + idx)) {
- assert(cells.seq_count(head_cur + idx) == 1);
+ for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
+ if (!cells.is_empty(head_cur + i)) {
+ assert(cells.seq_count(head_cur + i) == 1);
- const llama_seq_id seq_id = cells.seq_get(head_cur + idx);
- const llama_pos pos = cells.pos_get(head_cur + idx);
+ const llama_seq_id seq_id = cells.seq_get(head_cur + i);
+ const llama_pos pos = cells.pos_get(head_cur + i);
- seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos);
+ seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos);
- cells.rm(head_cur + idx);
- }
+ cells.rm(head_cur + i);
+ }
- cells.pos_set(head_cur + idx, ubatch.pos[idx]);
+ cells.pos_set(head_cur + i, ubatch.pos[i]);
- // TODO: fix indexing [UBATCH_IDX]
- for (int32_t i = 0; i < ubatch.n_seq_id[s]; i++) {
- cells.seq_add(head_cur + idx, ubatch.seq_id[s][i]);
- }
+ for (int32_t s = 0; s < ubatch.n_seq_id[i]; s++) {
+ cells.seq_add(head_cur + i, ubatch.seq_id[i][s]);
}
}
seq_rm(s, cells.seq_pos_min(s), seq_pos_max_rm[s] + 1);
}
}
+
// move the head at the end of the slot
head = head_cur + ubatch.n_tokens;
}
}
void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const {
- const uint32_t n_tokens = ubatch->n_tokens;
- const uint32_t n_seq_tokens = ubatch->n_seq_tokens;
- const uint32_t n_seqs = ubatch->n_seqs;
+ const uint32_t n_tokens = ubatch->n_tokens;
GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
float * data = (float *) dst->data;
// xxxxx-----
// To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615
for (uint32_t h = 0; h < 1; ++h) {
- for (uint32_t s = 0; s < n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch->seq_id[s][0];
+ for (uint32_t i = 0; i < n_tokens; ++i) {
+ const llama_seq_id seq_id = ubatch->seq_id[i][0];
- for (uint32_t j = 0; j < n_seq_tokens; ++j) {
- const uint32_t idx = s*n_seq_tokens + j;
+ const llama_pos p1 = ubatch->pos[i];
- const llama_pos p1 = ubatch->pos[idx];
+ for (uint32_t j = 0; j < n_kv; ++j) {
+ float f = 0.0f;
- for (uint32_t i = 0; i < n_kv; ++i) {
- float f = 0.0f;
+ bool masked = false;
- bool masked = false;
-
- if (cells.is_empty(i)) {
- masked = true;
- } else {
- const llama_pos p0 = cells.pos_get(i);
-
- // mask the token if not the same sequence
- masked = masked || (!cells.seq_has(i, seq_id));
+ if (cells.is_empty(j)) {
+ masked = true;
+ } else {
+ const llama_pos p0 = cells.pos_get(j);
- // mask future tokens
- masked = masked || (causal_attn && p0 > p1);
+ // mask the token if not the same sequence
+ masked = masked || (!cells.seq_has(j, seq_id));
- // apply SWA if any
- masked = masked || (is_masked_swa(p0, p1));
+ // mask future tokens
+ masked = masked || (causal_attn && p0 > p1);
- if (!masked && hparams.use_alibi) {
- f = -std::abs(p0 - p1);
- }
- }
+ // apply SWA if any
+ masked = masked || (is_masked_swa(p0, p1));
- if (masked) {
- f = -INFINITY;
+ if (!masked && hparams.use_alibi) {
+ f = -std::abs(p0 - p1);
}
+ }
- data[h*(n_kv*n_tokens) + idx*n_kv + i] = f;
+ if (masked) {
+ f = -INFINITY;
}
+
+ data[h*(n_kv*n_tokens) + i*n_kv + j] = f;
}
}
// mask padded tokens
if (data) {
- for (uint32_t j = n_tokens; j < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++j) {
- for (uint32_t i = 0; i < n_kv; ++i) {
- data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY;
+ for (uint32_t i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
+ for (uint32_t j = 0; j < n_kv; ++j) {
+ data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
}
}
}
const int32_t n_kv = dst->ne[0];
for (int h = 0; h < 1; ++h) {
- for (int j = 0; j < n_tokens; ++j) {
- for (int i = 0; i < n_kv; ++i) {
+ for (int i = 0; i < n_tokens; ++i) {
+ for (int j = 0; j < n_kv; ++j) {
// the position when the cells is empty is irrelevant - it will be masked out later in the attention
- const llama_pos p0 = cells.is_empty(i) ? -1 : cells.pos_get(i);
+ const llama_pos p0 = cells.is_empty(j) ? -1 : cells.pos_get(j);
- data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(p0, ubatch->pos[j], hparams.n_rel_attn_bkts, false);
+ data[h*(n_kv*n_tokens) + i*n_kv + j] = llama_relative_position_bucket(p0, ubatch->pos[i], hparams.n_rel_attn_bkts, false);
}
}
}
for (const auto & layer : layers) {
const uint32_t il = layer.il;
- const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
// Write key type
const int32_t k_type_i = (int32_t)layer.k->type;
for (const auto & layer : layers) {
const uint32_t il = layer.il;
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
// Write value type
const int32_t v_type_i = (int32_t)layer.v->type;
for (const auto & layer : layers) {
const uint32_t il = layer.il;
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
// Write value type
const int32_t v_type_i = (int32_t)layer.v->type;
seq_rm(dest_seq_id, -1, -1);
- llama_sbatch sbatch;
- llama_ubatch ubatch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false);
+ llama_batch_allocr balloc(hparams.n_pos_per_embd());
- ubatch.n_tokens = cell_count;
- ubatch.n_seq_tokens = cell_count;
- ubatch.n_seqs = 1;
+ llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);
for (uint32_t i = 0; i < cell_count; ++i) {
llama_pos pos;
for (const auto & layer : layers) {
const uint32_t il = layer.il;
- const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
// Read type of key
int32_t k_type_i_ref;
for (const auto & layer : layers) {
const uint32_t il = layer.il;
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
// Read type of value
int32_t v_type_i_ref;
for (const auto & layer : layers) {
const uint32_t il = layer.il;
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
// Read type of value
int32_t v_type_i_ref;
llama_kv_cache_unified_state::llama_kv_cache_unified_state(
llama_kv_cache_unified * kv,
- llama_sbatch sbatch,
llama_kv_cache_unified::ubatch_heads heads,
- std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), sbatch(std::move(sbatch)), heads(std::move(heads)), ubatches(std::move(ubatches)) {
+ std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), heads(std::move(heads)), ubatches(std::move(ubatches)) {
}
llama_kv_cache_unified_state::~llama_kv_cache_unified_state() = default;
return true;
}
-std::vector<int64_t> & llama_kv_cache_unified_state::out_ids() {
- assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
-
- return sbatch.out_ids;
-}
-
llama_memory_status llama_kv_cache_unified_state::get_status() const {
return status;
}
//
llama_memory_state_ptr init_batch(
- const llama_batch & batch,
+ llama_batch_allocr & balloc,
uint32_t n_ubatch,
bool embd_all) override;
// used to create a decode state from a batch
llama_kv_cache_unified_state(
llama_kv_cache_unified * kv,
- llama_sbatch sbatch,
ubatch_heads heads,
std::vector<llama_ubatch> ubatches);
bool next() override;
bool apply() override;
- std::vector<int64_t> & out_ids() override;
-
llama_memory_status get_status() const override;
const llama_ubatch & get_ubatch() const override;
// batch processing state
//
- llama_sbatch sbatch;
-
// the index of the next ubatch to process
size_t i_next = 0;
//
std::vector<llama_pos> shift;
- using bits_t = std::bitset<LLAMA_MAX_SEQ>;
+ using seq_set_t = std::bitset<LLAMA_MAX_SEQ>;
// the bitset seq[i] tells us which sequences are currently occupying the i-th cell
- std::vector<bits_t> seq;
+ std::vector<seq_set_t> seq;
// the set seq_pos[s] tells us which positions are currently present for sequence s
// this way seq_pos[s].begin() and seq_pos[s].rbegin() give us the min/max positions currently in the cache
--- /dev/null
+#include "llama-memory-hybrid.h"
+
+#include "llama-impl.h"
+#include "llama-model.h"
+#include "llama-context.h"
+
+//
+// llama_memory_hybrid
+//
+
+llama_memory_hybrid::llama_memory_hybrid(
+ const llama_model & model,
+ /* attn */
+ ggml_type type_k,
+ ggml_type type_v,
+ bool v_trans,
+ uint32_t kv_size,
+ uint32_t n_pad,
+ uint32_t n_swa,
+ llama_swa_type swa_type,
+ /* recurrent */
+ ggml_type type_r,
+ ggml_type type_s,
+ uint32_t rs_size,
+ /* common */
+ uint32_t n_seq_max,
+ bool offload,
+ /* layer filters */
+ layer_filter_cb && filter_attn,
+ layer_filter_cb && filter_recr) :
+ hparams(model.hparams),
+ mem_attn(new llama_kv_cache_unified(
+ model,
+ filter_attn == nullptr ?
+ [&](int32_t il) { return !hparams.is_recurrent(il); }
+ : filter_attn,
+ type_k,
+ type_v,
+ v_trans,
+ offload,
+ kv_size,
+ n_seq_max,
+ n_pad,
+ n_swa,
+ swa_type
+ )),
+ mem_recr(new llama_memory_recurrent(
+ model,
+ filter_recr == nullptr ?
+ [&](int32_t il) { return hparams.is_recurrent(il); }
+ : filter_recr,
+ type_r,
+ type_s,
+ offload,
+ rs_size,
+ n_seq_max
+ )) {}
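+
+// note: when no explicit filters are given, each layer is routed to exactly
+// one of the two child caches based on hparams.is_recurrent(il): recurrent
+// layers go to mem_recr, all remaining layers go to mem_attn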
+
+llama_memory_state_ptr llama_memory_hybrid::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
+ do {
+ balloc.split_reset();
+
+ // follow the recurrent pattern for creating the ubatch splits
+ std::vector<llama_ubatch> ubatches;
+
+ while (true) {
+ llama_ubatch ubatch;
+
+ if (embd_all) {
+ // if all tokens are output, split by sequence
+ ubatch = balloc.split_seq(n_ubatch);
+ } else {
+ ubatch = balloc.split_equal(n_ubatch);
+ }
+
+ if (ubatch.n_tokens == 0) {
+ break;
+ }
+
+ ubatches.push_back(std::move(ubatch)); // NOLINT
+ }
+
+ // prepare the recurrent batches first
+ if (!mem_recr->prepare(ubatches)) {
+ // TODO: will the recurrent cache be in an undefined state at this point?
+ LLAMA_LOG_ERROR("%s: failed to prepare recurrent ubatches\n", __func__);
+ return std::make_unique<llama_memory_hybrid_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+ }
+
+ // prepare the attention cache
+ auto heads_attn = mem_attn->prepare(ubatches);
+ if (heads_attn.empty()) {
+ LLAMA_LOG_ERROR("%s: failed to prepare attention ubatches\n", __func__);
+ return std::make_unique<llama_memory_hybrid_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+ }
+
+ return std::make_unique<llama_memory_hybrid_state>(
+ this, std::move(heads_attn), std::move(ubatches));
+ } while(false);
+
+ return std::make_unique<llama_memory_hybrid_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+}
+
+llama_memory_state_ptr llama_memory_hybrid::init_full() {
+ return std::make_unique<llama_memory_hybrid_state>(this);
+}
+
+llama_memory_state_ptr llama_memory_hybrid::init_update(llama_context * lctx, bool optimize) {
+ return std::make_unique<llama_memory_hybrid_state>(this, lctx, optimize);
+}
+
+bool llama_memory_hybrid::get_can_shift() const {
+ // Shifting is trivially supported for recurrent
+ return mem_attn->get_can_shift();
+}
+
+void llama_memory_hybrid::clear(bool data) {
+ mem_attn->clear(data);
+ mem_recr->clear(data);
+}
+
+bool llama_memory_hybrid::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
+ // Try removing from the recurrent cache first since it may fail. If it does
+ // fail, the cache will not have been mutated.
+ if (!mem_recr->seq_rm(seq_id, p0, p1)) {
+ return false;
+ }
+ return mem_attn->seq_rm(seq_id, p0, p1);
+}
+
+void llama_memory_hybrid::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
+ mem_attn->seq_cp(seq_id_src, seq_id_dst, p0, p1);
+ mem_recr->seq_cp(seq_id_src, seq_id_dst, p0, p1);
+}
+
+void llama_memory_hybrid::seq_keep(llama_seq_id seq_id) {
+ mem_attn->seq_keep(seq_id);
+ mem_recr->seq_keep(seq_id);
+}
+
+void llama_memory_hybrid::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
+ mem_attn->seq_add(seq_id, p0, p1, shift);
+ mem_recr->seq_add(seq_id, p0, p1, shift);
+}
+
+void llama_memory_hybrid::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
+ mem_attn->seq_div(seq_id, p0, p1, d);
+ mem_recr->seq_div(seq_id, p0, p1, d);
+}
+
+llama_pos llama_memory_hybrid::seq_pos_min(llama_seq_id seq_id) const {
+ // the min of the total cache is the max of the two caches' min values
+ return std::max(mem_attn->seq_pos_min(seq_id), mem_recr->seq_pos_min(seq_id));
+}
+
+llama_pos llama_memory_hybrid::seq_pos_max(llama_seq_id seq_id) const {
+ // the max of the total cache is the min of the two caches' max values
+ return std::min(mem_attn->seq_pos_max(seq_id), mem_recr->seq_pos_max(seq_id));
+}
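+
+// worked example for the two functions above: if the attention cache holds
+// positions [10, 20] of a sequence while the recurrent cache holds [0, 18],
+// then only positions [max(10, 0), min(20, 18)] = [10, 18] are valid in both
+// children, which is exactly the range reported by seq_pos_min()/seq_pos_max()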
+
+void llama_memory_hybrid::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
+ mem_attn->state_write(io, seq_id);
+ mem_recr->state_write(io, seq_id);
+}
+
+void llama_memory_hybrid::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
+ mem_attn->state_read(io, seq_id);
+ mem_recr->state_read(io, seq_id);
+}
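+
+// note: the two child caches are serialized back-to-back, attention first and
+// recurrent second, so state_read() consumes them in the same order that
+// state_write() produced them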
+
+llama_kv_cache_unified * llama_memory_hybrid::get_mem_attn() const {
+ return mem_attn.get();
+}
+
+llama_memory_recurrent * llama_memory_hybrid::get_mem_recr() const {
+ return mem_recr.get();
+}
+
+llama_memory_hybrid_state::llama_memory_hybrid_state(llama_memory_status status) : status(status) {}
+
+llama_memory_hybrid_state::llama_memory_hybrid_state(llama_memory_hybrid * mem) :
+ state_attn(mem->get_mem_attn()->init_full()),
+ state_recr(mem->get_mem_recr()->init_full()),
+ status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) {
+}
+
+llama_memory_hybrid_state::llama_memory_hybrid_state(
+ llama_memory_hybrid * mem,
+ llama_context * lctx,
+ bool optimize) :
+ state_attn(mem->get_mem_attn()->init_update(lctx, optimize)),
+ state_recr(mem->get_mem_recr()->init_update(lctx, optimize)),
+ status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) {
+}
+
+llama_memory_hybrid_state::llama_memory_hybrid_state(
+ llama_memory_hybrid * mem,
+ std::vector<uint32_t> heads_attn,
+ std::vector<llama_ubatch> ubatches) :
+ ubatches(std::move(ubatches)),
+ // note: here we copy the ubatches. not sure if this is ideal
+ state_attn(new llama_kv_cache_unified_state(mem->get_mem_attn(), std::move(heads_attn), this->ubatches)),
+ state_recr(new llama_memory_recurrent_state(mem->get_mem_recr(), this->ubatches)),
+ status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) {
+}
+
+bool llama_memory_hybrid_state::next() {
+ assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+ state_attn->next();
+ state_recr->next();
+
+ if (++i_next >= ubatches.size()) {
+ return false;
+ }
+
+ return true;
+}
+
+bool llama_memory_hybrid_state::apply() {
+ assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+ bool res = true;
+
+ res = res & state_attn->apply();
+ res = res & state_recr->apply();
+
+ return res;
+}
+
+llama_memory_status llama_memory_hybrid_state::get_status() const {
+ return status;
+}
+
+const llama_ubatch & llama_memory_hybrid_state::get_ubatch() const {
+ assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+ return ubatches[i_next];
+}
+
+const llama_kv_cache_unified_state * llama_memory_hybrid_state::get_state_attn() const {
+ return static_cast<const llama_kv_cache_unified_state *>(state_attn.get());
+}
+
+const llama_memory_recurrent_state * llama_memory_hybrid_state::get_state_recr() const {
+ return static_cast<const llama_memory_recurrent_state *>(state_recr.get());
+}
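+
+// the static_casts in the two getters above assume that state_attn and
+// state_recr always hold a llama_kv_cache_unified_state and a
+// llama_memory_recurrent_state respectively, which is how the constructors
+// above initialize them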
--- /dev/null
+#pragma once
+
+#include "llama-batch.h"
+#include "llama-graph.h"
+#include "llama-kv-cache-unified.h"
+#include "llama-memory.h"
+#include "llama-memory-recurrent.h"
+
+#include <memory>
+#include <vector>
+
+//
+// llama_memory_hybrid
+//
+
+// utilizes instances of llama_memory_recurrent and llama_kv_cache_unified to
+// support models where each layer may be either attention-based or recurrent
+
+class llama_memory_hybrid : public llama_memory_i {
+public:
+
+ // this callback is used to filter out layers that should not be included in the cache
+ using layer_filter_cb = std::function<bool(int32_t il)>;
+
+ llama_memory_hybrid(
+ const llama_model & model,
+ /* attn */
+ ggml_type type_k,
+ ggml_type type_v,
+ bool v_trans,
+ uint32_t kv_size,
+ uint32_t n_pad,
+ uint32_t n_swa,
+ llama_swa_type swa_type,
+ /* recurrent */
+ ggml_type type_r,
+ ggml_type type_s,
+ uint32_t rs_size,
+ /* common */
+ uint32_t n_seq_max,
+ bool offload,
+ /* layer filters */
+ layer_filter_cb && filter_attn = nullptr,
+ layer_filter_cb && filter_recr = nullptr);
+
+ ~llama_memory_hybrid() = default;
+
+ //
+ // llama_memory_i
+ //
+
+ llama_memory_state_ptr init_batch(
+ llama_batch_allocr & balloc,
+ uint32_t n_ubatch,
+ bool embd_all) override;
+
+ llama_memory_state_ptr init_full() override;
+
+ llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) override;
+
+ bool get_can_shift() const override;
+
+ void clear(bool data) override;
+
+ bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+ void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+ void seq_keep(llama_seq_id seq_id) override;
+ void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
+ void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+ llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+ llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+ // state write/load
+
+ void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+ void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+ //
+ // llama_memory_hybrid specific API
+ //
+
+ llama_kv_cache_unified * get_mem_attn() const;
+ llama_memory_recurrent * get_mem_recr() const;
+
+private:
+ const llama_hparams & hparams;
+
+ const std::unique_ptr<llama_kv_cache_unified> mem_attn;
+ const std::unique_ptr<llama_memory_recurrent> mem_recr;
+};
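+
+// illustrative construction sketch: the concrete argument values below are
+// hypothetical and only meant to show how the parameters are grouped
+// (attention settings, then recurrent settings, then the common ones)
+//
+//   llama_memory_hybrid mem(
+//       model,
+//       /* attn      */ GGML_TYPE_F16, GGML_TYPE_F16, /* v_trans */ false,
+//                       /* kv_size */ 4096, /* n_pad */ 32, /* n_swa */ 0, LLAMA_SWA_TYPE_NONE,
+//       /* recurrent */ GGML_TYPE_F32, GGML_TYPE_F32, /* rs_size */ 8,
+//       /* common    */ /* n_seq_max */ 8, /* offload */ true);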
+
+class llama_memory_hybrid_state : public llama_memory_state_i {
+public:
+ // init failure
+ explicit llama_memory_hybrid_state(llama_memory_status status);
+
+ // init full
+ explicit llama_memory_hybrid_state(llama_memory_hybrid * mem);
+
+ // init update
+ explicit llama_memory_hybrid_state(
+ llama_memory_hybrid * mem,
+ llama_context * lctx,
+ bool optimize);
+
+ // init success
+ llama_memory_hybrid_state(
+ llama_memory_hybrid * mem,
+ std::vector<uint32_t> heads_attn,
+ std::vector<llama_ubatch> ubatches);
+
+ ~llama_memory_hybrid_state() = default;
+
+ bool next() override;
+ bool apply() override;
+
+ llama_memory_status get_status() const override;
+ const llama_ubatch & get_ubatch() const override;
+
+ //
+ // llama_memory_hybrid_state
+ //
+
+ const llama_kv_cache_unified_state * get_state_attn() const;
+ const llama_memory_recurrent_state * get_state_recr() const;
+
+private:
+ // the index of the next ubatch to process
+ size_t i_next = 0;
+
+ std::vector<llama_ubatch> ubatches;
+
+ const llama_memory_state_ptr state_attn;
+ const llama_memory_state_ptr state_recr;
+
+ const llama_memory_status status;
+};
--- /dev/null
+#include "llama-memory-recurrent.h"
+
+#include "llama-impl.h"
+#include "llama-io.h"
+#include "llama-batch.h"
+#include "llama-model.h"
+
+#include <algorithm>
+#include <cassert>
+#include <limits>
+#include <map>
+#include <stdexcept>
+
+//
+// llama_memory_recurrent
+//
+
+llama_memory_recurrent::llama_memory_recurrent(
+ const llama_model & model,
+ layer_filter_cb && filter,
+ ggml_type type_r,
+ ggml_type type_s,
+ bool offload,
+ uint32_t mem_size,
+ uint32_t n_seq_max) : hparams(model.hparams), n_seq_max(n_seq_max) {
+ const int32_t n_layer = hparams.n_layer;
+
+ LLAMA_LOG_INFO("%s: mem_size = %u, n_seq_max = %u, type_r = '%s', type_s = '%s', n_layer = %d\n",
+ __func__, mem_size, n_seq_max, ggml_type_name(type_r), ggml_type_name(type_s), n_layer);
+
+ head = 0;
+ size = mem_size;
+ used = 0;
+
+ cells.clear();
+ cells.resize(mem_size);
+
+ // create a context for each buffer type
+ std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
+ auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
+ auto it = ctx_map.find(buft);
+ if (it == ctx_map.end()) {
+ ggml_init_params params = {
+ /*.mem_size =*/ size_t(2u*n_layer*ggml_tensor_overhead()),
+ /*.mem_buffer =*/ NULL,
+ /*.no_alloc =*/ true,
+ };
+
+ ggml_context * ctx = ggml_init(params);
+ if (!ctx) {
+ return nullptr;
+ }
+
+ ctx_map[buft] = ctx;
+ ctxs.emplace_back(ctx);
+
+ return ctx;
+ }
+
+ return it->second;
+ };
+
+ r_l.resize(n_layer);
+ s_l.resize(n_layer);
+
+ for (int i = 0; i < n_layer; i++) {
+ if (filter && !filter(i)) {
+ LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, i);
+ continue;
+ }
+
+ const char * dev_name = "CPU";
+
+ ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();
+
+ if (offload) {
+ auto * dev = model.dev_layer(i);
+ buft = ggml_backend_dev_buffer_type(dev);
+
+ dev_name = ggml_backend_dev_name(dev);
+ }
+
+ LLAMA_LOG_DEBUG("%s, layer %3d: dev = %s\n", __func__, i, dev_name);
+
+ ggml_context * ctx = ctx_for_buft(buft);
+ if (!ctx) {
+ throw std::runtime_error("failed to create ggml context for kv cache");
+ }
+
+ ggml_tensor * r = ggml_new_tensor_1d(ctx, type_r, hparams.n_embd_r()*mem_size);
+ ggml_tensor * s = ggml_new_tensor_1d(ctx, type_s, hparams.n_embd_s()*mem_size);
+ ggml_format_name(r, "cache_r_l%d", i);
+ ggml_format_name(s, "cache_s_l%d", i);
+ r_l[i] = r;
+ s_l[i] = s;
+ }
+
+ // allocate tensors and initialize the buffers to avoid NaNs in the padding
+ for (auto it : ctx_map) {
+ auto * buft = it.first;
+ auto * ctx = it.second;
+
+ ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
+ if (!buf) {
+ throw std::runtime_error("failed to allocate buffer for kv cache");
+ }
+ ggml_backend_buffer_clear(buf, 0);
+ LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
+ bufs.emplace_back(buf);
+ }
+
+ {
+ const size_t memory_size_r = size_r_bytes();
+ const size_t memory_size_s = size_s_bytes();
+
+ LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, R (%s): %7.2f MiB, S (%s): %7.2f MiB\n", __func__,
+ (float)(memory_size_r + memory_size_s) / (1024.0f * 1024.0f),
+ ggml_type_name(type_r), (float)memory_size_r / (1024.0f * 1024.0f),
+ ggml_type_name(type_s), (float)memory_size_s / (1024.0f * 1024.0f));
+ }
+}
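+
+// layout note: each non-skipped layer gets a single 1-D r tensor and a single
+// 1-D s tensor holding mem_size states back-to-back, so the state of cell c
+// starts at element c*n_embd_r() (respectively c*n_embd_s()) of the layer's
+// tensor; the row offsets used by state_write_data()/state_read_data() below
+// rely on this layout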
+
+void llama_memory_recurrent::clear(bool data) {
+ for (int32_t i = 0; i < (int32_t) size; ++i) {
+ cells[i].pos = -1;
+ cells[i].seq_id.clear();
+ cells[i].src = -1;
+ cells[i].tail = -1;
+ }
+
+ head = 0;
+ used = 0;
+
+ if (data) {
+ for (auto & buf : bufs) {
+ ggml_backend_buffer_clear(buf.get(), 0);
+ }
+ }
+}
+
+bool llama_memory_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
+ uint32_t new_head = size;
+
+ if (p0 < 0) {
+ p0 = 0;
+ }
+
+ if (p1 < 0) {
+ p1 = std::numeric_limits<llama_pos>::max();
+ }
+
+ // models like Mamba or RWKV can't have a state partially erased
+ if (seq_id >= (int64_t) size) {
+ // could be fatal
+ return false;
+ }
+ if (0 <= seq_id) {
+ int32_t & tail_id = cells[seq_id].tail;
+ if (tail_id >= 0) {
+ const auto & cell = cells[tail_id];
+ // partial intersection is invalid
+ if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) {
+ return false;
+ }
+ // invalidate tails which will be cleared
+ if (p0 <= cell.pos && cell.pos < p1) {
+ tail_id = -1;
+ }
+ }
+ } else {
+ // seq_id is negative, then the range should include everything or nothing
+ if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
+ return false;
+ }
+ }
+
+ for (uint32_t i = 0; i < size; ++i) {
+ if (cells[i].pos >= p0 && cells[i].pos < p1) {
+ if (seq_id < 0) {
+ cells[i].seq_id.clear();
+ } else if (cells[i].has_seq_id(seq_id)) {
+ cells[i].seq_id.erase(seq_id);
+ } else {
+ continue;
+ }
+ if (cells[i].is_empty()) {
+ // keep count of the number of used cells
+ if (cells[i].pos >= 0) {
+ used--;
+ }
+ cells[i].pos = -1;
+ cells[i].src = -1;
+ if (new_head == size) {
+ new_head = i;
+ }
+ }
+ }
+ }
+
+ // If we freed up a slot, set head to it so searching can start there.
+ if (new_head != size && new_head < head) {
+ head = new_head;
+ }
+
+ return true;
+}
+
+void llama_memory_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
+ if (seq_id_src == seq_id_dst) {
+ return;
+ }
+
+ if (p0 < 0) {
+ p0 = 0;
+ }
+
+ if (p1 < 0) {
+ p1 = std::numeric_limits<llama_pos>::max();
+ }
+
+ if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) {
+ auto & tail_src = cells[seq_id_src];
+ auto & tail_dst = cells[seq_id_dst];
+ if (tail_dst.tail >= 0) {
+ // clear destination seq_id if it wasn't empty
+ auto & cell_dst = cells[tail_dst.tail];
+
+ cell_dst.seq_id.erase(seq_id_dst);
+ tail_dst.tail = -1;
+ if (cell_dst.seq_id.empty()) {
+ cell_dst.pos = -1;
+ cell_dst.src = -1;
+ used -= 1;
+ }
+ }
+ if (tail_src.tail >= 0) {
+ auto & cell_src = cells[tail_src.tail];
+
+ cell_src.seq_id.insert(seq_id_dst);
+ tail_dst.tail = tail_src.tail;
+ }
+ }
+}
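+
+// note: seq_cp() above does not copy any tensor data; it points the
+// destination sequence's tail at the source sequence's tail cell, so both
+// sequences share the same state cell until find_slot() later gives the
+// destination its own cell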
+
+void llama_memory_recurrent::seq_keep(llama_seq_id seq_id) {
+ uint32_t new_head = size;
+
+ for (uint32_t i = 0; i < size; ++i) {
+ if ((llama_seq_id) i != seq_id) {
+ cells[i].tail = -1;
+ }
+
+ if (!cells[i].has_seq_id(seq_id)) {
+ if (cells[i].pos >= 0) {
+ used--;
+ }
+
+ cells[i].pos = -1;
+ cells[i].src = -1;
+ cells[i].seq_id.clear();
+
+ if (new_head == size){
+ new_head = i;
+ }
+ } else {
+ cells[i].seq_id.clear();
+ cells[i].seq_id.insert(seq_id);
+ }
+ }
+
+ // If we freed up a slot, set head to it so searching can start there.
+ if (new_head != size && new_head < head) {
+ head = new_head;
+ }
+}
+
+void llama_memory_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
+ if (shift == 0) {
+ return;
+ }
+
+ if (p0 < 0) {
+ p0 = 0;
+ }
+
+ if (p1 < 0) {
+ p1 = std::numeric_limits<llama_pos>::max();
+ }
+
+ // If there is no range then return early to avoid looping over the cache.
+ if (p0 == p1) {
+ return;
+ }
+
+ // for Mamba-like or RWKV models, only the pos needs to be shifted
+ if (0 <= seq_id && seq_id < (int64_t) size) {
+ const int32_t tail_id = cells[seq_id].tail;
+ if (tail_id >= 0) {
+ auto & cell = cells[tail_id];
+ if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
+ cell.pos += shift;
+ }
+ }
+ }
+}
+
+void llama_memory_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
+ if (d == 1) {
+ return;
+ }
+
+ if (p0 < 0) {
+ p0 = 0;
+ }
+
+ if (p1 < 0) {
+ p1 = std::numeric_limits<llama_pos>::max();
+ }
+
+ // If there is no range then return early to avoid looping over the cache.
+ if (p0 == p1) {
+ return;
+ }
+
+ // for Mamba-like or RWKV models, only the pos needs to be changed
+ if (0 <= seq_id && seq_id < (int64_t) size) {
+ const int32_t tail_id = cells[seq_id].tail;
+ if (tail_id >= 0) {
+ auto & cell = cells[tail_id];
+ if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
+ cell.pos /= d;
+ }
+ }
+ }
+}
+
+llama_pos llama_memory_recurrent::seq_pos_min(llama_seq_id seq_id) const {
+ llama_pos result = std::numeric_limits<llama_pos>::max();
+
+ for (uint32_t i = 0; i < size; ++i) {
+ if (cells[i].has_seq_id(seq_id)) {
+ result = std::min(result, cells[i].pos);
+ }
+ }
+
+ if (result == std::numeric_limits<llama_pos>::max()) {
+ result = -1;
+ }
+
+ return result;
+}
+
+llama_pos llama_memory_recurrent::seq_pos_max(llama_seq_id seq_id) const {
+ llama_pos result = -1;
+
+ for (uint32_t i = 0; i < size; ++i) {
+ if (cells[i].has_seq_id(seq_id)) {
+ result = std::max(result, cells[i].pos);
+ }
+ }
+
+ return result;
+}
+
+llama_memory_state_ptr llama_memory_recurrent::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
+ std::vector<llama_ubatch> ubatches;
+
+ while (true) {
+ llama_ubatch ubatch;
+
+ if (embd_all) {
+ // if all tokens are output, split by sequence
+ ubatch = balloc.split_seq(n_ubatch);
+ } else {
+ ubatch = balloc.split_equal(n_ubatch);
+ }
+
+ if (ubatch.n_tokens == 0) {
+ break;
+ }
+
+ ubatches.push_back(std::move(ubatch)); // NOLINT
+ }
+
+ if (!prepare(ubatches)) {
+ return std::make_unique<llama_memory_recurrent_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+ }
+
+ return std::make_unique<llama_memory_recurrent_state>(this, std::move(ubatches));
+}
+
+llama_memory_state_ptr llama_memory_recurrent::init_full() {
+ return std::make_unique<llama_memory_recurrent_state>(this);
+}
+
+llama_memory_state_ptr llama_memory_recurrent::init_update(llama_context * lctx, bool optimize) {
+ GGML_UNUSED(lctx);
+ GGML_UNUSED(optimize);
+
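+ // the recurrent cache has no shift or defrag maintenance to perform, so
+ // there is never an update to apply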
+ return std::make_unique<llama_memory_recurrent_state>(LLAMA_MEMORY_STATUS_NO_UPDATE);
+}
+
+bool llama_memory_recurrent::prepare(const std::vector<llama_ubatch> & ubatches) {
+ // simply remember the full state because it is very small for this type of cache
+ // TODO: optimize
+ auto org_cells = cells;
+ auto org_used = used;
+ auto org_head = head;
+
+ bool success = true;
+
+ for (const auto & ubatch : ubatches) {
+ if (!find_slot(ubatch)) {
+ success = false;
+ break;
+ }
+ }
+
+ // restore the original state
+ cells = std::move(org_cells);
+ used = org_used;
+ head = org_head;
+
+ return success;
+}
+
+bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {
+ const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
+ const uint32_t n_seqs = ubatch.n_seqs;
+
+ // if we have enough unused cells before the current head ->
+ // better to start searching from the beginning of the cache, hoping to fill it
+ if (head > used + 2*n_seqs) {
+ head = 0;
+ }
+
+ // For recurrent state architectures (like Mamba or RWKV),
+ // each cache cell can store the state for a whole sequence.
+ // A slot should always be contiguous.
+
+ // can only process batches with an equal number of new tokens in each sequence
+ GGML_ASSERT(ubatch.equal_seqs);
+
+ int32_t min = size - 1;
+ int32_t max = 0;
+
+ // everything should fit if all seq_ids are smaller than the max
+ for (uint32_t s = 0; s < n_seqs; ++s) {
+ const uint32_t i = s*n_seq_tokens; // first token of sequence set s
+ const uint32_t n_seq_id = ubatch.n_seq_id[i];
+
+ for (uint32_t j = 0; j < n_seq_id; ++j) {
+ const llama_seq_id seq_id = ubatch.seq_id[i][j];
+
+ if (seq_id < 0 || (uint32_t) seq_id >= size) {
+ // too big seq_id
+ // TODO: would it be possible to resize the cache instead?
+ LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%u Try using a bigger --parallel value\n", __func__, seq_id, n_seq_max);
+ return false;
+ }
+ if (j > 0) {
+ auto & seq = cells[seq_id];
+ if (seq.tail >= 0) {
+ auto & cell = cells[seq.tail];
+ // clear cells from seq_ids that become shared
+ // (should not normally happen, but let's handle it anyway)
+ cell.seq_id.erase(seq_id);
+ seq.tail = -1;
+ if (cell.seq_id.empty()) {
+ cell.pos = -1;
+ cell.src = -1;
+ used -= 1;
+ }
+ }
+ }
+ }
+ }
+
+#ifndef NDEBUG
+ {
+ std::vector<int32_t> tails_verif;
+ tails_verif.assign(size, -1);
+ for (uint32_t i = 0; i < size; ++i) {
+ auto & cell = cells[i];
+ for (llama_seq_id seq_id : cell.seq_id) {
+ if (tails_verif[seq_id] != -1) {
+ LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
+ }
+ tails_verif[seq_id] = i;
+ }
+ }
+ for (uint32_t i = 0; i < size; ++i) {
+ if (tails_verif[i] != cells[i].tail) {
+ LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]);
+ }
+ }
+ }
+#endif
+
+ // find next empty cell
+ uint32_t next_empty_cell = head;
+
+ for (uint32_t i = 0; i < size; ++i) {
+ if (next_empty_cell >= size) { next_empty_cell -= size; }
+ auto & cell = cells[next_empty_cell];
+ if (cell.is_empty()) { break; }
+ next_empty_cell += 1;
+ }
+
+ // find usable cell range
+ for (uint32_t s = 0; s < n_seqs; ++s) {
+ const uint32_t i = s*n_seq_tokens;
+ const llama_seq_id seq_id = ubatch.seq_id[i][0];
+ auto & seq_meta = cells[seq_id];
+ bool has_cell = false;
+ if (seq_meta.tail >= 0) {
+ auto & cell = cells[seq_meta.tail];
+ GGML_ASSERT(cell.has_seq_id(seq_id));
+ // does this seq_id "own" the cell?
+ if (cell.seq_id.size() == 1) { has_cell = true; }
+ }
+ if (!has_cell) {
+ auto & empty_cell = cells[next_empty_cell];
+ GGML_ASSERT(empty_cell.is_empty());
+ // copy old tail into the empty cell
+ if (seq_meta.tail >= 0) {
+ auto & orig_cell = cells[seq_meta.tail];
+ empty_cell.pos = orig_cell.pos;
+ empty_cell.src = orig_cell.src;
+ orig_cell.seq_id.erase(seq_id);
+ empty_cell.seq_id.insert(seq_id); // will be overwritten
+ GGML_ASSERT(!orig_cell.is_empty()); // has at least one remaining seq_id
+ }
+ seq_meta.tail = next_empty_cell;
+ // find next empty cell
+ if (s + 1 < n_seqs) {
+ for (uint32_t j = 0; j < size; ++j) {
+ next_empty_cell += 1;
+ if (next_empty_cell >= size) { next_empty_cell -= size; }
+ auto & cell = cells[next_empty_cell];
+ if (cell.is_empty()) { break; }
+ }
+ }
+ }
+ if (min > seq_meta.tail) { min = seq_meta.tail; }
+ if (max < seq_meta.tail) { max = seq_meta.tail; }
+ }
+
+ // gather and re-order: swap the cells used by this ubatch into the contiguous range [min, max] and fix up any tails that pointed at the swapped cells
+ for (uint32_t s = 0; s < n_seqs; ++s) {
+ const uint32_t i = s*n_seq_tokens;
+ const int32_t dst_id = s + min;
+ const int32_t src_id = cells[ubatch.seq_id[i][0]].tail;
+ if (dst_id != src_id) {
+ auto & dst_cell = cells[dst_id];
+ auto & src_cell = cells[src_id];
+
+ std::swap(dst_cell.pos, src_cell.pos);
+ std::swap(dst_cell.src, src_cell.src);
+ std::swap(dst_cell.seq_id, src_cell.seq_id);
+
+ // swap tails
+ for (uint32_t j = 0; j < size; ++j) {
+ int32_t & tail = cells[j].tail;
+ if (tail == src_id) {
+ tail = dst_id;
+ } else if (tail == dst_id) {
+ tail = src_id;
+ }
+ }
+ }
+ }
+
+ // update the pos of the used seqs
+ for (uint32_t s = 0; s < n_seqs; ++s) {
+ const uint32_t i = s*n_seq_tokens;
+ const llama_pos last_pos = ubatch.pos[i + n_seq_tokens - 1];
+ const int32_t cell_id = s + min;
+ auto & cell = cells[cell_id];
+
+ if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
+ // What should happen when the pos backtracks or skips a value?
+ // Clearing the state mid-batch would require special-casing which isn't done.
+ LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
+ __func__, last_pos, cell.pos, ubatch.seq_id[i][0], n_seq_tokens);
+ }
+ cell.pos = last_pos;
+ cell.seq_id.clear();
+ for (int32_t j = 0; j < ubatch.n_seq_id[i]; ++j) {
+ const llama_seq_id seq_id = ubatch.seq_id[i][j];
+ cell.seq_id.insert(seq_id);
+ cells[seq_id].tail = cell_id;
+ }
+ }
+
+ // Find first cell without src refs, to use as the zero-ed state
+ {
+ // TODO: bake-in src refcounts in the cell metadata
+ std::vector<int32_t> refcounts(size, 0);
+ for (size_t i = 0; i < size; ++i) {
+ const int32_t src = cells[i].src;
+ if (src >= 0) {
+ refcounts[src] += 1;
+ }
+ }
+
+ rs_z = -1;
+ for (int i = min; i <= max; ++i) {
+ if (refcounts[i] == 0) {
+ rs_z = i;
+ break;
+ }
+ }
+
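+ // rs_z is the first cell in [min, max] that no other cell copies from, so it
+ // can safely serve as the zero-ed source state; cells without a valid source
+ // (src < 0) are redirected to copy from it below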
+ for (int i = min; i <= max; ++i) {
+ if (cells[i].src < 0) {
+ GGML_ASSERT(rs_z >= 0);
+ cells[i].src0 = rs_z;
+ } else {
+ // Stage the source ids for all used cells to allow correct seq_* behavior
+ // and still make these values available when setting the inputs
+ cells[i].src0 = cells[i].src;
+ }
+ cells[i].src = i; // avoid moving or clearing twice
+ }
+ }
+
+ // allow getting the range of used cells, from head to head + n
+ head = min;
+ n = max - min + 1;
+ used = std::count_if(cells.begin(), cells.end(),
+ [](const mem_cell & cell){ return !cell.is_empty(); });
+
+ // sanity check
+ return n >= n_seqs;
+}
+
+bool llama_memory_recurrent::get_can_shift() const {
+ // shifting the pos is trivial for recurrent models
+ return true;
+}
+
+size_t llama_memory_recurrent::total_size() const {
+ size_t size = 0;
+ for (const auto & buf : bufs) {
+ size += ggml_backend_buffer_get_size(buf.get());
+ }
+
+ return size;
+}
+
+size_t llama_memory_recurrent::size_r_bytes() const {
+ size_t size_r_bytes = 0;
+
+ for (const auto & r : r_l) {
+ if (r != nullptr) {
+ size_r_bytes += ggml_nbytes(r);
+ }
+ }
+
+ return size_r_bytes;
+}
+
+size_t llama_memory_recurrent::size_s_bytes() const {
+ size_t size_s_bytes = 0;
+
+ for (const auto & s : s_l) {
+ if (s != nullptr) {
+ size_s_bytes += ggml_nbytes(s);
+ }
+ }
+
+ return size_s_bytes;
+}
+
+void llama_memory_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
+ std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
+ uint32_t cell_count = 0;
+
+ // Count the number of cells with the specified seq_id
+ // Find all the ranges of cells with this seq id (or all, when -1)
+ uint32_t cell_range_begin = size;
+ for (uint32_t i = 0; i < size; ++i) {
+ const auto & cell = cells[i];
+ if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) {
+ ++cell_count;
+ if (cell_range_begin == size) {
+ cell_range_begin = i;
+ }
+ } else {
+ if (cell_range_begin != size) {
+ cell_ranges.emplace_back(cell_range_begin, i);
+ cell_range_begin = size;
+ }
+ }
+ }
+ if (cell_range_begin != size) {
+ cell_ranges.emplace_back(cell_range_begin, size);
+ }
+
+ // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
+ uint32_t cell_count_check = 0;
+ for (const auto & range : cell_ranges) {
+ cell_count_check += range.second - range.first;
+ }
+ GGML_ASSERT(cell_count == cell_count_check);
+
+ io.write(&cell_count, sizeof(cell_count));
+
+ state_write_meta(io, cell_ranges, seq_id);
+ state_write_data(io, cell_ranges);
+}
+
+void llama_memory_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
+ uint32_t cell_count;
+ io.read_to(&cell_count, sizeof(cell_count));
+
+ bool res = true;
+
+ res = res && state_read_meta(io, cell_count, seq_id);
+ res = res && state_read_data(io, cell_count);
+
+ if (!res) {
+ if (seq_id == -1) {
+ clear(true);
+ } else {
+ seq_rm(seq_id, -1, -1);
+ }
+ throw std::runtime_error("failed to restore kv cache");
+ }
+}
+
+void llama_memory_recurrent::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const {
+ for (const auto & range : cell_ranges) {
+ for (uint32_t i = range.first; i < range.second; ++i) {
+ const auto & cell = cells[i];
+ const llama_pos pos = cell.pos;
+ const uint32_t n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0;
+
+ io.write(&pos, sizeof(pos));
+ io.write(&n_seq_id, sizeof(n_seq_id));
+
+ if (n_seq_id) {
+ for (auto seq_id : cell.seq_id) {
+ io.write(&seq_id, sizeof(seq_id));
+ }
+ }
+ }
+ }
+}
+
+void llama_memory_recurrent::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const {
+ const uint32_t s_trans = 0;
+ const uint32_t n_layer = hparams.n_layer;
+
+ io.write(&s_trans, sizeof(s_trans));
+ io.write(&n_layer, sizeof(n_layer));
+
+ std::vector<uint8_t> tmp_buf;
+
+ // Iterate and write all the r states first, one row per cell
+ // Get whole range at a time
+ for (uint32_t il = 0; il < n_layer; ++il) {
+
+ // Write r state type
+ const int32_t r_type_i = (int32_t)r_l[il]->type;
+ io.write(&r_type_i, sizeof(r_type_i));
+
+ // Write row size of the r state
+ const uint64_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r());
+ io.write(&r_size_row, sizeof(r_size_row));
+
+ // Write out each range of cells as one contiguous block, r_size_row bytes per cell
+ for (const auto & range : cell_ranges) {
+ const size_t range_size = range.second - range.first;
+ const size_t buf_size = range_size * r_size_row;
+ io.write_tensor(r_l[il], range.first * r_size_row, buf_size);
+ }
+ }
+
+ if (!s_trans) {
+ for (uint32_t il = 0; il < n_layer; ++il) {
+
+ // Write s state type
+ const int32_t s_type_i = (int32_t)s_l[il]->type;
+ io.write(&s_type_i, sizeof(s_type_i));
+
+ // Write row size of the s state
+ const uint64_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s());
+ io.write(&s_size_row, sizeof(s_size_row));
+
+ // Write out each range of cells as one contiguous block, s_size_row bytes per cell
+ for (const auto & range : cell_ranges) {
+ const size_t range_size = range.second - range.first;
+ const size_t buf_size = range_size * s_size_row;
+ io.write_tensor(s_l[il], range.first * s_size_row, buf_size);
+ }
+ }
+ } else {
+ // When the s state is transposed, we also need the element size and the element ranges from each row
+ const uint32_t mem_size = size;
+ for (uint32_t il = 0; il < n_layer; ++il) {
+ const uint32_t n_embd_s = hparams.n_embd_s();
+
+ // Write s state type
+ const int32_t s_type_i = (int32_t)s_l[il]->type;
+ io.write(&s_type_i, sizeof(s_type_i));
+
+ // Write element size
+ const uint32_t s_size_el = ggml_type_size(s_l[il]->type);
+ io.write(&s_size_el, sizeof(s_size_el));
+
+ // Write the state embedding size
+ io.write(&n_embd_s, sizeof(n_embd_s));
+
+ // For each row, write the element values of each cell
+ for (uint32_t j = 0; j < n_embd_s; ++j) {
+ // Write out each range of cells, s_size_el bytes per cell
+ for (const auto & range : cell_ranges) {
+ const size_t range_size = range.second - range.first;
+ const size_t src_offset = (range.first + j * mem_size) * s_size_el;
+ const size_t buf_size = range_size * s_size_el;
+ io.write_tensor(s_l[il], src_offset, buf_size);
+ }
+ }
+ }
+ }
+}
+
+bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) {
+ if (dest_seq_id != -1) {
+ // single sequence
+
+ seq_rm(dest_seq_id, -1, -1);
+
+ llama_batch_allocr balloc(hparams.n_pos_per_embd());
+
+ llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);
+
+ for (uint32_t i = 0; i < cell_count; ++i) {
+ llama_pos pos;
+ uint32_t n_seq_id;
+
+ io.read_to(&pos, sizeof(pos));
+ io.read_to(&n_seq_id, sizeof(n_seq_id));
+
+ if (n_seq_id != 0) {
+ LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
+ return false;
+ }
+
+ ubatch.pos[i] = pos;
+ }
+ ubatch.n_seq_id[0] = 1;
+ ubatch.seq_id[0] = &dest_seq_id;
+
+ if (!find_slot(ubatch)) {
+ LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
+ return false;
+ }
+
+ // DEBUG CHECK: head should be our first cell, head + cell_count - 1 should be our last cell (verify seq_id and pos values)
+ // Assume that this is one contiguous block of cells
+ GGML_ASSERT(head + cell_count <= size);
+ GGML_ASSERT(cells[head].pos == ubatch.pos[0]);
+ GGML_ASSERT(cells[head + cell_count - 1].pos == ubatch.pos[cell_count - 1]);
+ GGML_ASSERT(cells[head].has_seq_id(dest_seq_id));
+ GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id));
+ } else {
+ // whole KV cache restore
+
+ if (cell_count > size) {
+ LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
+ return false;
+ }
+
+ clear(true);
+
+ for (uint32_t i = 0; i < cell_count; ++i) {
+ auto & cell = cells[i];
+
+ llama_pos pos;
+ uint32_t n_seq_id;
+
+ io.read_to(&pos, sizeof(pos));
+ io.read_to(&n_seq_id, sizeof(n_seq_id));
+
+ cell.pos = pos;
+
+ for (uint32_t j = 0; j < n_seq_id; ++j) {
+ llama_seq_id seq_id;
+ io.read_to(&seq_id, sizeof(seq_id));
+
+ // TODO: llama_memory_recurrent should have a notion of max sequences
+ //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) {
+ if (seq_id < 0) {
+ //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx));
+ LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, inf)\n", __func__, seq_id);
+ return false;
+ }
+
+ cell.seq_id.insert(seq_id);
+
+ int32_t & tail = cells[seq_id].tail;
+ if (tail != -1) {
+ LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail);
+ return false;
+ }
+ tail = i;
+ }
+ }
+
+ head = 0;
+ used = cell_count;
+ }
+
+ for (uint32_t i = 0; i < cell_count; ++i) {
+ uint32_t cell_id = head + i;
+ // make sure the recurrent states will keep their restored state
+ cells[cell_id].src = cell_id;
+ }
+
+ return true;
+}
+
+bool llama_memory_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) {
+ uint32_t s_trans;
+ uint32_t n_layer;
+ io.read_to(&s_trans, sizeof(s_trans));
+ io.read_to(&n_layer, sizeof(n_layer));
+
+ if (n_layer != hparams.n_layer) {
+ LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer);
+ return false;
+ }
+ if (cell_count > size) {
+ LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size);
+ return false;
+ }
+ if (false != (bool) s_trans) {
+ LLAMA_LOG_ERROR("%s: incompatible s transposition\n", __func__);
+ return false;
+ }
+
+ // For each layer, read the r states for each cell, one row per cell, read as one contiguous block
+ for (uint32_t il = 0; il < n_layer; ++il) {
+
+ // Read type of the r state
+ int32_t r_type_i_ref;
+ io.read_to(&r_type_i_ref, sizeof(r_type_i_ref));
+ const int32_t r_type_i = (int32_t) r_l[il]->type;
+ if (r_type_i != r_type_i_ref) {
+ LLAMA_LOG_ERROR("%s: mismatched r type (%d != %d, layer %d)\n", __func__, r_type_i, r_type_i_ref, il);
+ return false;
+ }
+
+ // Read row size of the r state
+ uint64_t r_size_row_ref;
+ io.read_to(&r_size_row_ref, sizeof(r_size_row_ref));
+ const size_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r());
+ if (r_size_row != r_size_row_ref) {
+ LLAMA_LOG_ERROR("%s: mismatched r row size (%zu != %zu, layer %d)\n", __func__, r_size_row, (size_t) r_size_row_ref, il);
+ return false;
+ }
+
+ if (cell_count) {
+ // Read and set the r states for the whole cell range
+ ggml_backend_tensor_set(r_l[il], io.read(cell_count * r_size_row), head * r_size_row, cell_count * r_size_row);
+ }
+ }
+
+ if (!s_trans) {
+ for (uint32_t il = 0; il < n_layer; ++il) {
+
+ // Read type of the s state
+ int32_t s_type_i_ref;
+ io.read_to(&s_type_i_ref, sizeof(s_type_i_ref));
+ const int32_t s_type_i = (int32_t)s_l[il]->type;
+ if (s_type_i != s_type_i_ref) {
+ LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il);
+ return false;
+ }
+
+ // Read row size of the s state
+ uint64_t s_size_row_ref;
+ io.read_to(&s_size_row_ref, sizeof(s_size_row_ref));
+ const size_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s());
+ if (s_size_row != s_size_row_ref) {
+ LLAMA_LOG_ERROR("%s: mismatched s row size (%zu != %zu, layer %d)\n", __func__, s_size_row, (size_t) s_size_row_ref, il);
+ return false;
+ }
+
+ if (cell_count) {
+ // Read and set the s states for the whole cell range
+ ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_row), head * s_size_row, cell_count * s_size_row);
+ }
+ }
+ } else {
+ // For each layer, read the s states for each cell (transposed)
+ for (uint32_t il = 0; il < n_layer; ++il) {
+ const uint32_t n_embd_s = hparams.n_embd_s();
+
+ // Read type of the s state
+ int32_t s_type_i_ref;
+ io.read_to(&s_type_i_ref, sizeof(s_type_i_ref));
+ const int32_t s_type_i = (int32_t)s_l[il]->type;
+ if (s_type_i != s_type_i_ref) {
+ LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il);
+ return false;
+ }
+
+ // Read element size of the s state
+ uint32_t s_size_el_ref;
+ io.read_to(&s_size_el_ref, sizeof(s_size_el_ref));
+ const size_t s_size_el = ggml_type_size(s_l[il]->type);
+ if (s_size_el != s_size_el_ref) {
+ LLAMA_LOG_ERROR("%s: mismatched s element size (%zu != %zu, layer %d)\n", __func__, s_size_el, (size_t) s_size_el_ref, il);
+ return false;
+ }
+
+ // Read state embedding size
+ uint32_t n_embd_s_ref;
+ io.read_to(&n_embd_s_ref, sizeof(n_embd_s_ref));
+ if (n_embd_s != n_embd_s_ref) {
+ LLAMA_LOG_ERROR("%s: mismatched s embedding size (%u != %u, layer %d)\n", __func__, n_embd_s, n_embd_s_ref, il);
+ return false;
+ }
+
+ if (cell_count) {
+ // For each row in the transposed matrix, read the state values for the whole cell range
+ for (uint32_t j = 0; j < n_embd_s; ++j) {
+ const size_t dst_offset = (head + j * size) * s_size_el;
+ ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_el), dst_offset, cell_count * s_size_el);
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+//
+// llama_memory_recurrent_state
+//
+
+llama_memory_recurrent_state::llama_memory_recurrent_state(llama_memory_status status) : status(status) {}
+
+llama_memory_recurrent_state::llama_memory_recurrent_state(
+ llama_memory_recurrent * mem) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), is_full(true) {
+}
+
+llama_memory_recurrent_state::llama_memory_recurrent_state(
+ llama_memory_recurrent * mem,
+ std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), ubatches(std::move(ubatches)) {}
+
+llama_memory_recurrent_state::~llama_memory_recurrent_state() = default;
+
+bool llama_memory_recurrent_state::next() {
+ assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+ if (++i_next >= ubatches.size()) {
+ return false;
+ }
+
+ return true;
+}
+
+bool llama_memory_recurrent_state::apply() {
+ assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+ mem->find_slot(ubatches[i_next]);
+
+ return true;
+}
+
+llama_memory_status llama_memory_recurrent_state::get_status() const {
+ return status;
+}
+
+const llama_ubatch & llama_memory_recurrent_state::get_ubatch() const {
+ assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+ return ubatches[i_next];
+}
+
+uint32_t llama_memory_recurrent_state::get_n_rs() const {
+ return is_full ? mem->size : mem->n;
+}
+
+uint32_t llama_memory_recurrent_state::get_head() const {
+ return is_full ? 0 : mem->head;
+}
+
+int32_t llama_memory_recurrent_state::get_rs_z() const {
+ return is_full ? 0 : mem->rs_z;
+}
+
+uint32_t llama_memory_recurrent_state::get_size() const {
+ return mem->size;
+}
+
+ggml_tensor * llama_memory_recurrent_state::get_r_l(int32_t il) const {
+ return mem->r_l[il];
+}
+
+ggml_tensor * llama_memory_recurrent_state::get_s_l(int32_t il) const {
+ return mem->s_l[il];
+}
+
+int32_t llama_memory_recurrent_state::s_copy(int i) const {
+ return mem->cells[i + mem->head].src0;
+}
--- /dev/null
+#pragma once
+
+#include "llama-batch.h"
+#include "llama-graph.h"
+#include "llama-memory.h"
+
+#include <set>
+#include <vector>
+
+//
+// llama_memory_recurrent
+//
+
+// TODO: extract the cache state used for graph computation into llama_memory_recurrent_state_i
+ // see the implementation of llama_kv_cache_unified_state_i for an example of how to do it
+class llama_memory_recurrent : public llama_memory_i {
+public:
+
+ // this callback is used to filter out layers that should not be included in the cache
+ using layer_filter_cb = std::function<bool(int32_t il)>;
+
+ llama_memory_recurrent(
+ const llama_model & model,
+ layer_filter_cb && filter,
+ ggml_type type_r,
+ ggml_type type_s,
+ bool offload,
+ uint32_t mem_size,
+ uint32_t n_seq_max);
+
+ ~llama_memory_recurrent() = default;
+
+ //
+ // llama_memory_i
+ //
+
+ llama_memory_state_ptr init_batch(
+ llama_batch_allocr & balloc,
+ uint32_t n_ubatch,
+ bool embd_all) override;
+
+ llama_memory_state_ptr init_full() override;
+
+ llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) override;
+
+ void clear(bool data) override;
+
+ bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+ void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+ void seq_keep(llama_seq_id seq_id) override;
+ void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
+ void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+ llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+ llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+ bool prepare(const std::vector<llama_ubatch> & ubatches);
+
+ // find a contiguous slot of memory cells and emplace the ubatch there
+ bool find_slot(const llama_ubatch & ubatch);
+
+ bool get_can_shift() const override;
+
+ // state write/load
+
+ void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+ void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+ uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+ uint32_t size = 0; // total number of cells, shared across all sequences
+ uint32_t used = 0; // used cells (i.e. at least one seq_id)
+
+ // computed before each graph build
+ uint32_t n = 0;
+
+ // first zero-ed state
+ int32_t rs_z = -1;
+
+ // TODO: optimize for recurrent state needs
+ struct mem_cell {
+ llama_pos pos = -1;
+ int32_t src = -1; // used to know where states should be copied from
+ int32_t src0 = -1; // like src, but only used when setting the inputs (allowing to copy once)
+ int32_t tail = -1;
+
+ std::set<llama_seq_id> seq_id;
+
+ bool has_seq_id(const llama_seq_id & id) const {
+ return seq_id.find(id) != seq_id.end();
+ }
+
+ bool is_empty() const {
+ return seq_id.empty();
+ }
+
+ bool is_same_seq(const mem_cell & other) const {
+ return seq_id == other.seq_id;
+ }
+ };
+
+ std::vector<mem_cell> cells;
+
+ // per layer
+ std::vector<ggml_tensor *> r_l;
+ std::vector<ggml_tensor *> s_l;
+
+private:
+ //const llama_model & model;
+ const llama_hparams & hparams;
+
+ const uint32_t n_seq_max = 1;
+
+ std::vector<ggml_context_ptr> ctxs;
+ std::vector<ggml_backend_buffer_ptr> bufs;
+
+ size_t total_size() const;
+
+ size_t size_r_bytes() const;
+ size_t size_s_bytes() const;
+
+ void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+ void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
+
+ bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+ bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+};
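As an aside, the layer_filter_cb declared at the top of this class is the hook a hybrid setup could use to restrict this cache to its recurrent layers only. The sketch below is a standalone illustration of such a filter; the std::array stands in for hparams.recurrent_layer_arr and the layer layout is invented, so nothing here is part of the patch itself.

// Standalone sketch (invented layer layout): a layer filter that keeps only
// recurrent layers, roughly how a hybrid model could parameterize this cache.
#include <array>
#include <cstdint>
#include <cstdio>
#include <functional>

int main() {
    // stand-in for hparams.recurrent_layer_arr: layers 0 and 2 are recurrent
    const std::array<bool, 4> recurrent_layer_arr = { true, false, true, false };

    using layer_filter_cb = std::function<bool(int32_t)>;
    const layer_filter_cb filter = [&](int32_t il) {
        return recurrent_layer_arr[il];
    };

    for (int32_t il = 0; il < (int32_t) recurrent_layer_arr.size(); ++il) {
        std::printf("layer %d -> %s\n", il, filter(il) ? "recurrent cache" : "skipped");
    }
    return 0;
}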
+
+class llama_memory_recurrent_state : public llama_memory_state_i {
+public:
+ // used for errors
+ llama_memory_recurrent_state(llama_memory_status status);
+
+ // used to create a full-cache state
+ llama_memory_recurrent_state(
+ llama_memory_recurrent * mem);
+
+ // used to create a state from a batch
+ llama_memory_recurrent_state(
+ llama_memory_recurrent * mem,
+ std::vector<llama_ubatch> ubatches);
+
+ virtual ~llama_memory_recurrent_state();
+
+ //
+ // llama_memory_state_i
+ //
+
+ bool next() override;
+ bool apply() override;
+
+ llama_memory_status get_status() const override;
+ const llama_ubatch & get_ubatch() const override;
+
+ //
+ // llama_memory_recurrent_state specific API
+ //
+
+ uint32_t get_n_rs() const;
+ uint32_t get_head() const;
+ int32_t get_rs_z() const;
+ uint32_t get_size() const;
+
+ ggml_tensor * get_r_l(int32_t il) const;
+ ggml_tensor * get_s_l(int32_t il) const;
+
+ int32_t s_copy(int i) const;
+
+private:
+ const llama_memory_status status;
+
+ llama_memory_recurrent * mem;
+
+ size_t i_next = 0;
+
+ std::vector<llama_ubatch> ubatches;
+
+ //
+ // data needed for building the compute graph for the current ubatch:
+ // TODO: extract all the state like `head` and `n` here
+ //
+
+ const bool is_full = false;
+};
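For orientation, the next()/apply() pair declared above follows the memory-state iteration contract used throughout this refactor: apply() emplaces the current ubatch (for the recurrent cache this ends up in mem->find_slot()) and next() advances until the ubatch list is exhausted. The mock below only illustrates that contract with toy types; apart from the two method names it shares nothing with the real llama.cpp API.

// Standalone sketch (mock types): the apply()/next() iteration contract that
// llama_memory_recurrent_state implements above.
#include <cstdio>
#include <vector>

struct mock_state {
    std::vector<int> ubatches; // stand-in for the real ubatch list
    size_t i_next = 0;

    bool apply() { // the real class calls mem->find_slot(ubatches[i_next]) here
        std::printf("apply ubatch %d\n", ubatches[i_next]);
        return true;
    }

    bool next() { // advance; returns false once all ubatches are consumed
        return ++i_next < ubatches.size();
    }
};

int main() {
    mock_state st{ {10, 11, 12} };
    do {
        if (!st.apply()) {
            break; // a real caller would report the failure and stop
        }
    } while (st.next());
    return 0;
}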
struct llama_ubatch;
+class llama_batch_allocr;
+
class llama_io_write_i;
class llama_io_read_i;
// return false on failure
virtual bool apply() = 0;
- // TODO: this might get reworked in the future when refactoring llama_batch
- virtual std::vector<int64_t> & out_ids() = 0;
-
// get the current ubatch
virtual const llama_ubatch & get_ubatch() const = 0;
// return a state object containing the ubatches and KV cache state required to process them
// check the llama_memory_state_i::get_status() for the result
virtual llama_memory_state_ptr init_batch(
- const llama_batch & batch,
+ llama_batch_allocr & balloc,
uint32_t n_ubatch,
bool embd_all) = 0;
// add_kv(LLM_KV_TOKENIZER_MASK_ID, ???);
add_kv(LLM_KV_TOKENIZER_ADD_BOS, vocab.get_add_bos());
add_kv(LLM_KV_TOKENIZER_ADD_EOS, vocab.get_add_eos());
+ add_kv(LLM_KV_TOKENIZER_ADD_SEP, vocab.get_add_sep());
add_kv(LLM_KV_TOKENIZER_ADD_PREFIX, vocab.get_add_space_prefix());
add_kv(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.get_remove_extra_whitespaces());
add_kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, vocab.get_precompiled_charsmap());
#include "llama-kv-cache-unified.h"
#include "llama-kv-cache-unified-iswa.h"
-#include "llama-kv-cache-recurrent.h"
+#include "llama-memory-hybrid.h"
+#include "llama-memory-recurrent.h"
#include "ggml-cpp.h"
std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0);
std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0);
+ std::fill(
+ hparams.recurrent_layer_arr.begin(),
+ hparams.recurrent_layer_arr.end(),
+ llm_arch_is_recurrent(ml.get_arch()));
std::fill(hparams.rope_sections.begin(), hparams.rope_sections.end(), 0);
const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
cb(cur, "attn_out", il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
cb(cur, "attn_out", il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
const int64_t n_head_kv = hparams.n_head_kv(il);
Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * attn_norm;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
attn_norm = ggml_get_rows(ctx0, attn_norm, inp_out_ids);
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
inpL = ggml_add(ctx0, inpL, pos);
cb(inpL, "inpL", -1);
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
cur = build_norm(inpL,
model.layers[il].attn_norm,
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_no_cache();
- // iterate layers
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * cur = inpL;
- ggml_tensor * Qcur;
- ggml_tensor * Kcur;
- ggml_tensor * Vcur;
+ {
+ ggml_tensor * Qcur;
+ ggml_tensor * Kcur;
+ ggml_tensor * Vcur;
- // self-attention
- if (model.layers[il].wqkv) {
- cur = build_lora_mm(model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
+ // self-attention
+ if (model.layers[il].wqkv) {
+ cur = build_lora_mm(model.layers[il].wqkv, cur);
+ cb(cur, "wqkv", il);
- if (model.layers[il].bqkv) {
- cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
- cb(cur, "bqkv", il);
- }
+ if (model.layers[il].bqkv) {
+ cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
+ cb(cur, "bqkv", il);
+ }
- Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- } else {
- Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq);
- Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk);
- Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv);
- }
+ Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+ Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+ Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
+ } else {
+ Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq);
+ Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk);
+ Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv);
+ }
- if (model.layers[il].attn_q_norm) {
- Qcur = build_norm(Qcur,
- model.layers[il].attn_q_norm,
- model.layers[il].attn_q_norm_b,
- LLM_NORM, il);
- }
+ if (model.layers[il].attn_q_norm) {
+ Qcur = build_norm(Qcur,
+ model.layers[il].attn_q_norm,
+ model.layers[il].attn_q_norm_b,
+ LLM_NORM, il);
+ }
- if (model.layers[il].attn_k_norm) {
- Kcur = build_norm(Kcur,
- model.layers[il].attn_k_norm,
- model.layers[il].attn_k_norm_b,
- LLM_NORM, il);
- }
+ if (model.layers[il].attn_k_norm) {
+ Kcur = build_norm(Kcur,
+ model.layers[il].attn_k_norm,
+ model.layers[il].attn_k_norm_b,
+ LLM_NORM, il);
+ }
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+ Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+ Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
- // RoPE
- if (model.arch == LLM_ARCH_NOMIC_BERT || model.arch == LLM_ARCH_NOMIC_BERT_MOE) {
- Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
+ // RoPE
+ if (model.arch == LLM_ARCH_NOMIC_BERT || model.arch == LLM_ARCH_NOMIC_BERT_MOE) {
+ Qcur = ggml_rope_ext(
+ ctx0, Qcur, inp_pos, nullptr,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
- Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- }
+ Kcur = ggml_rope_ext(
+ ctx0, Kcur, inp_pos, nullptr,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ }
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
+ cb(Vcur, "Vcur", il);
- cur = build_attn(inp_attn, gf,
- model.layers[il].wo, model.layers[il].bo,
- Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
- cb(cur, "kqv_out", il);
+ cur = build_attn(inp_attn, gf,
+ model.layers[il].wo, model.layers[il].bo,
+ Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
+ cb(cur, "kqv_out", il);
+ }
- if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
}
auto * inp_attn = build_attn_inp_no_cache();
- // iterate layers
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * cur = inpL;
- ggml_tensor * Qcur;
- ggml_tensor * Kcur;
- ggml_tensor * Vcur;
-
// pre-norm
cur = build_norm(inpL,
model.layers[il].attn_norm, NULL,
LLM_NORM_RMS, il);
- // self-attention
- cur = build_lora_mm(model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
-
- Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
-
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
-
- // RoPE
- Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
+ {
+ ggml_tensor * Qcur;
+ ggml_tensor * Kcur;
+ ggml_tensor * Vcur;
- Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
+ // self-attention
+ cur = build_lora_mm(model.layers[il].wqkv, cur);
+ cb(cur, "wqkv", il);
+
+ Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+ Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+ Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+ Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+ Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
- cur = build_attn(inp_attn, gf,
- model.layers[il].wo, nullptr,
- Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
- cb(cur, "kqv_out", il);
+ // RoPE
+ Qcur = ggml_rope_ext(
+ ctx0, Qcur, inp_pos, nullptr,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
- if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ Kcur = ggml_rope_ext(
+ ctx0, Kcur, inp_pos, nullptr,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
+
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
+ cb(Vcur, "Vcur", il);
+
+ cur = build_attn(inp_attn, gf,
+ model.layers[il].wo, nullptr,
+ Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
+ cb(cur, "kqv_out", il);
+ }
+
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
}
LLM_NORM, -1);
cb(inpL, "inp_norm", -1);
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
cur = build_norm(inpL,
model.layers[il].attn_norm,
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
}
cb(inpL, "inpL", -1);
}
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * attn_norm;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
// norm
cur = build_norm(inpL,
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
int sections[4];
std::copy(std::begin(hparams.rope_sections), std::begin(hparams.rope_sections) + 4, sections);
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
attn_norm_output = build_norm(inpL,
model.layers[il].attn_norm,
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
attn_norm_output = ggml_get_rows(ctx0, attn_norm_output, inp_out_ids);
inp_attn = build_attn_inp_kv_unified();
}
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
auto * residual = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor* inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
residual = ggml_get_rows(ctx0, residual, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
- for (int il = 0; il < n_layer; ++il) {
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+ for (int il = 0; il < n_layer; ++il) {
// norm
cur = build_norm(inpL,
model.layers[il].attn_norm, NULL,
LLM_NORM_RMS, il);
cb(cur, "attn_norm", il);
- ggml_tensor * attention_norm = cur;
+ ggml_tensor * sa_inp = cur;
// self-attention
{
model.layers[il].wo, NULL,
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- ggml_tensor * sa_out = cur;
-
- cur = attention_norm;
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- sa_out = ggml_get_rows(ctx0, sa_out, inp_out_ids);
+ sa_inp = ggml_get_rows(ctx0, sa_inp, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
}
+ ggml_tensor * sa_out = cur;
+
+ cur = sa_inp;
+
// feed-forward network
{
cur = build_ffn(cur,
inpL = ggml_add(ctx0, inpL, pos);
cb(inpL, "inpL", -1);
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
cur = build_norm(inpL,
model.layers[il].attn_norm,
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
cur = build_norm(inpL,
model.layers[il].attn_norm,
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
}
struct llm_build_orion : public llm_graph_context {
llm_build_orion(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
- const int64_t n_embd_head = hparams.n_embd_head_v;
+ const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
+ GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+ GGML_ASSERT(n_embd_head == hparams.n_rot);
- ggml_tensor * cur;
- ggml_tensor * inpL;
+ ggml_tensor * cur;
+ ggml_tensor * inpL;
- inpL = build_inp_embd(model.tok_embd);
+ inpL = build_inp_embd(model.tok_embd);
- // inp_pos - contains the positions
- ggml_tensor * inp_pos = build_inp_pos();
+ // inp_pos - contains the positions
+ ggml_tensor * inp_pos = build_inp_pos();
- auto * inp_attn = build_attn_inp_kv_unified();
+ auto * inp_attn = build_attn_inp_kv_unified();
- for (int il = 0; il < n_layer; ++il) {
- ggml_tensor * inpSA = inpL;
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
- // norm
- cur = build_norm(inpL,
- model.layers[il].attn_norm, model.layers[il].attn_norm_b,
- LLM_NORM, il);
- cb(cur, "attn_norm", il);
+ for (int il = 0; il < n_layer; ++il) {
+ ggml_tensor * inpSA = inpL;
- // self-attention
- {
- // compute Q and K and RoPE them
- ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- // if (model.layers[il].bq) {
- // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- // cb(Qcur, "Qcur", il);
- // }
-
- ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- // if (model.layers[il].bk) {
- // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- // cb(Kcur, "Kcur", il);
- // }
-
- ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- // if (model.layers[il].bv) {
- // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- // cb(Vcur, "Vcur", il);
- // }
-
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
-
- Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
+ // norm
+ cur = build_norm(inpL,
+ model.layers[il].attn_norm, model.layers[il].attn_norm_b,
+ LLM_NORM, il);
+ cb(cur, "attn_norm", il);
- Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
+ // self-attention
+ {
+ // compute Q and K and RoPE them
+ ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+ cb(Qcur, "Qcur", il);
+ // if (model.layers[il].bq) {
+ // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+ // cb(Qcur, "Qcur", il);
+ // }
+
+ ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+ cb(Kcur, "Kcur", il);
+ // if (model.layers[il].bk) {
+ // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+ // cb(Kcur, "Kcur", il);
+ // }
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
+ ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+ cb(Vcur, "Vcur", il);
+ // if (model.layers[il].bv) {
+ // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+ // cb(Vcur, "Vcur", il);
+ // }
- cur = build_attn(inp_attn, gf,
- model.layers[il].wo, NULL,
- Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
- }
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+ Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+ Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
+ Qcur = ggml_rope_ext(
+ ctx0, Qcur, inp_pos, nullptr,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
- ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
+ Kcur = ggml_rope_ext(
+ ctx0, Kcur, inp_pos, nullptr,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
- // feed-forward network
- cur = build_norm(ffn_inp,
- model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
- LLM_NORM, il);
- cb(cur, "ffn_norm", il);
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
+ cb(Vcur, "Vcur", il);
- cur = build_ffn(cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, il);
- cb(cur, "ffn_out", il);
+ cur = build_attn(inp_attn, gf,
+ model.layers[il].wo, NULL,
+ Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
+ }
- cur = ggml_add(ctx0, cur, ffn_inp);
+ if (il == n_layer - 1 && inp_out_ids) {
+ cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+ inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+ }
- cur = build_cvec(cur, il);
- cb(cur, "l_out", il);
+ ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+ cb(ffn_inp, "ffn_inp", il);
- // input for next layer
- inpL = cur;
- }
+ // feed-forward network
+ cur = build_norm(ffn_inp,
+ model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
+ LLM_NORM, il);
+ cb(cur, "ffn_norm", il);
+
+ cur = build_ffn(cur,
+ model.layers[il].ffn_up, NULL, NULL,
+ model.layers[il].ffn_gate, NULL, NULL,
+ model.layers[il].ffn_down, NULL, NULL,
+ NULL,
+ LLM_FFN_SILU, LLM_FFN_PAR, il);
+ cb(cur, "ffn_out", il);
+
+ cur = ggml_add(ctx0, cur, ffn_inp);
+
+ cur = build_cvec(cur, il);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
+ }
- cur = inpL;
+ cur = inpL;
- cur = build_norm(cur,
- model.output_norm, model.output_norm_b,
- LLM_NORM, -1);
+ cur = build_norm(cur,
+ model.output_norm, model.output_norm_b,
+ LLM_NORM, -1);
- cb(cur, "result_norm", -1);
- res->t_embd = cur;
+ cb(cur, "result_norm", -1);
+ res->t_embd = cur;
- // lm_head
- cur = build_lora_mm(model.output, cur);
+ // lm_head
+ cur = build_lora_mm(model.output, cur);
- cb(cur, "result_output", -1);
- res->t_logits = cur;
+ cb(cur, "result_output", -1);
+ res->t_logits = cur;
- ggml_build_forward_expand(gf, cur);
+ ggml_build_forward_expand(gf, cur);
}
};
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
q_states, k_states, v_states, nullptr, nullptr, kq_scale, il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
// scale_res - scale the hidden states for residual connection
- const float scale_res = scale_depth/sqrtf(float(n_layer));
+ const float scale_res = scale_depth/sqrtf(float(n_layer)); // TODO: is this correct?
cur = ggml_scale(ctx0, cur, scale_res);
cb(cur, "hidden_scaled", il);
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
// norm
cur = build_norm(inpL,
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified_iswa();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
// norm
cur = build_norm(inpL,
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
}
+ if (il == n_layer - 1 && inp_out_ids) {
+ cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+ inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
+ }
+
cur = build_norm(cur,
model.layers[il].attn_post_norm, NULL,
LLM_NORM_RMS, il);
cb(cur, "attn_post_norm", il);
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
-
ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
cb(sa_out, "sa_out", il);
// TODO: is causal == true correct? might need some changes
auto * inp_attn = build_attn_inp_kv_unified_iswa();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
const float freq_base_l = model.get_rope_freq_base (cparams, il);
const float freq_scale_l = model.get_rope_freq_scale(cparams, il);
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
}
+ if (il == n_layer - 1 && inp_out_ids) {
+ cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+ inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
+ }
+
cur = build_norm(cur,
model.layers[il].attn_post_norm, NULL,
LLM_NORM_RMS, il);
cb(cur, "attn_post_norm", il);
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
-
ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
cb(sa_out, "sa_out", il);
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
// {n_embd, n_tokens}
inpL = build_inp_embd(model.tok_embd);
- ggml_tensor * state_copy = build_inp_s_copy();
+ auto * rs_inp = build_rs_inp();
+
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
for (int il = 0; il < n_layer; ++il) {
// norm
LLM_NORM_RMS, il);
cb(cur, "attn_norm", il);
- cur = build_mamba_layer(gf, cur, state_copy, ubatch, il);
+ cur = build_mamba_layer(rs_inp, gf, cur, ubatch, il);
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
}
// TODO: split
ggml_tensor * build_mamba_layer(
- ggml_cgraph * gf,
- ggml_tensor * cur,
- ggml_tensor * state_copy,
- const llama_ubatch & ubatch,
- int il) const {
- const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
+ llm_graph_input_rs * inp,
+ ggml_cgraph * gf,
+ ggml_tensor * cur,
+ const llama_ubatch & ubatch,
+ int il) const {
+ const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
const auto kv_head = kv_state->get_head();
GGML_ASSERT(ubatch.equal_seqs);
GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs);
- ggml_tensor * conv_states_all = kv_state->get_k_l(il);
- ggml_tensor * ssm_states_all = kv_state->get_v_l(il);
+ ggml_tensor * conv_states_all = kv_state->get_r_l(il);
+ ggml_tensor * ssm_states_all = kv_state->get_s_l(il);
// (ab)using the KV cache to store the states
- ggml_tensor * conv = build_recurrent_state(
- gf, conv_states_all, state_copy,
- hparams.n_embd_k_s(), n_seqs);
+ ggml_tensor * conv = build_rs(
+ inp, gf, conv_states_all,
+ hparams.n_embd_r(), n_seqs);
conv = ggml_reshape_3d(ctx0, conv, d_conv - 1, d_inner, n_seqs);
- ggml_tensor * ssm = build_recurrent_state(
- gf, ssm_states_all, state_copy,
- hparams.n_embd_v_s(), n_seqs);
+ ggml_tensor * ssm = build_rs(
+ inp, gf, ssm_states_all,
+ hparams.n_embd_s(), n_seqs);
ssm = ggml_reshape_3d(ctx0, ssm, d_state, d_inner, n_seqs);
// {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs}
auto * inp_attn = build_attn_inp_kv_unified();
- for (int il = 0; il < n_layer; ++il) {
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+ for (int il = 0; il < n_layer; ++il) {
// norm
cur = build_norm(inpL,
model.layers[il].attn_norm, NULL,
LLM_NORM, il);
cb(cur, "attn_norm", il);
+
ggml_tensor * ffn_inp = cur;
// self-attention
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
auto * inp_attn = build_attn_inp_kv_unified_iswa();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
const bool is_swa = hparams.is_swa(il);
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
+ if (il == n_layer - 1 && inp_out_ids) {
+ cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+ inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+ }
+
cur = build_norm(cur,
model.layers[il].attn_post_norm, NULL,
LLM_NORM_RMS, il);
cb(cur, "attn_post_norm", il);
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
-
ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
cb(ffn_inp, "ffn_inp", il);
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
const int64_t n_head = hparams.n_head(il);
const int64_t n_head_kv = hparams.n_head_kv(il);
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
residual = ggml_get_rows(ctx0, residual, inp_out_ids);
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+ cur = ggml_get_rows(ctx0, cur, inp_out_ids);
}
ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur);
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
cur = build_norm(inpL,
model.layers[il].attn_norm,
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
-
ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
cb(ffn_inp, "ffn_inp", il);
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
}
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
cb(cur, "attn_o_out", il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_no_cache();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
cb(cur, "kqv_out", il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn_self = build_attn_inp_kv_unified();
auto * inp_attn_cross = build_attn_inp_cross();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
//cb(cur, "kqv_out", il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
cur = build_norm(inpL,
model.layers[il].attn_norm,
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/float(n_embd_head), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
}
ggml_tensor * build_rwkv6_time_mix(
+ llm_graph_input_rs * inp,
ggml_cgraph * gf,
ggml_tensor * cur,
ggml_tensor * x_prev,
- ggml_tensor * state_copy,
const llama_ubatch & ubatch,
int il) const {
- const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
+ const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
const auto n_tokens = ubatch.n_tokens;
const auto n_seqs = ubatch.n_seqs;
k = ggml_sub(ctx0, k, ggml_mul(ctx0, k, w));
}
- ggml_tensor * wkv_state = build_recurrent_state(
- gf, kv_state->get_v_l(il), state_copy,
- hparams.n_embd_v_s(), n_seqs);
+ ggml_tensor * wkv_state = build_rs(
+ inp, gf, kv_state->get_s_l(il),
+ hparams.n_embd_s(), n_seqs);
ggml_tensor * wkv_output;
if (is_qrwkv) {
wkv_state,
ggml_view_1d(
ctx0,
- kv_state->get_v_l(il),
- hparams.n_embd_v_s() * n_seqs,
- hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_state->get_v_l(il))
+ kv_state->get_s_l(il),
+ hparams.n_embd_s() * n_seqs,
+ hparams.n_embd_s() * kv_head * ggml_element_size(kv_state->get_s_l(il))
)
)
);
inpL = build_inp_embd(model.tok_embd);
inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1);
- ggml_tensor * state_copy = build_inp_s_copy();
+ auto * rs_inp = build_rs_inp();
const auto n_embd = hparams.n_embd;
const auto n_seq_tokens = ubatch.n_seq_tokens;
const auto n_seqs = ubatch.n_seqs;
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
const llama_layer * layer = &model.layers[il];
inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
- ggml_tensor * token_shift = build_rwkv_token_shift_load(
- gf, state_copy, ubatch, il
- );
+ ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, gf, ubatch, il);
ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0);
ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift));
1
);
- cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, ubatch, il);
+ cur = build_rwkv6_time_mix(rs_inp, gf, att_norm, x_prev, ubatch, il);
ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
cb(ffn_inp, "ffn_inp", il);
);
ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids);
- ffn_norm = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens), inp_out_ids);
- x_prev = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens), inp_out_ids);
- cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids);
+ ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens);
+ ffn_norm = ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens);
+ x_prev = ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens);
+ cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
+
+ if (il == n_layer - 1 && inp_out_ids) {
+ ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
+ ffn_norm = ggml_get_rows(ctx0, ffn_norm, inp_out_ids);
+ x_prev = ggml_get_rows(ctx0, x_prev, inp_out_ids);
+ cur = ggml_get_rows(ctx0, cur, inp_out_ids);
}
cur = build_rwkv6_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV6);
// ref: https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1/blob/main/modeling_rwkv6qwen2.py
struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base {
llm_build_rwkv6qwen2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv6_base(model, params) {
- GGML_ASSERT(n_embd == hparams.n_embd_k_s());
+ GGML_ASSERT(n_embd == hparams.n_embd_r());
ggml_tensor * cur;
ggml_tensor * inpL;
inpL = build_inp_embd(model.tok_embd);
- ggml_tensor * state_copy = build_inp_s_copy();
+ auto * rs_inp = build_rs_inp();
const auto n_embd = hparams.n_embd;
const auto n_seq_tokens = ubatch.n_seq_tokens;
const auto n_seqs = ubatch.n_seqs;
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
const llama_layer * layer = &model.layers[il];
inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
- ggml_tensor * token_shift = build_rwkv_token_shift_load(
- gf, state_copy, ubatch, il
- );
+ ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, gf, ubatch, il);
ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il);
cb(att_norm, "attn_norm", il);
1
);
- cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, ubatch, il);
+ cur = build_rwkv6_time_mix(rs_inp, gf, att_norm, x_prev, ubatch, il);
token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm));
ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
cb(ffn_inp, "ffn_inp", il);
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids);
- ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids);
+ cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
+ ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens);
+
+ if (il == n_layer - 1 && inp_out_ids) {
+ cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+ ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
}
// feed-forward network
}
ggml_tensor * build_rwkv7_time_mix(
+ llm_graph_input_rs * inp,
ggml_cgraph * gf,
ggml_tensor * cur,
ggml_tensor * x_prev,
- ggml_tensor * state_copy,
ggml_tensor *& first_layer_value,
const llama_ubatch & ubatch,
int il) const {
- const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
+ const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
const auto n_tokens = ubatch.n_tokens;
const auto n_seqs = ubatch.n_seqs;
v = ggml_reshape_3d(ctx0, v, head_size, head_count, n_tokens);
a = ggml_reshape_3d(ctx0, a, head_size, head_count, n_tokens);
- ggml_tensor * wkv_state = build_recurrent_state(
- gf, kv_state->get_v_l(il), state_copy,
- hparams.n_embd_v_s(), n_seqs);
+ ggml_tensor * wkv_state = build_rs(
+ inp, gf, kv_state->get_s_l(il),
+ hparams.n_embd_s(), n_seqs);
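// Illustrative note (not part of the patch): build_rs() now takes the shared rs
// input from build_rs_inp() together with the per-layer state tensor get_s_l(il),
// replacing the explicit state_copy tensor previously passed to
// build_recurrent_state(); the state sizing comes from n_embd_s().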
ggml_tensor * wkv_output = ggml_rwkv_wkv7(ctx0, r, w, k, v, ggml_neg(ctx0, kk), ggml_mul(ctx0, kk, a), wkv_state);
cur = ggml_view_1d(ctx0, wkv_output, n_embd * n_tokens, 0);
wkv_state,
ggml_view_1d(
ctx0,
- kv_state->get_v_l(il),
- hparams.n_embd_v_s() * n_seqs,
- hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_state->get_v_l(il))
+ kv_state->get_s_l(il),
+ hparams.n_embd_s() * n_seqs,
+ hparams.n_embd_s() * kv_head * ggml_element_size(kv_state->get_s_l(il))
)
)
);
inpL = build_inp_embd(model.tok_embd);
inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1);
- ggml_tensor * state_copy = build_inp_s_copy();
+ auto * rs_inp = build_rs_inp();
const auto n_embd = hparams.n_embd;
const auto n_seq_tokens = ubatch.n_seq_tokens;
const auto n_seqs = ubatch.n_seqs;
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
const llama_layer * layer = &model.layers[il];
inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
- ggml_tensor * token_shift = build_rwkv_token_shift_load(
- gf, state_copy, ubatch, il
- );
+ ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, gf, ubatch, il);
ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0);
ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift));
1
);
- cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, v_first, ubatch, il);
+ cur = build_rwkv7_time_mix(rs_inp, gf, att_norm, x_prev, v_first, ubatch, il);
ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
cb(ffn_inp, "ffn_inp", il);
);
ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids);
- ffn_norm = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens), inp_out_ids);
- x_prev = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens), inp_out_ids);
+ ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens);
+ ffn_norm = ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens);
+ x_prev = ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens);
+
+ if (il == n_layer - 1 && inp_out_ids) {
+ ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
+ ffn_norm = ggml_get_rows(ctx0, ffn_norm, inp_out_ids);
+ x_prev = ggml_get_rows(ctx0, x_prev, inp_out_ids);
}
cur = build_rwkv7_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV7);
struct llm_build_arwkv7 : public llm_build_rwkv7_base {
llm_build_arwkv7(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv7_base(model, params) {
- GGML_ASSERT(n_embd == hparams.n_embd_k_s());
+ GGML_ASSERT(n_embd == hparams.n_embd_r());
ggml_tensor * cur;
ggml_tensor * inpL;
inpL = build_inp_embd(model.tok_embd);
- ggml_tensor * state_copy = build_inp_s_copy();
+ auto * rs_inp = build_rs_inp();
const auto n_embd = hparams.n_embd;
const auto n_seq_tokens = ubatch.n_seq_tokens;
const auto n_seqs = ubatch.n_seqs;
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
const llama_layer * layer = &model.layers[il];
inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
- ggml_tensor * token_shift = build_rwkv_token_shift_load(
- gf, state_copy, ubatch, il
- );
+ ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, gf, ubatch, il);
ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il);
cb(att_norm, "attn_norm", il);
1
);
- cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, v_first, ubatch, il);
+ cur = build_rwkv7_time_mix(rs_inp, gf, att_norm, x_prev, v_first, ubatch, il);
token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm));
ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
cb(ffn_inp, "ffn_inp", il);
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids);
- ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids);
+ cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
+ ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens);
+
+ if (il == n_layer - 1 && inp_out_ids) {
+ cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+ ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
}
// feed-forward network
auto * inp_attn = build_attn_inp_kv_unified();
const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
cb(cur, "attn_out", il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
cur = build_attn(inp_attn, gf,
model.layers[il].wo, nullptr,
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
-
- if (hparams.swin_norm) {
- cur = build_norm(cur,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, il);
- }
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
+ if (hparams.swin_norm) {
+ cur = build_norm(cur,
+ model.layers[il].attn_norm, NULL,
+ LLM_NORM_RMS, il);
+ }
+
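// Illustrative note (not part of the patch): the swin_norm post-attention
// normalization now runs after the output-row pruning above, so on the final
// layer it is applied only to the rows that are actually kept.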
ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
cb(ffn_inp, "ffn_inp", il);
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
q_states, k_states, v_states, nullptr, nullptr, kq_scale, il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_rot)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
auto * inp_attn = build_attn_inp_kv_unified();
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL;
cb(cur, "attn_out", il);
}
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- ggml_tensor * inp_out_ids = build_inp_out_ids();
+ if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
llama_memory_i * res;
switch (arch) {
+ // Models that need specific instantiation should be handled in the
+ // switch statement
case LLM_ARCH_BERT:
case LLM_ARCH_JINA_BERT_V2:
case LLM_ARCH_NOMIC_BERT:
{
res = nullptr;
} break;
- case LLM_ARCH_MAMBA:
- case LLM_ARCH_RWKV6:
- case LLM_ARCH_RWKV6QWEN2:
- case LLM_ARCH_RWKV7:
- case LLM_ARCH_ARWKV7:
- {
- res = new llama_kv_cache_recurrent(
- *this,
- GGML_TYPE_F32,
- GGML_TYPE_F32,
- cparams.offload_kqv,
- std::max((uint32_t) 1, cparams.n_seq_max),
- cparams.n_seq_max);
- } break;
+ // All other models fall through to the default case, where the
+ // recurrent / hybrid architecture checks select the memory type
default:
{
- const auto padding = llama_kv_cache_unified::get_padding(cparams);
-
- cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding);
-
- LLAMA_LOG_DEBUG("%s: n_ctx = %u (padded)\n", __func__, cparams.n_ctx);
-
- if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
- GGML_ASSERT(hparams.is_swa_any());
-
- res = new llama_kv_cache_unified_iswa(
- *this,
- params.type_k,
- params.type_v,
- !cparams.flash_attn,
- cparams.offload_kqv,
- params.swa_full,
- cparams.n_ctx,
- cparams.n_seq_max,
- cparams.n_ubatch,
- padding);
- } else {
- GGML_ASSERT(!hparams.is_swa_any());
-
- res = new llama_kv_cache_unified(
+ if (llm_arch_is_recurrent(arch)) {
+ res = new llama_memory_recurrent(
*this,
nullptr,
- params.type_k,
- params.type_v,
- !cparams.flash_attn,
+ GGML_TYPE_F32,
+ GGML_TYPE_F32,
cparams.offload_kqv,
- cparams.n_ctx,
- cparams.n_seq_max,
- padding,
- hparams.n_swa,
- hparams.swa_type);
+ std::max((uint32_t) 1, cparams.n_seq_max),
+ cparams.n_seq_max);
+ } else if (llm_arch_is_hybrid(arch)) {
+ const auto padding = llama_kv_cache_unified::get_padding(cparams);
+
+ cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding);
+
+ res = new llama_memory_hybrid(
+ /* model */ *this,
+ /* attn_type_k */ params.type_k,
+ /* attn_type_v */ params.type_v,
+ /* attn_v_trans */ !cparams.flash_attn,
+ /* attn_kv_size */ cparams.n_ctx,
+ /* attn_n_pad */ padding,
+ /* attn_n_swa */ hparams.n_swa,
+ /* attn_swa_type */ hparams.swa_type,
+ /* recurrent_type_k */ GGML_TYPE_F32,
+ /* recurrent_type_v */ GGML_TYPE_F32,
+ /* recurrent_kv_size */ std::max((uint32_t) 1, cparams.n_seq_max),
+ /* n_seq_max */ cparams.n_seq_max,
+ /* offload */ cparams.offload_kqv);
+ } else {
+ const auto padding = llama_kv_cache_unified::get_padding(cparams);
+
+ cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding);
+
+ LLAMA_LOG_DEBUG("%s: n_ctx = %u (padded)\n", __func__, cparams.n_ctx);
+
+ if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
+ GGML_ASSERT(hparams.is_swa_any());
+
+ res = new llama_kv_cache_unified_iswa(
+ *this,
+ params.type_k,
+ params.type_v,
+ !cparams.flash_attn,
+ cparams.offload_kqv,
+ params.swa_full,
+ cparams.n_ctx,
+ cparams.n_seq_max,
+ cparams.n_ubatch,
+ padding);
+ } else {
+ GGML_ASSERT(!hparams.is_swa_any());
+
+ res = new llama_kv_cache_unified(
+ *this,
+ nullptr,
+ params.type_k,
+ params.type_v,
+ !cparams.flash_attn,
+ cparams.offload_kqv,
+ cparams.n_ctx,
+ cparams.n_seq_max,
+ padding,
+ hparams.n_swa,
+ hparams.swa_type);
+ }
}
}
}
}
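// Illustrative summary (not part of the patch) of the memory dispatch above,
// in terms of the helpers introduced in llama-arch:
//
//   if (llm_arch_is_recurrent(arch))                   -> llama_memory_recurrent
//   else if (llm_arch_is_hybrid(arch))                 -> llama_memory_hybrid (unified KV + recurrent state)
//   else if (hparams.swa_type != LLAMA_SWA_TYPE_NONE)  -> llama_kv_cache_unified_iswa
//   else                                               -> llama_kv_cache_unified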
bool llama_model_is_recurrent(const llama_model * model) {
- switch (model->arch) {
- case LLM_ARCH_MAMBA: return true;
- case LLM_ARCH_RWKV6: return true;
- case LLM_ARCH_RWKV6QWEN2: return true;
- case LLM_ARCH_RWKV7: return true;
- case LLM_ARCH_ARWKV7: return true;
- default: return false;
- }
+ return llm_arch_is_recurrent(model->arch);
}
const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
bool add_space_prefix = false;
bool add_bos = false;
bool add_eos = false;
+ bool add_sep = false;
bool ignore_merges = false;
bool clean_spaces = false; // clean_up_tokenization_spaces
bool remove_extra_whitespaces = false;
special_sep_id = 102;
special_pad_id = 0;
special_mask_id = 103;
+
+ add_sep = true;
} else if (tokenizer_model == "gpt2") {
type = LLAMA_VOCAB_TYPE_BPE;
tokenizer_pre == "jina-es" ||
tokenizer_pre == "jina-de" ||
tokenizer_pre == "gigachat" ||
- tokenizer_pre == "jina-v1-en" ||
tokenizer_pre == "jina-v2-es" ||
- tokenizer_pre == "jina-v2-de" ||
+ tokenizer_pre == "jina-v2-de") {
+ pre_type = LLAMA_VOCAB_PRE_TYPE_GPT2;
+ } else if (
+ tokenizer_pre == "jina-v1-en" ||
tokenizer_pre == "jina-v2-code" ||
tokenizer_pre == "roberta-bpe") {
pre_type = LLAMA_VOCAB_PRE_TYPE_GPT2;
+ add_sep = true;
} else if (
tokenizer_pre == "refact") {
pre_type = LLAMA_VOCAB_PRE_TYPE_REFACT;
clean_spaces = true;
add_bos = true;
add_eos = false;
+ add_sep = true;
} else if (type == LLAMA_VOCAB_TYPE_UGM) {
pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
add_bos = false;
}
}
- // Handle add_bos and add_eos
+ // Handle add_bos, add_eos and add_sep
{
bool temp = true;
if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
add_eos = temp;
}
+ if (ml.get_key(LLM_KV_TOKENIZER_ADD_SEP, temp, false)) {
+ add_sep = temp;
+ }
}
// auto-detect special tokens by text
//NOTE: Per token attributes are missing from the GGUF file.
//TODO: Extract attributes from GGUF file.
{
- auto _contains_any = [] (const std::string & str, const std::vector<std::string> & substrs) -> bool {
+ auto _contains_any = [] (const std::string & str, const std::vector<std::string_view> & substrs) -> bool {
for (const auto & substr : substrs) {
- if (str.find(substr) < std::string::npos) {
+ if (str.find(substr) != std::string::npos) {
return true;
}
}
return pimpl->add_eos;
}
+bool llama_vocab::get_add_sep() const {
+ return pimpl->add_sep;
+}
+
bool llama_vocab::get_ignore_merges() const {
return pimpl->ignore_merges;
}
bool add_special,
bool parse_special) const {
auto res = tokenize(std::string(text, text_len), add_special, parse_special);
+ if (res.size() >= static_cast<size_t>(std::numeric_limits<int32_t>::max())) {
+ LLAMA_LOG_ERROR("%s: tokenization result size %zu exceeds int32_t limit\n", __func__, res.size());
+ return std::numeric_limits<int32_t>::min();
+ }
+
if (n_tokens_max < (int) res.size()) {
// LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
return -((int) res.size());
return vocab->get_add_eos();
}
+bool llama_vocab_get_add_sep(const struct llama_vocab * vocab) {
+ return vocab->get_add_sep();
+}
+
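// Illustrative sketch (not part of the patch): a client assembling a BERT-style
// sequence pair could consult the new flag alongside the existing ones. The
// llama_vocab_bos/sep/eos accessors are from the public API; append_text() is a
// hypothetical helper that tokenizes and appends one text segment.
//
//   std::vector<llama_token> out;
//   if (llama_vocab_get_add_bos(vocab)) out.push_back(llama_vocab_bos(vocab));
//   append_text(out, text_a);
//   if (llama_vocab_get_add_sep(vocab)) out.push_back(llama_vocab_sep(vocab));
//   append_text(out, text_b);
//   if (llama_vocab_get_add_eos(vocab)) out.push_back(llama_vocab_eos(vocab));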
llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab) {
return vocab->token_fim_pre();
}
bool get_add_space_prefix () const;
bool get_add_bos () const;
bool get_add_eos () const;
+ bool get_add_sep () const;
bool get_ignore_merges () const;
bool get_clean_spaces () const;
bool get_remove_extra_whitespaces () const;
LLAMA_API bool llama_vocab_get_add_bos(const struct llama_vocab * vocab);
LLAMA_API bool llama_vocab_get_add_eos(const struct llama_vocab * vocab);
+ LLAMA_API bool llama_vocab_get_add_sep(const struct llama_vocab * vocab);
LLAMA_API llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab);
LLAMA_API llama_token llama_vocab_fim_suf(const struct llama_vocab * vocab);
/// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
/// @return Returns the number of tokens on success, no more than n_tokens_max
/// @return Returns a negative number on failure - the number of tokens that would have been returned
+ /// @return Returns INT32_MIN on overflow (e.g., tokenization result size exceeds int32_t limit)
/// @param add_special Allow to add BOS and EOS tokens if model is configured to do so.
/// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated
/// as plaintext. Does not insert a leading space.
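// Illustrative caller-side pattern (not part of the patch), following the
// llama_tokenize() contract documented above; vocab, text and text_len are
// assumed to be in scope:
//
//   std::vector<llama_token> toks(std::max(text_len, 1));
//   int32_t n = llama_tokenize(vocab, text, text_len, toks.data(), (int32_t) toks.size(),
//                              /*add_special=*/true, /*parse_special=*/false);
//   if (n == INT32_MIN) {
//       // overflow sentinel - the result does not fit in int32_t
//   } else if (n < 0) {
//       // buffer too small: -n is the required token count
//       toks.resize((size_t) -n);
//       n = llama_tokenize(vocab, text, text_len, toks.data(), (int32_t) toks.size(), true, false);
//   }
//   toks.resize(n > 0 ? (size_t) n : 0);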
// disable C++17 deprecation warning for std::codecvt_utf8
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#elif defined(__GNUC__)
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
std::wstring_convert<std::codecvt_utf8<wchar_t>> conv;
#if defined(__clang__)
# pragma clang diagnostic pop
+#elif defined(__GNUC__)
+# pragma GCC diagnostic pop
#endif
return conv.from_bytes(s);