}
}
-static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd) {
+static void batch_encode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd) {
// clear previous kv_cache values (irrelevant for embeddings)
llama_kv_self_clear(ctx);
// run model
LOG_INF("%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq);
- if (llama_decode(ctx, batch) < 0) {
- LOG_ERR("%s : failed to decode\n", __func__);
+ if (llama_encode(ctx, batch) < 0) {
+ LOG_ERR("%s : failed to encode\n", __func__);
}
for (int i = 0; i < batch.n_tokens; i++) {
@@ ... @@
// encode if at capacity
if (batch.n_tokens + n_toks > n_batch) {
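+ // flush: encode the accumulated chunks and write their per-sequence
+ // embeddings starting at output row p, then start a fresh batch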
float * out = emb + p * n_embd;
- batch_decode(ctx, batch, out, s, n_embd);
+ batch_encode(ctx, batch, out, s, n_embd);
common_batch_clear(batch);
p += s;
s = 0;
@@ ... @@
// final batch
float * out = emb + p * n_embd;
- batch_decode(ctx, batch, out, s, n_embd);
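+ // encode whatever is left in the final, possibly partial batch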
+ batch_encode(ctx, batch, out, s, n_embd);
// save embeddings to chunks
for (int i = 0; i < n_chunks; i++) {
@@ ... @@
batch_add_seq(query_batch, query_tokens, 0);
std::vector<float> query_emb(n_embd, 0);
- batch_decode(ctx, query_batch, query_emb.data(), 1, n_embd);
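+ // the query is a single sequence (seq_id 0), so n_seq = 1 here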
+ batch_encode(ctx, query_batch, query_emb.data(), 1, n_embd);
common_batch_clear(query_batch);