result->prev.resize(params.n_prev);
- result->n_considered = 0;
+ result->n_valid = 0;
llama_sampling_set_rng_seed(result, params.seed);
std::fill(ctx->prev.begin(), ctx->prev.end(), 0);
ctx->cur.clear();
- ctx->n_considered = 0;
+ ctx->n_valid = 0;
}
void llama_sampling_set_rng_seed(struct llama_sampling_context * ctx, uint32_t seed) {
}
}
- ctx_sampling->n_considered = cur_p.size;
+ ctx_sampling->n_valid = temp == 0.0f ? 0 : cur_p.size;
return id;
}
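
For context: with `temp == 0.0f` the greedy path picks the argmax directly and never runs softmax over `cur_p`, so none of the stored probabilities are meaningful; with any other temperature the candidates that survive the sampler chain do carry correct probabilities. A minimal sketch of that rule, using an illustrative stand-in struct rather than the real `llama_token_data_array`:

```cpp
// Sketch only -- the struct and helper below are illustrative, not llama.cpp code.
#include <cstddef>

struct candidates_view {
    size_t size; // candidates remaining after the sampler chain (top_k, top_p, ...)
};

// Greedy sampling (temp == 0.0f) never computes probabilities, so nothing in
// cur_p can be trusted and n_valid is 0. Otherwise every surviving candidate
// has a correctly computed probability, so n_valid equals cur_p.size.
static size_t compute_n_valid(float temp, const candidates_view & cur_p) {
    return temp == 0.0f ? 0 : cur_p.size;
}
```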
// TODO: replace with ring-buffer
std::vector<llama_token> prev;
std::vector<llama_token_data> cur;
- size_t n_considered;
+ size_t n_valid; // Number of tokens at the front of cur whose probabilities were correctly computed.
std::mt19937 rng;
};
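
A hedged usage sketch of the renamed field: after sampling, only the first `n_valid` entries of `cur` carry probabilities that were actually computed, so a consumer reporting top-token probabilities should zero anything beyond that. The helper below is illustrative, not part of llama.cpp, and assumes the declarations from `sampling.h`:

```cpp
// Illustrative consumer of llama_sampling_context::n_valid (not llama.cpp code).
#include <cstddef>
#include <cstdio>

#include "sampling.h" // assumed: llama.cpp's common/sampling.h, declaring the struct above

static void print_top_probs(const llama_sampling_context * ctx, size_t n_probs) {
    for (size_t i = 0; i < n_probs && i < ctx->cur.size(); ++i) {
        // Entries past n_valid were filtered out (or never softmaxed), so report 0.
        const float p = i < ctx->n_valid ? ctx->cur[i].p : 0.0f;
        printf("token %d : %.4f\n", ctx->cur[i].id, p);
    }
}
```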
const size_t n_probs = std::min(cur_p.size, (size_t) slot.sparams.n_probs);
if (n_probs > 0) {
- const size_t n_considered = slot.ctx_sampling->n_considered;
+ const size_t n_valid = slot.ctx_sampling->n_valid;
// Make sure at least n_probs top tokens are at the front of the vector:
- if (slot.sparams.temp == 0.0f && n_probs > n_considered) {
+ if (slot.sparams.temp == 0.0f && n_probs > n_valid) {
llama_sample_top_k(ctx, &cur_p, n_probs, 0);
}
for (size_t i = 0; i < n_probs; ++i) {
result.probs.push_back({
cur_p.data[i].id,
- i >= n_considered ? 0.0f : cur_p.data[i].p // Tokens filtered out due to e.g. top_k have 0 probability.
+ i >= n_valid ? 0.0f : cur_p.data[i].p // Tokens filtered out due to e.g. top_k have 0 probability.
});
}
}
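
A worked example of the effect (illustrative values, not real output): with `top_k = 2` and `n_probs = 5`, only the first two candidates survive the sampler chain, so `n_valid == 2` and the remaining requested tokens are reported with probability `0.0f` instead of whatever leftover values still sit in `cur_p`:

```cpp
// Standalone illustration (not llama.cpp code) of how the reported probabilities
// come out once entries past n_valid are zeroed.
#include <cstddef>
#include <cstdio>
#include <vector>

struct prob_entry { int id; float p; };

int main() {
    // First n_valid entries: correct, softmaxed over the surviving candidates.
    // The rest: leftover values that must not be reported as probabilities.
    const std::vector<prob_entry> cur_p = {
        {11, 0.62f}, {42, 0.38f},
        { 7, 0.05f}, { 3, 0.04f}, { 9, 0.02f},
    };
    const size_t n_valid = 2; // as set by llama_sampling_sample with top_k = 2
    const size_t n_probs = 5; // as requested by the client

    for (size_t i = 0; i < n_probs && i < cur_p.size(); ++i) {
        printf("token %d -> %.2f\n", cur_p[i].id, i < n_valid ? cur_p[i].p : 0.0f);
    }
    return 0;
}
```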