int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
int32_t offset_t_ms = 0;
int32_t offset_n = 0;
+ int32_t max_context = -1;
bool verbose = false;
bool translate = false;
params.offset_t_ms = std::stoi(argv[++i]);
} else if (arg == "-on" || arg == "--offset-n") {
params.offset_n = std::stoi(argv[++i]);
+ } else if (arg == "-mc" || arg == "--max-context") {
+ params.max_context = std::stoi(argv[++i]);
} else if (arg == "-v" || arg == "--verbose") {
params.verbose = true;
} else if (arg == "--translate") {
fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
fprintf(stderr, " -ot N, --offset-t N time offset in milliseconds (default: %d)\n", params.offset_t_ms);
fprintf(stderr, " -on N, --offset-n N segment index offset (default: %d)\n", params.offset_n);
+ fprintf(stderr, " -mc N, --max-context N maximum number of text context tokens to store (default: max)\n");
fprintf(stderr, " -v, --verbose verbose output\n");
fprintf(stderr, " --translate translate from source language to english\n");
fprintf(stderr, " -otxt, --output-txt output result in a text file\n");
wparams.translate = params.translate;
wparams.language = params.language.c_str();
wparams.n_threads = params.n_threads;
+ wparams.n_processors = 1;
+ wparams.n_max_text_ctx = params.max_context >= 0 ? params.max_context : wparams.n_max_text_ctx;
wparams.offset_ms = params.offset_t_ms;
// this callback is called on each new segment
}
};
-struct whisper_token_data {
- whisper_token id; // token id
- whisper_token tid; // forced timestamp token id
-
- float p; // probability of the token
- float pt; // probability of the timestamp token
-};
-
struct whisper_segment {
int64_t t0;
int64_t t1;
return 0;
}
-whisper_token whisper_sample_best(struct whisper_context * ctx) {
+whisper_token_data whisper_sample_best(struct whisper_context * ctx) {
const int64_t t_start_sample_us = ggml_time_us();
// TODO: simplify
ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
- return res.id;
+ return res;
}
whisper_token whisper_sample_timestamp(struct whisper_context * ctx) {
/*.strategy =*/ WHISPER_SAMPLING_GREEDY,
/*.n_threads =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
- /*.offset_ms =*/ 0,
/*.n_processors =*/ 1,
+ /*.n_max_text_ctx =*/ 16384,
+ /*.offset_ms =*/ 0,
/*.translate =*/ false,
/*.no_context =*/ false,
/*.strategy =*/ WHISPER_SAMPLING_BEAM_SEARCH,
/*.n_threads =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
- /*.offset_ms =*/ 0,
/*.n_processors =*/ 1,
+ /*.n_max_text_ctx =*/ 16384,
+ /*.offset_ms =*/ 0,
/*.translate =*/ false,
/*.no_context =*/ false,
// if we have already generated some text, use it as a prompt to condition the next generation
if (prompt_past.size() > 0) {
- int n_take = std::min(whisper_n_text_ctx(ctx)/2, int(prompt_past.size()));
+ int n_take = std::min(std::min(params.n_max_text_ctx, whisper_n_text_ctx(ctx)/2), int(prompt_past.size()));
prompt = { whisper_token_prev(ctx) };
prompt.insert(prompt.begin() + 1, prompt_past.end() - n_take, prompt_past.end());
// feel free to experiment!
//
{
- auto token = whisper_sample_best(ctx->vocab, ctx->probs.data() + (ctx->probs.size() - ctx->vocab.n_vocab));
+ auto token = whisper_sample_best(ctx);
if (i == 0) {
token.tid = whisper_token_beg(ctx);
typedef int whisper_token;
+ // Result of token sampling (returned by whisper_sample_best()): bundles the
+ // sampled token id with the corresponding timestamp token id, plus the
+ // probability of each, so callers can choose between text and timestamp paths.
+ struct whisper_token_data {
+ whisper_token id; // token id
+ whisper_token tid; // forced timestamp token id
+
+ float p; // probability of the token
+ float pt; // probability of the timestamp token
+ };
+
// Allocates all memory needed for the model and loads the model from the given file.
// Returns NULL on failure.
WHISPER_API struct whisper_context * whisper_init(const char * path_model);
// You can also implement your own sampling method using the whisper_get_probs() function.
// whisper_sample_best() returns the token with the highest probability
// whisper_sample_timestamp() returns the most probable timestamp token
- WHISPER_API whisper_token whisper_sample_best(struct whisper_context * ctx);
+ WHISPER_API whisper_token_data whisper_sample_best(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_sample_timestamp(struct whisper_context * ctx);
// Return the id of the specified language, returns -1 if not found
enum whisper_sampling_strategy strategy;
int n_threads;
- int offset_ms;
int n_processors;
+ int n_max_text_ctx;
+ int offset_ms;
bool translate;
bool no_context;