int32_t seed = -1; // RNG seed, not used currently
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
int32_t step_ms = 3000;
+ int32_t length_ms = 10000; // length of the sliding audio window, in ms
bool verbose = false;
bool translate = false;
params.n_threads = std::stoi(argv[++i]);
} else if (arg == "--step") {
params.step_ms = std::stoi(argv[++i]);
+ } else if (arg == "--length") {
+ params.length_ms = std::stoi(argv[++i]);
} else if (arg == "-v" || arg == "--verbose") {
params.verbose = true;
} else if (arg == "--translate") {
fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1)\n");
fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
fprintf(stderr, " --step N audio step size in milliseconds (default: %d)\n", params.step_ms);
+ fprintf(stderr, " --length N audio length in milliseconds (default: %d)\n", params.length_ms);
fprintf(stderr, " -v, --verbose verbose output\n");
fprintf(stderr, " --translate translate from source language to english\n");
- fprintf(stderr, " -nc, --no-context disable context from earlier audio (default: false)\n");
+ fprintf(stderr, " -kc, --keep-context keep text context from earlier audio (default: false)\n");
fprintf(stderr, " -ps, --print_special print special tokens\n");
fprintf(stderr, " -nt, --no_timestamps do not print timestamps\n");
fprintf(stderr, " -l LANG, --language LANG spoken language (default: %s)\n", params.language.c_str());
struct whisper_context * ctx = whisper_init(params.model.c_str());
const int n_samples = (params.step_ms/1000.0)*WHISPER_SAMPLE_RATE;
+ const int n_samples_len = (params.length_ms/1000.0)*WHISPER_SAMPLE_RATE; // samples in the full sliding window
const int n_samples_30s = 30*WHISPER_SAMPLE_RATE;
std::vector<float> pcmf32(n_samples_30s, 0.0f);
std::vector<float> pcmf32_old;
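+ // pcmf32_old holds audio carried over from the previous iteration, so each pass re-feeds up to params.length_ms of context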
printf("%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
}
}
- printf("%s: processing %d samples (%.1f sec), %d threads, lang = %s, task = %s, timestamps = %d ...\n",
- __func__, n_samples, float(n_samples)/WHISPER_SAMPLE_RATE, params.n_threads,
+ printf("%s: processing %d samples (step = %.1f sec / len = %.1f sec), %d threads, lang = %s, task = %s, timestamps = %d ...\n",
+ __func__,
+ n_samples,
+ float(n_samples)/WHISPER_SAMPLE_RATE,
+ float(n_samples_len)/WHISPER_SAMPLE_RATE,
+ params.n_threads,
params.language.c_str(),
params.translate ? "translate" : "transcribe",
params.no_timestamps ? 0 : 1);
SDL_PauseAudioDevice(g_dev_id_in, 0);
+ int n_iter = 0;
+ const int n_new_line = std::max(1, params.length_ms / params.step_ms - 1); // steps per console line, guarded against modulo by zero
bool is_running = true;
// main audio loop
const int n_samples_new = SDL_GetQueuedAudioSize(g_dev_id_in)/sizeof(float);
// take one second from previous iteration
- // TODO: better strategy
- const int n_samples_take = std::min((int) pcmf32_old.size(), std::max(0, n_samples_30s/30 - n_samples_new));
+ //const int n_samples_take = std::min((int) pcmf32_old.size(), std::max(0, n_samples_30s/30 - n_samples_new));
+
+ // take up to params.length_ms audio from previous iteration
+ const int n_samples_take = std::min((int) pcmf32_old.size(), std::max(0, n_samples_len - n_samples_new));
//printf("processing: take = %d, new = %d, old = %d\n", n_samples_take, n_samples_new, (int) pcmf32_old.size());
// print result
{
- printf("\n");
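+ // still within the current window: clear the console line so the updated transcription overwrites it in place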
+ if ((n_iter % n_new_line) != 0) {
+ printf("\33[2K\r");
+ }
const int n_segments = whisper_full_n_segments(ctx);
for (int i = 0; i < n_segments; ++i) {
}
}
}
+
+ ++n_iter;
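+ // every n_new_line steps, commit the printed line and drop the audio carried over from previous iterations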
+ if ((n_iter % n_new_line) == 0) {
+ printf("\n");
+
+ pcmf32_old.clear();
+ }
}
}