sync.sh
compile_commands.json
+examples/arm_neon.h
examples/whisper.objc/whisper.objc.xcodeproj/xcshareddata
examples/whisper.objc/whisper.objc.xcodeproj/xcuserdata/
examples/whisper.objc/whisper.objc.xcodeproj/project.xcworkspace/xcuserdata
options:
-h, --help show this help message and exit
- -s SEED, --seed SEED RNG seed (default: -1)
-t N, --threads N number of threads to use during computation (default: 4)
-p N, --processors N number of processors to use during computation (default: 1)
-ot N, --offset-t N time offset in milliseconds (default: 0)
struct whisper_full_params params = whisper_full_default_params(whisper_sampling_strategy::WHISPER_SAMPLING_GREEDY);
- params.print_realtime = true;
- params.print_progress = false;
- params.print_timestamps = true;
- params.print_special_tokens = false;
- params.translate = translate;
- params.language = whisper_is_multilingual(g_contexts[index]) ? lang.c_str() : "en";
- params.n_threads = std::min(8, (int) std::thread::hardware_concurrency());
- params.offset_ms = 0;
+ params.print_realtime = true;
+ params.print_progress = false;
+ params.print_timestamps = true;
+ params.print_special = false;
+ params.translate = translate;
+ params.language = whisper_is_multilingual(g_contexts[index]) ? lang.c_str() : "en";
+ params.n_threads = std::min(8, (int) std::thread::hardware_concurrency());
+ params.offset_ms = 0;
std::vector<float> pcmf32;
const int n = audio["length"].as<int>();
// command-line parameters
struct whisper_params {
- int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
+ int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
- std::string model = "models/ggml-base.en.bin";
+ std::string model = "models/ggml-base.en.bin";
};
void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
for (int i = 1; i < argc; i++) {
std::string arg = argv[i];
- if (arg == "-t" || arg == "--threads") {
- params.n_threads = std::stoi(argv[++i]);
- } else if (arg == "-m" || arg == "--model") {
- params.model = argv[++i];
- } else if (arg == "-h" || arg == "--help") {
+ if (arg == "-h" || arg == "--help") {
whisper_print_usage(argc, argv, params);
exit(0);
- } else {
+ }
+ else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(argv[++i]); }
+ else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
+ else {
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
whisper_print_usage(argc, argv, params);
exit(0);
fprintf(stderr, "usage: %s [options]\n", argv[0]);
fprintf(stderr, "\n");
fprintf(stderr, "options:\n");
- fprintf(stderr, " -h, --help show this help message and exit\n");
- fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
- fprintf(stderr, " -m FNAME, --model FNAME model path (default: %s)\n", params.model.c_str());
+ fprintf(stderr, " -h, --help [default] show this help message and exit\n");
+ fprintf(stderr, " -t N, --threads N [%-7d] number of threads to use during computation\n", params.n_threads);
+ fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
fprintf(stderr, "\n");
}
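// Aside (illustrative, not part of the diff): the reworked help strings rely on printf's
// left-justified minimum-field-width conversions ("%-7d", "%-7s"), so every default value
// lines up inside a fixed-width [       ] column. A standalone example of that formatting:
#include <cstdio>

int main() {
    std::printf("[%-7d]\n", 4);                         // prints "[4      ]"
    std::printf("[%-7s]\n", "true");                    // prints "[true   ]"
    std::printf("[%-7s]\n", "models/ggml-base.en.bin"); // longer than 7 chars: printed in full, no truncation
    return 0;
}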
// command-line parameters
struct whisper_params {
- int32_t seed = -1; // RNG seed, not used currently
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
int32_t n_processors = 1;
int32_t offset_t_ms = 0;
float word_thold = 0.01f;
- bool speed_up = false;
- bool translate = false;
- bool output_txt = false;
- bool output_vtt = false;
- bool output_srt = false;
- bool output_wts = false;
- bool print_special_tokens = false;
- bool print_colors = false;
- bool no_timestamps = false;
+ bool speed_up = false;
+ bool translate = false;
+ bool output_txt = false;
+ bool output_vtt = false;
+ bool output_srt = false;
+ bool output_wts = false;
+ bool print_special = false;
+ bool print_colors = false;
+ bool no_timestamps = false;
std::string language = "en";
std::string model = "models/ggml-base.en.bin";
continue;
}
- if (arg == "-s" || arg == "--seed") {
- params.seed = std::stoi(argv[++i]);
- } else if (arg == "-t" || arg == "--threads") {
- params.n_threads = std::stoi(argv[++i]);
- } else if (arg == "-p" || arg == "--processors") {
- params.n_processors = std::stoi(argv[++i]);
- } else if (arg == "-ot" || arg == "--offset-t") {
- params.offset_t_ms = std::stoi(argv[++i]);
- } else if (arg == "-on" || arg == "--offset-n") {
- params.offset_n = std::stoi(argv[++i]);
- } else if (arg == "-d" || arg == "--duration") {
- params.duration_ms = std::stoi(argv[++i]);
- } else if (arg == "-mc" || arg == "--max-context") {
- params.max_context = std::stoi(argv[++i]);
- } else if (arg == "-ml" || arg == "--max-len") {
- params.max_len = std::stoi(argv[++i]);
- } else if (arg == "-wt" || arg == "--word-thold") {
- params.word_thold = std::stof(argv[++i]);
- } else if (arg == "-su" || arg == "--speed-up") {
- params.speed_up = true;
- } else if (arg == "-tr" || arg == "--translate") {
- params.translate = true;
- } else if (arg == "-l" || arg == "--language") {
- params.language = argv[++i];
- if (whisper_lang_id(params.language.c_str()) == -1) {
- fprintf(stderr, "error: unknown language '%s'\n", params.language.c_str());
- whisper_print_usage(argc, argv, params);
- exit(0);
- }
- } else if (arg == "-otxt" || arg == "--output-txt") {
- params.output_txt = true;
- } else if (arg == "-ovtt" || arg == "--output-vtt") {
- params.output_vtt = true;
- } else if (arg == "-osrt" || arg == "--output-srt") {
- params.output_srt = true;
- } else if (arg == "-owts" || arg == "--output-words") {
- params.output_wts = true;
- } else if (arg == "-ps" || arg == "--print_special") {
- params.print_special_tokens = true;
- } else if (arg == "-pc" || arg == "--print_colors") {
- params.print_colors = true;
- } else if (arg == "-nt" || arg == "--no_timestamps") {
- params.no_timestamps = true;
- } else if (arg == "-m" || arg == "--model") {
- params.model = argv[++i];
- } else if (arg == "-f" || arg == "--file") {
- params.fname_inp.push_back(argv[++i]);
- } else if (arg == "-h" || arg == "--help") {
+ if (arg == "-h" || arg == "--help") {
whisper_print_usage(argc, argv, params);
exit(0);
- } else {
+ }
+ else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(argv[++i]); }
+ else if (arg == "-p" || arg == "--processors") { params.n_processors = std::stoi(argv[++i]); }
+ else if (arg == "-ot" || arg == "--offset-t") { params.offset_t_ms = std::stoi(argv[++i]); }
+ else if (arg == "-on" || arg == "--offset-n") { params.offset_n = std::stoi(argv[++i]); }
+ else if (arg == "-d" || arg == "--duration") { params.duration_ms = std::stoi(argv[++i]); }
+ else if (arg == "-mc" || arg == "--max-context") { params.max_context = std::stoi(argv[++i]); }
+ else if (arg == "-ml" || arg == "--max-len") { params.max_len = std::stoi(argv[++i]); }
+ else if (arg == "-wt" || arg == "--word-thold") { params.word_thold = std::stof(argv[++i]); }
+ else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
+ else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
+ else if (arg == "-otxt" || arg == "--output-txt") { params.output_txt = true; }
+ else if (arg == "-ovtt" || arg == "--output-vtt") { params.output_vtt = true; }
+ else if (arg == "-osrt" || arg == "--output-srt") { params.output_srt = true; }
+ else if (arg == "-owts" || arg == "--output-words") { params.output_wts = true; }
+ else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
+ else if (arg == "-pc" || arg == "--print-colors") { params.print_colors = true; }
+ else if (arg == "-nt" || arg == "--no-timestamps") { params.no_timestamps = true; }
+ else if (arg == "-l" || arg == "--language") { params.language = argv[++i]; }
+ else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
+ else if (arg == "-f" || arg == "--file") { params.fname_inp.push_back(argv[++i]); }
+ else {
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
whisper_print_usage(argc, argv, params);
exit(0);
fprintf(stderr, "usage: %s [options] file0.wav file1.wav ...\n", argv[0]);
fprintf(stderr, "\n");
fprintf(stderr, "options:\n");
- fprintf(stderr, " -h, --help show this help message and exit\n");
- fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1)\n");
- fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
- fprintf(stderr, " -p N, --processors N number of processors to use during computation (default: %d)\n", params.n_processors);
- fprintf(stderr, " -ot N, --offset-t N time offset in milliseconds (default: %d)\n", params.offset_t_ms);
- fprintf(stderr, " -on N, --offset-n N segment index offset (default: %d)\n", params.offset_n);
- fprintf(stderr, " -d N, --duration N duration of audio to process in milliseconds (default: %d)\n", params.duration_ms);
- fprintf(stderr, " -mc N, --max-context N maximum number of text context tokens to store (default: max)\n");
- fprintf(stderr, " -ml N, --max-len N maximum segment length in characters (default: %d)\n", params.max_len);
- fprintf(stderr, " -wt N, --word-thold N word timestamp probability threshold (default: %f)\n", params.word_thold);
- fprintf(stderr, " -su, --speed-up speed up audio by factor of 2 (faster processing, reduced accuracy, default: %s)\n", params.speed_up ? "true" : "false");
- fprintf(stderr, " -tr, --translate translate from source language to english\n");
- fprintf(stderr, " -otxt, --output-txt output result in a text file\n");
- fprintf(stderr, " -ovtt, --output-vtt output result in a vtt file\n");
- fprintf(stderr, " -osrt, --output-srt output result in a srt file\n");
- fprintf(stderr, " -owts, --output-words output script for generating karaoke video\n");
- fprintf(stderr, " -ps, --print_special print special tokens\n");
- fprintf(stderr, " -pc, --print_colors print colors\n");
- fprintf(stderr, " -nt, --no_timestamps do not print timestamps\n");
- fprintf(stderr, " -l LANG, --language LANG spoken language (default: %s)\n", params.language.c_str());
- fprintf(stderr, " -m FNAME, --model FNAME model path (default: %s)\n", params.model.c_str());
- fprintf(stderr, " -f FNAME, --file FNAME input WAV file path\n");
+ fprintf(stderr, " -h, --help [default] show this help message and exit\n");
+ fprintf(stderr, " -t N, --threads N [%-7d] number of threads to use during computation\n", params.n_threads);
+ fprintf(stderr, " -p N, --processors N [%-7d] number of processors to use during computation\n", params.n_processors);
+ fprintf(stderr, " -ot N, --offset-t N [%-7d] time offset in milliseconds\n", params.offset_t_ms);
+ fprintf(stderr, " -on N, --offset-n N [%-7d] segment index offset\n", params.offset_n);
+ fprintf(stderr, " -d N, --duration N [%-7d] duration of audio to process in milliseconds\n", params.duration_ms);
+ fprintf(stderr, " -mc N, --max-context N [%-7d] maximum number of text context tokens to store\n", params.max_context);
+ fprintf(stderr, " -ml N, --max-len N [%-7d] maximum segment length in characters\n", params.max_len);
+ fprintf(stderr, " -wt N, --word-thold N [%-7f] word timestamp probability threshold\n", params.word_thold);
+ fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
+ fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
+ fprintf(stderr, " -otxt, --output-txt [%-7s] output result in a text file\n", params.output_txt ? "true" : "false");
+ fprintf(stderr, " -ovtt, --output-vtt [%-7s] output result in a vtt file\n", params.output_vtt ? "true" : "false");
+ fprintf(stderr, " -osrt, --output-srt [%-7s] output result in a srt file\n", params.output_srt ? "true" : "false");
+ fprintf(stderr, " -owts, --output-words [%-7s] output script for generating karaoke video\n", params.output_wts ? "true" : "false");
+ fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
+ fprintf(stderr, " -pc, --print-colors [%-7s] print colors\n", params.print_colors ? "true" : "false");
+ fprintf(stderr, " -nt, --no-timestamps [%-7s] do not print timestamps\n", params.no_timestamps ? "false" : "true");
+ fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language\n", params.language.c_str());
+ fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
+ fprintf(stderr, " -f FNAME, --file FNAME [%-7s] input WAV file path\n", "");
fprintf(stderr, "\n");
}
if (params.no_timestamps) {
if (params.print_colors) {
for (int j = 0; j < whisper_full_n_tokens(ctx, i); ++j) {
- if (params.print_special_tokens == false) {
+ if (params.print_special == false) {
const whisper_token id = whisper_full_get_token_id(ctx, i, j);
if (id >= whisper_token_eot(ctx)) {
continue;
if (params.print_colors) {
printf("[%s --> %s] ", to_timestamp(t0).c_str(), to_timestamp(t1).c_str());
for (int j = 0; j < whisper_full_n_tokens(ctx, i); ++j) {
- if (params.print_special_tokens == false) {
+ if (params.print_special == false) {
const whisper_token id = whisper_full_get_token_id(ctx, i, j);
if (id >= whisper_token_eot(ctx)) {
continue;
return 1;
}
- if (params.seed < 0) {
- params.seed = time(NULL);
- }
-
if (params.fname_inp.empty()) {
fprintf(stderr, "error: no input files specified\n");
whisper_print_usage(argc, argv, params);
return 2;
}
+ if (whisper_lang_id(params.language.c_str()) == -1) {
+ fprintf(stderr, "error: unknown language '%s'\n", params.language.c_str());
+ whisper_print_usage(argc, argv, params);
+ exit(0);
+ }
+
// whisper init
struct whisper_context * ctx = whisper_init(params.model.c_str());
fprintf(stderr, "error: failed to open WAV file from stdin\n");
return 4;
}
+
+ fprintf(stderr, "%s: read %zu bytes from stdin\n", __func__, wav_data.size());
}
else if (drwav_init_file(&wav, fname_inp.c_str(), NULL) == false) {
fprintf(stderr, "error: failed to open '%s' as WAV file\n", fname_inp.c_str());
return 7;
}
- int n = wav.totalPCMFrameCount;
+ const uint64_t n = wav_data.empty() ? wav.totalPCMFrameCount : wav_data.size()/(wav.channels*wav.bitsPerSample/8);
std::vector<int16_t> pcm16;
pcm16.resize(n*wav.channels);
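// Aside (illustrative, not part of the diff): the new frame-count expression converts a raw
// byte count into PCM frames as  n = bytes / (channels * bitsPerSample / 8). For example,
// 16-bit mono audio has 1 * 16 / 8 = 2 bytes per frame, so 32000 bytes correspond to
// 16000 frames (one second at WHISPER_SAMPLE_RATE). A standalone check of the same
// arithmetic (the byte count below is hypothetical):
#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t data_bytes      = 32000; // hypothetical payload size read from stdin
    const uint32_t channels        = 1;
    const uint32_t bits_per_sample = 16;

    const uint64_t n_frames = data_bytes/(channels*bits_per_sample/8);

    std::printf("%llu frames\n", (unsigned long long) n_frames); // prints "16000 frames"
    return 0;
}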
{
whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
- wparams.print_realtime = false;
- wparams.print_progress = false;
- wparams.print_timestamps = !params.no_timestamps;
- wparams.print_special_tokens = params.print_special_tokens;
- wparams.translate = params.translate;
- wparams.language = params.language.c_str();
- wparams.n_threads = params.n_threads;
- wparams.n_max_text_ctx = params.max_context >= 0 ? params.max_context : wparams.n_max_text_ctx;
- wparams.offset_ms = params.offset_t_ms;
- wparams.duration_ms = params.duration_ms;
-
- wparams.token_timestamps = params.output_wts || params.max_len > 0;
- wparams.thold_pt = params.word_thold;
- wparams.max_len = params.output_wts && params.max_len == 0 ? 60 : params.max_len;
-
- wparams.speed_up = params.speed_up;
+ wparams.print_realtime = false;
+ wparams.print_progress = false;
+ wparams.print_timestamps = !params.no_timestamps;
+ wparams.print_special = params.print_special;
+ wparams.translate = params.translate;
+ wparams.language = params.language.c_str();
+ wparams.n_threads = params.n_threads;
+ wparams.n_max_text_ctx = params.max_context >= 0 ? params.max_context : wparams.n_max_text_ctx;
+ wparams.offset_ms = params.offset_t_ms;
+ wparams.duration_ms = params.duration_ms;
+
+ wparams.token_timestamps = params.output_wts || params.max_len > 0;
+ wparams.thold_pt = params.word_thold;
+ wparams.max_len = params.output_wts && params.max_len == 0 ? 60 : params.max_len;
+
+ wparams.speed_up = params.speed_up;
// this callback is called on each new segment
if (!wparams.print_realtime) {
#include "whisper.h"
-// third-party utilities
-// use your favorite implementations
-#define DR_WAV_IMPLEMENTATION
-#include "dr_wav.h"
-
#include <SDL.h>
#include <SDL_audio.h>
// command-line parameters
struct whisper_params {
- int32_t seed = -1; // RNG seed, not used currently
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
int32_t step_ms = 3000;
int32_t length_ms = 10000;
int32_t max_tokens = 32;
int32_t audio_ctx = 0;
- bool speed_up = false;
- bool translate = false;
- bool no_context = true;
- bool print_special_tokens = false;
- bool no_timestamps = true;
+ bool speed_up = false;
+ bool translate = false;
+ bool no_context = true;
+ bool print_special = false;
+ bool no_timestamps = true;
std::string language = "en";
std::string model = "models/ggml-base.en.bin";
for (int i = 1; i < argc; i++) {
std::string arg = argv[i];
- if (arg == "-s" || arg == "--seed") {
- params.seed = std::stoi(argv[++i]);
- } else if (arg == "-t" || arg == "--threads") {
- params.n_threads = std::stoi(argv[++i]);
- } else if (arg == "--step") {
- params.step_ms = std::stoi(argv[++i]);
- } else if (arg == "--length") {
- params.length_ms = std::stoi(argv[++i]);
- } else if (arg == "-c" || arg == "--capture") {
- params.capture_id = std::stoi(argv[++i]);
- } else if (arg == "-mt" || arg == "--max_tokens") {
- params.max_tokens = std::stoi(argv[++i]);
- } else if (arg == "-ac" || arg == "--audio_ctx") {
- params.audio_ctx = std::stoi(argv[++i]);
- } else if (arg == "-su" || arg == "--speed-up") {
- params.speed_up = true;
- } else if (arg == "-tr" || arg == "--translate") {
- params.translate = true;
- } else if (arg == "-kc" || arg == "--keep-context") {
- params.no_context = false;
- } else if (arg == "-l" || arg == "--language") {
- params.language = argv[++i];
- if (whisper_lang_id(params.language.c_str()) == -1) {
- fprintf(stderr, "error: unknown language '%s'\n", params.language.c_str());
- whisper_print_usage(argc, argv, params);
- exit(0);
- }
- } else if (arg == "-ps" || arg == "--print_special") {
- params.print_special_tokens = true;
- } else if (arg == "-nt" || arg == "--no_timestamps") {
- params.no_timestamps = true;
- } else if (arg == "-m" || arg == "--model") {
- params.model = argv[++i];
- } else if (arg == "-f" || arg == "--file") {
- params.fname_out = argv[++i];
- } else if (arg == "-h" || arg == "--help") {
+ if (arg == "-h" || arg == "--help") {
whisper_print_usage(argc, argv, params);
exit(0);
- } else {
+ }
+ else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(argv[++i]); }
+ else if ( arg == "--step") { params.step_ms = std::stoi(argv[++i]); }
+ else if ( arg == "--length") { params.length_ms = std::stoi(argv[++i]); }
+ else if (arg == "-c" || arg == "--capture") { params.capture_id = std::stoi(argv[++i]); }
+ else if (arg == "-mt" || arg == "--max-tokens") { params.max_tokens = std::stoi(argv[++i]); }
+ else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
+ else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
+ else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
+ else if (arg == "-kc" || arg == "--keep-context") { params.no_context = false; }
+ else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
+ else if (arg == "-l" || arg == "--language") { params.language = argv[++i]; }
+ else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
+ else if (arg == "-f" || arg == "--file") { params.fname_out = argv[++i]; }
+ else {
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
whisper_print_usage(argc, argv, params);
exit(0);
fprintf(stderr, "usage: %s [options]\n", argv[0]);
fprintf(stderr, "\n");
fprintf(stderr, "options:\n");
- fprintf(stderr, " -h, --help show this help message and exit\n");
- fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1)\n");
- fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
- fprintf(stderr, " --step N audio step size in milliseconds (default: %d)\n", params.step_ms);
- fprintf(stderr, " --length N audio length in milliseconds (default: %d)\n", params.length_ms);
- fprintf(stderr, " -c ID, --capture ID capture device ID (default: -1)\n");
- fprintf(stderr, " -mt N, --max_tokens N maximum number of tokens per audio chunk (default: %d)\n", params.max_tokens);
- fprintf(stderr, " -ac N, --audio_ctx N audio context size (default: %d, 0 - all)\n", params.audio_ctx);
- fprintf(stderr, " -su, --speed-up speed up audio by factor of 2 (faster processing, reduced accuracy, default: %s)\n", params.speed_up ? "true" : "false");
- fprintf(stderr, " -tr, --translate translate from source language to english\n");
- fprintf(stderr, " -kc, --keep-context keep text context from earlier audio (default: false)\n");
- fprintf(stderr, " -ps, --print_special print special tokens\n");
- fprintf(stderr, " -nt, --no_timestamps do not print timestamps\n");
- fprintf(stderr, " -l LANG, --language LANG spoken language (default: %s)\n", params.language.c_str());
- fprintf(stderr, " -m FNAME, --model FNAME model path (default: %s)\n", params.model.c_str());
- fprintf(stderr, " -f FNAME, --file FNAME text output file name (default: no output to file)\n");
+ fprintf(stderr, " -h, --help [default] show this help message and exit\n");
+ fprintf(stderr, " -t N, --threads N [%-7d] number of threads to use during computation\n", params.n_threads);
+ fprintf(stderr, " --step N [%-7d] audio step size in milliseconds\n", params.step_ms);
+ fprintf(stderr, " --length N [%-7d] audio length in milliseconds\n", params.length_ms);
+ fprintf(stderr, " -c ID, --capture ID [%-7d] capture device ID\n", params.capture_id);
+ fprintf(stderr, " -mt N, --max-tokens N [%-7d] maximum number of tokens per audio chunk\n", params.max_tokens);
+ fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx);
+ fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
+ fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
+ fprintf(stderr, " -kc, --keep-context [%-7s] keep context between audio chunks\n", params.no_context ? "false" : "true");
+ fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
+ fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language\n", params.language.c_str());
+ fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
+ fprintf(stderr, " -f FNAME, --file FNAME [%-7s] text output file name\n", params.fname_out.c_str());
fprintf(stderr, "\n");
}
return false;
}
- if (g_dev_id_in == 0) {
- SDL_LogSetPriority(SDL_LOG_CATEGORY_APPLICATION, SDL_LOG_PRIORITY_INFO);
+ SDL_LogSetPriority(SDL_LOG_CATEGORY_APPLICATION, SDL_LOG_PRIORITY_INFO);
- if (SDL_Init(SDL_INIT_AUDIO) < 0) {
- SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "Couldn't initialize SDL: %s\n", SDL_GetError());
- return (1);
- }
+ if (SDL_Init(SDL_INIT_AUDIO) < 0) {
+ SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "Couldn't initialize SDL: %s\n", SDL_GetError());
+ return (1);
+ }
- SDL_SetHintWithPriority(SDL_HINT_AUDIO_RESAMPLING_MODE, "medium", SDL_HINT_OVERRIDE);
+ SDL_SetHintWithPriority(SDL_HINT_AUDIO_RESAMPLING_MODE, "medium", SDL_HINT_OVERRIDE);
- {
- int nDevices = SDL_GetNumAudioDevices(SDL_TRUE);
- fprintf(stderr, "%s: found %d capture devices:\n", __func__, nDevices);
- for (int i = 0; i < nDevices; i++) {
- fprintf(stderr, "%s: - Capture device #%d: '%s'\n", __func__, i, SDL_GetAudioDeviceName(i, SDL_TRUE));
- }
+ {
+ int nDevices = SDL_GetNumAudioDevices(SDL_TRUE);
+ fprintf(stderr, "%s: found %d capture devices:\n", __func__, nDevices);
+ for (int i = 0; i < nDevices; i++) {
+ fprintf(stderr, "%s: - Capture device #%d: '%s'\n", __func__, i, SDL_GetAudioDeviceName(i, SDL_TRUE));
}
}
- if (g_dev_id_in == 0) {
- SDL_AudioSpec capture_spec_requested;
- SDL_AudioSpec capture_spec_obtained;
+ SDL_AudioSpec capture_spec_requested;
+ SDL_AudioSpec capture_spec_obtained;
- SDL_zero(capture_spec_requested);
- SDL_zero(capture_spec_obtained);
+ SDL_zero(capture_spec_requested);
+ SDL_zero(capture_spec_obtained);
- capture_spec_requested.freq = WHISPER_SAMPLE_RATE;
- capture_spec_requested.format = AUDIO_F32;
- capture_spec_requested.channels = 1;
- capture_spec_requested.samples = 1024;
+ capture_spec_requested.freq = WHISPER_SAMPLE_RATE;
+ capture_spec_requested.format = AUDIO_F32;
+ capture_spec_requested.channels = 1;
+ capture_spec_requested.samples = 1024;
- if (capture_id >= 0) {
- fprintf(stderr, "%s: attempt to open capture device %d : '%s' ...\n", __func__, capture_id, SDL_GetAudioDeviceName(capture_id, SDL_TRUE));
- g_dev_id_in = SDL_OpenAudioDevice(SDL_GetAudioDeviceName(capture_id, SDL_TRUE), SDL_TRUE, &capture_spec_requested, &capture_spec_obtained, 0);
- } else {
- fprintf(stderr, "%s: attempt to open default capture device ...\n", __func__);
- g_dev_id_in = SDL_OpenAudioDevice(nullptr, SDL_TRUE, &capture_spec_requested, &capture_spec_obtained, 0);
- }
- if (!g_dev_id_in) {
- fprintf(stderr, "%s: couldn't open an audio device for capture: %s!\n", __func__, SDL_GetError());
- g_dev_id_in = 0;
- } else {
- fprintf(stderr, "%s: obtained spec for input device (SDL Id = %d):\n", __func__, g_dev_id_in);
- fprintf(stderr, "%s: - sample rate: %d\n", __func__, capture_spec_obtained.freq);
- fprintf(stderr, "%s: - format: %d (required: %d)\n", __func__, capture_spec_obtained.format, capture_spec_requested.format);
- fprintf(stderr, "%s: - channels: %d (required: %d)\n", __func__, capture_spec_obtained.channels, capture_spec_requested.channels);
- fprintf(stderr, "%s: - samples per frame: %d\n", __func__, capture_spec_obtained.samples);
- }
+ if (capture_id >= 0) {
+ fprintf(stderr, "%s: attempt to open capture device %d : '%s' ...\n", __func__, capture_id, SDL_GetAudioDeviceName(capture_id, SDL_TRUE));
+ g_dev_id_in = SDL_OpenAudioDevice(SDL_GetAudioDeviceName(capture_id, SDL_TRUE), SDL_TRUE, &capture_spec_requested, &capture_spec_obtained, 0);
+ } else {
+ fprintf(stderr, "%s: attempt to open default capture device ...\n", __func__);
+ g_dev_id_in = SDL_OpenAudioDevice(nullptr, SDL_TRUE, &capture_spec_requested, &capture_spec_obtained, 0);
+ }
+ if (!g_dev_id_in) {
+ fprintf(stderr, "%s: couldn't open an audio device for capture: %s!\n", __func__, SDL_GetError());
+ g_dev_id_in = 0;
+ } else {
+ fprintf(stderr, "%s: obtained spec for input device (SDL Id = %d):\n", __func__, g_dev_id_in);
+ fprintf(stderr, "%s: - sample rate: %d\n", __func__, capture_spec_obtained.freq);
+ fprintf(stderr, "%s: - format: %d (required: %d)\n", __func__, capture_spec_obtained.format, capture_spec_requested.format);
+ fprintf(stderr, "%s: - channels: %d (required: %d)\n", __func__, capture_spec_obtained.channels, capture_spec_requested.channels);
+ fprintf(stderr, "%s: - samples per frame: %d\n", __func__, capture_spec_obtained.samples);
}
-
return true;
}
return 1;
}
- if (params.seed < 0) {
- params.seed = time(NULL);
- }
-
// init audio
if (!audio_sdl_init(params.capture_id)) {
return 1;
}
+ if (whisper_lang_id(params.language.c_str()) == -1) {
+ fprintf(stderr, "error: unknown language '%s'\n", params.language.c_str());
+ whisper_print_usage(argc, argv, params);
+ exit(0);
+ }
+
// whisper init
struct whisper_context * ctx = whisper_init(params.model.c_str());
// main audio loop
while (is_running) {
- // process SDL events:
- SDL_Event event;
- while (SDL_PollEvent(&event)) {
- switch (event.type) {
- case SDL_QUIT:
- {
- is_running = false;
- } break;
- default:
- break;
+ // handle Ctrl + C
+ {
+ SDL_Event event;
+ while (SDL_PollEvent(&event)) {
+ switch (event.type) {
+ case SDL_QUIT:
+ {
+ is_running = false;
+ } break;
+ default:
+ break;
+ }
+ }
+
+ if (!is_running) {
+ break;
}
}
{
whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
- wparams.print_progress = false;
- wparams.print_special_tokens = params.print_special_tokens;
- wparams.print_realtime = false;
- wparams.print_timestamps = !params.no_timestamps;
- wparams.translate = params.translate;
- wparams.no_context = true;
- wparams.single_segment = true;
- wparams.max_tokens = params.max_tokens;
- wparams.language = params.language.c_str();
- wparams.n_threads = params.n_threads;
-
- wparams.audio_ctx = params.audio_ctx;
- wparams.speed_up = params.speed_up;
-
- wparams.prompt_tokens = params.no_context ? nullptr : prompt_tokens.data();
- wparams.prompt_n_tokens = params.no_context ? 0 : prompt_tokens.size();
+ wparams.print_progress = false;
+ wparams.print_special = params.print_special;
+ wparams.print_realtime = false;
+ wparams.print_timestamps = !params.no_timestamps;
+ wparams.translate = params.translate;
+ wparams.no_context = true;
+ wparams.single_segment = true;
+ wparams.max_tokens = params.max_tokens;
+ wparams.language = params.language.c_str();
+ wparams.n_threads = params.n_threads;
+
+ wparams.audio_ctx = params.audio_ctx;
+ wparams.speed_up = params.speed_up;
+
+ wparams.prompt_tokens = params.no_context ? nullptr : prompt_tokens.data();
+ wparams.prompt_n_tokens = params.no_context ? 0 : prompt_tokens.size();
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
fprintf(stderr, "%s: failed to process audio\n", argv[0]);
struct whisper_full_params wparams = whisper_full_default_params(whisper_sampling_strategy::WHISPER_SAMPLING_GREEDY);
- wparams.n_threads = std::min(N_THREAD, (int) std::thread::hardware_concurrency());
- wparams.offset_ms = 0;
- wparams.translate = false;
- wparams.no_context = true;
- wparams.single_segment = true;
- wparams.print_realtime = false;
- wparams.print_progress = false;
- wparams.print_timestamps = true;
- wparams.print_special_tokens = false;
+ wparams.n_threads = std::min(N_THREAD, (int) std::thread::hardware_concurrency());
+ wparams.offset_ms = 0;
+ wparams.translate = false;
+ wparams.no_context = true;
+ wparams.single_segment = true;
+ wparams.print_realtime = false;
+ wparams.print_progress = false;
+ wparams.print_timestamps = true;
+ wparams.print_special = false;
wparams.max_tokens = 32;
wparams.audio_ctx = 768; // partial encoder context for better performance
// whisper context
auto & ctx = g_contexts[index];
- const int64_t step_samples = 2*WHISPER_SAMPLE_RATE;
- const int64_t step_ms = (step_samples*1000)/WHISPER_SAMPLE_RATE;
+ const int64_t step_samples = 2*WHISPER_SAMPLE_RATE;
const int64_t window_samples = 9*WHISPER_SAMPLE_RATE;
+ const int64_t step_ms = (step_samples*1000)/WHISPER_SAMPLE_RATE;
auto t_last = std::chrono::high_resolution_clock::now();
pcmf32 = std::vector<float>(g_pcmf32.end() - std::min((int64_t) g_pcmf32.size(), window_samples), g_pcmf32.end());
}
- // if energy in during last second is above threshold, then skip
+ // VAD: if energy in during last second is above threshold, then skip
{
float energy_all = 0.0f;
float energy_1s = 0.0f;
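// Aside (self-contained sketch, not part of the diff): one simple way to implement the
// energy check described by the comment above - compare the mean absolute amplitude of the
// most recent window against the whole buffer and hold off on transcription while the
// recent window is still "loud". The window length and threshold are illustrative
// assumptions, not values taken from this change.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

static bool vad_energy_is_speaking(const std::vector<float> & pcmf32, size_t n_last, float thold) {
    if (pcmf32.empty()) {
        return false;
    }

    n_last = std::min(n_last, pcmf32.size());

    float energy_all  = 0.0f;
    float energy_last = 0.0f;

    for (size_t i = 0; i < pcmf32.size(); ++i) {
        energy_all += std::fabs(pcmf32[i]);
        if (i + n_last >= pcmf32.size()) {
            energy_last += std::fabs(pcmf32[i]);
        }
    }

    energy_all  /= pcmf32.size();
    energy_last /= n_last;

    // speech is probably still in progress while the last window is noticeably louder
    // than the overall average
    return energy_last > thold*energy_all;
}
// e.g. a caller could test vad_energy_is_speaking(pcmf32, WHISPER_SAMPLE_RATE, 0.1f)
// and postpone the whisper_full() call while it returns true.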
// run the model
struct whisper_full_params params = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
- params.print_realtime = true;
- params.print_progress = false;
- params.print_timestamps = true;
- params.print_special_tokens = false;
- params.translate = false;
- params.language = "en";
- params.n_threads = 4;
- params.offset_ms = 0;
+ params.print_realtime = true;
+ params.print_progress = false;
+ params.print_timestamps = true;
+ params.print_special = false;
+ params.translate = false;
+ params.language = "en";
+ params.n_threads = 4;
+ params.offset_ms = 0;
CFTimeInterval startTime = CACurrentMediaTime();
set(TARGET whisper.wasm)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/index-tmpl.html ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TARGET}/index.html @ONLY)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/../helpers.js ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TARGET}/helpers.js @ONLY)
configure_file(${CMAKE_SOURCE_DIR}/bindings/javascript/whisper.js ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TARGET}/whisper.js COPYONLY)
case WHISPER_SAMPLING_GREEDY:
{
result = {
- /*.strategy =*/ WHISPER_SAMPLING_GREEDY,
+ /*.strategy =*/ WHISPER_SAMPLING_GREEDY,
- /*.n_threads =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
- /*.n_max_text_ctx =*/ 16384,
- /*.offset_ms =*/ 0,
- /*.duration_ms =*/ 0,
+ /*.n_threads =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
+ /*.n_max_text_ctx =*/ 16384,
+ /*.offset_ms =*/ 0,
+ /*.duration_ms =*/ 0,
- /*.translate =*/ false,
- /*.no_context =*/ false,
- /*.single_segment =*/ false,
- /*.print_special_tokens =*/ false,
- /*.print_progress =*/ true,
- /*.print_realtime =*/ false,
- /*.print_timestamps =*/ true,
+ /*.translate =*/ false,
+ /*.no_context =*/ false,
+ /*.single_segment =*/ false,
+ /*.print_special =*/ false,
+ /*.print_progress =*/ true,
+ /*.print_realtime =*/ false,
+ /*.print_timestamps =*/ true,
- /*.token_timestamps =*/ false,
- /*.thold_pt =*/ 0.01f,
- /*.thold_ptsum =*/ 0.01f,
- /*.max_len =*/ 0,
- /*.max_tokens =*/ 0,
+ /*.token_timestamps =*/ false,
+ /*.thold_pt =*/ 0.01f,
+ /*.thold_ptsum =*/ 0.01f,
+ /*.max_len =*/ 0,
+ /*.max_tokens =*/ 0,
- /*.speed_up =*/ false,
- /*.audio_ctx =*/ 0,
+ /*.speed_up =*/ false,
+ /*.audio_ctx =*/ 0,
- /*.prompt_tokens =*/ nullptr,
- /*.prompt_n_tokens =*/ 0,
+ /*.prompt_tokens =*/ nullptr,
+ /*.prompt_n_tokens =*/ 0,
- /*.language =*/ "en",
+ /*.language =*/ "en",
- /*.greedy =*/ {
+ /*.greedy =*/ {
/*.n_past =*/ 0,
},
- /*.beam_search =*/ {
+ /*.beam_search =*/ {
/*.n_past =*/ -1,
/*.beam_width =*/ -1,
/*.n_best =*/ -1,
},
- /*.new_segment_callback =*/ nullptr,
+ /*.new_segment_callback =*/ nullptr,
/*.new_segment_callback_user_data =*/ nullptr,
};
} break;
case WHISPER_SAMPLING_BEAM_SEARCH:
{
result = {
- /*.strategy =*/ WHISPER_SAMPLING_BEAM_SEARCH,
+ /*.strategy =*/ WHISPER_SAMPLING_BEAM_SEARCH,
- /*.n_threads =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
- /*.n_max_text_ctx =*/ 16384,
- /*.offset_ms =*/ 0,
- /*.duration_ms =*/ 0,
+ /*.n_threads =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
+ /*.n_max_text_ctx =*/ 16384,
+ /*.offset_ms =*/ 0,
+ /*.duration_ms =*/ 0,
- /*.translate =*/ false,
- /*.no_context =*/ false,
- /*.single_segment =*/ false,
- /*.print_special_tokens =*/ false,
- /*.print_progress =*/ true,
- /*.print_realtime =*/ false,
- /*.print_timestamps =*/ true,
+ /*.translate =*/ false,
+ /*.no_context =*/ false,
+ /*.single_segment =*/ false,
+ /*.print_special =*/ false,
+ /*.print_progress =*/ true,
+ /*.print_realtime =*/ false,
+ /*.print_timestamps =*/ true,
- /*.token_timestamps =*/ false,
- /*.thold_pt =*/ 0.01f,
- /*.thold_ptsum =*/ 0.01f,
- /*.max_len =*/ 0,
- /*.max_tokens =*/ 0,
+ /*.token_timestamps =*/ false,
+ /*.thold_pt =*/ 0.01f,
+ /*.thold_ptsum =*/ 0.01f,
+ /*.max_len =*/ 0,
+ /*.max_tokens =*/ 0,
- /*.speed_up =*/ false,
- /*.audio_ctx =*/ 0,
+ /*.speed_up =*/ false,
+ /*.audio_ctx =*/ 0,
- /*.prompt_tokens =*/ nullptr,
- /*.prompt_n_tokens =*/ 0,
+ /*.prompt_tokens =*/ nullptr,
+ /*.prompt_n_tokens =*/ 0,
- /*.language =*/ "en",
+ /*.language =*/ "en",
- /*.greedy =*/ {
+ /*.greedy =*/ {
/*.n_past =*/ -1,
},
- /*.beam_search =*/ {
+ /*.beam_search =*/ {
/*.n_past =*/ 0,
/*.beam_width =*/ 10,
/*.n_best =*/ 5,
},
- /*.new_segment_callback =*/ nullptr,
+ /*.new_segment_callback =*/ nullptr,
/*.new_segment_callback_user_data =*/ nullptr,
};
} break;
// ctx->vocab.id_to_token[tokens_cur[i].id].c_str(), tokens_cur[i].p,
// ctx->vocab.id_to_token[tokens_cur[i].tid].c_str(), tokens_cur[i].pt);
- if (params.print_special_tokens == false && tokens_cur[i].id >= whisper_token_eot(ctx)) {
+ if (params.print_special == false && tokens_cur[i].id >= whisper_token_eot(ctx)) {
} else {
text += whisper_token_to_str(ctx, tokens_cur[i].id);
}
bool translate;
bool no_context;
bool single_segment; // force single segment output (useful for streaming)
- bool print_special_tokens;
+ bool print_special;
bool print_progress;
bool print_realtime;
bool print_timestamps;
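// Aside (usage sketch, not part of the diff): after this rename, callers set print_special
// instead of print_special_tokens. A minimal example, assuming a valid whisper_context *
// and a buffer of 16 kHz f32 samples (transcribe_example is a hypothetical helper name):
#include <cstdio>

#include "whisper.h"

void transcribe_example(struct whisper_context * ctx, const float * samples, int n_samples) {
    struct whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);

    wparams.print_special    = false; // renamed field - suppress special tokens in the output
    wparams.print_timestamps = true;
    wparams.n_threads        = 4;

    if (whisper_full(ctx, wparams, samples, n_samples) == 0) {
        const int n_segments = whisper_full_n_segments(ctx);
        for (int i = 0; i < n_segments; ++i) {
            std::printf("%s\n", whisper_full_get_segment_text(ctx, i));
        }
    }
}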