From: InconsolableCellist
Date: Wed, 29 Mar 2023 21:10:20 +0000 (-0600)
Subject: talk-llama : fixing usage message for talk-llama (#687)
X-Git-Tag: upstream/1.7.4~1522
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=5e6e2187a3ded25deb1b3d99eb3d6cccb968aa87;p=pkg%2Fggml%2Fsources%2Fwhisper.cpp

talk-llama : fixing usage message for talk-llama (#687)

"-ml" instead of "-mg" for specifying the llama file
---

diff --git a/examples/talk-llama/talk-llama.cpp b/examples/talk-llama/talk-llama.cpp
index af5309cb..de915a6b 100644
--- a/examples/talk-llama/talk-llama.cpp
+++ b/examples/talk-llama/talk-llama.cpp
@@ -120,7 +120,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
     fprintf(stderr, " -p NAME, --person NAME [%-7s] person name (for prompt selection)\n", params.person.c_str());
     fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language\n", params.language.c_str());
     fprintf(stderr, " -mw FILE, --model-whisper [%-7s] whisper model file\n", params.model_wsp.c_str());
-    fprintf(stderr, " -mg FILE, --model-llama [%-7s] llama model file\n", params.model_llama.c_str());
+    fprintf(stderr, " -ml FILE, --model-llama [%-7s] llama model file\n", params.model_llama.c_str());
     fprintf(stderr, " --n-parts-llama N [%-7d] num parts in llama model file\n", params.n_parts_llama);
     fprintf(stderr, " -s FILE, --speak TEXT [%-7s] command for TTS\n", params.speak.c_str());
     fprintf(stderr, " --prompt-file FNAME [%-7s] file with custom prompt to start dialog\n", "");