From: petterreinholdtsen
Date: Tue, 14 May 2024 18:32:41 +0000 (+0200)
Subject: talk-llama : reject runs without required arguments (#2153)
X-Git-Tag: upstream/1.7.4~750
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=9d5771ae43d7fc7cca9d31dd924b13a29144e476;p=pkg%2Fggml%2Fsources%2Fwhisper.cpp

talk-llama : reject runs without required arguments (#2153)

* Extended talk-llama example to reject runs without required arguments.

Print a warning and exit if the models are not specified on the command line.

* Update examples/talk-llama/talk-llama.cpp

* Update examples/talk-llama/talk-llama.cpp

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
---

diff --git a/examples/talk-llama/talk-llama.cpp b/examples/talk-llama/talk-llama.cpp
index bb8c26d5..838d6f56 100644
--- a/examples/talk-llama/talk-llama.cpp
+++ b/examples/talk-llama/talk-llama.cpp
@@ -288,6 +288,10 @@ int main(int argc, char ** argv) {
     cparams.use_gpu = params.use_gpu;
 
     struct whisper_context * ctx_wsp = whisper_init_from_file_with_params(params.model_wsp.c_str(), cparams);
+    if (!ctx_wsp) {
+        fprintf(stderr, "No whisper.cpp model specified. Please provide using -mw <modelfile>\n");
+        return 1;
+    }
 
     // llama init
 
@@ -301,6 +305,10 @@ int main(int argc, char ** argv) {
     }
 
     struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);
+    if (!model_llama) {
+        fprintf(stderr, "No llama.cpp model specified. Please provide using -ml <modelfile>\n");
+        return 1;
+    }
 
     llama_context_params lcparams = llama_context_default_params();
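
Both hunks apply the same fail-fast guard: whisper_init_from_file_with_params() and llama_load_model_from_file() return NULL when the model cannot be loaded, so each call is now followed by a check that prints a hint and exits with status 1 instead of continuing with a null context. Below is a minimal self-contained sketch of that guard in isolation; load_model() and its behavior are placeholders for illustration, not part of the commit.

    // sketch.cpp : the fail-fast guard from this commit, in isolation.
    // load_model() is a hypothetical stand-in for the real loaders
    // (whisper_init_from_file_with_params, llama_load_model_from_file),
    // which return NULL when the model cannot be loaded.
    #include <cstdio>

    struct model { const char * path; };

    static model * load_model(const char * path) {
        if (path == nullptr || path[0] == '\0') {
            return nullptr; // like the real loaders: NULL on failure
        }
        static model m;
        m.path = path;
        return &m;
    }

    int main(int argc, char ** argv) {
        model * m = load_model(argc > 1 ? argv[1] : "");
        if (!m) {
            // same shape as the checks added above: report and exit non-zero
            fprintf(stderr, "no model specified, please provide a model file\n");
            return 1;
        }
        fprintf(stderr, "loaded model from '%s'\n", m->path);
        return 0;
    }

With the guard in place, running talk-llama without -mw or -ml terminates immediately with exit status 1 and a usage hint, rather than failing later in main() on a null context.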