* Extended the talk-llama example to reject runs without the required arguments:
print a warning and exit if the models are not specified on the command line.
* Update examples/talk-llama/talk-llama.cpp
---------
Co-authored-by: Georgi Gerganov <redacted>
cparams.use_gpu = params.use_gpu;
struct whisper_context * ctx_wsp = whisper_init_from_file_with_params(params.model_wsp.c_str(), cparams);
+ if (!ctx_wsp) {
+ fprintf(stderr, "No whisper.cpp model specified. Please provide using -mw <modelfile>\n");
+ return 1;
+ }
// llama init
struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);
+ if (!model_llama) {
+ fprintf(stderr, "No llama.cpp model specified. Please provide using -ml <modelfile>\n");
+ return 1;
+ }
llama_context_params lcparams = llama_context_default_params();
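
For reference, a minimal sketch of the same guard expressed as an up-front argument
check, so that "no model specified" is reported before any load attempt (a null
return from the loaders can also mean the file exists but failed to load). The
`whisper_params` struct below is a reduced stand-in for the example's real params
struct, and `check_required_args` is a hypothetical helper name:

#include <cstdio>
#include <string>

// Reduced stand-in for the example's params struct (hypothetical).
struct whisper_params {
    std::string model_wsp;   // set by -mw <modelfile>
    std::string model_llama; // set by -ml <modelfile>
};

// Hypothetical helper: validate required arguments before loading anything.
static bool check_required_args(const whisper_params & params) {
    if (params.model_wsp.empty()) {
        fprintf(stderr, "No whisper.cpp model specified. Please provide one using -mw <modelfile>\n");
        return false;
    }
    if (params.model_llama.empty()) {
        fprintf(stderr, "No llama.cpp model specified. Please provide one using -ml <modelfile>\n");
        return false;
    }
    return true;
}

Calling such a helper right after argument parsing, and keeping the null checks on
the loader return values as in the diff above, covers both missing arguments and
models that fail to load.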