From: Rand Xie
Date: Sat, 1 Jul 2023 16:02:58 +0000 (+0800)
Subject: llama : catch llama_load_session_file_internal exceptions (#2022)
X-Git-Tag: gguf-v0.4.0~539
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=cb44dbc7de287b3d17772cfb1aa49d55e082ce5b;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : catch llama_load_session_file_internal exceptions (#2022)

* convert checks in llama_load_session_file to throw and handle them

* make llama_load_session_file_internal static

* address feedbacks to avoid using exceptions
---

diff --git a/llama.cpp b/llama.cpp
index 049f73e4..3a7a0d5d 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3219,7 +3219,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
     return nread;
 }
 
-bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
+static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
     llama_file file(path_session, "rb");
 
     // sanity checks
@@ -3269,8 +3269,15 @@ bool llama_load_session_file(struct llama_context * ctx, const char * path_sessi
 
         llama_set_state_data(ctx, state_data.data());
     }
+}
 
-    return true;
+bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
+    try {
+        return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
+    } catch (const std::exception & err) {
+        fprintf(stderr, "error loading session file: %s\n", err.what());
+        return false;
+    }
 }
 
 bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
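
The patch applies a common exception-boundary pattern: the private helper is allowed to throw (llama_file's read helpers throw std::runtime_error on a short or failed read), while the public, C-callable entry point catches everything, logs to stderr, and reports failure through its bool return value. Below is a minimal, self-contained sketch of the same pattern; load_session and load_session_internal are illustrative stand-ins, not the llama.cpp symbols, and the throwing file reads are modeled with a plain std::runtime_error.

// Minimal sketch of the exception-boundary pattern from this commit
// (illustrative names; not the llama.cpp code itself).
#include <cstdio>
#include <stdexcept>

// Private helper: free to throw, like llama_load_session_file_internal
// after this commit. A failed read is modeled by a runtime_error.
static bool load_session_internal(const char * path) {
    if (path == nullptr || path[0] == '\0') {
        throw std::runtime_error("unexpectedly reached end of file");
    }
    // ... read magic/version, prompt tokens, context state ...
    return true;
}

// Public boundary: callable from C, so no exception may escape.
// Failures are logged and converted into a false return value.
bool load_session(const char * path) {
    try {
        return load_session_internal(path);
    } catch (const std::exception & err) {
        fprintf(stderr, "error loading session file: %s\n", err.what());
        return false;
    }
}

int main() {
    // The empty path trips the throw; the boundary turns it into `false`.
    if (!load_session("")) {
        fprintf(stderr, "load failed, starting a fresh session\n");
    }
    return 0;
}

Per the review feedback recorded in the commit message, exceptions stay an internal detail: the public header keeps its plain bool contract, which also keeps the function safe to call from C bindings.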
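
From the caller's side nothing changes syntactically, only the failure mode: a corrupt or truncated session file now produces an stderr message and a false return instead of an uncaught exception terminating the process. A hedged usage sketch follows, assuming llama.h is in scope and a valid llama_context * ctx; try_resume and the n_ctx capacity choice are illustrative, not part of the library.

#include <cstdio>
#include <vector>

#include "llama.h"  // assumed available: declares llama_token and llama_load_session_file

void try_resume(struct llama_context * ctx, const char * path_session, int n_ctx) {
    std::vector<llama_token> session_tokens(n_ctx);
    size_t n_token_count = 0;

    if (!llama_load_session_file(ctx, path_session,
                                 session_tokens.data(), session_tokens.size(),
                                 &n_token_count)) {
        // After this commit, I/O and format errors surface here as `false`
        // (with details already printed to stderr by the library).
        fprintf(stderr, "could not restore session from '%s', starting fresh\n", path_session);
        return;
    }

    session_tokens.resize(n_token_count);
    // ... reuse session_tokens to skip re-evaluating the saved prompt ...
}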