static std::vector<llama_token> * g_output_tokens;
static bool is_interacting = false;
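+
+// check whether a file exists and can be opened for reading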
+static bool file_exists(const std::string &path) {
+    std::ifstream f(path.c_str());
+    return f.good();
+}
+
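+// check whether a file is empty: opening with std::ios::ate seeks straight to the
+// end, so tellg() reports the file size and a result of 0 means the file is empty;
+// the stream is configured to throw on open failure, so call this only after
+// file_exists() has returned true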
+static bool file_is_empty(const std::string &path) {
+    std::ifstream f;
+    f.exceptions(std::ifstream::failbit | std::ifstream::badbit);
+    f.open(path.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
+    return f.tellg() == 0;
+}
static void write_logfile(
const llama_context * ctx, const gpt_params & params, const llama_model * model,
    if (!path_session.empty()) {
        LOG_TEE("%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());
-
-        // fopen to check for existing session
-        FILE * fp = std::fopen(path_session.c_str(), "rb");
-        if (fp != NULL) {
-            std::fclose(fp);
-
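+        // three cases: no session file yet, an empty file, or a file with saved tokens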
+        if (!file_exists(path_session)) {
+            LOG_TEE("%s: session file does not exist, will create\n", __func__);
+        } else if (file_is_empty(path_session)) {
+            LOG_TEE("%s: session file is empty, a new session will be initialized\n", __func__);
+        } else {
+            // the file exists and is not empty
            session_tokens.resize(n_ctx);
            size_t n_token_count_out = 0;
            if (!llama_load_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
                LOG_TEE("%s: error: failed to load session file '%s'\n", __func__, path_session.c_str());
                return 1;
            }
            session_tokens.resize(n_token_count_out);
            llama_set_rng_seed(ctx, params.seed);
-
-            LOG_TEE("%s: loaded a session with prompt size of %d tokens\n", __func__, (int) session_tokens.size());
-        } else {
-            LOG_TEE("%s: session file does not exist, will create\n", __func__);
+            LOG_TEE("%s: loaded a session with prompt size of %d tokens\n", __func__, (int)session_tokens.size());
        }
    }