From: Georgi Gerganov
Date: Fri, 24 Mar 2023 15:21:01 +0000 (+0200)
Subject: Properly free llama_context on failure
X-Git-Tag: gguf-v0.4.0~1127
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=afd220d9c665e4c19107120ace2f0cb742e28aa1;p=pkg%2Fggml%2Fsources%2Fllama.cpp

Properly free llama_context on failure
---

diff --git a/llama.cpp b/llama.cpp
index 5d56cc90..cdb86282 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1432,16 +1432,16 @@ struct llama_context * llama_init_from_file(
     if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_parts, type_memory,
                           params.vocab_only)) {
         fprintf(stderr, "%s: failed to load model\n", __func__);
-        delete ctx;
+        llama_free(ctx);
         return nullptr;
     }
-    
+
     if (params.use_mlock) {
         char *err;
         if (!ggml_mlock(ctx->model.ctx, &err)) {
             fprintf(stderr, "%s\n", err);
             free(err);
-            delete ctx;
+            llama_free(ctx);
             return nullptr;
         }
     }
@@ -1464,7 +1464,9 @@ struct llama_context * llama_init_from_file(
 }
 
 void llama_free(struct llama_context * ctx) {
-    ggml_free(ctx->model.ctx);
+    if (ctx->model.ctx) {
+        ggml_free(ctx->model.ctx);
+    }
     delete ctx;
 }
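Note: the pattern in this patch is to funnel every error path in llama_init_from_file through llama_free(), and to make llama_free() tolerate a partially initialized context: when llama_model_load fails, ctx->model.ctx may never have been set, and the added null check avoids handing a null pointer to ggml_free (which, judging by the need for this check, was presumably not null-safe at the time). Below is a minimal standalone sketch of that pattern; every name ending in _stub or _sketch is a hypothetical stand-in for illustration, not the real llama.cpp/ggml API.

// Minimal sketch of the failure-path pattern above. Names ending in
// _stub/_sketch are hypothetical stand-ins, not the real llama.cpp API.
#include <cstdio>

struct ggml_context_stub {};

static ggml_context_stub * ggml_init_stub(bool fail) {
    return fail ? nullptr : new ggml_context_stub;
}

static void ggml_free_stub(ggml_context_stub * g) {
    delete g;
}

struct llama_context_sketch {
    struct {
        ggml_context_stub * ctx = nullptr; // stays null if loading fails early
    } model;
};

// Single teardown path, safe on a partially initialized context:
// this is what the null check added to llama_free buys.
static void llama_free_sketch(llama_context_sketch * ctx) {
    if (ctx->model.ctx) {
        ggml_free_stub(ctx->model.ctx);
    }
    delete ctx;
}

static llama_context_sketch * llama_init_sketch(bool fail_load) {
    llama_context_sketch * ctx = new llama_context_sketch;
    ctx->model.ctx = ggml_init_stub(fail_load);
    if (!ctx->model.ctx) {
        fprintf(stderr, "%s: failed to load model\n", __func__);
        llama_free_sketch(ctx); // not `delete ctx`: cleanup lives in one place
        return nullptr;
    }
    return ctx;
}

int main() {
    llama_context_sketch * bad = llama_init_sketch(/*fail_load =*/ true);
    if (bad == nullptr) {
        fprintf(stderr, "init failed, context was freed without leaking\n");
    }
    llama_context_sketch * good = llama_init_sketch(/*fail_load =*/ false);
    llama_free_sketch(good);
    return 0;
}

Centralizing teardown this way means a future field added to llama_context only needs its cleanup written once, in llama_free, instead of on every error path; the bare `delete ctx` the patch removes would silently skip ggml_free and leak the model's ggml allocation.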