From: Uzo Nweke
Date: Fri, 19 Jan 2024 18:20:50 +0000 (-0500)
Subject: finetune : fix ggml_allocr lifetimes (tmp workaround) (#5033)
X-Git-Tag: upstream/0.0.4488~2562
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=381ee195721d8e747ee31a60c0751822b3072f02;p=pkg%2Fggml%2Fsources%2Fllama.cpp

finetune : fix ggml_allocr lifetimes (tmp workaround) (#5033)

* Fix issue with alloc causing max_compute_size to be calculated incorrectly

* remove ggml_allocr_free as suggested in issue #4791
---

diff --git a/examples/train-text-from-scratch/train-text-from-scratch.cpp b/examples/train-text-from-scratch/train-text-from-scratch.cpp
index 4a9a2340..eee9d4de 100644
--- a/examples/train-text-from-scratch/train-text-from-scratch.cpp
+++ b/examples/train-text-from-scratch/train-text-from-scratch.cpp
@@ -263,7 +263,6 @@ static void init_model(struct my_llama_model * model) {
     model->data.resize(size + tensor_alignment);
     alloc = ggml_allocr_new(model->data.data(), model->data.size(), tensor_alignment);
     alloc_model(alloc, model);
-    ggml_allocr_free(alloc);
 }
 
 static void randomize_model(struct my_llama_model * model, int seed, float mean, float std, float min, float max) {
@@ -1102,7 +1101,6 @@ int main(int argc, char ** argv) {
     alloc = ggml_allocr_new(mem_input_data.data(), mem_input_data.size(), tensor_alignment);
     ggml_allocr_alloc(alloc, tokens_input);
     ggml_allocr_alloc(alloc, target_probs);
-    ggml_allocr_free(alloc);
 
     // context for compute tensors without their data
     const size_t estimated_compute_size_wo_data = (
@@ -1149,7 +1147,6 @@ int main(int argc, char ** argv) {
             best_compute_size = max_compute_size;
             best_order = gf->order;
         }
-        ggml_allocr_free(alloc);
         ggml_free(ctx_compute);
     }
     size_t max_compute_size = best_compute_size;
@@ -1177,7 +1174,6 @@ int main(int argc, char ** argv) {
         params.common.use_flash,
         params.common.use_checkpointing
     );
-    ggml_allocr_free(alloc);
 
     std::vector<llama_token> train_tokens;
     std::vector<size_t> train_samples_begin;