From: Salvatore Mesoraca Date: Tue, 27 Aug 2024 06:25:12 +0000 (+0200) Subject: tests : fix memory leaks (#936) X-Git-Tag: upstream/0.0.1642~439 X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=2438d62cb9290b5b5dc6228dec76fe81cf64238e;p=pkg%2Fggml%2Fsources%2Fggml tests : fix memory leaks (#936) It is annoying to run the tests using the sanitizers because of all the uninteresting reports about the memory leaked by the tests themselves. Signed-off-by: Salvatore Mesoraca --- diff --git a/tests/test-conv-transpose-1d.cpp b/tests/test-conv-transpose-1d.cpp index 83c60b42..c2e517dd 100644 --- a/tests/test-conv-transpose-1d.cpp +++ b/tests/test-conv-transpose-1d.cpp @@ -421,9 +421,9 @@ int main(void) } } - float* conv1d_transpose_data_0 = new float[ggml_nelements(conv1d_transpose_res_0)]; + std::vector<float> conv1d_transpose_data_0(ggml_nelements(conv1d_transpose_res_0)); - ggml_backend_tensor_get(conv1d_transpose_res_0, conv1d_transpose_data_0, 0, ggml_nbytes(conv1d_transpose_res_0)); + ggml_backend_tensor_get(conv1d_transpose_res_0, conv1d_transpose_data_0.data(), 0, ggml_nbytes(conv1d_transpose_res_0)); const int n_conv_transpose_1d_test_0 = 4; @@ -440,9 +440,9 @@ int main(void) } } - float* conv1d_transpose_data_1 = new float[ggml_nelements(conv1d_transpose_res_1)]; + std::vector<float> conv1d_transpose_data_1(ggml_nelements(conv1d_transpose_res_1)); - ggml_backend_tensor_get(conv1d_transpose_res_1, conv1d_transpose_data_1, 0, ggml_nbytes(conv1d_transpose_res_1)); + ggml_backend_tensor_get(conv1d_transpose_res_1, conv1d_transpose_data_1.data(), 0, ggml_nbytes(conv1d_transpose_res_1)); @@ -462,9 +462,9 @@ int main(void) } } - float* conv1d_transpose_data_2 = new float[ggml_nelements(conv1d_transpose_res_2)]; + std::vector<float> conv1d_transpose_data_2(ggml_nelements(conv1d_transpose_res_2)); - ggml_backend_tensor_get(conv1d_transpose_res_2, conv1d_transpose_data_2, 0, ggml_nbytes(conv1d_transpose_res_2)); + ggml_backend_tensor_get(conv1d_transpose_res_2, 
conv1d_transpose_data_2.data(), 0, ggml_nbytes(conv1d_transpose_res_2)); const int n_conv_transpose_1d_test_2 = 10; @@ -481,9 +481,9 @@ int main(void) } } - float* conv1d_transpose_data_3 = new float[ggml_nelements(conv1d_transpose_res_3)]; + std::vector<float> conv1d_transpose_data_3(ggml_nelements(conv1d_transpose_res_3)); - ggml_backend_tensor_get(conv1d_transpose_res_3, conv1d_transpose_data_3, 0, ggml_nbytes(conv1d_transpose_res_3)); + ggml_backend_tensor_get(conv1d_transpose_res_3, conv1d_transpose_data_3.data(), 0, ggml_nbytes(conv1d_transpose_res_3)); const int n_conv_transpose_1d_test_3 = 14; @@ -501,9 +501,9 @@ int main(void) } } - float* conv1d_transpose_data_4 = new float[ggml_nelements(conv1d_transpose_res_4)]; + std::vector<float> conv1d_transpose_data_4(ggml_nelements(conv1d_transpose_res_4)); - ggml_backend_tensor_get(conv1d_transpose_res_4, conv1d_transpose_data_4, 0, ggml_nbytes(conv1d_transpose_res_4)); + ggml_backend_tensor_get(conv1d_transpose_res_4, conv1d_transpose_data_4.data(), 0, ggml_nbytes(conv1d_transpose_res_4)); const int n_conv_transpose_1d_test_4 = 12; @@ -522,9 +522,9 @@ int main(void) } } - float* conv1d_transpose_data_5 = new float[ggml_nelements(conv1d_transpose_res_5)]; + std::vector<float> conv1d_transpose_data_5(ggml_nelements(conv1d_transpose_res_5)); - ggml_backend_tensor_get(conv1d_transpose_res_5, conv1d_transpose_data_5, 0, ggml_nbytes(conv1d_transpose_res_5)); + ggml_backend_tensor_get(conv1d_transpose_res_5, conv1d_transpose_data_5.data(), 0, ggml_nbytes(conv1d_transpose_res_5)); const int n_conv_transpose_1d_test_5 = 18; @@ -543,9 +543,9 @@ int main(void) } } - float* conv1d_transpose_data_6 = new float[ggml_nelements(conv1d_transpose_res_6)]; + std::vector<float> conv1d_transpose_data_6(ggml_nelements(conv1d_transpose_res_6)); - ggml_backend_tensor_get(conv1d_transpose_res_6, conv1d_transpose_data_6, 0, ggml_nbytes(conv1d_transpose_res_6)); + ggml_backend_tensor_get(conv1d_transpose_res_6, conv1d_transpose_data_6.data(), 0, 
ggml_nbytes(conv1d_transpose_res_6)); const int n_conv_transpose_1d_test_6 = 24; @@ -565,9 +565,9 @@ int main(void) } } - float* conv1d_transpose_data_7 = new float[ggml_nelements(conv1d_transpose_res_7)]; + std::vector<float> conv1d_transpose_data_7(ggml_nelements(conv1d_transpose_res_7)); - ggml_backend_tensor_get(conv1d_transpose_res_7, conv1d_transpose_data_7, 0, ggml_nbytes(conv1d_transpose_res_7)); + ggml_backend_tensor_get(conv1d_transpose_res_7, conv1d_transpose_data_7.data(), 0, ggml_nbytes(conv1d_transpose_res_7)); const int n_conv_transpose_1d_test_7 = 32*1584; diff --git a/tests/test-conv1d.cpp b/tests/test-conv1d.cpp index 53481470..d38d2993 100644 --- a/tests/test-conv1d.cpp +++ b/tests/test-conv1d.cpp @@ -40,17 +40,17 @@ void load_model(test_model & model, bool use_gpu = false) { int IL = 8, N = 1; // Initialize adata - float * adata = new float[K * IC * OC]; + std::vector<float> adata(K * IC * OC); for (int i = 0; i < K * IC * OC; i++) { adata[i] = 4.5f; } // Convert adata to fp16 format std::vector<ggml_fp16_t> hadata(K * IC * OC); - ggml_fp32_to_fp16_row(adata, hadata.data(), K * IC * OC); + ggml_fp32_to_fp16_row(adata.data(), hadata.data(), K * IC * OC); // Initialize bdata - float * bdata = new float[IL * IC * N]; + std::vector<float> bdata(IL * IC * N); for (int i = 0; i < IL * IC * N; i++) { bdata[i] = 2.5f; } @@ -129,9 +129,9 @@ void load_model(test_model & model, bool use_gpu = false) { || ggml_backend_is_metal(model.backend) #endif ) { - memcpy(model.b->data, bdata, ggml_nbytes(model.b)); + memcpy(model.b->data, bdata.data(), ggml_nbytes(model.b)); } else { - ggml_backend_tensor_set(model.b, bdata, 0, ggml_nbytes(model.b)); + ggml_backend_tensor_set(model.b, bdata.data(), 0, ggml_nbytes(model.b)); } } @@ -226,11 +226,11 @@ int main(void) } } - uint16_t* im2col_data = new uint16_t[ggml_nelements(im2col_res)]; - float* conv2d_data = new float[ggml_nelements(conv1d_res)]; + std::vector<uint16_t> im2col_data(ggml_nelements(im2col_res)); + std::vector<float> 
conv2d_data(ggml_nelements(conv1d_res)); - ggml_backend_tensor_get(im2col_res, im2col_data, 0, ggml_nbytes(im2col_res)); - ggml_backend_tensor_get(conv1d_res, conv2d_data, 0, ggml_nbytes(conv1d_res)); + ggml_backend_tensor_get(im2col_res, im2col_data.data(), 0, ggml_nbytes(im2col_res)); + ggml_backend_tensor_get(conv1d_res, conv2d_data.data(), 0, ggml_nbytes(conv1d_res)); const int n_conv1d_test = 80; const int n_im2col_test = 240; diff --git a/tests/test-conv2d.cpp b/tests/test-conv2d.cpp index 227f94c3..37942c50 100644 --- a/tests/test-conv2d.cpp +++ b/tests/test-conv2d.cpp @@ -40,17 +40,17 @@ void load_model(test_model & model, bool use_gpu = false) { int IW = 8, IH = 6, N = 1; // Initialize adata - float * adata = new float[KW * KH * IC * OC]; + std::vector<float> adata(KW * KH * IC * OC); for (int i = 0; i < KW * KH * IC * OC; i++) { adata[i] = 2.5f; } // Convert adata to fp16 format std::vector<ggml_fp16_t> hadata(KW * KH * IC * OC); - ggml_fp32_to_fp16_row(adata, hadata.data(), KW * KH * IC * OC); + ggml_fp32_to_fp16_row(adata.data(), hadata.data(), KW * KH * IC * OC); // Initialize bdata - float * bdata = new float[IW * IH * IC * N]; + std::vector<float> bdata(IW * IH * IC * N); for (int i = 0; i < IW * IH * IC * N; i++) { bdata[i] = 1.5f; } @@ -129,9 +129,9 @@ void load_model(test_model & model, bool use_gpu = false) { || ggml_backend_is_metal(model.backend) #endif ) { - memcpy(model.b->data, bdata, ggml_nbytes(model.b)); + memcpy(model.b->data, bdata.data(), ggml_nbytes(model.b)); } else { - ggml_backend_tensor_set(model.b, bdata, 0, ggml_nbytes(model.b)); + ggml_backend_tensor_set(model.b, bdata.data(), 0, ggml_nbytes(model.b)); } } @@ -229,11 +229,11 @@ int main(void) } } - uint16_t* im2col_data = new uint16_t[ggml_nelements(im2col_res)]; - float* conv2d_data = new float[ggml_nelements(conv2d_res)]; + std::vector<uint16_t> im2col_data(ggml_nelements(im2col_res)); + std::vector<float> conv2d_data(ggml_nelements(conv2d_res)); - ggml_backend_tensor_get(im2col_res, im2col_data, 0, 
ggml_nbytes(im2col_res)); - ggml_backend_tensor_get(conv2d_res, conv2d_data, 0, ggml_nbytes(conv2d_res)); + ggml_backend_tensor_get(im2col_res, im2col_data.data(), 0, ggml_nbytes(im2col_res)); + ggml_backend_tensor_get(conv2d_res, conv2d_data.data(), 0, ggml_nbytes(conv2d_res)); const int n_conv2d_test = 480; const int n_im2col_test = 4320; diff --git a/tests/test-mul-mat.cpp b/tests/test-mul-mat.cpp index cd218a2c..f91e172a 100644 --- a/tests/test-mul-mat.cpp +++ b/tests/test-mul-mat.cpp @@ -232,8 +232,8 @@ static void gemm_f16_out_f32(int m, int n, int k, void perform_gemm_test(float* a, float* b, float* expected, int M, int N, int K) { printf("\nPerforming gemm_f16_out_f32 test:\n"); - float* gemm_out = new float[M * N]; - gemm_f16_out_f32(M, N, K, a, b, gemm_out, 0, 1); + std::vector<float> gemm_out(M * N); + gemm_f16_out_f32(M, N, K, a, b, gemm_out.data(), 0, 1); for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { @@ -318,9 +318,9 @@ int main(void) struct ggml_tensor * result = compute(model, allocr); - float* out_data = new float[ggml_nelements(result)]; + std::vector<float> out_data(ggml_nelements(result)); - ggml_backend_tensor_get(result, out_data, 0, ggml_nbytes(result)); + ggml_backend_tensor_get(result, out_data.data(), 0, ggml_nbytes(result)); printf("\nPerforming ggml_mul_mat test:\n");