}
}
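+ // Use std::vector so each result buffer is released automatically when it goes
+ // out of scope; the raw new[] allocations below were never paired with delete[]
+ // and leaked. The same replacement is applied to every buffer in these tests.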
- float* conv1d_transpose_data_0 = new float[ggml_nelements(conv1d_transpose_res_0)];
+ std::vector<float> conv1d_transpose_data_0(ggml_nelements(conv1d_transpose_res_0));
- ggml_backend_tensor_get(conv1d_transpose_res_0, conv1d_transpose_data_0, 0, ggml_nbytes(conv1d_transpose_res_0));
+ ggml_backend_tensor_get(conv1d_transpose_res_0, conv1d_transpose_data_0.data(), 0, ggml_nbytes(conv1d_transpose_res_0));
const int n_conv_transpose_1d_test_0 = 4;
}
}
- float* conv1d_transpose_data_1 = new float[ggml_nelements(conv1d_transpose_res_1)];
+ std::vector<float> conv1d_transpose_data_1(ggml_nelements(conv1d_transpose_res_1));
- ggml_backend_tensor_get(conv1d_transpose_res_1, conv1d_transpose_data_1, 0, ggml_nbytes(conv1d_transpose_res_1));
+ ggml_backend_tensor_get(conv1d_transpose_res_1, conv1d_transpose_data_1.data(), 0, ggml_nbytes(conv1d_transpose_res_1));
}
}
- float* conv1d_transpose_data_2 = new float[ggml_nelements(conv1d_transpose_res_2)];
+ std::vector<float> conv1d_transpose_data_2(ggml_nelements(conv1d_transpose_res_2));
- ggml_backend_tensor_get(conv1d_transpose_res_2, conv1d_transpose_data_2, 0, ggml_nbytes(conv1d_transpose_res_2));
+ ggml_backend_tensor_get(conv1d_transpose_res_2, conv1d_transpose_data_2.data(), 0, ggml_nbytes(conv1d_transpose_res_2));
const int n_conv_transpose_1d_test_2 = 10;
}
}
- float* conv1d_transpose_data_3 = new float[ggml_nelements(conv1d_transpose_res_3)];
+ std::vector<float> conv1d_transpose_data_3(ggml_nelements(conv1d_transpose_res_3));
- ggml_backend_tensor_get(conv1d_transpose_res_3, conv1d_transpose_data_3, 0, ggml_nbytes(conv1d_transpose_res_3));
+ ggml_backend_tensor_get(conv1d_transpose_res_3, conv1d_transpose_data_3.data(), 0, ggml_nbytes(conv1d_transpose_res_3));
const int n_conv_transpose_1d_test_3 = 14;
}
}
- float* conv1d_transpose_data_4 = new float[ggml_nelements(conv1d_transpose_res_4)];
+ std::vector<float> conv1d_transpose_data_4(ggml_nelements(conv1d_transpose_res_4));
- ggml_backend_tensor_get(conv1d_transpose_res_4, conv1d_transpose_data_4, 0, ggml_nbytes(conv1d_transpose_res_4));
+ ggml_backend_tensor_get(conv1d_transpose_res_4, conv1d_transpose_data_4.data(), 0, ggml_nbytes(conv1d_transpose_res_4));
const int n_conv_transpose_1d_test_4 = 12;
}
}
- float* conv1d_transpose_data_5 = new float[ggml_nelements(conv1d_transpose_res_5)];
+ std::vector<float> conv1d_transpose_data_5(ggml_nelements(conv1d_transpose_res_5));
- ggml_backend_tensor_get(conv1d_transpose_res_5, conv1d_transpose_data_5, 0, ggml_nbytes(conv1d_transpose_res_5));
+ ggml_backend_tensor_get(conv1d_transpose_res_5, conv1d_transpose_data_5.data(), 0, ggml_nbytes(conv1d_transpose_res_5));
const int n_conv_transpose_1d_test_5 = 18;
}
}
- float* conv1d_transpose_data_6 = new float[ggml_nelements(conv1d_transpose_res_6)];
+ std::vector<float> conv1d_transpose_data_6(ggml_nelements(conv1d_transpose_res_6));
- ggml_backend_tensor_get(conv1d_transpose_res_6, conv1d_transpose_data_6, 0, ggml_nbytes(conv1d_transpose_res_6));
+ ggml_backend_tensor_get(conv1d_transpose_res_6, conv1d_transpose_data_6.data(), 0, ggml_nbytes(conv1d_transpose_res_6));
const int n_conv_transpose_1d_test_6 = 24;
}
}
- float* conv1d_transpose_data_7 = new float[ggml_nelements(conv1d_transpose_res_7)];
+ std::vector<float> conv1d_transpose_data_7(ggml_nelements(conv1d_transpose_res_7));
- ggml_backend_tensor_get(conv1d_transpose_res_7, conv1d_transpose_data_7, 0, ggml_nbytes(conv1d_transpose_res_7));
+ ggml_backend_tensor_get(conv1d_transpose_res_7, conv1d_transpose_data_7.data(), 0, ggml_nbytes(conv1d_transpose_res_7));
const int n_conv_transpose_1d_test_7 = 32*1584;
int IL = 8, N = 1;
// Initialize adata
- float * adata = new float[K * IC * OC];
+ std::vector<float> adata(K * IC * OC);
for (int i = 0; i < K * IC * OC; i++) {
adata[i] = 4.5f;
}
// Convert adata to fp16 format
std::vector<ggml_fp16_t> hadata(K * IC * OC);
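+ // ggml_fp32_to_fp16_row takes raw pointers, so pass the vectors' .data()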
- ggml_fp32_to_fp16_row(adata, hadata.data(), K * IC * OC);
+ ggml_fp32_to_fp16_row(adata.data(), hadata.data(), K * IC * OC);
// Initialize bdata
- float * bdata = new float[IL * IC * N];
+ std::vector<float> bdata(IL * IC * N);
for (int i = 0; i < IL * IC * N; i++) {
bdata[i] = 2.5f;
}
|| ggml_backend_is_metal(model.backend)
#endif
) {
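+ // CPU and Metal tensors live in host-addressable memory, so we can write
+ // directly into model.b->data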
- memcpy(model.b->data, bdata, ggml_nbytes(model.b));
+ memcpy(model.b->data, bdata.data(), ggml_nbytes(model.b));
} else {
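+ // other backends may keep tensor data off the host, so copy through
+ // ggml_backend_tensor_set instead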
- ggml_backend_tensor_set(model.b, bdata, 0, ggml_nbytes(model.b));
+ ggml_backend_tensor_set(model.b, bdata.data(), 0, ggml_nbytes(model.b));
}
}
}
}
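+ // the im2col result is F16, so it is read back into 16-bit elements, while the
+ // convolution result is read back as F32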
- uint16_t* im2col_data = new uint16_t[ggml_nelements(im2col_res)];
- float* conv2d_data = new float[ggml_nelements(conv1d_res)];
+ std::vector<uint16_t> im2col_data(ggml_nelements(im2col_res));
+ std::vector<float> conv2d_data(ggml_nelements(conv1d_res));
- ggml_backend_tensor_get(im2col_res, im2col_data, 0, ggml_nbytes(im2col_res));
- ggml_backend_tensor_get(conv1d_res, conv2d_data, 0, ggml_nbytes(conv1d_res));
+ ggml_backend_tensor_get(im2col_res, im2col_data.data(), 0, ggml_nbytes(im2col_res));
+ ggml_backend_tensor_get(conv1d_res, conv2d_data.data(), 0, ggml_nbytes(conv1d_res));
const int n_conv1d_test = 80;
const int n_im2col_test = 240;
int IW = 8, IH = 6, N = 1;
// Initialize adata
- float * adata = new float[KW * KH * IC * OC];
+ std::vector<float> adata(KW * KH * IC * OC);
for (int i = 0; i < KW * KH * IC * OC; i++) {
adata[i] = 2.5f;
}
// Convert adata to fp16 format
std::vector<ggml_fp16_t> hadata(KW * KH * IC * OC);
- ggml_fp32_to_fp16_row(adata, hadata.data(), KW * KH * IC * OC);
+ ggml_fp32_to_fp16_row(adata.data(), hadata.data(), KW * KH * IC * OC);
// Initialize bdata
- float * bdata = new float[IW * IH * IC * N];
+ std::vector<float> bdata(IW * IH * IC * N);
for (int i = 0; i < IW * IH * IC * N; i++) {
bdata[i] = 1.5f;
}
|| ggml_backend_is_metal(model.backend)
#endif
) {
- memcpy(model.b->data, bdata, ggml_nbytes(model.b));
+ memcpy(model.b->data, bdata.data(), ggml_nbytes(model.b));
} else {
- ggml_backend_tensor_set(model.b, bdata, 0, ggml_nbytes(model.b));
+ ggml_backend_tensor_set(model.b, bdata.data(), 0, ggml_nbytes(model.b));
}
}
}
}
- uint16_t* im2col_data = new uint16_t[ggml_nelements(im2col_res)];
- float* conv2d_data = new float[ggml_nelements(conv2d_res)];
+ std::vector<uint16_t> im2col_data(ggml_nelements(im2col_res));
+ std::vector<float> conv2d_data(ggml_nelements(conv2d_res));
- ggml_backend_tensor_get(im2col_res, im2col_data, 0, ggml_nbytes(im2col_res));
- ggml_backend_tensor_get(conv2d_res, conv2d_data, 0, ggml_nbytes(conv2d_res));
+ ggml_backend_tensor_get(im2col_res, im2col_data.data(), 0, ggml_nbytes(im2col_res));
+ ggml_backend_tensor_get(conv2d_res, conv2d_data.data(), 0, ggml_nbytes(conv2d_res));
const int n_conv2d_test = 480;
const int n_im2col_test = 4320;
void perform_gemm_test(float* a, float* b, float* expected, int M, int N, int K) {
printf("\nPerforming gemm_f16_out_f32 test:\n");
- float* gemm_out = new float[M * N];
- gemm_f16_out_f32(M, N, K, a, b, gemm_out, 0, 1);
+ std::vector<float> gemm_out(M * N);
+ gemm_f16_out_f32(M, N, K, a, b, gemm_out.data(), 0, 1);
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
struct ggml_tensor * result = compute(model, allocr);
- float* out_data = new float[ggml_nelements(result)];
+ std::vector<float> out_data(ggml_nelements(result));
- ggml_backend_tensor_get(result, out_data, 0, ggml_nbytes(result));
+ ggml_backend_tensor_get(result, out_data.data(), 0, ggml_nbytes(result));
printf("\nPerforming ggml_mul_mat test:\n");