From: Georgi Gerganov
Date: Wed, 5 Jul 2023 17:38:20 +0000 (+0300)
Subject: tests : sync from llama.cpp and disable some obsolete tests
X-Git-Tag: upstream/0.0.1642~1347
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=d8fbf15c60a2e7136b9e3eea11a7ebb51ee8ab07;p=pkg%2Fggml%2Fsources%2Fggml

tests : sync from llama.cpp and disable some obsolete tests
---

diff --git a/scripts/sync-llama.sh b/scripts/sync-llama.sh
index 9bccd91d..0c6c424e 100755
--- a/scripts/sync-llama.sh
+++ b/scripts/sync-llama.sh
@@ -9,3 +9,8 @@ cp -rpv ../llama.cpp/ggml-metal.h     src/ggml-metal.h
 cp -rpv ../llama.cpp/ggml-metal.m     src/ggml-metal.m
 cp -rpv ../llama.cpp/ggml-metal.metal src/ggml-metal.metal
 cp -rpv ../llama.cpp/ggml.h           include/ggml/ggml.h
+
+cp -rpv ../llama.cpp/tests/test-opt.c             tests/test-opt.c
+cp -rpv ../llama.cpp/tests/test-grad0.c           tests/test-grad0.c
+cp -rpv ../llama.cpp/tests/test-quantize-fns.cpp  tests/test-quantize-fns.cpp
+cp -rpv ../llama.cpp/tests/test-quantize-perf.cpp tests/test-quantize-perf.cpp

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 07cac88c..89c2aa6d 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -132,7 +132,6 @@ endif()
 set(TEST_TARGET test-vec0)
 add_executable(${TEST_TARGET} ${TEST_TARGET}.c)
 target_link_libraries(${TEST_TARGET} PRIVATE ggml)
-add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}>)
 
 #
 # test-vec1 (x86)
@@ -140,7 +139,6 @@ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86")
     set(TEST_TARGET test-vec1)
     add_executable(${TEST_TARGET} ${TEST_TARGET}.c)
     target_link_libraries(${TEST_TARGET} PRIVATE ggml)
-    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}>)
     #set_target_properties(${TEST_TARGET} PROPERTIES COMPILE_FLAGS "-mavx -mavx2 -mfma -mf16c")
     set_target_properties(${TEST_TARGET} PROPERTIES COMPILE_FLAGS ${GGML_C_FLAGS})
 endif()
@@ -151,7 +149,6 @@ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm")
     set(TEST_TARGET test-vec2)
     add_executable(${TEST_TARGET} ${TEST_TARGET}.c)
     target_link_libraries(${TEST_TARGET} PRIVATE ggml)
-    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}>)
 endif()
 
 #
@@ -170,6 +167,22 @@ add_executable(${TEST_TARGET} ${TEST_TARGET}.c)
 target_link_libraries(${TEST_TARGET} PRIVATE ggml)
 add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}>)
 
+#
+# test-quantize-fns
+
+set(TEST_TARGET test-quantize-fns)
+add_executable(${TEST_TARGET} ${TEST_TARGET}.cpp)
+target_link_libraries(${TEST_TARGET} PRIVATE ggml)
+add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}>)
+
+#
+# test-quantize-perf
+
+set(TEST_TARGET test-quantize-perf)
+add_executable(${TEST_TARGET} ${TEST_TARGET}.cpp)
+target_link_libraries(${TEST_TARGET} PRIVATE ggml)
+add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}>)
+
 #
 # test-mul-mat0
@@ -187,7 +200,6 @@ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" AND NOT GGML_NO_ACCELERATE)
     add_executable(${TEST_TARGET} ${TEST_TARGET}.c)
     target_link_libraries(${TEST_TARGET} PRIVATE ggml ${GGML_EXTRA_LIBS})
     target_compile_options(${TEST_TARGET} PRIVATE ${GGML_EXTRA_FLAGS})
-    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}>)
 endif()
 
 #
@@ -252,12 +264,10 @@ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" AND NOT GGML_NO_ACCELERATE)
     add_executable(${TEST_TARGET} ${TEST_TARGET}.c)
     target_link_libraries(${TEST_TARGET} PRIVATE ggml ${GGML_EXTRA_LIBS})
     target_compile_options(${TEST_TARGET} PRIVATE ${GGML_EXTRA_FLAGS})
-    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}>)
 elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86" AND GGML_OPENBLAS)
     set(TEST_TARGET test-svd0)
     add_executable(${TEST_TARGET} ${TEST_TARGET}.c)
    target_link_libraries(${TEST_TARGET} PRIVATE ggml ${GGML_EXTRA_LIBS})
     target_compile_options(${TEST_TARGET} PRIVATE ${GGML_EXTRA_FLAGS})
-    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}>)
 endif()
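With the CMakeLists.txt changes above, the vec and Accelerate/OpenBLAS test binaries are still built, but their add_test registrations are removed, so they drop out of the CTest suite; the two new quantization tests are registered in their place. A plausible way to build and run just the new tests (the build directory name is illustrative, not part of the commit):

    cmake -B build
    cmake --build build
    cd build && ctest -R test-quantize --output-on-failure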
diff --git a/tests/test-quantize-fns.cpp b/tests/test-quantize-fns.cpp
new file mode 100644
index 00000000..8d3c162d
--- /dev/null
+++ b/tests/test-quantize-fns.cpp
@@ -0,0 +1,164 @@
+// Unit tests for quantization specific functions - quantize, dequantize and dot product
+
+#include "ggml.h"
+
+#undef NDEBUG
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+#include <string>
+#include <vector>
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
+const float MAX_QUANTIZATION_REFERENCE_ERROR = 0.0001f;
+const float MAX_QUANTIZATION_TOTAL_ERROR = 0.002f;
+const float MAX_QUANTIZATION_TOTAL_ERROR_2BITS = 0.0075f;
+const float MAX_QUANTIZATION_TOTAL_ERROR_3BITS = 0.0040f;
+const float MAX_DOT_PRODUCT_ERROR = 0.02f;
+
+const char* RESULT_STR[] = {"ok", "FAILED"};
+
+
+// Generate synthetic data
+void generate_data(float offset, size_t n, float * dst) {
+    for (size_t i = 0; i < n; i++) {
+        dst[i] = 0.1 + 2*cosf(i + offset);
+    }
+}
+
+// Error metric between two float arrays: sqrt of the squared-difference sum, divided by n
+float array_rmse(const float * a1, const float * a2, size_t n) {
+    double sum = 0;
+    for (size_t i = 0; i < n; i++) {
+        double diff = a1[i] - a2[i];
+        sum += diff * diff;
+    }
+    return sqrtf(sum) / n;
+}
+
+// Total quantization error on test data
+float total_quantization_error(ggml_type_traits_t & qfns, size_t test_size, const float * test_data) {
+    std::vector<uint8_t> tmp_q(2*test_size);
+    std::vector<float> tmp_out(test_size);
+
+    qfns.from_float(test_data, tmp_q.data(), test_size);
+    qfns.to_float(tmp_q.data(), tmp_out.data(), test_size);
+    return array_rmse(test_data, tmp_out.data(), test_size);
+}
+
+// Error of the optimized quantization relative to the reference implementation on test data
+float reference_quantization_error(ggml_type_traits_t & qfns, size_t test_size, const float * test_data) {
+    std::vector<uint8_t> tmp_q(2*test_size);
+    std::vector<float> tmp_out(test_size);
+    std::vector<float> tmp_out_ref(test_size);
+
+    qfns.from_float(test_data, tmp_q.data(), test_size);
+    qfns.to_float(tmp_q.data(), tmp_out.data(), test_size);
+
+    qfns.from_float_reference(test_data, tmp_q.data(), test_size);
+    qfns.to_float(tmp_q.data(), tmp_out_ref.data(), test_size);
+
+    return array_rmse(tmp_out.data(), tmp_out_ref.data(), test_size);
+}
+
+float dot_product(const float * a1, const float * a2, size_t test_size) {
+    double sum = 0;
+    for (size_t i = 0; i < test_size; i++) {
+        sum += a1[i] * a2[i];
+    }
+    return sum;
+}
+
+// Total dot product error
+float dot_product_error(ggml_type_traits_t & qfns, size_t test_size, const float * test_data1, const float * test_data2) {
+    std::vector<uint8_t> tmp_q1(2*test_size);
+    std::vector<uint8_t> tmp_q2(2*test_size);
+
+    auto vdot = ggml_internal_get_type_traits(qfns.vec_dot_type);
+
+    qfns.from_float(test_data1, tmp_q1.data(), test_size);
+    vdot.from_float(test_data2, tmp_q2.data(), test_size);
+
+    float result = INFINITY;
+    qfns.vec_dot(test_size, &result, tmp_q1.data(), tmp_q2.data());
+
+    const float dot_ref = dot_product(test_data1, test_data2, test_size);
+
+    return fabsf(result - dot_ref) / test_size;
+}
+
+int main(int argc, char * argv[]) {
+    bool verbose = false;
+    const size_t test_size = 32 * 128;
+
+    std::string arg;
+    for (int i = 1; i < argc; i++) {
+        arg = argv[i];
+
+        if (arg == "-v") {
+            verbose = true;
+        } else {
+            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
+            return 1;
+        }
+    }
+
+    std::vector<float> test_data(test_size);
+    std::vector<float> test_data2(test_size);
+
+    generate_data(0.0, test_data.size(), test_data.data());
+    generate_data(1.0, test_data2.size(), test_data2.data());
+
+    // Initialize GGML, ensures float conversion tables are initialized
+    struct ggml_init_params ggml_params = {
+        /* .mem_size   = */ 1*1024,
+        /* .mem_buffer = */ NULL,
+        /* .no_alloc   = */ true,
+    };
+    struct ggml_context * ctx = ggml_init(ggml_params);
+
+    int num_failed = 0;
+    bool failed = false;
+
+    for (int i = 0; i < GGML_TYPE_COUNT; i++) {
+        ggml_type type = (ggml_type) i;
+        ggml_type_traits_t qfns = ggml_internal_get_type_traits(type);
+
+        if (qfns.from_float && qfns.to_float) {
+            const float total_error = total_quantization_error(qfns, test_size, test_data.data());
+            const float max_quantization_error =
+                type == GGML_TYPE_Q2_K ? MAX_QUANTIZATION_TOTAL_ERROR_2BITS :
+                type == GGML_TYPE_Q3_K ? MAX_QUANTIZATION_TOTAL_ERROR_3BITS : MAX_QUANTIZATION_TOTAL_ERROR;
+            failed = !(total_error < max_quantization_error);
+            num_failed += failed;
+            if (failed || verbose) {
+                printf("%5s absolute quantization error:    %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], total_error);
+            }
+
+            const float reference_error = reference_quantization_error(qfns, test_size, test_data.data());
+            failed = !(reference_error < MAX_QUANTIZATION_REFERENCE_ERROR);
+            num_failed += failed;
+            if (failed || verbose) {
+                printf("%5s reference implementation error: %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], reference_error);
+            }
+
+            const float vec_dot_error = dot_product_error(qfns, test_size, test_data.data(), test_data2.data());
+            failed = !(vec_dot_error < MAX_DOT_PRODUCT_ERROR);
+            num_failed += failed;
+            if (failed || verbose) {
+                printf("%5s dot product error:              %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], vec_dot_error);
+            }
+        }
+    }
+
+    if (num_failed || verbose) {
+        printf("%d tests failed\n", num_failed);
+    }
+
+    ggml_free(ctx);
+
+    return num_failed > 0;
+}
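The core of the test above is a quantize/dequantize round trip through a type's trait functions, compared against the float input. A minimal standalone sketch of that round trip for a single type, assuming it is compiled as C++ and linked against this revision of ggml (which exposes ggml_internal_get_type_traits() in ggml.h, as the test itself relies on); ggml_init() must run first so the f16 conversion tables exist:

    #include "ggml.h"

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <vector>

    int main() {
        // a tiny no-alloc context is enough; ggml_init() builds the f16 tables
        struct ggml_init_params params = {
            /* .mem_size   = */ 1024,
            /* .mem_buffer = */ NULL,
            /* .no_alloc   = */ true,
        };
        struct ggml_context * ctx = ggml_init(params);

        const size_t n = 32 * 128;           // multiple of the block size
        std::vector<float>   src(n), out(n);
        std::vector<uint8_t> q(2 * n);       // generous upper bound for the quantized buffer

        for (size_t i = 0; i < n; i++) {
            src[i] = 0.1f + 2.0f*cosf(i);    // same synthetic signal as generate_data()
        }

        ggml_type_traits_t t = ggml_internal_get_type_traits(GGML_TYPE_Q4_0);
        t.from_float(src.data(), q.data(), n); // quantize
        t.to_float(q.data(), out.data(), n);   // dequantize

        double sum = 0;
        for (size_t i = 0; i < n; i++) {
            const double d = src[i] - out[i];
            sum += d*d;
        }
        printf("q4_0 round-trip error: %f\n", sqrt(sum)/n); // same metric as array_rmse()

        ggml_free(ctx);
        return 0;
    }

For this synthetic signal the test above expects the printed value to stay below MAX_QUANTIZATION_TOTAL_ERROR (0.002) for q4_0.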
diff --git a/tests/test-quantize-perf.cpp b/tests/test-quantize-perf.cpp
new file mode 100644
index 00000000..0bb9537f
--- /dev/null
+++ b/tests/test-quantize-perf.cpp
@@ -0,0 +1,362 @@
+// Benchmark quantization specific functions on synthetic data
+
+#include "ggml.h"
+
+#undef NDEBUG
+#include <algorithm>
+#include <assert.h>
+#include <functional>
+#include <inttypes.h>
+#include <math.h>
+#include <memory>
+#include <stdio.h>
+#include <string>
+#include <vector>
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
+#define MAX_ALIGNMENT 64
+#define QK 32
+#define WARMUP 5
+#define ITERATIONS 10
+#define MAX_ITERATIONS 100000000
+
+#define L1_SIZE      32*128
+#define L2_SIZE     32*2048
+#define L3_SIZE    32*20480
+#define MEM_SIZE 32*2048000
+
+struct quantize_perf_params {
+    std::vector<std::string> include_types;
+    std::vector<size_t> test_sizes;
+    size_t alignment_offset = 0;
+    bool op_quantize_row_q_reference = false;
+    bool op_quantize_row_q = false;
+    bool op_dequantize_row_q = false;
+    bool op_quantize_row_q_dot = false;
+    bool op_vec_dot_q = false;
+    int64_t iterations = ITERATIONS;
+};
+
+#if defined(__x86_64__) || defined(__i386__)
+
+#include <x86intrin.h>
+inline int64_t cpu_cycles() {
+// Rough way to detect new-ish CPUs
+#ifdef __POPCNT__
+    unsigned int dummy;
+    return __rdtscp(&dummy);
+#else
+    return __rdtsc();
+#endif
+}
+
+#else
+
+#define cpu_cycles() 0
+
+#endif
+
+
+// Generate synthetic data
+void generate_data(float offset, size_t n, float * dst) {
+    for (size_t i = 0; i < n; i++) {
+        dst[i] = 0.1 + 2*cosf(i + offset);
+    }
+}
+
+float gigabytes_per_second(size_t bytes, int64_t usecs) {
+    return bytes / (float) usecs * 1000000 / (1024*1024*1024);
+}
+
+void * align_with_offset(void * ptr, int offset) {
+    size_t dummy_size = MAX_ALIGNMENT * 4;
+    return (char *) std::align(MAX_ALIGNMENT, MAX_ALIGNMENT, ptr, dummy_size) + offset;
+}
offset; +} + +void benchmark_function(size_t size, size_t q_size, int64_t iterations, std::function function) { + int64_t min_time_us = INT64_MAX; + int64_t total_time_us = 0; + int64_t min_time_cycles = INT64_MAX; + int64_t total_time_cycles = 0; + + for (int i = 0; i < WARMUP; i++) { + function(); + } + + + for (int i = 0; i < iterations; i++) { + const int64_t start_time = ggml_time_us(); + const int64_t start_cycles = cpu_cycles(); + + function(); + + const int64_t end_cycles = cpu_cycles(); + const int64_t end_time = ggml_time_us(); + + total_time_cycles += end_cycles - start_cycles; + min_time_cycles = std::min(min_time_cycles, end_cycles - start_cycles); + total_time_us += end_time - start_time; + min_time_us = std::min(min_time_us, end_time - start_time); + } + + printf(" min cycles/%d vals : %9.2f\n", QK, QK * min_time_cycles / (float) size); + printf(" avg cycles/%d vals : %9.2f\n", QK, QK * total_time_cycles / (float) (size * iterations)); + printf(" float32 throughput : %9.2f GB/s\n", gigabytes_per_second(4 * size * iterations, total_time_us)); + printf(" quantized throughput : %9.2f GB/s\n", gigabytes_per_second(q_size * iterations, total_time_us)); +} + +void usage(char * argv[]) { + printf("Benchmark quantization specific functions on synthetic data\n"); + printf("\n"); + printf("usage: %s [options]\n", argv[0]); + printf("\n"); + printf("options: (default)\n"); + printf(" -h, --help show this help message and exit\n"); + printf(" --size SIZE set test size, divisible by 32 (L1_SIZE:%d)\n", L1_SIZE); + printf(" -3 use size as L1, L2, L3 sizes (L1:%d L2:%d L3:%d)\n", L1_SIZE, L2_SIZE, L3_SIZE); + printf(" -4 use size as L1, L2, L3, MEM sizes (L1:%d L2:%d L3:%d MEM:%d)\n", L1_SIZE, L2_SIZE, L3_SIZE, MEM_SIZE); + printf(" --op OP set test opration as quantize_row_q_reference, quantize_row_q, dequantize_row_q,\n"); + printf(" quantize_row_q_dot, vec_dot_q (all)\n"); + printf(" --type TYPE set test type as"); + for (int i = 0; i < GGML_TYPE_COUNT; i++) { + ggml_type type = (ggml_type) i; + ggml_type_traits_t qfns = ggml_internal_get_type_traits(type); + if (ggml_type_name(type) != NULL) { + if (qfns.from_float && qfns.to_float) { + printf(" %s", ggml_type_name(type)); + } + } + } + printf(" (all)\n"); + printf(" --alignment-offset OFFSET\n"); + printf(" set alignment offset as OFFSET (0)\n"); + printf(" -i NUM, --iterations NUM\n"); + printf(" set test iteration number (%d)\n", ITERATIONS); +} + +int main(int argc, char * argv[]) { + quantize_perf_params params {}; + + // read command line + + bool invalid_param = false; + std::string arg; + for (int i = 1; i < argc; i++) { + arg = argv[i]; + + if (arg == "--size") { + if (++i >= argc) { + invalid_param = true; + break; + } + size_t size = std::stoi(argv[i]); + if (size % 32 != 0) { + fprintf(stderr, "error: size %zu not divisible by 32\n", size); + invalid_param = true; + break; + } + params.test_sizes.push_back(size); + } else if (arg == "-3") { + // quick select sizes that probably fit in CPU caches + params.test_sizes.push_back(L1_SIZE); + params.test_sizes.push_back(L2_SIZE); + params.test_sizes.push_back(L3_SIZE); + } else if (arg == "-4") { + // quick select cache sizes + memory + params.test_sizes.push_back(L1_SIZE); + params.test_sizes.push_back(L2_SIZE); + params.test_sizes.push_back(L3_SIZE); + params.test_sizes.push_back(MEM_SIZE); + } else if (arg == "--op") { + if (++i >= argc) { + invalid_param = true; + break; + } + std::string op {argv[i]}; + if (op == "quantize_row_q_reference") { + 
+                params.op_quantize_row_q_reference = true;
+            } else if (op == "quantize_row_q") {
+                params.op_quantize_row_q = true;
+            } else if (op == "dequantize_row_q") {
+                params.op_dequantize_row_q = true;
+            } else if (op == "quantize_row_q_dot") {
+                params.op_quantize_row_q_dot = true;
+            } else if (op == "vec_dot_q") {
+                params.op_vec_dot_q = true;
+            } else {
+                invalid_param = true;
+                break;
+            }
+        } else if (arg == "--type") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.include_types.push_back(argv[i]);
+        } else if (arg == "--alignment-offset") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            int alignment = std::stoi(argv[i]);
+            if (alignment < 0 || alignment > MAX_ALIGNMENT) {
+                fprintf(stderr, "error: alignment-offset must be less than %d\n", MAX_ALIGNMENT);
+                invalid_param = true;
+                break;
+            }
+            params.alignment_offset = alignment;
+        } else if ((arg == "-i") || (arg == "--iterations")) {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            int number = std::stoi(argv[i]);
+            if (number < 0 || number > MAX_ITERATIONS) {
+                fprintf(stderr, "error: iterations must be less than %d\n", MAX_ITERATIONS);
+                invalid_param = true;
+                break;
+            }
+            params.iterations = number;
+        } else if ((arg == "-h") || (arg == "--help")) {
+            usage(argv);
+            return 1;
+        } else {
+            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
+            return 1;
+        }
+    }
+    if (invalid_param) {
+        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
+        return 1;
+    }
+
+    if (params.test_sizes.empty()) {
+        params.test_sizes.push_back(L1_SIZE);
+    }
+    if (!(params.op_quantize_row_q_reference || params.op_quantize_row_q || params.op_dequantize_row_q || params.op_quantize_row_q_dot || params.op_vec_dot_q)) {
+        params.op_quantize_row_q_reference = params.op_quantize_row_q = params.op_dequantize_row_q = params.op_quantize_row_q_dot = params.op_vec_dot_q = true;
+    }
+
+    std::sort(params.test_sizes.begin(), params.test_sizes.end());
+    size_t largest = params.test_sizes.back();
+
+    std::vector<uint8_t> test_data1_v(largest*4 + MAX_ALIGNMENT*2);
+    std::vector<uint8_t> test_data2_v(largest*4 + MAX_ALIGNMENT*2);
+    std::vector<uint8_t> test_q1_v(largest*4 + MAX_ALIGNMENT*2);
+    std::vector<uint8_t> test_q2_v(largest*4 + MAX_ALIGNMENT*2);
+    std::vector<uint8_t> test_out_v(largest*4 + MAX_ALIGNMENT*2);
+
+    float * test_data1 = (float *) align_with_offset(test_data1_v.data(), params.alignment_offset);
+    float * test_data2 = (float *) align_with_offset(test_data2_v.data(), params.alignment_offset);
+    float * test_q1   = (float *) align_with_offset(test_q1_v.data(), params.alignment_offset);
+    float * test_q2   = (float *) align_with_offset(test_q2_v.data(), params.alignment_offset);
+    float * test_out  = (float *) align_with_offset(test_out_v.data(), params.alignment_offset);
+
+    generate_data(0, largest, test_data1);
+    generate_data(1, largest, test_data2);
+
+    int64_t iterations = params.iterations;
+
+
+    // Initialize GGML, ensures float conversion tables are initialized
+    struct ggml_init_params ggml_params = {
+        /* .mem_size   = */ 1*1024,
+        /* .mem_buffer = */ NULL,
+        /* .no_alloc   = */ true,
+    };
+    struct ggml_context * ctx = ggml_init(ggml_params);
+
+    for (int i = 0; i < GGML_TYPE_COUNT; i++) {
+        ggml_type type = (ggml_type) i;
+        ggml_type_traits_t qfns = ggml_internal_get_type_traits(type);
+        if (!params.include_types.empty() && ggml_type_name(type) && std::find(params.include_types.begin(), params.include_types.end(), ggml_type_name(type)) == params.include_types.end()) {
+            continue;
+        }
+
+        if (qfns.from_float && qfns.to_float) {
printf("%s\n", ggml_type_name(type)); + + if (params.op_quantize_row_q_reference) { + printf(" quantize_row_q_reference\n"); + for (size_t size : params.test_sizes) { + printf(" %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024)); + auto quantize_fn = [&](void ) { + qfns.from_float_reference(test_data1, test_q1, size); + return test_q1[0]; + }; + size_t quantized_size = size / ggml_blck_size(type) * ggml_type_size(type); + benchmark_function(size, quantized_size, iterations, quantize_fn); + } + printf("\n"); + } + + if (params.op_quantize_row_q) { + printf(" quantize_row_q\n"); + for (size_t size : params.test_sizes) { + printf(" %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024)); + auto quantize_fn = [&](void ) { + qfns.from_float(test_data1, test_q1, size); + return test_q1[0]; + }; + size_t quantized_size = size / ggml_blck_size(type) * ggml_type_size(type); + benchmark_function(size, quantized_size, iterations, quantize_fn); + } + printf("\n"); + } + + if (params.op_dequantize_row_q) { + printf(" dequantize_row_q\n"); + qfns.from_float(test_data1, test_q1, largest); + for (size_t size : params.test_sizes) { + printf(" %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024)); + auto quantize_fn = [&](void ) { + qfns.to_float(test_q1, test_out, size); + return test_out[0]; + }; + size_t quantized_size = size / ggml_blck_size(type) * ggml_type_size(type); + benchmark_function(size, quantized_size, iterations, quantize_fn); + } + printf("\n"); + } + + if (params.op_quantize_row_q_dot) { + printf(" quantize_row_q_dot\n"); + for (size_t size : params.test_sizes) { + printf(" %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024)); + auto quantize_fn = [&](void ) { + auto vdot = ggml_internal_get_type_traits(qfns.vec_dot_type); + vdot.from_float(test_data1, test_q1, size); + return test_q1[0]; + }; + size_t quantized_size = size / ggml_blck_size(type) * ggml_type_size(type); + benchmark_function(size, quantized_size, iterations, quantize_fn); + } + printf("\n"); + } + + if (params.op_vec_dot_q) { + printf(" vec_dot_q\n"); + qfns.from_float(test_data1, test_q1, largest); + qfns.from_float(test_data2, test_q2, largest); + for (size_t size : params.test_sizes) { + printf(" %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024)); + auto quantize_fn = [&](void ) { + float result; + qfns.vec_dot(size, &result, test_q1, test_q2); + return result; + }; + size_t quantized_size = size / ggml_blck_size(type) * ggml_type_size(type); + benchmark_function(size, quantized_size, iterations, quantize_fn); + } + printf("\n"); + } + } + } + + ggml_free(ctx); + + return 0; +}