test-opt: allow slight imprecision (#15503)
author Johannes Gäßler <redacted>
Fri, 22 Aug 2025 21:47:01 +0000 (23:47 +0200)
committer GitHub <redacted>
Fri, 22 Aug 2025 21:47:01 +0000 (23:47 +0200)
tests/test-opt.cpp

index f02b4cad8c6741f59c1ff8a38d10c5bf854a5bf1..18d3fcf2cb948ff69f06863c44c1d478da59e5f4 100644 (file)
@@ -358,7 +358,7 @@ static std::pair<int, int> test_forward_backward(
         double accuracy;
         double accuracy_unc;
         ggml_opt_result_accuracy(cd.result, &accuracy, &accuracy_unc);
-        const bool subtest_ok = ndata == 0 && loss == 0.0 && std::isnan(loss_unc) && std::isnan(accuracy) && std::isnan(accuracy_unc);
+        const bool subtest_ok = ndata == 0 && almost_equal(loss, 0.0, 1e-6) && std::isnan(loss_unc) && std::isnan(accuracy) && std::isnan(accuracy_unc);
         helper_after_test_forward_backward(optim, __func__, high_level, shuffle, "results_initial", subtest_ok, ntest, npass);
     }
 
@@ -381,10 +381,12 @@ static std::pair<int, int> test_forward_backward(
     {
         float weights;
         ggml_backend_tensor_get(cd.weights, &weights, 0, sizeof(float));
-        const bool subtest_ok = weights == ndata/2;
+        const bool subtest_ok = almost_equal(weights, ndata/2, 1e-10);
         helper_after_test_forward_backward(optim, __func__, high_level, shuffle, "weights_after_forward", subtest_ok, ntest, npass);
     }
     {
+        constexpr double atol = 1e-10;
+
         int64_t ndata;
         ggml_opt_result_ndata(cd.result, &ndata);
         bool subtest_ok = ndata == 6;
@@ -392,7 +394,7 @@ static std::pair<int, int> test_forward_backward(
         double loss;
         double loss_unc;
         ggml_opt_result_loss(cd.result, &loss, &loss_unc);
-        subtest_ok = subtest_ok && loss == 33.0 && almost_equal(loss_unc, sqrt(3.5), 1e-10);
+        subtest_ok = subtest_ok && almost_equal(loss, 33.0, atol) && almost_equal(loss_unc, sqrt(3.5), atol);
 
         double accuracy;
         double accuracy_unc;
@@ -437,7 +439,7 @@ static std::pair<int, int> test_forward_backward(
     {
         float weights;
         ggml_backend_tensor_get(cd.weights, &weights, 0, sizeof(float));
-        const bool subtest_ok = weights == -ndata * .5;
+        const bool subtest_ok = almost_equal(weights, -ndata * 0.5, 1e-10);
         helper_after_test_forward_backward(optim, __func__, high_level, shuffle, "weights_after_forward_backward", subtest_ok, ntest, npass);
     }
     {
@@ -448,7 +450,7 @@ static std::pair<int, int> test_forward_backward(
         double loss;
         double loss_unc;
         ggml_opt_result_loss(cd.result, &loss, &loss_unc);
-        subtest_ok = subtest_ok && loss == 18.0 && (shuffle || loss_unc == 0.0);
+        subtest_ok = subtest_ok && almost_equal(loss, 18.0, 1e-10) && (shuffle || loss_unc == 0.0);
 
         double accuracy;
         double accuracy_unc;
@@ -550,10 +552,12 @@ static std::pair<int, int> test_idata_split(
         if (adamw) {
             float weights;
             ggml_backend_tensor_get(cd.weights, &weights, 0, sizeof(float));
-            const bool subtest_ok = weights == ndata/2 - epoch*idata_split;
+            const bool subtest_ok = almost_equal(weights, ndata/2 - epoch*idata_split, 1e-10);
             helper_after_test_idata_split(optim, __func__, high_level, epoch, "weights", subtest_ok, ntest, npass);
         }
         if (adamw) {
+            constexpr double atol = 1e-10;
+
             int64_t ndata_result;
             ggml_opt_result_ndata(cd.result, &ndata_result);
             bool subtest_ok = ndata_result == idata_split;
@@ -561,7 +565,7 @@ static std::pair<int, int> test_idata_split(
             double loss;
             double loss_unc;
             ggml_opt_result_loss(cd.result, &loss, &loss_unc);
-            subtest_ok = subtest_ok && loss == 28.0 - epoch*16.0 && loss_unc == 0.0;
+            subtest_ok = subtest_ok && almost_equal(loss, 28.0 - epoch*16.0, atol) && almost_equal(loss_unc, 0.0, atol);
 
             double accuracy;
             double accuracy_unc;
@@ -571,6 +575,8 @@ static std::pair<int, int> test_idata_split(
             helper_after_test_idata_split(optim, __func__, high_level, epoch, "results_backward", subtest_ok, ntest, npass);
         }
         if (adamw) {
+            constexpr double atol = 1e-10;
+
             int64_t ndata_result;
             ggml_opt_result_ndata(cd.result2, &ndata_result);
             bool subtest_ok = ndata_result == ndata - idata_split;
@@ -578,7 +584,7 @@ static std::pair<int, int> test_idata_split(
             double loss;
             double loss_unc;
             ggml_opt_result_loss(cd.result2, &loss, &loss_unc);
-            subtest_ok = subtest_ok && loss == 15.0 - epoch*8 && almost_equal(loss_unc, sqrt(0.5), 1e-10);
+            subtest_ok = subtest_ok && almost_equal(loss, 15.0 - epoch*8, atol) && almost_equal(loss_unc, sqrt(0.5), atol);
 
             double accuracy;
             double accuracy_unc;
@@ -687,22 +693,24 @@ static std::pair<int, int> test_gradient_accumulation(
         }
         bool const adamw = optim == GGML_OPT_OPTIMIZER_TYPE_ADAMW;
         if (adamw) {
+            constexpr double atol = 1e-6;
             float weights;
             ggml_backend_tensor_get(cd.weights, &weights, 0, sizeof(float));
-            const bool subtest_ok = weights == (ndata/2) - epoch;
+            const bool subtest_ok = almost_equal(weights, (ndata/2) - epoch, atol);
             helper_after_test_gradient_accumulation(optim, __func__, nbatch_physical, loss_type, epoch, "weights", subtest_ok, ntest, npass);
         }
         {
+            constexpr double atol = 1e-6;
             int64_t ndata_result;
             ggml_opt_result_ndata(cd.result, &ndata_result);
-            bool subtest_ok = ndata_result == ndata/nbatch_physical;
+            bool subtest_ok = almost_equal(ndata_result, ndata/nbatch_physical, atol);
 
             double loss;
             ggml_opt_result_loss(cd.result, &loss, /*loss_unc =*/ nullptr);
             if (loss_type == GGML_OPT_LOSS_TYPE_SUM) {
-                subtest_ok = subtest_ok && loss == (39.0 - epoch*6.0);
+                subtest_ok = subtest_ok && almost_equal(loss, (39.0 - epoch*6.0), atol);
             } else if (loss_type == GGML_OPT_LOSS_TYPE_MEAN) {
-                subtest_ok = subtest_ok && almost_equal(loss, (39.0 - epoch*6.0) / ndata, 1e-6);
+                subtest_ok = subtest_ok && almost_equal(loss, (39.0 - epoch*6.0) / ndata, atol);
             } else {
                 GGML_ASSERT(false);
             }