double accuracy;
double accuracy_unc;
ggml_opt_result_accuracy(cd.result, &accuracy, &accuracy_unc);
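+ // almost_equal is assumed to be a small absolute-tolerance helper defined elsewhere
+ // in this test, roughly (sketch, not the actual definition):
+ //     static bool almost_equal(const double a, const double b, const double atol) {
+ //         return std::fabs(a - b) < atol;
+ //     }
+ // it replaces the fragile exact == comparisons on computed floating-point values below.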
- const bool subtest_ok = ndata == 0 && loss == 0.0 && std::isnan(loss_unc) && std::isnan(accuracy) && std::isnan(accuracy_unc);
+ const bool subtest_ok = ndata == 0 && almost_equal(loss, 0.0, 1e-6) && std::isnan(loss_unc) && std::isnan(accuracy) && std::isnan(accuracy_unc);
helper_after_test_forward_backward(optim, __func__, high_level, shuffle, "results_initial", subtest_ok, ntest, npass);
}
{
float weights;
ggml_backend_tensor_get(cd.weights, &weights, 0, sizeof(float));
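+ // the weight is read back as a float and should equal ndata/2 after the forward passes;
+ // a tight absolute tolerance replaces the exact ==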
- const bool subtest_ok = weights == ndata/2;
+ const bool subtest_ok = almost_equal(weights, ndata/2, 1e-10);
helper_after_test_forward_backward(optim, __func__, high_level, shuffle, "weights_after_forward", subtest_ok, ntest, npass);
}
{
+ constexpr double atol = 1e-10;
+
int64_t ndata;
ggml_opt_result_ndata(cd.result, &ndata);
bool subtest_ok = ndata == 6;
double loss;
double loss_unc;
ggml_opt_result_loss(cd.result, &loss, &loss_unc);
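+ // expected: a total loss of 33.0 with uncertainty sqrt(3.5) over the 6 datapoints, both within atol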
- subtest_ok = subtest_ok && loss == 33.0 && almost_equal(loss_unc, sqrt(3.5), 1e-10);
+ subtest_ok = subtest_ok && almost_equal(loss, 33.0, atol) && almost_equal(loss_unc, sqrt(3.5), atol);
double accuracy;
double accuracy_unc;
{
float weights;
ggml_backend_tensor_get(cd.weights, &weights, 0, sizeof(float));
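+ // after the forward+backward passes the weight should have moved to -ndata/2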
- const bool subtest_ok = weights == -ndata * .5;
+ const bool subtest_ok = almost_equal(weights, -ndata * 0.5, 1e-10);
helper_after_test_forward_backward(optim, __func__, high_level, shuffle, "weights_after_forward_backward", subtest_ok, ntest, npass);
}
{
double loss;
double loss_unc;
ggml_opt_result_loss(cd.result, &loss, &loss_unc);
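+ // the loss should be 18.0; without shuffling the loss uncertainty is exactly 0.0,
+ // so that part of the check can remain an exact comparison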
- subtest_ok = subtest_ok && loss == 18.0 && (shuffle || loss_unc == 0.0);
+ subtest_ok = subtest_ok && almost_equal(loss, 18.0, 1e-10) && (shuffle || loss_unc == 0.0);
double accuracy;
double accuracy_unc;
if (adamw) {
float weights;
ggml_backend_tensor_get(cd.weights, &weights, 0, sizeof(float));
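+ // with AdamW the weight should end up at ndata/2 - epoch*idata_split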
- const bool subtest_ok = weights == ndata/2 - epoch*idata_split;
+ const bool subtest_ok = almost_equal(weights, ndata/2 - epoch*idata_split, 1e-10);
helper_after_test_idata_split(optim, __func__, high_level, epoch, "weights", subtest_ok, ntest, npass);
}
if (adamw) {
+ constexpr double atol = 1e-10;
+
int64_t ndata_result;
ggml_opt_result_ndata(cd.result, &ndata_result);
bool subtest_ok = ndata_result == idata_split;
double loss;
double loss_unc;
ggml_opt_result_loss(cd.result, &loss, &loss_unc);
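+ // the first idata_split datapoints should give a loss of 28.0 - epoch*16.0 with
+ // (numerically) zero uncertainty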
- subtest_ok = subtest_ok && loss == 28.0 - epoch*16.0 && loss_unc == 0.0;
+ subtest_ok = subtest_ok && almost_equal(loss, 28.0 - epoch*16.0, atol) && almost_equal(loss_unc, 0.0, atol);
double accuracy;
double accuracy_unc;
helper_after_test_idata_split(optim, __func__, high_level, epoch, "results_backward", subtest_ok, ntest, npass);
}
if (adamw) {
+ constexpr double atol = 1e-10;
+
int64_t ndata_result;
ggml_opt_result_ndata(cd.result2, &ndata_result);
bool subtest_ok = ndata_result == ndata - idata_split;
double loss;
double loss_unc;
ggml_opt_result_loss(cd.result2, &loss, &loss_unc);
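+ // the remaining ndata - idata_split datapoints should give a loss of 15.0 - epoch*8.0
+ // with uncertainty sqrt(0.5)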
- subtest_ok = subtest_ok && loss == 15.0 - epoch*8 && almost_equal(loss_unc, sqrt(0.5), 1e-10);
+ subtest_ok = subtest_ok && almost_equal(loss, 15.0 - epoch*8.0, atol) && almost_equal(loss_unc, sqrt(0.5), atol);
double accuracy;
double accuracy_unc;
}
bool const adamw = optim == GGML_OPT_OPTIMIZER_TYPE_ADAMW;
if (adamw) {
+ constexpr double atol = 1e-6;
float weights;
ggml_backend_tensor_get(cd.weights, &weights, 0, sizeof(float));
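+ // expected weight after gradient accumulation: ndata/2 - epoch; the tolerance here is
+ // looser (1e-6) than the 1e-10 used above, presumably to leave headroom for the
+ // accumulated single-precision updates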
- const bool subtest_ok = weights == (ndata/2) - epoch;
+ const bool subtest_ok = almost_equal(weights, (ndata/2) - epoch, atol);
helper_after_test_gradient_accumulation(optim, __func__, nbatch_physical, loss_type, epoch, "weights", subtest_ok, ntest, npass);
}
{
+ constexpr double atol = 1e-6;
int64_t ndata_result;
ggml_opt_result_ndata(cd.result, &ndata_result);
bool subtest_ok = ndata_result == ndata/nbatch_physical;
double loss;
ggml_opt_result_loss(cd.result, &loss, /*loss_unc =*/ nullptr);
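+ // the expected loss is 39.0 - epoch*6.0 for the SUM loss type and that value divided
+ // by ndata for the MEAN loss type; both are checked within atol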
if (loss_type == GGML_OPT_LOSS_TYPE_SUM) {
- subtest_ok = subtest_ok && loss == (39.0 - epoch*6.0);
+ subtest_ok = subtest_ok && almost_equal(loss, (39.0 - epoch*6.0), atol);
} else if (loss_type == GGML_OPT_LOSS_TYPE_MEAN) {
- subtest_ok = subtest_ok && almost_equal(loss, (39.0 - epoch*6.0) / ndata, 1e-6);
+ subtest_ok = subtest_ok && almost_equal(loss, (39.0 - epoch*6.0) / ndata, atol);
} else {
GGML_ASSERT(false);
}