From b68dba47cb1f5f12cce8e94050e9b7ce6f2e1c92 Mon Sep 17 00:00:00 2001
From: Sharad Sirsat
Date: Wed, 5 Jul 2023 22:53:31 +0200
Subject: [PATCH] fix(tests): unpack the tuple returned by
 MultiDatasetsEvaluator.evaluate()

---
 .../test_multi_datasets_evaluator.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/tests/test_evaluation/test_evaluator/test_multi_datasets_evaluator.py b/tests/test_evaluation/test_evaluator/test_multi_datasets_evaluator.py
index f28da520c..f9b0f1410 100644
--- a/tests/test_evaluation/test_evaluator/test_multi_datasets_evaluator.py
+++ b/tests/test_evaluation/test_evaluator/test_multi_datasets_evaluator.py
@@ -97,11 +97,11 @@ def test_composed_metrics(self):
                 size, batch_size, pred=1, label=1):
             evaluator.process(predictions, data_samples)
 
-        metrics = evaluator.evaluate(size=size)
+        metrics_results, averaged_results = evaluator.evaluate(size=size)
 
-        self.assertAlmostEqual(metrics['Fake/Toy/accuracy'], 1.0)
-        self.assertAlmostEqual(metrics['Fake/Toy/mAP'], 0.0)
-        self.assertEqual(metrics['Fake/Toy/size'], size)
+        self.assertAlmostEqual(metrics_results['Fake/Toy/accuracy'], 1.0)
+        self.assertAlmostEqual(metrics_results['Fake/Toy/mAP'], 0.0)
+        self.assertEqual(metrics_results['Fake/Toy/size'], size)
 
         with self.assertWarns(Warning):
             evaluator.evaluate(size=0)
@@ -124,9 +124,9 @@ def test_composed_metrics(self):
         for data_samples, predictions in generate_test_results(
                 size, batch_size, pred=1, label=1):
             evaluator.process(predictions, data_samples)
-        metrics = evaluator.evaluate(size=size)
-        self.assertIn('Fake/Toy/accuracy', metrics)
-        self.assertIn('Fake/accuracy', metrics)
+        metrics_results, averaged_results = evaluator.evaluate(size=size)
+        self.assertIn('Fake/Toy/accuracy', metrics_results)
+        self.assertIn('Fake/accuracy', metrics_results)
 
         metrics_results = OrderedDict({
             'dataset1/metric1/accuracy': 0.9,
@@ -135,7 +135,7 @@ def test_composed_metrics(self):
             'dataset2/metric2/f1_score': 0.75
         })
 
-        evaluator = MultiDatasetsEvaluator([], [])
+        evaluator = MultiDatasetsEvaluator(cfg, dataset_prefixes=['Fake'])
         averaged_results = evaluator.average_results(metrics_results)
 
         expected_averaged_results = {
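
Note: a minimal usage sketch of the contract the updated test now assumes,
namely that `evaluate()` returns a `(metrics_results, averaged_results)` pair
and that the evaluator is constructed as
`MultiDatasetsEvaluator(cfg, dataset_prefixes=...)`, both taken from this
patch. `cfg`, `generate_test_results`, `size`, and `batch_size` refer to
fixtures defined elsewhere in the test file and are assumptions here, not
part of this diff.

    # Assumed fixtures from the surrounding test file:
    # cfg (metric configs), generate_test_results, size, batch_size.
    evaluator = MultiDatasetsEvaluator(cfg, dataset_prefixes=['Fake'])

    # Feed predictions batch by batch, exactly as the test loop does.
    for data_samples, predictions in generate_test_results(
            size, batch_size, pred=1, label=1):
        evaluator.process(predictions, data_samples)

    # evaluate() now returns two dicts instead of one: per-dataset results
    # keyed '<prefix>/<dataset>/<metric>', and results averaged across datasets.
    metrics_results, averaged_results = evaluator.evaluate(size=size)
    assert metrics_results['Fake/Toy/accuracy'] == 1.0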