From d3b2e4f3f7cf6b9e16eeda9ea463a79e2b28e52e Mon Sep 17 00:00:00 2001
From: Sharad
Date: Thu, 27 Jul 2023 15:50:42 +0200
Subject: [PATCH] fix(multi_datasets-evaluator): return single dict

---
 .../evaluation/evaluator/multi_datasets_evaluator.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/mmocr/evaluation/evaluator/multi_datasets_evaluator.py b/mmocr/evaluation/evaluator/multi_datasets_evaluator.py
index 560a2e6f6..e0c0d8b1f 100644
--- a/mmocr/evaluation/evaluator/multi_datasets_evaluator.py
+++ b/mmocr/evaluation/evaluator/multi_datasets_evaluator.py
@@ -92,15 +92,18 @@ def evaluate(self, size: int) -> dict:
                     metrics_results.update(metric_results)
             metric.results.clear()
         if is_main_process():
-            averaged_results = self.average_results(metrics_results)
+            averaged_results = [self.average_results(metrics_results)]
         else:
-            averaged_results = None
+            averaged_results = [None]
         metrics_results = [metrics_results]
         broadcast_object_list(metrics_results)
         broadcast_object_list([averaged_results])
-
-        return metrics_results[0], averaged_results
+        results = {
+            'metric_results': metrics_results[0],
+            'averaged_results': averaged_results
+        }
+        return results
 
     def average_results(self, metrics_results):
         """Compute the average of metric results across all datasets.
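
Note (not part of the patch itself): after this change, MultiDatasetsEvaluator.evaluate
returns one dict rather than the previous (metric_results, averaged_results) tuple.
A minimal sketch of how calling code might adapt is shown below; the evaluator instance
and size value are placeholders that are not defined in this patch, and the key names
are taken from the diff above.

    # Hypothetical caller, for illustration only.
    results = evaluator.evaluate(size)
    # Per-dataset metrics, formerly the first element of the returned tuple.
    metric_results = results['metric_results']
    # 'averaged_results' holds the list that the diff wraps for
    # broadcast_object_list, so the averaged metrics sit at index 0.
    averaged = results['averaged_results'][0]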