From f987102ed6ba53a8f383e078c126ef35918c49f8 Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Wed, 10 Dec 2025 19:29:24 +0100
Subject: [PATCH 01/28] Set the structure for implementing the evaluation
 strategy by events

The new functions "_variable_eval_with_events_strategy()",
"_point_eval_with_events_strategy()" and "_series_eval_with_events_strategy()"
implement the events evaluation strategy for the 3 supported granularities.
"evaluate()" now dispatches to them according to the requested strategy.
---
 ats/evaluators.py | 30 ++++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git a/ats/evaluators.py b/ats/evaluators.py
index d4862cc..c4f2bf5 100644
--- a/ats/evaluators.py
+++ b/ats/evaluators.py
@@ -114,9 +114,6 @@ def _copy_dataset(self,dataset,models):
         return dataset_copies

     def evaluate(self,models={},granularity='point',strategy='flags',breakdown=False):
-        if strategy != 'flags':
-            raise NotImplementedError(f'Evaluation strategy {strategy} is not implemented')
-
         if not models:
             raise ValueError('There are no models to evaluate')
         if not self.test_data:
@@ -138,11 +135,23 @@ def evaluate(self,models={},granularity='point',strategy='flags',breakdown=False
                 flagged_dataset = _get_model_output(dataset_copies[j],model)
                 for i,sample_df in enumerate(flagged_dataset):
                     if granularity == 'point':
-                        single_model_evaluation[f'sample_{i+1}'] = _point_granularity_evaluation(sample_df,anomaly_labels_list[i],breakdown=breakdown)
+                        if strategy == 'flags':
+                            single_model_evaluation[f'sample_{i+1}'] = _point_granularity_evaluation(sample_df,anomaly_labels_list[i],breakdown=breakdown)
+                        elif strategy == 'events':
+                            single_model_evaluation[f'sample_{i+1}'] = _point_eval_with_events_strategy(sample_df,anomaly_labels_df[i],breakdown=breakdown)
+
                     elif granularity == 'variable':
-                        single_model_evaluation[f'sample_{i+1}'] = _variable_granularity_evaluation(sample_df,anomaly_labels_list[i], breakdown = breakdown)
+                        if strategy == 'flags':
+                            single_model_evaluation[f'sample_{i+1}'] = _variable_granularity_evaluation(sample_df,anomaly_labels_list[i], breakdown = breakdown)
+                        elif strategy == 'events':
+                            single_model_evaluation[f'sample_{i+1}'] = _variable_eval_with_events_strategy(sample_df,anomaly_labels_df[i],breakdown=breakdown)
+
                     elif granularity == 'series':
-                        single_model_evaluation[f'sample_{i+1}'] = _series_granularity_evaluation(sample_df,anomaly_labels_list[i], breakdown = breakdown)
+                        if strategy == 'flags':
+                            single_model_evaluation[f'sample_{i+1}'] = _series_granularity_evaluation(sample_df,anomaly_labels_list[i], breakdown = breakdown)
+                        elif strategy == 'events':
+                            single_model_evaluation[f'sample_{i+1}'] = _series_eval_with_events_strategy(sample_df,anomaly_labels_df[i],breakdown=breakdown)
+
                     else:
                         raise ValueError(f'Unknown granularity {granularity}')
@@ -360,3 +369,12 @@ def _series_granularity_evaluation(flagged_timeseries_df,anomaly_labels_df,break
         return one_series_evaluation_result | breakdown_info
     else:
         return one_series_evaluation_result
+
+def _variable_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=False):
+    pass
+
+def _point_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=False):
+    pass
+
+def _series_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=False):
+    pass

From 53627490d6fed4b5bb99456e4c8f14e24a929a74 Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Wed, 10 Dec 2025 21:00:50 +0100
Subject: [PATCH 02/28] Implement "_point_eval_with_events_strategy()" without
 breakdown

The implementation of the above
function includes adding "_get_anomalous_events()"
---
 ats/evaluators.py | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)

diff --git a/ats/evaluators.py b/ats/evaluators.py
index c4f2bf5..d131b7a 100644
--- a/ats/evaluators.py
+++ b/ats/evaluators.py
@@ -373,8 +373,35 @@ def _series_granularity_evaluation(flagged_timeseries_df,anomaly_labels_df,break
 def _variable_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=False):
     pass

-def _point_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=False):
-    pass
+def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,breakdown=False):
+    detected_events_n = 0
+    false_positives_n = 0
+    inserted_events_n = _get_anomalous_events(anomaly_labels_df)
+    evaluation_result = {}
+
+    previous_timestamp = None
+    for timestamp in flagged_timeseries_df.index:
+        anomaly_label = anomaly_labels_df.loc[timestamp]
+        is_anomalous = flagged_timeseries_df.loc[timestamp]
+        if anomaly_label is not None and is_anomalous:
+            if anomaly_label != previous_anomaly_label:
+                detected_events_n += 1
+
+        elif anomaly_label is None and is_anomalous:
+            if anomaly_label != previous_anomaly_label:
+                false_positives_n += 1
+
+        previous_timestamp = timestamp
+        previous_anomaly_label = anomaly_labels_df.loc[previous_timestamp]
+
+    evaluation_result['true_positives_count'] = detected_events_n
+    evaluation_result['true_positives_rate'] = detected_events_n/inserted_events_n
+    evaluation_result['false_positives_count'] = false_positives_n
+    evaluation_result['false_positives_count'] = false_positives_n/len(flagged_timeseries_df)
+    return evaluation_result

 def _series_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=False):
     pass
+
+def _get_anomalous_events(anomaly_labels_df):
+    pass
\ No newline at end of file

From 00ba2ac4f14cf56858df0993e3d8c031cc9f2122 Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Fri, 12 Dec 2025 09:03:28 +0100
Subject: [PATCH 03/28] Implement "_get_anomalous_events()"

This function counts the total number of anomalous events in the series
---
 ats/evaluators.py            | 14 ++++++++++++--
 ats/tests/test_evaluators.py | 13 +++++++++++++
 2 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/ats/evaluators.py b/ats/evaluators.py
index d131b7a..4374ade 100644
--- a/ats/evaluators.py
+++ b/ats/evaluators.py
@@ -379,7 +379,8 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre
     inserted_events_n = _get_anomalous_events(anomaly_labels_df)
     evaluation_result = {}

-    previous_timestamp = None
+    previous_timestamp = None # not necessary in my opinion
+    # previous_anomaly_label = None
     for timestamp in flagged_timeseries_df.index:
         anomaly_label = anomaly_labels_df.loc[timestamp]
         is_anomalous = flagged_timeseries_df.loc[timestamp]
@@ -404,4 +405,13 @@ def _series_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=Fals
     pass

 def _get_anomalous_events(anomaly_labels_df):
-    pass
\ No newline at end of file
+    anomalous_events_n = 0
+    previous_anomaly_label = None
+    for timestamp in anomaly_labels_df.index:
+        anomaly_label = anomaly_labels_df.loc[timestamp]
+        if anomaly_label is not None:
+            if anomaly_label != previous_anomaly_label:
+                anomalous_events_n += 1
+        previous_timestamp = timestamp
+        previous_anomaly_label = anomaly_labels_df.loc[previous_timestamp]
+    return anomalous_events_n
diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py
index 7d293c6..089d671 100644
--- a/ats/tests/test_evaluators.py
+++ b/ats/tests/test_evaluators.py
@@ -11,6 +11,8 @@ from ..evaluators import _series_granularity_evaluation
 from ..evaluators import _get_breakdown_info
 from ..anomaly_detectors.stat.periodic_average import PeriodicAverageAnomalyDetector
+from ..evaluators import _get_anomalous_events
+
 import unittest
 import pandas as pd
 import random as rnd
@@ -652,3 +654,14 @@ def test_evaluate_with_autofit_model(self):
         models={'paverage': PeriodicAverageAnomalyDetector() }
         evaluation_results = evaluator.evaluate(models=models,granularity='point')
+    def test_get_anomalous_events(self):
+        humi_temp_generator = HumiTempTimeseriesGenerator()
+        timeseries_df = humi_temp_generator.generate(include_effect_label=False, anomalies=['step_uv'])
+        anomalous_events = _get_anomalous_events(timeseries_df.loc[:,'anomaly_label'])
+        self.assertEqual(anomalous_events,1)
+
+    def test_get_anomalous_events_with_point_anomaly(self):
+        humi_temp_generator = HumiTempTimeseriesGenerator()
+        timeseries_df = humi_temp_generator.generate(include_effect_label=False, anomalies=['spike_uv'])
+        anomalous_events = _get_anomalous_events(timeseries_df.loc[:,'anomaly_label'])
+        self.assertEqual(anomalous_events,1)

From c6d5e8b6a10f10d4fd17e59a1c73027c880f5faf Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Fri, 12 Dec 2025 10:14:12 +0100
Subject: [PATCH 04/28] Add test on "_point_eval_with_events_strategy()"

---
 ats/tests/test_evaluators.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py
index 089d671..22185bc 100644
--- a/ats/tests/test_evaluators.py
+++ b/ats/tests/test_evaluators.py
@@ -12,6 +12,7 @@ from ..evaluators import _get_breakdown_info
 from ..anomaly_detectors.stat.periodic_average import PeriodicAverageAnomalyDetector
 from ..evaluators import _get_anomalous_events
+from ..evaluators import _point_eval_with_events_strategy

 import unittest
 import pandas as pd
@@ -665,3 +666,16 @@ def test_get_anomalous_events_with_point_anomaly(self):
         timeseries_df = humi_temp_generator.generate(include_effect_label=False, anomalies=['spike_uv'])
         anomalous_events = _get_anomalous_events(timeseries_df.loc[:,'anomaly_label'])
         self.assertEqual(anomalous_events,1)
+
+    def test_point_eval_with_events_strategy(self):
+        # model output
+        series = generate_timeseries_df(entries=6, variables=1)
+        series['value_anomaly'] = [0,1,1,1,1,1]
+
+        anomaly_labels = pd.Series([None, 'anomaly_1', 'anomaly_1', None, None,'anomaly_1'])
+        anomaly_labels.index = series.index
+        evaluation_result = _point_eval_with_events_strategy(series,anomaly_labels)
+        self.assertAlmostEqual(evaluation_result['true_positives_count'],2)
+        self.assertAlmostEqual(evaluation_result['true_positives_rate'],2/2)
+        self.assertAlmostEqual(evaluation_result['false_positives_count'],1)
+        self.assertAlmostEqual(evaluation_result['false_positives_ratio'],1/6)

From f0dbbed424f841a256f591927238c2787c67f7b0 Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Fri, 12 Dec 2025 10:14:59 +0100
Subject: [PATCH 05/28] Fix errors on "_point_eval_with_events_strategy()"

---
 ats/evaluators.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/ats/evaluators.py b/ats/evaluators.py
index 4374ade..290b9b7 100644
--- a/ats/evaluators.py
+++ b/ats/evaluators.py
@@ -379,11 +379,12 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre
     inserted_events_n = _get_anomalous_events(anomaly_labels_df)
     evaluation_result = {}

-    previous_timestamp = None # not necessary in my opinion
-    #
previous_anomaly_label = None + previous_anomaly_label = None for timestamp in flagged_timeseries_df.index: anomaly_label = anomaly_labels_df.loc[timestamp] - is_anomalous = flagged_timeseries_df.loc[timestamp] + flags_df = flagged_timeseries_df.filter(like='_anomaly') + # True if there is at least 1 variable detected as anomalous + is_anomalous = flags_df.loc[timestamp].any() if anomaly_label is not None and is_anomalous: if anomaly_label != previous_anomaly_label: detected_events_n += 1 @@ -398,7 +399,7 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre evaluation_result['true_positives_count'] = detected_events_n evaluation_result['true_positives_rate'] = detected_events_n/inserted_events_n evaluation_result['false_positives_count'] = false_positives_n - evaluation_result['false_positives_count'] = false_positives_n/len(flagged_timeseries_df) + evaluation_result['false_positives_ratio'] = false_positives_n/len(flagged_timeseries_df) return evaluation_result def _series_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=False): From 1ec8bd8c2bd932d7f468ce99d24c19fa76ce3364 Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Fri, 12 Dec 2025 10:58:52 +0100 Subject: [PATCH 06/28] Add the breakdown --- ats/evaluators.py | 14 ++++++++++++-- ats/tests/test_evaluators.py | 11 +++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/ats/evaluators.py b/ats/evaluators.py index 290b9b7..c5ea319 100644 --- a/ats/evaluators.py +++ b/ats/evaluators.py @@ -378,6 +378,7 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre false_positives_n = 0 inserted_events_n = _get_anomalous_events(anomaly_labels_df) evaluation_result = {} + breakdown_info = {} previous_anomaly_label = None for timestamp in flagged_timeseries_df.index: @@ -387,7 +388,13 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre is_anomalous = flags_df.loc[timestamp].any() if anomaly_label is not None and is_anomalous: if anomaly_label != previous_anomaly_label: - detected_events_n += 1 + detected_events_n += 1 + + breakdown_key = anomaly_label + '_true_positives_count' + if breakdown_key in breakdown_info.keys(): + breakdown_info[breakdown_key] += 1 + else: + breakdown_info[breakdown_key] = 1 elif anomaly_label is None and is_anomalous: if anomaly_label != previous_anomaly_label: @@ -400,7 +407,10 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre evaluation_result['true_positives_rate'] = detected_events_n/inserted_events_n evaluation_result['false_positives_count'] = false_positives_n evaluation_result['false_positives_ratio'] = false_positives_n/len(flagged_timeseries_df) - return evaluation_result + if breakdown: + return evaluation_result | breakdown_info + else: + return evaluation_result def _series_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=False): pass diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index 22185bc..2eff05a 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -679,3 +679,14 @@ def test_point_eval_with_events_strategy(self): self.assertAlmostEqual(evaluation_result['true_positives_rate'],2/2) self.assertAlmostEqual(evaluation_result['false_positives_count'],1) self.assertAlmostEqual(evaluation_result['false_positives_ratio'],1/6) + + def test_point_eval_with_events_strategy_and_breakdown(self): + # model output + series = generate_timeseries_df(entries=6, variables=1) + 
series['value_anomaly'] = [0,1,1,1,1,1] + + anomaly_labels = pd.Series([None, 'anomaly_1', 'anomaly_1', None, None,'anomaly_1']) + anomaly_labels.index = series.index + evaluation_result = _point_eval_with_events_strategy(series,anomaly_labels,breakdown=True) + self.assertIn('anomaly_1_true_positives_count',evaluation_result.keys()) + self.assertAlmostEqual(evaluation_result['anomaly_1_true_positives_count'],2) From 92ff4930f0cc2e0527d6bc28fddc224649ae94cf Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Fri, 12 Dec 2025 11:06:31 +0100 Subject: [PATCH 07/28] =?UTF-8?q?Change=20the=20function=20"=5Fget=5Fanoma?= =?UTF-8?q?lous=5Fevents"=20in=20=E2=80=9C=5Fcount=5Fanomalous=5Fevents?= =?UTF-8?q?=E2=80=9C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now this function counts the number of anomalies inserted for each type --- ats/evaluators.py | 14 +++++++++++--- ats/tests/test_evaluators.py | 12 ++++++++---- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/ats/evaluators.py b/ats/evaluators.py index c5ea319..cdb77ee 100644 --- a/ats/evaluators.py +++ b/ats/evaluators.py @@ -376,7 +376,7 @@ def _variable_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=Fa def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,breakdown=False): detected_events_n = 0 false_positives_n = 0 - inserted_events_n = _get_anomalous_events(anomaly_labels_df) + inserted_events_n = _count_anomalous_events(anomaly_labels_df) evaluation_result = {} breakdown_info = {} @@ -415,14 +415,22 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre def _series_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=False): pass -def _get_anomalous_events(anomaly_labels_df): +def _count_anomalous_events(anomaly_labels_df): anomalous_events_n = 0 + events_by_type_n = {} previous_anomaly_label = None for timestamp in anomaly_labels_df.index: anomaly_label = anomaly_labels_df.loc[timestamp] if anomaly_label is not None: if anomaly_label != previous_anomaly_label: anomalous_events_n += 1 + + key = anomaly_label + if key in events_by_type_n.keys(): + events_by_type_n[key] +=1 + else: + events_by_type_n[key] =1 + previous_timestamp = timestamp previous_anomaly_label = anomaly_labels_df.loc[previous_timestamp] - return anomalous_events_n + return anomalous_events_n, events_by_type_n diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index 2eff05a..190c148 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -12,6 +12,7 @@ from ..evaluators import _get_breakdown_info from ..anomaly_detectors.stat.periodic_average import PeriodicAverageAnomalyDetector from ..evaluators import _get_anomalous_events +from ..evaluators import _count_anomalous_events from ..evaluators import _point_eval_with_events_strategy import unittest @@ -655,17 +656,20 @@ def test_evaluate_with_autofit_model(self): models={'paverage': PeriodicAverageAnomalyDetector() } evaluation_results = evaluator.evaluate(models=models,granularity='point') - def test_get_anomalous_events(self): + def test_count_anomalous_events(self): humi_temp_generator = HumiTempTimeseriesGenerator() timeseries_df = humi_temp_generator.generate(include_effect_label=False, anomalies=['step_uv']) - anomalous_events = _get_anomalous_events(timeseries_df.loc[:,'anomaly_label']) + anomalous_events,events_by_type = _count_anomalous_events(timeseries_df.loc[:,'anomaly_label']) self.assertEqual(anomalous_events,1) + 
self.assertIsInstance(events_by_type,dict) + self.assertEqual(events_by_type['step_uv'],1) - def test_get_anomalous_events_with_point_anomaly(self): + def test_count_anomalous_events_with_point_anomaly(self): humi_temp_generator = HumiTempTimeseriesGenerator() timeseries_df = humi_temp_generator.generate(include_effect_label=False, anomalies=['spike_uv']) - anomalous_events = _get_anomalous_events(timeseries_df.loc[:,'anomaly_label']) + anomalous_events,events_by_type = _count_anomalous_events(timeseries_df.loc[:,'anomaly_label']) self.assertEqual(anomalous_events,1) + self.assertEqual(events_by_type['spike_uv'],1) def test_point_eval_with_events_strategy(self): # model output From fd592de35fc8f4ee0b9d295bd113027c1d9785e0 Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Fri, 12 Dec 2025 11:13:43 +0100 Subject: [PATCH 08/28] Complete the breakdown --- ats/evaluators.py | 5 ++++- ats/tests/test_evaluators.py | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ats/evaluators.py b/ats/evaluators.py index cdb77ee..a2ffa40 100644 --- a/ats/evaluators.py +++ b/ats/evaluators.py @@ -376,7 +376,7 @@ def _variable_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=Fa def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,breakdown=False): detected_events_n = 0 false_positives_n = 0 - inserted_events_n = _count_anomalous_events(anomaly_labels_df) + inserted_events_n,inserted_events_by_type = _count_anomalous_events(anomaly_labels_df) evaluation_result = {} breakdown_info = {} @@ -407,6 +407,9 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre evaluation_result['true_positives_rate'] = detected_events_n/inserted_events_n evaluation_result['false_positives_count'] = false_positives_n evaluation_result['false_positives_ratio'] = false_positives_n/len(flagged_timeseries_df) + + for key in inserted_events_by_type.keys(): + breakdown_info[key + '_true_positives_rate'] = breakdown_info[key + '_true_positives_count']/inserted_events_by_type[key] if breakdown: return evaluation_result | breakdown_info else: diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index 190c148..1f2bdf2 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -694,3 +694,4 @@ def test_point_eval_with_events_strategy_and_breakdown(self): evaluation_result = _point_eval_with_events_strategy(series,anomaly_labels,breakdown=True) self.assertIn('anomaly_1_true_positives_count',evaluation_result.keys()) self.assertAlmostEqual(evaluation_result['anomaly_1_true_positives_count'],2) + self.assertAlmostEqual(evaluation_result['anomaly_1_true_positives_rate'],1) From 2586b644b1b7c241cbd133b30e8013d649b05225 Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Fri, 12 Dec 2025 11:46:16 +0100 Subject: [PATCH 09/28] Fix a zero division --- ats/evaluators.py | 11 +++++++---- ats/tests/test_evaluators.py | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/ats/evaluators.py b/ats/evaluators.py index a2ffa40..a31fb62 100644 --- a/ats/evaluators.py +++ b/ats/evaluators.py @@ -138,19 +138,19 @@ def evaluate(self,models={},granularity='point',strategy='flags',breakdown=False if strategy == 'flags': single_model_evaluation[f'sample_{i+1}'] = _point_granularity_evaluation(sample_df,anomaly_labels_list[i],breakdown=breakdown) elif strategy == 'events': - single_model_evaluation[f'sample_{i+1}'] = _point_eval_with_events_strategy(sample_df,anomaly_labels_df[i],breakdown=breakdown) + 
single_model_evaluation[f'sample_{i+1}'] = _point_eval_with_events_strategy(sample_df,anomaly_labels_list[i],breakdown=breakdown) elif granularity == 'variable': if strategy == 'flags': single_model_evaluation[f'sample_{i+1}'] = _variable_granularity_evaluation(sample_df,anomaly_labels_list[i], breakdown = breakdown) elif strategy == 'events': - single_model_evaluation[f'sample_{i+1}'] = _variable_eval_with_events_strategy(sample_df,anomaly_labels_df[i],breakdown=breakdown) + single_model_evaluation[f'sample_{i+1}'] = _variable_eval_with_events_strategy(sample_df,anomaly_labels_list[i],breakdown=breakdown) elif granularity == 'series': if strategy == 'flags': single_model_evaluation[f'sample_{i+1}'] = _series_granularity_evaluation(sample_df,anomaly_labels_list[i], breakdown = breakdown) elif strategy == 'events': - single_model_evaluation[f'sample_{i+1}'] = _series_eval_with_events_strategy(sample_df,anomaly_labels_df[i],breakdown=breakdown) + single_model_evaluation[f'sample_{i+1}'] = _series_eval_with_events_strategy(sample_df,anomaly_labels_list[i],breakdown=breakdown) else: raise ValueError(f'Unknown granularity {granularity}') @@ -404,7 +404,10 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre previous_anomaly_label = anomaly_labels_df.loc[previous_timestamp] evaluation_result['true_positives_count'] = detected_events_n - evaluation_result['true_positives_rate'] = detected_events_n/inserted_events_n + if inserted_events_n: + evaluation_result['true_positives_rate'] = detected_events_n/inserted_events_n + else: + evaluation_result['true_positives_rate'] = None evaluation_result['false_positives_count'] = false_positives_n evaluation_result['false_positives_ratio'] = false_positives_n/len(flagged_timeseries_df) diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index 1f2bdf2..b3d1b6d 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -695,3 +695,19 @@ def test_point_eval_with_events_strategy_and_breakdown(self): self.assertIn('anomaly_1_true_positives_count',evaluation_result.keys()) self.assertAlmostEqual(evaluation_result['anomaly_1_true_positives_count'],2) self.assertAlmostEqual(evaluation_result['anomaly_1_true_positives_rate'],1) + + def test_eval_point_granularity_events_strategy(self): + dataset = [self.series1, self.series2, self.series3] + minmax1 = MinMaxAnomalyDetector() + minmax2 = MinMaxAnomalyDetector() + minmax3 = MinMaxAnomalyDetector() + models={'detector_1': minmax1, + 'detector_2': minmax2, + 'detector_3': minmax3 + } + evaluator = Evaluator(test_data=dataset) + evaluation_results = evaluator.evaluate(models=models,granularity='point',strategy='events') + self.assertAlmostEqual(evaluation_results['detector_1']['true_positives_count'],6) + self.assertAlmostEqual(evaluation_results['detector_1']['true_positives_rate'],7/8) + self.assertAlmostEqual(evaluation_results['detector_1']['false_positives_count'],2) + self.assertAlmostEqual(evaluation_results['detector_1']['false_positives_ratio'],8/21) From 6a3ee64f565c42b81d022dbed8bb0d124620b5d1 Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Fri, 12 Dec 2025 12:00:27 +0100 Subject: [PATCH 10/28] Fix error on evaluation with events strategy and point granularity --- ats/evaluators.py | 2 +- ats/tests/test_evaluators.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ats/evaluators.py b/ats/evaluators.py index a31fb62..fa3e999 100644 --- a/ats/evaluators.py +++ b/ats/evaluators.py @@ -380,7 +380,7 @@ def 
_point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre evaluation_result = {} breakdown_info = {} - previous_anomaly_label = None + previous_anomaly_label = 0 for timestamp in flagged_timeseries_df.index: anomaly_label = anomaly_labels_df.loc[timestamp] flags_df = flagged_timeseries_df.filter(like='_anomaly') diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index b3d1b6d..570e262 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -710,4 +710,4 @@ def test_eval_point_granularity_events_strategy(self): self.assertAlmostEqual(evaluation_results['detector_1']['true_positives_count'],6) self.assertAlmostEqual(evaluation_results['detector_1']['true_positives_rate'],7/8) self.assertAlmostEqual(evaluation_results['detector_1']['false_positives_count'],2) - self.assertAlmostEqual(evaluation_results['detector_1']['false_positives_ratio'],8/21) + self.assertAlmostEqual(evaluation_results['detector_1']['false_positives_ratio'],10/(21*3)) From 8f3fcf2ecac77712eff872cd79cafd10645a3ac9 Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Fri, 12 Dec 2025 12:15:04 +0100 Subject: [PATCH 11/28] Add NotImplemented for evaluation with events strategy and variable granularity --- ats/evaluators.py | 2 +- ats/tests/test_evaluators.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/ats/evaluators.py b/ats/evaluators.py index fa3e999..84ac182 100644 --- a/ats/evaluators.py +++ b/ats/evaluators.py @@ -419,7 +419,7 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre return evaluation_result def _series_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=False): - pass + raise NotImplementedError('Evaluation with events strategy and series granularity not implemented') def _count_anomalous_events(anomaly_labels_df): anomalous_events_n = 0 diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index 570e262..2d0ea38 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -11,7 +11,6 @@ from ..evaluators import _series_granularity_evaluation from ..evaluators import _get_breakdown_info from ..anomaly_detectors.stat.periodic_average import PeriodicAverageAnomalyDetector -from ..evaluators import _get_anomalous_events from ..evaluators import _count_anomalous_events from ..evaluators import _point_eval_with_events_strategy From bd58613b425ab12dc5883cf557a443c3f2b6cbd8 Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Sat, 13 Dec 2025 19:02:08 +0100 Subject: [PATCH 12/28] Add test on evaluation with point granularity, events strategy and breakdown --- ats/tests/test_evaluators.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index 2d0ea38..2ac0026 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -710,3 +710,18 @@ def test_eval_point_granularity_events_strategy(self): self.assertAlmostEqual(evaluation_results['detector_1']['true_positives_rate'],7/8) self.assertAlmostEqual(evaluation_results['detector_1']['false_positives_count'],2) self.assertAlmostEqual(evaluation_results['detector_1']['false_positives_ratio'],10/(21*3)) + + def test_eval_point_granularity_events_strategy_with_breakdown(self): + dataset = [self.series1, self.series2, self.series3] + minmax = MinMaxAnomalyDetector() + models={'detector_1': minmax} + evaluator = Evaluator(test_data=dataset) + evaluation_results = 
evaluator.evaluate(models=models,granularity='point',strategy='events',breakdown=True)
+        self.assertAlmostEqual(evaluation_results['detector_1']['true_positives_count'],6)
+        self.assertAlmostEqual(evaluation_results['detector_1']['true_positives_rate'],7/8)
+        self.assertAlmostEqual(evaluation_results['detector_1']['false_positives_count'],2)
+        self.assertAlmostEqual(evaluation_results['detector_1']['false_positives_ratio'],10/(21*3))
+        self.assertAlmostEqual(evaluation_results['detector_1']['anomaly_1_true_positives_count'],4)
+        self.assertAlmostEqual(evaluation_results['detector_1']['anomaly_1_true_positives_rate'],5/6)
+        self.assertAlmostEqual(evaluation_results['detector_1']['anomaly_2_true_positives_count'],2)
+        self.assertAlmostEqual(evaluation_results['detector_1']['anomaly_2_true_positives_rate'],1)

From 731ce15d0b5a9bf459effa0b6dd1d4f0f4a4f7db Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Sat, 13 Dec 2025 19:35:32 +0100
Subject: [PATCH 13/28] Raise NotImplementedError for evaluation with variable
 granularity and events strategy

---
 ats/evaluators.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ats/evaluators.py b/ats/evaluators.py
index 84ac182..f5b08a2 100644
--- a/ats/evaluators.py
+++ b/ats/evaluators.py
@@ -371,7 +371,7 @@ def _series_granularity_evaluation(flagged_timeseries_df,anomaly_labels_df,break
     return one_series_evaluation_result

 def _variable_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=False):
-    pass
+    raise NotImplementedError('Evaluation with events strategy and variable granularity not implemented')

 def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,breakdown=False):
     detected_events_n = 0

From 279b03944066bf101199f29e502926ba527ec998 Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Tue, 16 Dec 2025 12:02:37 +0100
Subject: [PATCH 14/28] Fix bugs

About "_point_eval_with_events_strategy()":

- The event evaluation was not taking undetected anomalies into
account. The "breakdown_info" dictionary had only detected anomalies as
keys; the for loop computing the anomaly ratio
(anomaly_counts/inserted_anomalies) iterated over the keys of
"inserted_events_by_type", so a KeyError was raised for undetected
anomalies.

- The dataframe was filtered on the column name "_anomaly" to get the
model flags, but the "apply()" method of the model adds the column
"anomaly". This is probably why the evaluation of models like
NHARAnomalyDetector was giving 0.0 everywhere.
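As a reference for the second fix: pandas' "filter(like=...)" keeps every
column whose name contains the given substring, so 'anomaly' matches both
naming schemes while '_anomaly' misses a bare 'anomaly' column. A minimal
sketch of this behaviour (illustrative column names, not part of the patch):

    import pandas as pd

    df = pd.DataFrame({'value': [1.0, 2.0],
                       'anomaly': [True, False],         # flag column added by apply()
                       'value_anomaly': [False, True]})  # per-variable flag column

    print(list(df.filter(like='_anomaly').columns))  # ['value_anomaly'] -> misses 'anomaly'
    print(list(df.filter(like='anomaly').columns))   # ['anomaly', 'value_anomaly']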
--- ats/evaluators.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/ats/evaluators.py b/ats/evaluators.py index f5b08a2..5922380 100644 --- a/ats/evaluators.py +++ b/ats/evaluators.py @@ -383,7 +383,7 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre previous_anomaly_label = 0 for timestamp in flagged_timeseries_df.index: anomaly_label = anomaly_labels_df.loc[timestamp] - flags_df = flagged_timeseries_df.filter(like='_anomaly') + flags_df = flagged_timeseries_df.filter(like='anomaly') # True if there is at least 1 variable detected as anomalous is_anomalous = flags_df.loc[timestamp].any() if anomaly_label is not None and is_anomalous: @@ -411,8 +411,13 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre evaluation_result['false_positives_count'] = false_positives_n evaluation_result['false_positives_ratio'] = false_positives_n/len(flagged_timeseries_df) - for key in inserted_events_by_type.keys(): - breakdown_info[key + '_true_positives_rate'] = breakdown_info[key + '_true_positives_count']/inserted_events_by_type[key] + for event in inserted_events_by_type.keys(): + breakdown_key = event + '_true_positives_count' + if breakdown_key in breakdown_info.keys(): + breakdown_info[event + '_true_positives_rate'] = breakdown_info[breakdown_key]/inserted_events_by_type[event] + else: + breakdown_info[breakdown_key] = 0 + breakdown_info[event + '_true_positives_rate'] = 0 if breakdown: return evaluation_result | breakdown_info else: From d29e9fa551f794ecfa5f2274124720dd9627ee65 Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Tue, 16 Dec 2025 20:55:20 +0100 Subject: [PATCH 15/28] Add a test to detect incorrect false positive counting --- ats/evaluators.py | 13 ++++++------- ats/tests/test_evaluators.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 7 deletions(-) diff --git a/ats/evaluators.py b/ats/evaluators.py index 5922380..933668e 100644 --- a/ats/evaluators.py +++ b/ats/evaluators.py @@ -380,14 +380,15 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre evaluation_result = {} breakdown_info = {} - previous_anomaly_label = 0 + previous_point_info = {'label': 0} for timestamp in flagged_timeseries_df.index: anomaly_label = anomaly_labels_df.loc[timestamp] flags_df = flagged_timeseries_df.filter(like='anomaly') # True if there is at least 1 variable detected as anomalous is_anomalous = flags_df.loc[timestamp].any() - if anomaly_label is not None and is_anomalous: - if anomaly_label != previous_anomaly_label: + point_info = {anomaly_label: is_anomalous} + if point_info != previous_point_info and is_anomalous: + if anomaly_label is not None: detected_events_n += 1 breakdown_key = anomaly_label + '_true_positives_count' @@ -396,12 +397,10 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre else: breakdown_info[breakdown_key] = 1 - elif anomaly_label is None and is_anomalous: - if anomaly_label != previous_anomaly_label: + else: false_positives_n += 1 - previous_timestamp = timestamp - previous_anomaly_label = anomaly_labels_df.loc[previous_timestamp] + previous_point_info = point_info evaluation_result['true_positives_count'] = detected_events_n if inserted_events_n: diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index 2ac0026..5d28f73 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -11,8 +11,10 @@ from ..evaluators import _series_granularity_evaluation 
 from ..evaluators import _get_breakdown_info
 from ..anomaly_detectors.stat.periodic_average import PeriodicAverageAnomalyDetector
+from ats.anomaly_detectors.stat.robust import NHARAnomalyDetector
 from ..evaluators import _count_anomalous_events
 from ..evaluators import _point_eval_with_events_strategy
+from ats.dataset_generators import HumiTempDatasetGenerator

 import unittest
 import pandas as pd
@@ -725,3 +727,31 @@ def test_eval_point_granularity_events_strategy_with_breakdown(self):
         self.assertAlmostEqual(evaluation_results['detector_1']['anomaly_1_true_positives_rate'],5/6)
         self.assertAlmostEqual(evaluation_results['detector_1']['anomaly_2_true_positives_count'],2)
         self.assertAlmostEqual(evaluation_results['detector_1']['anomaly_2_true_positives_rate'],1)
+
+    def test_correct_counting_false_positives_with_events_strategy(self):
+        effects = ['noise', 'clouds']
+        anomalies = ['spike_mv', 'step_mv']
+        generator = HumiTempDatasetGenerator(sampling_interval='60m')
+        evaluation_dataset = generator.generate(n_series = 1, effects = effects, anomalies = anomalies,
+                                                time_span = '90D', max_anomalies_per_series = 3,
+                                                anomalies_ratio = 1.0, auto_repeat_anomalies=True)
+        models = {'minmax': MinMaxAnomalyDetector(),
+                  'nhar': NHARAnomalyDetector(),
+                  'p_avg': PeriodicAverageAnomalyDetector()
+                 }
+        evaluator = Evaluator(test_data = evaluation_dataset)
+
+        series = evaluation_dataset[0]
+        anomalous_events_n, events_by_type_n = _count_anomalous_events(series.loc[:,'anomaly_label'])
+        evaluation_results = evaluator.evaluate(models=models,granularity='point',strategy='events',breakdown=False)
+
+        for model in evaluation_results.keys():
+            tp_n = evaluation_results[model]['true_positives_count']
+            tp_rate = evaluation_results[model]['true_positives_rate']
+            if tp_rate:
+                self.assertAlmostEqual(anomalous_events_n, tp_n / tp_rate)
+
+            fp_n = evaluation_results[model]['false_positives_count']
+            fp_ratio = evaluation_results[model]['false_positives_ratio']
+            if fp_ratio:
+                self.assertAlmostEqual(len(series), fp_n / fp_ratio)

From 0b1d6e293c32a9e16aedabcab76d2923ee6d866e Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Tue, 16 Dec 2025 22:50:51 +0100
Subject: [PATCH 16/28] Delete the instance of "PeriodicAverageAnomalyDetector()"
 in "test_correct_counting_false_positives_with_events_strategy()"

This test checks whether true positives and false positives are correctly
counted using a dataset with only 1 series. In this case, the ratio between
the true positives (false positives) count and the true positives rate
(false positives ratio) must give the total number of inserted anomalies
(the length of the series). The test passes for "MinMaxAnomalyDetector()"
and "NHARAnomalyDetector()" but not for "PeriodicAverageAnomalyDetector()",
because the output of that model is a shorter series than the input one.
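Restated, on a single-series dataset the metrics must satisfy
true_positives_rate = true_positives_count / inserted_events_n and
false_positives_ratio = false_positives_count / len(series), so the test
recovers both denominators by division. A hypothetical helper expressing
the same check (the names are illustrative, not part of the test suite):

    def check_single_series_identities(result, inserted_events_n, series_len):
        # On a 1-series dataset the aggregated metrics must be consistent
        # with the per-series definitions; the second identity fails for
        # PeriodicAverageAnomalyDetector because its output series is
        # shorter than the labelled input at this point in the history.
        if result['true_positives_rate']:
            assert inserted_events_n == round(result['true_positives_count'] / result['true_positives_rate'])
        if result['false_positives_ratio']:
            assert series_len == round(result['false_positives_count'] / result['false_positives_ratio'])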
--- ats/tests/test_evaluators.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index 5d28f73..075ece8 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -736,8 +736,7 @@ def test_correct_counting_false_positives_with_events_strategy(self): time_span = '90D', max_anomalies_per_series = 3, anomalies_ratio = 1.0, auto_repeat_anomalies=True) models = {'minmax': MinMaxAnomalyDetector(), - 'nhar': NHARAnomalyDetector(), - 'p_avg': PeriodicAverageAnomalyDetector() + 'nhar': NHARAnomalyDetector() } evaluator = Evaluator(test_data = evaluation_dataset) From f4e4298170f6e4bfc17d4f34468d0bdc366c3813 Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Tue, 16 Dec 2025 23:25:20 +0100 Subject: [PATCH 17/28] Fix the problem with the periodic average model Now the false positives ratio is computed as the ratio between false positives counts and the length of the column 'anomaly_labels', which is equal to the length of the series of the dataset. This was done because the function "_point_eval_with_events_strategy()" takes in input the flagged series (output of the "apply()" method of the model) that for the periodic average model has a shorter length than the original series of the dataset. --- ats/evaluators.py | 2 +- ats/tests/test_evaluators.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ats/evaluators.py b/ats/evaluators.py index 933668e..c31cf59 100644 --- a/ats/evaluators.py +++ b/ats/evaluators.py @@ -408,7 +408,7 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre else: evaluation_result['true_positives_rate'] = None evaluation_result['false_positives_count'] = false_positives_n - evaluation_result['false_positives_ratio'] = false_positives_n/len(flagged_timeseries_df) + evaluation_result['false_positives_ratio'] = false_positives_n/len(anomaly_labels_df) for event in inserted_events_by_type.keys(): breakdown_key = event + '_true_positives_count' diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index 075ece8..62c0827 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -736,7 +736,8 @@ def test_correct_counting_false_positives_with_events_strategy(self): time_span = '90D', max_anomalies_per_series = 3, anomalies_ratio = 1.0, auto_repeat_anomalies=True) models = {'minmax': MinMaxAnomalyDetector(), - 'nhar': NHARAnomalyDetector() + 'nhar': NHARAnomalyDetector(), + 'p_avg': PeriodicAverageAnomalyDetector() } evaluator = Evaluator(test_data = evaluation_dataset) From 368b4044abba0d51ff3f3cf404e3a050ecb60cd2 Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Sun, 21 Dec 2025 15:14:48 +0100 Subject: [PATCH 18/28] Add tests on event evaluation --- ats/tests/test_evaluators.py | 133 +++++++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index 62c0827..786d336 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -755,3 +755,136 @@ def test_correct_counting_false_positives_with_events_strategy(self): fp_ratio = evaluation_results[model]['false_positives_ratio'] if fp_ratio: self.assertAlmostEqual(len(series), fp_n / fp_ratio) + + def test_count_anomalous_events_on_synth_dataset(self): + anomalies = ['step_mv','pattern_mv','spike_mv'] + generator = HumiTempDatasetGenerator(sampling_interval='60m') + evaluation_dataset = generator.generate(n_series = 3, effects = ['noise'], 
anomalies = anomalies, + time_span = '90D', max_anomalies_per_series = 3, + anomalies_ratio = 1.0, auto_repeat_anomalies=True) + series_1 = evaluation_dataset[0] + '''for timestamp in series_1.index: + print(series_1.loc[timestamp,'anomaly_label'])''' + # series_1 + # 1 step + # 0 pattern + # 0 spike + anomalous_events_n, events_by_type_n = _count_anomalous_events(series_1.loc[:,'anomaly_label']) + self.assertIn('step_mv',events_by_type_n.keys()) + self.assertEqual(events_by_type_n['step_mv'],1) + self.assertEqual(anomalous_events_n,1) + + series_2 = evaluation_dataset[1] + '''for timestamp in series_2.index: + print(series_2.loc[timestamp,'anomaly_label'])''' + # series_2 + # 1 step + # 0 pattern + # 0 spike + anomalous_events_n_2, events_by_type_n_2 = _count_anomalous_events(series_2.loc[:,'anomaly_label']) + self.assertIn('step_mv',events_by_type_n_2.keys()) + self.assertEqual(events_by_type_n_2['step_mv'],1) + self.assertEqual(anomalous_events_n_2,1) + + series_3 = evaluation_dataset[2] + '''for timestamp in series_3.index: + print(series_3.loc[timestamp,'anomaly_label'])''' + # series_3 + # 1 step + # 1 pattern + # 1 spike + anomalous_events_n_3, events_by_type_n_3 = _count_anomalous_events(series_3.loc[:,'anomaly_label']) + self.assertIn('step_mv',events_by_type_n_3.keys()) + self.assertIn('pattern_mv',events_by_type_n_3.keys()) + self.assertEqual(events_by_type_n_3['step_mv'],1) + self.assertEqual(events_by_type_n_3['pattern_mv'],1) + self.assertEqual(events_by_type_n_3['spike_mv'],1) + self.assertEqual(anomalous_events_n_3,3) + + def test_event_eval_on_p_avg(self): + anomalies = ['step_mv'] + generator = HumiTempDatasetGenerator(sampling_interval='60m') + evaluation_dataset = generator.generate(n_series = 1, effects = ['noise'], anomalies = anomalies, + time_span = '90D', max_anomalies_per_series = 1, + anomalies_ratio = 1.0, auto_repeat_anomalies=True) + series = evaluation_dataset[0] + '''for timestamp in series.index: + print(series.loc[timestamp,'anomaly_label'])''' + # series + # 1 step + # 0 pattern + # 0 spike + anomalous_events_n, events_by_type_n = _count_anomalous_events(series.loc[:,'anomaly_label']) + self.assertIn('step_mv',events_by_type_n.keys()) + self.assertEqual(events_by_type_n['step_mv'],1) + self.assertEqual(anomalous_events_n,1) + + model = PeriodicAverageAnomalyDetector() + new_series = series.drop(columns=['anomaly_label'],inplace=False) + p_avg_output = model.apply(new_series) + + anomalous_timestamps = [] + for timestamp in p_avg_output.index: + is_anomalous = p_avg_output.filter(like='anomaly').loc[timestamp].any() + anomaly_label = series.loc[timestamp,'anomaly_label'] + if anomaly_label is not None and is_anomalous: + anomalous_timestamps.append(timestamp) + + start = anomalous_timestamps[0] + sampling_interval = pd.Timedelta(minutes=60) + consecutive_timestamp_n = 0 + for timestamp in anomalous_timestamps: + are_consecutive = (timestamp - start) == sampling_interval + if are_consecutive: + consecutive_timestamp_n += 1 + start = timestamp + + detected_anomalies = len(anomalous_timestamps) - consecutive_timestamp_n + evaluator = Evaluator(test_data = evaluation_dataset) + evaluation_results = evaluator.evaluate(models={'p_avg':model},granularity='point',strategy='events',breakdown=False) + self.assertEqual(evaluation_results['p_avg']['true_positives_count'],detected_anomalies) + self.assertEqual(evaluation_results['p_avg']['true_positives_rate'],detected_anomalies/anomalous_events_n) + + def test_event_eval_on_nhar(self): + anomalies = ['step_mv'] + 
generator = HumiTempDatasetGenerator(sampling_interval='60m') + evaluation_dataset = generator.generate(n_series = 1, effects = ['noise'], anomalies = anomalies, + time_span = '90D', max_anomalies_per_series = 1, + anomalies_ratio = 1.0, auto_repeat_anomalies=True) + series = evaluation_dataset[0] + '''for timestamp in series.index: + print(series.loc[timestamp,'anomaly_label'])''' + # series + # 1 step + # 0 pattern + # 0 spike + anomalous_events_n, events_by_type_n = _count_anomalous_events(series.loc[:,'anomaly_label']) + self.assertIn('step_mv',events_by_type_n.keys()) + self.assertEqual(events_by_type_n['step_mv'],1) + self.assertEqual(anomalous_events_n,1) + + model = PeriodicAverageAnomalyDetector() + new_series = series.drop(columns=['anomaly_label'],inplace=False) + p_avg_output = model.apply(new_series) + + anomalous_timestamps = [] + for timestamp in p_avg_output.index: + is_anomalous = p_avg_output.filter(like='anomaly').loc[timestamp].any() + anomaly_label = series.loc[timestamp,'anomaly_label'] + if anomaly_label is not None and is_anomalous: + anomalous_timestamps.append(timestamp) + + start = anomalous_timestamps[0] + sampling_interval = pd.Timedelta(minutes=60) + consecutive_timestamp_n = 0 + for timestamp in anomalous_timestamps: + are_consecutive = (timestamp - start) == sampling_interval + if are_consecutive: + consecutive_timestamp_n += 1 + start = timestamp + + detected_anomalies = len(anomalous_timestamps) - consecutive_timestamp_n + evaluator = Evaluator(test_data = evaluation_dataset) + evaluation_results = evaluator.evaluate(models={'nhar':model},granularity='point',strategy='events',breakdown=False) + self.assertEqual(evaluation_results['nhar']['true_positives_count'],detected_anomalies) + self.assertEqual(evaluation_results['nhar']['true_positives_rate'],detected_anomalies/anomalous_events_n) From f1e5b13f5713b7368721a6c3b4489f3093646ade Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Wed, 24 Dec 2025 23:08:50 +0100 Subject: [PATCH 19/28] Change variable names anomalous_events_n ---> events_n events_by_type_n ---> event_type_counts --- ats/evaluators.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/ats/evaluators.py b/ats/evaluators.py index c31cf59..40423ee 100644 --- a/ats/evaluators.py +++ b/ats/evaluators.py @@ -426,21 +426,21 @@ def _series_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=Fals raise NotImplementedError('Evaluation with events strategy and series granularity not implemented') def _count_anomalous_events(anomaly_labels_df): - anomalous_events_n = 0 - events_by_type_n = {} + events_n = 0 + event_type_counts = {} previous_anomaly_label = None for timestamp in anomaly_labels_df.index: anomaly_label = anomaly_labels_df.loc[timestamp] if anomaly_label is not None: if anomaly_label != previous_anomaly_label: - anomalous_events_n += 1 + events_n += 1 key = anomaly_label - if key in events_by_type_n.keys(): - events_by_type_n[key] +=1 + if key in event_type_counts.keys(): + event_type_counts[key] +=1 else: - events_by_type_n[key] =1 + event_type_counts[key] =1 previous_timestamp = timestamp previous_anomaly_label = anomaly_labels_df.loc[previous_timestamp] - return anomalous_events_n, events_by_type_n + return events_n , event_type_counts From 3d0d313cc15acba1847ad01309f187638f3baa91 Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Wed, 24 Dec 2025 23:24:17 +0100 Subject: [PATCH 20/28] Add new returned value to "_count_anomalous_events()" "event_time_slots" is a dictionary in which the key is 
the anomaly type and the value contains timeslots of the anomaly (a list of start and stop timestamps) --- ats/evaluators.py | 13 +++++++------ ats/tests/test_evaluators.py | 16 ++++++++-------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/ats/evaluators.py b/ats/evaluators.py index 40423ee..e50923c 100644 --- a/ats/evaluators.py +++ b/ats/evaluators.py @@ -376,7 +376,7 @@ def _variable_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=Fa def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,breakdown=False): detected_events_n = 0 false_positives_n = 0 - inserted_events_n,inserted_events_by_type = _count_anomalous_events(anomaly_labels_df) + events_n, event_type_counts, event_time_slots = _count_anomalous_events(anomaly_labels_df) evaluation_result = {} breakdown_info = {} @@ -403,17 +403,17 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre previous_point_info = point_info evaluation_result['true_positives_count'] = detected_events_n - if inserted_events_n: - evaluation_result['true_positives_rate'] = detected_events_n/inserted_events_n + if events_n: + evaluation_result['true_positives_rate'] = detected_events_n/events_n else: evaluation_result['true_positives_rate'] = None evaluation_result['false_positives_count'] = false_positives_n evaluation_result['false_positives_ratio'] = false_positives_n/len(anomaly_labels_df) - for event in inserted_events_by_type.keys(): + for event in event_type_counts.keys(): breakdown_key = event + '_true_positives_count' if breakdown_key in breakdown_info.keys(): - breakdown_info[event + '_true_positives_rate'] = breakdown_info[breakdown_key]/inserted_events_by_type[event] + breakdown_info[event + '_true_positives_rate'] = breakdown_info[breakdown_key]/ event_type_counts[event] else: breakdown_info[breakdown_key] = 0 breakdown_info[event + '_true_positives_rate'] = 0 @@ -428,6 +428,7 @@ def _series_eval_with_events_strategy(sample_df,anomaly_labels_df,breakdown=Fals def _count_anomalous_events(anomaly_labels_df): events_n = 0 event_type_counts = {} + event_time_slots = {} previous_anomaly_label = None for timestamp in anomaly_labels_df.index: anomaly_label = anomaly_labels_df.loc[timestamp] @@ -443,4 +444,4 @@ def _count_anomalous_events(anomaly_labels_df): previous_timestamp = timestamp previous_anomaly_label = anomaly_labels_df.loc[previous_timestamp] - return events_n , event_type_counts + return events_n , event_type_counts, event_time_slots diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index 786d336..a1cf543 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -660,7 +660,7 @@ def test_evaluate_with_autofit_model(self): def test_count_anomalous_events(self): humi_temp_generator = HumiTempTimeseriesGenerator() timeseries_df = humi_temp_generator.generate(include_effect_label=False, anomalies=['step_uv']) - anomalous_events,events_by_type = _count_anomalous_events(timeseries_df.loc[:,'anomaly_label']) + anomalous_events,events_by_type, event_time_slots = _count_anomalous_events(timeseries_df.loc[:,'anomaly_label']) self.assertEqual(anomalous_events,1) self.assertIsInstance(events_by_type,dict) self.assertEqual(events_by_type['step_uv'],1) @@ -668,7 +668,7 @@ def test_count_anomalous_events(self): def test_count_anomalous_events_with_point_anomaly(self): humi_temp_generator = HumiTempTimeseriesGenerator() timeseries_df = humi_temp_generator.generate(include_effect_label=False, anomalies=['spike_uv']) - 
anomalous_events,events_by_type = _count_anomalous_events(timeseries_df.loc[:,'anomaly_label']) + anomalous_events,events_by_type, event_time_slots = _count_anomalous_events(timeseries_df.loc[:,'anomaly_label']) self.assertEqual(anomalous_events,1) self.assertEqual(events_by_type['spike_uv'],1) @@ -742,7 +742,7 @@ def test_correct_counting_false_positives_with_events_strategy(self): evaluator = Evaluator(test_data = evaluation_dataset) series = evaluation_dataset[0] - anomalous_events_n, events_by_type_n = _count_anomalous_events(series.loc[:,'anomaly_label']) + anomalous_events_n, events_by_type_n, event_time_slots= _count_anomalous_events(series.loc[:,'anomaly_label']) evaluation_results = evaluator.evaluate(models=models,granularity='point',strategy='events',breakdown=False) for model in evaluation_results.keys(): @@ -769,7 +769,7 @@ def test_count_anomalous_events_on_synth_dataset(self): # 1 step # 0 pattern # 0 spike - anomalous_events_n, events_by_type_n = _count_anomalous_events(series_1.loc[:,'anomaly_label']) + anomalous_events_n, events_by_type_n ,event_time_slots= _count_anomalous_events(series_1.loc[:,'anomaly_label']) self.assertIn('step_mv',events_by_type_n.keys()) self.assertEqual(events_by_type_n['step_mv'],1) self.assertEqual(anomalous_events_n,1) @@ -781,7 +781,7 @@ def test_count_anomalous_events_on_synth_dataset(self): # 1 step # 0 pattern # 0 spike - anomalous_events_n_2, events_by_type_n_2 = _count_anomalous_events(series_2.loc[:,'anomaly_label']) + anomalous_events_n_2, events_by_type_n_2,event_time_slots_2 = _count_anomalous_events(series_2.loc[:,'anomaly_label']) self.assertIn('step_mv',events_by_type_n_2.keys()) self.assertEqual(events_by_type_n_2['step_mv'],1) self.assertEqual(anomalous_events_n_2,1) @@ -793,7 +793,7 @@ def test_count_anomalous_events_on_synth_dataset(self): # 1 step # 1 pattern # 1 spike - anomalous_events_n_3, events_by_type_n_3 = _count_anomalous_events(series_3.loc[:,'anomaly_label']) + anomalous_events_n_3, events_by_type_n_3 , event_time_slots_3= _count_anomalous_events(series_3.loc[:,'anomaly_label']) self.assertIn('step_mv',events_by_type_n_3.keys()) self.assertIn('pattern_mv',events_by_type_n_3.keys()) self.assertEqual(events_by_type_n_3['step_mv'],1) @@ -814,7 +814,7 @@ def test_event_eval_on_p_avg(self): # 1 step # 0 pattern # 0 spike - anomalous_events_n, events_by_type_n = _count_anomalous_events(series.loc[:,'anomaly_label']) + anomalous_events_n, events_by_type_n ,event_time_slots= _count_anomalous_events(series.loc[:,'anomaly_label']) self.assertIn('step_mv',events_by_type_n.keys()) self.assertEqual(events_by_type_n['step_mv'],1) self.assertEqual(anomalous_events_n,1) @@ -858,7 +858,7 @@ def test_event_eval_on_nhar(self): # 1 step # 0 pattern # 0 spike - anomalous_events_n, events_by_type_n = _count_anomalous_events(series.loc[:,'anomaly_label']) + anomalous_events_n, events_by_type_n , event_time_slots= _count_anomalous_events(series.loc[:,'anomaly_label']) self.assertIn('step_mv',events_by_type_n.keys()) self.assertEqual(events_by_type_n['step_mv'],1) self.assertEqual(anomalous_events_n,1) From 0a91de7a0c12fc59d44b145e4eadd9f789898621 Mon Sep 17 00:00:00 2001 From: Agata Benvegna Date: Thu, 25 Dec 2025 00:14:54 +0100 Subject: [PATCH 21/28] Build the dictionary "event_time_slots" and add test --- ats/evaluators.py | 13 +++++++++---- ats/tests/test_evaluators.py | 21 ++++++++++++++++++++- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/ats/evaluators.py b/ats/evaluators.py index e50923c..b549e6e 100644 --- 
a/ats/evaluators.py +++ b/ats/evaluators.py @@ -432,15 +432,20 @@ def _count_anomalous_events(anomaly_labels_df): previous_anomaly_label = None for timestamp in anomaly_labels_df.index: anomaly_label = anomaly_labels_df.loc[timestamp] - if anomaly_label is not None: - if anomaly_label != previous_anomaly_label: + if anomaly_label != previous_anomaly_label: + if anomaly_label is not None: events_n += 1 - key = anomaly_label - if key in event_type_counts.keys(): + if anomaly_label in event_type_counts.keys(): event_type_counts[key] +=1 + event_time_slots[key].append(timestamp) else: event_type_counts[key] =1 + event_time_slots[key] = [] + event_time_slots[key].append(timestamp) + + else: + event_time_slots[key].append(previous_timestamp) previous_timestamp = timestamp previous_anomaly_label = anomaly_labels_df.loc[previous_timestamp] diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py index a1cf543..a328144 100644 --- a/ats/tests/test_evaluators.py +++ b/ats/tests/test_evaluators.py @@ -664,6 +664,11 @@ def test_count_anomalous_events(self): self.assertEqual(anomalous_events,1) self.assertIsInstance(events_by_type,dict) self.assertEqual(events_by_type['step_uv'],1) + '''for timestamp in timeseries_df.index: + print(f"{timestamp} {timeseries_df.loc[timestamp,'anomaly_label']}")''' + # 1973-05-25 01:17:00+00:00 - 1973-06-01 02:02:00+00:00 + self.assertEqual(event_time_slots['step_uv'][0],pd.Timestamp('1973-05-25 01:17:00+00:00')) + self.assertEqual(event_time_slots['step_uv'][1],pd.Timestamp('1973-06-01 02:02:00+00:00')) def test_count_anomalous_events_with_point_anomaly(self): humi_temp_generator = HumiTempTimeseriesGenerator() @@ -671,6 +676,11 @@ def test_count_anomalous_events_with_point_anomaly(self): anomalous_events,events_by_type, event_time_slots = _count_anomalous_events(timeseries_df.loc[:,'anomaly_label']) self.assertEqual(anomalous_events,1) self.assertEqual(events_by_type['spike_uv'],1) + '''for timestamp in timeseries_df.index: + print(f"{timestamp} {timeseries_df.loc[timestamp,'anomaly_label']}")''' + # 1973-05-02 15:47:00+00:00 - 1973-05-02 15:47:00+00:00 + self.assertEqual(event_time_slots['spike_uv'][0],pd.Timestamp('1973-05-02 15:47:00+00:00')) + self.assertEqual(event_time_slots['spike_uv'][1],pd.Timestamp('1973-05-02 15:47:00+00:00')) def test_point_eval_with_events_strategy(self): # model output @@ -788,7 +798,7 @@ def test_count_anomalous_events_on_synth_dataset(self): series_3 = evaluation_dataset[2] '''for timestamp in series_3.index: - print(series_3.loc[timestamp,'anomaly_label'])''' + print(f"{timestamp} {series_3.loc[timestamp,'anomaly_label']}")''' # series_3 # 1 step # 1 pattern @@ -800,6 +810,15 @@ def test_count_anomalous_events_on_synth_dataset(self): self.assertEqual(events_by_type_n_3['pattern_mv'],1) self.assertEqual(events_by_type_n_3['spike_mv'],1) self.assertEqual(anomalous_events_n_3,3) + # spike : 1976-01-02 14:50:00+00:00 - 1976-01-02 14:50:00+00:00 + self.assertEqual(event_time_slots_3['spike_mv'][0],pd.Timestamp('1976-01-02 14:50:00+00:00')) + self.assertEqual(event_time_slots_3['spike_mv'][1],pd.Timestamp('1976-01-02 14:50:00+00:00')) + # step : 1975-12-25 16:50:00+00:00 - 1976-01-02 03:50:00+00:00 + self.assertEqual(event_time_slots_3['step_mv'][0],pd.Timestamp('1975-12-25 16:50:00+00:00')) + self.assertEqual(event_time_slots_3['step_mv'][1],pd.Timestamp('1976-01-02 03:50:00+00:00')) + # pattern : 1975-11-13 04:50:00+00:00 - 1975-11-23 08:50:00+00:00 + 
From f9814474441ed05e56f32700277f8360fc66c6cb Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Tue, 30 Dec 2025 15:56:52 +0100
Subject: [PATCH 22/28] Change the counting strategy of true positives

---
 ats/evaluators.py | 40 +++++++++++++++++++---------------------
 1 file changed, 19 insertions(+), 21 deletions(-)

diff --git a/ats/evaluators.py b/ats/evaluators.py
index b549e6e..2787ad0 100644
--- a/ats/evaluators.py
+++ b/ats/evaluators.py
@@ -380,43 +380,41 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre
     evaluation_result = {}
     breakdown_info = {}
 
+    flags_df = flagged_timeseries_df.filter(like='anomaly')
     previous_point_info = {'label': 0}
     for timestamp in flagged_timeseries_df.index:
         anomaly_label = anomaly_labels_df.loc[timestamp]
-        flags_df = flagged_timeseries_df.filter(like='anomaly')
         # True if there is at least 1 variable detected as anomalous
         is_anomalous = flags_df.loc[timestamp].any()
         point_info = {anomaly_label: is_anomalous}
         if point_info != previous_point_info and is_anomalous:
-            if anomaly_label is not None:
-                detected_events_n += 1
-
-                breakdown_key = anomaly_label + '_true_positives_count'
-                if breakdown_key in breakdown_info.keys():
-                    breakdown_info[breakdown_key] += 1
-                else:
-                    breakdown_info[breakdown_key] = 1
-
-            else:
+            if anomaly_label is None:
                 false_positives_n += 1
-
         previous_point_info = point_info
 
+    evaluation_result['false_positives_count'] = false_positives_n
+    evaluation_result['false_positives_ratio'] = false_positives_n/len(anomaly_labels_df)
+
+    for anomaly, anomaly_time_slots in event_time_slots.items():
+        anomaly_n = len(anomaly_time_slots)//2
+        events_n += anomaly_n
+        detected_anomaly_n = 0
+        for i in range(0,len(anomaly_time_slots),2):
+            start = anomaly_time_slots[i]
+            stop = anomaly_time_slots[i+1]
+            is_detected = flags_df.loc[start:stop].any().any()
+            if is_detected:
+                detected_anomaly_n +=1
+        breakdown_info[anomaly + 'true_positives_count'] = detected_anomaly_n
+        breakdown_info[anomaly + 'true_positives_rate'] = detected_anomaly_n/anomaly_n
+        detected_events_n += detected_anomaly_n
+
     evaluation_result['true_positives_count'] = detected_events_n
     if events_n:
         evaluation_result['true_positives_rate'] = detected_events_n/events_n
     else:
         evaluation_result['true_positives_rate'] = None
-    evaluation_result['false_positives_count'] = false_positives_n
-    evaluation_result['false_positives_ratio'] = false_positives_n/len(anomaly_labels_df)
 
-    for event in event_type_counts.keys():
-        breakdown_key = event + '_true_positives_count'
-        if breakdown_key in breakdown_info.keys():
-            breakdown_info[event + '_true_positives_rate'] = breakdown_info[breakdown_key]/ event_type_counts[event]
-        else:
-            breakdown_info[breakdown_key] = 0
-            breakdown_info[event + '_true_positives_rate'] = 0
     if breakdown:
         return evaluation_result | breakdown_info
     else:
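[Reviewer note] The strategy change above is the heart of the events evaluation: an inserted event counts as detected if at least one flag fires anywhere inside its [start, stop] window, instead of counting flagged points one by one. A toy check of that window test; only the .loc[start:stop] slice and the double .any() reduction mirror the patch, everything else is made up:

    import pandas as pd

    index = pd.date_range('2025-01-01', periods=6, freq='h', tz='UTC')
    flags_df = pd.DataFrame({'temperature_anomaly': [0, 0, 1, 0, 0, 0],
                             'humidity_anomaly':    [0, 0, 0, 0, 0, 0]},
                            index=index)

    start, stop = index[1], index[3]  # one event's window
    # .loc[start:stop] is inclusive on both ends for a DatetimeIndex;
    # the first .any() reduces each column, the second reduces across columns.
    is_detected = flags_df.loc[start:stop].any().any()
    print(bool(is_detected))  # True: one point inside the window was flagged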
From eb6654c0c3c06b6f478ec3e45d43b58386da2757 Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Tue, 30 Dec 2025 16:35:56 +0100
Subject: [PATCH 23/28] Fix error in case of series ending with an anomaly

---
 ats/evaluators.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/ats/evaluators.py b/ats/evaluators.py
index 2787ad0..5c5f7e6 100644
--- a/ats/evaluators.py
+++ b/ats/evaluators.py
@@ -446,5 +446,8 @@ def _count_anomalous_events(anomaly_labels_df):
                 event_time_slots[key].append(previous_timestamp)
 
         previous_timestamp = timestamp
-        previous_anomaly_label = anomaly_labels_df.loc[previous_timestamp]
+        previous_anomaly_label = anomaly_label
+    # To manage series ending with an anomaly
+    if previous_anomaly_label is not None:
+        event_time_slots[key].append(previous_timestamp)
     return events_n, event_type_counts, event_time_slots

From aefe5c1536d0c18ff3b8b30bbd91fa8af40f57ec Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Tue, 30 Dec 2025 16:52:48 +0100
Subject: [PATCH 24/28] Manage series with adjoining anomalies

---
 ats/evaluators.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/ats/evaluators.py b/ats/evaluators.py
index 5c5f7e6..d823031 100644
--- a/ats/evaluators.py
+++ b/ats/evaluators.py
@@ -433,14 +433,16 @@ def _count_anomalous_events(anomaly_labels_df):
         if anomaly_label != previous_anomaly_label:
             if anomaly_label is not None:
                 events_n += 1
+                # To manage series with adjoining anomalies
+                if previous_anomaly_label is not None:
+                    event_time_slots[key].append(previous_timestamp)
                 key = anomaly_label
                 if anomaly_label in event_type_counts.keys():
                     event_type_counts[key] +=1
                     event_time_slots[key].append(timestamp)
                 else:
                     event_type_counts[key] =1
-                    event_time_slots[key] = []
-                    event_time_slots[key].append(timestamp)
+                    event_time_slots[key] = [timestamp]
 
             else:
                 event_time_slots[key].append(previous_timestamp)

From b0f0490b244efa0cf1b7f6fed7afc95973fff6bb Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Fri, 2 Jan 2026 11:58:47 +0100
Subject: [PATCH 25/28] Fix error in building the evaluation result dict

---
 ats/evaluators.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ats/evaluators.py b/ats/evaluators.py
index d823031..c73b2dd 100644
--- a/ats/evaluators.py
+++ b/ats/evaluators.py
@@ -405,8 +405,8 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre
             is_detected = flags_df.loc[start:stop].any().any()
             if is_detected:
                 detected_anomaly_n +=1
-        breakdown_info[anomaly + 'true_positives_count'] = detected_anomaly_n
-        breakdown_info[anomaly + 'true_positives_rate'] = detected_anomaly_n/anomaly_n
+        breakdown_info[anomaly + '_true_positives_count'] = detected_anomaly_n
+        breakdown_info[anomaly + '_true_positives_rate'] = detected_anomaly_n/anomaly_n
         detected_events_n += detected_anomaly_n
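[Reviewer note] With PATCHES 21, 23, 24 and 25 applied, the counting helper covers single-point events, adjoining events of different types, and a series that ends inside an anomaly. A runnable consolidation of the logic as it stands at this point (standalone names; the in-tree function is _count_anomalous_events()):

    import pandas as pd

    def count_events(labels):
        events_n = 0
        type_counts = {}
        time_slots = {}  # label -> [start_1, stop_1, start_2, stop_2, ...]
        prev_label = None
        prev_ts = None
        key = None
        for ts in labels.index:
            label = labels.loc[ts]
            if label != prev_label:
                if label is not None:
                    events_n += 1
                    # Adjoining events: close the previous one first
                    if prev_label is not None:
                        time_slots[key].append(prev_ts)
                    key = label
                    type_counts[key] = type_counts.get(key, 0) + 1
                    time_slots.setdefault(key, []).append(ts)
                else:
                    # The event ended at the previous sample
                    time_slots[key].append(prev_ts)
            prev_ts = ts
            prev_label = label
        # Series ending with an anomaly
        if prev_label is not None:
            time_slots[key].append(prev_ts)
        return events_n, type_counts, time_slots

    idx = pd.date_range('2025-06-10 14:00', periods=6, freq='h', tz='UTC')
    labels = pd.Series([None, 'anomaly_1', 'anomaly_1', None, None, 'anomaly_1'],
                       index=idx)
    print(count_events(labels))
    # 2 events; 'anomaly_1' -> [15:00, 16:00, 19:00, 19:00], two (start, stop) pairs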
From 1886ba62065033ed8d025bde683d1025ced594f7 Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Fri, 2 Jan 2026 12:43:31 +0100
Subject: [PATCH 26/28] Fix error in counting inserted anomalies

---
 ats/evaluators.py            |  7 ++--
 ats/tests/test_evaluators.py | 74 +++++++++++++++---------------------
 2 files changed, 34 insertions(+), 47 deletions(-)

diff --git a/ats/evaluators.py b/ats/evaluators.py
index c73b2dd..25d9f5e 100644
--- a/ats/evaluators.py
+++ b/ats/evaluators.py
@@ -376,6 +376,7 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre
     detected_events_n = 0
     false_positives_n = 0
+    total_events_n = 0
     events_n, event_type_counts, event_time_slots = _count_anomalous_events(anomaly_labels_df)
     evaluation_result = {}
     breakdown_info = {}
@@ -396,8 +397,8 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre
     evaluation_result['false_positives_ratio'] = false_positives_n/len(anomaly_labels_df)
 
     for anomaly, anomaly_time_slots in event_time_slots.items():
         anomaly_n = len(anomaly_time_slots)//2
-        events_n += anomaly_n
+        total_events_n += anomaly_n
         detected_anomaly_n = 0
         for i in range(0,len(anomaly_time_slots),2):
             start = anomaly_time_slots[i]
@@ -411,7 +412,7 @@ def _point_eval_with_events_strategy(flagged_timeseries_df,anomaly_labels_df,bre
 
     evaluation_result['true_positives_count'] = detected_events_n
     if events_n:
-        evaluation_result['true_positives_rate'] = detected_events_n/events_n
+        evaluation_result['true_positives_rate'] = detected_events_n/total_events_n
     else:
         evaluation_result['true_positives_rate'] = None

diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py
index a328144..3444d42 100644
--- a/ats/tests/test_evaluators.py
+++ b/ats/tests/test_evaluators.py
@@ -841,28 +841,12 @@ def test_event_eval_on_p_avg(self):
         model = PeriodicAverageAnomalyDetector()
         new_series = series.drop(columns=['anomaly_label'],inplace=False)
         p_avg_output = model.apply(new_series)
-
-        anomalous_timestamps = []
-        for timestamp in p_avg_output.index:
-            is_anomalous = p_avg_output.filter(like='anomaly').loc[timestamp].any()
-            anomaly_label = series.loc[timestamp,'anomaly_label']
-            if anomaly_label is not None and is_anomalous:
-                anomalous_timestamps.append(timestamp)
-
-        start = anomalous_timestamps[0]
-        sampling_interval = pd.Timedelta(minutes=60)
-        consecutive_timestamp_n = 0
-        for timestamp in anomalous_timestamps:
-            are_consecutive = (timestamp - start) == sampling_interval
-            if are_consecutive:
-                consecutive_timestamp_n += 1
-            start = timestamp
-
-        detected_anomalies = len(anomalous_timestamps) - consecutive_timestamp_n
-        evaluator = Evaluator(test_data = evaluation_dataset)
+        '''for timestamp in p_avg_output.index:
+            print(f"{p_avg_output.loc[timestamp,'anomaly']} {series.loc[timestamp,'anomaly_label']}")'''
+        evaluator = Evaluator(evaluation_dataset)
         evaluation_results = evaluator.evaluate(models={'p_avg':model},granularity='point',strategy='events',breakdown=False)
-        self.assertEqual(evaluation_results['p_avg']['true_positives_count'],detected_anomalies)
-        self.assertEqual(evaluation_results['p_avg']['true_positives_rate'],detected_anomalies/anomalous_events_n)
+        self.assertEqual(evaluation_results['p_avg']['true_positives_count'],1)
+        self.assertEqual(evaluation_results['p_avg']['true_positives_rate'],1)
 
@@ -882,28 +866,30 @@ def test_event_eval_on_nhar(self):
         self.assertEqual(events_by_type_n['step_mv'],1)
         self.assertEqual(anomalous_events_n,1)
 
-        model = PeriodicAverageAnomalyDetector()
+        model = NHARAnomalyDetector()
         new_series = series.drop(columns=['anomaly_label'],inplace=False)
-        p_avg_output = model.apply(new_series)
-
-        anomalous_timestamps = []
-        for timestamp in p_avg_output.index:
-            is_anomalous = p_avg_output.filter(like='anomaly').loc[timestamp].any()
-            anomaly_label = series.loc[timestamp,'anomaly_label']
-            if anomaly_label is not None and is_anomalous:
-                anomalous_timestamps.append(timestamp)
-
-        start = anomalous_timestamps[0]
-        sampling_interval = pd.Timedelta(minutes=60)
-        consecutive_timestamp_n = 0
-        for timestamp in anomalous_timestamps:
-            are_consecutive = (timestamp - start) == sampling_interval
-            if are_consecutive:
-                consecutive_timestamp_n += 1
-            start = timestamp
-
-        detected_anomalies = len(anomalous_timestamps) - consecutive_timestamp_n
-        evaluator = Evaluator(test_data = evaluation_dataset)
+        nhara_output = model.apply(new_series)
+        '''for timestamp in nhara_output.index:
+            print(f"{nhara_output.loc[timestamp,'anomaly']} {series.loc[timestamp,'anomaly_label']}")'''
+        evaluator = Evaluator(evaluation_dataset)
         evaluation_results = evaluator.evaluate(models={'nhar':model},granularity='point',strategy='events',breakdown=False)
-        self.assertEqual(evaluation_results['nhar']['true_positives_count'],detected_anomalies)
-        self.assertEqual(evaluation_results['nhar']['true_positives_rate'],detected_anomalies/anomalous_events_n)
+        self.assertEqual(evaluation_results['nhar']['true_positives_count'],1)
+        self.assertEqual(evaluation_results['nhar']['true_positives_rate'],1/1)
+
+    def test_actual_anomalies_count(self):
+        # model output
+        series = generate_timeseries_df(entries=6, variables=1)
+        series['value_anomaly'] = [0,1,1,1,1,1]
+
+        anomaly_labels = pd.Series([None, 'anomaly_1', 'anomaly_1', None, None,'anomaly_1'])
+        anomaly_labels.index = series.index
+        events_n, event_type_counts, event_time_slots = _count_anomalous_events(anomaly_labels)
+        self.assertEqual(len(event_time_slots['anomaly_1']),4)
+        self.assertEqual(event_time_slots['anomaly_1'][0],pd.Timestamp('2025-06-10 15:00:00+00:00'))
+        self.assertEqual(event_time_slots['anomaly_1'][1],pd.Timestamp('2025-06-10 16:00:00+00:00'))
+        self.assertEqual(event_time_slots['anomaly_1'][2],pd.Timestamp('2025-06-10 19:00:00+00:00'))
+        self.assertEqual(event_time_slots['anomaly_1'][3],pd.Timestamp('2025-06-10 19:00:00+00:00'))
+        '''for timestamp in anomaly_labels.index:
+            print(f'{timestamp}')
+        for key,value in event_time_slots.items():
+            print(f'{key}: {value}')'''
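[Reviewer note] The real fix in PATCH 26 is the denominator: events_n, as returned by _count_anomalous_events(), already holds the number of inserted events, so also accumulating the per-type pair counts into it doubled the total and halved true_positives_rate; the separate total_events_n accumulator avoids that. Note also that floor division (//) keeps the pair count integral, whereas the int(len(...))/2 variant that briefly appeared here would have turned the counts into floats. A toy check of the arithmetic, with stand-in values:

    anomaly_time_slots = ['t0', 't1', 't2', 't3']  # 2 (start, stop) pairs
    anomaly_n = len(anomaly_time_slots) // 2       # 2 events, kept as an int
    detected_anomaly_n = 2                         # both windows had a flag

    total_events_n = anomaly_n
    print(detected_anomaly_n / total_events_n)     # 1.0, not 0.5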
From c8b47c8de5ef4d3854d0c1b541cd3d2308c10541 Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Fri, 2 Jan 2026 19:21:24 +0100
Subject: [PATCH 27/28] Add test on event evaluation

---
 ats/tests/test_evaluators.py | 61 ++++++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py
index 3444d42..8d021e5 100644
--- a/ats/tests/test_evaluators.py
+++ b/ats/tests/test_evaluators.py
@@ -893,3 +893,64 @@ def test_actual_anomalies_count(self):
             print(f'{timestamp}')
         for key,value in event_time_slots.items():
             print(f'{key}: {value}')'''
+
+    def test_event_eval_on_dataset(self):
+        generator = HumiTempDatasetGenerator(sampling_interval='2h')
+        evaluation_dataset = generator.generate(
+            n_series = 2,
+            effects = [],
+            anomalies = ['spike_mv'],
+            time_span = '5D',
+            max_anomalies_per_series = 3,
+            anomalies_ratio = 1.0,
+            auto_repeat_anomalies=True,
+        )
+        series_1 = evaluation_dataset[0]
+        '''for timestamp in series_1.index:
+            print(f"{series_1.loc[timestamp,'anomaly_label']}")'''
+        # Anomalies in series_1
+        # 1 spike
+
+        series_2 = evaluation_dataset[1]
+        '''for timestamp in series_2.index:
+            print(f"{series_2.loc[timestamp,'anomaly_label']}")'''
+        # Anomalies in series_2
+        # 1 spike
+        anomaly_detectors = {}
+        anomaly_detectors['minmax'] = MinMaxAnomalyDetector()
+        anomaly_detectors['nhar'] = NHARAnomalyDetector()
+        evaluator = Evaluator(evaluation_dataset)
+        evaluation = evaluator.evaluate(anomaly_detectors,
+                                        granularity='point',
+                                        strategy='events',
+                                        breakdown=True)
+
+        new_series_1 = series_1.drop(columns=['anomaly_label'],inplace=False)
+        new_series_2 = series_2.drop(columns=['anomaly_label'],inplace=False)
+        new_dataset = [new_series_1,new_series_2]
+
+        minmax_output = _get_model_output(new_dataset,anomaly_detectors['minmax'])
+        '''for timestamp in minmax_output[0].index:
+            print(f"{series_1.loc[timestamp,'anomaly_label']} {minmax_output[0].loc[timestamp,'temperature_anomaly']} {minmax_output[0].loc[timestamp,'humidity_anomaly']}")'''
+        '''for timestamp in minmax_output[1].index:
+            print(f"{series_2.loc[timestamp,'anomaly_label']} {minmax_output[1].loc[timestamp,'temperature_anomaly']} {minmax_output[1].loc[timestamp,'humidity_anomaly']}")'''
+
+        self.assertEqual(evaluation['minmax']['true_positives_count'],2)
+        self.assertEqual(evaluation['minmax']['true_positives_rate'],1)
+        self.assertEqual(evaluation['minmax']['false_positives_count'],2)
+        self.assertEqual(evaluation['minmax']['false_positives_ratio'],1/60)
+        self.assertEqual(evaluation['minmax']['spike_mv_true_positives_count'],2)
+        self.assertEqual(evaluation['minmax']['spike_mv_true_positives_rate'],1)
+
+        nhar_output = _get_model_output(new_dataset,anomaly_detectors['nhar'])
+        '''for timestamp in nhar_output[0].index:
+            print(f"{series_1.loc[timestamp,'anomaly_label']} {nhar_output[0].loc[timestamp,'anomaly']} {timestamp}")
+        for timestamp in nhar_output[1].index:
+            print(f"{series_2.loc[timestamp,'anomaly_label']} {nhar_output[1].loc[timestamp,'anomaly']} {timestamp}")'''
+
+        self.assertEqual(evaluation['nhar']['true_positives_count'],2)
+        self.assertAlmostEqual(evaluation['nhar']['true_positives_rate'],1)
+        #self.assertEqual(evaluation['nhar']['false_positives_count'],5)
+        #self.assertAlmostEqual(evaluation['nhar']['false_positives_ratio'],5/120)
+        self.assertEqual(evaluation['nhar']['spike_mv_true_positives_count'],2)
+        self.assertAlmostEqual(evaluation['nhar']['spike_mv_true_positives_rate'],1)
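[Reviewer note] The expected ratios in the new test follow from the dataset shape: a 5-day span at a 2h sampling interval gives 60 points per series, and the evaluation appears to aggregate both series, so 2 false positives over 120 points is the asserted 1/60. A quick check of that arithmetic (shape derived from the generator arguments above, not from the module itself):

    points_per_series = 5 * 24 // 2        # 5 days at 2h sampling -> 60
    total_points = 2 * points_per_series   # two series -> 120
    false_positives = 2
    assert false_positives / total_points == 1 / 60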
From 49286157234824100f5e601152a9b38716e697d6 Mon Sep 17 00:00:00 2001
From: Agata Benvegna
Date: Tue, 6 Jan 2026 16:14:28 +0100
Subject: [PATCH 28/28] Use function "ensure_full_reproducibility()" to fix the seed

---
 ats/tests/test_evaluators.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/ats/tests/test_evaluators.py b/ats/tests/test_evaluators.py
index 8d021e5..ca5c80d 100644
--- a/ats/tests/test_evaluators.py
+++ b/ats/tests/test_evaluators.py
@@ -15,6 +15,7 @@
 from ..evaluators import _count_anomalous_events
 from ..evaluators import _point_eval_with_events_strategy
 from ats.dataset_generators import HumiTempDatasetGenerator
+from ..utils import ensure_full_reproducibility
 
 import unittest
 import pandas as pd
@@ -29,9 +30,7 @@ class TestEvaluators(unittest.TestCase):
 
     def setUp(self):
-
-        rnd.seed(123)
-        np.random.seed(123)
+        ensure_full_reproducibility(123)
 
         self.series1 = generate_timeseries_df(entries=5, variables=2)
         self.series1['anomaly_label'] = [None, 'anomaly_2', 'anomaly_1', None, 'anomaly_1']
 
@@ -950,7 +949,7 @@ def test_event_eval_on_dataset(self):
 
         self.assertEqual(evaluation['nhar']['true_positives_count'],2)
         self.assertAlmostEqual(evaluation['nhar']['true_positives_rate'],1)
-        #self.assertEqual(evaluation['nhar']['false_positives_count'],5)
-        #self.assertAlmostEqual(evaluation['nhar']['false_positives_ratio'],5/120)
+        self.assertEqual(evaluation['nhar']['false_positives_count'],6)
+        self.assertAlmostEqual(evaluation['nhar']['false_positives_ratio'],6/120)
         self.assertEqual(evaluation['nhar']['spike_mv_true_positives_count'],2)
         self.assertAlmostEqual(evaluation['nhar']['spike_mv_true_positives_rate'],1)
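[Reviewer note] End to end, the series leaves the events strategy usable from the public evaluate() entry point. A hedged usage sketch mirroring the calls the tests make; the generator and evaluator names come from the tests, while the detector import path is hypothetical and may differ in the package:

    from ats.dataset_generators import HumiTempDatasetGenerator
    from ats.evaluators import Evaluator                      # assumed import path
    from ats.anomaly_detectors import MinMaxAnomalyDetector   # hypothetical path

    generator = HumiTempDatasetGenerator(sampling_interval='2h')
    evaluation_dataset = generator.generate(n_series=2,
                                            effects=[],
                                            anomalies=['spike_mv'],
                                            time_span='5D',
                                            max_anomalies_per_series=3,
                                            anomalies_ratio=1.0,
                                            auto_repeat_anomalies=True)

    evaluator = Evaluator(evaluation_dataset)
    evaluation = evaluator.evaluate({'minmax': MinMaxAnomalyDetector()},
                                    granularity='point',
                                    strategy='events',
                                    breakdown=True)

    # Per-model dict with event-level metrics plus per-type breakdown keys:
    # true_positives_count, true_positives_rate, false_positives_count,
    # false_positives_ratio, spike_mv_true_positives_count, ...
    print(evaluation['minmax'])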