Skip to content

Commit

Permalink
added fixed ipc and apc, detectron comparaison and config files
Browse files Browse the repository at this point in the history
  • Loading branch information
lyna1404 committed Jul 22, 2024
1 parent b328161 commit 14b5375
Show file tree
Hide file tree
Showing 227 changed files with 740,068 additions and 34,680 deletions.
103 changes: 103 additions & 0 deletions MED3pa/detectron/comparaison.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
"""
Compares between two ``DetectronExperiment``.
"""
import json
import os
from typing import Any, Dict, List, Tuple, Type, Union

import numpy as np
from sklearn.model_selection import train_test_split

class DetectronComparison:
    """
    Compares the saved results of two ``DetectronExperiment`` runs.

    Each results directory is expected to contain the files produced by a
    DetectronExperiment run: ``experiment_config.json`` and
    ``detectron_results.json``.
    """
    def __init__(self, results1_path: str, results2_path: str) -> None:
        """
        Initializes the comparison with the two results directories.

        Args:
            results1_path (str): Path to the first experiment's results directory.
            results2_path (str): Path to the second experiment's results directory.

        Raises:
            ValueError: If the stored results are not from a DetectronExperiment,
                or if the two results are from different experiment types.
        """
        self.results1_path = os.path.abspath(results1_path)
        self.results2_path = os.path.abspath(results2_path)
        # Filled by compare_detectron_results() / compare_config().
        self.detectron_results_comparaison = {}
        self.config_file = {}
        self._check_experiment_name()

    def _check_experiment_name(self) -> None:
        """
        Validates that both results directories hold DetectronExperiment results
        of the same experiment type, by reading each ``experiment_config.json``.

        Raises:
            ValueError: If the first experiment is not a DetectronExperiment,
                or if the two experiments have different experiment names.
        """
        config_file_1 = os.path.join(self.results1_path, 'experiment_config.json')
        config_file_2 = os.path.join(self.results2_path, 'experiment_config.json')

        with open(config_file_1, 'r') as f1, open(config_file_2, 'r') as f2:
            config1 = json.load(f1)
            config2 = json.load(f2)

        if config1['experiment_name'] != "DetectronExperiment":
            raise ValueError("Only DetectronExperiment can be compared using this class")

        if config1['experiment_name'] != config2['experiment_name']:
            raise ValueError("The two results are not from the same experiment.")

    def compare_detectron_results(self):
        """
        Loads ``detectron_results.json`` from both results directories and stores
        them side by side in ``self.detectron_results_comparaison``.
        """
        combined = {}
        file_1 = os.path.join(self.results1_path, 'detectron_results.json')
        file_2 = os.path.join(self.results2_path, 'detectron_results.json')

        with open(file_1, 'r') as f1, open(file_2, 'r') as f2:
            detectron1 = json.load(f1)
            detectron2 = json.load(f2)

        combined['detectron_results1'] = detectron1
        combined['detectron_results2'] = detectron2

        self.detectron_results_comparaison = combined

    def compare_config(self):
        """
        Loads ``experiment_config.json`` from both results directories and stores
        the datasets, base model and experiment parameter sections side by side
        in ``self.config_file``.
        """
        combined = {}
        config_file_1 = os.path.join(self.results1_path, 'experiment_config.json')
        config_file_2 = os.path.join(self.results2_path, 'experiment_config.json')

        with open(config_file_1, 'r') as f1, open(config_file_2, 'r') as f2:
            config1 = json.load(f1)
            config2 = json.load(f2)

        combined['datasets1'] = config1["datasets"]
        combined['datasets2'] = config2["datasets"]

        combined['base_model1'] = config1["base_model"]
        combined['base_model2'] = config2["base_model"]

        combined['experiment_params1'] = config1["experiment_params"]
        combined['experiment_params2'] = config2["experiment_params"]

        self.config_file = combined

    def compare_experiments(self):
        """
        Runs the full comparison: detectron results and experiment configs.
        """
        self.compare_detectron_results()
        self.compare_config()

    def save(self, directory_path: str) -> None:
        """
        Saves the comparison results to a specified directory.

        Args:
            directory_path (str): The directory where the comparison results
                will be saved; created if it does not exist.
        """
        # Ensure the main directory exists
        os.makedirs(directory_path, exist_ok=True)

        global_comparaison_path = os.path.join(directory_path, 'detectron_results_comparaison.json')
        with open(global_comparaison_path, 'w') as f:
            json.dump(self.detectron_results_comparaison, f, indent=4)

        config_path = os.path.join(directory_path, 'experiment_config_comparaison.json')
        with open(config_path, 'w') as f:
            json.dump(self.config_file, f, indent=4)
14 changes: 9 additions & 5 deletions MED3pa/detectron/experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ def analyze_results(self, strategies: Union[str, List[str]]= ["enhanced_disagree
if isinstance(strategies, str):
strategies = [strategies] # Convert single strategy name to list

self.experiment_config['test_strategies'] = strategies
self.experiment_config['experiment_params']['test_strategies'] = strategies

for strategy_name in strategies:
if strategy_name not in self.strategy_mapping:
Expand Down Expand Up @@ -227,10 +227,7 @@ def run(datasets: DatasetsManager,

# save the detectron runs results
detectron_results = DetectronResult(cal_record, test_record)
experiment_config = {
'experiment_name': "DetectronExperiment",
'datasets':datasets.get_info(),
'base_model': base_model_manager.get_instance().get_info(),
detectron_params = {
'additional_training_params': training_params,
'samples_size': samples_size,
'cdcs_ensemble_size': ensemble_size,
Expand All @@ -239,6 +236,13 @@ def run(datasets: DatasetsManager,
'allow_margin': allow_margin,
'margin': margin
}
experiment_config = {
'experiment_name': "DetectronExperiment",
'datasets':datasets.get_info(),
'base_model': base_model_manager.get_instance().get_info(),
'experiment_params': detectron_params

}
detectron_results.set_experiment_config(experiment_config)
# return the Detectron results
return detectron_results
Expand Down
68 changes: 68 additions & 0 deletions MED3pa/med3pa/comparaison.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
"""
Compares between two experiments, either two ``Med3paExperiment`` or two ``Med3paDetectronExperiment``
"""
import json
import os
from typing import Any, Dict, List, Tuple, Type, Union
Expand All @@ -20,6 +23,8 @@ def __init__(self, results1_path: str, results2_path: str) -> None:
self.profiles_metrics_comparaison = {}
self.profiles_detectron_comparaison = {}
self.global_metrics_comparaison = {}
self.models_evaluation_comparaison = {}
self.config_file = {}
self.compare_profiles = False
self.compare_detectron = False
self._check_experiment_name()
Expand Down Expand Up @@ -161,6 +166,58 @@ def compare_global_metrics(self):
combined[dr_str]['metrics_dr_2'] = dr2[dr_str]

self.global_metrics_comparaison = combined

def compare_models_evaluation(self):
    """
    Loads ``test/models_evaluation.json`` from both results directories and
    stores the IPC and APC evaluation sections side by side in
    ``self.models_evaluation_comparaison``. A section is included only when
    it is present in both files.
    """
    path_first = os.path.join(self.results1_path, 'test', 'models_evaluation.json')
    path_second = os.path.join(self.results2_path, 'test', 'models_evaluation.json')

    with open(path_first, 'r') as first_file, open(path_second, 'r') as second_file:
        evaluation_first = json.load(first_file)
        evaluation_second = json.load(second_file)

    merged = {}
    for section in ("IPC_evaluation", "APC_evaluation"):
        # Only compare a section both experiments actually produced.
        if section in evaluation_first and section in evaluation_second:
            merged[section + '1'] = evaluation_first[section]
            merged[section + '2'] = evaluation_second[section]

    self.models_evaluation_comparaison = merged

def compare_config(self):
    """
    Loads ``experiment_config.json`` from both results directories and stores
    the matching sections (datasets, base model, APC/IPC models and experiment
    parameters) side by side in ``self.config_file``.
    """
    first_path = os.path.join(self.results1_path, 'experiment_config.json')
    second_path = os.path.join(self.results2_path, 'experiment_config.json')

    with open(first_path, 'r') as first_file, open(second_path, 'r') as second_file:
        first_config = json.load(first_file)
        second_config = json.load(second_file)

    merged = {}
    # Pair up each configuration section from the two experiments.
    for section in ("datasets", "base_model", "apc_model", "ipc_model", "experiment_params"):
        merged[section + '1'] = first_config[section]
        merged[section + '2'] = second_config[section]

    self.config_file = merged

def compare_experiments(self):
"""
Expand All @@ -172,6 +229,9 @@ def compare_experiments(self):
self.compare_profiles_metrics()
if self.compare_detectron:
self.compare_profiles_detectron_results()

self.compare_config()
self.compare_models_evaluation()

def save(self, directory_path: str) -> None:
"""
Expand All @@ -187,6 +247,14 @@ def save(self, directory_path: str) -> None:
with open(global_comparaison_path, 'w') as f:
json.dump(self.global_metrics_comparaison, f, indent=4)

config_path = os.path.join(directory_path, 'experiment_config_comparaison.json')
with open(config_path, 'w') as f:
json.dump(self.config_file, f, indent=4)

evaluation_path = os.path.join(directory_path, 'models_evaluation_comparaison.json')
with open(evaluation_path, 'w') as f:
json.dump(self.models_evaluation_comparaison, f, indent=4)

if self.profiles_detectron_comparaison is not {} and self.compare_detectron:
profiles_detectron_path = os.path.join(directory_path, 'profiles_detectron_comparaison.json')
with open(profiles_detectron_path, 'w') as f:
Expand Down
Loading

0 comments on commit 14b5375

Please sign in to comment.