diff --git a/src/everest/everest_storage.py b/src/everest/everest_storage.py
index 07b4548cba3..0ba023cacdb 100644
--- a/src/everest/everest_storage.py
+++ b/src/everest/everest_storage.py
@@ -20,6 +20,7 @@
 from ropt.plan import BasicOptimizer, Event
 from ropt.results import FunctionResults, GradientResults, convert_to_maximize
 from seba_sqlite import CalculationResult
+from seba_sqlite.sqlite_storage import OptimalResult
 
 from everest.simulator import Simulator
@@ -507,6 +508,42 @@ def get_optimal_result(self):
         # then return it
         # print("yo")
 
+        has_merit = "merit_value" in self._dataframes.batches[0].objective.columns
+
+        if has_merit:
+            # Minimize merit
+            sorted_batches = list(self._dataframes.batches)
+            sorted_batches.sort(
+                key=lambda b: b.objective.select(polars.col("merit_value").min()).item()
+            )
+            batch = sorted_batches[0]
+        else:
+            # Maximize objective
+            sorted_batches = list(self._dataframes.batches)
+            sorted_batches.sort(
+                key=lambda b: -b.objective.select(
+                    polars.col("weighted_objective_value").sample(n=1)
+                ).item()
+            )
+            batch = sorted_batches[0]
+
+        return OptimalResult(
+            batch=batch.batch_id,
+            controls={
+                k: batch.objective[k].sample(1).item()
+                for k in self._dataframes.initial_values["control_name"]
+            },
+            total_objective=batch.objective.select(
+                polars.col("weighted_objective_value").sample(n=1)
+            ).item(),
+            expected_objectives={
+                d["objective_name"]: d["objective_value"]
+                for d in batch.objective[
+                    ["objective_name", "objective_value"]
+                ].to_dicts()
+            },
+        )
+
         snapshot = {}  # SebaSnapshot(self._output_dir)
         optimum = next(
             (
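
Note (not part of the diff): the batch-selection logic above can also be written deterministically. The sketch below is only an illustration of that idea, not the module's API: `BatchData` and `select_best_batch` are hypothetical names, the toy data is made up, and it assumes each batch exposes a polars `objective` DataFrame with `merit_value` / `weighted_objective_value` columns where `weighted_objective_value` is constant within a batch. It uses `min()`/`max()` aggregations instead of the `sample(n=1)` calls in the diff, which pick a random row.

```python
from dataclasses import dataclass

import polars as pl


@dataclass
class BatchData:
    """Hypothetical stand-in for one stored batch (illustrative only)."""

    batch_id: int
    objective: pl.DataFrame


def select_best_batch(batches: list[BatchData]) -> tuple[BatchData, float]:
    """Pick the optimal batch deterministically.

    If the optimizer reported merit values, lower is better, so take the
    batch containing the smallest merit value.  Otherwise fall back to the
    batch with the largest weighted objective value.
    """
    has_merit = "merit_value" in batches[0].objective.columns
    if has_merit:
        best = min(
            batches,
            key=lambda b: b.objective.select(pl.col("merit_value").min()).item(),
        )
    else:
        best = max(
            batches,
            key=lambda b: b.objective.select(
                pl.col("weighted_objective_value").max()
            ).item(),
        )
    # Report the chosen batch's total objective; max() is deterministic and
    # equals the single value when the column is constant within a batch.
    total = best.objective.select(pl.col("weighted_objective_value").max()).item()
    return best, total


if __name__ == "__main__":
    # Two toy batches with only a weighted objective (no merit values).
    batches = [
        BatchData(0, pl.DataFrame({"weighted_objective_value": [0.2, 0.2]})),
        BatchData(1, pl.DataFrame({"weighted_objective_value": [0.7, 0.7]})),
    ]
    best, total = select_best_batch(batches)
    print(best.batch_id, total)  # -> 1 0.7
```

Using `min()`/`max()` with a key avoids sorting a copy of the batch list and keeps both the chosen batch and the reported total objective reproducible across runs.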