Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Store everest results in ERT storage #9161

Open
wants to merge 4 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 11 additions & 20 deletions src/ert/run_models/everest_run_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@
import shutil
import threading
import time
from dataclasses import dataclass
from pathlib import Path
from types import TracebackType
from typing import (
Expand All @@ -28,7 +27,6 @@
TypedDict,
)

import seba_sqlite.sqlite_storage
from ropt.enums import EventType, OptimizerExitCode
from ropt.plan import BasicOptimizer, Event
from seba_sqlite import SqliteStorage
Expand All @@ -37,6 +35,7 @@
from ert.ensemble_evaluator import EvaluatorServerConfig
from ert.storage import open_storage
from everest.config import EverestConfig
from everest.everest_storage import EverestStorage, OptimalResult
from everest.optimizer.everest2ropt import everest2ropt
from everest.simulator import Simulator
from everest.simulator.everest_to_ert import everest_to_ert_config
Expand Down Expand Up @@ -291,24 +290,6 @@ class OptimizerCallback(Protocol):
def __call__(self) -> str | None: ...


@dataclass
class OptimalResult:
    """The optimal point found by an optimization run.

    Attributes mirror what seba-sqlite reports for its optimum:
    the batch index that produced it, the control vector at that
    point, and the corresponding total objective value.
    """

    # Index of the batch that produced the optimum.
    batch: int
    # Control values at the optimal point.
    controls: List[Any]
    # Total objective function value at the optimal point.
    total_objective: float

    @staticmethod
    def from_seba_optimal_result(
        o: Optional["seba_sqlite.sqlite_storage.OptimalResult"] = None,
    ) -> Optional["OptimalResult"]:
        """Convert a seba-sqlite optimal result to an ``OptimalResult``.

        Returns ``None`` when *o* is ``None`` (no optimum was found).
        """
        if o is None:
            return None

        # Note: the original annotation `-> "OptimalResult" | None` raises
        # TypeError at definition time (str | None is unsupported) unless
        # postponed evaluation of annotations is enabled; Optional[...] is
        # safe in either case.
        return OptimalResult(
            batch=o.batch, controls=o.controls, total_objective=o.total_objective
        )


class EverestRunModel(BaseRunModel):
def __init__(
self,
Expand Down Expand Up @@ -420,6 +401,14 @@ def run_experiment(
# Initialize the ropt optimizer:
optimizer = self._create_optimizer(simulator)

self.ever_storage = EverestStorage(
Path(self.everest_config.optimization_output_dir)
/ "dakota"
/ "OPT_DEFAULT.out",
output_dir=Path(self.everest_config.optimization_output_dir),
)
self.ever_storage.observe_optimizer(optimizer)

# The SqliteStorage object is used to store optimization results from
# Seba in an sqlite database. It reacts directly to events emitted by
# Seba and is not called by Everest directly. The stored results are
Expand All @@ -437,6 +426,8 @@ def run_experiment(
self._result = OptimalResult.from_seba_optimal_result(
seba_storage.get_optimal_result() # type: ignore
)
optimal_result_from_everstorage = self.ever_storage.get_optimal_result()
assert self._result == optimal_result_from_everstorage

if self._monitor_thread is not None:
self._monitor_thread.stop()
Expand Down
5 changes: 5 additions & 0 deletions src/ert/storage/local_ensemble.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@

from ert.storage.local_experiment import LocalExperiment
from ert.storage.local_storage import LocalStorage
from everest.everest_storage import JoinedBatchDataFrames

Check failure on line 28 in src/ert/storage/local_ensemble.py

View workflow job for this annotation

GitHub Actions / type-checking (3.12)

Module "everest.everest_storage" has no attribute "JoinedBatchDataFrames"; maybe "BatchDataFrames"?

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -512,6 +513,10 @@

return [_find_state(i) for i in range(self.ensemble_size)]

def save_optimization_results(self, data: "JoinedBatchDataFrames") -> None:
    """Persist optimizer batch dataframes under this ensemble's storage.

    Creates an ``optimization_results`` directory directly inside the
    ensemble mount point (raising ``FileExistsError`` if it already
    exists, so results are never silently overwritten) and asks *data*
    to serialize itself into that folder.

    NOTE(review): CI reports that ``everest.everest_storage`` has no
    attribute ``JoinedBatchDataFrames`` (maybe ``BatchDataFrames``?) —
    confirm the type name; the annotation is quoted so it is never
    evaluated at runtime.
    """
    output_dir = self.mount_point / "optimization_results"
    # Path.mkdir() matches os.mkdir semantics: parent must exist,
    # target must not.
    output_dir.mkdir()
    data.write_to_folder(output_dir)

def _load_single_dataset(
self,
group: str,
Expand Down
Loading
Loading