Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docs/source/how-to-guides/acquire-baseline.rst
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ Here we configure an agent with three DOFs and two objectives. The second object
sensors=[readable1, readable2],
dofs=dofs,
objectives=objectives,
evaluation=evaluation_function,
evaluation_function=evaluation_function,
outcome_constraints=outcome_constraints,
)

Expand Down
2 changes: 1 addition & 1 deletion docs/source/how-to-guides/attach-data-to-experiments.rst
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ The ``DOF`` and ``Objective`` names must match the keys in the data dictionaries
sensors=[readable1, readable2],
dofs=dofs,
objectives=objectives,
evaluation=evaluation_function,
evaluation_function=evaluation_function,
)

Ingest your data
Expand Down
2 changes: 1 addition & 1 deletion docs/source/how-to-guides/custom-generation-strategies.rst
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ Configure an agent
sensors=[readable1, readable2],
dofs=dofs,
objectives=objectives,
evaluation=evaluation_function,
evaluation_function=evaluation_function,
)

Configure a generation strategy
Expand Down
2 changes: 1 addition & 1 deletion docs/source/how-to-guides/set-dof-constraints.rst
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,6 @@ Configure an agent with DOF constraints
sensors=[],
dofs=[dof1, dof2, dof3],
objectives=[objective],
evaluation=evaluation_function,
evaluation_function=evaluation_function,
dof_constraints=[constraint],
)
2 changes: 1 addition & 1 deletion docs/source/how-to-guides/set-outcome-constraints.rst
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,6 @@ Configure an agent with outcome constraints
sensors=[],
dofs=dofs,
objectives=objectives,
evaluation=evaluation_function,
evaluation_function=evaluation_function,
outcome_constraints=[constraint],
)
4 changes: 2 additions & 2 deletions docs/source/how-to-guides/tiled-databroker.rst
Original file line number Diff line number Diff line change
Expand Up @@ -189,7 +189,7 @@ Configure an agent
sensors=[motor_x],
dofs=[dof1],
objectives=[objective],
evaluation=TiledEvaluation(tiled_client=tiled_client),
evaluation_function=TiledEvaluation(tiled_client=tiled_client),
)
RE(agent.optimize())
server.close()
Expand All @@ -205,6 +205,6 @@ or for Databroker:
sensors=[motor_x],
dofs=[dof1],
objectives=[objective],
evaluation=DatabrokerEvaluation(db=db),
evaluation_function=DatabrokerEvaluation(db=db),
)
RE(agent_db.optimize())
6 changes: 3 additions & 3 deletions docs/source/how-to-guides/use-ophyd-devices.rst
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ The ``name`` attribute of the signal will be used as the name of the :class:`blo
sensors=[some_readable_signal],
dofs=[dof],
objectives=[Objective(name="result", minimize=False)],
evaluation=lambda uid, suggestions: [{"result": 0.1}],
evaluation_function=lambda uid, suggestions: [{"result": 0.1}],
)

Ophyd-async devices
Expand All @@ -48,7 +48,7 @@ Once again, the ``name`` attribute of the signal will be used as the name of the
sensors=[some_readable_signal],
dofs=[dof],
objectives=[Objective(name="result", minimize=False)],
evaluation=lambda uid, suggestions: [{"result": 0.1}],
evaluation_function=lambda uid, suggestions: [{"result": 0.1}],
)

Using your devices in custom acquisition plans
Expand Down Expand Up @@ -83,7 +83,7 @@ If you use a custom acquisition plan by implementing the :class:`blop.protocols.
dofs=[dof],
acquisition_plan=custom_acquire,
objectives=[Objective(name="result", minimize=False)],
evaluation=lambda uid, suggestions: [{"result": 0.1, "_id": 0}],
evaluation_function=lambda uid, suggestions: [{"result": 0.1, "_id": 0}],
)

RE(agent.optimize())
2 changes: 1 addition & 1 deletion docs/source/tutorials/simple-experiment.md
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ agent = Agent(
sensors=sensors,
dofs=dofs,
objectives=objectives,
evaluation=Himmelblau2DEvaluation(tiled_client=tiled_client),
evaluation_function=Himmelblau2DEvaluation(tiled_client=tiled_client),
name="simple-experiment",
description="A simple experiment optimizing the Himmelblau function",
)
Expand Down
2 changes: 1 addition & 1 deletion docs/source/tutorials/xrt-kb-mirrors.md
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ agent = Agent(
sensors=[beamline.det],
dofs=dofs,
objectives=objectives,
evaluation=DetectorEvaluation(tiled_client),
evaluation_function=DetectorEvaluation(tiled_client),
name="xrt-blop-demo",
description="A demo of the Blop agent with XRT simulated beamline",
experiment_type="demo",
Expand Down
2 changes: 1 addition & 1 deletion docs/wip/qserver-experiment.md
Original file line number Diff line number Diff line change
Expand Up @@ -303,7 +303,7 @@ agent = QueueserverAgent(
sensors=sensors, # The list of sensors to read from
dofs=dofs, # The list of DOFs to search over
objectives=objectives, # The list of objectives to be optimized
evaluation= DetectorEvaluation(tiled_client), # The function to create objective function values
evaluation_function=DetectorEvaluation(tiled_client),  # The function to create objective function values
acquisition_plan="acquire",  # The name of the plan in the Queueserver environment
Queueserver_control_addr="tcp://localhost:60615",
Queueserver_info_addr="tcp://localhost:60625",
Expand Down
93 changes: 65 additions & 28 deletions src/blop/ax/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from bluesky.utils import MsgGenerator

from ..plans import acquire_baseline, optimize
from ..protocols import AcquisitionPlan, EvaluationFunction, OptimizationProblem, Sensor
from ..protocols import AcquisitionPlan, Actuator, EvaluationFunction, OptimizationProblem, Sensor
from .dof import DOF, DOFConstraint
from .objective import Objective, OutcomeConstraint, to_ax_objective_str
from .optimizer import AxOptimizer
Expand All @@ -33,7 +33,7 @@ class Agent:
The degrees of freedom that the agent can control, which determine the search space.
objectives : Sequence[Objective]
The objectives which the agent will try to optimize.
evaluation : EvaluationFunction
evaluation_function : EvaluationFunction
The function to evaluate acquired data and produce outcomes.
acquisition_plan : AcquisitionPlan | None, optional
The acquisition plan to use for acquiring data from the beamline. If not provided,
Expand All @@ -42,6 +42,8 @@ class Agent:
Constraints on DOFs to refine the search space.
outcome_constraints : Sequence[OutcomeConstraint] | None, optional
Constraints on outcomes to be satisfied during optimization.
checkpoint_path : str | None, optional
The path to the checkpoint file to save the optimizer's state to.
**kwargs : Any
Additional keyword arguments to configure the Ax experiment.

Expand All @@ -67,42 +69,75 @@ def __init__(
sensors: Sequence[Sensor],
dofs: Sequence[DOF],
objectives: Sequence[Objective],
evaluation: EvaluationFunction,
evaluation_function: EvaluationFunction,
acquisition_plan: AcquisitionPlan | None = None,
dof_constraints: Sequence[DOFConstraint] | None = None,
outcome_constraints: Sequence[OutcomeConstraint] | None = None,
checkpoint_path: str | None = None,
**kwargs: Any,
):
self._sensors = sensors
self._dofs = {dof.parameter_name: dof for dof in dofs}
self._objectives = {obj.name: obj for obj in objectives}
self._evaluation_function = evaluation
self._actuators = [dof.actuator for dof in dofs if dof.actuator is not None]
self._evaluation_function = evaluation_function
self._acquisition_plan = acquisition_plan
self._dof_constraints = dof_constraints
self._outcome_constraints = outcome_constraints
self._optimizer = AxOptimizer(
parameters=[dof.to_ax_parameter_config() for dof in dofs],
objective=to_ax_objective_str(objectives),
parameter_constraints=[constraint.ax_constraint for constraint in self._dof_constraints]
if self._dof_constraints
else None,
outcome_constraints=[constraint.ax_constraint for constraint in self._outcome_constraints]
if self._outcome_constraints
parameter_constraints=[constraint.ax_constraint for constraint in dof_constraints] if dof_constraints else None,
outcome_constraints=[constraint.ax_constraint for constraint in outcome_constraints]
if outcome_constraints
else None,
checkpoint_path=checkpoint_path,
**kwargs,
)

@classmethod
def from_checkpoint(
    cls,
    checkpoint_path: str,
    actuators: Sequence[Actuator],
    sensors: Sequence[Sensor],
    evaluation_function: EvaluationFunction,
    acquisition_plan: AcquisitionPlan | None = None,
) -> "Agent":
    """
    Reconstruct an agent from a previously saved optimizer checkpoint.

    .. note::

        A checkpoint persists only the optimizer state, so the remaining
        arguments cannot be reliably validated against the restored
        optimizer configuration.

    Parameters
    ----------
    checkpoint_path : str
        Path of the checkpoint file to restore the optimizer from.
    actuators : Sequence[Actuator]
        Movable objects used to control the beamline through the Bluesky
        RunEngine. A subset of the actuators' names must match the names
        of suggested parameterizations.
    sensors : Sequence[Sensor]
        Readable objects used to acquire data from the beamline through
        the Bluesky RunEngine.
    evaluation_function : EvaluationFunction
        Callable that evaluates data from a Bluesky run and produces
        outcomes.
    acquisition_plan : AcquisitionPlan, optional
        Bluesky plan used to acquire data. A default plan is used when
        omitted.
    """
    # Bypass __init__: the optimizer is rebuilt from the checkpoint rather
    # than from DOF/objective configuration.
    agent = object.__new__(cls)
    agent._optimizer = AxOptimizer.from_checkpoint(checkpoint_path)
    agent._actuators = actuators
    agent._sensors = sensors
    agent._evaluation_function = evaluation_function
    agent._acquisition_plan = acquisition_plan
    return agent

@property
def sensors(self) -> Sequence[Sensor]:
    """The sensors the agent reads to acquire data from the beamline."""
    return self._sensors

@property
def dofs(self) -> Sequence[DOF]:
return list(self._dofs.values())

@property
def objectives(self) -> Sequence[Objective]:
return list(self._objectives.values())
def actuators(self) -> Sequence[Actuator]:
return self._actuators

@property
def evaluation_function(self) -> EvaluationFunction:
Expand All @@ -112,18 +147,14 @@ def evaluation_function(self) -> EvaluationFunction:
def acquisition_plan(self) -> AcquisitionPlan | None:
return self._acquisition_plan

@property
def dof_constraints(self) -> Sequence[DOFConstraint] | None:
return self._dof_constraints

@property
def outcome_constraints(self) -> Sequence[OutcomeConstraint] | None:
return self._outcome_constraints

@property
def ax_client(self) -> Client:
return self._optimizer.ax_client

@property
def checkpoint_path(self) -> str | None:
    """Path of the optimizer's checkpoint file, or ``None`` if unset."""
    return self._optimizer.checkpoint_path

def to_optimization_problem(self) -> OptimizationProblem:
"""
Construct an optimization problem from the agent.
Expand All @@ -144,7 +175,7 @@ def to_optimization_problem(self) -> OptimizationProblem:
"""
return OptimizationProblem(
optimizer=self._optimizer,
actuators=[dof.actuator for dof in self.dofs if dof.actuator is not None],
actuators=self.actuators,
sensors=self.sensors,
evaluation_function=self.evaluation_function,
acquisition_plan=self.acquisition_plan,
Expand Down Expand Up @@ -299,3 +330,9 @@ def plot_objective(
*args,
**kwargs,
)

def checkpoint(self) -> None:
    """
    Save the agent's state to a JSON file.

    Only the optimizer's state is persisted; see ``from_checkpoint`` for
    restoring an agent from the resulting file.
    """
    self._optimizer.checkpoint()
43 changes: 41 additions & 2 deletions src/blop/ax/optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,10 @@

from ax import ChoiceParameterConfig, Client, RangeParameterConfig

from ..protocols import ID_KEY, Optimizer
from ..protocols import ID_KEY, Checkpointable, Optimizer


class AxOptimizer(Optimizer):
class AxOptimizer(Optimizer, Checkpointable):
"""
An optimizer that uses Ax as the backend for optimization and experiment tracking.

Expand All @@ -22,6 +22,8 @@ class AxOptimizer(Optimizer):
The parameter constraints to apply to the optimization.
outcome_constraints : Sequence[str] | None, optional
The outcome constraints to apply to the optimization.
checkpoint_path : str | None, optional
The path to the checkpoint file to save the optimizer's state to.
client_kwargs : dict[str, Any] | None, optional
Additional keyword arguments to configure the Ax client.
**kwargs : Any
Expand All @@ -39,10 +41,12 @@ def __init__(
objective: str,
parameter_constraints: Sequence[str] | None = None,
outcome_constraints: Sequence[str] | None = None,
checkpoint_path: str | None = None,
client_kwargs: dict[str, Any] | None = None,
**kwargs: Any,
):
self._parameter_names = [parameter.name for parameter in parameters]
self._checkpoint_path = checkpoint_path
self._client = Client(**(client_kwargs or {}))
self._client.configure_experiment(
parameters=parameters,
Expand All @@ -54,6 +58,33 @@ def __init__(
outcome_constraints=outcome_constraints,
)

@classmethod
def from_checkpoint(cls, checkpoint_path: str) -> "AxOptimizer":
    """
    Restore an optimizer from a checkpoint file.

    Parameters
    ----------
    checkpoint_path : str
        Path of the JSON checkpoint file to load the optimizer from.

    Returns
    -------
    AxOptimizer
        A new optimizer whose Ax client state matches the checkpoint.
    """
    restored_client = Client.load_from_json_file(checkpoint_path)
    # Bypass __init__: the experiment already exists inside the restored
    # client, so it must not be configured a second time.
    optimizer = object.__new__(cls)
    optimizer._client = restored_client
    optimizer._checkpoint_path = checkpoint_path
    # NOTE(review): reaches into the Ax client's private `_experiment`
    # attribute — assumes its `parameters` mapping is keyed by parameter
    # name; confirm against the Ax API.
    optimizer._parameter_names = list(restored_client._experiment.parameters.keys())
    return optimizer

@property
def checkpoint_path(self) -> str | None:
    """The path the optimizer's state is saved to, or ``None`` if unset."""
    return self._checkpoint_path

@property
def ax_client(self) -> Client:
return self._client
Expand Down Expand Up @@ -126,3 +157,11 @@ def ingest(self, points: list[dict]) -> None:
elif trial_idx == "baseline":
trial_idx = self._client.attach_baseline(parameters=parameters)
self._client.complete_trial(trial_index=trial_idx, raw_data=outcomes)

def checkpoint(self) -> None:
    """
    Save the optimizer's state to a JSON file.

    Raises
    ------
    ValueError
        If no checkpoint path was configured for this optimizer.
    """
    path = self.checkpoint_path
    if not path:
        raise ValueError("Checkpoint path is not set. Please set a checkpoint path when initializing the optimizer.")
    self._client.save_to_json_file(path)
6 changes: 3 additions & 3 deletions src/blop/ax/qserver_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ class BlopQserverAgent(BlopAxAgent):
The degrees of freedom that the agent can control, which determine the search space.
objectives : Sequence[Objective]
The objectives which the agent will try to optimize.
evaluation : EvaluationFunction
evaluation_function : EvaluationFunction
The function to evaluate acquired data and produce outcomes.
acquisition_plan : str, optional
The name of the plan on the queueserver
Expand Down Expand Up @@ -121,7 +121,7 @@ def __init__(
sensors: Sequence[Sensor],
dofs: Sequence[DOF],
objectives: Sequence[Objective],
evaluation: EvaluationFunction = None,
evaluation_function: EvaluationFunction = None,
acquisition_plan: str = "acquire",
dof_constraints: Sequence[DOFConstraint] = None,
qserver_control_addr: str = "tcp://localhost:60615",
Expand All @@ -134,7 +134,7 @@ def __init__(
sensors=sensors,
dofs=dofs,
objectives=objectives,
evaluation=evaluation,
evaluation_function=evaluation_function,
acquisition_plan=acquisition_plan,
dof_constraints=dof_constraints,
**kwargs,
Expand Down
Loading
Loading