From c920fff8c76310d1056082c45269f6f172a40e46 Mon Sep 17 00:00:00 2001
From: Bernard Knueven
Date: Thu, 29 Apr 2021 19:00:34 -0600
Subject: [PATCH] initial pass at PTDF extension

---
 examples/uc/ptdf_ext.py         | 246 ++++++++++++++++++++++++++++++++
 examples/uc/uc_cylinders.py     |  28 +++-
 examples/uc/uc_funcs.py         |  75 ++++++++--
 mpisppy/extensions/extension.py |   9 ++
 mpisppy/spopt.py                |   2 +-
 mpisppy/utils/sputils.py        |  10 +-
 mpisppy/utils/vanilla.py        |   2 +
 mpisppy/utils/xhat_eval.py      |  31 ++--
 8 files changed, 368 insertions(+), 35 deletions(-)
 create mode 100644 examples/uc/ptdf_ext.py

diff --git a/examples/uc/ptdf_ext.py b/examples/uc/ptdf_ext.py
new file mode 100644
index 000000000..c95cec728
--- /dev/null
+++ b/examples/uc/ptdf_ext.py
@@ -0,0 +1,246 @@
+# Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
+# This software is distributed under the 3-clause BSD License.
+
+import pyomo.environ as pyo
+from pyomo.common.collections import ComponentMap, ComponentSet
+from mpisppy.extensions.extension import Extension
+from mpisppy.utils.sputils import is_persistent
+
+import egret.common.lazy_ptdf_utils as lpu
+from egret.models.unit_commitment import (_lazy_ptdf_check_violations,
+                                          _lazy_ptdf_log_terminate_on_violations,
+                                          _lazy_ptdf_warmstart_copy_violations,
+                                          _lazy_ptdf_solve,
+                                          _lazy_ptdf_normal_terminatation,
+                                          _lazy_ptdf_violation_adder,
+                                          )
+import logging
+from egret.common.log import logger
+
+logger.setLevel(logging.ERROR)
+
+class PTDFExtension(Extension):
+    ''' Extension that adds EGRET's lazy PTDF (power transfer distribution
+        factor) transmission constraint generation to unit commitment
+        subproblems.
+
+    Args:
+        spopt_object (SPOpt): The SPOpt object for the current model
+        **kwargs: options controlling the iteration limits, the LP cleanup
+            phase, and verbosity (see __init__ for defaults)
+    '''
+    def __init__(self, spopt_object, **kwargs):
+        # attach self.opt
+        super().__init__(spopt_object)
+
+        self.pre_lp_iteration_limit = kwargs.get('pre_lp_iteration_limit', 100)
+        self.lp_iteration_limit = kwargs.get('lp_iteration_limit', 100)
+        self.lp_cleanup_phase = kwargs.get('lp_cleanup_phase', True)
+        self.iteration_limit = kwargs.get('iteration_limit', 100000)
+        self.verbose = kwargs.get('verbose', False)
+
+        if self.verbose:
+            logger.setLevel(logging.INFO)
+
+        self.initial_pass_complete = ComponentSet()
+        self.bundling = self.opt.bundling
+
+        self.vars_to_load = ComponentMap()
+        self.time_periods = ComponentMap()
+        self.bundle_conditional_probability = ComponentMap()
+
+    def pre_solve(self, subproblem):
+        if subproblem not in self.initial_pass_complete:
+            self.spoke_name = None
+            if self.opt.spcomm is not None:
+                self.spoke_name = self.opt.spcomm.__class__.__name__
+            self._initial_pass(subproblem)
+            self.initial_pass_complete.add(subproblem)
+
+    def post_solve(self, subproblem, results):
+        scenario_blocks = self._get_scenario_blocks(subproblem)
+        termination_cond, results, iterations = \
+                self._mip_pass(subproblem, scenario_blocks, results)
+        return results
+
+    def _get_scenario_blocks(self, subproblem):
+        if self.bundling:
+            return tuple( subproblem.component(sname) \
+                          for sname in subproblem._ef_scenario_names )
+        else:
+            return ( subproblem, )
+
+    def _initial_pass(self, subproblem):
+        # get vars_to_load for later
+        scenario_blocks = self._get_scenario_blocks(subproblem)
+        if is_persistent(subproblem._solver_plugin):
+            subproblem_vars_to_load = []
+            for s in scenario_blocks:
+                for t in s.TimePeriods:
+                    b = s.TransmissionBlock[t]
+                    assert isinstance(b.p_nw, pyo.Var)
+                    subproblem_vars_to_load.extend(b.p_nw.values())
+
+            self.vars_to_load[subproblem] = subproblem_vars_to_load
+        else:
+            self.vars_to_load[subproblem] = None
+
+        for s in scenario_blocks:
+            self.time_periods[s] = s.TimePeriods
+            if self.bundling:
+                self.bundle_conditional_probability[s] = \
+                        s._mpisppy_data.bundle_conditional_probability
+            else:
+                self.bundle_conditional_probability[s] = 1.
+
+        self.tee = ("tee-rank0-solves" in self.opt.options
+                    and self.opt.options['tee-rank0-solves']
+                    and self.opt.cylinder_rank == 0
+                    )
+
+        if (self.pre_lp_iteration_limit + self.lp_iteration_limit) == 0:
+            return
+
+        # relax the initial subproblems
+        for s in scenario_blocks:
+            lpu.uc_instance_binary_relaxer(s, subproblem._solver_plugin)
+
+        # solve the model
+        for k,val in self.opt.current_solver_options.items():
+            subproblem._solver_plugin.options[k] = val
+
+        if is_persistent(subproblem._solver_plugin):
+            results = subproblem._solver_plugin.solve(subproblem, tee=self.tee, save_results=False, load_solutions=False)
+            subproblem._solver_plugin.load_vars(self.vars_to_load[subproblem])
+        else:
+            results = subproblem._solver_plugin.solve(subproblem, tee=self.tee, load_solutions=False)
+            subproblem.solutions.load_from(results)
+
+        if self.pre_lp_iteration_limit > 0:
+            lp_warmstart_termination_cond, results, lp_warmstart_iterations = \
+                    self._pre_lp_pass(subproblem, scenario_blocks)
+
+        if self.lp_iteration_limit > 0:
+            lp_termination_cond, results, lp_iterations = \
+                    self._lp_pass(subproblem, scenario_blocks)
+
+        if self.lp_cleanup_phase:
+            tot_removed = 0
+            for s in scenario_blocks:
+                for t,b in s.TransmissionBlock.items():
+                    tot_removed += lpu.remove_inactive(b, subproblem._solver_plugin,
+                                                       t, prepend_str=f"[LP cleanup phase on rank {self.opt.global_rank}] ")
+            logger.info(f"[LP cleanup phase on rank {self.opt.global_rank} for {self.spoke_name}] removed {tot_removed} inactive flow constraint(s)")
+        # enforce binaries in subproblems
+        for s in scenario_blocks:
+            lpu.uc_instance_binary_enforcer(s, subproblem._solver_plugin)
+
+        # mpi-sppy will solve the MIP
+        return
+
+    def _do_pass(self, subproblem, scenario_blocks, time_periods, vars_to_load,
+                 prepend_str, iteration_limit, add_all_lazy_violations=False,
+                 results=None, pre_lp_cleanup=False):
+
+        persistent_solver = is_persistent(subproblem._solver_plugin)
+        for i in range(iteration_limit):
+            flows, viol_lazy = ComponentMap(), ComponentMap()
+            terminate_this_iter, all_viol_in_model = ComponentMap(), ComponentMap()
+            for s in scenario_blocks:
+                flows[s], viol_num, mon_viol_num, viol_lazy[s] = \
+                        _lazy_ptdf_check_violations(s, s.model_data, time_periods[s],
+                                                    s._ptdf_options, prepend_str)
+
+                terminate_this_iter[s], all_viol_in_model[s] = \
+                        _lazy_ptdf_log_terminate_on_violations(viol_num, mon_viol_num,
+                                                               i, prepend_str)
+
+            all_viol_in_model = all(all_viol_in_model.values())
+            terminate_this_iter = all(terminate_this_iter.values())
+            if terminate_this_iter and not add_all_lazy_violations:
+                if pre_lp_cleanup:
+                    results = self._pre_lp_cleanup(subproblem, scenario_blocks,
+                                                   persistent_solver, time_periods, prepend_str)
+                return _lazy_ptdf_normal_terminatation(all_viol_in_model, results, i, prepend_str)
+
+            for s in scenario_blocks:
+                _lazy_ptdf_violation_adder(s, s.model_data, flows[s], viol_lazy[s], time_periods[s],
+                                           subproblem._solver_plugin, s._ptdf_options, prepend_str, i,
+                                           obj_multi=self.bundle_conditional_probability[s])
+
+            if terminate_this_iter and add_all_lazy_violations:
+                if pre_lp_cleanup:
+                    results = self._pre_lp_cleanup(subproblem, scenario_blocks,
+                                                   persistent_solver, time_periods, prepend_str)
+                return _lazy_ptdf_normal_terminatation(all_viol_in_model, results, i, prepend_str)
+
+            results = _lazy_ptdf_solve(subproblem,
+                                       subproblem._solver_plugin, persistent_solver,
+                                       symbolic_solver_labels=False, solver_tee=self.tee,
+                                       vars_to_load=vars_to_load, solve_method_options=None)
+        else:
+            if pre_lp_cleanup:
+                results = self._pre_lp_cleanup(subproblem, scenario_blocks,
+                                               persistent_solver, time_periods, prepend_str)
+            return lpu.LazyPTDFTerminationCondition.ITERATION_LIMIT, results, i
+
+    def _pre_lp_cleanup(self, subproblem, scenario_blocks, persistent_solver, time_periods, prepend_str):
+        if persistent_solver:
+            # unpack lpu._load_pf_slacks into a single call to load_vars
+            vars_to_load = []
+            for s in scenario_blocks:
+                for t in time_periods[s]:
+                    b = s.TransmissionBlock[t]
+                    vars_to_load.extend(b.pf_slack_pos.values())
+                    vars_to_load.extend(b.pf_slack_neg.values())
+                    vars_to_load.extend(b.pfi_slack_pos.values())
+                    vars_to_load.extend(b.pfi_slack_neg.values())
+                    vars_to_load.extend(b.pfc_slack_pos.values())
+                    vars_to_load.extend(b.pfc_slack_neg.values())
+            if vars_to_load:
+                subproblem._solver_plugin.load_vars(vars_to_load)
+
+        for s in scenario_blocks:
+            _lazy_ptdf_warmstart_copy_violations(s, s.model_data, time_periods[s],
+                                                 subproblem._solver_plugin, s._ptdf_options, prepend_str,
+                                                 obj_multi=self.bundle_conditional_probability[s])
+
+        results = _lazy_ptdf_solve(subproblem, subproblem._solver_plugin, persistent_solver,
+                                   symbolic_solver_labels=False, solver_tee=self.tee,
+                                   vars_to_load=self.vars_to_load[subproblem],
+                                   solve_method_options=None)
+        return results
+
+    def _pre_lp_pass(self, subproblem, scenario_blocks):
+        vars_to_load_t_subset = ComponentMap()
+        t_subset = ComponentMap()
+        persistent_solver = is_persistent(subproblem._solver_plugin)
+
+        if persistent_solver:
+            vars_to_load = []
+        else:
+            vars_to_load = None
+        for s in scenario_blocks:
+            max_demand_time = max(s.TotalDemand, key=s.TotalDemand.__getitem__)
+            t_subset[s] = [max_demand_time,]
+            if persistent_solver:
+                transmission_block = s.TransmissionBlock[max_demand_time]
+                assert isinstance(transmission_block.p_nw, pyo.Var)
+                vars_to_load.extend(transmission_block.p_nw.values())
+
+        return self._do_pass(subproblem, scenario_blocks, t_subset, vars_to_load,
+                             prepend_str=f"[LP warmstart phase on rank {self.opt.global_rank} for {self.spoke_name}] ",
+                             iteration_limit=self.pre_lp_iteration_limit,
+                             add_all_lazy_violations=False,
+                             results=None, pre_lp_cleanup=True)
+
+    def _lp_pass(self, subproblem, scenario_blocks):
+        return self._do_pass(subproblem, scenario_blocks,
+                             self.time_periods, self.vars_to_load[subproblem],
+                             prepend_str=f"[LP phase on rank {self.opt.global_rank} for {self.spoke_name}] ",
+                             iteration_limit=self.lp_iteration_limit,
+                             add_all_lazy_violations=True,
+                             results=None, pre_lp_cleanup=False)
+
+    def _mip_pass(self, subproblem, scenario_blocks, results):
+        return self._do_pass(subproblem, scenario_blocks,
+                             self.time_periods, self.vars_to_load[subproblem],
+                             prepend_str=f"[MIP phase on rank {self.opt.global_rank} for {self.spoke_name}] ",
+                             iteration_limit=self.iteration_limit,
+                             add_all_lazy_violations=False,
+                             results=results, pre_lp_cleanup=False)
diff --git a/examples/uc/uc_cylinders.py b/examples/uc/uc_cylinders.py
index 8d37d0564..d59acf4bc 100644
--- a/examples/uc/uc_cylinders.py
+++ b/examples/uc/uc_cylinders.py
@@ -20,6 +20,7 @@
 from mpisppy.utils import vanilla
 from mpisppy.extensions.cross_scen_extension import CrossScenarioExtension
+from ptdf_ext import PTDFExtension

 def _parse_args():
     parser = baseparsers.make_parser("uc_cylinders")
@@ -30,6 +31,7 @@ def _parse_args():
     parser = baseparsers.xhatlooper_args(parser)
parser = baseparsers.xhatshuffle_args(parser) parser = baseparsers.cross_scenario_cuts_args(parser) + parser = baseparsers.mip_options(parser) parser.add_argument("--ph-mipgaps-json", help="json file with mipgap schedule (default None)", dest="ph_mipgaps_json", @@ -47,6 +49,12 @@ def _parse_args(): action='store_true', dest='xhat_closest_tree', default=False) + parser.add_argument("--add-contingency-constraints", + help="Use EGRET to monitor all possible" + " non-disconnecting contingencies (default False)", + action='store_true', + dest='add_contingency_constraints', + default=False) args = parser.parse_args() return args @@ -65,15 +73,19 @@ def main(): fixer_tol = args.fixer_tol with_cross_scenario_cuts = args.with_cross_scenario_cuts - scensavail = [3,5,10,25,50,100] + scensavail = [3,4,5,10,25,50,100] if num_scen not in scensavail: raise RuntimeError("num-scen was {}, but must be in {}".\ format(num_scen, scensavail)) scenario_creator_kwargs = { - "scenario_count": num_scen, - "path": str(num_scen) + "scenarios_r1", - } + "scenario_count": num_scen, + "path": str(num_scen) + "scenarios_r1", + "add_contingency_constraints": args.add_contingency_constraints, + } + if num_scen == 4: + scenario_creator_kwargs["path"] = str(num_scen) + "scenarios_rtsgmlc" + scenario_creator = uc.scenario_creator scenario_denouement = uc.scenario_denouement all_scenario_names = [f"Scenario{i+1}" for i in range(num_scen)] @@ -90,7 +102,7 @@ def main(): rho_setter = rho_setter) # Extend and/or correct the vanilla dictionary - ext_classes = [Gapper] + ext_classes = [Gapper, PTDFExtension] if with_fixer: ext_classes.append(Fixer) if with_cross_scenario_cuts: @@ -134,24 +146,30 @@ def main(): # FWPH spoke if with_fwph: fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke["opt_kwargs"]["extensions"] = PTDFExtension # Standard Lagrangian bound spoke if with_lagrangian: lagrangian_spoke = vanilla.lagrangian_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs, rho_setter = rho_setter) + lagrangian_spoke["opt_kwargs"]["extensions"] = PTDFExtension # xhat looper bound spoke if with_xhatlooper: xhatlooper_spoke = vanilla.xhatlooper_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatlooper_spoke["opt_kwargs"]["extensions"] = PTDFExtension # xhat shuffle bound spoke if with_xhatshuffle: xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatshuffle_spoke["opt_kwargs"]["extensions"] = PTDFExtension # cross scenario cut spoke if with_cross_scenario_cuts: cross_scenario_cut_spoke = vanilla.cross_scenario_cut_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + cross_scenario_cut_spoke["opt_kwargs"]["extensions"] = PTDFExtension + list_of_spoke_dict = list() if with_fwph: diff --git a/examples/uc/uc_funcs.py b/examples/uc/uc_funcs.py index 67cb79a3e..d48b99587 100644 --- a/examples/uc/uc_funcs.py +++ b/examples/uc/uc_funcs.py @@ -15,7 +15,8 @@ import egret.models.unit_commitment as uc -def pysp_instance_creation_callback(scenario_name, path=None, scenario_count=None): +def pysp_instance_creation_callback(scenario_name, path=None, scenario_count=None, + add_contingency_constraints=False): """ Notes: - The uc_cylinders.py code has a `scenario_count` kwarg that gets passed to @@ -24,22 +25,53 @@ def pysp_instance_creation_callback(scenario_name, path=None, scenario_count=Non #print("Building instance for scenario =", scenario_name) scennum = sputils.extract_num(scenario_name) - 
uc_model_params = pdp.get_uc_model()
+    root_node_dat = os.path.isfile(os.path.join(path,"RootNode.dat"))
+    scenario_1_dat = os.path.isfile(os.path.join(path,"Scenario_1.dat"))
+    scenario_1_json = os.path.isfile(os.path.join(path,"Scenario_1.json")) or\
+                      os.path.isfile(os.path.join(path,"Scenario_1.json.gz"))

-    scenario_data = DataPortal(model=uc_model_params)
-    scenario_data.load(filename=path+os.sep+"RootNode.dat")
-    scenario_data.load(filename=path+os.sep+"Node"+str(scennum)+".dat")
-    scenario_params = uc_model_params.create_instance(scenario_data,
-                                                      report_timing=False,
-                                                      name=scenario_name)
+    assert not (root_node_dat and scenario_1_dat)
+    assert not (root_node_dat and scenario_1_json)
+    assert not (scenario_1_dat and scenario_1_json)

-    scenario_md = md.ModelData(pdp.create_model_data_dict_params(scenario_params, keep_names=True))
+    if root_node_dat:
+        uc_model_params = pdp.get_uc_model()
+
+        scenario_data = DataPortal(model=uc_model_params)
+        scenario_data.load(filename=path+os.sep+"RootNode.dat")
+        scenario_data.load(filename=path+os.sep+"Node"+str(scennum)+".dat")
+
+        scenario_params = uc_model_params.create_instance(scenario_data,
+                                                          report_timing=False,
+                                                          name=scenario_name)
+
+        scenario_md = md.ModelData(pdp.create_model_data_dict_params(scenario_params, keep_names=True))
+
+    elif scenario_1_dat:
+        uc_model_params = pdp.get_uc_model()
+
+        scenario_data = DataPortal(model=uc_model_params)
+        scenario_data.load(filename=path+os.sep+"Scenario_"+str(scennum)+".dat")
+
+        scenario_params = uc_model_params.create_instance(scenario_data,
+                                                          report_timing=False,
+                                                          name=scenario_name)
+
+        scenario_md = md.ModelData(pdp.create_model_data_dict_params(scenario_params, keep_names=True))
+
+    elif scenario_1_json:
+        scenario_md = md.ModelData(os.path.join(path, f"Scenario_{scennum}.json"))
+
+    else:
+        raise RuntimeError(f"Can't determine stochastic UC data type in directory {path}")
+
+    if add_contingency_constraints:
+        _add_all_contingency_constraints(scenario_md)

     ## TODO: use the "power_balance_constraints" for now. In the future, networks should be
     ##       handled with a custom callback -- also consider other base models
-    scenario_instance = uc.create_tight_unit_commitment_model(scenario_md,
-                            network_constraints='power_balance_constraints')
+    scenario_instance = uc.create_tight_unit_commitment_model(scenario_md)

     # hold over string attribute from Egret,
     # causes warning with LShaped/Benders
@@ -47,17 +79,17 @@ def pysp_instance_creation_callback(scenario_name, path=None, scenario_count=Non

     return scenario_instance

-def scenario_creator(scenario_name, scenario_count=None, path=None):
-    return pysp2_callback(scenario_name, scenario_count=scenario_count, path=path)
+def scenario_creator(scenario_name, **kwargs):
+    return pysp2_callback(scenario_name, **kwargs)

-def pysp2_callback(scenario_name, scenario_count=None, path=None):
+def pysp2_callback(scenario_name, **kwargs):
     ''' The callback needs to create an instance and then attach
         the PySP nodes to it in a list _mpisppy_node_list ordered by stages.
         Optionally attach _PHrho.
         Standard (1.0) PySP signature for now...
    '''
    instance = pysp_instance_creation_callback(
-        scenario_name, scenario_count=scenario_count, path=path,
+        scenario_name, **kwargs,
    )

    # now attach the one and only tree node (ROOT is a reserved word)
@@ -248,3 +280,16 @@ def scenario_tree_solution_writer( solution_dir, sname, scenario, bundling ):
     file_name = os.path.join(solution_dir, sname+'.json')
     mds = uc._save_uc_results(scenario, relaxed=False)
     mds.write(file_name)
+
+def _add_all_contingency_constraints(md):
+    from egret.model_library.transmission.tx_calc import (construct_connection_graph,
+                                                          get_N_minus_1_branches,
+                                                         )
+
+    branches = dict(md.elements(element_type='branch'))
+    mapping_bus_to_idx = { bus_n: i for i, bus_n in enumerate(md.data['elements']['bus'].keys()) }
+    graph = construct_connection_graph(branches, mapping_bus_to_idx)
+    all_connected_contingencies = get_N_minus_1_branches(graph, branches, mapping_bus_to_idx)
+
+    md.data['elements']['contingency'] = \
+        { bn : {'branch_contingency': bn} for bn in all_connected_contingencies }
diff --git a/mpisppy/extensions/extension.py b/mpisppy/extensions/extension.py
index 4830eed33..5e282a9b8 100644
--- a/mpisppy/extensions/extension.py
+++ b/mpisppy/extensions/extension.py
@@ -97,6 +97,15 @@ def __init__(self, ph, ext_classes):
             name = constr.__name__
             self.extdict[name] = constr(ph)

+    def pre_solve(self, subproblem):
+        for lobject in self.extdict.values():
+            lobject.pre_solve(subproblem)
+
+    def post_solve(self, subproblem, results):
+        for lobject in self.extdict.values():
+            results = lobject.post_solve(subproblem, results)
+        return results
+
     def pre_iter0(self):
         for lobject in self.extdict.values():
             lobject.pre_iter0()
diff --git a/mpisppy/spopt.py b/mpisppy/spopt.py
index 173ccb4c6..4cba96c70 100644
--- a/mpisppy/spopt.py
+++ b/mpisppy/spopt.py
@@ -130,7 +130,7 @@ def _vb(msg):
                             np.max(all_set_objective_times)))

         if self.extensions is not None:
-            results = self.extobject.pre_solve(s)
+            self.extobject.pre_solve(s)

         solve_start_time = time.time()
         if (solver_options):
diff --git a/mpisppy/utils/sputils.py b/mpisppy/utils/sputils.py
index d9cfda55c..bdfb4e0b5 100644
--- a/mpisppy/utils/sputils.py
+++ b/mpisppy/utils/sputils.py
@@ -380,7 +380,7 @@ def _create_EF_from_scen_dict(scen_dict, EF_name=None,
         obj_func = scenario_objs[0] # Select the first objective
         try:
             EF_instance.EF_Obj.expr += scenario_instance._mpisppy_probability * obj_func.expr
-            EF_instance._mpisppy_probability += scenario_instance._mpisppy_probability
+            EF_instance._mpisppy_probability += scenario_instance._mpisppy_probability
         except AttributeError as e:
             raise AttributeError("Scenario " + sname + " has no specified "
                         "probability. Specify a value for the attribute "
@@ -389,6 +389,14 @@ def _create_EF_from_scen_dict(scen_dict, EF_name=None,
     # appropriate scaling of EFs used as bundles.
     EF_instance.EF_Obj.expr /= EF_instance._mpisppy_probability

+    for sn in EF_instance._ef_scenario_names:
+        scenario_mpisppy_data = EF_instance.component(sn).component('_mpisppy_data')
+        if scenario_mpisppy_data is None:
+            break # this EF is not a bundle, so no scenario will have _mpisppy_data
+        # otherwise, this EF is a bundle
+        scenario_mpisppy_data.bundle_conditional_probability = \
+            EF_instance.component(sn)._mpisppy_probability / EF_instance._mpisppy_probability
+
     # For each node in the scenario tree, we need to collect the
     # nonanticipative vars and create the constraints for them,
     # which we do using a reference variable.
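The bundle_conditional_probability attached above is each scenario's probability renormalized by its bundle's total probability; PTDFExtension later hands it to EGRET as obj_multi so that lazily-added objective terms are weighted correctly inside a bundled EF. A minimal sketch of the arithmetic, using hypothetical scenario names and probabilities (illustrative, not data from this patch):

# Sketch of the conditional-probability arithmetic; the scenario names
# and probabilities below are illustrative assumptions.
scenario_probs = {"Scenario1": 0.2, "Scenario2": 0.3}

# corresponds to EF_instance._mpisppy_probability, accumulated scenario by scenario
bundle_prob = sum(scenario_probs.values())  # 0.5

# each scenario's probability conditional on being in this bundle
conditional = {name: p / bundle_prob for name, p in scenario_probs.items()}
assert conditional == {"Scenario1": 0.4, "Scenario2": 0.6}
assert abs(sum(conditional.values()) - 1.0) < 1e-12
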
diff --git a/mpisppy/utils/vanilla.py b/mpisppy/utils/vanilla.py index 4c12e9cca..a0e2265f2 100644 --- a/mpisppy/utils/vanilla.py +++ b/mpisppy/utils/vanilla.py @@ -51,6 +51,8 @@ def shared_options(args): shoptions["iter0_solver_options"]["mipgap"] = args.iter0_mipgap if _hasit(args, "iterk_mipgap"): shoptions["iterk_solver_options"]["mipgap"] = args.iterk_mipgap + if _hasit(args, "bundles_per_rank"): + shoptions["bundles_per_rank"] = args.bundles_per_rank return shoptions diff --git a/mpisppy/utils/xhat_eval.py b/mpisppy/utils/xhat_eval.py index ec5e4c8fc..37fcf720c 100644 --- a/mpisppy/utils/xhat_eval.py +++ b/mpisppy/utils/xhat_eval.py @@ -30,16 +30,19 @@ class Xhat_Eval(mpisppy.spopt.SPOpt): """ See SPOpt for list of args. """ def __init__( - self, - options, - all_scenario_names, - scenario_creator, - scenario_denouement=None, - all_nodenames=None, - mpicomm=None, - scenario_creator_kwargs=None, - variable_probability=None, - ): + self, + options, + all_scenario_names, + scenario_creator, + scenario_denouement=None, + all_nodenames=None, + mpicomm=None, + extensions=None, + extension_kwargs=None, + scenario_creator_kwargs=None, + variable_probability=None, + E1_tolerance=1e-5 + ): super().__init__( options, @@ -48,14 +51,16 @@ def __init__( scenario_denouement=scenario_denouement, all_nodenames=all_nodenames, mpicomm=mpicomm, + extensions=extensions, + extension_kwargs=extension_kwargs, scenario_creator_kwargs=scenario_creator_kwargs, variable_probability=variable_probability, + E1_tolerance=E1_tolerance ) self.verbose = self.options['verbose'] - - #TODO: CHANGE THIS AFTER UPDATE - self.PH_extensions = None + self.current_solver_options = self.options['iter0_solver_options'] if 'iter0_solver_options' in self.options else ( + self.options['solver_options'] if 'solver_options' in self.options else dict() ) self.subproblem_creation(self.verbose) self._create_solvers()
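
Taken together, the extension.py and spopt.py changes give every extension two new hooks: pre_solve(subproblem), called just before each subproblem solve, and post_solve(subproblem, results), which may inspect or replace the solver results and must return them; the xhat_eval.py change forwards extensions through to SPOpt so the spokes can use them too. A minimal sketch of a custom extension built on these hooks follows; the SolveCounter class is a hypothetical illustration, not part of mpi-sppy:

# Hypothetical example extension using the new pre_solve/post_solve hooks;
# SolveCounter is illustrative and not part of mpi-sppy.
from mpisppy.extensions.extension import Extension

class SolveCounter(Extension):
    def __init__(self, spopt_object):
        super().__init__(spopt_object)   # attaches self.opt
        self.solve_count = 0

    def pre_solve(self, subproblem):
        # runs immediately before each subproblem solve
        self.solve_count += 1

    def post_solve(self, subproblem, results):
        # may inspect or replace the solver results; must return them
        return results

As in uc_cylinders.py above, such a class is attached to a cylinder by placing it in the spoke's options, e.g. xhatshuffle_spoke["opt_kwargs"]["extensions"] = SolveCounter.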