1+ """
2+ optimized_dict = optimize(dopt, objective)
3+ optimize(dopt::DictParameters, objective, setup_fn = dopt.setup_function;
4+ grad_tol = 1e-6,
5+ obj_change_tol = 1e-6,
6+ max_it = 25,
7+ opt_fun = missing,
8+ maximize = false,
9+ simulator = missing,
10+ config = missing,
11+ solution_history = false,
12+ backend_arg = (
13+ use_sparsity = false,
14+ di_sparse = true,
15+ single_step_sparsity = false,
16+ do_prep = true,
17+ ),
18+ kwarg...
19+ )
20+
Optimize the parameters defined in a [`DictParameters`](@ref) object using the
provided objective function. At least one parameter must be declared free with
[`free_optimization_parameter!`](@ref) before calling the optimizer.

# Arguments
- `dopt::DictParameters`: Container with the parameters to optimize
- `objective`: The objective function to minimize (or maximize)
- `setup_fn`: Function that sets up the optimization problem. Defaults to `dopt.setup_function`

# Keyword Arguments
- `grad_tol`: Gradient tolerance for the stopping criterion
- `obj_change_tol`: Tolerance on the change in objective value for the stopping criterion
- `max_it`: Maximum number of iterations
- `opt_fun`: Optional custom optimization function. If missing, L-BFGS will be used
- `maximize`: Set to `true` to maximize the objective instead of minimizing it
- `simulator`: Optional simulator object used in the forward simulations
- `config`: Optional configuration for the setup
- `solution_history`: If `true`, stores all intermediate solutions
- `backend_arg`: Options for the autodiff backend:
  - `use_sparsity`: Enable sparsity detection for the objective function
  - `di_sparse`: Use sparse differentiation
  - `single_step_sparsity`: Enable single-step sparsity detection (valid when the sparsity pattern does not change between timesteps)
  - `do_prep`: Perform the preparation step

# Returns
The optimized parameters as a dictionary.

# Notes
- The function stores the optimization history and the optimized parameters in the input `dopt` object.
- If `solution_history` is `true`, intermediate solutions are stored in `dopt.history.solutions`.
- The default optimization algorithm is L-BFGS with box constraints.
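
# Example
A minimal sketch of a typical workflow. The parameter names, the `setup_fn` and
`objective` functions, and the assumption that a `DictParameters` can be built
from a `Dict` together with a setup function are illustrative, not a confirmed
API:

```julia
prm = Dict("porosity" => 0.2, "multiplier" => 1.0)
# setup_fn is assumed to build the forward problem from the current parameters
dopt = DictParameters(prm, setup_fn)
# At least one parameter must be freed before optimizing
free_optimization_parameter!(dopt, "porosity", rel_min = 0.5, rel_max = 2.0)
# objective is assumed to return a scalar value to be minimized
optimized_dict = optimize(dopt, objective, max_it = 50)
```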
52+ """
function optimize(dopt::DictParameters, objective, setup_fn = dopt.setup_function;
        grad_tol = 1e-6,
        obj_change_tol = 1e-6,
    # ... (remaining arguments and function body elided in this excerpt)
    return prm_out
end

"""
    parameters_gradient(dopt::DictParameters, objective, setup_fn = dopt.setup_function)

Compute the gradient of the objective function with respect to the parameters
defined in the `DictParameters` object. The gradient is returned as a
dictionary with the same structure as the input parameters, where each entry
holds the gradient values for the corresponding parameter. Gradients are only
computed with respect to free parameters.
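
# Example
A brief sketch, reusing the hypothetical `dopt` and `objective` from the
[`optimize`](@ref) example:

```julia
grad = parameters_gradient(dopt, objective)
# The result mirrors the structure of dopt.parameters;
# only free parameters have entries
g = grad["porosity"]
```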
"""
function parameters_gradient(dopt::DictParameters, objective, setup_fn = dopt.setup_function;
        simulator = missing,
        config = missing,
    # ... (remaining arguments and function body elided in this excerpt)
    return out
end

"""
    freeze_optimization_parameter!(dopt, "parameter_name")
    freeze_optimization_parameter!(dopt, ["dict_name", "parameter_name"])
    freeze_optimization_parameter!(dopt::DictParameters, parameter_name, val = missing)

Freeze an optimization parameter in the `DictParameters` object. This removes
the parameter from the optimization targets and sets its value to `val` if
provided. Any limits/lumping/scaling settings for the parameter are also
removed.
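
# Example
A short sketch with hypothetical parameter names:

```julia
# Freeze at the current value:
freeze_optimization_parameter!(dopt, "porosity")
# Freeze a nested parameter and pin it to a new value:
freeze_optimization_parameter!(dopt, ["rock", "permeability"], 1e-13)
```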
"""
function freeze_optimization_parameter!(dopt::DictParameters, parameter_name, val = missing)
    parameter_name = convert_key(parameter_name)
    if !ismissing(val)
        # ... (elided in this excerpt)
    end
    delete!(dopt.parameter_targets, parameter_name)
end

"""
    free_optimization_parameter!(dopt, "parameter_name", rel_min = 0.01, rel_max = 100.0)
    free_optimization_parameter!(dopt, ["dict_name", "parameter_name"], abs_min = -8.0, abs_max = 7.0)

Free an existing parameter for optimization in the `DictParameters` object.
This allows the parameter to be optimized through a call to [`optimize`](@ref).

# Nesting structures
If your `DictParameters` has a nested structure, you can use a vector of
strings or symbols to specify the parameter name, e.g. `["dict_name",
"parameter_name"]` to access the parameter located at
`["dict_name"]["parameter_name"]`.

# Setting limits
The limits can be set using the following keyword arguments:
- `abs_min`: Absolute minimum value for the parameter. If not set, no absolute
  minimum will be applied.
- `abs_max`: Absolute maximum value for the parameter. If not set, no absolute
  maximum will be applied.
- `rel_min`: Relative minimum value for the parameter. If not set, no relative
  minimum will be applied.
- `rel_max`: Relative maximum value for the parameter. If not set, no relative
  maximum will be applied.

For any of these entries it is possible to pass either a scalar or an array. If
an array is passed, it must have the same size as the parameter being set.

Note that if `dopt.strict` is set to `true`, at least one of the upper or lower
bounds must be set for each free parameter. If `dopt.strict` is set to `false`,
the bounds are optional and the `DictParameters` object can still be used to
compute sensitivities, but the built-in optimization routine assumes that
finite limits are set for all parameters.

# Other keyword arguments
- `initial`: Initial value for the parameter. If not set, the current value in
  `dopt.parameters` will be used.
- `scaler=missing`: Optional scaler for the parameter. If not set, no scaling
  will be applied. Available scalers are `:log` and `:exp`. The scaler is
  applied to the parameter values before they are passed to the optimizer.
- `lumping=missing`: Optional lumping array for the parameter. If not set, no
  lumping will be applied. The lumping array must have the same size as the
  parameter and contain positive integers. It defines groups of indices that
  are lumped together, i.e. the same value will be used for all indices in the
  same group. The array must contain all integers from 1 to its maximum value,
  and all indices in the same group must have the same value in the initial
  parameter, otherwise an error will be thrown.
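
# Example
A hedged sketch using hypothetical parameter names and values:

```julia
# Free with relative bounds around the current value:
free_optimization_parameter!(dopt, "porosity", rel_min = 0.5, rel_max = 2.0)
# Nested parameter with absolute bounds and a log scaler:
free_optimization_parameter!(dopt, ["rock", "permeability"],
    abs_min = 1e-16, abs_max = 1e-11, scaler = :log)
# Lump a length-four parameter into two groups (indices 1-2 and 3-4 each share
# one value; assumes both entries in each group start from the same value):
free_optimization_parameter!(dopt, "layer_multipliers",
    abs_min = 0.1, abs_max = 10.0, lumping = [1, 1, 2, 2])
```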
"""
function free_optimization_parameter!(dopt::DictParameters, parameter_name;
        initial = missing,
        abs_min = -Inf,
    # ... (remaining arguments and function body elided in this excerpt)
end

function free_optimization_parameters!(dopt::DictParameters, targets = all_keys(…))
    # ... (body elided in this excerpt)
    return dopt
end

"""
    set_optimization_parameter!(dopt::DictParameters, parameter_name, value)

Set a specific optimization parameter in the `DictParameters` object. This
updates the value of the parameter in the `dopt.parameters` dictionary.
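
# Example
A brief sketch with hypothetical names:

```julia
set_optimization_parameter!(dopt, "porosity", 0.25)
# Nested parameters can be addressed with a vector of keys:
set_optimization_parameter!(dopt, ["rock", "permeability"], 5e-14)
```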
"""
function set_optimization_parameter!(dopt::DictParameters, parameter_name, value)
    set_nested_dict_value!(dopt.parameters, parameter_name, value)
end