Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Parametrize CVXPY problem #11

Closed
wants to merge 3 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# ncOPT

This repository is designed for solving constrained optimization problems where objective and/or constraint functions are Pytorch modules. It is mainly intended for optimization with pre-trained networks, but could be used for other purposes.
This repository is designed for solving constrained optimization problems where objective and/or constraint functions are PyTorch modules. It is mainly intended for optimization with pre-trained networks, but could be used for other purposes.

As main solver, this repository contains a `Python` implementation of the SQP-GS (*Sequential Quadratic Programming - Gradient Sampling*) algorithm by Curtis and Overton [1].

Expand Down Expand Up @@ -38,12 +38,12 @@ The solver can be called via
problem = SQPGS(f, gI, gE)
problem.solve()
```
The three main arguments, called `f`, `gI` and `gE`, are the objective, the inequality and equality constaints respectively. Each argument should be passed as a list of instances of `ncopt.functions.ObjectiveOrConstraint` (see example below).
The three main arguments, called `f`, `gI` and `gE`, are the objective, the inequality and equality constraints respectively. Each argument should be passed as a list of instances of `ncopt.functions.ObjectiveOrConstraint` (see example below).

* Each constraint function is allowed to have multi-dimensional output (see example below).
* An empty list can be passed if no (in)equality constraints are needed.

For example, a linear constraint function `Ax <= b` could be implmented as follows:
For example, a linear constraint function `Ax <= b` could be implemented as follows:

```python
from ncopt.functions import ObjectiveOrConstraint
Expand All @@ -56,7 +56,7 @@ For example, a linear constraint function `Ax <= b` could be implmented as follo

Note the argument `dim_out`, which needs to be passed for all constraint functions: it tells the solver what the output dimension of this constraint is.

The main function class is `ncopt.functions.ObjectiveOrConstraint`. It is a simple wrapper around a given Pytorch module (e.g. the checkpoint of your trained network). We can evaluate the function and compute gradient using the standard Pytorch `autograd` functionalities.
The main function class is `ncopt.functions.ObjectiveOrConstraint`. It is a simple wrapper around a given PyTorch module (e.g. the checkpoint of your trained network). We can evaluate the function and compute gradient using the standard PyTorch `autograd` functionalities.


## Example
Expand Down
105 changes: 75 additions & 30 deletions src/ncopt/sqpgs/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -575,6 +575,73 @@ def setup_time(self) -> float:
def solve_time(self) -> float:
return self.problem.solver_stats.solve_time

def _set_up_problem(self, L, rho, D_f, D_gI, D_gE, f_k, gI_k, gE_k):
    """Construct the parametrized CVXPY subproblem once.

    Only the *shapes/sizes* of the arguments are used here to size the
    ``cp.Parameter`` objects; the numeric values are pushed in later via
    ``_update_parameters``, so the compiled problem can be re-solved
    cheaply across iterations.

    Parameters
    ----------
    L : array, Cholesky-type factor; the objective uses ``||L.T @ d||^2``.
    rho : scalar penalty weight on the objective slack ``z``.
        NOTE(review): rho is baked into the problem as a constant — if rho
        changes between iterations and the problem is reused, it should be
        promoted to a ``cp.Parameter`` as well; confirm against the caller.
    D_f, D_gI, D_gE : gradient (Jacobian) arrays for objective / inequality
        / equality constraints; only shapes are read here.
    f_k, gI_k, gE_k : current function values; only sizes are read here.

    Side effects: stores the problem, the parameter dict and the constraint
    handles (needed later for dual-variable extraction) on ``self``.
    """
    p = {
        "L_T": cp.Parameter(L.shape),
        "D_f": cp.Parameter(D_f.shape),
        "f_k": cp.Parameter(),
    }

    d = self.d
    z = cp.Variable()
    if self.has_ineq_constraints:
        r_I = cp.Variable(gI_k.size, nonneg=True)
    if self.has_eq_constraints:
        r_E = cp.Variable(gE_k.size, nonneg=True)

    objective = rho * z + (1 / 2) * cp.sum_squares(p["L_T"] @ d)

    # BUG FIX: use the f_k *parameter* here, not the raw value. The
    # original built the constraint with `f_k` directly, so the
    # cp.Parameter created above was never part of the problem and
    # updating it in `_update_parameters` had no effect on re-solves.
    obj_constraint = p["f_k"] + p["D_f"] @ d <= z
    constraints = [obj_constraint]

    if self.has_ineq_constraints:
        p.update(
            {
                "D_gI": [cp.Parameter(D_gI[j].shape) for j in range(self.nI)],
                "gI_k": cp.Parameter(gI_k.size),
            }
        )
        ineq_constraints = [p["gI_k"][j] + p["D_gI"][j] @ d <= r_I[j] for j in range(self.nI)]
        constraints += ineq_constraints
        objective = objective + cp.sum(r_I)
        # Keep handles so `solve` can read the dual variables by id.
        self._ineq_constraints = ineq_constraints

    if self.has_eq_constraints:
        p.update(
            {
                "D_gE": [cp.Parameter(D_gE[j].shape) for j in range(self.nE)],
                "gE_k": cp.Parameter(gE_k.size),
            }
        )
        # Equality g(x) = 0 is modeled as the pair g <= r_E and g >= r_E;
        # the signed multiplier is recovered as dual(plus) - dual(neg).
        eq_constraints_plus = [
            p["gE_k"][j] + p["D_gE"][j] @ d <= r_E[j] for j in range(self.nE)
        ]
        eq_constraints_neg = [p["gE_k"][j] + p["D_gE"][j] @ d >= r_E[j] for j in range(self.nE)]
        constraints += eq_constraints_plus + eq_constraints_neg
        objective = objective + cp.sum(r_E)
        self._eq_constraints_plus = eq_constraints_plus
        self._eq_constraints_neg = eq_constraints_neg

    problem = cp.Problem(cp.Minimize(objective), constraints)
    self._problem = problem
    self._parameters = p
    self._obj_constraint = obj_constraint

def _update_parameters(self, L, D_f, D_gI, D_gE, f_k, gI_k, gE_k):
self._parameters["L_T"].value = L.T
self._parameters["D_f"].value = D_f
self._parameters["f_k"].value = f_k

if self.has_ineq_constraints:
for j in range(self.nI):
self._parameters["D_gI"][j].value = D_gI[j]
self._parameters["gI_k"].value = gI_k

if self.has_eq_constraints:
for j in range(self.nE):
self._parameters["D_gE"][j].value = D_gE[j]
self._parameters["gE_k"].value = gE_k

def solve(
self,
L: np.ndarray,
Expand Down Expand Up @@ -623,48 +690,26 @@ def solve(

"""

d = self.d
z = cp.Variable()
if self.has_ineq_constraints:
r_I = cp.Variable(gI_k.size, nonneg=True)
if self.has_eq_constraints:
r_E = cp.Variable(gE_k.size, nonneg=True)
self._set_up_problem(L, rho, D_f, D_gI, D_gE, f_k, gI_k, gE_k)
self._update_parameters(L, D_f, D_gI, D_gE, f_k, gI_k, gE_k)

objective = rho * z + (1 / 2) * cp.sum_squares(L.T @ d)
self.problem.solve(solver=self._qp_solver, verbose=True)

obj_constraint = f_k + D_f @ d <= z
constraints = [obj_constraint]

if self.has_ineq_constraints:
ineq_constraints = [gI_k[j] + D_gI[j] @ d <= r_I[j] for j in range(self.nI)]
constraints += ineq_constraints
objective = objective + cp.sum(r_I)

if self.has_eq_constraints:
eq_constraints_plus = [gE_k[j] + D_gE[j] @ d <= r_E[j] for j in range(self.nE)]
eq_constraints_neg = [gE_k[j] + D_gE[j] @ d >= r_E[j] for j in range(self.nE)]
constraints += eq_constraints_plus + eq_constraints_neg
objective = objective + cp.sum(r_E)

problem = cp.Problem(cp.Minimize(objective), constraints)
problem.solve(solver=self._qp_solver, verbose=False)

assert problem.status in {cp.OPTIMAL, cp.OPTIMAL_INACCURATE}
self._problem = problem
assert self.problem.status in {cp.OPTIMAL, cp.OPTIMAL_INACCURATE}

# Extract dual variables
duals = problem.solution.dual_vars
self.lambda_f = duals[obj_constraint.id]
duals = self.problem.solution.dual_vars
self.lambda_f = duals[self._obj_constraint.id]

if self.has_ineq_constraints:
self.lambda_gI = [duals[c.id] for c in ineq_constraints]
self.lambda_gI = [duals[c.id] for c in self._ineq_constraints]
else:
self.lambda_gI = []

if self.has_eq_constraints:
self.lambda_gE = [
duals[c_plus.id] - duals[c_neg.id]
for c_plus, c_neg in zip(eq_constraints_plus, eq_constraints_neg)
for c_plus, c_neg in zip(self._eq_constraints_plus, self._eq_constraints_neg)
]
else:
self.lambda_gE = []