
Commit

[QUBO] correct local cost, separate annealing neuron, single flips per time step (#272)

* revision for clear user API

* adjust QUBO tutorial

* adjust QUBO tutorial

* make nebm-sa model choice explicit

* Correct local cost in SA

* Input validation

* Input validation

* added warning for QUBO users of the CPU backend

* fix unit test

* linear and geometric annealing neuron

* nebm-sa supports external annealing neuron

* refract removed, unit tests adjusted

* linting

* Proc and ProcModel for new neuron model

* initial integration of new SA model

* fixed SolutionReadout

* linting, deactivating QUBO CPU unit tests

* fixing unit tests

* make SA-local results reproducible

* fix unit test

* fix Codacy
phstratmann authored Nov 24, 2023
1 parent cca038f commit d328ae1
Showing 21 changed files with 797 additions and 277 deletions.
12 changes: 9 additions & 3 deletions src/lava/lib/optimization/problems/problems.py
@@ -65,8 +65,8 @@ def __init__(self, q: npt.ArrayLike):
Parameters
----------
- q: squared Q matrix defining the QUBO problem over a binary
-     vector x as: minimize x^T*Q*x.
+ q: square, symmetric, int Q matrix defining the QUBO problem over a
+     binary vector x as: minimize x^T*Q*x.
"""
super().__init__()
self.validate_input(q)
@@ -109,20 +109,26 @@ def evaluate_cost(self, solution: np.ndarray) -> int:
return int(self._q_cost(solution))

def validate_input(self, q):
"""Validate the cost coefficient is a square matrix.
"""Validate that cost coefficient is a square, symmetric, int matrix.
Parameters
----------
q: Quadratic coefficient of the cost function.
"""

m, n = q.shape
if m != n:
raise ValueError("q matrix is not a square matrix.")
if not issubclass(q.dtype.type, np.integer):
raise NotImplementedError(
"Non integer q matrices are not supported yet."
)
# matrix must be symmetric for current implementation
if not np.allclose(q, q.T, rtol=1e-05, atol=1e-08):
raise NotImplementedError(
"Only symmetric matrixes are currently supported."
)

def verify_solution(self, solution):
raise NotImplementedError
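For reference, a minimal usage sketch of the validation added in this file, assuming the class defined in problems.py is QUBO (as the commit title suggests) and that lava-optimization is installed:

    import numpy as np
    from lava.lib.optimization.problems.problems import QUBO

    # A square, symmetric, integer Q matrix for: minimize x^T * Q * x
    q = np.array([[-5,  2,  0],
                  [ 2, -3,  1],
                  [ 0,  1, -4]])

    problem = QUBO(q)                                  # passes validate_input
    print(problem.evaluate_cost(np.array([1, 0, 1])))  # -5 + (-4) = -9

    # an asymmetric (or non-integer) matrix is rejected by validate_input
    try:
        QUBO(np.array([[0, 1], [2, 0]]))
    except NotImplementedError as exc:
        print(exc)  # only symmetric matrices are currently supported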
113 changes: 113 additions & 0 deletions src/lava/lib/optimization/solvers/generic/annealing/process.py
@@ -0,0 +1,113 @@
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/
import numpy as np
import typing as ty
from numpy import typing as npty

from lava.magma.core.process.ports.ports import OutPort
from lava.magma.core.process.process import AbstractProcess
from lava.magma.core.process.variable import Var


class Annealing(AbstractProcess):
"""
Neuron that updates the temperature for simulated annealing.
Parameters
----------
shape: Tuple
Number of neurons. Default is (1,).
max_temperature: int, ArrayLike
Both maximum and initial temperature of the annealing schedule.
The temperature defines the noise of the system.
min_temperature: ArrayLike
Minimum temperature of the annealing schedule.
steps_per_temperature: int, ArrayLike
The number of time steps between two annealing steps.
delta_temperature: ArrayLike
Defines the change in temperature in each annealing step.
If annealing_schedule is 'linear', the temperature is decreased by
temperature -= delta_temperature.
If annealing_schedule is 'geometric', the temperature is changed by
temperature *= delta_temperature * 2^(-exp_temperature).
exp_temperature: ArrayLike
Defines the change in temperature in each annealing step. For
details, refer to 'delta_temperature'.
annealing_schedule: str
Defines the annealing schedule. Supported values are 'linear' and
'geometric'.
"""

# annealing schedules that are currently supported
supported_anneal_schedules = ['linear', 'geometric']

def __init__(
self,
*,
max_temperature: ty.Union[int, npty.NDArray],
min_temperature: ty.Union[int, npty.NDArray],
delta_temperature: ty.Union[int, npty.NDArray],
steps_per_temperature: ty.Union[int, npty.NDArray],
exp_temperature: ty.Union[int, npty.NDArray],
annealing_schedule: str,
shape: ty.Tuple[int, ...] = (1,),
):

self._validate_input(
shape=shape,
min_temperature=min_temperature,
max_temperature=max_temperature,
delta_temperature=delta_temperature,
steps_per_temperature=steps_per_temperature,
exp_temperature=exp_temperature,
annealing_schedule=annealing_schedule,
)

super().__init__(
shape=shape,
min_temperature=min_temperature,
max_temperature=max_temperature,
delta_temperature=delta_temperature,
steps_per_temperature=steps_per_temperature,
exp_temperature=exp_temperature,
annealing_schedule=annealing_schedule,
)

self.delta_temperature_out = OutPort(shape=shape)

self.temperature = Var(shape=shape, init=np.int_(max_temperature))

@property
def shape(self) -> ty.Tuple[int, ...]:
return self.proc_params["shape"]

def _validate_input(self, shape, min_temperature, max_temperature,
delta_temperature, steps_per_temperature,
exp_temperature, annealing_schedule) -> None:
"""Validates input to the annealing neuron."""

if min_temperature < 0:
raise ValueError("min_temperature must be >= 0.")
if max_temperature > 2**(16) - 1:
raise ValueError("max_temperature must be < 2^16 - 1")
if min_temperature > max_temperature:
raise ValueError("max_temperature must be >= min_temperature.")
if delta_temperature < 0:
raise ValueError("delta_temperature must be >=0.")
if annealing_schedule == 'geometric' and exp_temperature < 0:
raise ValueError("exp_temperature must be >=0.")
if annealing_schedule not in self.supported_anneal_schedules:
raise ValueError(f"At the moment only the annealing schedules "
f"{self.supported_anneal_schedules} are "
f"supported.")
if steps_per_temperature <= 0:
raise ValueError(f"steps_per_temperature is "
f"{steps_per_temperature} but must be > 0.")
if annealing_schedule == 'geometric':
geometric_constant = np.right_shift(delta_temperature,
exp_temperature)
if geometric_constant > 1 or geometric_constant < 0:
raise ValueError(f"delta_temperature >> exp_temperature "
f"should be between 0 to 1, but is"
f" {geometric_constant}.")
7 changes: 7 additions & 0 deletions src/lava/lib/optimization/solvers/generic/builder.py
@@ -24,6 +24,9 @@
from lava.magma.core.process.variable import Var
from lava.magma.core.resources import AbstractComputeResource
from lava.magma.core.sync.protocol import AbstractSyncProtocol
from lava.lib.optimization.solvers.generic.sub_process_models import (
DiscreteVariablesModel
)
from numpy import typing as npt

BACKENDS = ty.Union[CPU, Loihi2NeuroCore, NeuroCore, str]
@@ -253,6 +256,10 @@ def constructor(self, proc):
var_shape=discrete_var_shape,
target_cost=target_cost,
num_in_ports=len(hps),
time_steps_per_algorithmic_step=DiscreteVariablesModel.
get_neuron_process(
proc.hyperparameters).
time_steps_per_algorithmic_step
)
finders = []
for idx, hp in enumerate(hps):
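DiscreteVariablesModel.get_neuron_process (imported above) maps the solver hyperparameters to the neuron Process class, and the builder forwards that class's time_steps_per_algorithmic_step, presumably so the solution readout (see the "fixed SolutionReadout" commit above) can account for multiple time steps per algorithmic step. A rough sketch of the pattern with hypothetical stand-ins; the class names, the "neuron_model" key, and the step counts below are illustrative assumptions, not the real lava-optimization hierarchy:

    class NEBM:
        # plain NEBM neuron: assume one network update per time step
        time_steps_per_algorithmic_step = 1


    class NEBMSimulatedAnnealing:
        # SA neuron with single flips per time step: assume one algorithmic
        # step is spread over several time steps
        time_steps_per_algorithmic_step = 2


    def get_neuron_process(hyperparameters: dict):
        """Return the neuron Process class selected by the hyperparameters."""
        if hyperparameters.get("neuron_model") == "nebm-sa":
            return NEBMSimulatedAnnealing
        return NEBM


    # the builder-style lookup mirrored from the diff above (there it is a
    # method on DiscreteVariablesModel and receives proc.hyperparameters)
    steps = get_neuron_process(
        {"neuron_model": "nebm-sa"}).time_steps_per_algorithmic_step
    print(steps)  # 2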