Commit 96657f1: Merge branch 'release/0.12.0'

lucidfrontier45 committed Dec 26, 2023
2 parents 441536d + 7bf7244

Showing 21 changed files with 387 additions and 234 deletions.
Cargo.toml (2 changes: 1 addition, 1 deletion)

````diff
@@ -7,7 +7,7 @@ categories = ["algorithms"]
 repository = "https://github.com/lucidfrontier45/localsearch"
 license-file = "LICENSE"
 readme = "README.md"
-version = "0.11.0"
+version = "0.12.0"
 edition = "2021"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
````
README.md (40 changes: 21 additions, 19 deletions)

````diff
@@ -8,7 +8,7 @@ All of the algorithms are parallelized with Rayon.
 1. Hill Climbing.
 2. Tabu Search.
 3. Simulated Annealing.
-4. Epsilon Greedy Search, a variant of Hill Climbing which accepts the trial state with a constant probability even if the score of the trial state is worse than the previous one.
+4. Epsilon Greedy Search, a variant of Hill Climbing which accepts the trial solution with a constant probability even if the score of the trial solution is worse than the previous one.
 5. Relative Annealing, a variant of Simulated Annealing which uses the relative score difference to calculate the transition probability.
 6. Logistic Annealing, a variant of Relative Annealing which uses a logistic function instead of a simple exponential.

````
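The variants listed in items 4 to 6 of the README share the same move loop and differ mainly in how they decide whether to keep a worse trial solution. The sketch below illustrates that idea only; it is not the localsearch crate's internal code, and the function names, the `epsilon` and `temperature` parameters, and the exact formulas are assumptions made for the example (scores are minimized, and `r` stands for a uniform random sample in [0, 1)).

```rust
// Illustrative acceptance rules for the README's variants 4-6.
// A sketch under assumed formulas, not the crate's implementation.
// `r` is a pre-drawn uniform sample in [0, 1), e.g. `rng.gen::<f64>()`.

/// Epsilon Greedy: always keep improvements; keep a worse trial
/// solution with a fixed probability `epsilon`.
fn accept_epsilon_greedy(new: f64, old: f64, epsilon: f64, r: f64) -> bool {
    new <= old || r < epsilon
}

/// Relative Annealing: like Simulated Annealing, but driven by the
/// relative score difference instead of the absolute one.
fn accept_relative_annealing(new: f64, old: f64, temperature: f64, r: f64) -> bool {
    let d = (new - old) / old; // relative score difference
    r < (-d / temperature).exp()
}

/// Logistic Annealing: the same relative difference, squashed through
/// a logistic curve (equal to 1 at d = 0, decaying toward 0 as d grows).
fn accept_logistic_annealing(new: f64, old: f64, temperature: f64, r: f64) -> bool {
    let d = (new - old) / old;
    r < 2.0 / (1.0 + (d / temperature).exp())
}

fn main() {
    // A worse trial (11.0 vs 10.0): epsilon greedy keeps it only when r < epsilon.
    assert!(accept_epsilon_greedy(11.0, 10.0, 0.1, 0.05));
    assert!(!accept_epsilon_greedy(11.0, 10.0, 0.1, 0.5));
    // The annealing variants accept slightly worse trials with high probability.
    assert!(accept_relative_annealing(10.1, 10.0, 0.1, 0.5));
    assert!(accept_logistic_annealing(10.1, 10.0, 0.1, 0.5));
}
```

Note that all three rules reduce to plain Hill Climbing when `epsilon` is 0 or `temperature` approaches 0.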
````diff
@@ -17,7 +17,7 @@ All of the algorithms are parallelized with Rayon.
 You need to implement your own model that implements the `OptModel` trait. The actual optimization is handled by each algorithm's function. Here is a simple example that optimizes a quadratic function with the Hill Climbing algorithm.

 ```rust
-use std::error::Error;
+use std::{error::Error, time::Duration};

 use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
 use localsearch::{
@@ -42,39 +42,39 @@ impl QuadraticModel {
     }
 }

-type StateType = Vec<f64>;
+type SolutionType = Vec<f64>;
 type ScoreType = NotNan<f64>;

 impl OptModel for QuadraticModel {
-    type StateType = StateType;
+    type SolutionType = SolutionType;
     type TransitionType = ();
     type ScoreType = ScoreType;
-    fn generate_random_state<R: rand::Rng>(
+    fn generate_random_solution<R: rand::Rng>(
         &self,
         rng: &mut R,
-    ) -> Result<Self::StateType, Box<dyn Error>> {
-        let state = self.dist.sample_iter(rng).take(self.k).collect::<Vec<_>>();
-        Ok(state)
+    ) -> Result<Self::SolutionType, Box<dyn Error>> {
+        let solution = self.dist.sample_iter(rng).take(self.k).collect::<Vec<_>>();
+        Ok(solution)
     }

-    fn generate_trial_state<R: rand::Rng>(
+    fn generate_trial_solution<R: rand::Rng>(
         &self,
-        current_state: &Self::StateType,
+        current_solution: &Self::SolutionType,
         rng: &mut R,
         _current_score: Option<NotNan<f64>>,
-    ) -> (Self::StateType, Self::TransitionType, NotNan<f64>) {
+    ) -> (Self::SolutionType, Self::TransitionType, NotNan<f64>) {
         let k = rng.gen_range(0..self.k);
         let v = self.dist.sample(rng);
-        let mut new_state = current_state.clone();
-        new_state[k] = v;
-        let score = self.evaluate_state(&new_state);
-        (new_state, (), score)
+        let mut new_solution = current_solution.clone();
+        new_solution[k] = v;
+        let score = self.evaluate_solution(&new_solution);
+        (new_solution, (), score)
     }

-    fn evaluate_state(&self, state: &Self::StateType) -> NotNan<f64> {
+    fn evaluate_solution(&self, solution: &Self::SolutionType) -> NotNan<f64> {
         let score = (0..self.k)
             .into_iter()
-            .map(|i| (state[i] - self.centers[i]).powf(2.0))
+            .map(|i| (solution[i] - self.centers[i]).powf(2.0))
             .sum();
         NotNan::new(score).unwrap()
     }
@@ -98,19 +98,21 @@ fn main() {

     println!("running Hill Climbing optimizer");
     let n_iter = 10000;
+    let time_limit = Duration::from_secs(60);
     let patience = 1000;
     let n_trials = 50;
     let opt = HillClimbingOptimizer::new(patience, n_trials);
     let pb = create_pbar(n_iter as u64);
-    let callback = |op: OptProgress<StateType, ScoreType>| {
+    let callback = |op: OptProgress<SolutionType, ScoreType>| {
         pb.set_message(format!("best score {:e}", op.score.into_inner()));
         pb.set_position(op.iter as u64);
     };

-    let res = opt.optimize(&model, None, n_iter, Some(&callback), ());
+    let res = opt.optimize(&model, None, n_iter, time_limit, Some(&callback), ());
     pb.finish();
     dbg!(res);
 }

 ```

 Further details can be found in the API documentation, the examples, and the test code.
````
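Read together, the README hunks show the two user-facing changes in this release: the `OptModel` vocabulary is renamed from state to solution (`StateType` becomes `SolutionType`, `generate_random_state` becomes `generate_random_solution`, and so on), and `optimize` gains a `Duration` time-limit argument before the callback. The model itself is unchanged; it still minimizes the quadratic f(x) = sum_i (x_i - c_i)^2 implemented by `evaluate_solution`. A minimal before/after fragment for callers upgrading from 0.11 (not standalone code; it reuses the `model`, `opt`, `n_iter`, and `callback` bindings from the example above):

```rust
use std::time::Duration;

// 0.11.x call shape, per the removed line in the diff above:
// let res = opt.optimize(&model, None, n_iter, Some(&callback), ());

// 0.12.0 call shape: an explicit wall-clock budget is now required,
// presumably ending the run at `n_iter` iterations or `time_limit`,
// whichever is reached first.
let time_limit = Duration::from_secs(60);
let res = opt.optimize(&model, None, n_iter, time_limit, Some(&callback), ());
```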
examples/quadratic_model.rs (37 changes: 19 additions, 18 deletions)

````diff
@@ -1,4 +1,4 @@
-use std::error::Error;
+use std::{error::Error, time::Duration};

 use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
 use localsearch::{
@@ -23,39 +23,39 @@ impl QuadraticModel {
     }
 }

-type StateType = Vec<f64>;
+type SolutionType = Vec<f64>;
 type ScoreType = NotNan<f64>;

 impl OptModel for QuadraticModel {
-    type StateType = StateType;
+    type SolutionType = SolutionType;
     type TransitionType = ();
     type ScoreType = ScoreType;
-    fn generate_random_state<R: rand::Rng>(
+    fn generate_random_solution<R: rand::Rng>(
         &self,
         rng: &mut R,
-    ) -> Result<Self::StateType, Box<dyn Error>> {
-        let state = self.dist.sample_iter(rng).take(self.k).collect::<Vec<_>>();
-        Ok(state)
+    ) -> Result<Self::SolutionType, Box<dyn Error>> {
+        let solution = self.dist.sample_iter(rng).take(self.k).collect::<Vec<_>>();
+        Ok(solution)
     }

-    fn generate_trial_state<R: rand::Rng>(
+    fn generate_trial_solution<R: rand::Rng>(
         &self,
-        current_state: &Self::StateType,
+        current_solution: &Self::SolutionType,
         rng: &mut R,
         _current_score: Option<NotNan<f64>>,
-    ) -> (Self::StateType, Self::TransitionType, NotNan<f64>) {
+    ) -> (Self::SolutionType, Self::TransitionType, NotNan<f64>) {
         let k = rng.gen_range(0..self.k);
         let v = self.dist.sample(rng);
-        let mut new_state = current_state.clone();
-        new_state[k] = v;
-        let score = self.evaluate_state(&new_state);
-        (new_state, (), score)
+        let mut new_solution = current_solution.clone();
+        new_solution[k] = v;
+        let score = self.evaluate_solution(&new_solution);
+        (new_solution, (), score)
     }

-    fn evaluate_state(&self, state: &Self::StateType) -> NotNan<f64> {
+    fn evaluate_solution(&self, solution: &Self::SolutionType) -> NotNan<f64> {
         let score = (0..self.k)
             .into_iter()
-            .map(|i| (state[i] - self.centers[i]).powf(2.0))
+            .map(|i| (solution[i] - self.centers[i]).powf(2.0))
             .sum();
         NotNan::new(score).unwrap()
     }
@@ -79,16 +79,17 @@ fn main() {

     println!("running Hill Climbing optimizer");
     let n_iter = 10000;
+    let time_limit = Duration::from_secs_f32(1.0);
     let patience = 1000;
     let n_trials = 50;
     let opt = HillClimbingOptimizer::new(patience, n_trials);
     let pb = create_pbar(n_iter as u64);
-    let callback = |op: OptProgress<StateType, ScoreType>| {
+    let callback = |op: OptProgress<SolutionType, ScoreType>| {
         pb.set_message(format!("best score {:e}", op.score.into_inner()));
         pb.set_position(op.iter as u64);
     };

-    let res = opt.optimize(&model, None, n_iter, Some(&callback), ());
+    let res = opt.optimize(&model, None, n_iter, time_limit, Some(&callback), ());
     pb.finish();
     dbg!(res);
 }
````
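The example mirrors the README code but passes `Duration::from_secs_f32(1.0)`, a one-second budget, instead of the README's 60 seconds, so a run may stop well before all 10000 iterations. Assuming the standard Cargo layout implied by the path `examples/quadratic_model.rs`, it should be runnable from the repository root with `cargo run --release --example quadratic_model`.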