Changes from all commits
Commits
27 commits
6326086
Updated wolfe line search
Sh3xe Jun 17, 2025
366f22a
fixed header guard
Sh3xe Jun 17, 2025
7937e62
fixed class name
Sh3xe Jun 17, 2025
fd8445f
initial genetic optimizer impl
Sh3xe Jun 18, 2025
89b16d4
Fixed GeneticOptim class bug where state would not reset properly fro…
Sh3xe Jun 18, 2025
d0780a4
Added rank selection
Sh3xe Jun 19, 2025
cf0423b
fixed assert type
Sh3xe Jun 20, 2025
66eb658
NelderMead initial commit (wip)
Sh3xe Jun 20, 2025
2b77464
Added initial condition, minor fixes, still wip
Sh3xe Jun 20, 2025
6bf508a
Merge branch 'genetic_optimizer' into test
Sh3xe Jun 23, 2025
ab71303
Merged every current optimizers, improved NelderMead initial simplex,…
Sh3xe Jun 23, 2025
00ad38d
Merge branch 'develop' of github.com:Sh3xe/fdaPDE-core into develop
Sh3xe Jun 23, 2025
92dc1f9
Finished Nelder Mead
Sh3xe Jun 23, 2025
0567c88
Finished nelder mead, removed genetic algorithm as it is not ready, i…
Sh3xe Jun 23, 2025
382ccf2
fixed wolfe line search
Sh3xe Jun 23, 2025
1ab82a7
fixed GD header guard, added CG method
Sh3xe Jun 25, 2025
1bc2fed
Added crossover mutation, fixed geneticOptim constructor
Sh3xe Jun 26, 2025
04dad18
Merge remote-tracking branch 'upstream/develop' into develop
Sh3xe Jul 3, 2025
4ac3d4c
removed perturbed centroid in low dimensions because of performance
Sh3xe Jul 4, 2025
9847b99
fixed genetic opts
Sh3xe Jul 7, 2025
bd7dac7
fixed nelder-mead coefficients
Sh3xe Jul 7, 2025
70fb8db
Merge branch 'develop' of github.com:Sh3xe/fdaPDE-core into develop
Sh3xe Jul 25, 2025
d2902bf
adapted genetic optimizers for new API, updated stopping criterion
Sh3xe Jul 25, 2025
f25d255
replaced std::vector<vector_t> with matrix_t
Sh3xe Jul 25, 2025
eb1a86c
fixed broken algorithms
Sh3xe Jul 25, 2025
c2aff98
updated nelder mead, fixed static_since var, added some const(s)
Sh3xe Jul 28, 2025
b625741
Updated CG, increased max iter for wolfe.h, fixed issue with lbfgs va…
Sh3xe Aug 11, 2025
3 changes: 3 additions & 0 deletions fdaPDE/optimization.h
@@ -61,6 +61,8 @@ template <typename Opt> static constexpr bool is_gradient_free_opt_v = is_gradie
#include "src/optimization/callbacks.h"
#include "src/optimization/backtracking.h"
#include "src/optimization/wolfe.h"
#include "src/optimization/genetic_selection_ops.h"
#include "src/optimization/genetic_mutation_ops.h"

// algorithms
#include "src/optimization/grid_search.h"
@@ -70,6 +72,7 @@ template <typename Opt> static constexpr bool is_gradient_free_opt_v = is_gradie
#include "src/optimization/bfgs.h"
#include "src/optimization/lbfgs.h"
#include "src/optimization/nelder_mead.h"
#include "src/optimization/genetic_optim.h"

// clang-format on

3 changes: 0 additions & 3 deletions fdaPDE/src/linear_algebra/rp_chol.h
@@ -65,9 +65,6 @@ class RpChol {
}
pivot_set_.merge(pivot_set);
std::vector<int> pivot_vec(pivot_set.begin(), pivot_set.end());

for(int kk : pivot_vec) std::cout << kk << " ";
std::cout << std::endl;

// evaluate columns at pivot_set, remove overlap with previously chosen columns
matrix_t G = A(Eigen::all, pivot_vec) - L_.leftCols(i) * L_(pivot_vec, Eigen::all).leftCols(i).transpose();
7 changes: 4 additions & 3 deletions fdaPDE/src/optimization/bfgs.h
@@ -70,9 +70,10 @@ template <int N> class BFGS {
inv_hessian_ = matrix_t::Identity(size, size);
} else {
inv_hessian_ = matrix_t::Identity();
}
update = -inv_hessian_ * grad_old;
stop |= internals::exec_grad_hooks(*this, objective, callbacks_);
}
values_.clear();
update = -inv_hessian_ * grad_old;
stop |= internals::exec_grad_hooks(*this, objective, callbacks_);
error = grad_old.norm();
values_.push_back(objective(x_old));

42 changes: 42 additions & 0 deletions fdaPDE/src/optimization/callbacks.h
@@ -81,6 +81,48 @@ template <typename Opt, typename Obj> bool exec_stop_if(Opt& optimizer, Obj& obj
return b;
}

template <typename Opt, typename... Args>
bool exec_sync_hooks(Opt& optimizer, std::tuple<Args...>& callbacks) {
return opt_hooks_loop(
[&](auto&& callback) {
if constexpr (requires(std::decay_t<decltype(callback)> c, Opt opt) {
{ c.sync_hook(opt) } -> std::same_as<bool>;
}) {
return callback.sync_hook(optimizer);
}
return false;
},
callbacks);
}

template <typename Opt, typename... Args>
bool exec_mutate_hooks(Opt& optimizer, std::tuple<Args...>& callbacks) {
return opt_hooks_loop(
[&](auto&& callback) {
if constexpr (requires(std::decay_t<decltype(callback)> c, Opt opt) {
{ c.mutate_hook(opt) } -> std::same_as<bool>;
}) {
return callback.mutate_hook(optimizer);
}
return false;
},
callbacks);
}

template <typename Opt, typename... Args>
bool exec_select_hooks(Opt& optimizer, std::tuple<Args...>& callbacks) {
return opt_hooks_loop(
[&](auto&& callback) {
if constexpr (requires(std::decay_t<decltype(callback)> c, Opt opt) {
{ c.select_hook(opt) } -> std::same_as<bool>;
}) {
return callback.select_hook(optimizer);
}
return false;
},
callbacks);
}

} // namespace internals
} // namespace fdapde

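The three new helpers follow the existing exec_grad_hooks pattern: a requires-expression probes each callback for a sync_hook / mutate_hook / select_hook member returning bool, and only callbacks that define the hook are invoked. As a hedged illustration (not part of this PR), a minimal callback that exec_sync_hooks would detect could look like the sketch below; the stop-flag semantics of the boolean return value are an assumption based on how the other hook helpers are consumed.

#include <iostream>

// Hypothetical callback, for illustration only. It defines sync_hook, so
// exec_sync_hooks invokes it; exec_mutate_hooks and exec_select_hooks skip it
// and fall through to their `return false;` branch.
struct GenerationCounter {
    int generations = 0;
    template <typename Opt> bool sync_hook(Opt& /*optimizer*/) {
        std::cout << "synchronizing generation " << generations++ << '\n';
        return false;   // returning true would be OR-ed into the caller's stop flag (assumed)
    }
};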
57 changes: 46 additions & 11 deletions fdaPDE/src/optimization/conjugate_gradient.h
@@ -36,20 +36,22 @@ template <int N, typename DirectionUpdate> class conjugate_gradient_impl {
int max_iter_; // maximum number of iterations before forced stop
double tol_; // tolerance on error before forced stop
double step_; // update step
bool restart_;
public:
static constexpr bool gradient_free = false;
static constexpr int static_input_size = N;
vector_t x_old, x_new, update, grad_old, grad_new;
double h;
// constructor
conjugate_gradient_impl() : max_iter_(500), tol_(1e-5), step_(1e-2) { }
conjugate_gradient_impl(int max_iter, double tol, double step) : max_iter_(max_iter), tol_(tol), step_(step) { }
conjugate_gradient_impl() : max_iter_(500), tol_(1e-5), step_(1e-2), restart_(true) { }
conjugate_gradient_impl(int max_iter, double tol, double step, bool restart) : max_iter_(max_iter), tol_(tol), step_(step), restart_(restart) { }
conjugate_gradient_impl(const conjugate_gradient_impl& other) :
max_iter_(other.max_iter_), tol_(other.tol_), step_(other.step_) { }
max_iter_(other.max_iter_), tol_(other.tol_), step_(other.step_), restart_(other.restart_) { }
conjugate_gradient_impl& operator=(const conjugate_gradient_impl& other) {
max_iter_ = other.max_iter_;
tol_ = other.tol_;
step_ = other.step_;
restart_ = other.restart_;
return *this;
}
template <typename ObjectiveT, typename... Callbacks>
@@ -66,6 +68,7 @@ template <int N, typename DirectionUpdate> class conjugate_gradient_impl {
auto grad = objective.gradient();
h = step_;
n_iter_ = 0;
value_ = std::numeric_limits<double>::max();
x_old = x0, x_new = vector_t::Constant(size, NaN);
grad_old = grad(x_old), grad_new = vector_t::Constant(size, NaN);
update = -grad_old;
@@ -75,21 +78,36 @@ template <int N, typename DirectionUpdate> class conjugate_gradient_impl {

while (n_iter_ < max_iter_ && error > tol_ && !stop) {
stop |= internals::exec_adapt_hooks(*this, objective, callbacks_);

// update along descent direction
x_new = x_old + h * update;
grad_new = grad(x_new);
// prepare next iteration
update = -grad_new + std::max(0.0, beta(*this)) * update; // update conjugate direction
if(restart_ && std::abs(grad_new.dot(grad_old))/ grad_new.dot(grad_new) >= 0.1 ) {
update = -grad_new;
} else {
update = -grad_new + std::max(0.0, beta(*this)) * update; // update conjugate direction
}

stop |=
(internals::exec_grad_hooks(*this, objective, callbacks_) || internals::exec_stop_if(*this, objective));
(internals::exec_grad_hooks(*this, objective, callbacks_) || internals::exec_stop_if(*this, objective));

x_old = x_new;
grad_old = grad_new;
error = grad_new.norm();
values_.push_back(objective(x_old));
n_iter_++;

if(values_.back() <= value_) {
value_ = values_.back();
optimum_ = x_old;
}
}

if(values_.back() <= value_) {
value_ = values_.back();
optimum_ = x_old;
}
optimum_ = x_old;
value_ = values_.back();
return optimum_;
}
// observers
@@ -100,11 +118,18 @@ template <int N, typename DirectionUpdate> class conjugate_gradient_impl {
};

struct fletcher_reeves_update {
template <typename Opt> double operator()(const Opt& opt) { return opt.grad_new.norm() / opt.grad_old.norm(); }
template <typename Opt> double operator()(const Opt& opt) { return opt.grad_new.dot(opt.grad_new) / opt.grad_old.dot(opt.grad_old); }
};

struct polak_ribiere_update {
template <typename Opt> double operator()(const Opt& opt) {
return opt.grad_new.dot(opt.grad_new - opt.grad_old) / opt.grad_old.norm();
return opt.grad_new.dot(opt.grad_new - opt.grad_old) / opt.grad_old.dot(opt.grad_old);
}
};

struct polak_ribiere_plus_update {
template <typename Opt> double operator()(const Opt& opt) {
return std::max(opt.grad_new.dot(opt.grad_new - opt.grad_old) / opt.grad_old.dot(opt.grad_old), 0.0);
}
};

@@ -116,16 +141,26 @@
class FletcherReevesCG : public internals::conjugate_gradient_impl<N, internals::fletcher_reeves_update> {
using Base = internals::conjugate_gradient_impl<N, internals::fletcher_reeves_update>;
public:
FletcherReevesCG() = default;
FletcherReevesCG(int max_iter, double tol, double step) : Base(max_iter, tol, step) { }
FletcherReevesCG(int max_iter, double tol, double step, bool restart) : Base(max_iter, tol, step, restart) { }
};

template <int N>
class PolakRibiereCG : public internals::conjugate_gradient_impl<N, internals::polak_ribiere_update> {
using Base = internals::conjugate_gradient_impl<N, internals::polak_ribiere_update>;
public:
PolakRibiereCG() = default;
PolakRibiereCG(int max_iter, double tol, double step) : Base(max_iter, tol, step) { }
PolakRibiereCG(int max_iter, double tol, double step, bool restart) : Base(max_iter, tol, step, restart) { }
};

template <int N>
class PolakRibierePlsCG : public internals::conjugate_gradient_impl<N, internals::polak_ribiere_plus_update> {
using Base = internals::conjugate_gradient_impl<N, internals::polak_ribiere_plus_update>;
public:
PolakRibierePlsCG() = default;
PolakRibierePlsCG(int max_iter, double tol, double step, bool restart) : Base(max_iter, tol, step, restart) { }
};


} // namespace fdapde

#endif // __FDAPDE_CONJUGATE_GRADIENT_H__
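For reference, the three direction updates compute beta_FR = (g_new . g_new) / (g_old . g_old), beta_PR = g_new . (g_new - g_old) / (g_old . g_old) and beta_PR+ = max(beta_PR, 0). The following self-contained sketch (not part of the PR) evaluates them directly on Eigen vectors, mirroring the update structs above:

// Standalone sketch, for illustration only: the three conjugate-direction
// coefficients computed from two gradient vectors.
#include <Eigen/Dense>
#include <algorithm>
#include <iostream>

int main() {
    Eigen::Vector2d g_old(1.0, -2.0), g_new(0.5, 0.25);
    double beta_fr  = g_new.dot(g_new) / g_old.dot(g_old);           // Fletcher-Reeves
    double beta_pr  = g_new.dot(g_new - g_old) / g_old.dot(g_old);   // Polak-Ribiere
    double beta_prp = std::max(beta_pr, 0.0);                        // Polak-Ribiere+
    std::cout << beta_fr << ' ' << beta_pr << ' ' << beta_prp << '\n';
    return 0;
}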
86 changes: 86 additions & 0 deletions fdaPDE/src/optimization/genetic_mutation_ops.h
@@ -0,0 +1,86 @@

// This file is part of fdaPDE, a C++ library for physics-informed
// spatial and functional data analysis.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

#ifndef __FDAPDE_GENETIC_MUTATION_OPS_H__
#define __FDAPDE_GENETIC_MUTATION_OPS_H__

#include "header_check.h"

namespace fdapde {

class GaussianMutation {
private:
double initial_variance_ = 2.5;
double variance_ = 2.5;
double multiplier_ = 0.95;
std::normal_distribution<double> normal_dist_{0.0,1.0};

public:
// constructors
GaussianMutation(double initial_variance = 2.5, double multiplier = 0.95):
initial_variance_(initial_variance),
variance_(initial_variance),
multiplier_(multiplier)
{}

template <typename Opt> bool sync_hook(Opt& opt) {
variance_ = initial_variance_ * opt.population.rowwise().norm().mean();
return false;
}

template <typename Opt> bool mutate_hook(Opt& opt) {
opt.population += internals::gaussian_matrix(opt.population.rows(), opt.population.cols(), variance_, opt.seed_);
variance_ *= multiplier_;
return false;
}
};

class CrossoverMutation {
private:
std::uniform_real_distribution<double> distribution_{0.0,1.0};
std::uniform_int_distribution<int> indices_distribution_ {0,1};
double mutation_probability_ = 0.3;

public:
// constructors
CrossoverMutation(double mutation_probability = 0.3) : mutation_probability_(mutation_probability) {}

template <typename Opt> bool sync_hook(Opt& opt) {
indices_distribution_ = std::uniform_int_distribution<int>(0, opt.population.cols()-1);
return false;
}

template <typename Opt> bool mutate_hook(Opt& opt) {
for(int j = 0; j < opt.population.cols() - 1; j += 2) {
// Find a random pair of "parents"
int partner_idx = indices_distribution_(opt.rng);

for(int i = 0; i < opt.population.rows(); ++i) {
// With probability mutation_probability_, blend this coefficient with the partner's
if( distribution_(opt.rng) < mutation_probability_) {
double coeff = (opt.population(i, j) + opt.population(i, partner_idx))/2.0;
opt.population(i, j) = coeff;
}
}
}
return false;
}
};

} // namespace fdapde

#endif // __FDAPDE_GENETIC_MUTATION_OPS_H__
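As a hedged, self-contained illustration (not part of this PR), the blend step performed by CrossoverMutation::mutate_hook can be reproduced on a plain Eigen matrix, with columns playing the role of individuals and rows the coordinates:

// Standalone sketch, for illustration only: the crossover blend applied to a
// small population matrix, using a local RNG in place of the optimizer's.
#include <Eigen/Dense>
#include <iostream>
#include <random>

int main() {
    Eigen::MatrixXd population(2, 4);
    population << 0.0, 1.0, 2.0, 3.0,
                  4.0, 5.0, 6.0, 7.0;
    std::mt19937 rng(42);
    std::uniform_real_distribution<double> coin(0.0, 1.0);
    std::uniform_int_distribution<int> pick(0, population.cols() - 1);
    double mutation_probability = 0.3;
    for (int j = 0; j < population.cols() - 1; j += 2) {
        int partner_idx = pick(rng);                      // random partner column
        for (int i = 0; i < population.rows(); ++i) {
            if (coin(rng) < mutation_probability) {       // blend with the partner's coefficient
                population(i, j) = 0.5 * (population(i, j) + population(i, partner_idx));
            }
        }
    }
    std::cout << population << '\n';
    return 0;
}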