chore: more doc updates
morenol committed Nov 25, 2024
1 parent def877c commit e42332d
Showing 10 changed files with 19 additions and 30 deletions.
6 changes: 0 additions & 6 deletions src/algorithm/neighbour/fastpair.rs
@@ -52,10 +52,8 @@ pub struct FastPair<'a, T: RealNumber + FloatNumber, M: Array2<T>> {
}

impl<'a, T: RealNumber + FloatNumber, M: Array2<T>> FastPair<'a, T, M> {
- ///
/// Constructor
/// Instantiate and initialize the algorithm
- ///
pub fn new(m: &'a M) -> Result<Self, Failed> {
if m.shape().0 < 3 {
return Err(Failed::because(
@@ -156,9 +154,7 @@ impl<'a, T: RealNumber + FloatNumber, M: Array2<T>> FastPair<'a, T, M> {
self.neighbours = neighbours;
}

- ///
/// Find closest pair by scanning list of nearest neighbors.
- ///
#[allow(dead_code)]
pub fn closest_pair(&self) -> PairwiseDistance<T> {
let mut a = self.neighbours[0]; // Start with first point
@@ -215,9 +211,7 @@ mod tests_fastpair {
use super::*;
use crate::linalg::basic::{arrays::Array, matrix::DenseMatrix};

- ///
/// Brute force algorithm, used only for comparison and testing
- ///
pub fn closest_pair_brute(fastpair: &FastPair<f64, DenseMatrix<f64>>) -> PairwiseDistance<f64> {
use itertools::Itertools;
let m = fastpair.samples.shape().0;
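The hunk above documents `FastPair::closest_pair`, which scans the precomputed nearest-neighbour list, and the test helper `closest_pair_brute`, which validates it against an exhaustive scan. A minimal sketch of that O(n²) baseline on plain `Vec`s (illustrative names, not smartcore's `FastPair`/`PairwiseDistance` types; squared Euclidean distance is assumed for the sketch):

```rust
// Hedged sketch: an O(n^2) brute-force closest pair over plain Vec<Vec<f64>>,
// mirroring the role of `closest_pair_brute` in the tests above.
// `squared_distance` and `closest_pair_brute_force` are illustrative names.
fn squared_distance(a: &[f64], b: &[f64]) -> f64 {
    a.iter().zip(b).map(|(x, y)| (x - y) * (x - y)).sum()
}

/// Returns (index_i, index_j, squared distance) of the closest pair.
fn closest_pair_brute_force(samples: &[Vec<f64>]) -> (usize, usize, f64) {
    assert!(samples.len() >= 3, "FastPair::new itself rejects fewer than 3 samples");
    let mut best = (0, 1, f64::MAX);
    for i in 0..samples.len() {
        for j in (i + 1)..samples.len() {
            let d = squared_distance(&samples[i], &samples[j]);
            if d < best.2 {
                best = (i, j, d);
            }
        }
    }
    best
}

fn main() {
    let samples = vec![
        vec![5.2, 3.5, 1.5, 0.2],
        vec![4.9, 3.0, 1.4, 0.2],
        vec![4.7, 3.2, 1.3, 0.2],
    ];
    let (i, j, d) = closest_pair_brute_force(&samples);
    println!("closest pair: ({i}, {j}), squared distance {d}");
}
```

The assert mirrors the check in `FastPair::new` shown above, which returns an error when the matrix has fewer than 3 rows.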
6 changes: 3 additions & 3 deletions src/linalg/basic/arrays.rs
@@ -973,7 +973,7 @@ pub trait Array1<T: Debug + Display + Copy + Sized>: MutArrayView1<T> + Sized +
result.softmax_mut();
result
}
- ///
+ /// multiply array by matrix
fn xa(&self, a_transpose: bool, a: &dyn ArrayView2<T>) -> Self
where
T: Number,
@@ -1136,7 +1136,7 @@ pub trait Array2<T: Debug + Display + Copy + Sized>: MutArrayView2<T> + Sized +

result
}
- ///
+ /// matrix multiplication
fn ab(&self, a_transpose: bool, b: &dyn ArrayView2<T>, b_transpose: bool) -> Self
where
T: Number,
@@ -1171,7 +1171,7 @@ pub trait Array2<T: Debug + Display + Copy + Sized>: MutArrayView2<T> + Sized +
result
}
}
- ///
+ /// matrix vector multiplication
fn ax(&self, a_transpose: bool, x: &dyn ArrayView1<T>) -> Self
where
T: Number,
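The new doc comments name `ab` as matrix multiplication, `ax` as matrix-vector multiplication and `xa` as vector-matrix multiplication, each taking transpose flags. A small sketch of what a product with optional transposition computes, on plain nested `Vec`s rather than the `Array2`/`ArrayView2` traits (`matmul` is an illustrative helper, not the smartcore API):

```rust
// Hedged sketch of C = op(A) * op(B), where op(.) optionally transposes its argument.
// Plain Vec<Vec<f64>> stands in for smartcore's Array2; `matmul` is illustrative only.
fn matmul(a: &[Vec<f64>], a_t: bool, b: &[Vec<f64>], b_t: bool) -> Vec<Vec<f64>> {
    let get = |m: &[Vec<f64>], t: bool, i: usize, j: usize| if t { m[j][i] } else { m[i][j] };
    let (ar, ac) = if a_t { (a[0].len(), a.len()) } else { (a.len(), a[0].len()) };
    let (br, bc) = if b_t { (b[0].len(), b.len()) } else { (b.len(), b[0].len()) };
    assert_eq!(ac, br, "inner dimensions must agree");
    let mut c = vec![vec![0.0; bc]; ar];
    for i in 0..ar {
        for j in 0..bc {
            for k in 0..ac {
                c[i][j] += get(a, a_t, i, k) * get(b, b_t, k, j);
            }
        }
    }
    c
}

fn main() {
    let a = vec![vec![1.0, 2.0], vec![3.0, 4.0]];
    // A^T * A, the same shape of product that a call like `a.ab(true, a, false)` describes.
    let ata = matmul(&a, true, &a, false);
    println!("{ata:?}"); // [[10.0, 14.0], [14.0, 20.0]]
}
```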
2 changes: 1 addition & 1 deletion src/linalg/traits/svd.rs
@@ -52,7 +52,7 @@ pub struct SVD<T: Number + RealNumber, M: SVDDecomposable<T>> {
m: usize,
///

GitHub Actions / lint: check failure on line 53 in src/linalg/traits/svd.rs (empty doc comment)
n: usize,
- ///
+ /// Tolerance
tol: T,
}

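The struct stores the decomposition dimensions `m` and `n` plus a tolerance `tol`. As a general illustration only (not a claim about how smartcore's SVD uses `tol` internally), such a tolerance is typically used to treat tiny singular values as zero, for example when estimating numerical rank:

```rust
// Hedged sketch: deciding numerical rank from singular values with a tolerance.
// `numerical_rank` is an illustrative helper, not part of smartcore's SVD API.
fn numerical_rank(singular_values: &[f64], tol: f64) -> usize {
    singular_values.iter().filter(|&&s| s > tol).count()
}

fn main() {
    let s = [3.0, 1.2e-1, 4.0e-13]; // singular values, largest first
    // A common convention is tol = max(m, n) * machine_eps * s_max; here we just pick one.
    let tol = 1e-10;
    assert_eq!(numerical_rank(&s, tol), 2);
}
```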
7 changes: 3 additions & 4 deletions src/linear/lasso_optimizer.rs
@@ -16,7 +16,7 @@ use crate::linalg::basic::arrays::{Array1, Array2, ArrayView1, MutArray, MutArra
use crate::linear::bg_solver::BiconjugateGradientSolver;
use crate::numbers::floatnum::FloatNumber;

- ///
+ /// Interior Point Optimizer
pub struct InteriorPointOptimizer<T: FloatNumber, X: Array2<T>> {
ata: X,
d1: Vec<T>,
@@ -25,9 +25,8 @@ pub struct InteriorPointOptimizer<T: FloatNumber, X: Array2<T>> {
prs: Vec<T>,
}

- ///
impl<T: FloatNumber, X: Array2<T>> InteriorPointOptimizer<T, X> {
- ///
+ /// Initialize a new Interior Point Optimizer
pub fn new(a: &X, n: usize) -> InteriorPointOptimizer<T, X> {
InteriorPointOptimizer {
ata: a.ab(true, a, false),
@@ -38,7 +37,7 @@ impl<T: FloatNumber, X: Array2<T>> InteriorPointOptimizer<T, X> {
}
}

- ///
+ /// Run the optimization
pub fn optimize(
&mut self,
x: &X,
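The constructor shown above precomputes `ata = a.ab(true, a, false)`, i.e. AᵀA, which an interior-point lasso solver reuses across Newton steps. For background, a sketch of the standard lasso objective 0.5·‖Ax - b‖² + λ‖x‖₁ that such an optimizer minimizes (illustrative helper, not the internals of `optimize`):

```rust
// Hedged sketch: the standard lasso objective 0.5 * ||A x - b||^2 + lambda * ||x||_1,
// written with plain Vecs. `lasso_objective` is illustrative, not a smartcore API.
fn lasso_objective(a: &[Vec<f64>], b: &[f64], x: &[f64], lambda: f64) -> f64 {
    let residual_sq: f64 = a
        .iter()
        .zip(b)
        .map(|(row, &bi)| {
            let pred: f64 = row.iter().zip(x).map(|(aij, xj)| aij * xj).sum();
            (pred - bi).powi(2)
        })
        .sum();
    let l1: f64 = x.iter().map(|xi| xi.abs()).sum();
    0.5 * residual_sq + lambda * l1
}

fn main() {
    let a = vec![vec![1.0, 0.0], vec![0.0, 1.0]];
    let b = [1.0, 2.0];
    let x = [0.5, 1.5];
    println!("{}", lasso_objective(&a, &b, &x, 0.1)); // 0.5*(0.25+0.25) + 0.1*2.0 = 0.45
}
```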
3 changes: 0 additions & 3 deletions src/linear/logistic_regression.rs
@@ -183,14 +183,11 @@ pub struct LogisticRegression<
}

trait ObjectiveFunction<T: Number + FloatNumber, X: Array2<T>> {
- ///
fn f(&self, w_bias: &[T]) -> T;

- ///
#[allow(clippy::ptr_arg)]
fn df(&self, g: &mut Vec<T>, w_bias: &Vec<T>);

- ///
#[allow(clippy::ptr_arg)]
fn partial_dot(w: &[T], x: &X, v_col: usize, m_row: usize) -> T {
let mut sum = T::zero();
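The `ObjectiveFunction` trait above exposes the objective `f`, its gradient `df`, and a `partial_dot` helper that accumulates a weighted sum over one row of X. For reference, a sketch of the textbook binary logistic-regression loss and gradient with a trailing bias weight (the standard formulation, not smartcore's exact internal code):

```rust
// Hedged sketch: binary logistic-regression negative log-likelihood and gradient
// for weights w (last entry used as bias), written against plain slices.
// Function names are illustrative, not smartcore's ObjectiveFunction implementation.
fn sigmoid(z: f64) -> f64 {
    1.0 / (1.0 + (-z).exp())
}

fn dot_with_bias(w: &[f64], row: &[f64]) -> f64 {
    row.iter().zip(w).map(|(xi, wi)| xi * wi).sum::<f64>() + w[row.len()]
}

/// f(w) = -sum_i [ y_i ln p_i + (1 - y_i) ln (1 - p_i) ],  p_i = sigmoid(w·x_i + b)
fn f(w: &[f64], x: &[Vec<f64>], y: &[f64]) -> f64 {
    x.iter()
        .zip(y)
        .map(|(row, &yi)| {
            let p = sigmoid(dot_with_bias(w, row));
            -(yi * p.ln() + (1.0 - yi) * (1.0 - p).ln())
        })
        .sum()
}

/// df/dw_j = sum_i (p_i - y_i) x_ij, and df/db = sum_i (p_i - y_i)
fn df(g: &mut [f64], w: &[f64], x: &[Vec<f64>], y: &[f64]) {
    g.iter_mut().for_each(|gj| *gj = 0.0);
    for (row, &yi) in x.iter().zip(y) {
        let err = sigmoid(dot_with_bias(w, row)) - yi;
        for (j, xij) in row.iter().enumerate() {
            g[j] += err * xij;
        }
        g[row.len()] += err; // bias component
    }
}

fn main() {
    let x = vec![vec![0.0, 1.0], vec![1.0, 0.0]];
    let y = [0.0, 1.0];
    let w = [0.1, -0.2, 0.0]; // two weights + bias
    let mut g = [0.0; 3];
    df(&mut g, &w, &x, &y);
    println!("f = {:.4}, grad = {:?}", f(&w, &x, &y), g);
}
```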
2 changes: 1 addition & 1 deletion src/optimization/first_order/gradient_descent.rs
@@ -8,7 +8,7 @@ use crate::optimization::{DF, F};

///
pub struct GradientDescent {
- ///
+ /// Maximum number of iterations
pub max_iter: usize,
///
pub g_rtol: f64,
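`GradientDescent` exposes a maximum iteration count and a relative gradient tolerance `g_rtol`. A minimal sketch of the loop such settings control, using a fixed step size (a generic illustration, not smartcore's `FirstOrderOptimizer` implementation):

```rust
// Hedged sketch: fixed-step gradient descent that stops after `max_iter` steps
// or once the gradient norm falls below `g_rtol` times its initial norm.
// Names are illustrative, not the smartcore GradientDescent internals.
fn norm(v: &[f64]) -> f64 {
    v.iter().map(|x| x * x).sum::<f64>().sqrt()
}

fn gradient_descent(
    grad: impl Fn(&[f64]) -> Vec<f64>,
    mut x: Vec<f64>,
    step: f64,
    max_iter: usize,
    g_rtol: f64,
) -> (Vec<f64>, usize) {
    let g0 = norm(&grad(&x));
    for iter in 0..max_iter {
        let g = grad(&x);
        if norm(&g) <= g_rtol * g0 {
            return (x, iter);
        }
        for (xi, gi) in x.iter_mut().zip(&g) {
            *xi -= step * gi;
        }
    }
    (x, max_iter)
}

fn main() {
    // Minimize f(x) = ||x||^2, whose gradient is 2x.
    let grad = |x: &[f64]| x.iter().map(|xi| 2.0 * xi).collect::<Vec<_>>();
    let (x, iters) = gradient_descent(grad, vec![3.0, -4.0], 0.1, 1000, 1e-8);
    println!("x = {x:?} after {iters} iterations");
}
```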
2 changes: 1 addition & 1 deletion src/optimization/first_order/lbfgs.rs
@@ -13,7 +13,7 @@ use crate::optimization::{DF, F};

///
pub struct LBFGS {
- ///
+ /// Maximum number of iterations
pub max_iter: usize,
///
pub g_rtol: f64,
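`LBFGS` has the same `max_iter`/`g_rtol` knobs but builds its search direction from a limited history of steps and gradient differences. A compact sketch of the standard two-loop recursion that turns a gradient into an L-BFGS direction (the general algorithm, not smartcore's `LBFGS` code):

```rust
// Hedged sketch: the L-BFGS two-loop recursion. Given the current gradient and a
// history of steps s_k = x_{k+1} - x_k and gradient differences y_k = g_{k+1} - g_k,
// it returns an approximation of -H^{-1} g to use as the search direction.
// Illustrative only; not smartcore's LBFGS implementation.
fn dot(a: &[f64], b: &[f64]) -> f64 {
    a.iter().zip(b).map(|(x, y)| x * y).sum()
}

fn lbfgs_direction(grad: &[f64], s_hist: &[Vec<f64>], y_hist: &[Vec<f64>]) -> Vec<f64> {
    let mut q = grad.to_vec();
    let k = s_hist.len();
    let mut alphas = vec![0.0; k];

    // First loop: newest to oldest.
    for i in (0..k).rev() {
        let rho = 1.0 / dot(&y_hist[i], &s_hist[i]);
        alphas[i] = rho * dot(&s_hist[i], &q);
        for (qj, yj) in q.iter_mut().zip(&y_hist[i]) {
            *qj -= alphas[i] * yj;
        }
    }

    // Initial Hessian scaling from the most recent pair.
    let gamma = if k > 0 {
        dot(&s_hist[k - 1], &y_hist[k - 1]) / dot(&y_hist[k - 1], &y_hist[k - 1])
    } else {
        1.0
    };
    let mut r: Vec<f64> = q.iter().map(|qj| gamma * qj).collect();

    // Second loop: oldest to newest.
    for i in 0..k {
        let rho = 1.0 / dot(&y_hist[i], &s_hist[i]);
        let beta = rho * dot(&y_hist[i], &r);
        for (rj, sj) in r.iter_mut().zip(&s_hist[i]) {
            *rj += (alphas[i] - beta) * sj;
        }
    }

    // Descent direction is -r.
    r.iter().map(|rj| -rj).collect()
}

fn main() {
    let grad = vec![1.0, -2.0];
    let s_hist = vec![vec![0.5, 0.5]];
    let y_hist = vec![vec![1.0, 0.5]];
    println!("{:?}", lbfgs_direction(&grad, &s_hist, &y_hist));
}
```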
4 changes: 2 additions & 2 deletions src/optimization/first_order/mod.rs
@@ -26,9 +26,9 @@ pub trait FirstOrderOptimizer<T: FloatNumber> {
/// Result of optimization
#[derive(Debug, Clone)]
pub struct OptimizerResult<T: FloatNumber, X: Array1<T>> {
- ///
+ /// Solution
pub x: X,
- ///
+ /// f(x) value
pub f_x: T,
/// number of iterations
pub iterations: usize,
11 changes: 5 additions & 6 deletions src/optimization/line_search.rs
@@ -1,9 +1,9 @@
use crate::optimization::FunctionOrder;
use num_traits::Float;

- /// Line search method is an iterative approach to find a local minimum of a multidimensional nonlinear function using the function's gradients
+ /// Line search optimization.
pub trait LineSearchMethod<T: Float> {
- ///
+ /// Find alpha that satisfies strong Wolfe conditions.
fn search(
&self,
f: &(dyn Fn(T) -> T),
@@ -17,7 +17,7 @@ pub trait LineSearchMethod<T: Float> {
/// Line search result
#[derive(Debug, Clone)]
pub struct LineSearchResult<T: Float> {
- ///
+ /// Alpha
pub alpha: T,
///
pub f_x: T,
@@ -27,19 +27,18 @@ pub struct LineSearchResult<T: Float> {
pub struct Backtracking<T: Float> {
///
pub c1: T,
- ///
+ /// Maximum number of iterations for Backtracking single run
pub max_iterations: usize,
///
pub max_infinity_iterations: usize,
///
pub phi: T,
///
pub plo: T,
- ///
+ /// function order
pub order: FunctionOrder,
}

- ///
impl<T: Float> Default for Backtracking<T> {
fn default() -> Self {
Backtracking {
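`Backtracking` carries an Armijo constant `c1`, iteration caps, the factors `phi`/`plo`, and a `FunctionOrder`. The core idea of a backtracking search is to shrink a trial step until a sufficient-decrease condition holds; a minimal sketch of the Armijo-only variant (smartcore's version additionally targets the strong Wolfe conditions mentioned above, which this omits):

```rust
// Hedged sketch: backtracking line search enforcing the Armijo (sufficient decrease)
// condition f(alpha) <= f(0) + c1 * alpha * f'(0). Illustrative only; not the
// smartcore Backtracking implementation.
fn backtracking(
    f: impl Fn(f64) -> f64, // phi(alpha) = f(x + alpha * d) along the search direction
    f0: f64,                // phi(0)
    df0: f64,               // phi'(0), negative for a descent direction
    c1: f64,
    shrink: f64,            // e.g. 0.5
    max_iterations: usize,
) -> Option<f64> {
    let mut alpha = 1.0;
    for _ in 0..max_iterations {
        if f(alpha) <= f0 + c1 * alpha * df0 {
            return Some(alpha);
        }
        alpha *= shrink;
    }
    None
}

fn main() {
    // phi(alpha) for f(x) = x^2 starting at x = 2 with direction d = -1.
    let phi = |alpha: f64| (2.0 - alpha).powi(2);
    let step = backtracking(phi, 4.0, -4.0, 1e-4, 0.5, 40);
    println!("accepted alpha = {step:?}");
}
```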
6 changes: 3 additions & 3 deletions src/optimization/mod.rs
@@ -8,12 +8,12 @@ pub type F<'a, T, X> = dyn for<'b> Fn(&'b X) -> T + 'a;
///
pub type DF<'a, X> = dyn for<'b> Fn(&'b mut X, &'b X) + 'a;

- ///
+ /// Function order
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, PartialEq, Eq)]
pub enum FunctionOrder {
- ///
+ /// Second order
SECOND,
- ///
+ /// Third order
THIRD,
}
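The `F` and `DF` aliases shown in the hunk header describe the closures the optimizers consume: `F` maps a point to a scalar and `DF` writes the gradient into a mutable buffer. A small example of closures matching those shapes, with `T = f64` and `X = Vec<f64>` (the concrete function here is just an illustration):

```rust
// Hedged sketch: closures shaped like smartcore's F<'a, T, X> and DF<'a, X> aliases,
// instantiated with T = f64 and X = Vec<f64> for f(x) = ||x||^2.
fn main() {
    // F: Fn(&X) -> T
    let f = |x: &Vec<f64>| -> f64 { x.iter().map(|xi| xi * xi).sum() };
    // DF: Fn(&mut X, &X), writing the gradient 2x into the first argument.
    let df = |g: &mut Vec<f64>, x: &Vec<f64>| {
        for (gi, xi) in g.iter_mut().zip(x) {
            *gi = 2.0 * xi;
        }
    };

    let x = vec![1.0, -3.0];
    let mut g = vec![0.0; x.len()];
    df(&mut g, &x);
    println!("f(x) = {}, grad = {:?}", f(&x), g);
}
```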
