diff --git a/src/algorithm/neighbour/fastpair.rs b/src/algorithm/neighbour/fastpair.rs
index eff22ebe..ebd57b42 100644
--- a/src/algorithm/neighbour/fastpair.rs
+++ b/src/algorithm/neighbour/fastpair.rs
@@ -74,10 +74,8 @@ impl<'a, T: RealNumber + FloatNumber, M: Array2<T>> FastPair<'a, T, M> {
         Ok(init)
     }
 
-    ///
     /// Initialise `FastPair` by passing a `Array2`.
     /// Build a FastPairs data-structure from a set of (new) points.
-    ///
     fn init(&mut self) {
         // basic measures
         let len = self.samples.shape().0;
diff --git a/src/linalg/basic/arrays.rs b/src/linalg/basic/arrays.rs
index 2299ee6d..9eb5739a 100644
--- a/src/linalg/basic/arrays.rs
+++ b/src/linalg/basic/arrays.rs
@@ -1294,7 +1294,7 @@ pub trait Array2<T: Debug + Display + Copy + Sized>: MutArrayView2<T> + Sized +
             }
         }
     }
-    ///
+    /// Merge 1d arrays along the given axis.
     fn merge_1d<'a>(&'a self, arrays: &'a [&'a dyn ArrayView1<T>], axis: u8, append: bool) -> Self {
         assert!(
             axis == 1 || axis == 0,
@@ -1362,7 +1362,7 @@ pub trait Array2<T: Debug + Display + Copy + Sized>: MutArrayView2<T> + Sized +
             }
         }
     }
-    ///
+    /// Stack arrays in sequence vertically (row-wise).
     fn v_stack(&self, other: &dyn ArrayView2<T>) -> Self {
         let (nrows, ncols) = self.shape();
         let (other_nrows, other_ncols) = other.shape();
@@ -1378,7 +1378,7 @@ pub trait Array2<T: Debug + Display + Copy + Sized>: MutArrayView2<T> + Sized +
             0,
         )
     }
-    ///
+    /// Stack arrays in sequence horizontally (column-wise).
     fn h_stack(&self, other: &dyn ArrayView2<T>) -> Self {
         let (nrows, ncols) = self.shape();
         let (other_nrows, other_ncols) = other.shape();
@@ -1407,7 +1407,7 @@ pub trait Array2<T: Debug + Display + Copy + Sized>: MutArrayView2<T> + Sized +
     fn col_iter<'a>(&'a self) -> Box<dyn Iterator<Item = Box<dyn ArrayView1<T> + 'a>> + 'a> {
         Box::new((0..self.shape().1).map(move |r| self.get_col(r)))
     }
-    ///
+    /// Take elements from a 2d array along the given axis.
     fn take(&self, index: &[usize], axis: u8) -> Self {
         let (nrows, ncols) = self.shape();
 
diff --git a/src/linear/bg_solver.rs b/src/linear/bg_solver.rs
index 6ee4f0ec..7ebd3733 100644
--- a/src/linear/bg_solver.rs
+++ b/src/linear/bg_solver.rs
@@ -133,7 +133,7 @@ pub trait BiconjugateGradientSolver<'a, T: FloatNumber, X: Array2<T>> {
         y.copy_from(&x.xa(true, a));
     }
 
-    ///
+    /// Extract the diagonal from a matrix.
     fn diag(a: &X) -> Vec<T> {
         let (nrows, ncols) = a.shape();
         let n = nrows.min(ncols);
diff --git a/src/linear/lasso_optimizer.rs b/src/linear/lasso_optimizer.rs
index 8b775aea..583354e6 100644
--- a/src/linear/lasso_optimizer.rs
+++ b/src/linear/lasso_optimizer.rs
@@ -208,7 +208,6 @@ impl<T: FloatNumber, X: Array2<T>> InteriorPointOptimizer<T, X> {
         Ok(w)
     }
 
-    ///
     fn sumlogneg(f: &X) -> T {
         let (n, _) = f.shape();
         let mut sum = T::zero();
@@ -220,11 +219,9 @@ impl<T: FloatNumber, X: Array2<T>> InteriorPointOptimizer<T, X> {
     }
 }
 
-///
 impl<'a, T: FloatNumber, X: Array2<T>> BiconjugateGradientSolver<'a, T, X>
     for InteriorPointOptimizer<T, X>
 {
-    ///
     fn solve_preconditioner(&self, a: &'a X, b: &[T], x: &mut [T]) {
         let (_, p) = a.shape();
 
@@ -234,7 +231,6 @@ impl<'a, T: FloatNumber, X: Array2<T>> BiconjugateGradientSolver<'a, T, X>
         }
     }
 
-    ///
     fn mat_vec_mul(&self, _: &X, x: &Vec<T>, y: &mut Vec<T>) {
         let (_, p) = self.ata.shape();
         let x_slice = Vec::from_slice(x.slice(0..p).as_ref());
@@ -246,7 +242,6 @@ impl<'a, T: FloatNumber, X: Array2<T>> BiconjugateGradientSolver<'a, T, X>
         }
     }
 
-    ///
     fn mat_t_vec_mul(&self, a: &X, x: &Vec<T>, y: &mut Vec<T>) {
         self.mat_vec_mul(a, x, y);
     }
diff --git a/src/neighbors/knn_regressor.rs b/src/neighbors/knn_regressor.rs
index 1e0ad519..3d17bf24 100644
--- a/src/neighbors/knn_regressor.rs
+++ b/src/neighbors/knn_regressor.rs
@@ -88,25 +88,21 @@ pub struct KNNRegressor<TX: Number, TY: Number, X: Array2<TX>, Y: Array1<TY>, D:
 impl<TX: Number, TY: Number, X: Array2<TX>, Y: Array1<TY>, D: Distance<Vec<TX>>>
     KNNRegressor<TX, TY, X, Y, D>
 {
-    ///
     fn y(&self) -> &Y {
         self.y.as_ref().unwrap()
     }
 
-    ///
     fn knn_algorithm(&self) -> &KNNAlgorithm<TX, D> {
         self.knn_algorithm
             .as_ref()
             .expect("Missing parameter: KNNAlgorithm")
     }
 
-    ///
     fn weight(&self) -> &KNNWeightFunction {
         self.weight.as_ref().expect("Missing parameter: weight")
     }
 
     #[allow(dead_code)]
-    ///
     fn k(&self) -> usize {
         self.k.unwrap()
     }
diff --git a/src/optimization/first_order/gradient_descent.rs b/src/optimization/first_order/gradient_descent.rs
index 8c8f60a4..60aeb0b1 100644
--- a/src/optimization/first_order/gradient_descent.rs
+++ b/src/optimization/first_order/gradient_descent.rs
@@ -16,7 +16,6 @@ pub struct GradientDescent {
     pub g_atol: f64,
 }
 
-///
 impl Default for GradientDescent {
     fn default() -> Self {
         GradientDescent {
@@ -27,9 +26,7 @@ impl Default for GradientDescent {
     }
 }
 
-///
 impl<T: FloatNumber + RealNumber> FirstOrderOptimizer<T> for GradientDescent {
-    ///
     fn optimize<'a, X: Array1<T>, LS: LineSearchMethod<T>>(
         &self,
         f: &'a F<'_, T, X>,
diff --git a/src/optimization/first_order/lbfgs.rs b/src/optimization/first_order/lbfgs.rs
index c2988e8e..f97f2f4e 100644
--- a/src/optimization/first_order/lbfgs.rs
+++ b/src/optimization/first_order/lbfgs.rs
@@ -33,9 +33,7 @@ pub struct LBFGS {
     pub m: usize,
 }
 
-///
 impl Default for LBFGS {
-    ///
     fn default() -> Self {
         LBFGS {
             max_iter: 1000,
@@ -52,7 +50,6 @@
 }
 
 impl LBFGS {
-    ///
     fn two_loops<T: FloatNumber + RealNumber, X: Array1<T>>(&self, state: &mut LBFGSState<T, X>) {
         let lower = state.iteration.max(self.m) - self.m;
         let upper = state.iteration;
@@ -94,7 +91,6 @@ impl LBFGS {
         state.s.mul_scalar_mut(-T::one());
     }
 
-    ///
     fn init_state<T: FloatNumber + RealNumber, X: Array1<T>>(&self, x: &X) -> LBFGSState<T, X> {
         LBFGSState {
             x: x.clone(),
@@ -118,7 +114,6 @@ impl LBFGS {
         }
     }
 
-    ///
     fn update_state<'a, T: FloatNumber + RealNumber, X: Array1<T>, LS: LineSearchMethod<T>>(
         &self,
         f: &'a F<'_, T, X>,
@@ -160,7 +155,6 @@ impl LBFGS {
         df(&mut state.x_df, &state.x);
     }
 
-    ///
     fn assess_convergence<T: FloatNumber + RealNumber, X: Array1<T>>(
         &self,
         state: &mut LBFGSState<T, X>,
@@ -194,7 +188,6 @@ impl LBFGS {
         g_converged || x_converged || state.counter_f_tol > self.successive_f_tol
     }
 
-    ///
     fn update_hessian<T: FloatNumber, X: Array1<T>>(
         &self,
         _: &DF<'_, X>,
@@ -233,9 +226,7 @@ struct LBFGSState<T: FloatNumber, X: Array1<T>> {
     alpha: T,
 }
 
-///
 impl<T: FloatNumber + RealNumber> FirstOrderOptimizer<T> for LBFGS {
-    ///
     fn optimize<'a, X: Array1<T>, LS: LineSearchMethod<T>>(
         &self,
         f: &F<'_, T, X>,
diff --git a/src/optimization/line_search.rs b/src/optimization/line_search.rs
index f98d4c24..d9511fc8 100644
--- a/src/optimization/line_search.rs
+++ b/src/optimization/line_search.rs
@@ -53,9 +53,7 @@ impl Default for Backtracking {
     }
 }
 
-///
 impl<T: FloatNumber + RealNumber> LineSearchMethod<T> for Backtracking {
-    ///
     fn search(
         &self,
         f: &(dyn Fn(T) -> T),
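
Note (not part of the patch): a quick sketch of how the newly documented `Array2` helpers fit together. It assumes the crate's `DenseMatrix::from_2d_array` constructor from `linalg::basic::matrix` and that `take` with axis 0 indexes rows (as in numpy's `take`); the constructor's exact signature varies between versions, so treat this as illustrative rather than a doctest.

    use smartcore::linalg::basic::arrays::{Array, Array2};
    use smartcore::linalg::basic::matrix::DenseMatrix;

    fn main() {
        // Two 2x2 matrices (assumed constructor; see note above).
        let a = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.]]);
        let b = DenseMatrix::from_2d_array(&[&[5., 6.], &[7., 8.]]);

        // v_stack appends the rows of `b` below `a`: shape (4, 2).
        assert_eq!(a.v_stack(&b).shape(), (4, 2));

        // h_stack appends the columns of `b` to the right of `a`: shape (2, 4).
        assert_eq!(a.h_stack(&b).shape(), (2, 4));

        // take selects by index along an axis; here the first row twice,
        // assuming axis 0 means rows, giving a (2, 2) result.
        assert_eq!(a.take(&[0, 0], 0).shape(), (2, 2));
    }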
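Similarly, the new `diag` doc in bg_solver.rs describes behaviour that is easy to check in isolation. The sketch below mirrors the shape handling visible in the hunk (`let n = nrows.min(ncols);`) on a plain `Vec<Vec<f64>>` instead of the crate's `Array2` trait, so the names here are illustrative only:

    // Illustrative stand-in for BiconjugateGradientSolver::diag: the main
    // diagonal of a rectangular matrix has min(nrows, ncols) entries.
    fn diag(a: &[Vec<f64>]) -> Vec<f64> {
        let nrows = a.len();
        let ncols = a.first().map_or(0, |row| row.len());
        let n = nrows.min(ncols);
        (0..n).map(|i| a[i][i]).collect()
    }

    fn main() {
        let a = vec![vec![1., 2., 3.], vec![4., 5., 6.]]; // 2x3 matrix
        assert_eq!(diag(&a), vec![1., 5.]);
    }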