From ba75f9ffada069445c7f0b510d3aba770cd5eb60 Mon Sep 17 00:00:00 2001 From: morenol <22335041+morenol@users.noreply.github.com> Date: Mon, 25 Nov 2024 10:34:29 -0500 Subject: [PATCH] chore: fix clippy (#283) * chore: fix clippy Co-authored-by: Luis Moreno --- .github/workflows/ci.yml | 2 +- src/algorithm/neighbour/cover_tree.rs | 8 +- src/algorithm/neighbour/fastpair.rs | 10 +- src/algorithm/neighbour/linear_search.rs | 4 +- src/algorithm/sort/heap_select.rs | 4 +- src/algorithm/sort/quick_sort.rs | 1 + src/cluster/kmeans.rs | 8 +- src/linalg/basic/arrays.rs | 152 +++++++++--------- src/linalg/traits/evd.rs | 4 +- src/linalg/traits/stats.rs | 4 +- src/linalg/traits/svd.rs | 4 +- src/linear/bg_solver.rs | 8 +- src/linear/lasso_optimizer.rs | 14 +- src/linear/logistic_regression.rs | 21 ++- src/naive_bayes/bernoulli.rs | 7 +- src/naive_bayes/categorical.rs | 5 +- src/naive_bayes/gaussian.rs | 5 +- src/naive_bayes/mod.rs | 3 +- src/naive_bayes/multinomial.rs | 7 +- src/neighbors/knn_classifier.rs | 1 + src/neighbors/knn_regressor.rs | 7 +- .../first_order/gradient_descent.rs | 17 +- src/optimization/first_order/lbfgs.rs | 39 ++--- src/optimization/first_order/mod.rs | 16 +- src/optimization/line_search.rs | 29 ++-- src/optimization/mod.rs | 16 +- src/svm/mod.rs | 2 +- src/tree/decision_tree_classifier.rs | 18 +-- src/tree/decision_tree_regressor.rs | 14 +- 29 files changed, 194 insertions(+), 236 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 89b3b37e..d7942c8f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,7 +36,7 @@ jobs: - name: Install Rust toolchain uses: actions-rs/toolchain@v1 with: - toolchain: stable + toolchain: 1.81 # 1.82 seems to break wasm32 tests https://github.com/rustwasm/wasm-bindgen/issues/4274 target: ${{ matrix.platform.target }} profile: minimal default: true diff --git a/src/algorithm/neighbour/cover_tree.rs b/src/algorithm/neighbour/cover_tree.rs index 011a9cc0..9989ae24 100644 --- a/src/algorithm/neighbour/cover_tree.rs +++ b/src/algorithm/neighbour/cover_tree.rs @@ -124,7 +124,7 @@ impl> CoverTree { current_cover_set.push((d, &self.root)); let mut heap = HeapSelection::with_capacity(k); - heap.add(std::f64::MAX); + heap.add(f64::MAX); let mut empty_heap = true; if !self.identical_excluded || self.get_data_value(self.root.idx) != p { @@ -145,7 +145,7 @@ impl> CoverTree { } let upper_bound = if empty_heap { - std::f64::INFINITY + f64::INFINITY } else { *heap.peek() }; @@ -291,7 +291,7 @@ impl> CoverTree { } else { let max_dist = self.max(point_set); let next_scale = (max_scale - 1).min(self.get_scale(max_dist)); - if next_scale == std::i64::MIN { + if next_scale == i64::MIN { let mut children: Vec = Vec::new(); let mut leaf = self.new_leaf(p); children.push(leaf); @@ -435,7 +435,7 @@ impl> CoverTree { fn get_scale(&self, d: f64) -> i64 { if d == 0f64 { - std::i64::MIN + i64::MIN } else { (self.inv_log_base * d.ln()).ceil() as i64 } diff --git a/src/algorithm/neighbour/fastpair.rs b/src/algorithm/neighbour/fastpair.rs index 759d018c..9f663f67 100644 --- a/src/algorithm/neighbour/fastpair.rs +++ b/src/algorithm/neighbour/fastpair.rs @@ -52,10 +52,8 @@ pub struct FastPair<'a, T: RealNumber + FloatNumber, M: Array2> { } impl<'a, T: RealNumber + FloatNumber, M: Array2> FastPair<'a, T, M> { - /// /// Constructor - /// Instantiate and inizialise the algorithm - /// + /// Instantiate and initialize the algorithm pub fn new(m: &'a M) -> Result { if m.shape().0 < 3 { return Err(Failed::because( @@ -74,10 +72,8 @@ 
impl<'a, T: RealNumber + FloatNumber, M: Array2> FastPair<'a, T, M> { Ok(init) } - /// /// Initialise `FastPair` by passing a `Array2`. /// Build a FastPairs data-structure from a set of (new) points. - /// fn init(&mut self) { // basic measures let len = self.samples.shape().0; @@ -158,9 +154,7 @@ impl<'a, T: RealNumber + FloatNumber, M: Array2> FastPair<'a, T, M> { self.neighbours = neighbours; } - /// /// Find closest pair by scanning list of nearest neighbors. - /// #[allow(dead_code)] pub fn closest_pair(&self) -> PairwiseDistance { let mut a = self.neighbours[0]; // Start with first point @@ -217,9 +211,7 @@ mod tests_fastpair { use super::*; use crate::linalg::basic::{arrays::Array, matrix::DenseMatrix}; - /// /// Brute force algorithm, used only for comparison and testing - /// pub fn closest_pair_brute(fastpair: &FastPair>) -> PairwiseDistance { use itertools::Itertools; let m = fastpair.samples.shape().0; diff --git a/src/algorithm/neighbour/linear_search.rs b/src/algorithm/neighbour/linear_search.rs index b1ce7270..6bc82176 100644 --- a/src/algorithm/neighbour/linear_search.rs +++ b/src/algorithm/neighbour/linear_search.rs @@ -61,7 +61,7 @@ impl> LinearKNNSearch { for _ in 0..k { heap.add(KNNPoint { - distance: std::f64::INFINITY, + distance: f64::INFINITY, index: None, }); } @@ -215,7 +215,7 @@ mod tests { }; let point_inf = KNNPoint { - distance: std::f64::INFINITY, + distance: f64::INFINITY, index: Some(3), }; diff --git a/src/algorithm/sort/heap_select.rs b/src/algorithm/sort/heap_select.rs index 23d2704a..8a4ef78a 100644 --- a/src/algorithm/sort/heap_select.rs +++ b/src/algorithm/sort/heap_select.rs @@ -133,7 +133,7 @@ mod tests { #[test] fn test_add1() { let mut heap = HeapSelection::with_capacity(3); - heap.add(std::f64::INFINITY); + heap.add(f64::INFINITY); heap.add(-5f64); heap.add(4f64); heap.add(-1f64); @@ -151,7 +151,7 @@ mod tests { #[test] fn test_add2() { let mut heap = HeapSelection::with_capacity(3); - heap.add(std::f64::INFINITY); + heap.add(f64::INFINITY); heap.add(0.0); heap.add(8.4852); heap.add(5.6568); diff --git a/src/algorithm/sort/quick_sort.rs b/src/algorithm/sort/quick_sort.rs index 97d34e7c..e64c4243 100644 --- a/src/algorithm/sort/quick_sort.rs +++ b/src/algorithm/sort/quick_sort.rs @@ -3,6 +3,7 @@ use num_traits::Num; pub trait QuickArgSort { fn quick_argsort_mut(&mut self) -> Vec; + #[allow(dead_code)] fn quick_argsort(&self) -> Vec; } diff --git a/src/cluster/kmeans.rs b/src/cluster/kmeans.rs index 6609ace4..2fade68f 100644 --- a/src/cluster/kmeans.rs +++ b/src/cluster/kmeans.rs @@ -96,7 +96,7 @@ impl, Y: Array1> PartialEq for KMeans< return false; } for j in 0..self.centroids[i].len() { - if (self.centroids[i][j] - other.centroids[i][j]).abs() > std::f64::EPSILON { + if (self.centroids[i][j] - other.centroids[i][j]).abs() > f64::EPSILON { return false; } } @@ -270,7 +270,7 @@ impl, Y: Array1> KMeans let (n, d) = data.shape(); - let mut distortion = std::f64::MAX; + let mut distortion = f64::MAX; let mut y = KMeans::::kmeans_plus_plus(data, parameters.k, parameters.seed); let mut size = vec![0; parameters.k]; let mut centroids = vec![vec![0f64; d]; parameters.k]; @@ -331,7 +331,7 @@ impl, Y: Array1> KMeans let mut row = vec![0f64; x.shape().1]; for i in 0..n { - let mut min_dist = std::f64::MAX; + let mut min_dist = f64::MAX; let mut best_cluster = 0; for j in 0..self.k { @@ -361,7 +361,7 @@ impl, Y: Array1> KMeans .cloned() .collect(); - let mut d = vec![std::f64::MAX; n]; + let mut d = vec![f64::MAX; n]; let mut row = vec![TX::zero(); 
data.shape().1]; for j in 1..k { diff --git a/src/linalg/basic/arrays.rs b/src/linalg/basic/arrays.rs index 99df2078..3c889722 100644 --- a/src/linalg/basic/arrays.rs +++ b/src/linalg/basic/arrays.rs @@ -265,11 +265,11 @@ pub trait ArrayView1: Array { if p.is_infinite() && p.is_sign_positive() { self.iterator(0) .map(|x| x.to_f64().unwrap().abs()) - .fold(std::f64::NEG_INFINITY, |a, b| a.max(b)) + .fold(f64::NEG_INFINITY, |a, b| a.max(b)) } else if p.is_infinite() && p.is_sign_negative() { self.iterator(0) .map(|x| x.to_f64().unwrap().abs()) - .fold(std::f64::INFINITY, |a, b| a.min(b)) + .fold(f64::INFINITY, |a, b| a.min(b)) } else { let mut norm = 0f64; @@ -558,11 +558,11 @@ pub trait ArrayView2: Array: pub trait MutArrayView2: MutArray + ArrayView2 { - /// + /// copy values from another array fn copy_from(&mut self, other: &dyn Array) { self.iterator_mut(0) .zip(other.iterator(0)) .for_each(|(s, o)| *s = *o); } - /// + /// update view with absolute values fn abs_mut(&mut self) where T: Number + Signed, { self.iterator_mut(0).for_each(|v| *v = v.abs()); } - /// + /// update view values with opposite sign fn neg_mut(&mut self) where T: Number + Neg, { self.iterator_mut(0).for_each(|v| *v = -*v); } - /// + /// update view values at power `p` fn pow_mut(&mut self, p: T) where T: RealNumber, { self.iterator_mut(0).for_each(|v| *v = v.powf(p)); } - /// + /// scale view values fn scale_mut(&mut self, mean: &[T], std: &[T], axis: u8) where T: Number, @@ -784,27 +784,27 @@ pub trait MutArrayView2: /// Trait for mutable 1D-array view pub trait Array1: MutArrayView1 + Sized + Clone { - /// + /// return a view of the array fn slice<'a>(&'a self, range: Range) -> Box + 'a>; - /// + /// return a mutable view of the array fn slice_mut<'a>(&'a mut self, range: Range) -> Box + 'a>; - /// + /// fill array with a given value fn fill(len: usize, value: T) -> Self where Self: Sized; - /// + /// create array from iterator fn from_iterator>(iter: I, len: usize) -> Self where Self: Sized; - /// + /// create array from vector fn from_vec_slice(slice: &[T]) -> Self where Self: Sized; - /// + /// create array from slice fn from_slice(slice: &'_ dyn ArrayView1) -> Self where Self: Sized; - /// + /// create a zero array fn zeros(len: usize) -> Self where T: Number, @@ -812,7 +812,7 @@ pub trait Array1: MutArrayView1 + Sized + { Self::fill(len, T::zero()) } - /// + /// create an array of ones fn ones(len: usize) -> Self where T: Number, @@ -820,7 +820,7 @@ pub trait Array1: MutArrayView1 + Sized + { Self::fill(len, T::one()) } - /// + /// create an array of random values fn rand(len: usize) -> Self where T: RealNumber, @@ -828,7 +828,7 @@ pub trait Array1: MutArrayView1 + Sized + { Self::from_iterator((0..len).map(|_| T::rand()), len) } - /// + /// add a scalar to the array fn add_scalar(&self, x: T) -> Self where T: Number, @@ -838,7 +838,7 @@ pub trait Array1: MutArrayView1 + Sized + result.add_scalar_mut(x); result } - /// + /// subtract a scalar from the array fn sub_scalar(&self, x: T) -> Self where T: Number, @@ -848,7 +848,7 @@ pub trait Array1: MutArrayView1 + Sized + result.sub_scalar_mut(x); result } - /// + /// divide a scalar from the array fn div_scalar(&self, x: T) -> Self where T: Number, @@ -858,7 +858,7 @@ pub trait Array1: MutArrayView1 + Sized + result.div_scalar_mut(x); result } - /// + /// multiply a scalar to the array fn mul_scalar(&self, x: T) -> Self where T: Number, @@ -868,7 +868,7 @@ pub trait Array1: MutArrayView1 + Sized + result.mul_scalar_mut(x); result } - /// + /// sum of two arrays 
fn add(&self, other: &dyn Array) -> Self where T: Number, @@ -878,7 +878,7 @@ pub trait Array1: MutArrayView1 + Sized + result.add_mut(other); result } - /// + /// subtract two arrays fn sub(&self, other: &impl Array1) -> Self where T: Number, @@ -888,7 +888,7 @@ pub trait Array1: MutArrayView1 + Sized + result.sub_mut(other); result } - /// + /// multiply two arrays fn mul(&self, other: &dyn Array) -> Self where T: Number, @@ -898,7 +898,7 @@ pub trait Array1: MutArrayView1 + Sized + result.mul_mut(other); result } - /// + /// divide two arrays fn div(&self, other: &dyn Array) -> Self where T: Number, @@ -908,7 +908,7 @@ pub trait Array1: MutArrayView1 + Sized + result.div_mut(other); result } - /// + /// take elements at the given indices fn take(&self, index: &[usize]) -> Self where Self: Sized, @@ -920,7 +920,7 @@ pub trait Array1: MutArrayView1 + Sized + ); Self::from_iterator(index.iter().map(move |&i| *self.get(i)), index.len()) } - /// + /// create a copy of the array with absolute values fn abs(&self) -> Self where T: Number + Signed, @@ -930,7 +930,7 @@ pub trait Array1: MutArrayView1 + Sized + result.abs_mut(); result } - /// + /// create a copy of the array with opposite sign fn neg(&self) -> Self where T: Number + Neg, @@ -940,7 +940,7 @@ pub trait Array1: MutArrayView1 + Sized + result.neg_mut(); result } - /// + /// create a copy of the array with values raised to power `p` fn pow(&self, p: T) -> Self where T: RealNumber, @@ -950,7 +950,7 @@ pub trait Array1: MutArrayView1 + Sized + result.pow_mut(p); result } - /// + /// apply argsort to the array fn argsort(&self) -> Vec where T: Number + PartialOrd, @@ -958,12 +958,12 @@ pub trait Array1: MutArrayView1 + Sized + let mut v = self.clone(); v.argsort_mut() } - /// + /// map values of the array fn map, F: FnMut(&T) -> O>(self, f: F) -> A { let len = self.shape(); A::from_iterator(self.iterator(0).map(f), len) } - /// + /// apply softmax to the array fn softmax(&self) -> Self where T: RealNumber, @@ -973,7 +973,7 @@ pub trait Array1: MutArrayView1 + Sized + result.softmax_mut(); result } - /// + /// multiply array by matrix fn xa(&self, a_transpose: bool, a: &dyn ArrayView2) -> Self where T: Number, @@ -1003,7 +1003,7 @@ pub trait Array1: MutArrayView1 + Sized + result } - /// + /// check if two arrays are approximately equal fn approximate_eq(&self, other: &Self, error: T) -> bool where T: Number + RealNumber, @@ -1015,13 +1015,13 @@ pub trait Array1: MutArrayView1 + Sized + /// Trait for mutable 2D-array view pub trait Array2: MutArrayView2 + Sized + Clone { - /// + /// fill 2d array with a given value fn fill(nrows: usize, ncols: usize, value: T) -> Self; - /// + /// get a view of the 2d array fn slice<'a>(&'a self, rows: Range, cols: Range) -> Box + 'a> where Self: Sized; - /// + /// get a mutable view of the 2d array fn slice_mut<'a>( &'a mut self, rows: Range, @@ -1029,31 +1029,31 @@ pub trait Array2: MutArrayView2 + Sized + ) -> Box + 'a> where Self: Sized; - /// + /// create 2d array from iterator fn from_iterator>(iter: I, nrows: usize, ncols: usize, axis: u8) -> Self; - /// + /// get row from 2d array fn get_row<'a>(&'a self, row: usize) -> Box + 'a> where Self: Sized; - /// + /// get column from 2d array fn get_col<'a>(&'a self, col: usize) -> Box + 'a> where Self: Sized; - /// + /// create a zero 2d array fn zeros(nrows: usize, ncols: usize) -> Self where T: Number, { Self::fill(nrows, ncols, T::zero()) } - /// + /// create a 2d array of ones fn ones(nrows: usize, ncols: usize) -> Self where T: Number, { Self::fill(nrows,
ncols, T::one()) } - /// + /// create an identity matrix fn eye(size: usize) -> Self where T: Number, @@ -1066,29 +1066,29 @@ pub trait Array2: MutArrayView2 + Sized + matrix } - /// + /// create a 2d array of random values fn rand(nrows: usize, ncols: usize) -> Self where T: RealNumber, { Self::from_iterator((0..nrows * ncols).map(|_| T::rand()), nrows, ncols, 0) } - /// + /// create from 2d slice fn from_slice(slice: &dyn ArrayView2) -> Self { let (nrows, ncols) = slice.shape(); Self::from_iterator(slice.iterator(0).cloned(), nrows, ncols, 0) } - /// + /// create from row fn from_row(slice: &dyn ArrayView1) -> Self { let ncols = slice.shape(); Self::from_iterator(slice.iterator(0).cloned(), 1, ncols, 0) } - /// + /// create from column fn from_column(slice: &dyn ArrayView1) -> Self { let nrows = slice.shape(); Self::from_iterator(slice.iterator(0).cloned(), nrows, 1, 0) } - /// + /// transpose 2d array fn transpose(&self) -> Self { let (nrows, ncols) = self.shape(); let mut m = Self::fill(ncols, nrows, *self.get((0, 0))); @@ -1099,7 +1099,7 @@ pub trait Array2: MutArrayView2 + Sized + } m } - /// + /// change shape of 2d array fn reshape(&self, nrows: usize, ncols: usize, axis: u8) -> Self { let (onrows, oncols) = self.shape(); @@ -1110,7 +1110,7 @@ pub trait Array2: MutArrayView2 + Sized + Self::from_iterator(self.iterator(0).cloned(), nrows, ncols, axis) } - /// + /// multiply two 2d arrays fn matmul(&self, other: &dyn ArrayView2) -> Self where T: Number, @@ -1136,7 +1136,7 @@ pub trait Array2: MutArrayView2 + Sized + result } - /// + /// matrix multiplication fn ab(&self, a_transpose: bool, b: &dyn ArrayView2, b_transpose: bool) -> Self where T: Number, @@ -1171,7 +1171,7 @@ pub trait Array2: MutArrayView2 + Sized + result } } - /// + /// matrix vector multiplication fn ax(&self, a_transpose: bool, x: &dyn ArrayView1) -> Self where T: Number, @@ -1199,7 +1199,7 @@ pub trait Array2: MutArrayView2 + Sized + } result } - /// + /// concatenate 1d array fn concatenate_1d<'a>(arrays: &'a [&'a dyn ArrayView1], axis: u8) -> Self { assert!( axis == 1 || axis == 0, @@ -1237,7 +1237,7 @@ pub trait Array2: MutArrayView2 + Sized + ), } } - /// + /// concatenate 2d array fn concatenate_2d<'a>(arrays: &'a [&'a dyn ArrayView2], axis: u8) -> Self { assert!( axis == 1 || axis == 0, @@ -1294,7 +1294,7 @@ pub trait Array2: MutArrayView2 + Sized + } } } - /// + /// merge 1d arrays fn merge_1d<'a>(&'a self, arrays: &'a [&'a dyn ArrayView1], axis: u8, append: bool) -> Self { assert!( axis == 1 || axis == 0, @@ -1362,7 +1362,7 @@ pub trait Array2: MutArrayView2 + Sized + } } } - /// + /// Stack arrays in sequence vertically fn v_stack(&self, other: &dyn ArrayView2) -> Self { let (nrows, ncols) = self.shape(); let (other_nrows, other_ncols) = other.shape(); @@ -1378,7 +1378,7 @@ pub trait Array2: MutArrayView2 + Sized + 0, ) } - /// + /// Stack arrays in sequence horizontally fn h_stack(&self, other: &dyn ArrayView2) -> Self { let (nrows, ncols) = self.shape(); let (other_nrows, other_ncols) = other.shape(); @@ -1394,20 +1394,20 @@ pub trait Array2: MutArrayView2 + Sized + 1, ) } - /// + /// map array values fn map, F: FnMut(&T) -> O>(self, f: F) -> A { let (nrows, ncols) = self.shape(); A::from_iterator(self.iterator(0).map(f), nrows, ncols, 0) } - /// + /// iter rows fn row_iter<'a>(&'a self) -> Box + 'a>> + 'a> { Box::new((0..self.shape().0).map(move |r| self.get_row(r))) } - /// + /// iter cols fn col_iter<'a>(&'a self) -> Box + 'a>> + 'a> { Box::new((0..self.shape().1).map(move |r| self.get_col(r))) } -
/// + /// take elements from 2d array fn take(&self, index: &[usize], axis: u8) -> Self { let (nrows, ncols) = self.shape(); @@ -1447,7 +1447,7 @@ pub trait Array2: MutArrayView2 + Sized + fn take_column(&self, column_index: usize) -> Self { self.take(&[column_index], 1) } - /// + /// add a scalar to the array fn add_scalar(&self, x: T) -> Self where T: Number, @@ -1456,7 +1456,7 @@ pub trait Array2: MutArrayView2 + Sized + result.add_scalar_mut(x); result } - /// + /// subtract a scalar from the array fn sub_scalar(&self, x: T) -> Self where T: Number, @@ -1465,7 +1465,7 @@ pub trait Array2: MutArrayView2 + Sized + result.sub_scalar_mut(x); result } - /// + /// divide a scalar from the array fn div_scalar(&self, x: T) -> Self where T: Number, @@ -1474,7 +1474,7 @@ pub trait Array2: MutArrayView2 + Sized + result.div_scalar_mut(x); result } - /// + /// multiply a scalar to the array fn mul_scalar(&self, x: T) -> Self where T: Number, @@ -1483,7 +1483,7 @@ pub trait Array2: MutArrayView2 + Sized + result.mul_scalar_mut(x); result } - /// + /// sum of two arrays fn add(&self, other: &dyn Array) -> Self where T: Number, @@ -1492,7 +1492,7 @@ pub trait Array2: MutArrayView2 + Sized + result.add_mut(other); result } - /// + /// subtract two arrays fn sub(&self, other: &dyn Array) -> Self where T: Number, @@ -1501,7 +1501,7 @@ pub trait Array2: MutArrayView2 + Sized + result.sub_mut(other); result } - /// + /// multiply two arrays fn mul(&self, other: &dyn Array) -> Self where T: Number, @@ -1510,7 +1510,7 @@ pub trait Array2: MutArrayView2 + Sized + result.mul_mut(other); result } - /// + /// divide two arrays fn div(&self, other: &dyn Array) -> Self where T: Number, @@ -1519,7 +1519,7 @@ pub trait Array2: MutArrayView2 + Sized + result.div_mut(other); result } - /// + /// absolute values of the array fn abs(&self) -> Self where T: Number + Signed, @@ -1528,7 +1528,7 @@ pub trait Array2: MutArrayView2 + Sized + result.abs_mut(); result } - /// + /// negation of the array fn neg(&self) -> Self where T: Number + Neg, @@ -1537,7 +1537,7 @@ pub trait Array2: MutArrayView2 + Sized + result.neg_mut(); result } - /// + /// values at power `p` fn pow(&self, p: T) -> Self where T: RealNumber, @@ -1575,7 +1575,7 @@ pub trait Array2: MutArrayView2 + Sized + } } - /// appriximate equality of the elements of a matrix according to a given error + /// approximate equality of the elements of a matrix according to a given error fn approximate_eq(&self, other: &Self, error: T) -> bool where T: Number + RealNumber, @@ -1631,8 +1631,8 @@ mod tests { let v = vec![3., -2., 6.]; assert_eq!(v.norm(1.), 11.); assert_eq!(v.norm(2.), 7.); - assert_eq!(v.norm(std::f64::INFINITY), 6.); - assert_eq!(v.norm(std::f64::NEG_INFINITY), 2.); + assert_eq!(v.norm(f64::INFINITY), 6.); + assert_eq!(v.norm(f64::NEG_INFINITY), 2.); } #[test] diff --git a/src/linalg/traits/evd.rs b/src/linalg/traits/evd.rs index 4db766b0..3bb382a0 100644 --- a/src/linalg/traits/evd.rs +++ b/src/linalg/traits/evd.rs @@ -841,7 +841,7 @@ mod tests { )); for (i, eigen_values_i) in eigen_values.iter().enumerate() { assert!((eigen_values_i - evd.d[i]).abs() < 1e-4); - assert!((0f64 - evd.e[i]).abs() < std::f64::EPSILON); + assert!((0f64 - evd.e[i]).abs() < f64::EPSILON); } } #[cfg_attr( @@ -875,7 +875,7 @@ mod tests { )); for (i, eigen_values_i) in eigen_values.iter().enumerate() { assert!((eigen_values_i - evd.d[i]).abs() < 1e-4); - assert!((0f64 - evd.e[i]).abs() < std::f64::EPSILON); + assert!((0f64 - evd.e[i]).abs() < f64::EPSILON); } } #[cfg_attr( diff 
--git a/src/linalg/traits/stats.rs b/src/linalg/traits/stats.rs index 43c23dce..8702a81a 100644 --- a/src/linalg/traits/stats.rs +++ b/src/linalg/traits/stats.rs @@ -217,8 +217,8 @@ mod tests { let expected_0 = vec![0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]; let expected_1 = vec![1.25, 1.25]; - assert!(m.var(0).approximate_eq(&expected_0, std::f64::EPSILON)); - assert!(m.var(1).approximate_eq(&expected_1, std::f64::EPSILON)); + assert!(m.var(0).approximate_eq(&expected_0, f64::EPSILON)); + assert!(m.var(1).approximate_eq(&expected_1, f64::EPSILON)); assert_eq!( m.mean(0), vec![0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25] diff --git a/src/linalg/traits/svd.rs b/src/linalg/traits/svd.rs index 75c303ae..cee33a0e 100644 --- a/src/linalg/traits/svd.rs +++ b/src/linalg/traits/svd.rs @@ -48,11 +48,9 @@ pub struct SVD> { pub V: M, /// Singular values of the original matrix pub s: Vec, - /// m: usize, - /// n: usize, - /// + /// Tolerance tol: T, } diff --git a/src/linear/bg_solver.rs b/src/linear/bg_solver.rs index 6ee4f0ec..2c466b13 100644 --- a/src/linear/bg_solver.rs +++ b/src/linear/bg_solver.rs @@ -27,9 +27,9 @@ use crate::error::Failed; use crate::linalg::basic::arrays::{Array, Array1, Array2, ArrayView1, MutArrayView1}; use crate::numbers::floatnum::FloatNumber; -/// +/// Trait for Biconjugate Gradient Solver pub trait BiconjugateGradientSolver<'a, T: FloatNumber, X: Array2> { - /// + /// Solve Ax = b fn solve_mut( &self, a: &'a X, @@ -109,7 +109,7 @@ pub trait BiconjugateGradientSolver<'a, T: FloatNumber, X: Array2> { Ok(err) } - /// + /// solve preconditioner fn solve_preconditioner(&self, a: &'a X, b: &[T], x: &mut [T]) { let diag = Self::diag(a); let n = diag.len(); @@ -133,7 +133,7 @@ pub trait BiconjugateGradientSolver<'a, T: FloatNumber, X: Array2> { y.copy_from(&x.xa(true, a)); } - /// + /// Extract the diagonal from a matrix fn diag(a: &X) -> Vec { let (nrows, ncols) = a.shape(); let n = nrows.min(ncols); diff --git a/src/linear/lasso_optimizer.rs b/src/linear/lasso_optimizer.rs index 3f18c030..22119160 100644 --- a/src/linear/lasso_optimizer.rs +++ b/src/linear/lasso_optimizer.rs @@ -16,7 +16,7 @@ use crate::linalg::basic::arrays::{Array1, Array2, ArrayView1, MutArray, MutArra use crate::linear::bg_solver::BiconjugateGradientSolver; use crate::numbers::floatnum::FloatNumber; -/// +/// Interior Point Optimizer pub struct InteriorPointOptimizer> { ata: X, d1: Vec, @@ -25,9 +25,8 @@ pub struct InteriorPointOptimizer> { prs: Vec, } -/// impl> InteriorPointOptimizer { - /// + /// Initialize a new Interior Point Optimizer pub fn new(a: &X, n: usize) -> InteriorPointOptimizer { InteriorPointOptimizer { ata: a.ab(true, a, false), @@ -38,7 +37,7 @@ impl> InteriorPointOptimizer { } } - /// + /// Run the optimization pub fn optimize( &mut self, x: &X, @@ -101,7 +100,7 @@ impl> InteriorPointOptimizer { // CALCULATE DUALITY GAP let xnu = nu.xa(false, x); - let max_xnu = xnu.norm(std::f64::INFINITY); + let max_xnu = xnu.norm(f64::INFINITY); if max_xnu > lambda_f64 { let lnu = T::from_f64(lambda_f64 / max_xnu).unwrap(); nu.mul_scalar_mut(lnu); @@ -208,7 +207,6 @@ impl> InteriorPointOptimizer { Ok(w) } - /// fn sumlogneg(f: &X) -> T { let (n, _) = f.shape(); let mut sum = T::zero(); @@ -220,11 +218,9 @@ impl> InteriorPointOptimizer { } } -/// impl<'a, T: FloatNumber, X: Array2> BiconjugateGradientSolver<'a, T, X> for InteriorPointOptimizer { - /// fn solve_preconditioner(&self, a: &'a X, b: &[T], x: &mut [T]) { let (_, p) = a.shape(); @@ -234,7 +230,6 @@ impl<'a, T: FloatNumber, X: Array2> 
BiconjugateGradientSolver<'a, T, X> } } - /// fn mat_vec_mul(&self, _: &X, x: &Vec, y: &mut Vec) { let (_, p) = self.ata.shape(); let x_slice = Vec::from_slice(x.slice(0..p).as_ref()); @@ -246,7 +241,6 @@ impl<'a, T: FloatNumber, X: Array2> BiconjugateGradientSolver<'a, T, X> } } - /// fn mat_t_vec_mul(&self, a: &X, x: &Vec, y: &mut Vec) { self.mat_vec_mul(a, x, y); } diff --git a/src/linear/logistic_regression.rs b/src/linear/logistic_regression.rs index 12ecf8d8..7e934288 100644 --- a/src/linear/logistic_regression.rs +++ b/src/linear/logistic_regression.rs @@ -183,14 +183,11 @@ pub struct LogisticRegression< } trait ObjectiveFunction> { - /// fn f(&self, w_bias: &[T]) -> T; - /// #[allow(clippy::ptr_arg)] fn df(&self, g: &mut Vec, w_bias: &Vec); - /// #[allow(clippy::ptr_arg)] fn partial_dot(w: &[T], x: &X, v_col: usize, m_row: usize) -> T { let mut sum = T::zero(); @@ -629,11 +626,11 @@ mod tests { objective.df(&mut g, &vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]); objective.df(&mut g, &vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]); - assert!((g[0] + 33.000068218163484).abs() < std::f64::EPSILON); + assert!((g[0] + 33.000068218163484).abs() < f64::EPSILON); let f = objective.f(&[1., 2., 3., 4., 5., 6., 7., 8., 9.]); - assert!((f - 408.0052230582765).abs() < std::f64::EPSILON); + assert!((f - 408.0052230582765).abs() < f64::EPSILON); let objective_reg = MultiClassObjectiveFunction { x: &x, @@ -689,13 +686,13 @@ mod tests { objective.df(&mut g, &vec![1., 2., 3.]); objective.df(&mut g, &vec![1., 2., 3.]); - assert!((g[0] - 26.051064349381285).abs() < std::f64::EPSILON); - assert!((g[1] - 10.239000702928523).abs() < std::f64::EPSILON); - assert!((g[2] - 3.869294270156324).abs() < std::f64::EPSILON); + assert!((g[0] - 26.051064349381285).abs() < f64::EPSILON); + assert!((g[1] - 10.239000702928523).abs() < f64::EPSILON); + assert!((g[2] - 3.869294270156324).abs() < f64::EPSILON); let f = objective.f(&[1., 2., 3.]); - assert!((f - 59.76994756647412).abs() < std::f64::EPSILON); + assert!((f - 59.76994756647412).abs() < f64::EPSILON); let objective_reg = BinaryObjectiveFunction { x: &x, @@ -916,7 +913,7 @@ mod tests { let x: DenseMatrix = DenseMatrix::rand(52181, 94); let y1: Vec = vec![1; 2181]; let y2: Vec = vec![0; 50000]; - let y: Vec = y1.into_iter().chain(y2.into_iter()).collect(); + let y: Vec = y1.into_iter().chain(y2).collect(); let lr = LogisticRegression::fit(&x, &y, Default::default()).unwrap(); let lr_reg = LogisticRegression::fit( @@ -938,12 +935,12 @@ mod tests { let x: &DenseMatrix = &DenseMatrix::rand(52181, 94); let y1: Vec = vec![1; 2181]; let y2: Vec = vec![0; 50000]; - let y: &Vec = &(y1.into_iter().chain(y2.into_iter()).collect()); + let y: &Vec = &(y1.into_iter().chain(y2).collect()); println!("y vec height: {:?}", y.len()); println!("x matrix shape: {:?}", x.shape()); let lr = LogisticRegression::fit(x, y, Default::default()).unwrap(); - let y_hat = lr.predict(&x).unwrap(); + let y_hat = lr.predict(x).unwrap(); println!("y_hat shape: {:?}", y_hat.shape()); diff --git a/src/naive_bayes/bernoulli.rs b/src/naive_bayes/bernoulli.rs index 33f00bd4..4be62d56 100644 --- a/src/naive_bayes/bernoulli.rs +++ b/src/naive_bayes/bernoulli.rs @@ -258,7 +258,7 @@ impl BernoulliNBDistribution { /// * `x` - training data. /// * `y` - vector with target values (classes) of length N. /// * `priors` - Optional vector with prior probabilities of the classes. If not defined, - /// priors are adjusted according to the data. + /// priors are adjusted according to the data. 
/// * `alpha` - Additive (Laplace/Lidstone) smoothing parameter. /// * `binarize` - Threshold for binarizing. fn fit, Y: Array1>( @@ -402,10 +402,10 @@ impl, Y: Arr { /// Fits BernoulliNB with given data /// * `x` - training data of size NxM where N is the number of samples and M is the number of - /// features. + /// features. /// * `y` - vector with target values (classes) of length N. /// * `parameters` - additional parameters like class priors, alpha for smoothing and - /// binarizing threshold. + /// binarizing threshold. pub fn fit(x: &X, y: &Y, parameters: BernoulliNBParameters) -> Result { let distribution = if let Some(threshold) = parameters.binarize { BernoulliNBDistribution::fit( @@ -427,6 +427,7 @@ impl, Y: Arr /// Estimates the class labels for the provided data. /// * `x` - data of shape NxM where N is number of data points to estimate and M is number of features. + /// /// Returns a vector of size N with class estimates. pub fn predict(&self, x: &X) -> Result { if let Some(threshold) = self.binarize { diff --git a/src/naive_bayes/categorical.rs b/src/naive_bayes/categorical.rs index 71a7487a..b60ee0d3 100644 --- a/src/naive_bayes/categorical.rs +++ b/src/naive_bayes/categorical.rs @@ -95,7 +95,7 @@ impl PartialEq for CategoricalNBDistribution { return false; } for (a_i_j, b_i_j) in a_i.iter().zip(b_i.iter()) { - if (*a_i_j - *b_i_j).abs() > std::f64::EPSILON { + if (*a_i_j - *b_i_j).abs() > f64::EPSILON { return false; } } @@ -363,7 +363,7 @@ impl, Y: Array1> Predictor for Categ impl, Y: Array1> CategoricalNB { /// Fits CategoricalNB with given data /// * `x` - training data of size NxM where N is the number of samples and M is the number of - /// features. + /// features. /// * `y` - vector with target values (classes) of length N. /// * `parameters` - additional parameters like alpha for smoothing pub fn fit(x: &X, y: &Y, parameters: CategoricalNBParameters) -> Result { @@ -375,6 +375,7 @@ impl, Y: Array1> CategoricalNB { /// Estimates the class labels for the provided data. /// * `x` - data of shape NxM where N is number of data points to estimate and M is number of features. + /// /// Returns a vector of size N with class estimates. pub fn predict(&self, x: &X) -> Result { self.inner.as_ref().unwrap().predict(x) diff --git a/src/naive_bayes/gaussian.rs b/src/naive_bayes/gaussian.rs index aff996be..e774fdc9 100644 --- a/src/naive_bayes/gaussian.rs +++ b/src/naive_bayes/gaussian.rs @@ -175,7 +175,7 @@ impl GaussianNBDistribution { /// * `x` - training data. /// * `y` - vector with target values (classes) of length N. /// * `priors` - Optional vector with prior probabilities of the classes. If not defined, - /// priors are adjusted according to the data. + /// priors are adjusted according to the data. pub fn fit, Y: Array1>( x: &X, y: &Y, @@ -317,7 +317,7 @@ impl, Y: Arr { /// Fits GaussianNB with given data /// * `x` - training data of size NxM where N is the number of samples and M is the number of - /// features. + /// features. /// * `y` - vector with target values (classes) of length N. /// * `parameters` - additional parameters like class priors. pub fn fit(x: &X, y: &Y, parameters: GaussianNBParameters) -> Result { @@ -328,6 +328,7 @@ impl, Y: Arr /// Estimates the class labels for the provided data. /// * `x` - data of shape NxM where N is number of data points to estimate and M is number of features. + /// /// Returns a vector of size N with class estimates. 
pub fn predict(&self, x: &X) -> Result { self.inner.as_ref().unwrap().predict(x) diff --git a/src/naive_bayes/mod.rs b/src/naive_bayes/mod.rs index 1d74a315..31cdd46d 100644 --- a/src/naive_bayes/mod.rs +++ b/src/naive_bayes/mod.rs @@ -89,6 +89,7 @@ impl, Y: Array1, D: NBDistribution Result { let y_classes = self.distribution.classes(); @@ -163,7 +164,7 @@ mod tests { } fn classes(&self) -> &Vec { - &self.0 + self.0 } } diff --git a/src/naive_bayes/multinomial.rs b/src/naive_bayes/multinomial.rs index 2d6c437c..e00965ed 100644 --- a/src/naive_bayes/multinomial.rs +++ b/src/naive_bayes/multinomial.rs @@ -208,7 +208,7 @@ impl MultinomialNBDistribution { /// * `x` - training data. /// * `y` - vector with target values (classes) of length N. /// * `priors` - Optional vector with prior probabilities of the classes. If not defined, - /// priors are adjusted according to the data. + /// priors are adjusted according to the data. /// * `alpha` - Additive (Laplace/Lidstone) smoothing parameter. pub fn fit, Y: Array1>( x: &X, @@ -345,10 +345,10 @@ impl, Y: Array { /// Fits MultinomialNB with given data /// * `x` - training data of size NxM where N is the number of samples and M is the number of - /// features. + /// features. /// * `y` - vector with target values (classes) of length N. /// * `parameters` - additional parameters like class priors, alpha for smoothing and - /// binarizing threshold. + /// binarizing threshold. pub fn fit(x: &X, y: &Y, parameters: MultinomialNBParameters) -> Result { let distribution = MultinomialNBDistribution::fit(x, y, parameters.alpha, parameters.priors)?; @@ -358,6 +358,7 @@ impl, Y: Array /// Estimates the class labels for the provided data. /// * `x` - data of shape NxM where N is number of data points to estimate and M is number of features. + /// /// Returns a vector of size N with class estimates. pub fn predict(&self, x: &X) -> Result { self.inner.as_ref().unwrap().predict(x) diff --git a/src/neighbors/knn_classifier.rs b/src/neighbors/knn_classifier.rs index d18620c9..137143e0 100644 --- a/src/neighbors/knn_classifier.rs +++ b/src/neighbors/knn_classifier.rs @@ -261,6 +261,7 @@ impl, Y: Array1, D: Distance Result { let mut result = Y::zeros(x.shape().0); diff --git a/src/neighbors/knn_regressor.rs b/src/neighbors/knn_regressor.rs index e4efe48a..b49743f8 100644 --- a/src/neighbors/knn_regressor.rs +++ b/src/neighbors/knn_regressor.rs @@ -88,25 +88,21 @@ pub struct KNNRegressor, Y: Array1, D: impl, Y: Array1, D: Distance>> KNNRegressor { - /// fn y(&self) -> &Y { self.y.as_ref().unwrap() } - /// fn knn_algorithm(&self) -> &KNNAlgorithm { self.knn_algorithm .as_ref() .expect("Missing parameter: KNNAlgorithm") } - /// fn weight(&self) -> &KNNWeightFunction { self.weight.as_ref().expect("Missing parameter: weight") } #[allow(dead_code)] - /// fn k(&self) -> usize { self.k.unwrap() } @@ -250,6 +246,7 @@ impl, Y: Array1, D: Distance>> /// Predict the target for the provided data. /// * `x` - data of shape NxM where N is number of data points to estimate and M is number of features. + /// /// Returns a vector of size N with estimates. 
pub fn predict(&self, x: &X) -> Result { let mut result = Y::zeros(x.shape().0); @@ -312,7 +309,7 @@ mod tests { let y_hat = knn.predict(&x).unwrap(); assert_eq!(5, Vec::len(&y_hat)); for i in 0..y_hat.len() { - assert!((y_hat[i] - y_exp[i]).abs() < std::f64::EPSILON); + assert!((y_hat[i] - y_exp[i]).abs() < f64::EPSILON); } } diff --git a/src/optimization/first_order/gradient_descent.rs b/src/optimization/first_order/gradient_descent.rs index 9cc78f0c..0be7222f 100644 --- a/src/optimization/first_order/gradient_descent.rs +++ b/src/optimization/first_order/gradient_descent.rs @@ -1,5 +1,3 @@ -// TODO: missing documentation - use std::default::Default; use crate::linalg::basic::arrays::Array1; @@ -8,30 +6,27 @@ use crate::optimization::first_order::{FirstOrderOptimizer, OptimizerResult}; use crate::optimization::line_search::LineSearchMethod; use crate::optimization::{DF, F}; -/// +/// Gradient Descent optimization algorithm pub struct GradientDescent { - /// + /// Maximum number of iterations pub max_iter: usize, - /// + /// Relative tolerance for the gradient norm pub g_rtol: f64, - /// + /// Absolute tolerance for the gradient norm pub g_atol: f64, } -/// impl Default for GradientDescent { fn default() -> Self { GradientDescent { max_iter: 10000, - g_rtol: std::f64::EPSILON.sqrt(), - g_atol: std::f64::EPSILON, + g_rtol: f64::EPSILON.sqrt(), + g_atol: f64::EPSILON, } } } -/// impl FirstOrderOptimizer for GradientDescent { - /// fn optimize<'a, X: Array1, LS: LineSearchMethod>( &self, f: &'a F<'_, T, X>, diff --git a/src/optimization/first_order/lbfgs.rs b/src/optimization/first_order/lbfgs.rs index 81e7b640..b4f6c9f1 100644 --- a/src/optimization/first_order/lbfgs.rs +++ b/src/optimization/first_order/lbfgs.rs @@ -11,31 +11,29 @@ use crate::optimization::first_order::{FirstOrderOptimizer, OptimizerResult}; use crate::optimization::line_search::LineSearchMethod; use crate::optimization::{DF, F}; -/// +/// Limited-memory BFGS optimization algorithm pub struct LBFGS { - /// + /// Maximum number of iterations pub max_iter: usize, - /// + /// Relative tolerance for the gradient norm pub g_rtol: f64, - /// + /// Absolute tolerance for the gradient norm pub g_atol: f64, - /// + /// Absolute tolerance for the change in x between iterations pub x_atol: f64, - /// + /// Relative tolerance for the change in x between iterations pub x_rtol: f64, - /// + /// Absolute tolerance for the change in the objective value pub f_abstol: f64, - /// + /// Relative tolerance for the change in the objective value pub f_reltol: f64, - /// + /// Number of successive iterations within the f tolerance before convergence is declared pub successive_f_tol: usize, - /// + /// Number of corrections kept in the L-BFGS history pub m: usize, } -/// impl Default for LBFGS { - /// fn default() -> Self { LBFGS { max_iter: 1000, @@ -51,9 +49,7 @@ impl Default for LBFGS { } } -/// impl LBFGS { - /// fn two_loops>(&self, state: &mut LBFGSState) { let lower = state.iteration.max(self.m) - self.m; let upper = state.iteration; @@ -95,7 +91,6 @@ impl LBFGS { state.s.mul_scalar_mut(-T::one()); } - /// fn init_state>(&self, x: &X) -> LBFGSState { LBFGSState { x: x.clone(), @@ -119,7 +114,6 @@ impl LBFGS { } } - /// fn update_state<'a, T: FloatNumber + RealNumber, X: Array1, LS: LineSearchMethod>( &self, f: &'a F<'_, T, X>, @@ -161,7 +155,6 @@ impl LBFGS { df(&mut state.x_df, &state.x); } - /// fn assess_convergence>( &self, state: &mut LBFGSState, @@ -173,7 +166,7 @@ impl LBFGS { } if state.x.max_diff(&state.x_prev) - <= T::from_f64(self.x_rtol * state.x.norm(std::f64::INFINITY)).unwrap() + <= T::from_f64(self.x_rtol * state.x.norm(f64::INFINITY)).unwrap() { x_converged = true; } @@ -188,14 +181,13 @@ impl LBFGS { state.counter_f_tol += 1; } - if state.x_df.norm(std::f64::INFINITY) <= 
self.g_atol { + if state.x_df.norm(f64::INFINITY) <= self.g_atol { g_converged = true; } g_converged || x_converged || state.counter_f_tol > self.successive_f_tol } - /// fn update_hessian>( &self, _: &DF<'_, X>, @@ -212,7 +204,6 @@ impl LBFGS { } } -/// #[derive(Debug)] struct LBFGSState> { x: X, @@ -234,9 +225,7 @@ struct LBFGSState> { alpha: T, } -/// impl FirstOrderOptimizer for LBFGS { - /// fn optimize<'a, X: Array1, LS: LineSearchMethod>( &self, f: &F<'_, T, X>, @@ -248,7 +237,7 @@ impl FirstOrderOptimizer for LBFGS { df(&mut state.x_df, x0); - let g_converged = state.x_df.norm(std::f64::INFINITY) < self.g_atol; + let g_converged = state.x_df.norm(f64::INFINITY) < self.g_atol; let mut converged = g_converged; let stopped = false; @@ -299,7 +288,7 @@ mod tests { let result = optimizer.optimize(&f, &df, &x0, &ls); - assert!((result.f_x - 0.0).abs() < std::f64::EPSILON); + assert!((result.f_x - 0.0).abs() < f64::EPSILON); assert!((result.x[0] - 1.0).abs() < 1e-8); assert!((result.x[1] - 1.0).abs() < 1e-8); assert!(result.iterations <= 24); diff --git a/src/optimization/first_order/mod.rs b/src/optimization/first_order/mod.rs index 910be275..cf7e4f91 100644 --- a/src/optimization/first_order/mod.rs +++ b/src/optimization/first_order/mod.rs @@ -1,6 +1,6 @@ -/// +/// Gradient descent optimization algorithm pub mod gradient_descent; -/// +/// Limited-memory BFGS optimization algorithm pub mod lbfgs; use std::clone::Clone; @@ -11,9 +11,9 @@ use crate::numbers::floatnum::FloatNumber; use crate::optimization::line_search::LineSearchMethod; use crate::optimization::{DF, F}; -/// +/// First-order optimization is a class of algorithms that use the first derivative of a function to find optimal solutions. pub trait FirstOrderOptimizer { - /// + /// run first order optimization fn optimize<'a, X: Array1, LS: LineSearchMethod>( &self, f: &F<'_, T, X>, @@ -23,13 +23,13 @@ pub trait FirstOrderOptimizer { ) -> OptimizerResult; } -/// +/// Result of optimization #[derive(Debug, Clone)] pub struct OptimizerResult> { - /// + /// Solution pub x: X, - /// + /// f(x) value pub f_x: T, - /// + /// number of iterations pub iterations: usize, } diff --git a/src/optimization/line_search.rs b/src/optimization/line_search.rs index 9a2656cd..8357d8da 100644 --- a/src/optimization/line_search.rs +++ b/src/optimization/line_search.rs @@ -1,11 +1,9 @@ -// TODO: missing documentation - use crate::optimization::FunctionOrder; use num_traits::Float; -/// +/// Line search optimization. pub trait LineSearchMethod { - /// + /// Find alpha that satisfies strong Wolfe conditions. fn search( &self, f: &(dyn Fn(T) -> T), @@ -16,32 +14,31 @@ pub trait LineSearchMethod { ) -> LineSearchResult; } -/// +/// Line search result #[derive(Debug, Clone)] pub struct LineSearchResult { - /// + /// Alpha value pub alpha: T, - /// + /// f(alpha) value pub f_x: T, } -/// +/// Backtracking line search method. 
pub struct Backtracking { - /// + /// TODO: Add documentation pub c1: T, - /// + /// Maximum number of iterations for Backtracking single run pub max_iterations: usize, - /// + /// TODO: Add documentation pub max_infinity_iterations: usize, - /// + /// TODO: Add documentation pub phi: T, - /// + /// TODO: Add documentation pub plo: T, - /// + /// function order pub order: FunctionOrder, } -/// impl Default for Backtracking { fn default() -> Self { Backtracking { @@ -55,9 +52,7 @@ impl Default for Backtracking { } } -/// impl LineSearchMethod for Backtracking { - /// fn search( &self, f: &(dyn Fn(T) -> T), diff --git a/src/optimization/mod.rs b/src/optimization/mod.rs index 2f6c41a2..83ca2493 100644 --- a/src/optimization/mod.rs +++ b/src/optimization/mod.rs @@ -1,21 +1,19 @@ -// TODO: missing documentation - -/// +/// first order optimization algorithms pub mod first_order; -/// +/// line search algorithms pub mod line_search; -/// +/// Function f(x) = y pub type F<'a, T, X> = dyn for<'b> Fn(&'b X) -> T + 'a; -/// +/// Function df(x) pub type DF<'a, X> = dyn for<'b> Fn(&'b mut X, &'b X) + 'a; -/// +/// Function order #[allow(clippy::upper_case_acronyms)] #[derive(Debug, PartialEq, Eq)] pub enum FunctionOrder { - /// + /// Second order SECOND, - /// + /// Third order THIRD, } diff --git a/src/svm/mod.rs b/src/svm/mod.rs index 0792fdb8..f6baf8bb 100644 --- a/src/svm/mod.rs +++ b/src/svm/mod.rs @@ -292,7 +292,7 @@ mod tests { .unwrap() .abs(); - assert!((4913f64 - result) < std::f64::EPSILON); + assert!((4913f64 - result).abs() < f64::EPSILON); } #[cfg_attr( diff --git a/src/tree/decision_tree_classifier.rs b/src/tree/decision_tree_classifier.rs index 4da9f443..c6596517 100644 --- a/src/tree/decision_tree_classifier.rs +++ b/src/tree/decision_tree_classifier.rs @@ -197,12 +197,12 @@ impl PartialEq for Node { self.output == other.output && self.split_feature == other.split_feature && match (self.split_value, other.split_value) { - (Some(a), Some(b)) => (a - b).abs() < std::f64::EPSILON, + (Some(a), Some(b)) => (a - b).abs() < f64::EPSILON, (None, None) => true, _ => false, } && match (self.split_score, other.split_score) { - (Some(a), Some(b)) => (a - b).abs() < std::f64::EPSILON, + (Some(a), Some(b)) => (a - b).abs() < f64::EPSILON, (None, None) => true, _ => false, } @@ -613,7 +613,7 @@ impl, Y: Array1> visitor_queue.push_back(visitor); } - while tree.depth() < tree.parameters().max_depth.unwrap_or(std::u16::MAX) { + while tree.depth() < tree.parameters().max_depth.unwrap_or(u16::MAX) { match visitor_queue.pop_front() { Some(node) => tree.split(node, mtry, &mut visitor_queue, &mut rng), None => break, @@ -650,7 +650,7 @@ impl, Y: Array1> if node.true_child.is_none() && node.false_child.is_none() { result = node.output; } else if x.get((row, node.split_feature)).to_f64().unwrap() - <= node.split_value.unwrap_or(std::f64::NAN) + <= node.split_value.unwrap_or(f64::NAN) { queue.push_back(node.true_child.unwrap()); } else { @@ -803,9 +803,7 @@ impl, Y: Array1> .get((i, self.nodes()[visitor.node].split_feature)) .to_f64() .unwrap() - <= self.nodes()[visitor.node] - .split_value - .unwrap_or(std::f64::NAN) + <= self.nodes()[visitor.node].split_value.unwrap_or(f64::NAN) { *true_sample = visitor.samples[i]; tc += *true_sample; @@ -925,14 +923,14 @@ mod tests { )] #[test] fn gini_impurity() { - assert!((impurity(&SplitCriterion::Gini, &[7, 3], 10) - 0.42).abs() < std::f64::EPSILON); + assert!((impurity(&SplitCriterion::Gini, &[7, 3], 10) - 0.42).abs() < f64::EPSILON); assert!( 
(impurity(&SplitCriterion::Entropy, &[7, 3], 10) - 0.8812908992306927).abs() - < std::f64::EPSILON + < f64::EPSILON ); assert!( (impurity(&SplitCriterion::ClassificationError, &[7, 3], 10) - 0.3).abs() - < std::f64::EPSILON + < f64::EPSILON ); } diff --git a/src/tree/decision_tree_regressor.rs b/src/tree/decision_tree_regressor.rs index 1569af2e..d735697d 100644 --- a/src/tree/decision_tree_regressor.rs +++ b/src/tree/decision_tree_regressor.rs @@ -311,15 +311,15 @@ impl Node { impl PartialEq for Node { fn eq(&self, other: &Self) -> bool { - (self.output - other.output).abs() < std::f64::EPSILON + (self.output - other.output).abs() < f64::EPSILON && self.split_feature == other.split_feature && match (self.split_value, other.split_value) { - (Some(a), Some(b)) => (a - b).abs() < std::f64::EPSILON, + (Some(a), Some(b)) => (a - b).abs() < f64::EPSILON, (None, None) => true, _ => false, } && match (self.split_score, other.split_score) { - (Some(a), Some(b)) => (a - b).abs() < std::f64::EPSILON, + (Some(a), Some(b)) => (a - b).abs() < f64::EPSILON, (None, None) => true, _ => false, } @@ -478,7 +478,7 @@ impl, Y: Array1> visitor_queue.push_back(visitor); } - while tree.depth() < tree.parameters().max_depth.unwrap_or(std::u16::MAX) { + while tree.depth() < tree.parameters().max_depth.unwrap_or(u16::MAX) { match visitor_queue.pop_front() { Some(node) => tree.split(node, mtry, &mut visitor_queue, &mut rng), None => break, @@ -515,7 +515,7 @@ impl, Y: Array1> if node.true_child.is_none() && node.false_child.is_none() { result = node.output; } else if x.get((row, node.split_feature)).to_f64().unwrap() - <= node.split_value.unwrap_or(std::f64::NAN) + <= node.split_value.unwrap_or(f64::NAN) { queue.push_back(node.true_child.unwrap()); } else { @@ -640,9 +640,7 @@ impl, Y: Array1> .get((i, self.nodes()[visitor.node].split_feature)) .to_f64() .unwrap() - <= self.nodes()[visitor.node] - .split_value - .unwrap_or(std::f64::NAN) + <= self.nodes()[visitor.node].split_value.unwrap_or(f64::NAN) { *true_sample = visitor.samples[i]; tc += *true_sample;
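
The bulk of this patch is a mechanical migration from the deprecated std::f64::*, std::i64::*, and std::u16::* module-level constants to the associated constants on the primitive types; this is what newer clippy toolchains warn about (likely via the legacy_numeric_constants lint). A minimal sketch of the before/after pattern, independent of smartcore's types:

    // Before (deprecated module-level constants):
    //     let worst = std::f64::MAX;
    //     let scale = std::i64::MIN;
    // After (associated constants on the primitive types):
    fn main() {
        let worst = f64::MAX;          // largest finite f64, used as "no distance yet"
        let unbounded = f64::INFINITY; // sentinel used to seed heaps and folds
        let scale = i64::MIN;          // sentinel scale for zero distances in the cover tree
        assert!(worst < unbounded);
        println!("{worst} {unbounded} {scale}");
    }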
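
In src/linalg/basic/arrays.rs the p = ±infinity branches of ArrayView1::norm keep their fold-based logic; only the spelling of the seed constants changes. The same logic restated on a plain slice (my own standalone sketch, not the trait method itself):

    // Max of |x|: seed with NEG_INFINITY so any finite value wins the first max.
    fn inf_norm(v: &[f64]) -> f64 {
        v.iter().map(|x| x.abs()).fold(f64::NEG_INFINITY, f64::max)
    }

    // Min of |x|: seed with INFINITY, symmetrically.
    fn neg_inf_norm(v: &[f64]) -> f64 {
        v.iter().map(|x| x.abs()).fold(f64::INFINITY, f64::min)
    }

    fn main() {
        let v = [3.0, -2.0, 6.0];
        assert_eq!(inf_norm(&v), 6.0);     // mirrors the test: v.norm(f64::INFINITY) == 6.
        assert_eq!(neg_inf_norm(&v), 2.0); // mirrors the test: v.norm(f64::NEG_INFINITY) == 2.
    }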
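
The change in src/svm/mod.rs is a real bug fix rather than a rename: (4913f64 - result) < f64::EPSILON holds for any result greater than 4913, because the difference is then negative, so the old assertion could never catch overshoot. Taking the absolute value first gives the approximate-equality test used everywhere else in the suite:

    // |a - b| < eps, the comparison pattern used across the tests in this patch.
    fn approx_eq(a: f64, b: f64, eps: f64) -> bool {
        (a - b).abs() < eps
    }

    fn main() {
        let result = 4913.0000001_f64;
        // Old form: trivially true, because the difference is negative.
        assert!((4913f64 - result) < f64::EPSILON);
        // Fixed form: correctly reports that the values differ.
        assert!(!approx_eq(4913f64, result, f64::EPSILON));
    }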
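
The naive Bayes doc edits indent the wrapped continuation lines of bullet items and insert a blank /// line before the "Returns ..." sentence. Without the indentation, rustdoc treats the wrapped line as a lazy continuation of the list item's paragraph, which newer clippy warns about (likely the doc_lazy_continuation lint). The shape of the fix, on a hypothetical stub:

    /// Fits the model with the given data.
    /// * `x` - training data of size NxM where N is the number of samples and M is the number of
    ///   features.
    ///
    /// Returns a vector of size N with class estimates.
    pub fn fit_stub() {}

    fn main() {
        fit_stub();
    }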
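
src/optimization/line_search.rs now documents search as finding an alpha that satisfies the sufficient-decrease (Armijo) part of the strong Wolfe conditions, with c1 as the decrease parameter. A self-contained backtracking sketch of that idea; this is my own illustration under those assumptions, not the crate's implementation, which also does polynomial interpolation controlled by FunctionOrder:

    // Minimal Armijo backtracking: shrink alpha until
    // f(x + alpha * d) <= f(x) + c1 * alpha * f'(x; d).
    fn backtracking(
        f: &dyn Fn(f64) -> f64, // phi(alpha) = f(x + alpha * d)
        df0: f64,               // phi'(0); negative for a descent direction
        f0: f64,                // phi(0)
        mut alpha: f64,
        c1: f64,
        rho: f64, // contraction factor in (0, 1)
        max_iterations: usize,
    ) -> f64 {
        for _ in 0..max_iterations {
            if f(alpha) <= f0 + c1 * alpha * df0 {
                break; // sufficient decrease reached
            }
            alpha *= rho;
        }
        alpha
    }

    fn main() {
        // phi(alpha) for f(x) = x^2 at x = 1 with direction d = -1: (1 - alpha)^2.
        let phi = |a: f64| (1.0 - a).powi(2);
        let alpha = backtracking(&phi, -2.0, 1.0, 1.0, 1e-4, 0.5, 64);
        assert!(phi(alpha) < 1.0); // the accepted step decreases the objective
        println!("accepted alpha = {alpha}");
    }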
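
The LBFGS convergence test in assess_convergence mixes absolute and relative tolerances: changes in x and f are compared against x_atol/x_rtol and f_abstol/f_reltol, the gradient's infinity norm against g_atol, and successive_f_tol bounds how many consecutive small f-changes are tolerated before declaring convergence. A scalar sketch of those checks; field names mirror the LBFGS struct, but this is illustrative only, not the crate's API:

    // Scalar version of the three convergence tests used by LBFGS.
    struct Tolerances {
        x_atol: f64,
        x_rtol: f64,
        f_abstol: f64,
        f_reltol: f64,
        g_atol: f64,
    }

    fn converged(t: &Tolerances, dx: f64, x_norm: f64, df: f64, f_abs: f64, g_norm: f64) -> bool {
        let x_converged = dx <= t.x_atol || dx <= t.x_rtol * x_norm;
        let f_converged = df <= t.f_abstol || df <= t.f_reltol * f_abs;
        let g_converged = g_norm <= t.g_atol;
        x_converged || f_converged || g_converged
    }

    fn main() {
        let t = Tolerances {
            x_atol: 0.0,
            x_rtol: f64::EPSILON,
            f_abstol: 0.0,
            f_reltol: f64::EPSILON,
            g_atol: 1e-8,
        };
        assert!(converged(&t, 1e-20, 1.0, 1e-20, 1.0, 1e-12)); // tiny steps: converged
        assert!(!converged(&t, 1.0, 1.0, 1.0, 1.0, 1.0));      // large steps: not yet
    }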