Skip to content

Commit cc1f84e

Browse files
Volodymyr OrlovVolodymyr Orlov
authored and committed
feat: documents matrix decomposition methods
1 parent bbe810d commit cc1f84e

File tree

6 files changed

+250
-10
lines changed

6 files changed

+250
-10
lines changed

src/linalg/evd.rs

Lines changed: 44 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,37 @@
1+
//! # Eigen Decomposition
2+
//!
3+
//! Eigendecomposition is one of the most useful matrix factorization methods in machine learning that decomposes a matrix into eigenvectors and eigenvalues.
4+
//! This decomposition plays an important role in the [Principal Component Analysis (PCA)](../../decomposition/pca/index.html).
5+
//!
6+
//! Eigendecomposition decomposes a square matrix into a set of eigenvectors and eigenvalues.
7+
//!
8+
//! \\[A = Q \Lambda Q^{-1}\\]
9+
//!
10+
//! where \\(Q\\) is a matrix comprised of the eigenvectors, \\(\Lambda\\) is a diagonal matrix comprised of the eigenvalues along the diagonal,
11+
//! and \\(Q^{-1}\\) is the inverse of the matrix comprised of the eigenvectors.
12+
//!
13+
//! Example:
14+
//! ```
15+
//! use smartcore::linalg::naive::dense_matrix::*;
16+
//! use smartcore::linalg::evd::*;
17+
//!
18+
//! let A = DenseMatrix::from_2d_array(&[
19+
//! &[0.9000, 0.4000, 0.7000],
20+
//! &[0.4000, 0.5000, 0.3000],
21+
//! &[0.7000, 0.3000, 0.8000],
22+
//! ]);
23+
//!
24+
//! let evd = A.evd(true);
25+
//! let eigenvectors: DenseMatrix<f64> = evd.V;
26+
//! let eigenvalues: Vec<f64> = evd.d;
27+
//! ```
28+
//!
29+
//! ## References:
30+
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., Section 11 Eigensystems](http://numerical.recipes/)
31+
//! * ["Introduction to Linear Algebra", Gilbert Strang, 5th ed., ch. 6 Eigenvalues and Eigenvectors](https://math.mit.edu/~gs/linearalgebra/)
32+
//!
33+
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
34+
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
135
#![allow(non_snake_case)]
236

337
use crate::linalg::BaseMatrix;
@@ -6,23 +40,27 @@ use num::complex::Complex;
640
use std::fmt::Debug;
741

842
#[derive(Debug, Clone)]
43+
/// Results of eigen decomposition
944
pub struct EVD<T: RealNumber, M: BaseMatrix<T>> {
45+
/// Real part of eigenvalues.
1046
pub d: Vec<T>,
47+
/// Imaginary part of eigenvalues.
1148
pub e: Vec<T>,
49+
/// Eigenvectors
1250
pub V: M,
1351
}
1452

15-
impl<T: RealNumber, M: BaseMatrix<T>> EVD<T, M> {
16-
pub fn new(V: M, d: Vec<T>, e: Vec<T>) -> EVD<T, M> {
17-
EVD { d: d, e: e, V: V }
18-
}
19-
}
20-
53+
/// Trait that implements EVD decomposition routine for any matrix.
2154
pub trait EVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
55+
/// Compute the eigen decomposition of a square matrix.
56+
/// * `symmetric` - whether the matrix is symmetric
2257
fn evd(&self, symmetric: bool) -> EVD<T, Self> {
2358
self.clone().evd_mut(symmetric)
2459
}
2560

61+
/// Compute the eigen decomposition of a square matrix. The input matrix
62+
/// will be used for factorization.
63+
/// * `symmetric` - whether the matrix is symmetric
2664
fn evd_mut(mut self, symmetric: bool) -> EVD<T, Self> {
2765
let (nrows, ncols) = self.shape();
2866
if ncols != nrows {

src/linalg/lu.rs

Lines changed: 44 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,36 @@
1+
//! # LU Decomposition
2+
//!
3+
//! Decomposes a square matrix into a product of two triangular matrices:
4+
//!
5+
//! \\[A = LU\\]
6+
//!
7+
//! where \\(U\\) is an upper triangular matrix and \\(L\\) is a lower triangular matrix.
8+
//! The LU decomposition is used to obtain more efficient solutions to equations of the form
9+
//!
10+
//! \\[Ax = b\\]
11+
//!
12+
//! Example:
13+
//! ```
14+
//! use smartcore::linalg::naive::dense_matrix::*;
15+
//! use smartcore::linalg::lu::*;
16+
//!
17+
//! let A = DenseMatrix::from_2d_array(&[
18+
//! &[1., 2., 3.],
19+
//! &[0., 1., 5.],
20+
//! &[5., 6., 0.]
21+
//! ]);
22+
//!
23+
//! let lu = A.lu();
24+
//! let lower: DenseMatrix<f64> = lu.L();
25+
//! let upper: DenseMatrix<f64> = lu.U();
26+
//! ```
27+
//!
28+
//! ## References:
29+
//! * ["No bullshit guide to linear algebra", Ivan Savov, 2016, 7.6 Matrix decompositions](https://minireference.com/)
30+
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., 2.3.1 Performing the LU Decomposition](http://numerical.recipes/)
31+
//!
32+
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
33+
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
134
#![allow(non_snake_case)]
235

336
use std::fmt::Debug;
@@ -7,6 +40,7 @@ use crate::linalg::BaseMatrix;
740
use crate::math::num::RealNumber;
841

942
#[derive(Debug, Clone)]
43+
/// Result of LU decomposition.
1044
pub struct LU<T: RealNumber, M: BaseMatrix<T>> {
1145
LU: M,
1246
pivot: Vec<usize>,
@@ -16,7 +50,7 @@ pub struct LU<T: RealNumber, M: BaseMatrix<T>> {
1650
}
1751

1852
impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
19-
pub fn new(LU: M, pivot: Vec<usize>, pivot_sign: i8) -> LU<T, M> {
53+
pub(crate) fn new(LU: M, pivot: Vec<usize>, pivot_sign: i8) -> LU<T, M> {
2054
let (_, n) = LU.shape();
2155

2256
let mut singular = false;
@@ -36,6 +70,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
3670
}
3771
}
3872

73+
/// Get lower triangular matrix
3974
pub fn L(&self) -> M {
4075
let (n_rows, n_cols) = self.LU.shape();
4176
let mut L = M::zeros(n_rows, n_cols);
@@ -55,6 +90,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
5590
L
5691
}
5792

93+
/// Get upper triangular matrix
5894
pub fn U(&self) -> M {
5995
let (n_rows, n_cols) = self.LU.shape();
6096
let mut U = M::zeros(n_rows, n_cols);
@@ -72,6 +108,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
72108
U
73109
}
74110

111+
/// Pivot vector
75112
pub fn pivot(&self) -> M {
76113
let (_, n) = self.LU.shape();
77114
let mut piv = M::zeros(n, n);
@@ -83,6 +120,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
83120
piv
84121
}
85122

123+
/// Returns matrix inverse
86124
pub fn inverse(&self) -> M {
87125
let (m, n) = self.LU.shape();
88126

@@ -153,11 +191,15 @@ impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
153191
}
154192
}
155193

194+
/// Trait that implements LU decomposition routine for any matrix.
156195
pub trait LUDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
196+
/// Compute the LU decomposition of a square matrix.
157197
fn lu(&self) -> LU<T, Self> {
158198
self.clone().lu_mut()
159199
}
160200

201+
/// Compute the LU decomposition of a square matrix. The input matrix
202+
/// will be used for factorization.
161203
fn lu_mut(mut self) -> LU<T, Self> {
162204
let (m, n) = self.shape();
163205

@@ -213,6 +255,7 @@ pub trait LUDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
213255
LU::new(self, piv, pivsign)
214256
}
215257

258+
/// Solves Ax = b
216259
fn lu_solve_mut(self, b: Self) -> Self {
217260
self.lu_mut().solve(b)
218261
}

src/linalg/nalgebra_bindings.rs

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,42 @@
1+
//! # Connector for nalgebra
2+
//!
3+
//! If you want to use [nalgebra](https://docs.rs/nalgebra/) matrices and vectors with SmartCore:
4+
//!
5+
//! ```
6+
//! use nalgebra::{DMatrix, RowDVector};
7+
//! use smartcore::linear::linear_regression::*;
8+
//! // Enable nalgebra connector
9+
//! use smartcore::linalg::nalgebra_bindings::*;
10+
//!
11+
//! // Longley dataset (https://www.statsmodels.org/stable/datasets/generated/longley.html)
12+
//! let x = DMatrix::from_row_slice(16, 6, &[
13+
//! 234.289, 235.6, 159.0, 107.608, 1947., 60.323,
14+
//! 259.426, 232.5, 145.6, 108.632, 1948., 61.122,
15+
//! 258.054, 368.2, 161.6, 109.773, 1949., 60.171,
16+
//! 284.599, 335.1, 165.0, 110.929, 1950., 61.187,
17+
//! 328.975, 209.9, 309.9, 112.075, 1951., 63.221,
18+
//! 346.999, 193.2, 359.4, 113.270, 1952., 63.639,
19+
//! 365.385, 187.0, 354.7, 115.094, 1953., 64.989,
20+
//! 363.112, 357.8, 335.0, 116.219, 1954., 63.761,
21+
//! 397.469, 290.4, 304.8, 117.388, 1955., 66.019,
22+
//! 419.180, 282.2, 285.7, 118.734, 1956., 67.857,
23+
//! 442.769, 293.6, 279.8, 120.445, 1957., 68.169,
24+
//! 444.546, 468.1, 263.7, 121.950, 1958., 66.513,
25+
//! 482.704, 381.3, 255.2, 123.366, 1959., 68.655,
26+
//! 502.601, 393.1, 251.4, 125.368, 1960., 69.564,
27+
//! 518.173, 480.6, 257.2, 127.852, 1961., 69.331,
28+
//! 554.894, 400.7, 282.7, 130.081, 1962., 70.551
29+
//! ]);
30+
//!
31+
//! let y: RowDVector<f64> = RowDVector::from_vec(vec![
32+
//! 83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0,
33+
//! 101.2, 104.6, 108.4, 110.8, 112.6, 114.2, 115.7,
34+
//! 116.9,
35+
//! ]);
36+
//!
37+
//! let lr = LinearRegression::fit(&x, &y, Default::default());
38+
//! let y_hat = lr.predict(&x);
39+
//! ```
140
use std::iter::Sum;
241
use std::ops::{AddAssign, DivAssign, MulAssign, Range, SubAssign};
342

src/linalg/ndarray_bindings.rs

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,44 @@
1+
//! # Connector for ndarray
2+
//!
3+
//! If you want to use [ndarray](https://docs.rs/ndarray) matrices and vectors with SmartCore:
4+
//!
5+
//! ```
6+
//! use ndarray::{arr1, arr2};
7+
//! use smartcore::linear::logistic_regression::*;
8+
//! // Enable ndarray connector
9+
//! use smartcore::linalg::ndarray_bindings::*;
10+
//!
11+
//! // Iris dataset
12+
//! let x = arr2(&[
13+
//! [5.1, 3.5, 1.4, 0.2],
14+
//! [4.9, 3.0, 1.4, 0.2],
15+
//! [4.7, 3.2, 1.3, 0.2],
16+
//! [4.6, 3.1, 1.5, 0.2],
17+
//! [5.0, 3.6, 1.4, 0.2],
18+
//! [5.4, 3.9, 1.7, 0.4],
19+
//! [4.6, 3.4, 1.4, 0.3],
20+
//! [5.0, 3.4, 1.5, 0.2],
21+
//! [4.4, 2.9, 1.4, 0.2],
22+
//! [4.9, 3.1, 1.5, 0.1],
23+
//! [7.0, 3.2, 4.7, 1.4],
24+
//! [6.4, 3.2, 4.5, 1.5],
25+
//! [6.9, 3.1, 4.9, 1.5],
26+
//! [5.5, 2.3, 4.0, 1.3],
27+
//! [6.5, 2.8, 4.6, 1.5],
28+
//! [5.7, 2.8, 4.5, 1.3],
29+
//! [6.3, 3.3, 4.7, 1.6],
30+
//! [4.9, 2.4, 3.3, 1.0],
31+
//! [6.6, 2.9, 4.6, 1.3],
32+
//! [5.2, 2.7, 3.9, 1.4],
33+
//! ]);
34+
//! let y = arr1(&[
35+
//! 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
36+
//! 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.
37+
//! ]);
38+
//!
39+
//! let lr = LogisticRegression::fit(&x, &y);
40+
//! let y_hat = lr.predict(&x);
41+
//! ```
142
use std::iter::Sum;
243
use std::ops::AddAssign;
344
use std::ops::DivAssign;

src/linalg/qr.rs

Lines changed: 37 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,31 @@
1+
//! # QR Decomposition
2+
//!
3+
//! Any real square matrix \\(A \in R^{n \times n}\\) can be decomposed as a product of an orthogonal matrix \\(Q\\) and an upper triangular matrix \\(R\\):
4+
//!
5+
//! \\[A = QR\\]
6+
//!
7+
//! Example:
8+
//! ```
9+
//! use smartcore::linalg::naive::dense_matrix::*;
10+
//! use smartcore::linalg::qr::*;
11+
//!
12+
//! let A = DenseMatrix::from_2d_array(&[
13+
//! &[0.9, 0.4, 0.7],
14+
//! &[0.4, 0.5, 0.3],
15+
//! &[0.7, 0.3, 0.8]
16+
//! ]);
17+
//!
18+
//! let qr = A.qr();
19+
//! let orthogonal: DenseMatrix<f64> = qr.Q();
20+
//! let triangular: DenseMatrix<f64> = qr.R();
21+
//! ```
22+
//!
23+
//! ## References:
24+
//! * ["No bullshit guide to linear algebra", Ivan Savov, 2016, 7.6 Matrix decompositions](https://minireference.com/)
25+
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., 2.10 QR Decomposition](http://numerical.recipes/)
26+
//!
27+
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
28+
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
129
#![allow(non_snake_case)]
230

331
use std::fmt::Debug;
@@ -6,14 +34,15 @@ use crate::linalg::BaseMatrix;
634
use crate::math::num::RealNumber;
735

836
#[derive(Debug, Clone)]
37+
/// Results of QR decomposition.
938
pub struct QR<T: RealNumber, M: BaseMatrix<T>> {
1039
QR: M,
1140
tau: Vec<T>,
1241
singular: bool,
1342
}
1443

1544
impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
16-
pub fn new(QR: M, tau: Vec<T>) -> QR<T, M> {
45+
pub(crate) fn new(QR: M, tau: Vec<T>) -> QR<T, M> {
1746
let mut singular = false;
1847
for j in 0..tau.len() {
1948
if tau[j] == T::zero() {
@@ -29,6 +58,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
2958
}
3059
}
3160

61+
/// Get upper triangular matrix.
3262
pub fn R(&self) -> M {
3363
let (_, n) = self.QR.shape();
3464
let mut R = M::zeros(n, n);
@@ -41,6 +71,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
4171
return R;
4272
}
4373

74+
/// Get an orthogonal matrix.
4475
pub fn Q(&self) -> M {
4576
let (m, n) = self.QR.shape();
4677
let mut Q = M::zeros(m, n);
@@ -112,11 +143,15 @@ impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
112143
}
113144
}
114145

146+
/// Trait that implements QR decomposition routine for any matrix.
115147
pub trait QRDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
148+
/// Compute the QR decomposition of a matrix.
116149
fn qr(&self) -> QR<T, Self> {
117150
self.clone().qr_mut()
118151
}
119152

153+
/// Compute the QR decomposition of a matrix. The input matrix
154+
/// will be used for factorization.
120155
fn qr_mut(mut self) -> QR<T, Self> {
121156
let (m, n) = self.shape();
122157

@@ -154,6 +189,7 @@ pub trait QRDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
154189
QR::new(self, r_diagonal)
155190
}
156191

192+
/// Solves Ax = b
157193
fn qr_solve_mut(self, b: Self) -> Self {
158194
self.qr_mut().solve(b)
159195
}

0 commit comments

Comments
 (0)