Skip to content

Commit c172c40

Browse files
Merge pull request #35 from smartcorelib/lasso
LASSO
2 parents 89a5136 + 67e5829 commit c172c40

File tree

9 files changed

+819
-3
lines changed

9 files changed

+819
-3
lines changed

src/linalg/high_order.rs

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
//! In this module you will find composite of matrix operations that are used elsewhere
2+
//! for improved efficiency.
3+
4+
use crate::linalg::BaseMatrix;
5+
use crate::math::num::RealNumber;
6+
7+
/// High order matrix operations.
8+
pub trait HighOrderOperations<T: RealNumber>: BaseMatrix<T> {
9+
/// Y = AB
10+
/// ```
11+
/// use smartcore::linalg::naive::dense_matrix::*;
12+
/// use smartcore::linalg::high_order::HighOrderOperations;
13+
///
14+
/// let a = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.]]);
15+
/// let b = DenseMatrix::from_2d_array(&[&[5., 6.], &[7., 8.], &[9., 10.]]);
16+
/// let expected = DenseMatrix::from_2d_array(&[&[71., 80.], &[92., 104.]]);
17+
///
18+
/// assert_eq!(a.ab(true, &b, false), expected);
19+
/// ```
20+
fn ab(&self, a_transpose: bool, b: &Self, b_transpose: bool) -> Self {
21+
match (a_transpose, b_transpose) {
22+
(true, true) => b.matmul(self).transpose(),
23+
(false, true) => self.matmul(&b.transpose()),
24+
(true, false) => self.transpose().matmul(b),
25+
(false, false) => self.matmul(b),
26+
}
27+
}
28+
}

src/linalg/mod.rs

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
pub mod cholesky;
3737
/// The matrix is represented in terms of its eigenvalues and eigenvectors.
3838
pub mod evd;
39+
pub mod high_order;
3940
/// Factors a matrix as the product of a lower triangular matrix and an upper triangular matrix.
4041
pub mod lu;
4142
/// Dense matrix with column-major order that wraps [Vec](https://doc.rust-lang.org/std/vec/struct.Vec.html).
@@ -59,6 +60,7 @@ use std::ops::Range;
5960
use crate::math::num::RealNumber;
6061
use cholesky::CholeskyDecomposableMatrix;
6162
use evd::EVDDecomposableMatrix;
63+
use high_order::HighOrderOperations;
6264
use lu::LUDecomposableMatrix;
6365
use qr::QRDecomposableMatrix;
6466
use stats::MatrixStats;
@@ -134,6 +136,66 @@ pub trait BaseVector<T: RealNumber>: Clone + Debug {
134136
/// Subtract `x` from single element of the vector, write result to original vector.
135137
fn sub_element_mut(&mut self, pos: usize, x: T);
136138

139+
/// Subtract scalar `x` from each element of the vector, in place.
/// Returns `&self` so calls can be chained.
fn sub_scalar_mut(&mut self, x: T) -> &Self {
    for i in 0..self.len() {
        self.set(i, self.get(i) - x);
    }
    self
}
146+
147+
/// Add scalar `x` to each element of the vector, in place.
/// (Original comment said "Subtract scalar" — copy-paste error.)
fn add_scalar_mut(&mut self, x: T) -> &Self {
    for i in 0..self.len() {
        self.set(i, self.get(i) + x);
    }
    self
}
154+
155+
/// Multiply each element of the vector by scalar `x`, in place.
/// (Original comment said "Subtract scalar" — copy-paste error.)
fn mul_scalar_mut(&mut self, x: T) -> &Self {
    for i in 0..self.len() {
        self.set(i, self.get(i) * x);
    }
    self
}
162+
163+
/// Divide each element of the vector by scalar `x`, in place.
/// (Original comment said "Subtract scalar" — copy-paste error.)
fn div_scalar_mut(&mut self, x: T) -> &Self {
    for i in 0..self.len() {
        self.set(i, self.get(i) / x);
    }
    self
}
170+
171+
/// Return a copy of the vector with scalar `x` added to each element.
/// (Original comment said "Add vectors, element-wise" — this is a scalar op.)
fn add_scalar(&self, x: T) -> Self {
    let mut r = self.clone();
    r.add_scalar_mut(x);
    r
}
177+
178+
/// Return a copy of the vector with scalar `x` subtracted from each element.
/// (Original comment said "Subtract vectors, element-wise" — this is a scalar op.)
fn sub_scalar(&self, x: T) -> Self {
    let mut r = self.clone();
    r.sub_scalar_mut(x);
    r
}
184+
185+
/// Return a copy of the vector with each element multiplied by scalar `x`.
/// (Original comment said "Multiply vectors, element-wise" — this is a scalar op.)
fn mul_scalar(&self, x: T) -> Self {
    let mut r = self.clone();
    r.mul_scalar_mut(x);
    r
}
191+
192+
/// Return a copy of the vector with each element divided by scalar `x`.
/// (Original comment said "Divide vectors, element-wise" — this is a scalar op.)
fn div_scalar(&self, x: T) -> Self {
    let mut r = self.clone();
    r.div_scalar_mut(x);
    r
}
198+
137199
/// Add vectors, element-wise, overriding original vector with result.
138200
fn add_mut(&mut self, other: &Self) -> &Self;
139201

@@ -557,6 +619,7 @@ pub trait Matrix<T: RealNumber>:
557619
+ LUDecomposableMatrix<T>
558620
+ CholeskyDecomposableMatrix<T>
559621
+ MatrixStats<T>
622+
+ HighOrderOperations<T>
560623
+ PartialEq
561624
+ Display
562625
{

src/linalg/naive/dense_matrix.rs

Lines changed: 58 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ use serde::{Deserialize, Serialize};
99

1010
use crate::linalg::cholesky::CholeskyDecomposableMatrix;
1111
use crate::linalg::evd::EVDDecomposableMatrix;
12+
use crate::linalg::high_order::HighOrderOperations;
1213
use crate::linalg::lu::LUDecomposableMatrix;
1314
use crate::linalg::qr::QRDecomposableMatrix;
1415
use crate::linalg::stats::MatrixStats;
@@ -444,6 +445,38 @@ impl<T: RealNumber> LUDecomposableMatrix<T> for DenseMatrix<T> {}
444445

445446
impl<T: RealNumber> CholeskyDecomposableMatrix<T> for DenseMatrix<T> {}
446447

448+
impl<T: RealNumber> HighOrderOperations<T> for DenseMatrix<T> {
    /// Y = AB with optional implicit transposition of either operand.
    /// Transposed operands are read in transposed index order directly,
    /// without materializing a transposed copy.
    fn ab(&self, a_transpose: bool, b: &Self, b_transpose: bool) -> Self {
        if !a_transpose && !b_transpose {
            return self.matmul(b);
        }
        // Effective shapes: A-side is `rows x inner`, B-side is `b_inner x cols`.
        let (inner, rows, cols, b_inner) = match (a_transpose, b_transpose) {
            (true, false) => (self.nrows, self.ncols, b.ncols, b.nrows),
            (false, true) => (self.ncols, self.nrows, b.nrows, b.ncols),
            _ => (self.nrows, self.ncols, b.nrows, b.ncols),
        };
        if inner != b_inner {
            panic!(
                "Can not multiply {}x{} by {}x{} matrices",
                rows, inner, b_inner, cols
            );
        }
        let mut product = Self::zeros(rows, cols);
        for row in 0..rows {
            for col in 0..cols {
                let mut acc = T::zero();
                for k in 0..inner {
                    // Pick the element order that matches the requested transposes.
                    acc += match (a_transpose, b_transpose) {
                        (true, false) => self.get(k, row) * b.get(k, col),
                        (false, true) => self.get(row, k) * b.get(col, k),
                        _ => self.get(k, row) * b.get(col, k),
                    };
                }
                product.set(row, col, acc);
            }
        }
        product
    }
}
479+
447480
impl<T: RealNumber> MatrixStats<T> for DenseMatrix<T> {}
448481

449482
impl<T: RealNumber> Matrix<T> for DenseMatrix<T> {}
@@ -625,8 +658,8 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
625658
}
626659

627660
fn dot(&self, other: &Self) -> T {
628-
if self.nrows != 1 && other.nrows != 1 {
629-
panic!("A and B should both be 1-dimentional vectors.");
661+
if (self.nrows != 1 && other.nrows != 1) && (self.ncols != 1 && other.ncols != 1) {
662+
panic!("A and B should both be either a row or a column vector.");
630663
}
631664
if self.nrows * self.ncols != other.nrows * other.ncols {
632665
panic!("A and B should have the same size");
@@ -1120,6 +1153,29 @@ mod tests {
11201153
assert_eq!(result, expected);
11211154
}
11221155

1156+
#[test]
fn ab() {
    let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]);
    let b = DenseMatrix::from_2d_array(&[&[5., 6.], &[7., 8.], &[9., 10.]]);
    let c = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.]]);

    // A * B
    let expected_ab = DenseMatrix::from_2d_array(&[&[46., 52.], &[109., 124.]]);
    // C' * B
    let expected_ct_b = DenseMatrix::from_2d_array(&[&[71., 80.], &[92., 104.]]);
    // B * C'
    let expected_b_ct =
        DenseMatrix::from_2d_array(&[&[17., 39., 61.], &[23., 53., 83.], &[29., 67., 105.]]);
    // A' * B'
    let expected_at_bt =
        DenseMatrix::from_2d_array(&[&[29., 39., 49.], &[40., 54., 68.], &[51., 69., 87.]]);

    assert_eq!(a.ab(false, &b, false), expected_ab);
    assert_eq!(c.ab(true, &b, false), expected_ct_b);
    assert_eq!(b.ab(false, &c, true), expected_b_ct);
    assert_eq!(a.ab(true, &b, true), expected_at_bt);
}
1178+
11231179
#[test]
11241180
fn dot() {
11251181
let a = DenseMatrix::from_array(1, 3, &[1., 2., 3.]);

src/linalg/nalgebra_bindings.rs

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@ use nalgebra::{DMatrix, Dynamic, Matrix, MatrixMN, RowDVector, Scalar, VecStorag
4444

4545
use crate::linalg::cholesky::CholeskyDecomposableMatrix;
4646
use crate::linalg::evd::EVDDecomposableMatrix;
47+
use crate::linalg::high_order::HighOrderOperations;
4748
use crate::linalg::lu::LUDecomposableMatrix;
4849
use crate::linalg::qr::QRDecomposableMatrix;
4950
use crate::linalg::stats::MatrixStats;
@@ -553,6 +554,11 @@ impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Su
553554
{
554555
}
555556

557+
// Empty impl: relies entirely on the default `ab` provided by
// `HighOrderOperations`, which is built on `matmul`/`transpose`.
impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
    HighOrderOperations<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>>
{
}
561+
556562
impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
557563
SmartCoreMatrix<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>>
558564
{

src/linalg/ndarray_bindings.rs

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@ use ndarray::{s, stack, Array, ArrayBase, Axis, Ix1, Ix2, OwnedRepr};
5151

5252
use crate::linalg::cholesky::CholeskyDecomposableMatrix;
5353
use crate::linalg::evd::EVDDecomposableMatrix;
54+
use crate::linalg::high_order::HighOrderOperations;
5455
use crate::linalg::lu::LUDecomposableMatrix;
5556
use crate::linalg::qr::QRDecomposableMatrix;
5657
use crate::linalg::stats::MatrixStats;
@@ -502,6 +503,11 @@ impl<T: RealNumber + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssi
502503
{
503504
}
504505

506+
// Empty impl: relies entirely on the default `ab` provided by
// `HighOrderOperations`, which is built on `matmul`/`transpose`.
impl<T: RealNumber + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum>
    HighOrderOperations<T> for ArrayBase<OwnedRepr<T>, Ix2>
{
}
510+
505511
impl<T: RealNumber + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum> Matrix<T>
506512
for ArrayBase<OwnedRepr<T>, Ix2>
507513
{

src/linear/bg_solver.rs

Lines changed: 146 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,146 @@
1+
//! This is a generic solver for Ax = b type of equation
2+
//!
3+
//! for more information take a look at [this Wikipedia article](https://en.wikipedia.org/wiki/Biconjugate_gradient_method)
4+
//! and [this paper](https://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf)
5+
use crate::error::Failed;
6+
use crate::linalg::Matrix;
7+
use crate::math::num::RealNumber;
8+
9+
pub trait BiconjugateGradientSolver<T: RealNumber, M: Matrix<T>> {
10+
fn solve_mut(&self, a: &M, b: &M, x: &mut M, tol: T, max_iter: usize) -> Result<T, Failed> {
11+
if tol <= T::zero() {
12+
return Err(Failed::fit("tolerance shoud be > 0"));
13+
}
14+
15+
if max_iter == 0 {
16+
return Err(Failed::fit("maximum number of iterations should be > 0"));
17+
}
18+
19+
let (n, _) = b.shape();
20+
21+
let mut r = M::zeros(n, 1);
22+
let mut rr = M::zeros(n, 1);
23+
let mut z = M::zeros(n, 1);
24+
let mut zz = M::zeros(n, 1);
25+
26+
self.mat_vec_mul(a, x, &mut r);
27+
28+
for j in 0..n {
29+
r.set(j, 0, b.get(j, 0) - r.get(j, 0));
30+
rr.set(j, 0, r.get(j, 0));
31+
}
32+
33+
let bnrm = b.norm(T::two());
34+
self.solve_preconditioner(a, &r, &mut z);
35+
36+
let mut p = M::zeros(n, 1);
37+
let mut pp = M::zeros(n, 1);
38+
let mut bkden = T::zero();
39+
let mut err = T::zero();
40+
41+
for iter in 1..max_iter {
42+
let mut bknum = T::zero();
43+
44+
self.solve_preconditioner(a, &rr, &mut zz);
45+
for j in 0..n {
46+
bknum += z.get(j, 0) * rr.get(j, 0);
47+
}
48+
if iter == 1 {
49+
for j in 0..n {
50+
p.set(j, 0, z.get(j, 0));
51+
pp.set(j, 0, zz.get(j, 0));
52+
}
53+
} else {
54+
let bk = bknum / bkden;
55+
for j in 0..n {
56+
p.set(j, 0, bk * p.get(j, 0) + z.get(j, 0));
57+
pp.set(j, 0, bk * pp.get(j, 0) + zz.get(j, 0));
58+
}
59+
}
60+
bkden = bknum;
61+
self.mat_vec_mul(a, &p, &mut z);
62+
let mut akden = T::zero();
63+
for j in 0..n {
64+
akden += z.get(j, 0) * pp.get(j, 0);
65+
}
66+
let ak = bknum / akden;
67+
self.mat_t_vec_mul(a, &pp, &mut zz);
68+
for j in 0..n {
69+
x.set(j, 0, x.get(j, 0) + ak * p.get(j, 0));
70+
r.set(j, 0, r.get(j, 0) - ak * z.get(j, 0));
71+
rr.set(j, 0, rr.get(j, 0) - ak * zz.get(j, 0));
72+
}
73+
self.solve_preconditioner(a, &r, &mut z);
74+
err = r.norm(T::two()) / bnrm;
75+
76+
if err <= tol {
77+
break;
78+
}
79+
}
80+
81+
Ok(err)
82+
}
83+
84+
fn solve_preconditioner(&self, a: &M, b: &M, x: &mut M) {
85+
let diag = Self::diag(a);
86+
let n = diag.len();
87+
88+
for i in 0..n {
89+
if diag[i] != T::zero() {
90+
x.set(i, 0, b.get(i, 0) / diag[i]);
91+
} else {
92+
x.set(i, 0, b.get(i, 0));
93+
}
94+
}
95+
}
96+
97+
// y = Ax
98+
fn mat_vec_mul(&self, a: &M, x: &M, y: &mut M) {
99+
y.copy_from(&a.matmul(x));
100+
}
101+
102+
// y = Atx
103+
fn mat_t_vec_mul(&self, a: &M, x: &M, y: &mut M) {
104+
y.copy_from(&a.ab(true, x, false));
105+
}
106+
107+
fn diag(a: &M) -> Vec<T> {
108+
let (nrows, ncols) = a.shape();
109+
let n = nrows.min(ncols);
110+
111+
let mut d = Vec::with_capacity(n);
112+
for i in 0..n {
113+
d.push(a.get(i, i));
114+
}
115+
116+
d
117+
}
118+
}
119+
120+
#[cfg(test)]
mod tests {
    use super::*;
    use crate::linalg::naive::dense_matrix::*;

    pub struct BGSolver {}

    impl<T: RealNumber, M: Matrix<T>> BiconjugateGradientSolver<T, M> for BGSolver {}

    #[test]
    fn bg_solver() {
        // Symmetric positive-definite system whose solution is x = [1, 2, 3].
        let a = DenseMatrix::from_2d_array(&[&[25., 15., -5.], &[15., 18., 0.], &[-5., 0., 11.]]);
        let b = DenseMatrix::from_2d_array(&[&[40., 51., 28.]]);
        let expected = DenseMatrix::from_2d_array(&[&[1.0, 2.0, 3.0]]);

        let solver = BGSolver {};
        let mut x = DenseMatrix::zeros(3, 1);

        // b is a row vector above; the solver expects a column vector.
        let err: f64 = solver
            .solve_mut(&a, &b.transpose(), &mut x, 1e-6, 6)
            .unwrap();

        assert!(x.transpose().approximate_eq(&expected, 1e-4));
        assert!(err.abs() < 1e-4);
    }
}

0 commit comments

Comments
 (0)