use crate::error::Failed;
use crate::linalg::row_iter;
use crate::linalg::BaseVector;
use crate::linalg::Matrix;
use crate::math::num::RealNumber;
use crate::math::vector::RealNumberVector;
use crate::naive_bayes::{BaseNaiveBayes, NBDistribution};
use serde::{Deserialize, Serialize};

/// Distribution used by the Naive Bayes classifier for features that follow a Gaussian (normal) distribution
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct GaussianNBDistribution<T: RealNumber> {
    /// class labels known to the classifier
    class_labels: Vec<T>,
    /// prior probability of each class
    class_priors: Vec<T>,
    /// variance of each feature per class
    sigma: Vec<Vec<T>>,
    /// mean of each feature per class
    theta: Vec<Vec<T>>,
}

impl<T: RealNumber, M: Matrix<T>> NBDistribution<T, M> for GaussianNBDistribution<T> {
    fn prior(&self, class_index: usize) -> T {
        if class_index >= self.class_labels.len() {
            T::zero()
        } else {
            self.class_priors[class_index]
        }
    }

    fn log_likelihood(&self, class_index: usize, j: &M::RowVector) -> T {
        if class_index < self.class_labels.len() {
            let mut likelihood = T::zero();
            for feature in 0..j.len() {
                let value = j.get(feature);
                let mean = self.theta[class_index][feature];
                let variance = self.sigma[class_index][feature];
                likelihood += self.calculate_log_probability(value, mean, variance);
            }
            likelihood
        } else {
            T::zero()
        }
    }

    fn classes(&self) -> &Vec<T> {
        &self.class_labels
    }
}

/// `GaussianNB` parameters. Use `Default::default()` for default values.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct GaussianNBParameters<T: RealNumber> {
    /// Prior probabilities of the classes. If specified, the priors are not adjusted according to the data.
    pub priors: Option<Vec<T>>,
}

impl<T: RealNumber> GaussianNBParameters<T> {
    /// Create GaussianNBParameters with specific parameters.
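    ///
    /// Illustrative usage, with hypothetical priors for a two-class problem:
    /// `let params = GaussianNBParameters::new(Some(vec![0.3, 0.7]));`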
    pub fn new(priors: Option<Vec<T>>) -> Self {
        Self { priors }
    }
}

impl<T: RealNumber> GaussianNBDistribution<T> {
    /// Fits the distribution to an NxM matrix where N is the number of samples and M is the number of features.
    /// * `x` - training data.
    /// * `y` - vector with target values (classes) of length N.
    /// * `priors` - Optional vector with prior probabilities of the classes. If not defined,
    /// priors are adjusted according to the data.
    pub fn fit<M: Matrix<T>>(
        x: &M,
        y: &M::RowVector,
        priors: Option<Vec<T>>,
    ) -> Result<Self, Failed> {
        let (n_samples, n_features) = x.shape();
        let y_samples = y.len();
        if y_samples != n_samples {
            return Err(Failed::fit(&format!(
                "Size of x should equal size of y; |x|=[{}], |y|=[{}]",
                n_samples, y_samples
            )));
        }

        if n_samples == 0 {
            return Err(Failed::fit(&format!(
                "Size of x and y should be greater than 0; |x|=[{}]",
                n_samples
            )));
        }
        let y = y.to_vec();
        let (class_labels, indices) = <Vec<T> as RealNumberVector<T>>::unique_with_indices(&y);

        let mut class_count = vec![T::zero(); class_labels.len()];

        let mut subdataset: Vec<Vec<Vec<T>>> = vec![vec![]; class_labels.len()];

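        // Count samples per class and partition the training rows by class label.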
        for (row, class_index) in row_iter(x).zip(indices.iter()) {
            class_count[*class_index] += T::one();
            subdataset[*class_index].push(row);
        }

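        // Use caller-supplied priors when given; otherwise estimate them
        // from the observed class frequencies.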
        let class_priors = if let Some(class_priors) = priors {
            if class_priors.len() != class_labels.len() {
                return Err(Failed::fit(
                    "Size of priors provided does not match the number of classes of the data.",
                ));
            }
            class_priors
        } else {
            class_count
                .into_iter()
                .map(|c| c / T::from(n_samples).unwrap())
                .collect()
        };

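        // Materialize each per-class group of rows as a matrix so that
        // column-wise statistics can be computed.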
        let subdataset: Vec<M> = subdataset
            .into_iter()
            .map(|v| {
                let mut m = M::zeros(v.len(), n_features);
                for row in 0..v.len() {
                    for col in 0..n_features {
                        m.set(row, col, v[row][col]);
                    }
                }
                m
            })
            .collect();

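        // Column-wise variance (sigma) and mean (theta) of each feature, per class.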
        let (sigma, theta): (Vec<Vec<T>>, Vec<Vec<T>>) = subdataset
            .iter()
            .map(|data| (data.var(0), data.mean(0)))
            .unzip();

        Ok(Self {
            class_labels,
            class_priors,
            sigma,
            theta,
        })
    }

    /// Calculates the log-probability of `value` under a Gaussian distribution with the
    /// given mean and variance.
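    ///
    /// Uses the Gaussian log-density, which the body below computes term by term:
    /// ln p(x | mean, var) = -(x - mean)^2 / (2 * var) - ln(2 * pi) / 2 - ln(var) / 2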
    fn calculate_log_probability(&self, value: T, mean: T, variance: T) -> T {
        let pi = T::from(std::f64::consts::PI).unwrap();
        -((value - mean).powf(T::two()) / (T::two() * variance))
            - (T::two() * pi).ln() / T::two()
            - (variance).ln() / T::two()
    }
}

/// GaussianNB implements the Gaussian Naive Bayes algorithm, which assumes the features follow a normal (Gaussian) distribution.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct GaussianNB<T: RealNumber, M: Matrix<T>> {
    inner: BaseNaiveBayes<T, M, GaussianNBDistribution<T>>,
}

impl<T: RealNumber, M: Matrix<T>> GaussianNB<T, M> {
    /// Fits GaussianNB with given data
    /// * `x` - training data of size NxM where N is the number of samples and M is the number of
    /// features.
    /// * `y` - vector with target values (classes) of length N.
    /// * `parameters` - additional parameters like class priors.
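    ///
    /// Illustrative sketch, mirroring the tests below (`x` and `y` are hypothetical training data):
    /// `let gnb = GaussianNB::fit(&x, &y, Default::default())?;`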
    pub fn fit(
        x: &M,
        y: &M::RowVector,
        parameters: GaussianNBParameters<T>,
    ) -> Result<Self, Failed> {
        let distribution = GaussianNBDistribution::fit(x, y, parameters.priors)?;
        let inner = BaseNaiveBayes::fit(distribution)?;
        Ok(Self { inner })
    }

    /// Estimates the class labels for the provided data.
    /// * `x` - data of shape NxM where N is the number of data points to estimate and M is the number of features.
    /// Returns a vector of size N with class estimates.
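    ///
    /// Illustrative usage, assuming a fitted `gnb` (see the tests below):
    /// `let y_hat = gnb.predict(&x)?;`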
    pub fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
        self.inner.predict(x)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::linalg::naive::dense_matrix::DenseMatrix;

    #[test]
    fn run_gaussian_naive_bayes() {
        let x = DenseMatrix::from_2d_array(&[
            &[-1., -1.],
            &[-2., -1.],
            &[-3., -2.],
            &[1., 1.],
            &[2., 1.],
            &[3., 2.],
        ]);
        let y = vec![1., 1., 1., 2., 2., 2.];

        let gnb = GaussianNB::fit(&x, &y, Default::default()).unwrap();
        let y_hat = gnb.predict(&x).unwrap();
        assert_eq!(y_hat, y);
        assert_eq!(
            gnb.inner.distribution.sigma,
            &[
                &[0.666666666666667, 0.22222222222222232],
                &[0.666666666666667, 0.22222222222222232]
            ]
        );

        assert_eq!(gnb.inner.distribution.class_priors, &[0.5, 0.5]);

        assert_eq!(
            gnb.inner.distribution.theta,
            &[&[-2., -1.3333333333333333], &[2., 1.3333333333333333]]
        );
    }

    #[test]
    fn run_gaussian_naive_bayes_with_priors() {
        let x = DenseMatrix::from_2d_array(&[
            &[-1., -1.],
            &[-2., -1.],
            &[-3., -2.],
            &[1., 1.],
            &[2., 1.],
            &[3., 2.],
        ]);
        let y = vec![1., 1., 1., 2., 2., 2.];

        let priors = vec![0.3, 0.7];
        let parameters = GaussianNBParameters::new(Some(priors.clone()));
        let gnb = GaussianNB::fit(&x, &y, parameters).unwrap();

        assert_eq!(gnb.inner.distribution.class_priors, priors);
    }

    #[test]
    fn serde() {
        let x = DenseMatrix::<f64>::from_2d_array(&[
            &[-1., -1.],
            &[-2., -1.],
            &[-3., -2.],
            &[1., 1.],
            &[2., 1.],
            &[3., 2.],
        ]);
        let y = vec![1., 1., 1., 2., 2., 2.];

        let gnb = GaussianNB::fit(&x, &y, Default::default()).unwrap();
        let deserialized_gnb: GaussianNB<f64, DenseMatrix<f64>> =
            serde_json::from_str(&serde_json::to_string(&gnb).unwrap()).unwrap();

        assert_eq!(gnb, deserialized_gnb);
    }
}