/// Metrics for measuring the quality of the prediction.
enum MetricType {
+ ///
+ ///
/// Mean absolute percentage error (MAPE), a regression metric. The formula
/// is:
///
@@ -17,6 +19,8 @@ enum MetricType {
/// can produce scores which are greater than 1.
mape,
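For illustration only, a minimal Dart sketch of how a MAPE score could be computed. The formula lines for this variant are elided from the hunk above, so the sketch assumes the standard definition, the mean of |actual - predicted| / |actual|; the `mape` helper below is hypothetical and not part of the package API.

```dart
// Sketch of a MAPE computation, assuming the standard definition
// mean(|actual - predicted| / |actual|).
double mape(List<double> actual, List<double> predicted) {
  var sum = 0.0;
  for (var i = 0; i < actual.length; i++) {
    sum += (actual[i] - predicted[i]).abs() / actual[i].abs();
  }
  return sum / actual.length;
}

void main() {
  // An error larger than the actual value pushes the score above 1,
  // which is why MAPE is not bounded by [0, 1].
  print(mape([1.0, 2.0, 4.0], [3.0, 2.5, 4.0])); // 0.75
}
```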
+ ///
+ ///
/// Root mean squared error (RMSE), a regression metric. The formula is:
///
///
@@ -31,17 +35,69 @@ enum MetricType {
/// scores within the range [0, +Infinity]
rmse,
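For illustration only, a minimal Dart sketch of an RMSE computation, assuming the standard definition sqrt(mean((actual - predicted)^2)); the `rmse` helper below is hypothetical and not part of the package API.

```dart
import 'dart:math';

// Sketch of an RMSE computation: the square root of the mean squared error.
double rmse(List<double> actual, List<double> predicted) {
  var sum = 0.0;
  for (var i = 0; i < actual.length; i++) {
    final diff = actual[i] - predicted[i];
    sum += diff * diff;
  }
  return sqrt(sum / actual.length);
}

void main() {
  print(rmse([1.0, 2.0, 3.0], [1.0, 2.0, 5.0])); // ~1.155
}
```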
- /// A classification metric. The greater the score produced by the metric, the
- /// better the prediction's quality is. The metric produces scores within the
- /// range [0, 1]
+ ///
+ ///
+ /// A classification metric. The formula is
+ ///
+ ///
+ /// ![{\mbox{Score}}=\frac{k}{n}](https://latex.codecogs.com/gif.latex?%7B%5Cmbox%7BScore%7D%7D%3D%5Cfrac%7Bk%7D%7Bn%7D)
+ ///
+ ///
+ /// where `k` is the number of correctly predicted labels and `n` is the total
+ /// number of labels
+ ///
+ ///
+ /// The greater the score produced by the metric, the better the prediction's
+ /// quality is. The metric produces scores within the range [0, 1]
accuracy,
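For illustration only, a minimal Dart sketch of the accuracy formula documented above, `k` correctly predicted labels out of `n` labels in total; the `accuracy` helper below is hypothetical and not part of the package API.

```dart
// Sketch of the accuracy score: k / n.
double accuracy(List<int> actual, List<int> predicted) {
  var correct = 0; // k
  for (var i = 0; i < actual.length; i++) {
    if (actual[i] == predicted[i]) correct++;
  }
  return correct / actual.length; // k / n
}

void main() {
  print(accuracy([1, 0, 1, 1], [1, 0, 0, 1])); // 0.75
}
```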
- /// A classification metric. The greater the score produced by the metric, the
+ ///
+ ///
+ /// A classification metric. The formula for a single-class problem is
+ ///
+ ///
+ /// ![{\mbox{Score}}=\frac{TP}{TP + FP}](https://latex.codecogs.com/gif.latex?%7B%5Cmbox%7BScore%7D%7D%3D%5Cfrac%7BTP%7D%7BTP%20+%20FP%7D)
+ ///
+ ///
+ /// where TP is the number of correctly predicted positive labels (true positives)
+ /// and FP is the number of incorrectly predicted positive labels (false positives).
+ /// In other words, TP + FP is the total number of labels predicted to be positive
+ ///
+ /// The formula for a multi-class problem is
+ ///
+ ///
+ /// ![{\mbox{Score}}= \frac{1}{n}\sum_{t=1}^{n}Score_{t}](https://latex.codecogs.com/gif.latex?%7B%5Cmbox%7BScore%7D%7D%3D%20%5Cfrac%7B1%7D%7Bn%7D%5Csum_%7Bt%3D1%7D%5E%7Bn%7DScore_%7Bt%7D)
+ ///
+ /// where `Score_t` is the single-class score computed for class `t` and `n` is
+ /// the number of classes
+ ///
+ ///
+ /// The greater the score produced by the metric, the
/// better the prediction's quality is. The metric produces scores within the
/// range [0, 1]
precision,
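For illustration only, a minimal Dart sketch of the precision formulas documented above: TP / (TP + FP) per class, averaged over the classes for the multi-class case. The helper names are hypothetical, and the zero-denominator case (a class that is never predicted) is ignored for brevity.

```dart
// Single-class precision: TP / (TP + FP).
double precisionForClass(List<int> actual, List<int> predicted, int label) {
  var tp = 0, fp = 0;
  for (var i = 0; i < actual.length; i++) {
    if (predicted[i] == label) {
      if (actual[i] == label) {
        tp++;
      } else {
        fp++;
      }
    }
  }
  return tp / (tp + fp); // TP + FP is all labels predicted to be positive
}

// Multi-class precision: the mean of the per-class scores.
double precision(List<int> actual, List<int> predicted) {
  final labels = {...actual};
  final scores =
      labels.map((label) => precisionForClass(actual, predicted, label));
  return scores.reduce((a, b) => a + b) / labels.length;
}

void main() {
  print(precision([0, 0, 1, 1], [0, 1, 1, 1])); // (1/1 + 2/3) / 2 ≈ 0.83
}
```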
- /// A classification metric. The greater the score produced by the metric, the
+ ///
+ ///
+ /// A classification metric. The formula for a single-class problem is
+ ///
+ ///
+ /// ![{\mbox{Score}}=\frac{TP}{TP + FN}](https://latex.codecogs.com/gif.latex?%7B%5Cmbox%7BScore%7D%7D%3D%5Cfrac%7BTP%7D%7BTP%20+%20FN%7D)
+ ///
+ ///
+ /// where TP is the number of correctly predicted positive labels (true positives)
+ /// and FN is the number of incorrectly predicted negative labels (false negatives).
+ /// In other words, TP + FN is the total number of positive labels for the class
+ /// in the given data
+ ///
+ /// The formula for a multi-class problem is
+ ///
+ ///
+ /// ![{\mbox{Score}}= \frac{1}{n}\sum_{t=1}^{n}Score_{t}](https://latex.codecogs.com/gif.latex?%7B%5Cmbox%7BScore%7D%7D%3D%20%5Cfrac%7B1%7D%7Bn%7D%5Csum_%7Bt%3D1%7D%5E%7Bn%7DScore_%7Bt%7D)
+ ///
+ /// where `Score_t` is the single-class score computed for class `t` and `n` is
+ /// the number of classes
+ ///
+ /// The greater the score produced by the metric, the
/// better the prediction's quality is. The metric produces scores within the
/// range [0, 1]
recall,
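For illustration only, a minimal Dart sketch of the recall formulas documented above: TP / (TP + FN) per class, averaged over the classes for the multi-class case. The helper names are hypothetical, and the edge case of a class with no positive labels in the data is ignored for brevity.

```dart
// Single-class recall: TP / (TP + FN).
double recallForClass(List<int> actual, List<int> predicted, int label) {
  var tp = 0, fn = 0;
  for (var i = 0; i < actual.length; i++) {
    if (actual[i] == label) {
      if (predicted[i] == label) {
        tp++;
      } else {
        fn++;
      }
    }
  }
  return tp / (tp + fn); // TP + FN is the total number of positive labels
}

// Multi-class recall: the mean of the per-class scores.
double recall(List<int> actual, List<int> predicted) {
  final labels = {...actual};
  final scores =
      labels.map((label) => recallForClass(actual, predicted, label));
  return scores.reduce((a, b) => a + b) / labels.length;
}

void main() {
  print(recall([0, 0, 1, 1], [0, 1, 1, 1])); // (1/2 + 2/2) / 2 = 0.75
}
```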