
Commit ab01346

Author: Matt Sokoloff
Commit message: update docstrings
1 parent dc6310b commit ab01346

File tree

5 files changed: +122 -62 lines changed


labelbox/data/annotation_types/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -32,3 +32,5 @@
 from .metrics import ScalarMetricAggregation
 from .metrics import ConfusionMatrixMetric
 from .metrics import ConfusionMatrixAggregation
+from .metrics import ScalarMetricValue
+from .metrics import ConfusionMatrixMetricValue

Lines changed: 2 additions & 2 deletions
@@ -1,2 +1,2 @@
-from .scalar import ScalarMetric, ScalarMetricAggregation
-from .confusion_matrix import ConfusionMatrixMetric, ConfusionMatrixAggregation
+from .scalar import ScalarMetric, ScalarMetricAggregation, ScalarMetricValue
+from .confusion_matrix import ConfusionMatrixMetric, ConfusionMatrixAggregation, ConfusionMatrixMetricValue

labelbox/data/metrics/confusion_matrix/calculation.py

Lines changed: 104 additions & 43 deletions
@@ -1,14 +1,13 @@
-from labelbox.data.metrics.iou.calculation import _get_mask_pairs, _get_vector_pairs, miou
-
-from labelbox.data.annotation_types.metrics.confusion_matrix import \
-    ConfusionMatrixMetricValue
-
-from labelbox.data.annotation_types.metrics.scalar import ScalarMetricValue
 from typing import List, Optional, Tuple, Union
+
 import numpy as np
+
+from ..iou.calculation import _get_mask_pairs, _get_vector_pairs, miou
 from ...annotation_types import (ObjectAnnotation, ClassificationAnnotation,
-                                 Mask, Geometry, Checklist, Radio)
-from ..processing import get_feature_pairs, get_identifying_key, has_no_annotations, has_no_matching_annotations
+                                 Mask, Geometry, Checklist, Radio,
+                                 ScalarMetricValue, ConfusionMatrixMetricValue)
+from ..processing import (get_feature_pairs, get_identifying_key,
+                          has_no_annotations, has_no_matching_annotations)
 
 
 def confusion_matrix(ground_truths: List[Union[ObjectAnnotation,
@@ -17,34 +16,58 @@ def confusion_matrix(ground_truths: List[Union[ObjectAnnotation,
                                                ClassificationAnnotation]],
                      include_subclasses: bool,
                      iou: float) -> ConfusionMatrixMetricValue:
+    """
+    Computes the confusion matrix for an arbitrary set of ground truth and predicted annotations.
+    It first computes the confusion matrix for each class and then sums across all classes.
+
+    Args:
+        ground_truth: Label containing human annotations or annotations known to be correct
+        prediction: Label representing model predictions
+        include_subclasses (bool): Whether or not to include subclasses in the calculation.
+            If set to True, the iou between two overlapping objects of the same type is 0 if the subclasses are not the same.
+        iou: minimum overlap between objects for them to count as matching
+    Returns:
+        confusion matrix as a list: [TP,FP,TN,FN]
+        Returns None if there are no annotations in ground_truth or prediction annotations
+    """
 
     annotation_pairs = get_feature_pairs(predictions, ground_truths)
-    ious = [
+    conf_matrix = [
         feature_confusion_matrix(annotation_pair[0], annotation_pair[1],
                                  include_subclasses, iou)
         for annotation_pair in annotation_pairs.values()
     ]
-    ious = [iou for iou in ious if iou is not None]
-
-    return None if not len(ious) else np.sum(ious, axis=0).tolist()
+    matrices = [matrix for matrix in conf_matrix if matrix is not None]
+    return None if not len(matrices) else np.sum(matrices, axis=0).tolist()
 
 
 def feature_confusion_matrix(
     ground_truths: List[Union[ObjectAnnotation, ClassificationAnnotation]],
     predictions: List[Union[ObjectAnnotation, ClassificationAnnotation]],
     include_subclasses: bool,
     iou: float) -> Optional[ConfusionMatrixMetricValue]:
+    """
+    Computes the confusion matrix for all features of the same class.
+
+    Args:
+        ground_truths: List of ground truth annotations belonging to the same class.
+        predictions: List of prediction annotations belonging to the same class.
+        include_subclasses (bool): Whether or not to include subclasses in the calculation.
+            If set to True, the iou between two overlapping objects of the same type is 0 if the subclasses are not the same.
+    Returns:
+        confusion matrix as a list: [TP,FP,TN,FN]
+        Returns None if there are no annotations in ground_truth or prediction annotations
+    """
     if has_no_matching_annotations(ground_truths, predictions):
         return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]
     elif has_no_annotations(ground_truths, predictions):
-        # Note that we could return [0,0,0,0] but that will bloat the imports for no reason
         return None
     elif isinstance(predictions[0].value, Mask):
-        return mask_confusion_matrix(ground_truths, predictions, iou,
-                                     include_subclasses)
+        return mask_confusion_matrix(ground_truths, predictions,
+                                     include_subclasses, iou)
     elif isinstance(predictions[0].value, Geometry):
-        return vector_confusion_matrix(ground_truths, predictions, iou,
-                                       include_subclasses)
+        return vector_confusion_matrix(ground_truths, predictions,
+                                       include_subclasses, iou)
     elif isinstance(predictions[0], ClassificationAnnotation):
         return classification_confusion_matrix(ground_truths, predictions)
     else:
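
The aggregation in this hunk is an element-wise sum of per-class [TP,FP,TN,FN] lists, skipping classes that returned None. A minimal sketch of that step with made-up per-class values (illustrative, not part of the commit):

import numpy as np

# Hypothetical per-class confusion matrices, each [TP, FP, TN, FN].
per_class = {
    "cat": [3, 1, 0, 2],   # 3 matched, 1 spurious prediction, 2 missed labels
    "dog": [5, 0, 0, 1],
    "bird": None,          # a class with no annotations on either side is skipped
}

matrices = [m for m in per_class.values() if m is not None]
summed = None if not matrices else np.sum(matrices, axis=0).tolist()
print(summed)  # [8, 1, 0, 3]
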
@@ -63,7 +86,8 @@ def classification_confusion_matrix(
         ground_truths: List of ground truth classification annotations
         predictions: List of prediction classification annotations
     Returns:
-        float representing the iou score for the classification
+        confusion matrix as a list: [TP,FP,TN,FN]
+        Returns None if there are no annotations in ground_truth or prediction annotations
     """
 
     if has_no_matching_annotations(ground_truths, predictions):
@@ -86,27 +110,56 @@ def classification_confusion_matrix(
     elif isinstance(prediction.value, Checklist):
         return checklist_confusion_matrix(ground_truth.value, prediction.value)
     else:
-        raise ValueError(f"Unsupported subclass. {prediction}.")
+        raise ValueError(
+            f"Unsupported subclass. {prediction}. Only Radio and Checklist are supported"
+        )
 
 
 def vector_confusion_matrix(ground_truths: List[ObjectAnnotation],
                             predictions: List[ObjectAnnotation],
-                            iou: float,
                             include_subclasses: bool,
+                            iou: float,
                             buffer=70.) -> Optional[ConfusionMatrixMetricValue]:
+    """
+    Computes confusion matrix for any vector class (point, polygon, line, rectangle).
+    Ground truths and predictions should all belong to the same class.
+
+    Args:
+        ground_truths: List of ground truth vector annotations
+        predictions: List of prediction vector annotations
+        iou: minimum overlap between objects for them to count as matching
+        include_subclasses (bool): Whether or not to include subclasses in the calculation.
+            If set to True, the iou between two overlapping objects of the same type is 0 if the subclasses are not the same.
+        buffer: How much to buffer points and lines (used for determining if overlap meets the iou threshold)
+    Returns:
+        confusion matrix as a list: [TP,FP,TN,FN]
+        Returns None if there are no annotations in ground_truth or prediction annotations
+    """
     if has_no_matching_annotations(ground_truths, predictions):
         return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]
     elif has_no_annotations(ground_truths, predictions):
         return None
 
     pairs = _get_vector_pairs(ground_truths, predictions, buffer=buffer)
-    return object_pair_confusion_matrix(pairs, iou, include_subclasses)
+    return object_pair_confusion_matrix(pairs, include_subclasses, iou)
 
 
-def object_pair_confusion_matrix(
-        pairs: List[Tuple[ObjectAnnotation, ObjectAnnotation,
-                          ScalarMetricValue]], iou,
-        include_subclasses) -> ConfusionMatrixMetricValue:
+def object_pair_confusion_matrix(pairs: List[Tuple[ObjectAnnotation,
+                                                   ObjectAnnotation,
+                                                   ScalarMetricValue]],
+                                 include_subclasses: bool,
+                                 iou: float) -> ConfusionMatrixMetricValue:
+    """
+    Computes the confusion matrix for a list of object annotation pairs.
+    Performs greedy matching of pairs.
+
+    Args:
+        pairs: A list of object annotation pairs with an iou score.
+            This is used to determine matching priority (or if objects are matching at all) since objects can only be matched once.
+        iou: iou threshold to determine if objects are matching
+    Returns:
+        confusion matrix as a list: [TP,FP,TN,FN]
+    """
     pairs.sort(key=lambda triplet: triplet[2], reverse=True)
     prediction_ids = set()
     ground_truth_ids = set()
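
The greedy matching described in the object_pair_confusion_matrix docstring sorts candidate pairs by iou and lets each ground truth and each prediction match at most once; leftover predictions count as FPs and leftover ground truths as FNs. A hedged sketch of that idea; the helper below and its names are illustrative, not the library's implementation:

def greedy_confusion_matrix(pairs, ground_truths, predictions, iou):
    # pairs: (ground_truth, prediction, iou_score) triplets, as in the hunk above.
    pairs.sort(key=lambda triplet: triplet[2], reverse=True)  # best overlap first
    matched_gt, matched_pred = set(), set()
    for gt, pred, score in pairs:
        if score < iou:
            break  # pairs are sorted, so the rest overlap even less
        if id(gt) not in matched_gt and id(pred) not in matched_pred:
            matched_gt.add(id(gt))
            matched_pred.add(id(pred))
    tps = len(matched_gt)
    fps = len(predictions) - len(matched_pred)   # predictions never matched
    fns = len(ground_truths) - len(matched_gt)   # labels never matched
    return [tps, fps, 0, fns]                    # TNs are not tracked
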
@@ -144,11 +197,10 @@ def radio_confusion_matrix(ground_truth: Radio,
     """
     Calculates confusion between ground truth and predicted radio values
 
-    The way we are calculating confusion matrix metrics:
-    - TNs aren't defined because we don't know how many other classes exist ... etc
-
-    When P == L, then we get [1,0,0,0]
-    when P != L, we get [0,1,0,1]
+    Calculation:
+        - TNs aren't defined because we don't know how many other classes exist
+        - When P == L, then we get [1,0,0,0]
+        - When P != L, we get [0,1,0,1]
 
     This is because we are aggregating the stats for the entire radio. Not for each class.
@@ -169,9 +221,16 @@ def checklist_confusion_matrix(
         ground_truth: Checklist,
         prediction: Checklist) -> ConfusionMatrixMetricValue:
     """
-    Calculates agreement between ground truth and predicted checklist items
+    Calculates agreement between ground truth and predicted checklist items:
+
+    Calculation:
+        - When a prediction matches a label, that counts as a true positive.
+        - When a prediction was made and does not have a corresponding label, this is counted as a false positive.
+        - When a label does not have a corresponding prediction, this is counted as a false negative.
+
+    We are also not tracking TNs since we don't know the number of possible classes
+    (and they aren't necessary for precision/recall/f1).
 
-    Also not tracking TNs
     """
     key = get_identifying_key(prediction.answer, ground_truth.answer)
     schema_ids_pred = {getattr(answer, key) for answer in prediction.answer}
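
The checklist agreement described above comes down to set operations over answer identifiers, in the spirit of the schema_ids_pred comprehension shown in the hunk. A small sketch with made-up ids:

def checklist_matrix_sketch(ground_truth_ids, prediction_ids):
    tps = len(ground_truth_ids & prediction_ids)   # answers present in both
    fps = len(prediction_ids - ground_truth_ids)   # predicted but never labeled
    fns = len(ground_truth_ids - prediction_ids)   # labeled but never predicted
    return [tps, fps, 0, fns]                      # TNs are not tracked

print(checklist_matrix_sketch({"a", "b", "c"}, {"b", "c", "d"}))  # [2, 1, 0, 1]
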
@@ -185,33 +244,35 @@ def checklist_confusion_matrix(
     return [tps, fps, 0, fns]
 
 
-def mask_confusion_matrix(
-        ground_truths: List[ObjectAnnotation],
-        predictions: List[ObjectAnnotation], iou,
-        include_subclasses: bool) -> Optional[ScalarMetricValue]:
+def mask_confusion_matrix(ground_truths: List[ObjectAnnotation],
+                          predictions: List[ObjectAnnotation],
+                          include_subclasses: bool,
+                          iou: float) -> Optional[ScalarMetricValue]:
     """
-    Computes iou score for all features with the same feature schema id.
-    Calculation includes subclassifications.
+    Computes confusion matrix metric for two masks.
+
+    Important:
+        - If including subclasses in the calculation, then the metrics are computed the same as if it were object detection: each mask is its own instance.
+        - Otherwise this metric is computed on pixel-level annotations.
 
     Args:
         ground_truths: List of ground truth mask annotations
         predictions: List of prediction mask annotations
     Returns:
-        float representing the iou score for the masks
+        confusion matrix as a list: [TP,FP,TN,FN]
     """
     if has_no_matching_annotations(ground_truths, predictions):
         return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]
     elif has_no_annotations(ground_truths, predictions):
         return None
 
     if include_subclasses:
-        # This results in a faily drastically different value.
+        # This results in a fairly drastically different value than without subclasses.
         # If we have subclasses set to True, then this is object detection with masks
-        # Otherwise this will flatten the masks.
-        # TODO: Make this more apprent in the configuration.
+        # Otherwise this will compute metrics on each pixel.
         pairs = _get_mask_pairs(ground_truths, predictions)
         return object_pair_confusion_matrix(
-            pairs, iou, include_subclasses=include_subclasses)
+            pairs, include_subclasses=include_subclasses, iou=iou)
 
     prediction_np = np.max([pred.value.draw(color=1) for pred in predictions],
                            axis=0)
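
When include_subclasses is False, the branch above flattens all masks into a single binary array (the np.max over pred.value.draw(color=1)) and counts agreement per pixel. A hedged sketch of that pixel-level counting, assuming plain binary numpy arrays:

import numpy as np

def pixel_confusion_matrix_sketch(gt_mask, pred_mask):
    gt, pred = gt_mask.astype(bool), pred_mask.astype(bool)
    tp = int(np.sum(gt & pred))     # pixel labeled and predicted
    fp = int(np.sum(~gt & pred))    # pixel predicted but not labeled
    tn = int(np.sum(~gt & ~pred))   # pixel in neither mask
    fn = int(np.sum(gt & ~pred))    # pixel labeled but not predicted
    return [tp, fp, tn, fn]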

labelbox/data/metrics/confusion_matrix/confusion_matrix.py

Lines changed: 8 additions & 7 deletions
@@ -19,17 +19,17 @@ def confusion_matrix_metric(ground_truths: List[Union[
         include_subclasses=True,
         iou=0.5) -> List[ConfusionMatrixMetric]:
     """
-    Computes miou between two sets of annotations.
+    Computes confusion matrix metrics between two sets of annotations.
     This will most commonly be used for data row level metrics.
-    Each class in the annotation list is weighted equally in the iou score.
+    On the front end these will be displayed as precision, recall, and f1 scores.
 
     Args:
         ground_truth: Label containing human annotations or annotations known to be correct
         prediction: Label representing model predictions
-        include_subclasses (bool): Whether or not to include subclasses in the iou calculation.
+        include_subclasses (bool): Whether or not to include subclasses in the calculation.
             If set to True, the iou between two overlapping objects of the same type is 0 if the subclasses are not the same.
     Returns:
-        Returns a list of ScalarMetrics. Will be empty if there were no predictions and labels. Otherwise a single metric will be returned.
+        Returns a list of ConfusionMatrixMetrics. Will be empty if there were no predictions and labels. Otherwise a single metric will be returned.
     """
     if not (0. < iou < 1.):
         raise ValueError("iou must be between 0 and 1")
@@ -53,15 +53,16 @@ def feature_confusion_matrix_metric(
         iou: float = 0.5,
 ) -> List[ConfusionMatrixMetric]:
     """
-    Computes the miou for each type of class in the list of annotations.
+    Computes the confusion matrix metrics for each type of class in the list of annotations.
+    On the front end these will be displayed as precision, recall, and f1 scores.
 
     Args:
         ground_truth: Label containing human annotations or annotations known to be correct
         prediction: Label representing model predictions
-        include_subclasses (bool): Whether or not to include subclasses in the iou calculation.
+        include_subclasses (bool): Whether or not to include subclasses in the calculation.
             If set to True, the iou between two overlapping objects of the same type is 0 if the subclasses are not the same.
     Returns:
-        Returns a list of ScalarMetrics.
+        Returns a list of ConfusionMatrixMetrics.
         There will be one metric for each class in the union of ground truth and prediction classes.
     """
     # Classifications are supported because we just take a naive approach to them.
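
The updated docstrings note that these [TP,FP,TN,FN] values are surfaced as precision, recall, and f1 on the front end. For reference, the standard conversion (illustrative helper, not part of this commit):

def prf1_from_confusion_matrix(value):
    tp, fp, _tn, fn = value
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1

print(prf1_from_confusion_matrix([8, 1, 0, 3]))  # (0.888..., 0.727..., 0.8)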

labelbox/data/metrics/iou/calculation.py

Lines changed: 6 additions & 10 deletions
@@ -1,17 +1,13 @@
-"""
-All intermediate functions required to create iou scores.
-These can be used in user workflows to create custom metrics.
-"""
-
-from labelbox.data.annotation_types.metrics.scalar import ScalarMetricValue
 from typing import List, Optional, Tuple, Union
-from shapely.geometry import Polygon
 from itertools import product
+
+from shapely.geometry import Polygon
 import numpy as np
+
+from ..processing import get_feature_pairs, get_identifying_key, has_no_annotations, has_no_matching_annotations
 from ...annotation_types import (ObjectAnnotation, ClassificationAnnotation,
                                  Mask, Geometry, Point, Line, Checklist, Text,
-                                 Radio)
-from ..processing import get_feature_pairs, get_identifying_key, has_no_annotations, has_no_matching_annotations
+                                 Radio, ScalarMetricValue)
 
 
 def miou(ground_truths: List[Union[ObjectAnnotation, ClassificationAnnotation]],
@@ -45,7 +41,7 @@ def feature_miou(ground_truths: List[Union[ObjectAnnotation,
                                            ClassificationAnnotation]],
                  include_subclasses: bool) -> Optional[ScalarMetricValue]:
     """
-    Computes iou score for all features with the same feature schema id.
+    Computes iou score for all features of the same class.
 
     Args:
         ground_truths: List of ground truth annotations with the same feature schema.
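
The iou scores this module computes for vector features boil down to Shapely's intersection area over union area. A minimal standalone sketch with two made-up rectangles:

from shapely.geometry import Polygon

box_a = Polygon([(0, 0), (0, 2), (2, 2), (2, 0)])
box_b = Polygon([(1, 0), (1, 2), (3, 2), (3, 0)])
iou = box_a.intersection(box_b).area / box_a.union(box_b).area
print(iou)  # 2 / 6 -> 0.333...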
