
Commit d6c8fe1

Author: Matt Sokoloff
Commit message: wip
1 parent e05322c commit d6c8fe1

File tree: 3 files changed, +102 -66 lines changed

labelbox/data/metrics/confusion_matrix/calculation.py

Lines changed: 64 additions & 24 deletions
@@ -1,7 +1,7 @@
-from pydantic.utils import truncate
+from labelbox.data.metrics.iou.calculation import _mask_iou, miou

 from labelbox.data.annotation_types.metrics.confusion_matrix import \
     ConfusionMatrixMetricValue
@@ -27,11 +27,12 @@ def confusion_matrix(ground_truths: List[Union[ObjectAnnotation,
     annotation_pairs = get_feature_pairs(predictions, ground_truths)
     ious = [
-        feature_confusion_matrix(annotation_pair[0], annotation_pair[1], include_subclasses)
+        feature_confusion_matrix(annotation_pair[0], annotation_pair[1], iou, include_subclasses)
         for annotation_pair in annotation_pairs.values()
     ]
     ious = [iou for iou in ious if iou is not None]
-    return None if not len(ious) else np.sum(ious, axis=0)
+
+    return None if not len(ious) else np.sum(ious, axis=0).tolist()
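Each entry of ious is now a per-feature [tps, fps, tns, fns] list rather than a scalar, so the final line sums element-wise across features and converts the numpy array back to a plain list for serialization. A minimal sketch of that aggregation step, with made-up counts:

import numpy as np

# Hypothetical per-feature confusion vectors, [tps, fps, tns, fns],
# as returned by feature_confusion_matrix for two feature schemas.
per_feature = [
    [3, 1, 0, 2],  # e.g. "dog" annotations
    [5, 0, 0, 1],  # e.g. "cat" annotations
]

# np.sum(..., axis=0) adds the vectors element-wise; .tolist() turns the
# numpy array into a plain list so it serializes cleanly as a metric value.
combined = np.sum(per_feature, axis=0).tolist()
assert combined == [8, 1, 0, 3]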
@@ -42,13 +43,14 @@ def feature_confusion_matrix(ground_truths: List[Union[ObjectAnnotation,
                              iou: float,
                              include_subclasses: bool) -> Optional[ConfusionMatrixMetricValue]:
     if _no_matching_annotations(ground_truths, predictions):
-        return 0.
+        return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]
     elif _no_annotations(ground_truths, predictions):
+        # Note that we could return [0,0,0,0] but that will bloat the imports for no reason
         return None
     elif isinstance(predictions[0].value, Mask):
-        return mask_confusion_matrix(ground_truths, predictions, include_subclasses)
+        return mask_confusion_matrix(ground_truths, predictions, iou, include_subclasses)
     elif isinstance(predictions[0].value, Geometry):
-        return vector_confusion_matrix(ground_truths, predictions, include_subclasses)
+        return vector_confusion_matrix(ground_truths, predictions, iou, include_subclasses)
     elif isinstance(predictions[0], ClassificationAnnotation):
         return classification_confusion_matrix(ground_truths, predictions)
     else:
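The no-match early return above follows the same [tps, fps, tns, fns] layout used throughout this commit: a lone 1 flags that the unmatched side had at least one annotation, regardless of how many. A self-contained illustration (the helper name is hypothetical):

def no_match_value(ground_truths: list, predictions: list) -> list:
    # [tps, fps, tns, fns]: flag presence, not count, on each unmatched side.
    return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]

assert no_match_value([], ["pred_a", "pred_b"]) == [0, 1, 0, 0]  # only the FP slot is flagged
assert no_match_value(["gt_a"], []) == [0, 0, 0, 1]              # only the FN slot is flagged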
@@ -57,7 +59,7 @@ def feature_confusion_matrix(ground_truths: List[Union[ObjectAnnotation,


 def classification_confusion_matrix(ground_truths: List[ClassificationAnnotation],
-                                    predictions: List[ClassificationAnnotation]) -> ScalarMetricValue:
+                                    predictions: List[ClassificationAnnotation]) -> ConfusionMatrixMetricValue:
     """
     Computes iou score for all features with the same feature schema id.
@@ -68,8 +70,11 @@ def classification_confusion_matrix(ground_truths: List[ClassificationAnnotation
         float representing the iou score for the classification
     """

-    if len(predictions) != len(ground_truths) != 1:
-        return 0.
+    if _no_matching_annotations(ground_truths, predictions):
+        return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]
+    elif _no_annotations(ground_truths, predictions) or len(predictions) > 1 or len(ground_truths) > 1:
+        # Note that we could return [0,0,0,0] but that will bloat the imports for no reason
+        return None

     prediction, ground_truth = predictions[0], ground_truths[0]
@@ -87,32 +92,47 @@ def classification_confusion_matrix(ground_truths: List[ClassificationAnnotation
     else:
         raise ValueError(f"Unsupported subclass. {prediction}.")

+
+
 def vector_confusion_matrix(ground_truths: List[ObjectAnnotation],
                             predictions: List[ObjectAnnotation],
+                            iou,
                             include_subclasses: bool,
                             buffer=70.) -> Optional[ConfusionMatrixMetricValue]:
     if _no_matching_annotations(ground_truths, predictions):
-        return 0.
+        return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]
     elif _no_annotations(ground_truths, predictions):
         return None

     pairs = _get_vector_pairs(ground_truths, predictions, buffer=buffer)
-    pairs.sort(key=lambda triplet: triplet[2], reverse=True)
+    return object_pair_confusion_matrix(pairs, iou, include_subclasses)

-    prediction_ids = {id(pred) for pred in predictions}
-    ground_truth_ids = {id(gt) for gt in ground_truths}
+
+def object_pair_confusion_matrix(pairs: List[Tuple[ObjectAnnotation, ObjectAnnotation, ScalarMetricValue]], iou, include_subclasses) -> ConfusionMatrixMetricValue:
+    pairs.sort(key=lambda triplet: triplet[2], reverse=True)
+    prediction_ids = set()
+    ground_truth_ids = set()
     matched_predictions = set()
     matched_ground_truths = set()

     for prediction, ground_truth, agreement in pairs:
-        if id(prediction) not in matched_predictions and id(
-                ground_truth) not in matched_ground_truths:
-            matched_predictions.add(id(prediction))
-            matched_ground_truths.add(id(ground_truth))
-
+        prediction_id = id(prediction)
+        ground_truth_id = id(ground_truth)
+        prediction_ids.add(prediction_id)
+        ground_truth_ids.add(ground_truth_id)
+
+        if agreement > iou and \
+           prediction_id not in matched_predictions and \
+           ground_truth_id not in matched_ground_truths:
+            if include_subclasses and (ground_truth.classifications or prediction.classifications):
+                if miou(prediction.classifications, ground_truth.classifications) < 1.:
+                    # Incorrect if the subclasses don't 100% agree
+                    continue
+            matched_predictions.add(prediction_id)
+            matched_ground_truths.add(ground_truth_id)
     tps = len(matched_ground_truths)
     fps = len(prediction_ids.difference(matched_predictions))
-    fns = len(ground_truth_ids.difference(matched_predictions))
+    fns = len(ground_truth_ids.difference(matched_ground_truths))
     # Not defined for object detection.
     tns = 0
     return [tps, fps, tns, fns]
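The new object_pair_confusion_matrix greedily consumes pairs from highest IoU down, matching each prediction and ground truth at most once, and only when the agreement clears the threshold. A standalone sketch of that matching logic, using plain list objects instead of ObjectAnnotation instances and omitting the subclass check:

def toy_pair_confusion_matrix(pairs, iou_threshold):
    # pairs is a list of (prediction, ground_truth, iou_score) triplets.
    pairs.sort(key=lambda triplet: triplet[2], reverse=True)
    prediction_ids, ground_truth_ids = set(), set()
    matched_predictions, matched_ground_truths = set(), set()
    for prediction, ground_truth, agreement in pairs:
        prediction_ids.add(id(prediction))
        ground_truth_ids.add(id(ground_truth))
        if agreement > iou_threshold and \
           id(prediction) not in matched_predictions and \
           id(ground_truth) not in matched_ground_truths:
            matched_predictions.add(id(prediction))
            matched_ground_truths.add(id(ground_truth))
    tps = len(matched_ground_truths)
    fps = len(prediction_ids.difference(matched_predictions))
    fns = len(ground_truth_ids.difference(matched_ground_truths))
    return [tps, fps, 0, fns]  # tns is not defined for object detection

p1, p2, g1 = ["p1"], ["p2"], ["g1"]  # lists so each object has a distinct id()
pairs = [(p1, g1, 0.9), (p2, g1, 0.6)]
# g1 is greedily claimed by the highest-scoring prediction p1; p2 goes unmatched.
assert toy_pair_confusion_matrix(pairs, iou_threshold=0.5) == [1, 1, 0, 0]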
@@ -139,6 +159,21 @@ def _get_vector_pairs(
         pairs.append((prediction, ground_truth, score))
     return pairs

+def _get_mask_pairs(
+        ground_truths: List[ObjectAnnotation],
+        predictions: List[ObjectAnnotation]
+) -> List[Tuple[ObjectAnnotation, ObjectAnnotation, ScalarMetricValue]]:
+    """
+    # Get iou score for all pairs of ground truths and predictions
+    """
+    pairs = []
+    for prediction, ground_truth in product(predictions, ground_truths):
+        if isinstance(prediction.value, Mask) and isinstance(
+                ground_truth.value, Mask):
+            score = _mask_iou(prediction.value.draw(color=1),
+                              ground_truth.value.draw(color=1))
+            pairs.append((prediction, ground_truth, score))
+    return pairs

 def _polygon_iou(poly1: Polygon, poly2: Polygon) -> ScalarMetricValue:
     """Computes iou between two shapely polygons."""
@@ -147,7 +182,6 @@ def _polygon_iou(poly1: Polygon, poly2: Polygon) -> ScalarMetricValue:
         return 0.


-
 def radio_confusion_matrix(ground_truth: Radio, prediction: Radio) -> ScalarMetricValue:
     """
     Calculates confusion between ground truth and predicted radio values
@@ -179,10 +213,8 @@ def checklist_confusion_matrix(ground_truth: Checklist, prediction: Checklist) -
            len(schema_ids_label | schema_ids_pred))


-
-
 def mask_confusion_matrix(ground_truths: List[ObjectAnnotation],
-                          predictions: List[ObjectAnnotation]) -> Optional[ScalarMetricValue]:
+                          predictions: List[ObjectAnnotation], iou, include_subclasses: bool) -> Optional[ScalarMetricValue]:
     """
     Computes iou score for all features with the same feature schema id.
     Calculation includes subclassifications.
@@ -194,10 +226,18 @@ def mask_confusion_matrix(ground_truths: List[ObjectAnnotation],
         float representing the iou score for the masks
     """
     if _no_matching_annotations(ground_truths, predictions):
-        return 0.
+        return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]
     elif _no_annotations(ground_truths, predictions):
         return None

+    if include_subclasses:
+        # This results in a fairly drastically different value.
+        # If we have subclasses set to True, then this is object detection with masks.
+        # Otherwise this will flatten the masks.
+        # TODO: Make this more apparent in the configuration.
+        pairs = _get_mask_pairs(ground_truths, predictions)
+        return object_pair_confusion_matrix(pairs, iou, include_subclasses=include_subclasses)
+
     prediction_np = np.max([pred.value.draw(color=1) for pred in predictions],
                            axis=0)
     ground_truth_np = np.max(
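With include_subclasses set to False, mask_confusion_matrix flattens every instance of a feature into one binary canvas before comparing, so the metric becomes pixel-level rather than instance-level. A small sketch of that flattening step with made-up masks:

import numpy as np

# Hypothetical 4x4 binary masks for two predicted instances.
instance_a = np.zeros((4, 4), dtype=np.uint8)
instance_a[:2, :2] = 1
instance_b = np.zeros((4, 4), dtype=np.uint8)
instance_b[2:, 2:] = 1

# np.max over the stack is a pixel-wise union of the drawn masks:
# all instances of a feature collapse into one canvas.
flattened = np.max([instance_a, instance_b], axis=0)
assert flattened.sum() == instance_a.sum() + instance_b.sum()  # disjoint masks here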

labelbox/data/metrics/confusion_matrix/confusion_matrix.py

Lines changed: 13 additions & 23 deletions
@@ -1,22 +1,17 @@
 # type: ignore
+from collections import defaultdict
+from labelbox.data.annotation_types import feature
 from labelbox.data.annotation_types.metrics import ConfusionMatrixMetric
 from typing import List, Optional, Union
 from ...annotation_types import (Label, ObjectAnnotation,
                                  ClassificationAnnotation)

 from ..group import get_feature_pairs
-from .calculation import feature_miou
-from .calculation import miou
+from .calculation import confusion_matrix
+from .calculation import feature_confusion_matrix
 import numpy as np


-# You can include subclasses for each of these.
-# However, subclasses are only considered matching if there is 100% agreement
-# This is most applicable for Radio.
-
-# TODO: Do the top level grouping by all subclasses and support a feature level option..
-
-
 def confusion_matrix_metric(ground_truths: List[Union[ObjectAnnotation,
                                                       ClassificationAnnotation]],
                             predictions: List[Union[ObjectAnnotation,
@@ -38,18 +33,21 @@ def confusion_matrix_metric(ground_truths: List[Union[ObjectAnnotation,
     if not (0. < iou < 1.):
         raise ValueError("iou must be between 0 and 1")

-    iou = miou(ground_truths, predictions, include_subclasses)
+    value = confusion_matrix(ground_truths, predictions, iou, include_subclasses)
     # If both gt and preds are empty there is no metric
-    if iou is None:
+    if value is None:
         return []

-    return [ConfusionMatrixMetric(metric_name="confusion_matrix_{iou}pct_iou", value=iou)]
+    return [ConfusionMatrixMetric(metric_name=f"confusion_matrix_{int(iou*100)}pct_iou", value=value)]
+
+

 def feature_confusion_matrix_metric(ground_truths: List[Union[ObjectAnnotation,
                                                               ClassificationAnnotation]],
                                     predictions: List[Union[ObjectAnnotation,
                                                             ClassificationAnnotation]],
+                                    iou: float = 0.5,
                                     include_subclasses=True) -> List[ConfusionMatrixMetric]:
     """
     Computes the miou for each type of class in the list of annotations.
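Note the metric-name fix in the hunk above: the old string literal lacked the f prefix, so "{iou}" was emitted verbatim instead of being interpolated. The new version renders the threshold as a percentage:

iou = 0.5
metric_name = f"confusion_matrix_{int(iou * 100)}pct_iou"
assert metric_name == "confusion_matrix_50pct_iou"
# The old, un-prefixed literal stayed as "confusion_matrix_{iou}pct_iou".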
@@ -67,24 +65,16 @@ def feature_confusion_matrix_metric(ground_truths: List[Union[ObjectAnnotation,
     annotation_pairs = get_feature_pairs(predictions, ground_truths)
     metrics = []
     for key in annotation_pairs:
-
-        value = feature_miou(annotation_pairs[key][0], annotation_pairs[key][1],
-                             include_subclasses)
+        value = feature_confusion_matrix(annotation_pairs[key][0], annotation_pairs[key][1],
+                                         iou, include_subclasses)
         if value is None:
             continue
         metrics.append(
-            ConfusionMatrixMetric(metric_name="iou", feature_name=key, value=value))
+            ConfusionMatrixMetric(metric_name=f"confusion_matrix_{int(iou*100)}pct_iou", feature_name=key, value=value))
     return metrics


-def iou_by_tool():
-    #... We want to group by tool type.
-    #... Otherwise the weighted aggregates could be overpowered.
-    #... Since images might be huge, instances will have a few, and classifications will have the fewest.
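feature_confusion_matrix_metric now emits one ConfusionMatrixMetric per feature name rather than a single aggregate. A toy sketch of that loop, with dicts standing in for the metric objects and a stubbed scoring function (both hypothetical):

def toy_feature_metrics(annotation_pairs, iou, compute_value):
    # annotation_pairs mimics get_feature_pairs output:
    # {feature_name: (annotations_a, annotations_b)}
    metrics = []
    for key, (group_a, group_b) in annotation_pairs.items():
        value = compute_value(group_a, group_b, iou)
        if value is None:
            continue  # skipped when neither side has annotations for this feature
        metrics.append({"metric_name": f"confusion_matrix_{int(iou * 100)}pct_iou",
                        "feature_name": key,
                        "value": value})
    return metrics

def stub(group_a, group_b, iou):
    return [1, 0, 0, 0]  # stands in for feature_confusion_matrix

print(toy_feature_metrics({"dog": (["p1"], ["g1"])}, 0.5, stub))
# [{'metric_name': 'confusion_matrix_50pct_iou', 'feature_name': 'dog', 'value': [1, 0, 0, 0]}]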
labelbox/data/metrics/iou/calculation.py

Lines changed: 25 additions & 19 deletions
@@ -89,8 +89,10 @@ def vector_miou(ground_truths: List[ObjectAnnotation],
         return 0.
     elif _no_annotations(ground_truths, predictions):
         return None
-
     pairs = _get_vector_pairs(ground_truths, predictions, buffer=buffer)
+    return object_pair_miou(pairs, include_subclasses)
+
+def object_pair_miou(pairs: List[Tuple[ObjectAnnotation, ObjectAnnotation, ScalarMetricValue]], include_subclasses) -> ScalarMetricValue:
     pairs.sort(key=lambda triplet: triplet[2], reverse=True)
     solution_agreements = []
     solution_features = set()
@@ -134,8 +136,12 @@ def mask_miou(ground_truths: List[ObjectAnnotation],
     elif _no_annotations(ground_truths, predictions):
         return None

+    if include_subclasses:
+        pairs = _get_mask_pairs(ground_truths, predictions)
+        return object_pair_miou(pairs, include_subclasses=include_subclasses)
+
     prediction_np = np.max([pred.value.draw(color=1) for pred in predictions],
-                            axis=0)
+                           axis=0)
     ground_truth_np = np.max(
         [ground_truth.value.draw(color=1) for ground_truth in ground_truths],
         axis=0)
@@ -144,24 +150,8 @@ def mask_miou(ground_truths: List[ObjectAnnotation],
         "Prediction and mask must have the same shape."
         f" Found {prediction_np.shape}/{ground_truth_np.shape}.")

-    agreement = _mask_iou(ground_truth_np, prediction_np)
-    if not include_subclasses:
-        return agreement
-
-    prediction_classifications = []
-    for prediction in predictions:
-        prediction_classifications.extend(prediction.classifications)
-    ground_truth_classifications = []
-    for ground_truth in ground_truths:
-        ground_truth_classifications.extend(ground_truth.classifications)
+    return _mask_iou(ground_truth_np, prediction_np)

-    classification_iou = miou(ground_truth_classifications,
-                              prediction_classifications,
-                              include_subclasses=False)
-
-    classification_iou = classification_iou if classification_iou is not None else agreement
-
-    return (agreement + classification_iou) / 2.


 def classification_miou(ground_truths: List[ClassificationAnnotation],
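After this change, the non-subclass path of mask_miou reduces to a single pixel-wise IoU over the flattened canvases instead of averaging the mask IoU with a classification IoU. _mask_iou itself is not shown in this diff; a plausible equivalent, for illustration only:

import numpy as np

# An assumed pixel-wise IoU for binary masks; treat this as a sketch,
# not the actual _mask_iou implementation.
def pixel_iou(mask1: np.ndarray, mask2: np.ndarray) -> float:
    intersection = np.logical_and(mask1, mask2).sum()
    union = np.logical_or(mask1, mask2).sum()
    return float(intersection / union) if union else 0.0

a = np.array([[1, 1], [0, 0]])
b = np.array([[1, 0], [1, 0]])
assert abs(pixel_iou(a, b) - 1 / 3) < 1e-9  # intersection 1 pixel, union 3 pixels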
@@ -247,6 +237,22 @@ def _get_vector_pairs(
         pairs.append((prediction, ground_truth, score))
     return pairs

+def _get_mask_pairs(
+        ground_truths: List[ObjectAnnotation],
+        predictions: List[ObjectAnnotation]
+) -> List[Tuple[ObjectAnnotation, ObjectAnnotation, ScalarMetricValue]]:
+    """
+    # Get iou score for all pairs of ground truths and predictions
+    """
+    pairs = []
+    for prediction, ground_truth in product(predictions, ground_truths):
+        if isinstance(prediction.value, Mask) and isinstance(
+                ground_truth.value, Mask):
+            score = _mask_iou(prediction.value.draw(color=1),
+                              ground_truth.value.draw(color=1))
+            pairs.append((prediction, ground_truth, score))
+    return pairs

 def _polygon_iou(poly1: Polygon, poly2: Polygon) -> ScalarMetricValue:
     """Computes iou between two shapely polygons."""
