-from pydantic.utils import truncate
+from labelbox.data.metrics.iou.calculation import _mask_iou, miou
 from labelbox.data.annotation_types.metrics.confusion_matrix import \
     ConfusionMatrixMetricValue
@@ -27,11 +27,12 @@ def confusion_matrix(ground_truths: List[Union[ObjectAnnotation,
     annotation_pairs = get_feature_pairs(predictions, ground_truths)
     ious = [
-        feature_confusion_matrix(annotation_pair[0], annotation_pair[1], include_subclasses)
+        feature_confusion_matrix(annotation_pair[0], annotation_pair[1], iou, include_subclasses)
         for annotation_pair in annotation_pairs.values()
     ]
     ious = [iou for iou in ious if iou is not None]
-    return None if not len(ious) else np.sum(ious, axis=0)
+
+    return None if not len(ious) else np.sum(ious, axis=0).tolist()
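Since each per-feature result is now a [tps, fps, tns, fns] list rather than a scalar, np.sum(..., axis=0) totals the counts component-wise and .tolist() converts the resulting ndarray back into a plain, JSON-serializable list. A minimal sketch of that aggregation, with toy counts that are not values from this change:

import numpy as np

# Toy per-feature values: [tps, fps, tns, fns] for each feature schema.
per_feature = [[2, 1, 0, 0], [1, 0, 0, 3]]
combined = np.sum(per_feature, axis=0).tolist()
assert combined == [3, 1, 0, 3]  # element-wise totals, back to a plain list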
@@ -42,13 +43,14 @@ def feature_confusion_matrix(ground_truths: List[Union[ObjectAnnotation,
                              iou: float,
                              include_subclasses: bool) -> Optional[ConfusionMatrixMetricValue]:
     if _no_matching_annotations(ground_truths, predictions):
-        return 0.
+        return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]
     elif _no_annotations(ground_truths, predictions):
+        # Note that we could return [0,0,0,0] but that will bloat the imports for no reason
         return None
     elif isinstance(predictions[0].value, Mask):
-        return mask_confusion_matrix(ground_truths, predictions, include_subclasses)
+        return mask_confusion_matrix(ground_truths, predictions, iou, include_subclasses)
     elif isinstance(predictions[0].value, Geometry):
-        return vector_confusion_matrix(ground_truths, predictions, include_subclasses)
+        return vector_confusion_matrix(ground_truths, predictions, iou, include_subclasses)
     elif isinstance(predictions[0], ClassificationAnnotation):
         return classification_confusion_matrix(ground_truths, predictions)
     else:
@@ -57,7 +59,7 @@ def feature_confusion_matrix(ground_truths: List[Union[ObjectAnnotation,
 def classification_confusion_matrix(ground_truths: List[ClassificationAnnotation],
-                                    predictions: List[ClassificationAnnotation]) -> ScalarMetricValue:
+                                    predictions: List[ClassificationAnnotation]) -> ConfusionMatrixMetricValue:
     """
     Computes iou score for all features with the same feature schema id.
@@ -68,8 +70,11 @@ def classification_confusion_matrix(ground_truths: List[ClassificationAnnotation
         float representing the iou score for the classification
     """
-    if len(predictions) != len(ground_truths) != 1:
-        return 0.
+    if _no_matching_annotations(ground_truths, predictions):
+        return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]
+    elif _no_annotations(ground_truths, predictions) or len(predictions) > 1 or len(ground_truths) > 1:
+        # Note that we could return [0,0,0,0] but that will bloat the imports for no reason
+        return None

     prediction, ground_truth = predictions[0], ground_truths[0]
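The rewritten guards mirror the object branches: a classification present on only one side is reported as a single false positive or false negative, while empty or multi-valued features return None instead of a zero matrix. A hypothetical standalone restatement of that logic, assuming _no_matching_annotations flags one-sided features and _no_annotations flags empty ones:

# Hypothetical restatement of the guards; logic inferred from the helper names.
def guard(ground_truths: list, predictions: list):
    one_sided = bool(ground_truths) != bool(predictions)
    if one_sided:
        return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]
    if (not ground_truths and not predictions) or len(predictions) > 1 or len(ground_truths) > 1:
        return None
    return "compare the single pair"

assert guard([], ["pred"]) == [0, 1, 0, 0]  # unmatched prediction -> one false positive
assert guard(["gt"], []) == [0, 0, 0, 1]    # unmatched label -> one false negative
assert guard([], []) is None                # nothing to report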
@@ -87,32 +92,47 @@ def classification_confusion_matrix(ground_truths: List[ClassificationAnnotation
     else:
         raise ValueError(f"Unsupported subclass. {prediction}.")
+
+
 def vector_confusion_matrix(ground_truths: List[ObjectAnnotation],
                             predictions: List[ObjectAnnotation],
+                            iou,
                             include_subclasses: bool,
                             buffer=70.) -> Optional[ConfusionMatrixMetricValue]:
     if _no_matching_annotations(ground_truths, predictions):
-        return 0.
+        return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]
     elif _no_annotations(ground_truths, predictions):
         return None

     pairs = _get_vector_pairs(ground_truths, predictions, buffer=buffer)
-    pairs.sort(key=lambda triplet: triplet[2], reverse=True)
+    return object_pair_confusion_matrix(pairs, iou, include_subclasses)

-    prediction_ids = {id(pred) for pred in predictions}
-    ground_truth_ids = {id(gt) for gt in ground_truths}
+
+def object_pair_confusion_matrix(
+        pairs: List[Tuple[ObjectAnnotation, ObjectAnnotation, ScalarMetricValue]],
+        iou, include_subclasses) -> ConfusionMatrixMetricValue:
+    # Greedily match pairs in order of decreasing agreement.
+    pairs.sort(key=lambda triplet: triplet[2], reverse=True)
+    prediction_ids = set()
+    ground_truth_ids = set()
     matched_predictions = set()
     matched_ground_truths = set()

     for prediction, ground_truth, agreement in pairs:
-        if id(prediction) not in matched_predictions and id(
-                ground_truth) not in matched_ground_truths:
-            matched_predictions.add(id(prediction))
-            matched_ground_truths.add(id(ground_truth))
-
+        prediction_id = id(prediction)
+        ground_truth_id = id(ground_truth)
+        prediction_ids.add(prediction_id)
+        ground_truth_ids.add(ground_truth_id)
+
+        if agreement > iou and \
+                prediction_id not in matched_predictions and \
+                ground_truth_id not in matched_ground_truths:
+            if include_subclasses and (ground_truth.classifications or
+                                       prediction.classifications):
+                if miou(prediction.classifications, ground_truth.classifications) < 1.:
+                    # Incorrect if the subclasses don't 100% agree
+                    continue
+            matched_predictions.add(prediction_id)
+            matched_ground_truths.add(ground_truth_id)
     tps = len(matched_ground_truths)
     fps = len(prediction_ids.difference(matched_predictions))
-    fns = len(ground_truth_ids.difference(matched_predictions))
+    fns = len(ground_truth_ids.difference(matched_ground_truths))
     # Not defined for object detection.
     tns = 0
     return [tps, fps, tns, fns]
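Factoring the matching out into object_pair_confusion_matrix lets the vector and mask paths share one greedy assignment: pairs are visited in order of decreasing agreement, each prediction and ground truth can be consumed at most once, and anything unmatched lands in the false-positive or false-negative counts. A self-contained sketch of that assignment over (prediction_id, ground_truth_id, score) triplets, with toy ids and scores:

def greedy_counts(pairs, iou_threshold):
    # pairs: (prediction_id, ground_truth_id, score) triplets.
    pairs = sorted(pairs, key=lambda t: t[2], reverse=True)
    preds, gts = set(), set()
    matched_preds, matched_gts = set(), set()
    for pred_id, gt_id, score in pairs:
        preds.add(pred_id)
        gts.add(gt_id)
        if score > iou_threshold and pred_id not in matched_preds \
                and gt_id not in matched_gts:
            matched_preds.add(pred_id)
            matched_gts.add(gt_id)
    tps = len(matched_gts)
    return [tps, len(preds - matched_preds), 0, len(gts - matched_gts)]

# Two predictions over one ground truth: the higher-IoU pair wins,
# and the duplicate detection becomes a false positive.
assert greedy_counts([("p1", "g1", 0.9), ("p2", "g1", 0.7)], 0.5) == [1, 1, 0, 0]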
@@ -139,6 +159,21 @@ def _get_vector_pairs(
         pairs.append((prediction, ground_truth, score))
     return pairs

+def _get_mask_pairs(
+        ground_truths: List[ObjectAnnotation],
+        predictions: List[ObjectAnnotation]
+) -> List[Tuple[ObjectAnnotation, ObjectAnnotation, ScalarMetricValue]]:
+    """
+    Get iou score for all pairs of ground truths and predictions
+    """
+    pairs = []
+    for prediction, ground_truth in product(predictions, ground_truths):
+        if isinstance(prediction.value, Mask) and isinstance(
+                ground_truth.value, Mask):
+            score = _mask_iou(prediction.value.draw(color=1),
+                              ground_truth.value.draw(color=1))
+            pairs.append((prediction, ground_truth, score))
+    return pairs

 def _polygon_iou(poly1: Polygon, poly2: Polygon) -> ScalarMetricValue:
     """Computes iou between two shapely polygons."""
@@ -147,7 +182,6 @@ def _polygon_iou(poly1: Polygon, poly2: Polygon) -> ScalarMetricValue:
     return 0.


-
 def radio_confusion_matrix(ground_truth: Radio, prediction: Radio) -> ScalarMetricValue:
     """
     Calculates confusion between ground truth and predicted radio values
@@ -179,10 +213,8 @@ def checklist_confusion_matrix(ground_truth: Checklist, prediction: Checklist) -
            len(schema_ids_label | schema_ids_pred))


-
-
 def mask_confusion_matrix(ground_truths: List[ObjectAnnotation],
-                          predictions: List[ObjectAnnotation]) -> Optional[ScalarMetricValue]:
+                          predictions: List[ObjectAnnotation], iou,
+                          include_subclasses: bool) -> Optional[ScalarMetricValue]:
     """
     Computes iou score for all features with the same feature schema id.
     Calculation includes subclassifications.
@@ -194,10 +226,18 @@ def mask_confusion_matrix(ground_truths: List[ObjectAnnotation],
         float representing the iou score for the masks
     """
     if _no_matching_annotations(ground_truths, predictions):
-        return 0.
+        return [0, int(len(predictions) > 0), 0, int(len(ground_truths) > 0)]
     elif _no_annotations(ground_truths, predictions):
         return None

+    if include_subclasses:
+        # This results in a drastically different value.
+        # If we have subclasses set to True, then this is object detection with masks.
+        # Otherwise this will flatten the masks.
+        # TODO: Make this more apparent in the configuration.
+        pairs = _get_mask_pairs(ground_truths, predictions)
+        return object_pair_confusion_matrix(pairs, iou, include_subclasses=include_subclasses)
+
     prediction_np = np.max([pred.value.draw(color=1) for pred in predictions],
                            axis=0)
     ground_truth_np = np.max(
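The page is truncated here, but the surviving lines show the include_subclasses=False path flattening every drawn prediction mask into one binary array with np.max (and presumably doing the same for the ground-truth masks) before taking pixel-level counts. A sketch of what pixel-wise confusion counts over two flattened binary masks could look like; pixel_confusion_counts is a hypothetical helper, not the function the truncated code defines:

import numpy as np

def pixel_confusion_counts(prediction_np: np.ndarray, ground_truth_np: np.ndarray):
    # Hypothetical helper: pixel-wise [tps, fps, tns, fns] over binary masks.
    tps = int(np.sum((prediction_np == 1) & (ground_truth_np == 1)))
    fps = int(np.sum((prediction_np == 1) & (ground_truth_np == 0)))
    tns = int(np.sum((prediction_np == 0) & (ground_truth_np == 0)))
    fns = int(np.sum((prediction_np == 0) & (ground_truth_np == 1)))
    return [tps, fps, tns, fns]

pred = np.array([[1, 1], [0, 0]])
gt = np.array([[1, 0], [0, 1]])
assert pixel_confusion_counts(pred, gt) == [1, 1, 1, 1]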