@@ -11,8 +11,6 @@
                                 Line, Checklist, Text, Radio)

 from .utils import get_feature_pairs
-
-
 """
 Instead of these functions accepting labels, they should accept annotations.
 Then we can add a helper for applying functions across pred and label combinations.
@@ -28,16 +26,22 @@
 Nike - Somehow getting an issue with empty masks; cause unknown.
 """

+
 # TODO: What should we call this?
 # We should be returning these objects..
-def data_row_miou_v2(ground_truth: Label, prediction: Label, include_subclasses=True) -> List[ScalarMetric]:
+def data_row_miou_v2(ground_truth: Label,
+                     prediction: Label,
+                     include_subclasses=True) -> List[ScalarMetric]:
     feature_ious = data_row_miou(ground_truth.annotations,
-                                 prediction.annotations, include_subclasses)
-    return [ScalarMetric(metric_name="iou", value=feature_ious)]
+                                 prediction.annotations, include_subclasses)
+    return [ScalarMetric(metric_name="iou", value=feature_ious)]
+

-def features_miou(
-        ground_truths: List[Union[ObjectAnnotation, ClassificationAnnotation]],
-        predictions: List[Union[ObjectAnnotation, ClassificationAnnotation]], include_subclasses=True) -> List[ScalarMetric]:
+def features_miou(ground_truths: List[Union[ObjectAnnotation,
+                                            ClassificationAnnotation]],
+                  predictions: List[Union[ObjectAnnotation,
+                                          ClassificationAnnotation]],
+                  include_subclasses=True) -> List[ScalarMetric]:
     """
     Groups annotations by feature_schema_id or name (whichever is available), calculates the iou score, and returns the mean across all features.

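For orientation: the "helper for applying functions across pred and label combinations" mentioned in the module docstring is what get_feature_pairs provides. A minimal stand-in sketch of that grouping idea, using plain dicts instead of the SDK's annotation types (all names below are illustrative, not the SDK's API):

    from collections import defaultdict

    def group_by_feature(predictions, ground_truths):
        # Pair up predictions and ground truths that share a feature name.
        pairs = defaultdict(lambda: ([], []))
        for pred in predictions:
            pairs[pred["name"]][0].append(pred)
        for truth in ground_truths:
            pairs[truth["name"]][1].append(truth)
        return pairs

    preds = [{"name": "dog"}, {"name": "cat"}]
    truths = [{"name": "dog"}]
    for name, (ps, gs) in group_by_feature(preds, truths).items():
        print(name, len(ps), len(gs))  # dog 1 1, then cat 1 0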
@@ -50,15 +54,17 @@ def features_miou(
     """
     # Classifications are supported because we just take a naive approach to them..
     annotation_pairs = get_feature_pairs(predictions, ground_truths)
-    return [
-        ScalarMetric(
-            metric_name="iou",
-            value=feature_miou(annotation_pair[0], annotation_pair[1], include_subclasses)
-        ) for annotation_pair in annotation_pairs
+    return [
+        ScalarMetric(metric_name="iou",
+                     value=feature_miou(annotation_pair[0], annotation_pair[1],
+                                        include_subclasses))
+        for annotation_pair in annotation_pairs
     ]


-def data_row_miou(ground_truth: Label, prediction: Label, include_subclasses=True) -> Optional[float]:
+def data_row_miou(ground_truth: Label,
+                  prediction: Label,
+                  include_subclasses=True) -> Optional[float]:
     """
     Calculate iou for two labels corresponding to the same data row.

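The rewritten comprehension returns one ScalarMetric per feature pair rather than a bare float. A self-contained sketch of the resulting shape, with ScalarMetric modeled as a dataclass (the real class comes from the SDK; only the two fields used above are assumed):

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class ScalarMetric:  # stand-in for the SDK class of the same name
        metric_name: str
        value: Optional[float]

    per_feature_iou = {"dog": 0.8, "cat": 0.6}  # toy per-feature scores
    metrics = [ScalarMetric(metric_name="iou", value=v)
               for v in per_feature_iou.values()]
    print([(m.metric_name, m.value) for m in metrics])  # [('iou', 0.8), ('iou', 0.6)]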
@@ -70,21 +76,21 @@ def data_row_miou(ground_truth: Label, prediction: Label, include_subclasses=True)
     Returns None if there are no annotations in ground_truth or prediction Labels
     """
     feature_ious = features_miou(ground_truth.annotations,
-                                 prediction.annotations, include_subclasses)
-    return average_ious({feature.metric_name: feature.value for feature in feature_ious})
+                                 prediction.annotations, include_subclasses)
+    return average_ious(
+        {feature.metric_name: feature.value for feature in feature_ious})


-def average_ious(feature_ious: Dict[str, Optional[float]]) -> Optional[float]:
+def average_ious(feature_ious: Dict[str, Optional[float]]) -> Optional[float]:
     ious = [iou for iou in feature_ious.values() if iou is not None]
     return None if not len(ious) else np.mean(ious)


-
-def feature_miou(
-        ground_truths: List[Union[ObjectAnnotation, ClassificationAnnotation]],
-        predictions: List[Union[ObjectAnnotation, ClassificationAnnotation]],
-        include_subclasses: bool
-) -> Optional[float]:
+def feature_miou(ground_truths: List[Union[ObjectAnnotation,
+                                           ClassificationAnnotation]],
+                 predictions: List[Union[ObjectAnnotation,
+                                         ClassificationAnnotation]],
+                 include_subclasses: bool) -> Optional[float]:
     """
     Computes iou score for all features with the same feature schema id.

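average_ious drops undefined (None) per-feature scores before averaging, and itself returns None when every feature was undefined. A runnable restatement of that logic with toy inputs:

    import numpy as np
    from typing import Dict, Optional

    def average_ious(feature_ious: Dict[str, Optional[float]]) -> Optional[float]:
        # Same logic as above: ignore None scores; undefined if nothing remains.
        ious = [iou for iou in feature_ious.values() if iou is not None]
        return None if not len(ious) else np.mean(ious)

    print(average_ious({"dog": 0.5, "cat": None, "bird": 0.75}))  # 0.625
    print(average_ious({"dog": None}))                            # None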
@@ -100,10 +106,12 @@ def feature_miou(
     elif not len(ground_truths) and len(predictions):
         # No ground truth annotations but there are predictions: no matches.
         return 0.
-    elif not len(ground_truths) and not len(predictions):  # TODO: This shouldn't run at all for subclasses. Otherwise it should return 1.
+    elif not len(ground_truths) and not len(
+            predictions
+    ):  # TODO: This shouldn't run at all for subclasses. Otherwise it should return 1.
         # Ignore examples that do not have any annotations or predictions.
         # This could maybe be counted as correct but could also skew the stats..
-        return  # Undefined (neither wrong nor right)
+        return  # Undefined (neither wrong nor right)
     elif isinstance(predictions[0].value, Mask):
         return mask_miou(ground_truths, predictions, include_subclasses)
     elif isinstance(predictions[0].value, Geometry):
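The guards above form a small decision table for empty inputs before any geometry comparison happens. A runnable sketch of just the two branches visible in this hunk (the fall-through to the mask/vector paths is represented by a string):

    def empty_case(ground_truths, predictions):
        # Mirrors the empty-input guards in feature_miou.
        if not len(ground_truths) and len(predictions):
            return 0.    # predictions with no ground truth: all mismatches
        if not len(ground_truths) and not len(predictions):
            return None  # nothing on either side: score is undefined
        return "compare geometries"

    print(empty_case([], ["pred"]))  # 0.0
    print(empty_case([], []))        # None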
@@ -117,7 +125,8 @@ def feature_miou(

 def vector_miou(ground_truths: List[ObjectAnnotation],
                 predictions: List[ObjectAnnotation],
-                buffer=70., include_subclasses=True) -> float:
+                buffer=70.,
+                include_subclasses=True) -> float:
     """
     Computes iou score for all features with the same feature schema id.
     Calculation includes subclassifications.
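The buffer default of 70. suggests that thin vector features (lines, points) are widened into areas before an area-based iou is taken; that reading is an assumption here, not something this hunk shows. A shapely sketch of the idea:

    from shapely.geometry import LineString

    # Two parallel lines 20 units apart, each buffered into a ~140-unit-wide region.
    a = LineString([(0, 0), (100, 0)]).buffer(70.)
    b = LineString([(0, 20), (100, 20)]).buffer(70.)

    iou = a.intersection(b).area / a.union(b).area
    print(round(iou, 3))  # well above 0.5: the buffered regions mostly overlap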
@@ -139,10 +148,12 @@ def vector_miou(ground_truths: List[ObjectAnnotation],
                 ground_truth) not in solution_features:
             solution_features.update({id(prediction), id(ground_truth)})
             if include_subclasses:
-                classification_iou = average_ious(get_iou_across_features(
-                    prediction.classifications, ground_truth.classifications))
+                classification_iou = average_ious(
+                    get_iou_across_features(prediction.classifications,
+                                            ground_truth.classifications))
                 classification_iou = classification_iou if classification_iou is not None else agreement
-                solution_agreements.append((agreement + classification_iou) / 2.)
+                solution_agreements.append(
+                    (agreement + classification_iou) / 2.)
             else:
                 solution_agreements.append(agreement)

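So for a matched pair, the final agreement is the unweighted mean of the geometric iou and the subclass iou, with the subclass term falling back to the geometric score when no subclasses were comparable. The arithmetic, spelled out:

    agreement = 0.9            # geometric iou of one matched pair
    classification_iou = None  # undefined: no subclasses to compare

    classification_iou = (classification_iou
                          if classification_iou is not None else agreement)
    print((agreement + classification_iou) / 2.)  # 0.9: falls back to geometry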
@@ -153,7 +164,8 @@ def vector_miou(ground_truths: List[ObjectAnnotation],


 def mask_miou(ground_truths: List[ObjectAnnotation],
-              predictions: List[ObjectAnnotation], include_subclasses=True) -> float:
+              predictions: List[ObjectAnnotation],
+              include_subclasses=True) -> float:
     """
     Computes iou score for all features with the same feature schema id.
     Calculation includes subclassifications.
@@ -283,6 +295,3 @@ def _polygon_iou(poly1: Polygon, poly2: Polygon) -> float:
 def _mask_iou(mask1: np.ndarray, mask2: np.ndarray) -> float:
     """Computes iou between two binary segmentation masks."""
     return np.sum(mask1 & mask2) / np.sum(mask1 | mask2)
-
-
-
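_mask_iou is the textbook binary-mask iou: overlapping pixels over combined pixels. A worked numpy example:

    import numpy as np

    mask1 = np.array([[1, 1, 0, 0]], dtype=bool)
    mask2 = np.array([[0, 1, 1, 0]], dtype=bool)

    # Intersection has 1 pixel, union has 3, so iou = 1/3.
    print(np.sum(mask1 & mask2) / np.sum(mask1 | mask2))  # 0.3333...

Note that two all-empty masks make this expression 0/0 (a nan plus a runtime warning), which is likely the empty-mask issue flagged in the module docstring.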