Commit 456f2fe

Author: Matt Sokoloff
Commit message: format
1 parent e771541 · commit 456f2fe

7 files changed: +66 additions, -59 deletions


labelbox/data/annotation_types/metrics/scalar.py

Lines changed: 0 additions & 2 deletions

@@ -16,5 +16,3 @@ class ScalarMetric(BaseModel):
     subclass_name: Optional[str] = None
     aggregation: MetricAggregation = MetricAggregation.ARITHMETIC_MEAN
     extra: Dict[str, Any] = {}
-
-
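For orientation, the fields touched here sit on a pydantic model. A minimal, self-contained sketch of the model's shape as it appears in this commit (the `MetricAggregation` member value and the `metric_name`/`value`/`feature_name` fields seen elsewhere in this diff are reconstructed assumptions, not the SDK's exact definition):

# Hypothetical, trimmed reconstruction -- mirrors the hunk above, not the SDK.
from enum import Enum
from typing import Any, Dict, Optional

from pydantic import BaseModel


class MetricAggregation(Enum):
    ARITHMETIC_MEAN = "arithmetic_mean"  # assumed value; only the name is visible here


class ScalarMetric(BaseModel):
    metric_name: str
    value: float
    feature_name: Optional[str] = None
    subclass_name: Optional[str] = None
    aggregation: MetricAggregation = MetricAggregation.ARITHMETIC_MEAN
    extra: Dict[str, Any] = {}


metric = ScalarMetric(metric_name="iou", value=0.5)
print(metric.aggregation)  # defaults to MetricAggregation.ARITHMETIC_MEAN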

labelbox/data/metrics/iou.py

Lines changed: 42 additions & 33 deletions

@@ -11,8 +11,6 @@
                           Line, Checklist, Text, Radio)
 
 from .utils import get_feature_pairs
-
-
 """
 Instead of these functions accepting labels they should accept annotations..
 Then we can add a helper for applying functions across pred and label combinations..
@@ -28,16 +26,22 @@
 Nike - Somehow getting issue with empty masks. idk wtf
 """
 
+
 # TODO: What should we call this?
 # We should be returning these objects..
-def data_row_miou_v2(ground_truth: Label, prediction: Label, include_subclasses = True) -> List[ScalarMetric]:
+def data_row_miou_v2(ground_truth: Label,
+                     prediction: Label,
+                     include_subclasses=True) -> List[ScalarMetric]:
     feature_ious = data_row_miou(ground_truth.annotations,
-                                 prediction.annotations, include_subclasses)
-    return [ScalarMetric(metric_name = "iou", value = feature_ious)]
+                                 prediction.annotations, include_subclasses)
+    return [ScalarMetric(metric_name="iou", value=feature_ious)]
+
 
-def features_miou(
-    ground_truths : List[Union[ObjectAnnotation, ClassificationAnnotation]],
-    predictions: List[Union[ObjectAnnotation, ClassificationAnnotation]], include_subclasses = True) -> List[ScalarMetric]:
+def features_miou(ground_truths: List[Union[ObjectAnnotation,
+                                            ClassificationAnnotation]],
+                  predictions: List[Union[ObjectAnnotation,
+                                          ClassificationAnnotation]],
+                  include_subclasses=True) -> List[ScalarMetric]:
     """
     Groups annotations by feature_schema_id or name (which is available), calculates iou score and returns the mean across all features.
 
@@ -50,15 +54,17 @@ def features_miou(
     """
     # Classifications are supported because we just take a naive approach to them..
     annotation_pairs = get_feature_pairs(predictions, ground_truths)
-    return [
-        ScalarMetric(
-            metric_name = "iou",
-            value = feature_miou(annotation_pair[0], annotation_pair[1], include_subclasses)
-        ) for annotation_pair in annotation_pairs
+    return [
+        ScalarMetric(metric_name="iou",
+                     value=feature_miou(annotation_pair[0], annotation_pair[1],
+                                        include_subclasses))
+        for annotation_pair in annotation_pairs
     ]
 
 
-def data_row_miou(ground_truth: Label, prediction: Label, include_subclasses = True) -> Optional[float]:
+def data_row_miou(ground_truth: Label,
+                  prediction: Label,
+                  include_subclasses=True) -> Optional[float]:
     """
     Calculate iou for two labels corresponding to the same data row.
 
@@ -70,21 +76,21 @@ def data_row_miou(ground_truth: Label, prediction: Label, include_subclasses = T
     Returns None if there are no annotations in ground_truth or prediction Labels
     """
     feature_ious = features_miou(ground_truth.annotations,
-                                 prediction.annotations, include_subclasses)
-    return average_ious({feature.metric_name: feature.value for feature in feature_ious})
+                                 prediction.annotations, include_subclasses)
+    return average_ious(
+        {feature.metric_name: feature.value for feature in feature_ious})
 
 
-def average_ious(feature_ious : Dict[str, Optional[float]]) -> Optional[float]:
+def average_ious(feature_ious: Dict[str, Optional[float]]) -> Optional[float]:
     ious = [iou for iou in feature_ious.values() if iou is not None]
     return None if not len(ious) else np.mean(ious)
 
 
-
-def feature_miou(
-    ground_truths: List[Union[ObjectAnnotation, ClassificationAnnotation]],
-    predictions: List[Union[ObjectAnnotation, ClassificationAnnotation]],
-    include_subclasses: bool
-) -> Optional[float]:
+def feature_miou(ground_truths: List[Union[ObjectAnnotation,
+                                           ClassificationAnnotation]],
+                 predictions: List[Union[ObjectAnnotation,
+                                         ClassificationAnnotation]],
+                 include_subclasses: bool) -> Optional[float]:
     """
     Computes iou score for all features with the same feature schema id.
 
@@ -100,10 +106,12 @@ def feature_miou(
     elif not len(ground_truths) and len(predictions):
         # No ground truth annotations but there are predictions means no matches
         return 0.
-    elif not len(ground_truths) and not len(predictions):  #TODO: This shouldn't run at all for subclasses. Otherwise it should return 1.
+    elif not len(ground_truths) and not len(
+            predictions
+    ):  #TODO: This shouldn't run at all for subclasses. Otherwise it should return 1.
         # Ignore examples that do not have any annotations or predictions
         # This could maybe be counted as correct but could also skew the stats..
-        return # Undefined (neither wrong nor right. )
+        return  # Undefined (neither wrong nor right. )
     elif isinstance(predictions[0].value, Mask):
         return mask_miou(ground_truths, predictions, include_subclasses)
     elif isinstance(predictions[0].value, Geometry):
@@ -117,7 +125,8 @@ def feature_miou(
 
 def vector_miou(ground_truths: List[ObjectAnnotation],
                 predictions: List[ObjectAnnotation],
-                buffer=70., include_subclasses = True) -> float:
+                buffer=70.,
+                include_subclasses=True) -> float:
     """
     Computes iou score for all features with the same feature schema id.
     Calculation includes subclassifications.
@@ -139,10 +148,12 @@ def vector_miou(ground_truths: List[ObjectAnnotation],
                 ground_truth) not in solution_features:
             solution_features.update({id(prediction), id(ground_truth)})
             if include_subclasses:
-                classification_iou = average_ious(get_iou_across_features(
-                    prediction.classifications, ground_truth.classifications))
+                classification_iou = average_ious(
+                    get_iou_across_features(prediction.classifications,
+                                            ground_truth.classifications))
                 classification_iou = classification_iou if classification_iou is not None else agreement
-                solution_agreements.append((agreement + classification_iou) / 2.)
+                solution_agreements.append(
+                    (agreement + classification_iou) / 2.)
             else:
                 solution_agreements.append(agreement)
 
@@ -153,7 +164,8 @@ def vector_miou(ground_truths: List[ObjectAnnotation],
 
 
 def mask_miou(ground_truths: List[ObjectAnnotation],
-              predictions: List[ObjectAnnotation], include_subclasses = True) -> float:
+              predictions: List[ObjectAnnotation],
+              include_subclasses=True) -> float:
     """
     Computes iou score for all features with the same feature schema id.
     Calculation includes subclassifications.
@@ -283,6 +295,3 @@ def _polygon_iou(poly1: Polygon, poly2: Polygon) -> float:
 def _mask_iou(mask1: np.ndarray, mask2: np.ndarray) -> float:
     """Computes iou between two binary segmentation masks."""
     return np.sum(mask1 & mask2) / np.sum(mask1 | mask2)
-
-
-
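The two pure helpers in this file are small enough to demo standalone. A runnable sketch that mirrors the bodies shown above (`_mask_iou` and `average_ious`), assuming nothing beyond numpy:

# Standalone sketch of the two pure helpers in this diff. Mirrors the bodies
# shown above; not the labelbox package itself.
from typing import Dict, Optional

import numpy as np


def _mask_iou(mask1: np.ndarray, mask2: np.ndarray) -> float:
    """Computes iou between two binary segmentation masks."""
    return np.sum(mask1 & mask2) / np.sum(mask1 | mask2)


def average_ious(feature_ious: Dict[str, Optional[float]]) -> Optional[float]:
    # None entries mean "undefined" (no annotations on either side) and are
    # excluded from the mean rather than counted as 0 or 1.
    ious = [iou for iou in feature_ious.values() if iou is not None]
    return None if not len(ious) else np.mean(ious)


# Two 4x4 binary masks overlapping in a 2x2 region:
a = np.zeros((4, 4), dtype=bool)
b = np.zeros((4, 4), dtype=bool)
a[:3, :3] = True  # 9 pixels
b[1:, 1:] = True  # 9 pixels, 4 of them shared with `a`
print(_mask_iou(a, b))                          # 4 / 14, roughly 0.286
print(average_ious({"cat": 0.5, "dog": None}))  # 0.5; "dog" is ignored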

labelbox/data/serialization/labelbox_v1/converter.py

Lines changed: 2 additions & 0 deletions

@@ -17,6 +17,7 @@
 
 
 class LBV1Converter:
+
     @staticmethod
     def deserialize_video(json_data: Iterable[Dict[str, Any]],
                           client: "labelbox.Client"):
@@ -44,6 +45,7 @@ def deserialize(json_data: Iterable[Dict[str, Any]]) -> LabelGenerator:
         Returns:
             LabelGenerator containing the export data.
         """
+
         def label_generator():
             for example in json_data:
                 if 'frames' in example['Label']:
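Both additions in this file are blank lines from the formatter, but the surrounding hunk shows the converter's lazy pattern: `deserialize` wraps its parsing loop in a nested generator so rows are only processed as the caller iterates. A minimal sketch of that pattern, with hypothetical stand-in names and dicts in place of Label objects:

# Sketch of the nested-generator pattern visible in deserialize() above.
from typing import Any, Dict, Iterable, Iterator


def deserialize(json_data: Iterable[Dict[str, Any]]) -> Iterator[Dict[str, Any]]:

    def label_generator():
        for example in json_data:
            if 'frames' in example['Label']:
                # video exports are handled by a separate code path
                continue
            yield example  # the real converter builds a Label here

    return label_generator()


rows = [{'Label': {'objects': []}}, {'Label': {'frames': 'http://example.com'}}]
for row in deserialize(rows):
    print(row)  # only the non-video row is yielded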

labelbox/data/serialization/ndjson/classification.py

Lines changed: 1 addition & 1 deletion

@@ -120,7 +120,7 @@ def from_common(
                        ImageData]) -> "NDChecklist":
         return cls(answer=[
             NDFeature(schema_id=answer.feature_schema_id)
-            for answer in checklist.answer
+            for answer in checklist.answers
         ],
                    data_row={'id': data.uid},
                    schema_id=feature_schema_id,
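This one-word rename is the one behavioral fix in the commit: the checklist value type exposes its selections as `answers`, so the old `checklist.answer` raised AttributeError when serializing checklists to NDJson. A reduced, self-contained illustration (the `Checklist`/`ClassificationAnswer` stand-ins below are simplified, not the SDK classes):

# Simplified stand-ins illustrating the fixed comprehension above.
from dataclasses import dataclass
from typing import List


@dataclass
class ClassificationAnswer:
    feature_schema_id: str


@dataclass
class Checklist:
    answers: List[ClassificationAnswer]  # plural


checklist = Checklist(answers=[
    ClassificationAnswer("schema-a"),
    ClassificationAnswer("schema-b"),
])

# Before the fix this iterated `checklist.answer` and raised AttributeError.
answer = [{"schemaId": a.feature_schema_id} for a in checklist.answers]
print(answer)  # [{'schemaId': 'schema-a'}, {'schemaId': 'schema-b'}]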

labelbox/data/serialization/ndjson/label.py

Lines changed: 4 additions & 2 deletions

@@ -108,8 +108,10 @@ def _create_non_video_annotations(cls, label: Label):
         for annotation in non_video_annotations:
             if isinstance(annotation, ClassificationAnnotation):
                 if isinstance(annotation.value, Dropdown):
-                    raise ValueError("Dropdowns are not supported by the NDJson format."
-                                     " Please filter out Dropdown annotations before converting.")
+                    raise ValueError(
+                        "Dropdowns are not supported by the NDJson format."
+                        " Please filter out Dropdown annotations before converting."
+                    )
                 yield NDClassification.from_common(annotation, label.data)
             elif isinstance(annotation, ObjectAnnotation):
                 yield NDObject.from_common(annotation, label.data)
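The reflowed error message tells callers to remove Dropdown annotations themselves before converting. A plausible pre-filter, sketched with an assumed import path for `Dropdown` (adjust to wherever it lives in your SDK version):

# Hypothetical pre-filter matching the advice in the error message above.
from typing import List

from labelbox.data.annotation_types import Dropdown  # assumed import path


def without_dropdowns(annotations: List) -> List:
    """Return the annotations with Dropdown classifications removed."""
    return [
        annotation for annotation in annotations
        if not isinstance(getattr(annotation, 'value', None), Dropdown)
    ]

# Usage before NDJson conversion:
#   label.annotations = without_dropdowns(label.annotations)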

labelbox/data/serialization/ndjson/metric.py

Lines changed: 13 additions & 17 deletions

@@ -6,7 +6,6 @@
 from labelbox.data.serialization.ndjson.base import NDJsonBase
 
 
-
 class NDScalarMetric(NDJsonBase):
     metric_name: str
     metric_value: float
@@ -15,24 +14,23 @@ class NDScalarMetric(NDJsonBase):
     aggregation: MetricAggregation
 
     def to_common(self) -> ScalarMetric:
-        return ScalarMetric(
-            value=self.metric_value,
-            metric_name=self.metric_name,
-            feature_name=self.feature_name,
-            subclass_name=self.subclass_name,
-            aggregation=MetricAggregation[self.aggregation],
-            extra={'uuid': self.uuid})
+        return ScalarMetric(value=self.metric_value,
+                            metric_name=self.metric_name,
+                            feature_name=self.feature_name,
+                            subclass_name=self.subclass_name,
+                            aggregation=MetricAggregation[self.aggregation],
+                            extra={'uuid': self.uuid})
 
     @classmethod
     def from_common(cls, metric: ScalarMetric,
                     data: Union[TextData, ImageData]) -> "NDScalarMetric":
         return ScalarMetric(uuid=metric.extra.get('uuid'),
-                            metric_value=metric.value,
-                            metric_name=metric.metric_name,
-                            feature_name=metric.feature_name,
-                            subclass_name=metric.subclass_name,
-                            aggregation=metric.aggregation.value,
-                            data_row={'id': data.uid})
+                            metric_value=metric.value,
+                            metric_name=metric.metric_name,
+                            feature_name=metric.feature_name,
+                            subclass_name=metric.subclass_name,
+                            aggregation=metric.aggregation.value,
+                            data_row={'id': data.uid})
 
     def dict(self, *args, **kwargs):
         res = super().dict(*args, **kwargs)
@@ -63,13 +61,11 @@ def from_common(cls, annotation: ScalarMetric,
         return obj.from_common(annotation, data)
 
     @staticmethod
-    def lookup_object(
-            metric: ScalarMetric) -> "NDScalarMetric":
+    def lookup_object(metric: ScalarMetric) -> "NDScalarMetric":
         result = {
             ScalarMetric: NDScalarMetric,
         }.get(type(metric))
         if result is None:
             raise TypeError(
                 f"Unable to convert object to MAL format. `{type(metric)}`")
         return result
-
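One detail the reformat makes easier to see: `to_common` rehydrates the aggregation by enum name (`MetricAggregation[self.aggregation]`) while `from_common` writes out the member's value (`metric.aggregation.value`). A standalone sketch of that name/value distinction (the member value below is an assumption for illustration):

# Name vs. value lookup on an Enum, as used by to_common/from_common above.
from enum import Enum


class MetricAggregation(Enum):
    ARITHMETIC_MEAN = "arithmetic_mean"  # assumed value


member = MetricAggregation.ARITHMETIC_MEAN
print(member.value)                          # 'arithmetic_mean'
print(MetricAggregation["ARITHMETIC_MEAN"])  # lookup by name
print(MetricAggregation("arithmetic_mean"))  # lookup by value
# Enum[name] and Enum(value) only coincide when a member's value equals its
# name, so writing .value and reading back with Enum[...] relies on that.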

tests/data/annotation_types/test_metrics.py

Lines changed: 4 additions & 4 deletions

@@ -47,10 +47,10 @@ def test_custom_scalar_metric(feature_name, subclass_name, aggregation):
     value = 0.5
     kwargs = {'aggregation': aggregation} if aggregation is not None else {}
     metric = ScalarMetric(metric_name="iou",
-                         value=value,
-                         feature_name=feature_name,
-                         subclass_name=subclass_name,
-                         **kwargs)
+                          value=value,
+                          feature_name=feature_name,
+                          subclass_name=subclass_name,
+                          **kwargs)
     assert metric.value == value
 
     label = Label(data=ImageData(uid="ckrmd9q8g000009mg6vej7hzg"),
