Skip to content

Commit e86770d

Browse files
author
Matt Sokoloff
committed
wip
1 parent 141a050 commit e86770d

File tree

1 file changed

+49
-33
lines changed

1 file changed

+49
-33
lines changed

labelbox/data/metrics/iou.py

Lines changed: 49 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
from itertools import product
66
import numpy as np
77
from collections import defaultdict
8-
8+
from ..annotation_types.feature import FeatureSchema
99
from ..annotation_types import (Label, ObjectAnnotation,
1010
ClassificationAnnotation, Mask, Geometry, Point,
1111
Line, Checklist, Text, Radio)
@@ -29,6 +29,7 @@ def subclass_ious(ground_truth: Label, prediction: Label) -> Dict[str, Optional[
2929
Otherwise this function is a bit naive and if you want something to specifically suite
3030
your use case then create a new function based off this one.
3131
"""
32+
identifying = get_identifying_key(prediction.annotations, ground_truth.annotations)
3233
prediction_annotations = _create_feature_lookup(prediction.annotations)
3334
ground_truth_annotations = _create_feature_lookup(ground_truth.annotations)
3435
feature_schemas = set(prediction_annotations.keys()).union(
@@ -48,15 +49,14 @@ def _create_classification_feature_lookup(annotations: Union[List[ObjectAnnotati
4849
classifications = [classification.answer.name or classification.answer.feature_schema_id for classification in classifications ]
4950
# TODO: create the lookup
5051
grouped_annotations[annotation.name or annotation.feature_schema_id].append(annotation)
51-
5252
return grouped_annotations
5353

5454

55-
ious = []
56-
for key in feature_schemas:
55+
#ious = []
56+
#for key in feature_schemas:
5757
# We shouldn't have any nones. Since the keys are generated by the presence of the object.
58-
prediction_annotations = prediction_annotations[key]
59-
ground_truth_annotations =
58+
#prediction_annotations = prediction_annotations[key]
59+
# #ground_truth_annotations =
6060

6161

6262

@@ -314,34 +314,7 @@ def _create_feature_lookup(
314314
315315
"""
316316
# TODO: Add a check here.
317-
"""
318-
319-
We don't want to select name for one and then feature_schema_id for the other.
320-
I think in another function we should check
321-
322-
Do we want to require that the user provides the feature name?
323-
We don't really want schema ids showing up in the metric names..
324-
325-
So:
326-
327-
Also add a freakin test.
328-
####
329-
all_schema_ids_defined_pred, all_names_defined_pred = check_references(pred_annotations)
330-
if (not all_schema_ids_defined and not all_names_defined_pred):
331-
raise ValueError("All data must have feature_schema_ids or names set")
332-
333317

334-
all_schema_ids_defined_gt, all_names_defined_gt = check_references(gt_annotations)
335-
336-
#Prefer name becuse the user will be able to know what it means
337-
#Schema id incase that doesn't exist..
338-
if (all_names_defined_pred and all_names_defined_gt):
339-
return 'name'
340-
elif all_schema_ids_defined_pred and all_schema_ids_defined_gt:
341-
return 'feature_schema_id'
342-
else:
343-
raise ValueError("Ground truth and prediction annotations must have set all name or feature ids. Otherwise there is no key to match on. Please update.")
344-
"""
345318
grouped_annotations = defaultdict(list)
346319
for annotation in annotations:
347320
grouped_annotations[annotation.name or
@@ -381,3 +354,46 @@ def _polygon_iou(poly1: Polygon, poly2: Polygon) -> float:
381354
def _mask_iou(mask1: np.ndarray, mask2: np.ndarray) -> float:
382355
"""Computes iou between two binary segmentation masks."""
383356
return np.sum(mask1 & mask2) / np.sum(mask1 | mask2)
357+
358+
359+
def all_have_key(annotations: List[FeatureSchema]) -> Tuple[bool, bool]:
    """Check which identifying keys are set on every annotation.

    We want to know whether all feature schemas have `name` set and whether
    all have `feature_schema_id` set, so a caller can pick one consistent
    key to match annotations on.

    Args:
        annotations: Feature schemas to inspect.
    Returns:
        Tuple of (all_schemas, all_names): True when every annotation has a
        non-None `feature_schema_id` / `name` respectively. An empty list
        vacuously yields (True, True).
    """
    all_schemas = all(a.feature_schema_id is not None for a in annotations)
    all_names = all(a.name is not None for a in annotations)
    return all_schemas, all_names
372+
373+
def get_identifying_key(pred_annotations, gt_annotations) -> str:
    """Pick the single key used to match predictions to ground truths.

    We must not select `name` for one side and `feature_schema_id` for the
    other. `name` is preferred because it is human-readable (schema ids
    would otherwise show up in metric names); `feature_schema_id` is the
    fallback when names are not consistently set.

    Args:
        pred_annotations: Prediction annotations (objects exposing `name`
            and `feature_schema_id`).
        gt_annotations: Ground truth annotations of the same shape.
    Returns:
        Either 'name' or 'feature_schema_id'.
    Raises:
        ValueError: If either side lacks a key that is set on all of its
            annotations, or if no single key is fully set on both sides.
    """
    pred_all_schema_ids = all(
        a.feature_schema_id is not None for a in pred_annotations)
    pred_all_names = all(a.name is not None for a in pred_annotations)
    if not pred_all_schema_ids and not pred_all_names:
        raise ValueError("All data must have feature_schema_ids or names set")

    gt_all_schema_ids = all(
        a.feature_schema_id is not None for a in gt_annotations)
    gt_all_names = all(a.name is not None for a in gt_annotations)

    # Prefer name because the user will recognize it; fall back to schema id
    # in case names aren't set everywhere.
    if pred_all_names and gt_all_names:
        return 'name'
    elif pred_all_schema_ids and gt_all_schema_ids:
        return 'feature_schema_id'
    else:
        raise ValueError(
            "Ground truth and prediction annotations must have set all name or feature ids. Otherwise there is no key to match on. Please update."
        )
399+

0 commit comments

Comments
 (0)