5
5
from itertools import product
6
6
import numpy as np
7
7
from collections import defaultdict
8
-
8
+ from .. annotation_types . feature import FeatureSchema
9
9
from ..annotation_types import (Label , ObjectAnnotation ,
10
10
ClassificationAnnotation , Mask , Geometry , Point ,
11
11
Line , Checklist , Text , Radio )
@@ -29,6 +29,7 @@ def subclass_ious(ground_truth: Label, prediction: Label) -> Dict[str, Optional[
29
29
Otherwise this function is a bit naive and if you want something to specifically suite
30
30
your use case then create a new function based off this one.
31
31
"""
32
+ identifying = get_identifying_key (prediction .annotations , ground_truth .annotations )
32
33
prediction_annotations = _create_feature_lookup (prediction .annotations )
33
34
ground_truth_annotations = _create_feature_lookup (ground_truth .annotations )
34
35
feature_schemas = set (prediction_annotations .keys ()).union (
@@ -48,15 +49,14 @@ def _create_classification_feature_lookup(annotations: Union[List[ObjectAnnotati
48
49
classifications = [classification .answer .name or classification .answer .feature_schema_id for classification in classifications ]
49
50
# TODO: create the lookup
50
51
grouped_annotations [annotation .name or annotation .feature_schema_id ].append (annotation )
51
-
52
52
return grouped_annotations
53
53
54
54
55
- ious = []
56
- for key in feature_schemas :
55
+ # ious = []
56
+ # for key in feature_schemas:
57
57
# We shouldn't have any nones. Since the keys are generated by the presence of the object.
58
- prediction_annotations = prediction_annotations [key ]
59
- ground_truth_annotations =
58
+ # prediction_annotations = prediction_annotations[key]
59
+ # # ground_truth_annotations =
60
60
61
61
62
62
@@ -314,34 +314,7 @@ def _create_feature_lookup(
314
314
315
315
"""
316
316
# TODO: Add a check here.
317
- """
318
-
319
- We don't want to select name for one and then feature_schema_id for the other.
320
- I think in another function we should check
321
-
322
- Do we want to require that the user provides the feature name?
323
- We don't really want schema ids showing up in the metric names..
324
-
325
- So:
326
-
327
- Also add a freakin test.
328
- ####
329
- all_schema_ids_defined_pred, all_names_defined_pred = check_references(pred_annotations)
330
- if (not all_schema_ids_defined and not all_names_defined_pred):
331
- raise ValueError("All data must have feature_schema_ids or names set")
332
-
333
317
334
- all_schema_ids_defined_gt, all_names_defined_gt = check_references(gt_annotations)
335
-
336
- #Prefer name becuse the user will be able to know what it means
337
- #Schema id incase that doesn't exist..
338
- if (all_names_defined_pred and all_names_defined_gt):
339
- return 'name'
340
- elif all_schema_ids_defined_pred and all_schema_ids_defined_gt:
341
- return 'feature_schema_id'
342
- else:
343
- raise ValueError("Ground truth and prediction annotations must have set all name or feature ids. Otherwise there is no key to match on. Please update.")
344
- """
345
318
grouped_annotations = defaultdict (list )
346
319
for annotation in annotations :
347
320
grouped_annotations [annotation .name or
@@ -381,3 +354,46 @@ def _polygon_iou(poly1: Polygon, poly2: Polygon) -> float:
381
354
def _mask_iou (mask1 : np .ndarray , mask2 : np .ndarray ) -> float :
382
355
"""Computes iou between two binary segmentation masks."""
383
356
return np .sum (mask1 & mask2 ) / np .sum (mask1 | mask2 )
357
+
358
+
359
def all_have_key(annotations: List["FeatureSchema"]) -> Tuple[bool, bool]:
    """Checks whether every annotation has its identifying fields set.

    We want to make sure that all feature schemas have names set or
    feature_schema_ids set so there is a consistent key to match on.

    Args:
        annotations: annotations (e.g. pred or gt) to inspect.
    Returns:
        Tuple of (all_schema_ids_set, all_names_set). Both are True for an
        empty list (vacuously true), matching the original accumulator loop.
    """
    all_schemas = all(annotation.feature_schema_id is not None
                      for annotation in annotations)
    all_names = all(annotation.name is not None for annotation in annotations)
    return all_schemas, all_names
372
+
373
def get_identifying_key(pred_annotations: List["FeatureSchema"],
                        gt_annotations: List["FeatureSchema"]) -> str:
    """Selects the attribute used to match predictions to ground truths.

    We don't want to select name for one set of annotations and then
    feature_schema_id for the other, so a single shared key is chosen here.

    Args:
        pred_annotations: prediction annotations.
        gt_annotations: ground truth annotations.
    Returns:
        Either 'name' or 'feature_schema_id' — the attribute that is set on
        every annotation in both lists.
    Raises:
        ValueError: if no single key is fully set across both lists.
    """
    all_schema_ids_defined_pred, all_names_defined_pred = all_have_key(
        pred_annotations)
    if (not all_schema_ids_defined_pred and not all_names_defined_pred):
        raise ValueError("All data must have feature_schema_ids or names set")

    all_schema_ids_defined_gt, all_names_defined_gt = all_have_key(
        gt_annotations)

    # Prefer name because the user will be able to know what it means;
    # fall back to schema id in case names aren't fully set.
    if (all_names_defined_pred and all_names_defined_gt):
        return 'name'
    elif all_schema_ids_defined_pred and all_schema_ids_defined_gt:
        return 'feature_schema_id'
    else:
        raise ValueError("Ground truth and prediction annotations must have set all name or feature ids. Otherwise there is no key to match on. Please update.")
399
+
0 commit comments