
Commit 4c54ed6

Merge pull request #318 from Labelbox/ms/rename-annotation-groups
remove annotation groups
2 parents: a058022 + 28ca7ac
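This commit renames the SDK's "annotation group" concept to "model run data row" across the example notebooks and the schema, keeping the old method names as deprecation shims. For callers the change is a drop-in rename; a minimal migration sketch, assuming `model_run` is an existing `labelbox` `ModelRun` obtained as in the notebooks below (the client setup is hypothetical):

```python
from labelbox import Client

# Hypothetical setup: substitute your own key, and obtain a ModelRun
# however your project already does (e.g. as in the notebooks below).
client = Client(api_key="<YOUR_API_KEY>")
model_run = ...  # an existing labelbox ModelRun

# Before this commit (still works, but now emits a warning):
# for idx, annotation_group in enumerate(model_run.annotation_groups()):
#     print(annotation_group.url)

# After this commit:
for idx, model_run_data_row in enumerate(model_run.model_run_data_rows()):
    if idx == 5:  # look at the first five rows only
        break
    print(model_run_data_row.url)
```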

File tree: 9 files changed (+118, −104 lines)


examples/integrations/detectron2/coco_object.ipynb

Lines changed: 3 additions & 3 deletions
@@ -645,10 +645,10 @@
 "outputs": [],
 "source": [
 "\n",
-"for idx, annotation_group in enumerate(model_run.annotation_groups()):\n",
+"for idx, model_run_data_row in enumerate(model_run.model_run_data_rows()):\n",
 "    if idx == 5:\n",
 "        break\n",
-"    print(annotation_group.url)"
+"    print(model_run_data_row.url)"
 ]
 },
 {
@@ -794,4 +794,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 5
-}
+}

examples/integrations/detectron2/coco_panoptic.ipynb

Lines changed: 43 additions & 43 deletions
@@ -86,21 +86,21 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"\u001B[K |████████████████████████████████| 49 kB 3.4 MB/s \n",
-"\u001B[K |████████████████████████████████| 74 kB 4.0 MB/s \n",
-"\u001B[K |████████████████████████████████| 145 kB 63.4 MB/s \n",
-"\u001B[K |████████████████████████████████| 130 kB 75.2 MB/s \n",
-"\u001B[K |████████████████████████████████| 745 kB 60.0 MB/s \n",
-"\u001B[K |████████████████████████████████| 743 kB 54.6 MB/s \n",
-"\u001B[K |████████████████████████████████| 636 kB 63.8 MB/s \n",
-"\u001B[K |████████████████████████████████| 112 kB 76.0 MB/s \n",
-"\u001B[?25h Building wheel for detectron2 (setup.py) ... \u001B[?25l\u001B[?25hdone\n",
-" Building wheel for panopticapi (setup.py) ... \u001B[?25l\u001B[?25hdone\n",
-" Building wheel for fvcore (setup.py) ... \u001B[?25l\u001B[?25hdone\n",
-" Building wheel for antlr4-python3-runtime (setup.py) ... \u001B[?25l\u001B[?25hdone\n",
-"\u001B[K |████████████████████████████████| 10.1 MB 12.1 MB/s \n",
-"\u001B[K |████████████████████████████████| 19.3 MB 1.2 MB/s \n",
-"\u001B[?25h Building wheel for labelbox (setup.py) ... \u001B[?25l\u001B[?25hdone\n"
+"\u001b[K |████████████████████████████████| 49 kB 3.4 MB/s \n",
+"\u001b[K |████████████████████████████████| 74 kB 4.0 MB/s \n",
+"\u001b[K |████████████████████████████████| 145 kB 63.4 MB/s \n",
+"\u001b[K |████████████████████████████████| 130 kB 75.2 MB/s \n",
+"\u001b[K |████████████████████████████████| 745 kB 60.0 MB/s \n",
+"\u001b[K |████████████████████████████████| 743 kB 54.6 MB/s \n",
+"\u001b[K |████████████████████████████████| 636 kB 63.8 MB/s \n",
+"\u001b[K |████████████████████████████████| 112 kB 76.0 MB/s \n",
+"\u001b[?25h Building wheel for detectron2 (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+" Building wheel for panopticapi (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+" Building wheel for fvcore (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+" Building wheel for antlr4-python3-runtime (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+"\u001b[K |████████████████████████████████| 10.1 MB 12.1 MB/s \n",
+"\u001b[K |████████████████████████████████| 19.3 MB 1.2 MB/s \n",
+"\u001b[?25h Building wheel for labelbox (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
 ]
 }
 ],
@@ -510,7 +510,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"\u001B[32m[09/15 12:47:46 d2.engine.defaults]: \u001B[0mModel:\n",
+"\u001b[32m[09/15 12:47:46 d2.engine.defaults]: \u001b[0mModel:\n",
 "PanopticFPN(\n",
 " (backbone): FPN(\n",
 " (fpn_lateral2): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))\n",
@@ -888,21 +888,21 @@
 " (predictor): Conv2d(128, 5, kernel_size=(1, 1), stride=(1, 1))\n",
 " )\n",
 ")\n",
-"\u001B[32m[09/15 12:47:46 d2.data.datasets.coco]: \u001B[0mLoaded 20 images in COCO format from /tmp/json_train_instance_annotations.json\n",
-"\u001B[5m\u001B[31mWARNING\u001B[0m \u001B[32m[09/15 12:47:46 d2.data.datasets.coco]: \u001B[0mDirectory /tmp/images/ and /tmp/segmentation has 25 and 20 files, respectively.\n",
-"\u001B[5m\u001B[31mWARNING\u001B[0m \u001B[32m[09/15 12:47:46 d2.data.datasets.coco]: \u001B[0mWill use their intersection of 20 files.\n",
-"\u001B[32m[09/15 12:47:46 d2.data.datasets.coco]: \u001B[0mLoaded 20 images with semantic segmentation from /tmp/images/\n",
-"\u001B[32m[09/15 12:47:46 d2.data.build]: \u001B[0mDistribution of instances among all 4 categories:\n",
-"\u001B[36m| category | #instances | category | #instances | category | #instances |\n",
+"\u001b[32m[09/15 12:47:46 d2.data.datasets.coco]: \u001b[0mLoaded 20 images in COCO format from /tmp/json_train_instance_annotations.json\n",
+"\u001b[5m\u001b[31mWARNING\u001b[0m \u001b[32m[09/15 12:47:46 d2.data.datasets.coco]: \u001b[0mDirectory /tmp/images/ and /tmp/segmentation has 25 and 20 files, respectively.\n",
+"\u001b[5m\u001b[31mWARNING\u001b[0m \u001b[32m[09/15 12:47:46 d2.data.datasets.coco]: \u001b[0mWill use their intersection of 20 files.\n",
+"\u001b[32m[09/15 12:47:46 d2.data.datasets.coco]: \u001b[0mLoaded 20 images with semantic segmentation from /tmp/images/\n",
+"\u001b[32m[09/15 12:47:46 d2.data.build]: \u001b[0mDistribution of instances among all 4 categories:\n",
+"\u001b[36m| category | #instances | category | #instances | category | #instances |\n",
 "|:----------:|:-------------|:----------:|:-------------|:----------:|:-------------|\n",
 "| car | 186 | person | 116 | truck | 5 |\n",
 "| bus | 5 | | | | |\n",
-"| total | 312 | | | | |\u001B[0m\n",
-"\u001B[32m[09/15 12:47:46 d2.data.dataset_mapper]: \u001B[0m[DatasetMapper] Augmentations used in training: [ResizeShortestEdge(short_edge_length=(640, 672, 704, 736, 768, 800), max_size=1333, sample_style='choice'), RandomFlip()]\n",
-"\u001B[32m[09/15 12:47:46 d2.data.build]: \u001B[0mUsing training sampler TrainingSampler\n",
-"\u001B[32m[09/15 12:47:46 d2.data.common]: \u001B[0mSerializing 20 elements to byte tensors and concatenating them all ...\n",
-"\u001B[32m[09/15 12:47:46 d2.data.common]: \u001B[0mSerialized dataset takes 1.18 MiB\n",
-"\u001B[5m\u001B[31mWARNING\u001B[0m \u001B[32m[09/15 12:47:46 d2.solver.build]: \u001B[0mSOLVER.STEPS contains values larger than SOLVER.MAX_ITER. These values will be ignored.\n"
+"| total | 312 | | | | |\u001b[0m\n",
+"\u001b[32m[09/15 12:47:46 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in training: [ResizeShortestEdge(short_edge_length=(640, 672, 704, 736, 768, 800), max_size=1333, sample_style='choice'), RandomFlip()]\n",
+"\u001b[32m[09/15 12:47:46 d2.data.build]: \u001b[0mUsing training sampler TrainingSampler\n",
+"\u001b[32m[09/15 12:47:46 d2.data.common]: \u001b[0mSerializing 20 elements to byte tensors and concatenating them all ...\n",
+"\u001b[32m[09/15 12:47:46 d2.data.common]: \u001b[0mSerialized dataset takes 1.18 MiB\n",
+"\u001b[5m\u001b[31mWARNING\u001b[0m \u001b[32m[09/15 12:47:46 d2.solver.build]: \u001b[0mSOLVER.STEPS contains values larger than SOLVER.MAX_ITER. These values will be ignored.\n"
 ]
 },
 {
@@ -921,17 +921,17 @@
 "Skip loading parameter 'sem_seg_head.predictor.weight' to the model due to incompatible shapes: (54, 128, 1, 1) in the checkpoint but (5, 128, 1, 1) in the model! You might want to double check if this is expected.\n",
 "Skip loading parameter 'sem_seg_head.predictor.bias' to the model due to incompatible shapes: (54,) in the checkpoint but (5,) in the model! You might want to double check if this is expected.\n",
 "Some model parameters or buffers are not found in the checkpoint:\n",
-"\u001B[34mroi_heads.box_predictor.bbox_pred.{bias, weight}\u001B[0m\n",
-"\u001B[34mroi_heads.box_predictor.cls_score.{bias, weight}\u001B[0m\n",
-"\u001B[34mroi_heads.mask_head.predictor.{bias, weight}\u001B[0m\n",
-"\u001B[34msem_seg_head.predictor.{bias, weight}\u001B[0m\n"
+"\u001b[34mroi_heads.box_predictor.bbox_pred.{bias, weight}\u001b[0m\n",
+"\u001b[34mroi_heads.box_predictor.cls_score.{bias, weight}\u001b[0m\n",
+"\u001b[34mroi_heads.mask_head.predictor.{bias, weight}\u001b[0m\n",
+"\u001b[34msem_seg_head.predictor.{bias, weight}\u001b[0m\n"
 ]
 },
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"\u001B[32m[09/15 12:47:51 d2.engine.train_loop]: \u001B[0mStarting training from iteration 0\n"
+"\u001b[32m[09/15 12:47:51 d2.engine.train_loop]: \u001b[0mStarting training from iteration 0\n"
 ]
 },
 {
@@ -951,13 +951,13 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"\u001B[32m[09/15 12:48:02 d2.utils.events]: \u001B[0m eta: 0:00:26 iter: 19 total_loss: 6.528 loss_sem_seg: 2.82 loss_rpn_cls: 0.1179 loss_rpn_loc: 0.493 loss_cls: 1.457 loss_box_reg: 0.8083 loss_mask: 0.6864 time: 0.4074 data_time: 0.2200 lr: 0.00019081 max_mem: 1857M\n",
-"\u001B[32m[09/15 12:48:11 d2.utils.events]: \u001B[0m eta: 0:00:21 iter: 39 total_loss: 2.956 loss_sem_seg: 0.3267 loss_rpn_cls: 0.09758 loss_rpn_loc: 0.447 loss_cls: 0.6932 loss_box_reg: 0.7776 loss_mask: 0.5941 time: 0.4114 data_time: 0.0793 lr: 0.00039061 max_mem: 1936M\n",
-"\u001B[32m[09/15 12:48:19 d2.utils.events]: \u001B[0m eta: 0:00:14 iter: 59 total_loss: 2.369 loss_sem_seg: 0.2144 loss_rpn_cls: 0.09306 loss_rpn_loc: 0.4839 loss_cls: 0.5 loss_box_reg: 0.7751 loss_mask: 0.4428 time: 0.4132 data_time: 0.0960 lr: 0.00059041 max_mem: 1939M\n",
-"\u001B[32m[09/15 12:48:27 d2.utils.events]: \u001B[0m eta: 0:00:07 iter: 79 total_loss: 2.128 loss_sem_seg: 0.1537 loss_rpn_cls: 0.05618 loss_rpn_loc: 0.4794 loss_cls: 0.3576 loss_box_reg: 0.6835 loss_mask: 0.3823 time: 0.4150 data_time: 0.1021 lr: 0.00079021 max_mem: 1939M\n",
-"\u001B[32m[09/15 12:48:37 d2.utils.events]: \u001B[0m eta: 0:00:00 iter: 99 total_loss: 2.007 loss_sem_seg: 0.119 loss_rpn_cls: 0.07207 loss_rpn_loc: 0.4316 loss_cls: 0.3953 loss_box_reg: 0.6027 loss_mask: 0.3536 time: 0.4194 data_time: 0.1094 lr: 0.00099001 max_mem: 1939M\n",
-"\u001B[32m[09/15 12:48:37 d2.engine.hooks]: \u001B[0mOverall training speed: 98 iterations in 0:00:41 (0.4194 s / it)\n",
-"\u001B[32m[09/15 12:48:37 d2.engine.hooks]: \u001B[0mTotal training time: 0:00:42 (0:00:01 on hooks)\n"
+"\u001b[32m[09/15 12:48:02 d2.utils.events]: \u001b[0m eta: 0:00:26 iter: 19 total_loss: 6.528 loss_sem_seg: 2.82 loss_rpn_cls: 0.1179 loss_rpn_loc: 0.493 loss_cls: 1.457 loss_box_reg: 0.8083 loss_mask: 0.6864 time: 0.4074 data_time: 0.2200 lr: 0.00019081 max_mem: 1857M\n",
+"\u001b[32m[09/15 12:48:11 d2.utils.events]: \u001b[0m eta: 0:00:21 iter: 39 total_loss: 2.956 loss_sem_seg: 0.3267 loss_rpn_cls: 0.09758 loss_rpn_loc: 0.447 loss_cls: 0.6932 loss_box_reg: 0.7776 loss_mask: 0.5941 time: 0.4114 data_time: 0.0793 lr: 0.00039061 max_mem: 1936M\n",
+"\u001b[32m[09/15 12:48:19 d2.utils.events]: \u001b[0m eta: 0:00:14 iter: 59 total_loss: 2.369 loss_sem_seg: 0.2144 loss_rpn_cls: 0.09306 loss_rpn_loc: 0.4839 loss_cls: 0.5 loss_box_reg: 0.7751 loss_mask: 0.4428 time: 0.4132 data_time: 0.0960 lr: 0.00059041 max_mem: 1939M\n",
+"\u001b[32m[09/15 12:48:27 d2.utils.events]: \u001b[0m eta: 0:00:07 iter: 79 total_loss: 2.128 loss_sem_seg: 0.1537 loss_rpn_cls: 0.05618 loss_rpn_loc: 0.4794 loss_cls: 0.3576 loss_box_reg: 0.6835 loss_mask: 0.3823 time: 0.4150 data_time: 0.1021 lr: 0.00079021 max_mem: 1939M\n",
+"\u001b[32m[09/15 12:48:37 d2.utils.events]: \u001b[0m eta: 0:00:00 iter: 99 total_loss: 2.007 loss_sem_seg: 0.119 loss_rpn_cls: 0.07207 loss_rpn_loc: 0.4316 loss_cls: 0.3953 loss_box_reg: 0.6027 loss_mask: 0.3536 time: 0.4194 data_time: 0.1094 lr: 0.00099001 max_mem: 1939M\n",
+"\u001b[32m[09/15 12:48:37 d2.engine.hooks]: \u001b[0mOverall training speed: 98 iterations in 0:00:41 (0.4194 s / it)\n",
+"\u001b[32m[09/15 12:48:37 d2.engine.hooks]: \u001b[0mTotal training time: 0:00:42 (0:00:01 on hooks)\n"
 ]
 }
 ],
@@ -1395,10 +1395,10 @@
 ],
 "source": [
 "\n",
-"for idx, annotation_group in enumerate(model_run.annotation_groups()):\n",
+"for idx, model_run_data_row in enumerate(model_run.model_run_data_rows()):\n",
 "    if idx == 5:\n",
 "        break\n",
-"    print(annotation_group.url)"
+"    print(model_run_data_row.url)"
 ]
 },
 {
@@ -1564,4 +1564,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 5
-}
+}
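Every hunk in this notebook is the same mechanical change: the ANSI escape `\u001B` in saved cell outputs becomes `\u001b`. Both spellings decode to the same ESC (0x1B) character in JSON and in Python, so the rendered outputs are unchanged; the casing was presumably normalized when the notebook was re-saved for the rename. A quick check:

```python
import json

# Upper- and lower-case hex digits in a \uXXXX escape decode identically.
assert "\u001B" == "\u001b" == "\x1b"
assert json.loads('"\\u001B"') == json.loads('"\\u001b"')
print("escape casing is cosmetic only")
```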

examples/model_diagnostics/custom_metrics_demo.ipynb

Lines changed: 2 additions & 2 deletions
@@ -566,10 +566,10 @@
 },
 "outputs": [],
 "source": [
-"for idx, annotation_group in enumerate(lb_model_run.annotation_groups()):\n",
+"for idx, model_run_data_row in enumerate(lb_model_run.model_run_data_rows()):\n",
 "    if idx == 5:\n",
 "        break\n",
-"    print(annotation_group.url)"
+"    print(model_run_data_row.url)"
 ]
 },
 {

examples/model_diagnostics/model_diagnostics_demo.ipynb

Lines changed: 3 additions & 3 deletions
@@ -527,10 +527,10 @@
 },
 "outputs": [],
 "source": [
-"for idx, annotation_group in enumerate(lb_model_run.annotation_groups()):\n",
+"for idx, model_run_data_row in enumerate(lb_model_run.model_run_data_rows()):\n",
 "    if idx == 5:\n",
 "        break\n",
-"    print(annotation_group.url)"
+"    print(model_run_data_row.url)"
 ]
 },
 {
@@ -569,4 +569,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 5
-}
+}

examples/model_diagnostics/model_diagnostics_guide.ipynb

Lines changed: 3 additions & 3 deletions
@@ -392,10 +392,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"for idx, annotation_group in enumerate(lb_model_run.annotation_groups()):\n",
+"for idx, model_run_data_row in enumerate(lb_model_run.model_run_data_rows()):\n",
 "    if idx == 5:\n",
 "        break\n",
-"    print(annotation_group.url)"
+"    print(model_run_data_row.url)"
 ]
 }
 ],
@@ -426,4 +426,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 5
-}
+}

labelbox/schema/model_run.py

Lines changed: 20 additions & 7 deletions
@@ -2,6 +2,7 @@
 from pathlib import Path
 import os
 import time
+import warnings

 from labelbox.pagination import PaginatedCollection
 from labelbox.schema.annotation_import import MEAPredictionImport
@@ -131,18 +132,24 @@ def add_predictions(
         raise ValueError(
             f'Invalid predictions given of type: {type(predictions)}')

-    def annotation_groups(self):
+    def model_run_data_rows(self):
         query_str = """query modelRunPyApi($modelRunId: ID!, $from : String, $first: Int){
             annotationGroups(where: {modelRunId: {id: $modelRunId}}, after: $from, first: $first)
             {nodes{%s},pageInfo{endCursor}}
             }
-        """ % (results_query_part(AnnotationGroup))
+        """ % (results_query_part(ModelRunDataRow))
         return PaginatedCollection(
             self.client, query_str, {'modelRunId': self.uid},
             ['annotationGroups', 'nodes'],
-            lambda client, res: AnnotationGroup(client, self.model_id, res),
+            lambda client, res: ModelRunDataRow(client, self.model_id, res),
             ['annotationGroups', 'pageInfo', 'endCursor'])

+    def annotation_groups(self):
+        warnings.warn(
+            "`ModelRun.annotation_groups` is being deprecated in favor of `ModelRun.model_run_data_rows`"
+        )
+        return self.model_run_data_rows()
+
     def delete(self):
         """ Deletes specified model run.

@@ -154,11 +161,11 @@ def delete(self):
         deleteModelRuns(where: {ids: [$%s]})}""" % (ids_param, ids_param)
         self.client.execute(query_str, {ids_param: str(self.uid)})

-    def delete_annotation_groups(self, data_row_ids):
-        """ Deletes annotation groups by data row ids for a model run.
+    def delete_model_run_data_rows(self, data_row_ids):
+        """ Deletes data rows from model runs.

         Args:
-            data_row_ids (list): List of data row ids to delete annotation groups.
+            data_row_ids (list): List of data row ids to delete from the model run.
         Returns:
             Query execution success.
         """
@@ -173,8 +180,14 @@ def delete_annotation_groups(self, data_row_ids):
             data_row_ids_param: data_row_ids
         })

+    def delete_annotation_groups(self, data_row_ids):
+        warnings.warn(
+            "`ModelRun.delete_annotation_groups` is being deprecated in favor of `ModelRun.delete_model_run_data_rows`"
+        )
+        return self.delete_model_run_data_rows(data_row_ids)
+

-class AnnotationGroup(DbObject):
+class ModelRunDataRow(DbObject):
     label_id = Field.String("label_id")
     model_run_id = Field.String("model_run_id")
     data_row = Relationship.ToOne("DataRow", False, cache=True)
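The schema change follows a deprecate-and-delegate pattern: `model_run_data_rows` and `delete_model_run_data_rows` own the implementations (the GraphQL query still targets the server-side `annotationGroups` field), while the old method names forward to them after calling `warnings.warn`. Two details worth noting: with no category argument, `warnings.warn` emits a `UserWarning` rather than the more conventional `DeprecationWarning`, and the `AnnotationGroup` class itself is renamed to `ModelRunDataRow` with no compatibility alias, so code importing the old class name breaks. A sketch of how the deprecated alias behaves (assuming `model_run` is an existing `ModelRun`; the capture scaffolding is illustrative only):

```python
import warnings

model_run = ...  # an existing labelbox ModelRun

# The deprecated alias still works, but warns and delegates to the new name.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    rows = model_run.annotation_groups()  # calls model_run_data_rows()

# The warning message points callers at the replacement method.
assert any("model_run_data_rows" in str(w.message) for w in caught)
for model_run_data_row in rows:
    print(model_run_data_row.url)
```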

tests/integration/annotation_import/conftest.py

Lines changed: 3 additions & 3 deletions
@@ -326,9 +326,9 @@ def model_run(rand_gen, model):


 @pytest.fixture
-def model_run_annotation_groups(client, configured_project,
-                                annotation_submit_fn, model_run_predictions,
-                                model_run):
+def model_run_with_model_run_data_rows(client, configured_project,
+                                       annotation_submit_fn,
+                                       model_run_predictions, model_run):
     configured_project.enable_model_assisted_labeling()

     upload_task = MALPredictionImport.create_from_objects(
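Since pytest resolves fixtures by parameter name, any test still requesting the old `model_run_annotation_groups` fixture now fails with a fixture-lookup error. A hypothetical consuming test (the excerpt does not show what the fixture yields, so the body assumes it returns the populated `ModelRun`):

```python
def test_model_run_data_row_urls(model_run_with_model_run_data_rows):
    # Assumption: the fixture returns the populated ModelRun.
    model_run = model_run_with_model_run_data_rows
    for model_run_data_row in model_run.model_run_data_rows():
        assert model_run_data_row.url
```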
