Skip to content

Huge optimization to annotation import library #1727

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Jul 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion libs/labelbox/tests/data/annotation_import/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -787,7 +787,6 @@ def rectangle_inference_with_confidence(prediction_id_mapping):
if "rectangle_nested" not in feature:
continue
rectangle = feature["rectangle_nested"].copy()
print(rectangle)
rectangle.update({
"bbox": {
"top": 48,
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
import itertools
import uuid
from labelbox.schema.annotation_import import AnnotationImport, MALPredictionImport
from labelbox.schema.media_type import MediaType
import pytest
from unittest.mock import patch


@patch('labelbox.schema.annotation_import.ANNOTATION_PER_LABEL_LIMIT', 1)
def test_above_annotation_limit_on_single_import_on_single_data_row(annotations_by_media_type):
    """Duplicating one annotation on a single data row while the per-label
    limit is patched to 1 must make validation raise ValueError.
    """
    flattened = itertools.chain.from_iterable(
        annotations_by_media_type[MediaType.Image])
    annotations_ndjson = list(flattened)
    target_row_id = annotations_ndjson[0]["dataRow"]["id"]

    # Keep only the bbox annotations attached to the first data row.
    matching = []
    for annotation in annotations_ndjson:
        if annotation["dataRow"]["id"] == target_row_id and "bbox" in annotation:
            matching.append(annotation)

    # Two copies of the same annotation exceed the patched limit of 1.
    duplicated = [matching[0], matching[0]]
    with pytest.raises(ValueError):
        AnnotationImport._validate_data_rows(duplicated)


@patch('labelbox.schema.annotation_import.ANNOTATION_PER_LABEL_LIMIT', 1)
def test_above_annotation_limit_divided_among_different_rows(annotations_by_media_type):
    """Exceeding the per-label annotation limit (patched to 1) must raise
    ValueError even when the annotations are split across two data rows.
    """
    annotations_ndjson = list(
        itertools.chain.from_iterable(
            annotations_by_media_type[MediaType.Image]))
    data_row_id = annotations_ndjson[0]["dataRow"]["id"]

    first_data_row_annotation = [
        annotation for annotation in annotations_ndjson
        if annotation["dataRow"]["id"] == data_row_id and "bbox" in annotation
    ][0]

    # BUG FIX: the original used `==` (a no-op comparison) instead of `=`,
    # so the second annotation kept the first data row's id.  Also,
    # dict.copy() is shallow, so the nested "dataRow" dict must be replaced
    # rather than mutated, or the first annotation's id would change too.
    second_data_row_annotation = first_data_row_annotation.copy()
    second_data_row_annotation["dataRow"] = {
        **first_data_row_annotation["dataRow"],
        "id": "data_row_id_2",
    }

    # Each of the two data rows carries 2 annotations: over the limit of 1.
    with pytest.raises(ValueError):
        AnnotationImport._validate_data_rows(
            [first_data_row_annotation, second_data_row_annotation] * 2)
Original file line number Diff line number Diff line change
Expand Up @@ -219,110 +219,3 @@ def test_delete(module_project, predictions):
bulk_import_request.delete()
all_import_requests = module_project.bulk_import_requests()
assert len(list(all_import_requests)) == 0


def _create_label(row_index, data_row_uids, label_name_ids=('bbox',)):
    """Build a one-annotation video Label for import tests.

    Args:
        row_index: index used to round-robin over both the label-name pool
            and the available data rows.
        data_row_uids: uids of the data rows a label may be attached to.
        label_name_ids: pool of tool names cycled through by row_index.
            (An immutable tuple default replaces the original mutable-list
            default — same values, no shared-mutable-default pitfall.)

    Returns:
        A Label carrying a single keyframe bounding-box VideoObjectAnnotation
        (frame 4, segment 0, fixed 5x5 rectangle).
    """
    label_name = label_name_ids[row_index % len(label_name_ids)]
    data_row_uid = data_row_uids[row_index % len(data_row_uids)]
    return Label(data=GenericDataRowData(uid=data_row_uid),
                 annotations=[
                     VideoObjectAnnotation(name=label_name,
                                           keyframe=True,
                                           frame=4,
                                           segment_index=0,
                                           value=Rectangle(
                                               start=Point(x=100, y=100),
                                               end=Point(x=105, y=105),
                                           ))
                 ])


@pytest.mark.parametrize("configured_project", [MediaType.Video], indirect = True)
@patch('labelbox.schema.annotation_import.ANNOTATION_PER_LABEL_LIMIT', 20)
def test_below_annotation_limit_on_single_data_row(
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Remove this; we are constantly testing for this case every time we do a label import.

client, configured_project, video_data, rand_gen):
_, data_row_uids = video_data
configured_project.create_batch(
rand_gen(str),
data_row_uids, # sample of data row objects
5 # priority between 1(Highest) - 5(lowest)
)
labels = [_create_label(index, data_row_uids) for index in range(19)]
import_annotations = MALPredictionImport.create_from_objects(
client=client,
project_id=configured_project.uid,
name=f"import {str(uuid.uuid4())}",
predictions=labels)
import_annotations.wait_until_done()

assert import_annotations.errors == []


@pytest.mark.parametrize("configured_project", [MediaType.Video], indirect = True)
@patch('labelbox.schema.annotation_import.ANNOTATION_PER_LABEL_LIMIT', 20)
def test_above_annotation_limit_on_single_label_on_single_data_row(
        client, configured_project, video_data, rand_gen):
    """Importing 21 annotations while the per-label limit is patched to 20
    must raise ValueError.

    Integration test: talks to a live project via the client fixture.
    """
    _, data_row_uids = video_data

    configured_project.create_batch(
        rand_gen(str),
        data_row_uids,  # sample of data row objects
        5  # priority between 1(Highest) - 5(lowest)
    )
    # 21 labels: one over the patched limit of 20.
    # NOTE(review): video_data appears to supply a single data row, so all 21
    # labels land on the same row — confirm against the fixture definition.
    labels = [_create_label(index, data_row_uids) for index in range(21)]
    with pytest.raises(ValueError):
        import_annotations = MALPredictionImport.create_from_objects(
            client=client,
            project_id=configured_project.uid,
            name=f"import {str(uuid.uuid4())}",
            predictions=labels)
        # Only reached if create_from_objects did not raise.
        import_annotations.wait_until_done()

@pytest.mark.parametrize("configured_project", [MediaType.Video], indirect = True)
@patch('labelbox.schema.annotation_import.ANNOTATION_PER_LABEL_LIMIT', 20)
def test_above_annotation_limit_divided_among_different_rows(
        client, configured_project, video_data_100_rows,
        rand_gen):
    """21 annotations spread round-robin over 100 data rows stay below the
    patched per-label limit of 20, so the import must succeed.

    Integration test: talks to a live project via the client fixture.
    """
    _, data_row_uids = video_data_100_rows

    configured_project.create_batch(
        rand_gen(str),
        data_row_uids,  # sample of data row objects
        5  # priority between 1(Highest) - 5(lowest)
    )
    # _create_label distributes labels across rows via index % len(rows),
    # so no single row exceeds the limit.
    labels = [_create_label(index, data_row_uids) for index in range(21)]

    import_annotations = MALPredictionImport.create_from_objects(
        client=client,
        project_id=configured_project.uid,
        name=f"import {str(uuid.uuid4())}",
        predictions=labels)

    # NOTE(review): errors is read without wait_until_done() here, unlike the
    # sibling tests — confirm whether the import is guaranteed complete.
    assert import_annotations.errors == []


@pytest.mark.parametrize("configured_project", [MediaType.Video], indirect = True)
@patch('labelbox.schema.annotation_import.ANNOTATION_PER_LABEL_LIMIT', 20)
def test_above_annotation_limit_divided_among_labels_on_one_row(
Copy link
Collaborator Author

@Gabefire Gabefire Jul 13, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This seems to do almost the same thing as the test above, so it was removed. I can add it back as a unit test instead if wanted.

client, configured_project, video_data, rand_gen):
_, data_row_uids = video_data

configured_project.create_batch(
rand_gen(str),
data_row_uids, # sample of data row objects
5 # priority between 1(Highest) - 5(lowest)
)
labels = [
_create_label(index,
data_row_uids,
label_name_ids=['bbox', 'bbox_tool_with_nested_text'])
for index in range(21)
]

import_annotations = MALPredictionImport.create_from_objects(
client=client,
project_id=configured_project.uid,
name=f"import {str(uuid.uuid4())}",
predictions=labels)

assert import_annotations.errors == []