Swapped/marked remaining V1 methods in tests #1679

Merged 3 commits on Jun 26, 2024
6 changes: 3 additions & 3 deletions libs/labelbox/tests/data/annotation_import/conftest.py
@@ -608,7 +608,7 @@ def get_data_row_id(indx=0):

yield get_data_row_id


+#TODO: Switch to setup_editor, setup might get removed in later releases
@pytest.fixture
def configured_project(client, initial_dataset, ontology, rand_gen, image_url):
dataset = initial_dataset
@@ -642,7 +642,7 @@ def configured_project(client, initial_dataset, ontology, rand_gen, image_url):

project.delete()


+#TODO: Switch to setup_editor, setup might get removed in later releases
@pytest.fixture
def project_with_ontology(client, configured_project, ontology, rand_gen):
project = client.create_project(name=rand_gen(str),
@@ -657,7 +657,7 @@ def project_with_ontology(client, configured_project, ontology, rand_gen):

project.delete()


+#TODO: Switch to setup_editor, setup might get removed in later releases
@pytest.fixture
def configured_project_pdf(client, ontology, rand_gen, pdf_url):
project = client.create_project(name=rand_gen(str),
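The TODO comments above flag fixtures that still go through the legacy project setup path. A minimal sketch of what a setup_editor based fixture could look like, assuming the ontology fixture yields a labelbox Ontology object (fixture and variable names here are illustrative, not part of this PR):

```python
import pytest
from labelbox import MediaType


@pytest.fixture
def configured_project_v2(client, ontology, rand_gen):
    # Hypothetical replacement fixture: `ontology` is assumed to be a labelbox
    # Ontology object rather than a raw dict, since setup_editor expects one.
    project = client.create_project(name=rand_gen(str),
                                    media_type=MediaType.Image)
    # Connect the project to the ontology without the legacy setup() call.
    project.setup_editor(ontology)
    yield project
    project.delete()
```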
@@ -1,6 +1,6 @@
from unittest.mock import patch
import uuid
-from labelbox import parser
+from labelbox import parser, Project
import pytest
import random
from labelbox.data.annotation_types.annotation import ObjectAnnotation
@@ -217,20 +217,20 @@ def test_delete(configured_project, predictions):
assert len(list(all_import_requests)) == 0


-def test_pdf_mal_bbox(client, configured_project_pdf):
+def test_pdf_mal_bbox(client, configured_project_pdf:Project):
"""
tests pdf mal against only a bbox annotation
"""
annotations = []
num_annotations = 1

-for row in configured_project_pdf.export_queued_data_rows():
+for data_row_id in configured_project_pdf.data_row_ids:
for _ in range(num_annotations):
annotations.append({
"uuid": str(uuid.uuid4()),
"name": "bbox",
"dataRow": {
"id": row['id']
"id": data_row_id
},
"bbox": {
"top": round(random.uniform(0, 300), 2),
@@ -247,14 +247,14 @@ def test_pdf_mal_bbox(client, configured_project_pdf):
'answer': 'the answer to the text question',
'uuid': 'fc1913c6-b735-4dea-bd25-c18152a4715f',
"dataRow": {
"id": row['id']
"id": data_row_id
}
},
{
'name': 'checklist',
'uuid': '9d7b2e57-d68f-4388-867a-af2a9b233719',
"dataRow": {
"id": row['id']
"id": data_row_id
},
'answer': [{
'name': 'option1'
@@ -269,14 +269,14 @@ def test_pdf_mal_bbox(client, configured_project_pdf):
},
'uuid': 'ad60897f-ea1a-47de-b923-459339764921',
"dataRow": {
"id": row['id']
"id": data_row_id
}
},
{ # added with the intention of ensuring we allow page: 0
"uuid": str(uuid.uuid4()),
"name": "bbox",
"dataRow": {
"id": row['id']
"id": data_row_id
},
"bbox": {
"top": round(random.uniform(0, 300), 2),
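For context, the annotations list assembled in this test is a plain NDJSON payload for model-assisted labeling. A minimal sketch of how such a payload is typically submitted and verified, assuming the MALPredictionImport API from labelbox.schema.annotation_import (everything apart from that import class is illustrative):

```python
import uuid

from labelbox.schema.annotation_import import MALPredictionImport

# `client`, `project` and `annotations` are assumed to come from the fixtures
# and test code above.
import_job = MALPredictionImport.create_from_objects(
    client=client,
    project_id=project.uid,
    name=f"mal-import-{uuid.uuid4()}",
    predictions=annotations,
)
import_job.wait_until_done()
# An empty error list means every NDJSON annotation was accepted.
assert import_job.errors == []
```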
@@ -145,6 +145,7 @@ def create_data_row_for_project(project, dataset, data_row_ndjson, batch_name):


# TODO: Add VideoData. Currently label import job finishes without errors but project.export_labels() returns empty list.
+@pytest.mark.export_v1("tests used export v1 method, v2 test -> test_import_data_types_v2 below")
@pytest.mark.parametrize(
"data_type_class",
[
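The export_v1 marker added above tags tests that still rely on export v1 methods. A generic sketch of how such a marker is usually registered and conditionally skipped in pytest; this is an assumed pattern, not necessarily how this repository wires it:

```python
# conftest.py (illustrative): register the custom marker and optionally skip v1-only tests.
import os

import pytest


def pytest_configure(config):
    # Registering the marker avoids PytestUnknownMarkWarning at collection time.
    config.addinivalue_line(
        "markers",
        "export_v1(reason): test exercises the deprecated export v1 methods")


def pytest_collection_modifyitems(config, items):
    # Hypothetical opt-out: skip v1-only tests when SKIP_EXPORT_V1 is set.
    if os.environ.get("SKIP_EXPORT_V1"):
        skip_v1 = pytest.mark.skip(reason="export v1 methods are deprecated")
        for item in items:
            if "export_v1" in item.keywords:
                item.add_marker(skip_v1)
```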
@@ -1,6 +1,7 @@
import uuid
from labelbox import parser
import pytest
+from labelbox import ModelRun

from labelbox.schema.annotation_import import AnnotationImportState, MEAPredictionImport
from labelbox.data.serialization import NDJsonConverter
@@ -107,29 +108,38 @@ def test_create_from_objects_all_project_labels(
annotation_import.status_file_url)


-def test_model_run_project_labels(model_run_with_all_project_labels,
+def test_model_run_project_labels(model_run_with_all_project_labels: ModelRun,
model_run_predictions):
model_run = model_run_with_all_project_labels
-# TODO: Move to export_v2
-model_run_exported_labels = model_run.export_labels(download=True)

+export_task = model_run.export()
+export_task.wait_till_done()
+stream = export_task.get_buffered_stream()

+# export to a list of (data_row_id, label) tuples; this adapts the test to export v2, since data row ids are not at the label level in export v2
+model_run_exported_labels = [(
+data_row.json["data_row"]["id"],
+data_row.json["experiments"][model_run.model_id]["runs"][model_run.uid]["labels"][0])
+for data_row in stream]

labels_indexed_by_schema_id = {}

-for label in model_run_exported_labels:
+for data_row_id, label in model_run_exported_labels:
# assuming the exported array of label 'objects' has only one label per data row, as is usually the case when there are no label revisions
-schema_id = label['Label']['objects'][0]['schemaId']
-labels_indexed_by_schema_id[schema_id] = label
+schema_id = label["annotations"]["objects"][0]["feature_schema_id"]
+labels_indexed_by_schema_id[schema_id] = {"label": label, "data_row_id": data_row_id}

assert (len(
labels_indexed_by_schema_id.keys())) == len(model_run_predictions)

# making sure the labels in this model run are all labels uploaded to the project
# by comparing some 'immutable' attributes
for expected_label in model_run_predictions:
-schema_id = expected_label['schemaId']
+schema_id = expected_label["schemaId"]
actual_label = labels_indexed_by_schema_id[schema_id]
-assert actual_label['Label']['objects'][0]['title'] == expected_label[
+assert actual_label["label"]["annotations"]["objects"][0]["name"] == expected_label[
'name']
-assert actual_label['DataRow ID'] == expected_label['dataRow']['id']
+assert actual_label["data_row_id"] == expected_label["dataRow"]["id"]


def test_create_from_label_objects(model_run_with_data_rows,
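The rewritten test walks the nested export v2 layout for a model run (data_row, then experiments, runs, labels). A small helper capturing the same traversal; the key paths mirror the ones used in the test, everything else is illustrative:

```python
from typing import Dict, List


def labels_by_data_row(stream, model_id: str, model_run_uid: str) -> Dict[str, List[dict]]:
    """Group exported v2 labels by data row id for a single model run.

    `stream` is the iterable returned by export_task.get_buffered_stream();
    each buffered item exposes the exported row as `.json`.
    """
    grouped: Dict[str, List[dict]] = {}
    for buffered_row in stream:
        row = buffered_row.json
        data_row_id = row["data_row"]["id"]
        run = row["experiments"][model_id]["runs"][model_run_uid]
        grouped[data_row_id] = run["labels"]
    return grouped
```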
@@ -119,7 +119,7 @@ def test_model_run_upsert_data_rows_with_existing_labels(
assert n_data_rows == len(
list(model_run_with_data_rows.model_run_data_rows()))


+@pytest.mark.export_v1("tests used export v1 method, v2 test -> test_import_data_types_v2 below")
def test_model_run_export_labels(model_run_with_data_rows):
labels = model_run_with_data_rows.export_labels(download=True)
assert len(labels) == 3
@@ -56,9 +56,14 @@ def test_send_to_annotate_from_model(client, configured_project,
destination_batches = list(destination_project.batches())
assert len(destination_batches) == 1

-destination_data_rows = list(destination_batches[0].export_data_rows())
+export_task = destination_project.export()
+export_task.wait_till_done()
+stream = export_task.get_buffered_stream()

+destination_data_rows = [dr.json["data_row"]["id"] for dr in stream]

assert len(destination_data_rows) == len(data_row_ids)
-assert all([dr.uid in data_row_ids for dr in destination_data_rows])
+assert all([dr in data_row_ids for dr in destination_data_rows])

# Since data rows were added to a review queue, predictions should be imported into the project as labels
destination_project_labels = (list(destination_project.labels()))
@@ -313,7 +313,7 @@ def test_batch(
},
] * 2)
task.wait_till_done()
-data_rows = [dr.uid for dr in list(dataset.export_data_rows())]
+data_rows = [result["id"] for result in task.result]
batch_one = f"batch one {uuid.uuid4()}"

# This test creates two batches; only one batch should be exported
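The replacement reads the created data row ids straight from the upload task instead of re-exporting the dataset. A minimal sketch of that pattern, with a placeholder image URL; the payload keys follow the SDK's create_data_rows format and `dataset` is assumed to be a labelbox Dataset fixture:

```python
import uuid

IMAGE_URL = "https://example.com/image.jpg"  # placeholder

task = dataset.create_data_rows([
    {"row_data": IMAGE_URL, "global_key": f"gk-{uuid.uuid4()}"},
    {"row_data": IMAGE_URL, "global_key": f"gk-{uuid.uuid4()}"},
])
task.wait_till_done()

# task.result holds one dict per created data row, including its server-side id.
data_row_ids = [result["id"] for result in task.result]
assert len(data_row_ids) == 2
```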
10 changes: 7 additions & 3 deletions libs/labelbox/tests/data/test_data_row_metadata.py
@@ -89,9 +89,13 @@ def test_export_empty_metadata(client, configured_project_with_label,
wait_for_data_row_processing):
project, _, data_row, _ = configured_project_with_label
data_row = wait_for_data_row_processing(client, data_row)
-labels = project.label_generator()
-label = next(labels)
-assert label.data.metadata == []

+export_task = project.export(params={"metadata_fields": True})
+export_task.wait_till_done()
+stream = export_task.get_buffered_stream()
+data_row = [data_row.json for data_row in stream][0]

+assert data_row["metadata_fields"] == []


def test_bulk_export_datarow_metadata(data_row, mdo: DataRowMetadataOntology):
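The new assertion takes the first row from the buffered stream, which works for a single-data-row project. When a project holds several data rows, indexing the stream by data row id keeps the check deterministic; a short sketch under the assumption that `project` and `data_row` come from the same fixtures as the test:

```python
export_task = project.export(params={"metadata_fields": True})
export_task.wait_till_done()

# Index exported rows by data row id instead of relying on stream order.
rows_by_id = {
    buffered_row.json["data_row"]["id"]: buffered_row.json
    for buffered_row in export_task.get_buffered_stream()
}
assert rows_by_id[data_row.uid]["metadata_fields"] == []
```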