diff --git a/libs/labelbox/tests/data/annotation_import/conftest.py b/libs/labelbox/tests/data/annotation_import/conftest.py index 7af011bdc..55453ade3 100644 --- a/libs/labelbox/tests/data/annotation_import/conftest.py +++ b/libs/labelbox/tests/data/annotation_import/conftest.py @@ -608,7 +608,7 @@ def get_data_row_id(indx=0): yield get_data_row_id - +#TODO: Switch to setup_editor, setup might get removed in later releases @pytest.fixture def configured_project(client, initial_dataset, ontology, rand_gen, image_url): dataset = initial_dataset @@ -642,7 +642,7 @@ def configured_project(client, initial_dataset, ontology, rand_gen, image_url): project.delete() - +#TODO: Switch to setup_editor, setup might get removed in later releases @pytest.fixture def project_with_ontology(client, configured_project, ontology, rand_gen): project = client.create_project(name=rand_gen(str), @@ -657,7 +657,7 @@ def project_with_ontology(client, configured_project, ontology, rand_gen): project.delete() - +#TODO: Switch to setup_editor, setup might get removed in later releases @pytest.fixture def configured_project_pdf(client, ontology, rand_gen, pdf_url): project = client.create_project(name=rand_gen(str), diff --git a/libs/labelbox/tests/data/annotation_import/test_bulk_import_request.py b/libs/labelbox/tests/data/annotation_import/test_bulk_import_request.py index 290cb84a5..9d9680f53 100644 --- a/libs/labelbox/tests/data/annotation_import/test_bulk_import_request.py +++ b/libs/labelbox/tests/data/annotation_import/test_bulk_import_request.py @@ -1,6 +1,6 @@ from unittest.mock import patch import uuid -from labelbox import parser +from labelbox import parser, Project import pytest import random from labelbox.data.annotation_types.annotation import ObjectAnnotation @@ -217,20 +217,20 @@ def test_delete(configured_project, predictions): assert len(list(all_import_requests)) == 0 -def test_pdf_mal_bbox(client, configured_project_pdf): +def test_pdf_mal_bbox(client, 
configured_project_pdf: Project): """ tests pdf mal against only a bbox annotation """ annotations = [] num_annotations = 1 - for row in configured_project_pdf.export_queued_data_rows(): + for data_row_id in configured_project_pdf.data_row_ids: for _ in range(num_annotations): annotations.append({ "uuid": str(uuid.uuid4()), "name": "bbox", "dataRow": { - "id": row['id'] + "id": data_row_id }, "bbox": { "top": round(random.uniform(0, 300), 2),
+@pytest.mark.export_v1("tests used export v1 method, v2 test -> test_import_data_types_v2 below") @pytest.mark.parametrize( "data_type_class", [ diff --git a/libs/labelbox/tests/data/annotation_import/test_mea_prediction_import.py b/libs/labelbox/tests/data/annotation_import/test_mea_prediction_import.py index c2e277cd9..877b4de0b 100644 --- a/libs/labelbox/tests/data/annotation_import/test_mea_prediction_import.py +++ b/libs/labelbox/tests/data/annotation_import/test_mea_prediction_import.py @@ -1,6 +1,7 @@ import uuid from labelbox import parser import pytest +from labelbox import ModelRun from labelbox.schema.annotation_import import AnnotationImportState, MEAPredictionImport from labelbox.data.serialization import NDJsonConverter @@ -107,17 +108,26 @@ def test_create_from_objects_all_project_labels( annotation_import.status_file_url) -def test_model_run_project_labels(model_run_with_all_project_labels, +def test_model_run_project_labels(model_run_with_all_project_labels: ModelRun, model_run_predictions): model_run = model_run_with_all_project_labels - # TODO: Move to export_v2 - model_run_exported_labels = model_run.export_labels(download=True) + + export_task = model_run.export() + export_task.wait_till_done() + stream = export_task.get_buffered_stream() + + # exports to list of tuples (data_row_id, label) needed to adapt test to export v2 instead of export v1 since data rows ids are not at label level in export v2. + model_run_exported_labels = [( + data_row.json["data_row"]["id"], + data_row.json["experiments"][model_run.model_id]["runs"][model_run.uid]["labels"][0]) + for data_row in stream] + labels_indexed_by_schema_id = {} - for label in model_run_exported_labels: + for data_row_id, label in model_run_exported_labels: # assuming exported array of label 'objects' has only one label per data row... 
as usually is when there are no label revisions - schema_id = label['Label']['objects'][0]['schemaId'] - labels_indexed_by_schema_id[schema_id] = label + schema_id = label["annotations"]["objects"][0]["feature_schema_id"] + labels_indexed_by_schema_id[schema_id] = {"label": label, "data_row_id": data_row_id} assert (len( labels_indexed_by_schema_id.keys())) == len(model_run_predictions) @@ -125,11 +135,11 @@ def test_model_run_project_labels(model_run_with_all_project_labels, # making sure the labels are in this model run are all labels uploaded to the project # by comparing some 'immutable' attributes for expected_label in model_run_predictions: - schema_id = expected_label['schemaId'] + schema_id = expected_label["schemaId"] actual_label = labels_indexed_by_schema_id[schema_id] - assert actual_label['Label']['objects'][0]['title'] == expected_label[ + assert actual_label["label"]["annotations"]["objects"][0]["name"] == expected_label[ 'name'] - assert actual_label['DataRow ID'] == expected_label['dataRow']['id'] + assert actual_label["data_row_id"] == expected_label["dataRow"]["id"] def test_create_from_label_objects(model_run_with_data_rows, diff --git a/libs/labelbox/tests/data/annotation_import/test_model_run.py b/libs/labelbox/tests/data/annotation_import/test_model_run.py index b267bcf6f..b4bd59d5c 100644 --- a/libs/labelbox/tests/data/annotation_import/test_model_run.py +++ b/libs/labelbox/tests/data/annotation_import/test_model_run.py @@ -119,7 +119,7 @@ def test_model_run_upsert_data_rows_with_existing_labels( assert n_data_rows == len( list(model_run_with_data_rows.model_run_data_rows())) - +@pytest.mark.export_v1("test uses the export v1 method; see test_import_data_types_v2 in test_data_types.py") def test_model_run_export_labels(model_run_with_data_rows): labels = model_run_with_data_rows.export_labels(download=True) assert len(labels) == 3 diff --git a/libs/labelbox/tests/data/annotation_import/test_send_to_annotate_mea.py 
b/libs/labelbox/tests/data/annotation_import/test_send_to_annotate_mea.py index 6727e84b0..a12077290 100644 --- a/libs/labelbox/tests/data/annotation_import/test_send_to_annotate_mea.py +++ b/libs/labelbox/tests/data/annotation_import/test_send_to_annotate_mea.py @@ -56,9 +56,14 @@ def test_send_to_annotate_from_model(client, configured_project, destination_batches = list(destination_project.batches()) assert len(destination_batches) == 1 - destination_data_rows = list(destination_batches[0].export_data_rows()) + export_task = destination_project.export() + export_task.wait_till_done() + stream = export_task.get_buffered_stream() + + destination_data_rows = [dr.json["data_row"]["id"] for dr in stream] + assert len(destination_data_rows) == len(data_row_ids) - assert all([dr.uid in data_row_ids for dr in destination_data_rows]) + assert all([dr in data_row_ids for dr in destination_data_rows]) # Since data rows were added to a review queue, predictions should be imported into the project as labels destination_project_labels = (list(destination_project.labels())) diff --git a/libs/labelbox/tests/data/export/streamable/test_export_project_streamable.py b/libs/labelbox/tests/data/export/streamable/test_export_project_streamable.py index c0305491a..c29239887 100644 --- a/libs/labelbox/tests/data/export/streamable/test_export_project_streamable.py +++ b/libs/labelbox/tests/data/export/streamable/test_export_project_streamable.py @@ -313,7 +313,7 @@ def test_batch( }, ] * 2) task.wait_till_done() - data_rows = [dr.uid for dr in list(dataset.export_data_rows())] + data_rows = [result["id"] for result in task.result] batch_one = f"batch one {uuid.uuid4()}" # This test creates two batches, only one batch should be exporter diff --git a/libs/labelbox/tests/data/test_data_row_metadata.py b/libs/labelbox/tests/data/test_data_row_metadata.py index 037296103..04804eb2f 100644 --- a/libs/labelbox/tests/data/test_data_row_metadata.py +++ 
b/libs/labelbox/tests/data/test_data_row_metadata.py @@ -89,9 +89,13 @@ def test_export_empty_metadata(client, configured_project_with_label, wait_for_data_row_processing): project, _, data_row, _ = configured_project_with_label data_row = wait_for_data_row_processing(client, data_row) - labels = project.label_generator() - label = next(labels) - assert label.data.metadata == [] + + export_task = project.export(params={"metadata_fields": True}) + export_task.wait_till_done() + stream = export_task.get_buffered_stream() + data_row = [data_row.json for data_row in stream][0] + + assert data_row["metadata_fields"] == [] def test_bulk_export_datarow_metadata(data_row, mdo: DataRowMetadataOntology):