diff --git a/libs/labelbox/src/labelbox/schema/project.py b/libs/labelbox/src/labelbox/schema/project.py
index d141c89cb..b552a945b 100644
--- a/libs/labelbox/src/labelbox/schema/project.py
+++ b/libs/labelbox/src/labelbox/schema/project.py
@@ -149,6 +149,12 @@ def is_chat_evaluation(self) -> bool:
     def is_auto_data_generation(self) -> bool:
         return (self.upload_type == UploadType.Auto)  # type: ignore
 
+    # Checks not only whether the project ontology is None, but also whether it is
+    # the default empty ontology we create when attaching a labeling frontend in
+    # createLabelingFrontendOptions.
+    def is_empty_ontology(self) -> bool:
+        ontology = self.ontology()  # type: ignore
+        return ontology is None or (len(ontology.tools()) == 0 and
+                                    len(ontology.classifications()) == 0)
+
     def project_model_configs(self):
         query_str = """query ProjectModelConfigsPyApi($id: ID!) {
             project(where: {id : $id}) {
@@ -772,35 +778,27 @@ def setup_editor(self, ontology) -> None:
         Args:
             ontology (Ontology): The ontology to attach to the project
         """
+        warnings.warn("This method is deprecated. Use connect_ontology instead.")
+        self.connect_ontology(ontology)
-        if self.labeling_frontend() is not None and not self.is_chat_evaluation(
-        ):  # Chat evaluation projects are automatically set up via the same api that creates a project
-            raise ResourceConflict("Editor is already set up.")
-
-        if not self.is_chat_evaluation():
-            labeling_frontend = next(
-                self.client.get_labeling_frontends(
-                    where=Entity.LabelingFrontend.name == "Editor"))
-            self.labeling_frontend.connect(labeling_frontend)
-
-            LFO = Entity.LabelingFrontendOptions
-            self.client._create(
-                LFO, {
-                    LFO.project:
-                        self,
-                    LFO.labeling_frontend:
-                        labeling_frontend,
-                    LFO.customization_options:
-                        json.dumps({
-                            "tools": [],
-                            "classifications": []
-                        })
-                })
-        else:
-            warnings.warn("""
-            Skipping editor setup for a chat evaluation project.
-            Editor was setup automatically.
-            """)
+    def connect_ontology(self, ontology) -> None:
+        """
+        Connects the ontology to the project. If a labeling frontend (editor) is
+        not set up yet, the default editor is connected as well.
+
+        Note: For live chat model evaluation projects, the editor setup is skipped
+        because it is set up automatically when the project is created.
+
+        Args:
+            ontology (Ontology): The ontology to attach to the project
+        """
+        if self.labeling_frontend() is None:
+            # Chat evaluation projects are set up automatically by the same API
+            # call that creates the project, so this branch is skipped for them.
+            self._connect_default_labeling_front_end(ontology_as_dict={
+                "tools": [],
+                "classifications": []
+            })
+
+        if not self.is_empty_ontology():
+            raise ValueError("Ontology already connected to project.")
 
         query_str = """mutation ConnectOntologyPyApi($projectId: ID!, $ontologyId: ID!){
             project(where: {id: $projectId}) {connectOntology(ontologyId: $ontologyId) {id}}}"""
@@ -812,43 +810,55 @@ def setup_editor(self, ontology) -> None:
         self.update(setup_complete=timestamp)
 
     def setup(self, labeling_frontend, labeling_frontend_options) -> None:
-        """ Finalizes the Project setup.
+        """ Associates the default labeling frontend with the project and creates
+        an ontology based on labeling_frontend_options.
 
         Args:
-            labeling_frontend (LabelingFrontend): Which UI to use to label the
-                data.
+            labeling_frontend (LabelingFrontend): Do not use; this parameter is
+                deprecated. The default labeling frontend is now associated with
+                the project.
             labeling_frontend_options (dict or str): Labeling frontend options,
                 a.k.a. project ontology. If given a `dict` it will be converted
                 to `str` using `json.dumps`.
""" + warnings.warn("This method is deprecated use connect_ontology instead.") + if labeling_frontend is not None: + warnings.warn( + "labeling_frontend parameter will not be used to create a new labeling frontend." + ) + if self.is_chat_evaluation(): warnings.warn(""" - This project is a chat evaluation project. + This project is a live chat evaluation project. Editor was setup automatically. - No need to call this method. """) return - if self.labeling_frontend() is not None: - raise ResourceConflict("Editor is already set up.") + if self.labeling_frontend( + ) is None: # Chat evaluation projects are automatically set up via the same api that creates a project + self._connect_default_labeling_front_end(labeling_frontend_options) - if not isinstance(labeling_frontend_options, str): - labeling_frontend_options = json.dumps(labeling_frontend_options) + timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + self.update(setup_complete=timestamp) + def _connect_default_labeling_front_end(self, ontology_as_dict: dict): + warnings.warn("Connecting default labeling editor for the project.") + labeling_frontend = next( + self.client.get_labeling_frontends( + where=Entity.LabelingFrontend.name == "Editor")) self.labeling_frontend.connect(labeling_frontend) + if not isinstance(ontology_as_dict, str): + labeling_frontend_options_str = json.dumps(ontology_as_dict) + else: + labeling_frontend_options_str = ontology_as_dict + LFO = Entity.LabelingFrontendOptions self.client._create( LFO, { LFO.project: self, LFO.labeling_frontend: labeling_frontend, - LFO.customization_options: labeling_frontend_options + LFO.customization_options: labeling_frontend_options_str }) - timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") - self.update(setup_complete=timestamp) - def create_batch( self, name: str, diff --git a/libs/labelbox/tests/data/annotation_import/conftest.py b/libs/labelbox/tests/data/annotation_import/conftest.py index b37976653..df911da10 100644 --- a/libs/labelbox/tests/data/annotation_import/conftest.py +++ b/libs/labelbox/tests/data/annotation_import/conftest.py @@ -608,7 +608,8 @@ def get_data_row_id(indx=0): yield get_data_row_id -#TODO: Switch to setup_editor, setup might get removed in later releases + +#TODO: Switch to connect_ontology, setup might get removed in later releases @pytest.fixture def configured_project(client, initial_dataset, ontology, rand_gen, image_url): dataset = initial_dataset @@ -645,7 +646,8 @@ def configured_project(client, initial_dataset, ontology, rand_gen, image_url): project.delete() -#TODO: Switch to setup_editor, setup might get removed in later releases + +#TODO: Switch to connect_ontology, setup might get removed in later releases @pytest.fixture def project_with_ontology(client, configured_project, ontology, rand_gen): project = client.create_project(name=rand_gen(str), @@ -660,7 +662,8 @@ def project_with_ontology(client, configured_project, ontology, rand_gen): project.delete() -#TODO: Switch to setup_editor, setup might get removed in later releases + +#TODO: Switch to connect_ontology, setup might get removed in later releases @pytest.fixture def configured_project_pdf(client, ontology, rand_gen, pdf_url): project = client.create_project(name=rand_gen(str), diff --git a/libs/labelbox/tests/data/annotation_import/test_send_to_annotate_mea.py b/libs/labelbox/tests/data/annotation_import/test_send_to_annotate_mea.py index a12077290..b96eb31b6 100644 --- 
+++ b/libs/labelbox/tests/data/annotation_import/test_send_to_annotate_mea.py
@@ -14,7 +14,7 @@ def test_send_to_annotate_from_model(client, configured_project,
     destination_project = project
     model = client.get_model(model_run.model_id)
     ontology = client.get_ontology(model.ontology_id)
-    destination_project.setup_editor(ontology)
+    destination_project.connect_ontology(ontology)
 
     queues = destination_project.task_queues()
     initial_review_task = next(
@@ -55,13 +55,13 @@ def test_send_to_annotate_from_model(client, configured_project,
     # Check that the data row was sent to the new project
     destination_batches = list(destination_project.batches())
     assert len(destination_batches) == 1
-    
+
     export_task = destination_project.export()
     export_task.wait_till_done()
     stream = export_task.get_buffered_stream()
-    
+
     destination_data_rows = [dr.json["data_row"]["id"] for dr in stream]
-    
+
     assert len(destination_data_rows) == len(data_row_ids)
     assert all([dr in data_row_ids for dr in destination_data_rows])
diff --git a/libs/labelbox/tests/integration/test_chat_evaluation_ontology_project.py b/libs/labelbox/tests/integration/test_chat_evaluation_ontology_project.py
index eddc54aaf..aafcddbcc 100644
--- a/libs/labelbox/tests/integration/test_chat_evaluation_ontology_project.py
+++ b/libs/labelbox/tests/integration/test_chat_evaluation_ontology_project.py
@@ -28,7 +28,7 @@ def test_create_chat_evaluation_ontology_project(
     project = live_chat_evaluation_project_with_new_dataset
     assert project.model_setup_complete is None
 
-    project.setup_editor(ontology)
+    project.connect_ontology(ontology)
 
     assert project.labeling_frontend().name == "Editor"
     assert project.ontology().name == ontology.name
@@ -61,7 +61,7 @@ def test_create_chat_evaluation_ontology_project_existing_dataset(
     project = chat_evaluation_project_append_to_dataset
     assert project
 
-    project.setup_editor(ontology)
+    project.connect_ontology(ontology)
 
     assert project.labeling_frontend().name == "Editor"
     assert project.ontology().name == ontology.name
diff --git a/libs/labelbox/tests/integration/test_offline_chat_evaluation_project.py b/libs/labelbox/tests/integration/test_offline_chat_evaluation_project.py
index f1e3877ff..2ff5607c3 100644
--- a/libs/labelbox/tests/integration/test_offline_chat_evaluation_project.py
+++ b/libs/labelbox/tests/integration/test_offline_chat_evaluation_project.py
@@ -1,5 +1,6 @@
 import pytest
 
+
 def test_create_offline_chat_evaluation_project(client, rand_gen,
                                                 offline_chat_evaluation_project,
                                                 chat_evaluation_ontology,
@@ -9,7 +10,7 @@ def test_create_offline_chat_evaluation_project(client, rand_gen,
     assert project
 
     ontology = chat_evaluation_ontology
-    project.setup_editor(ontology)
+    project.connect_ontology(ontology)
 
     assert project.labeling_frontend().name == "Editor"
     assert project.ontology().name == ontology.name
diff --git a/libs/labelbox/tests/integration/test_ontology.py b/libs/labelbox/tests/integration/test_ontology.py
index aaaaf1b72..61dfac0bc 100644
--- a/libs/labelbox/tests/integration/test_ontology.py
+++ b/libs/labelbox/tests/integration/test_ontology.py
@@ -95,7 +95,7 @@ def test_cant_delete_an_ontology_with_project(client):
         name='ontology name',
         feature_schema_ids=[feature_schema_id],
         media_type=MediaType.Image)
-    project.setup_editor(ontology)
+    project.connect_ontology(ontology)
 
     with pytest.raises(
             Exception,
@@ -160,7 +160,7 @@ def test_does_not_include_used_ontologies(client):
     project = client.create_project(name="test project",
                                     queue_mode=QueueMode.Batch,
                                     media_type=MediaType.Image)
-    project.setup_editor(ontology_with_project)
+    project.connect_ontology(ontology_with_project)
 
     unused_ontologies = client.get_unused_ontologies()
     assert ontology_with_project.uid not in unused_ontologies
diff --git a/libs/labelbox/tests/integration/test_project_setup.py b/libs/labelbox/tests/integration/test_project_setup.py
index 8d81ba043..8404b0e50 100644
--- a/libs/labelbox/tests/integration/test_project_setup.py
+++ b/libs/labelbox/tests/integration/test_project_setup.py
@@ -55,7 +55,7 @@ def test_project_editor_setup(client, project, rand_gen):
     ontology_name = f"test_project_editor_setup_ontology_name-{rand_gen(str)}"
     ontology = client.create_ontology(ontology_name, simple_ontology())
     now = datetime.now().astimezone(timezone.utc)
-    project.setup_editor(ontology)
+    project.connect_ontology(ontology)
     assert now - project.setup_complete <= timedelta(seconds=3)
     assert now - project.last_activity_time <= timedelta(seconds=3)
     assert project.labeling_frontend().name == "Editor"
@@ -68,10 +68,10 @@ def test_project_editor_setup(client, project, rand_gen):
     ] == [ontology_name]
 
 
-def test_project_editor_setup_cant_call_multiple_times(client, project,
-                                                        rand_gen):
+def test_project_connect_ontology_cant_call_multiple_times(
+        client, project, rand_gen):
     ontology_name = f"test_project_editor_setup_ontology_name-{rand_gen(str)}"
     ontology = client.create_ontology(ontology_name, simple_ontology())
-    project.setup_editor(ontology)
-    with pytest.raises(ResourceConflict):
-        project.setup_editor(ontology)
+    project.connect_ontology(ontology)
+    with pytest.raises(ValueError):
+        project.connect_ontology(ontology)
diff --git a/libs/labelbox/tests/integration/test_send_to_annotate.py b/libs/labelbox/tests/integration/test_send_to_annotate.py
index 4338985b5..fd358324f 100644
--- a/libs/labelbox/tests/integration/test_send_to_annotate.py
+++ b/libs/labelbox/tests/integration/test_send_to_annotate.py
@@ -4,12 +4,13 @@
 def test_send_to_annotate_include_annotations(
-        client: Client, configured_batch_project_with_label: Project, project_pack: List[Project], ontology: Ontology):
+        client: Client, configured_batch_project_with_label: Project,
+        project_pack: List[Project], ontology: Ontology):
     [source_project, _, data_row, _] = configured_batch_project_with_label
     destination_project: Project = project_pack[0]
 
     src_ontology = source_project.ontology()
-    destination_project.setup_editor(ontology)
+    destination_project.connect_ontology(ontology)
 
     # build an ontology mapping using the top level tools
     src_feature_schema_ids = list(
@@ -46,11 +47,11 @@ def test_send_to_annotate_include_annotations(
     # Check that the data row was sent to the new project
     destination_batches = list(destination_project.batches())
     assert len(destination_batches) == 1
-    
+
     export_task = destination_project.export()
     export_task.wait_till_done()
     stream = export_task.get_buffered_stream()
-    
+
     destination_data_rows = [dr.json["data_row"]["id"] for dr in stream]
     assert len(destination_data_rows) == 1
     assert destination_data_rows[0] == data_row.uid
diff --git a/libs/labelbox/tests/unit/test_project.py b/libs/labelbox/tests/unit/test_project.py
index 5a6754aa3..ff0c75b6c 100644
--- a/libs/labelbox/tests/unit/test_project.py
+++ b/libs/labelbox/tests/unit/test_project.py
@@ -1,10 +1,32 @@
 import pytest
-from unittest.mock import MagicMock
+from unittest.mock import MagicMock, patch
 
 from labelbox.schema.project import Project
 from labelbox.schema.ontology_kind import EditorTaskType
 
 
+@pytest.fixture
+def project_entity():
+    return Project(
+        MagicMock(), {
+            "id": "test",
+            "name": "test",
+            "createdAt": "2021-06-01T00:00:00.000Z",
+            "updatedAt": "2021-06-01T00:00:00.000Z",
+            "autoAuditNumberOfLabels": 1,
+            "autoAuditPercentage": 100,
+            "dataRowCount": 1,
+            "description": "test",
+            "editorTaskType": "MODEL_CHAT_EVALUATION",
+            "lastActivityTime": "2021-06-01T00:00:00.000Z",
+            "allowedMediaType": "IMAGE",
+            "queueMode": "BATCH",
+            "setupComplete": "2021-06-01T00:00:00.000Z",
+            "modelSetupComplete": None,
+            "uploadType": "Auto",
+        })
+
+
 @pytest.mark.parametrize(
     'api_editor_task_type, expected_editor_task_type',
     [(None, EditorTaskType.Missing),
@@ -14,7 +36,7 @@
      EditorTaskType.OfflineModelChatEvaluation),
      ('NEW_TYPE', EditorTaskType.Missing)])
 def test_project_editor_task_type(api_editor_task_type,
-                                  expected_editor_task_type):
+                                  expected_editor_task_type, project_entity):
     client = MagicMock()
     project = Project(
         client, {
@@ -36,3 +58,13 @@ def test_project_editor_task_type(api_editor_task_type,
         })
 
     assert project.editor_task_type == expected_editor_task_type
+
+
+def test_setup_editor_using_connect_ontology(project_entity):
+    project = project_entity
+    ontology = MagicMock()
+    project.connect_ontology = MagicMock()
+    with patch("warnings.warn") as warn:
+        project.setup_editor(ontology)
+    warn.assert_called_once()
+    project.connect_ontology.assert_called_once_with(ontology)
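For reference, a minimal usage sketch of the new flow introduced by this diff (not part of the patch itself; the client setup, project ID, and ontology ID are illustrative placeholders):

from labelbox import Client

client = Client(api_key="<api-key>")
project = client.get_project("<project-id>")
ontology = client.get_ontology("<ontology-id>")

# New entry point: connects the ontology and, if no labeling frontend is
# attached yet, connects the default "Editor" frontend as well.
project.connect_ontology(ontology)

# The old call still works but now emits a deprecation warning and simply
# delegates to connect_ontology:
#   project.setup_editor(ontology)

# Calling connect_ontology on a project whose ontology is already non-empty
# raises ValueError, replacing the ResourceConflict previously raised by
# setup_editor.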