diff --git a/libs/labelbox/src/labelbox/schema/bulk_import_request.py b/libs/labelbox/src/labelbox/schema/bulk_import_request.py
index 44ac7cd6a..8e11f3261 100644
--- a/libs/labelbox/src/labelbox/schema/bulk_import_request.py
+++ b/libs/labelbox/src/labelbox/schema/bulk_import_request.py
@@ -787,9 +787,7 @@ def validate_feature_schemas(

 # A union with custom construction logic to improve error messages
 class NDClassification(
     SpecialUnion,
-    Type[  # type: ignore
-        Union[NDText, NDRadio, NDChecklist]
-    ],
+    Type[Union[NDText, NDRadio, NDChecklist]],  # type: ignore
 ): ...

@@ -979,9 +977,7 @@ class NDTool(

 class NDAnnotation(
     SpecialUnion,
-    Type[  # type: ignore
-        Union[NDTool, NDClassification]
-    ],
+    Type[Union[NDTool, NDClassification]],  # type: ignore
 ):
     @classmethod
     def build(cls: Any, data) -> "NDBase":
diff --git a/libs/labelbox/src/labelbox/schema/labeling_service_dashboard.py b/libs/labelbox/src/labelbox/schema/labeling_service_dashboard.py
index 2052897f6..c5e1fa11e 100644
--- a/libs/labelbox/src/labelbox/schema/labeling_service_dashboard.py
+++ b/libs/labelbox/src/labelbox/schema/labeling_service_dashboard.py
@@ -84,7 +84,8 @@ def __init__(self, **kwargs):
         super().__init__(**kwargs)
         if not self.client.enable_experimental:
             raise RuntimeError(
-                "Please enable experimental in client to use LabelingService")
+                "Please enable experimental in client to use LabelingService"
+            )

     @property
     def service_type(self):
@@ -97,20 +98,28 @@ def service_type(self):
         if self.editor_task_type is None:
             return sentence_case(self.media_type.value)

-        if (self.editor_task_type == EditorTaskType.OfflineModelChatEvaluation
-                and self.media_type == MediaType.Conversational):
+        if (
+            self.editor_task_type == EditorTaskType.OfflineModelChatEvaluation
+            and self.media_type == MediaType.Conversational
+        ):
             return "Offline chat evaluation"

-        if (self.editor_task_type == EditorTaskType.ModelChatEvaluation and
-                self.media_type == MediaType.Conversational):
+        if (
+            self.editor_task_type == EditorTaskType.ModelChatEvaluation
+            and self.media_type == MediaType.Conversational
+        ):
             return "Live chat evaluation"

-        if (self.editor_task_type == EditorTaskType.ResponseCreation and
-                self.media_type == MediaType.Text):
+        if (
+            self.editor_task_type == EditorTaskType.ResponseCreation
+            and self.media_type == MediaType.Text
+        ):
             return "Response creation"

-        if (self.media_type == MediaType.LLMPromptCreation or
-                self.media_type == MediaType.LLMPromptResponseCreation):
+        if (
+            self.media_type == MediaType.LLMPromptCreation
+            or self.media_type == MediaType.LLMPromptResponseCreation
+        ):
             return "Prompt response creation"

         return sentence_case(self.media_type.value)
@@ -154,7 +163,8 @@ def get_all(
                         pageInfo { endCursor }
                     }
                 }
-            """)
+            """
+            )
         else:
             template = Template(
                 """query SearchProjectsPyApi($$first: Int, $$from: String) {
@@ -164,11 +174,13 @@ def get_all(
                         pageInfo { endCursor }
                     }
                 }
-            """)
+            """
+            )
         query_str = template.substitute(
             labeling_dashboard_selections=GRAPHQL_QUERY_SELECTIONS,
             search_query=build_search_filter(search_query)
-            if search_query else None,
+            if search_query
+            else None,
         )

         params: Dict[str, Union[str, int]] = {}
@@ -186,7 +198,7 @@ def convert_to_labeling_service_dashboard(client, data):
             experimental=True,
         )

-    @model_validator(mode='before')
+    @model_validator(mode="before")
     def convert_boost_data(cls, data):
         if "boostStatus" in data:
             data["status"] = LabelingServiceStatus(data.pop("boostStatus"))
diff --git a/libs/labelbox/tests/conftest.py b/libs/labelbox/tests/conftest.py
index 446db396b..6d13a8d83 100644
--- a/libs/labelbox/tests/conftest.py
+++ b/libs/labelbox/tests/conftest.py
@@ -7,7 +7,9 @@
 import re
 import uuid
 import time
+from labelbox.schema.project import Project
 import requests
+from labelbox.schema.ontology import Ontology
 import pytest
 from types import SimpleNamespace
 from typing import Type
@@ -23,21 +25,11 @@
 from labelbox.schema.queue_mode import QueueMode
 from labelbox import Client
-from labelbox import Dataset, DataRow
 from labelbox import LabelingFrontend
-from labelbox import OntologyBuilder, Tool, Option, Classification, MediaType
-from labelbox.orm import query
-from labelbox.pagination import PaginatedCollection
+from labelbox import OntologyBuilder, Tool, Option, Classification
 from labelbox.schema.annotation_import import LabelImport
-from labelbox.schema.catalog import Catalog
 from labelbox.schema.enums import AnnotationImportState
-from labelbox.schema.invite import Invite
-from labelbox.schema.quality_mode import QualityMode
-from labelbox.schema.queue_mode import QueueMode
-from labelbox.schema.user import User
 from labelbox.exceptions import LabelboxError
-from contextlib import suppress
-from labelbox import Client

 IMG_URL = "https://picsum.photos/200/300.jpg"
 MASKABLE_IMG_URL = "https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg"
@@ -638,17 +630,22 @@ def organization(client):
 def configured_project_with_label(
     client,
     rand_gen,
-    image_url,
-    project,
     dataset,
     data_row,
     wait_for_label_processing,
+    teardown_helpers,
 ):
     """Project with a connected dataset, having one datarow
+
     Project contains an ontology with 1 bbox tool
     Additionally includes a create_label method for any needed extra labels
     One label is already created and yielded when using fixture
     """
+    project = client.create_project(
+        name=rand_gen(str),
+        queue_mode=QueueMode.Batch,
+        media_type=MediaType.Image,
+    )
     project._wait_until_data_rows_are_processed(
         data_row_ids=[data_row.uid],
         wait_processing_max_seconds=DATA_ROW_PROCESSING_WAIT_TIMEOUT_SECONDS,
@@ -666,8 +663,7 @@ def configured_project_with_label(
     )
     yield [project, dataset, data_row, label]

-    for label in project.labels():
-        label.delete()
+    teardown_helpers.teardown_project_labels_ontology_feature_schemas(project)


 def _create_label(project, data_row, ontology, wait_for_label_processing):
@@ -736,13 +732,23 @@ def big_dataset(dataset: Dataset):

 @pytest.fixture
 def configured_batch_project_with_label(
-    project, dataset, data_row, wait_for_label_processing
+    client,
+    dataset,
+    data_row,
+    wait_for_label_processing,
+    rand_gen,
+    teardown_helpers,
 ):
     """Project with a batch having one datarow
     Project contains an ontology with 1 bbox tool
     Additionally includes a create_label method for any needed extra labels
     One label is already created and yielded when using fixture
     """
+    project = client.create_project(
+        name=rand_gen(str),
+        queue_mode=QueueMode.Batch,
+        media_type=MediaType.Image,
+    )
     data_rows = [dr.uid for dr in list(dataset.data_rows())]
     project._wait_until_data_rows_are_processed(
         data_row_ids=data_rows, sleep_interval=3
@@ -757,18 +763,27 @@ def configured_batch_project_with_label(

     yield [project, dataset, data_row, label]

-    for label in project.labels():
-        label.delete()
+    teardown_helpers.teardown_project_labels_ontology_feature_schemas(project)


 @pytest.fixture
 def configured_batch_project_with_multiple_datarows(
-    project, dataset, data_rows, wait_for_label_processing
+    client,
+    dataset,
+    data_rows,
+    wait_for_label_processing,
+    rand_gen,
+    teardown_helpers,
 ):
     """Project with a batch having multiple datarows
     Project contains an ontology with 1 bbox tool
     Additionally includes a create_label method for any needed extra labels
     """
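+    # Create the project inline (instead of depending on a shared project
+    # fixture) so this fixture owns the full lifecycle, incl. teardown below.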
+    project = client.create_project(
+        name=rand_gen(str),
+        queue_mode=QueueMode.Batch,
+        media_type=MediaType.Image,
+    )
     global_keys = [dr.global_key for dr in data_rows]

     batch_name = f"batch {uuid.uuid4()}"
@@ -780,26 +795,7 @@ def configured_batch_project_with_multiple_datarows(

     yield [project, dataset, data_rows]

-    for label in project.labels():
-        label.delete()
-
-
-@pytest.fixture
-def configured_batch_project_for_labeling_service(
-    project, data_row_and_global_key
-):
-    """Project with a batch having multiple datarows
-    Project contains an ontology with 1 bbox tool
-    Additionally includes a create_label method for any needed extra labels
-    """
-    global_keys = [data_row_and_global_key[1]]
-
-    batch_name = f"batch {uuid.uuid4()}"
-    project.create_batch(batch_name, global_keys=global_keys)
-
-    _setup_ontology(project)
-
-    yield project
+    teardown_helpers.teardown_project_labels_ontology_feature_schemas(project)


 # NOTE this is nice heuristics, also there is this logic _wait_until_data_rows_are_processed in Project
@@ -1062,7 +1058,7 @@ def project_with_empty_ontology(project):

 @pytest.fixture
 def configured_project_with_complex_ontology(
-    client, initial_dataset, rand_gen, image_url
+    client, initial_dataset, rand_gen, image_url, teardown_helpers
 ):
     project = client.create_project(
         name=rand_gen(str),
@@ -1127,7 +1123,7 @@ def configured_project_with_complex_ontology(
     project.setup(editor, ontology.asdict())

     yield [project, data_row]
-    project.delete()
+    teardown_helpers.teardown_project_labels_ontology_feature_schemas(project)


 @pytest.fixture
@@ -1147,12 +1143,13 @@ def valid_model_id():

 @pytest.fixture
 def requested_labeling_service(
-    rand_gen,
-    live_chat_evaluation_project_with_new_dataset,
-    chat_evaluation_ontology,
-    model_config,
+    rand_gen, client, chat_evaluation_ontology, model_config, teardown_helpers
 ):
-    project = live_chat_evaluation_project_with_new_dataset
+    project_name = f"test-model-evaluation-project-{rand_gen(str)}"
+    dataset_name = f"test-model-evaluation-dataset-{rand_gen(str)}"
+    project = client.create_model_evaluation_project(
+        name=project_name, dataset_name=dataset_name, data_row_count=1
+    )
     project.connect_ontology(chat_evaluation_ontology)

     project.upsert_instructions("tests/integration/media/sample_pdf.pdf")
@@ -1164,3 +1161,105 @@ def requested_labeling_service(
     labeling_service.request()

     yield project, project.get_labeling_service()
+
+    teardown_helpers.teardown_project_labels_ontology_feature_schemas(project)
+
+
+class TearDownHelpers:
+    @staticmethod
+    def teardown_project_labels_ontology_feature_schemas(project: Project):
+        """
+        Call this function to release project, labels, ontology and feature schemas in fixture teardown
+
+        NOTE: exception handling is not required as this is a fixture teardown
+        """
+        ontology = project.ontology()
+        ontology_id = ontology.uid
+        client = project.client
+        classification_feature_schema_ids = [
+            feature["featureSchemaId"]
+            for feature in ontology.normalized["classifications"]
+        ]
+        tool_feature_schema_ids = [
+            feature["featureSchemaId"]
+            for feature in ontology.normalized["tools"]
+        ]
+
+        feature_schema_ids = (
+            classification_feature_schema_ids + tool_feature_schema_ids
+        )
+        labels = list(project.labels())
+        for label in labels:
+            label.delete()
+
+        project.delete()
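+        # Order matters: delete_unused_ontology and
+        # delete_unused_feature_schema only remove resources that nothing
+        # references, so the project and its labels must be gone first.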
+        client.delete_unused_ontology(ontology_id)
+        for feature_schema_id in feature_schema_ids:
+            try:
+                client.delete_unused_feature_schema(feature_schema_id)
+            except LabelboxError as e:
+                print(
+                    f"Failed to delete feature schema {feature_schema_id}: {e}"
+                )
+
+    @staticmethod
+    def teardown_ontology_feature_schemas(ontology: Ontology):
+        """
+        Call this function to release an ontology and its feature schemas in fixture teardown
+
+        NOTE: exception handling is not required as this is a fixture teardown
+        """
+        ontology_id = ontology.uid
+        client = ontology.client
+        classification_feature_schema_ids = [
+            feature["featureSchemaId"]
+            for feature in ontology.normalized["classifications"]
+        ] + [
+            option["featureSchemaId"]
+            for feature in ontology.normalized["classifications"]
+            for option in feature.get("options", [])
+        ]
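+        # Tools may nest classifications, which in turn nest options; each
+        # level carries its own feature schema id, so collect all of them.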
+        tool_feature_schema_ids = (
+            [
+                feature["featureSchemaId"]
+                for feature in ontology.normalized["tools"]
+            ]
+            + [
+                classification["featureSchemaId"]
+                for tool in ontology.normalized["tools"]
+                for classification in tool.get("classifications", [])
+            ]
+            + [
+                option["featureSchemaId"]
+                for tool in ontology.normalized["tools"]
+                for classification in tool.get("classifications", [])
+                for option in classification.get("options", [])
+            ]
+        )
+
+        feature_schema_ids = (
+            classification_feature_schema_ids + tool_feature_schema_ids
+        )
+
+        client.delete_unused_ontology(ontology_id)
+        for feature_schema_id in feature_schema_ids:
+            try:
+                client.delete_unused_feature_schema(feature_schema_id)
+            except LabelboxError as e:
+                print(
+                    f"Failed to delete feature schema {feature_schema_id}: {e}"
+                )
+
+
+class ModuleTearDownHelpers(TearDownHelpers): ...
+
+
+@pytest.fixture
+def teardown_helpers():
+    return TearDownHelpers()
+
+
+@pytest.fixture(scope="module")
+def module_teardown_helpers():
+    return TearDownHelpers()
diff --git a/libs/labelbox/tests/data/annotation_import/conftest.py b/libs/labelbox/tests/data/annotation_import/conftest.py
index 39cede0bb..6543f54bf 100644
--- a/libs/labelbox/tests/data/annotation_import/conftest.py
+++ b/libs/labelbox/tests/data/annotation_import/conftest.py
@@ -1,4 +1,3 @@
-import itertools
 import uuid

 from labelbox.schema.model_run import ModelRun
@@ -14,7 +13,6 @@
 from typing import Tuple, Type
 from labelbox.schema.annotation_import import LabelImport, AnnotationImportState
 from pytest import FixtureRequest
-from contextlib import suppress

 """
 The main fixtures of this library are configured_project and configured_project_by_global_key. Both fixtures generate data rows with a parametrize media type. They create the amount of data rows equal to the DATA_ROW_COUNT variable below. The data rows are generated with a factory fixture that returns a function that allows you to pass a global key. The ontologies are generated normalized and based on the MediaType given (i.e. only features supported by MediaType are created). This ontology is later used to obtain the correct annotations with the prediction_id_mapping and corresponding inferences. Each data row will have all possible annotations attached supported for the MediaType.
@@ -719,7 +717,6 @@ def _create_project(
     )

     project.connect_ontology(ontology)
-
     data_row_data = []

     for _ in range(DATA_ROW_COUNT):
@@ -752,6 +749,7 @@ def configured_project(
     normalized_ontology_by_media_type,
     export_v2_test_helpers,
     llm_prompt_response_creation_dataset_with_data_row,
+    teardown_helpers,
 ):
     """Configure project for test. Request.param will contain the media type if not present will use Image MediaType. The project will have 10 data rows."""
@@ -789,13 +787,11 @@ def configured_project(

     yield project

-    project.delete()
+    teardown_helpers.teardown_project_labels_ontology_feature_schemas(project)

     if dataset:
         dataset.delete()

-    client.delete_unused_ontology(ontology.uid)
-

 @pytest.fixture()
 def configured_project_by_global_key(
@@ -805,6 +801,7 @@ def configured_project_by_global_key(
     request: FixtureRequest,
     normalized_ontology_by_media_type,
     export_v2_test_helpers,
+    teardown_helpers,
 ):
     """Does the same thing as configured project but with global keys focus."""
@@ -841,13 +838,11 @@ def configured_project_by_global_key(

     yield project

-    project.delete()
+    teardown_helpers.teardown_project_labels_ontology_feature_schemas(project)

     if dataset:
         dataset.delete()

-    client.delete_unused_ontology(ontology.uid)
-

 @pytest.fixture(scope="module")
 def module_project(
@@ -856,6 +851,7 @@ def module_project(
     data_row_json_by_media_type,
     request: FixtureRequest,
     normalized_ontology_by_media_type,
+    module_teardown_helpers,
 ):
     """Generates a image project that scopes to the test module(file). Used to reduce api calls."""
@@ -889,13 +885,13 @@ def module_project(

     yield project

-    project.delete()
+    module_teardown_helpers.teardown_project_labels_ontology_feature_schemas(
+        project
+    )

     if dataset:
         dataset.delete()

-    client.delete_unused_ontology(ontology.uid)
-

 @pytest.fixture
 def prediction_id_mapping(request, normalized_ontology_by_media_type):
diff --git a/libs/labelbox/tests/data/annotation_import/test_model_run.py b/libs/labelbox/tests/data/annotation_import/test_model_run.py
index 9eca28429..1174115c5 100644
--- a/libs/labelbox/tests/data/annotation_import/test_model_run.py
+++ b/libs/labelbox/tests/data/annotation_import/test_model_run.py
@@ -7,13 +7,23 @@
 from labelbox import DataSplit, ModelRun


-@pytest.mark.order(1)
-def test_model_run(client, configured_project_with_label, data_row, rand_gen):
+@pytest.fixture
+def current_model(client, configured_project_with_label, rand_gen):
     project, _, _, label = configured_project_with_label
-    label_id = label.uid
     ontology = project.ontology()
-    data = {"name": rand_gen(str), "ontology_id": ontology.uid}
-    model = client.create_model(data["name"], data["ontology_id"])
+
+    model = client.create_model(rand_gen(str), ontology.uid)
+    yield model
+
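+    # Fixture teardown: remove the model created for this test run.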
+    model.delete()
+
+
+def test_model_run(
+    client, configured_project_with_label, current_model, data_row, rand_gen
+):
+    _, _, _, label = configured_project_with_label
+    label_id = label.uid
+    model = current_model

     name = rand_gen(str)
     config = {"batch_size": 100, "reruns": None}
diff --git a/libs/labelbox/tests/data/export/conftest.py b/libs/labelbox/tests/data/export/conftest.py
index 0836c2b9e..0a62f39c8 100644
--- a/libs/labelbox/tests/data/export/conftest.py
+++ b/libs/labelbox/tests/data/export/conftest.py
@@ -2,7 +2,6 @@
 import time
 import pytest
 from labelbox.schema.queue_mode import QueueMode
-from labelbox.schema.media_type import MediaType
 from labelbox.schema.labeling_frontend import LabelingFrontend
 from labelbox.schema.annotation_import import LabelImport, AnnotationImportState

@@ -242,7 +241,7 @@ def polygon_inference(prediction_id_mapping):

 @pytest.fixture
 def configured_project_with_ontology(
-    client, initial_dataset, ontology, rand_gen, image_url
+    client, initial_dataset, ontology, rand_gen, image_url, teardown_helpers
 ):
     dataset = initial_dataset
     project = client.create_project(
@@ -264,11 +263,13 @@ def configured_project_with_ontology(
     )
     project.data_row_ids = data_row_ids

     yield project
-    project.delete()
+    teardown_helpers.teardown_project_labels_ontology_feature_schemas(project)


 @pytest.fixture
-def configured_project_without_data_rows(client, ontology, rand_gen):
+def configured_project_without_data_rows(
+    client, ontology, rand_gen, teardown_helpers
+):
     project = client.create_project(
@@ -279,7 +280,7 @@ def configured_project_without_data_rows(
     )[0]
     project.setup(editor, ontology)
     yield project
-    project.delete()
+    teardown_helpers.teardown_project_labels_ontology_feature_schemas(project)


 @pytest.fixture
diff --git a/libs/labelbox/tests/data/test_data_row_metadata.py b/libs/labelbox/tests/data/test_data_row_metadata.py
index 9a3690776..891cab9be 100644
--- a/libs/labelbox/tests/data/test_data_row_metadata.py
+++ b/libs/labelbox/tests/data/test_data_row_metadata.py
@@ -92,21 +92,6 @@ def make_named_metadata(dr_id) -> DataRowMetadata:
     return metadata


-@pytest.mark.skip(reason="broken export v1 api, to be retired soon")
-def test_export_empty_metadata(
-    client, configured_project_with_label, wait_for_data_row_processing
-):
-    project, _, data_row, _ = configured_project_with_label
-    data_row = wait_for_data_row_processing(client, data_row)
-
-    export_task = project.export(params={"metadata_fields": True})
-    export_task.wait_till_done()
-    stream = export_task.get_buffered_stream()
-    data_row = [data_row.json for data_row in stream][0]
-
-    assert data_row["metadata_fields"] == []
-
-
 def test_bulk_export_datarow_metadata(data_row, mdo: DataRowMetadataOntology):
     metadata = make_metadata(data_row.uid)
     mdo.bulk_upsert([metadata])
diff --git a/libs/labelbox/tests/integration/conftest.py b/libs/labelbox/tests/integration/conftest.py
index d37287fe8..c917a6164 100644
--- a/libs/labelbox/tests/integration/conftest.py
+++ b/libs/labelbox/tests/integration/conftest.py
@@ -113,7 +113,7 @@ def configured_project(

 @pytest.fixture
 def configured_project_with_complex_ontology(
-    client, initial_dataset, rand_gen, image_url
+    client, initial_dataset, rand_gen, image_url, teardown_helpers
 ):
     project = client.create_project(
         name=rand_gen(str),
@@ -178,7 +178,7 @@ def configured_project_with_complex_ontology(
     project.setup(editor, ontology.asdict())

     yield [project, data_row]
-    project.delete()
+    teardown_helpers.teardown_project_labels_ontology_feature_schemas(project)


 @pytest.fixture
diff --git a/libs/labelbox/tests/integration/test_feature_schema.py b/libs/labelbox/tests/integration/test_feature_schema.py
index 1dc940f08..46ec8c067 100644
--- a/libs/labelbox/tests/integration/test_feature_schema.py
+++ b/libs/labelbox/tests/integration/test_feature_schema.py
@@ -58,9 +58,8 @@ def test_throws_an_error_if_feature_schema_to_delete_doesnt_exist(client):
         client.delete_unused_feature_schema("doesntexist")


-def test_updates_a_feature_schema_title(client):
-    tool = client.upsert_feature_schema(point.asdict())
-    feature_schema_id = tool.normalized["featureSchemaId"]
+def test_updates_a_feature_schema_title(client, feature_schema):
+    feature_schema_id = feature_schema.normalized["featureSchemaId"]
     new_title = "new title"
     updated_feature_schema = client.update_feature_schema_title(
         feature_schema_id, new_title
@@ -68,20 +67,16 @@ def test_updates_a_feature_schema_title(client, feature_schema):

     assert updated_feature_schema.normalized["name"] == new_title

-    client.delete_unused_feature_schema(feature_schema_id)
-

 def test_throws_an_error_when_updating_a_feature_schema_with_empty_title(
-    client,
+    client, feature_schema
 ):
-    tool = client.upsert_feature_schema(point.asdict())
+    tool = feature_schema
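+    # tool now comes from the shared feature_schema fixture (which is
+    # expected to handle cleanup), replacing the per-test upsert/delete calls.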
     feature_schema_id = tool.normalized["featureSchemaId"]

     with pytest.raises(Exception):
         client.update_feature_schema_title(feature_schema_id, "")

-    client.delete_unused_feature_schema(feature_schema_id)
-

 def test_throws_an_error_when_updating_not_existing_feature_schema(client):
     with pytest.raises(Exception):
@@ -107,8 +102,8 @@ def test_updates_a_feature_schema(client, feature_schema):

     assert updated_feature_schema.normalized["name"] == "new name"


-def test_does_not_include_used_feature_schema(client):
-    tool = client.upsert_feature_schema(point.asdict())
+def test_does_not_include_used_feature_schema(client, feature_schema):
+    tool = feature_schema
     feature_schema_id = tool.normalized["featureSchemaId"]
     ontology = client.create_ontology_from_feature_schemas(
         name="ontology name",
@@ -120,4 +115,3 @@ def test_does_not_include_used_feature_schema(client):
     assert feature_schema_id not in unused_feature_schemas

     client.delete_unused_ontology(ontology.uid)
-    client.delete_unused_feature_schema(feature_schema_id)
diff --git a/libs/labelbox/tests/unit/test_labeling_service_dashboard.py b/libs/labelbox/tests/unit/test_labeling_service_dashboard.py
index 8ecdef2f1..061efbadf 100644
--- a/libs/labelbox/tests/unit/test_labeling_service_dashboard.py
+++ b/libs/labelbox/tests/unit/test_labeling_service_dashboard.py
@@ -5,23 +5,23 @@

 def test_no_tasks_remaining_count():
     labeling_service_dashboard_data = {
-        'id': 'cm0eeo4c301lg07061phfhva0',
-        'name': 'TestStatus',
-        'boostRequestedAt': '2024-08-28T22:08:07.446Z',
-        'boostUpdatedAt': '2024-08-28T22:08:07.446Z',
-        'boostRequestedBy': None,
-        'boostStatus': 'SET_UP',
-        'dataRowsCount': 0,
-        'dataRowsDoneCount': 0,
-        'dataRowsInReviewCount': 0,
-        'dataRowsInReworkCount': 0,
-        'tasksTotalCount': 0,
-        'tasksCompletedCount': 0,
-        'tasksRemainingCount': 0,
-        'mediaType': 'image',
-        'editorTaskType': None,
-        'tags': [],
-        'client': MagicMock()
+        "id": "cm0eeo4c301lg07061phfhva0",
+        "name": "TestStatus",
+        "boostRequestedAt": "2024-08-28T22:08:07.446Z",
+        "boostUpdatedAt": "2024-08-28T22:08:07.446Z",
+        "boostRequestedBy": None,
+        "boostStatus": "SET_UP",
+        "dataRowsCount": 0,
+        "dataRowsDoneCount": 0,
+        "dataRowsInReviewCount": 0,
+        "dataRowsInReworkCount": 0,
+        "tasksTotalCount": 0,
+        "tasksCompletedCount": 0,
+        "tasksRemainingCount": 0,
+        "mediaType": "image",
+        "editorTaskType": None,
+        "tags": [],
+        "client": MagicMock(),
     }
     lsd = LabelingServiceDashboard(**labeling_service_dashboard_data)
     assert lsd.tasks_remaining_count is None
@@ -29,23 +29,23 @@ def test_tasks_remaining_count_exists():
     labeling_service_dashboard_data = {
-        'id': 'cm0eeo4c301lg07061phfhva0',
-        'name': 'TestStatus',
-        'boostRequestedAt': '2024-08-28T22:08:07.446Z',
-        'boostUpdatedAt': '2024-08-28T22:08:07.446Z',
-        'boostRequestedBy': None,
-        'boostStatus': 'SET_UP',
-        'dataRowsCount': 0,
-        'dataRowsDoneCount': 0,
-        'dataRowsInReviewCount': 0,
-        'dataRowsInReworkCount': 0,
-        'tasksTotalCount': 0,
-        'tasksCompletedCount': 0,
-        'tasksRemainingCount': 1,
-        'mediaType': 'image',
-        'editorTaskType': None,
-        'tags': [],
-        'client': MagicMock()
+        "id": "cm0eeo4c301lg07061phfhva0",
+        "name": "TestStatus",
+        "boostRequestedAt": "2024-08-28T22:08:07.446Z",
+        "boostUpdatedAt": "2024-08-28T22:08:07.446Z",
+        "boostRequestedBy": None,
+        "boostStatus": "SET_UP",
+        "dataRowsCount": 0,
+        "dataRowsDoneCount": 0,
+        "dataRowsInReviewCount": 0,
+        "dataRowsInReworkCount": 0,
+        "tasksTotalCount": 0,
+        "tasksCompletedCount": 0,
+        "tasksRemainingCount": 1,
+        "mediaType": "image",
+        "editorTaskType": None,
+        "tags": [],
+        "client": MagicMock(),
     }
     lsd = LabelingServiceDashboard(**labeling_service_dashboard_data)
     assert lsd.tasks_remaining_count == 1
@@ -53,23 +53,23 @@ def test_tasks_total_no_tasks_remaining_count():
     labeling_service_dashboard_data = {
-        'id': 'cm0eeo4c301lg07061phfhva0',
-        'name': 'TestStatus',
-        'boostRequestedAt': '2024-08-28T22:08:07.446Z',
-        'boostUpdatedAt': '2024-08-28T22:08:07.446Z',
-        'boostRequestedBy': None,
-        'boostStatus': 'SET_UP',
-        'dataRowsCount': 0,
-        'dataRowsDoneCount': 0,
-        'dataRowsInReviewCount': 1,
-        'dataRowsInReworkCount': 0,
-        'tasksTotalCount': 1,
-        'tasksCompletedCount': 0,
-        'tasksRemainingCount': 0,
-        'mediaType': 'image',
-        'editorTaskType': None,
-        'tags': [],
-        'client': MagicMock()
+        "id": "cm0eeo4c301lg07061phfhva0",
+        "name": "TestStatus",
+        "boostRequestedAt": "2024-08-28T22:08:07.446Z",
+        "boostUpdatedAt": "2024-08-28T22:08:07.446Z",
+        "boostRequestedBy": None,
+        "boostStatus": "SET_UP",
+        "dataRowsCount": 0,
+        "dataRowsDoneCount": 0,
+        "dataRowsInReviewCount": 1,
+        "dataRowsInReworkCount": 0,
+        "tasksTotalCount": 1,
+        "tasksCompletedCount": 0,
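+        # Unlike the all-zero payloads above, tasksTotalCount is nonzero
+        # here, so a remaining count of 0 surfaces as 0 rather than None.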
"dataRowsDoneCount": 0, + "dataRowsInReviewCount": 0, + "dataRowsInReworkCount": 0, + "tasksTotalCount": 0, + "tasksCompletedCount": 0, + "tasksRemainingCount": 1, + "mediaType": "image", + "editorTaskType": None, + "tags": [], + "client": MagicMock(), } lsd = LabelingServiceDashboard(**labeling_service_dashboard_data) assert lsd.tasks_remaining_count == 1 @@ -53,23 +53,23 @@ def test_tasks_remaining_count_exists(): def test_tasks_total_no_tasks_remaining_count(): labeling_service_dashboard_data = { - 'id': 'cm0eeo4c301lg07061phfhva0', - 'name': 'TestStatus', - 'boostRequestedAt': '2024-08-28T22:08:07.446Z', - 'boostUpdatedAt': '2024-08-28T22:08:07.446Z', - 'boostRequestedBy': None, - 'boostStatus': 'SET_UP', - 'dataRowsCount': 0, - 'dataRowsDoneCount': 0, - 'dataRowsInReviewCount': 1, - 'dataRowsInReworkCount': 0, - 'tasksTotalCount': 1, - 'tasksCompletedCount': 0, - 'tasksRemainingCount': 0, - 'mediaType': 'image', - 'editorTaskType': None, - 'tags': [], - 'client': MagicMock() + "id": "cm0eeo4c301lg07061phfhva0", + "name": "TestStatus", + "boostRequestedAt": "2024-08-28T22:08:07.446Z", + "boostUpdatedAt": "2024-08-28T22:08:07.446Z", + "boostRequestedBy": None, + "boostStatus": "SET_UP", + "dataRowsCount": 0, + "dataRowsDoneCount": 0, + "dataRowsInReviewCount": 1, + "dataRowsInReworkCount": 0, + "tasksTotalCount": 1, + "tasksCompletedCount": 0, + "tasksRemainingCount": 0, + "mediaType": "image", + "editorTaskType": None, + "tags": [], + "client": MagicMock(), } lsd = LabelingServiceDashboard(**labeling_service_dashboard_data) assert lsd.tasks_remaining_count == 0