From f26397c6409b446a6a73879ac46d22d48269979a Mon Sep 17 00:00:00 2001 From: Val Brodsky Date: Tue, 24 Sep 2024 14:17:37 -0700 Subject: [PATCH 1/7] Add project validation input --- libs/labelbox/src/labelbox/client.py | 103 +----------------- .../src/labelbox/project_validation.py | 82 ++++++++++++++ .../tests/integration/test_project.py | 27 +++-- 3 files changed, 104 insertions(+), 108 deletions(-) create mode 100644 libs/labelbox/src/labelbox/project_validation.py diff --git a/libs/labelbox/src/labelbox/client.py b/libs/labelbox/src/labelbox/client.py index cccd23be1..f11e32d0a 100644 --- a/libs/labelbox/src/labelbox/client.py +++ b/libs/labelbox/src/labelbox/client.py @@ -23,6 +23,7 @@ from labelbox.orm.db_object import DbObject from labelbox.orm.model import Entity, Field from labelbox.pagination import PaginatedCollection +from labelbox.project_validation import _CoreProjectInput from labelbox.schema import role from labelbox.schema.catalog import Catalog from labelbox.schema.data_row import DataRow @@ -632,7 +633,8 @@ def create_project(self, **kwargs) -> Project: kwargs.pop("append_to_existing_dataset", None) kwargs.pop("data_row_count", None) kwargs.pop("editor_task_type", None) - return self._create_project(**kwargs) + input = _CoreProjectInput(**kwargs) + return self._create_project(input) @overload def create_model_evaluation_project( @@ -820,103 +822,10 @@ def create_response_creation_project(self, **kwargs) -> Project: return self._create_project(**kwargs) - def _create_project(self, **kwargs) -> Project: - auto_audit_percentage = kwargs.get("auto_audit_percentage") - auto_audit_number_of_labels = kwargs.get("auto_audit_number_of_labels") - if ( - auto_audit_percentage is not None - or auto_audit_number_of_labels is not None - ): - raise ValueError( - "quality_modes must be set instead of auto_audit_percentage or auto_audit_number_of_labels." - ) - - name = kwargs.get("name") - if name is None or not name.strip(): - raise ValueError("project name must be a valid string.") - - queue_mode = kwargs.get("queue_mode") - if queue_mode is QueueMode.Dataset: - raise ValueError( - "Dataset queue mode is deprecated. Please prefer Batch queue mode." - ) - elif queue_mode is QueueMode.Batch: - logger.warning( - "Passing a queue mode of batch is redundant and will soon no longer be supported." - ) - - media_type = kwargs.get("media_type") - if media_type and MediaType.is_supported(media_type): - media_type_value = media_type.value - elif media_type: - raise TypeError( - f"{media_type} is not a valid media type. Use" - f" any of {MediaType.get_supported_members()}" - " from MediaType. Example: MediaType.Image." - ) - else: - logger.warning( - "Creating a project without specifying media_type" - " through this method will soon no longer be supported." - ) - media_type_value = None - - quality_modes = kwargs.get("quality_modes") - quality_mode = kwargs.get("quality_mode") - if quality_mode: - logger.warning( - "Passing quality_mode is deprecated and will soon no longer be supported. Use quality_modes instead." - ) - - if quality_modes and quality_mode: - raise ValueError( - "Cannot use both quality_modes and quality_mode at the same time. Use one or the other." 
- ) - - if not quality_modes and not quality_mode: - logger.info("Defaulting quality modes to Benchmark and Consensus.") - - data = kwargs - data.pop("quality_modes", None) - data.pop("quality_mode", None) - - # check if quality_modes is a set, if not, convert to set - quality_modes_set = quality_modes - if quality_modes and not isinstance(quality_modes, set): - quality_modes_set = set(quality_modes) - if quality_mode: - quality_modes_set = {quality_mode} - - if ( - quality_modes_set is None - or len(quality_modes_set) == 0 - or quality_modes_set - == {QualityMode.Benchmark, QualityMode.Consensus} - ): - data["auto_audit_number_of_labels"] = ( - CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS - ) - data["auto_audit_percentage"] = CONSENSUS_AUTO_AUDIT_PERCENTAGE - data["is_benchmark_enabled"] = True - data["is_consensus_enabled"] = True - elif quality_modes_set == {QualityMode.Benchmark}: - data["auto_audit_number_of_labels"] = ( - BENCHMARK_AUTO_AUDIT_NUMBER_OF_LABELS - ) - data["auto_audit_percentage"] = BENCHMARK_AUTO_AUDIT_PERCENTAGE - data["is_benchmark_enabled"] = True - elif quality_modes_set == {QualityMode.Consensus}: - data["auto_audit_number_of_labels"] = ( - CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS - ) - data["auto_audit_percentage"] = CONSENSUS_AUTO_AUDIT_PERCENTAGE - data["is_consensus_enabled"] = True - else: - raise ValueError( - f"{quality_modes_set} is not a valid quality modes set. Allowed values are [Benchmark, Consensus]" - ) + def _create_project(self, input: _CoreProjectInput) -> Project: + media_type_value = input.media_type.value - params = {**data} + params = input.model_dump(exclude_none=True) if media_type_value: params["media_type"] = media_type_value diff --git a/libs/labelbox/src/labelbox/project_validation.py b/libs/labelbox/src/labelbox/project_validation.py new file mode 100644 index 000000000..dda0c4a8d --- /dev/null +++ b/libs/labelbox/src/labelbox/project_validation.py @@ -0,0 +1,82 @@ +from typing import Optional, Set + +from pydantic import BaseModel, ConfigDict, Field, model_validator + +from labelbox.schema.media_type import MediaType +from labelbox.schema.quality_mode import ( + BENCHMARK_AUTO_AUDIT_NUMBER_OF_LABELS, + BENCHMARK_AUTO_AUDIT_PERCENTAGE, + CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS, + CONSENSUS_AUTO_AUDIT_PERCENTAGE, + QualityMode, +) +from labelbox.schema.queue_mode import QueueMode + + +class _CoreProjectInput(BaseModel): + name: str + description: Optional[str] = None + media_type: MediaType + queue_mode: QueueMode = Field(default=QueueMode.Batch, frozen=True) + auto_audit_percentage: Optional[float] = None + auto_audit_number_of_labels: Optional[int] = None + quality_modes: Optional[Set[QualityMode]] = Field( + default={QualityMode.Benchmark, QualityMode.Consensus}, exclude=True + ) + is_benchmark_enabled: Optional[bool] = None + is_consensus_enabled: Optional[bool] = None + dataset_name_or_id: Optional[str] = None + append_to_existing_dataset: Optional[bool] = None + + model_config = ConfigDict(extra="forbid") + + @model_validator(mode="after") + def validate_fields(self): + if ( + self.auto_audit_percentage is not None + and self.auto_audit_number_of_labels is not None + ): + raise ValueError( + "quality_modes must be set instead of auto_audit_percentage or auto_audit_number_of_labels." 
+ ) + + if not self.name.strip(): + raise ValueError("project name must be a valid string.") + + if self.quality_modes == { + QualityMode.Benchmark, + QualityMode.Consensus, + }: + self._set_quality_mode_attributes( + CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS, + CONSENSUS_AUTO_AUDIT_PERCENTAGE, + is_benchmark_enabled=True, + is_consensus_enabled=True, + ) + elif self.quality_modes == {QualityMode.Benchmark}: + self._set_quality_mode_attributes( + BENCHMARK_AUTO_AUDIT_NUMBER_OF_LABELS, + BENCHMARK_AUTO_AUDIT_PERCENTAGE, + is_benchmark_enabled=True, + ) + elif self.quality_modes == {QualityMode.Consensus}: + self._set_quality_mode_attributes( + data, + CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS, + CONSENSUS_AUTO_AUDIT_PERCENTAGE, + is_consensus_enabled=True, + ) + + return self + + def _set_quality_mode_attributes( + self, + number_of_labels, + percentage, + is_benchmark_enabled=False, + is_consensus_enabled=False, + ): + self.auto_audit_number_of_labels = number_of_labels + self.auto_audit_percentage = percentage + self.is_benchmark_enabled = is_benchmark_enabled + self.is_consensus_enabled = is_consensus_enabled diff --git a/libs/labelbox/tests/integration/test_project.py b/libs/labelbox/tests/integration/test_project.py index 6f0f74e35..12e9d3bda 100644 --- a/libs/labelbox/tests/integration/test_project.py +++ b/libs/labelbox/tests/integration/test_project.py @@ -7,6 +7,7 @@ from lbox.exceptions import InvalidQueryError from labelbox import Dataset, LabelingFrontend, Project +from labelbox.schema import media_type from labelbox.schema.media_type import MediaType from labelbox.schema.quality_mode import QualityMode from labelbox.schema.queue_mode import QueueMode @@ -51,7 +52,7 @@ def data_for_project_test(client, rand_gen): def _create_project(name: str = None): if name is None: name = rand_gen(str) - project = client.create_project(name=name) + project = client.create_project(name=name, media_type=MediaType.Image) projects.append(project) return project @@ -140,10 +141,6 @@ def test_extend_reservations(project): project.extend_reservations("InvalidQueueType") -@pytest.mark.skipif( - condition=os.environ["LABELBOX_TEST_ENVIRON"] == "onprem", - reason="new mutation does not work for onprem", -) def test_attach_instructions(client, project): with pytest.raises(ValueError) as execinfo: project.upsert_instructions("tests/integration/media/sample_pdf.pdf") @@ -248,9 +245,11 @@ def test_media_type(client, project: Project, rand_gen): assert isinstance(project.media_type, MediaType) # Update test - project = client.create_project(name=rand_gen(str)) - project.update(media_type=MediaType.Image) - assert project.media_type == MediaType.Image + project = client.create_project( + name=rand_gen(str), media_type=MediaType.Image + ) + project.update(media_type=MediaType.Text) + assert project.media_type == MediaType.Text project.delete() for media_type in MediaType.get_supported_members(): @@ -271,13 +270,16 @@ def test_media_type(client, project: Project, rand_gen): def test_queue_mode(client, rand_gen): project = client.create_project( - name=rand_gen(str) + name=rand_gen(str), + media_type=MediaType.Image, ) # defaults to benchmark and consensus assert project.auto_audit_number_of_labels == 3 assert project.auto_audit_percentage == 0 project = client.create_project( - name=rand_gen(str), quality_modes=[QualityMode.Benchmark] + name=rand_gen(str), + quality_modes=[QualityMode.Benchmark], + media_type=MediaType.Image, ) assert project.auto_audit_number_of_labels == 1 assert project.auto_audit_percentage == 1 
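The audit values asserted in this test come from the quality-mode resolution that the new _CoreProjectInput validator performs. A minimal standalone sketch of that mapping (illustrative only, not part of the patch; the helper name is hypothetical, and the (3, 0) and (1, 1) pairs are taken from the assertions in this test):

from enum import Enum
from typing import Optional, Set


class QualityMode(str, Enum):
    Benchmark = "BENCHMARK"
    Consensus = "CONSENSUS"


def resolve_audit_settings(quality_modes: Optional[Set[QualityMode]]):
    # None, an empty set, or {Benchmark, Consensus} all resolve to the
    # consensus defaults asserted in this test: 3 labels, audit percentage 0.
    if not quality_modes or quality_modes == {
        QualityMode.Benchmark,
        QualityMode.Consensus,
    }:
        return 3, 0
    if quality_modes == {QualityMode.Benchmark}:
        return 1, 1  # benchmark only: 1 label, audit percentage 1
    if quality_modes == {QualityMode.Consensus}:
        return 3, 0
    raise ValueError(f"{quality_modes} is not a valid quality modes set.")


assert resolve_audit_settings(None) == (3, 0)
assert resolve_audit_settings({QualityMode.Benchmark}) == (1, 1)
assert resolve_audit_settings({QualityMode.Consensus}) == (3, 0)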
@@ -285,13 +287,16 @@ def test_queue_mode(client, rand_gen): project = client.create_project( name=rand_gen(str), quality_modes=[QualityMode.Benchmark, QualityMode.Consensus], + media_type=MediaType.Image, ) assert project.auto_audit_number_of_labels == 3 assert project.auto_audit_percentage == 0 def test_label_count(client, configured_batch_project_with_label): - project = client.create_project(name="test label count") + project = client.create_project( + name="test label count", media_type=MediaType.Image + ) assert project.get_label_count() == 0 project.delete() From 1f4a377fb89c511a96d40c0066f133ab97b69258 Mon Sep 17 00:00:00 2001 From: Val Brodsky Date: Tue, 24 Sep 2024 14:36:27 -0700 Subject: [PATCH 2/7] Use _CoreProjectInput in all create_project methods, create_model_evaluation_project, create_offline_model_evaluation_project, create_prompt_response_generation_project, create_response_creation_project --- libs/labelbox/src/labelbox/client.py | 205 +++++++++++------- .../src/labelbox/project_validation.py | 6 + .../src/labelbox/schema/ontology_kind.py | 2 +- 3 files changed, 136 insertions(+), 77 deletions(-) diff --git a/libs/labelbox/src/labelbox/client.py b/libs/labelbox/src/labelbox/client.py index f11e32d0a..d79cb1568 100644 --- a/libs/labelbox/src/labelbox/client.py +++ b/libs/labelbox/src/labelbox/client.py @@ -8,7 +8,7 @@ import urllib.parse from collections import defaultdict from types import MappingProxyType -from typing import Any, Callable, Dict, List, Optional, Union, overload +from typing import Any, Callable, Dict, List, Optional, Set, Union, overload import lbox.exceptions import requests @@ -597,7 +597,20 @@ def create_dataset( raise e return dataset - def create_project(self, **kwargs) -> Project: + def create_project( + self, + name: str, + media_type: MediaType, + description: Optional[str] = None, + auto_audit_percentage: Optional[float] = None, + auto_audit_number_of_labels: Optional[int] = None, + quality_modes: Optional[Set[QualityMode]] = { + QualityMode.Benchmark, + QualityMode.Consensus, + }, + is_benchmark_enabled: Optional[bool] = None, + is_consensus_enabled: Optional[bool] = None, + ) -> Project: """Creates a Project object on the server. Attribute values are passed as keyword arguments. 
@@ -628,40 +641,33 @@ def create_project(self, **kwargs) -> Project: dataset_name_or_id, append_to_existing_dataset, data_row_count, editor_task_type They are not used for general projects and not supported in this method """ - # The following arguments are not supported for general projects, only for chat model evaluation projects - kwargs.pop("dataset_name_or_id", None) - kwargs.pop("append_to_existing_dataset", None) - kwargs.pop("data_row_count", None) - kwargs.pop("editor_task_type", None) - input = _CoreProjectInput(**kwargs) - return self._create_project(input) - - @overload - def create_model_evaluation_project( - self, - dataset_name: str, - dataset_id: str = None, - data_row_count: int = 100, - **kwargs, - ) -> Project: - pass - - @overload - def create_model_evaluation_project( - self, - dataset_id: str, - dataset_name: str = None, - data_row_count: int = 100, - **kwargs, - ) -> Project: - pass + input = { + "name": name, + "description": description, + "media_type": media_type, + "auto_audit_percentage": auto_audit_percentage, + "auto_audit_number_of_labels": auto_audit_number_of_labels, + "quality_modes": quality_modes, + "is_benchmark_enabled": is_benchmark_enabled, + "is_consensus_enabled": is_consensus_enabled, + } + return self._create_project(_CoreProjectInput(**input)) def create_model_evaluation_project( self, + name: str, + description: Optional[str] = None, + auto_audit_percentage: Optional[float] = None, + auto_audit_number_of_labels: Optional[int] = None, + quality_modes: Optional[Set[QualityMode]] = { + QualityMode.Benchmark, + QualityMode.Consensus, + }, + is_benchmark_enabled: Optional[bool] = None, + is_consensus_enabled: Optional[bool] = None, dataset_id: Optional[str] = None, dataset_name: Optional[str] = None, data_row_count: int = 100, - **kwargs, ) -> Project: """ Use this method exclusively to create a chat model evaluation project. @@ -692,8 +698,6 @@ def create_model_evaluation_project( raise ValueError( "dataset_name or data_set_id must be present and not be an empty string." 
) - if data_row_count <= 0: - raise ValueError("data_row_count must be a positive integer.") if dataset_id: append_to_existing_dataset = True @@ -702,15 +706,38 @@ def create_model_evaluation_project( append_to_existing_dataset = False dataset_name_or_id = dataset_name - kwargs["media_type"] = MediaType.Conversational - kwargs["dataset_name_or_id"] = dataset_name_or_id - kwargs["append_to_existing_dataset"] = append_to_existing_dataset - kwargs["data_row_count"] = data_row_count - kwargs["editor_task_type"] = EditorTaskType.ModelChatEvaluation.value + media_type = MediaType.Conversational + editor_task_type = EditorTaskType.ModelChatEvaluation - return self._create_project(**kwargs) + input = { + "name": name, + "description": description, + "media_type": media_type, + "auto_audit_percentage": auto_audit_percentage, + "auto_audit_number_of_labels": auto_audit_number_of_labels, + "quality_modes": quality_modes, + "is_benchmark_enabled": is_benchmark_enabled, + "is_consensus_enabled": is_consensus_enabled, + "dataset_name_or_id": dataset_name_or_id, + "append_to_existing_dataset": append_to_existing_dataset, + "data_row_count": data_row_count, + "editor_task_type": editor_task_type, + } + return self._create_project(_CoreProjectInput(**input)) - def create_offline_model_evaluation_project(self, **kwargs) -> Project: + def create_offline_model_evaluation_project( + self, + name: str, + description: Optional[str] = None, + auto_audit_percentage: Optional[float] = None, + auto_audit_number_of_labels: Optional[int] = None, + quality_modes: Optional[Set[QualityMode]] = { + QualityMode.Benchmark, + QualityMode.Consensus, + }, + is_benchmark_enabled: Optional[bool] = None, + is_consensus_enabled: Optional[bool] = None, + ) -> Project: """ Creates a project for offline model evaluation. Args: @@ -718,26 +745,35 @@ def create_offline_model_evaluation_project(self, **kwargs) -> Project: Returns: Project: The created project """ - kwargs["media_type"] = ( - MediaType.Conversational - ) # Only Conversational is supported - kwargs["editor_task_type"] = ( - EditorTaskType.OfflineModelChatEvaluation.value - ) # Special editor task type for offline model evaluation - - # The following arguments are not supported for offline model evaluation - kwargs.pop("dataset_name_or_id", None) - kwargs.pop("append_to_existing_dataset", None) - kwargs.pop("data_row_count", None) - - return self._create_project(**kwargs) + input = { + "name": name, + "description": description, + "media_type": MediaType.Conversational, + "auto_audit_percentage": auto_audit_percentage, + "auto_audit_number_of_labels": auto_audit_number_of_labels, + "quality_modes": quality_modes, + "is_benchmark_enabled": is_benchmark_enabled, + "is_consensus_enabled": is_consensus_enabled, + "editor_task_type": EditorTaskType.OfflineModelChatEvaluation, + } + return self._create_project(_CoreProjectInput(**input)) def create_prompt_response_generation_project( self, + name: str, + media_type: MediaType, + description: Optional[str] = None, + auto_audit_percentage: Optional[float] = None, + auto_audit_number_of_labels: Optional[int] = None, + quality_modes: Optional[Set[QualityMode]] = { + QualityMode.Benchmark, + QualityMode.Consensus, + }, + is_benchmark_enabled: Optional[bool] = None, + is_consensus_enabled: Optional[bool] = None, dataset_id: Optional[str] = None, dataset_name: Optional[str] = None, data_row_count: int = 100, - **kwargs, ) -> Project: """ Use this method exclusively to create a prompt and response generation project. 
@@ -776,9 +812,6 @@ def create_prompt_response_generation_project( "Only provide a dataset_name or dataset_id, not both." ) - if data_row_count <= 0: - raise ValueError("data_row_count must be a positive integer.") - if dataset_id: append_to_existing_dataset = True dataset_name_or_id = dataset_id @@ -786,7 +819,7 @@ def create_prompt_response_generation_project( append_to_existing_dataset = False dataset_name_or_id = dataset_name - if "media_type" in kwargs and kwargs.get("media_type") not in [ + if media_type not in [ MediaType.LLMPromptCreation, MediaType.LLMPromptResponseCreation, ]: @@ -794,15 +827,34 @@ def create_prompt_response_generation_project( "media_type must be either LLMPromptCreation or LLMPromptResponseCreation" ) - kwargs["dataset_name_or_id"] = dataset_name_or_id - kwargs["append_to_existing_dataset"] = append_to_existing_dataset - kwargs["data_row_count"] = data_row_count - - kwargs.pop("editor_task_type", None) - - return self._create_project(**kwargs) + input = { + "name": name, + "description": description, + "media_type": media_type, + "auto_audit_percentage": auto_audit_percentage, + "auto_audit_number_of_labels": auto_audit_number_of_labels, + "quality_modes": quality_modes, + "is_benchmark_enabled": is_benchmark_enabled, + "is_consensus_enabled": is_consensus_enabled, + "dataset_name_or_id": dataset_name_or_id, + "append_to_existing_dataset": append_to_existing_dataset, + "data_row_count": data_row_count, + } + return self._create_project(_CoreProjectInput(**input)) - def create_response_creation_project(self, **kwargs) -> Project: + def create_response_creation_project( + self, + name: str, + description: Optional[str] = None, + auto_audit_percentage: Optional[float] = None, + auto_audit_number_of_labels: Optional[int] = None, + quality_modes: Optional[Set[QualityMode]] = { + QualityMode.Benchmark, + QualityMode.Consensus, + }, + is_benchmark_enabled: Optional[bool] = None, + is_consensus_enabled: Optional[bool] = None, + ) -> Project: """ Creates a project for response creation. 
Args: @@ -810,17 +862,18 @@ def create_response_creation_project(self, **kwargs) -> Project: Returns: Project: The created project """ - kwargs["media_type"] = MediaType.Text # Only Text is supported - kwargs["editor_task_type"] = ( - EditorTaskType.ResponseCreation.value - ) # Special editor task type for response creation projects - - # The following arguments are not supported for response creation projects - kwargs.pop("dataset_name_or_id", None) - kwargs.pop("append_to_existing_dataset", None) - kwargs.pop("data_row_count", None) - - return self._create_project(**kwargs) + input = { + "name": name, + "description": description, + "media_type": MediaType.Text, # Only Text is supported + "auto_audit_percentage": auto_audit_percentage, + "auto_audit_number_of_labels": auto_audit_number_of_labels, + "quality_modes": quality_modes, + "is_benchmark_enabled": is_benchmark_enabled, + "is_consensus_enabled": is_consensus_enabled, + "editor_task_type": EditorTaskType.ResponseCreation.value, # Special editor task type for response creation projects + } + return self._create_project(_CoreProjectInput(**input)) def _create_project(self, input: _CoreProjectInput) -> Project: media_type_value = input.media_type.value diff --git a/libs/labelbox/src/labelbox/project_validation.py b/libs/labelbox/src/labelbox/project_validation.py index dda0c4a8d..fde2ff217 100644 --- a/libs/labelbox/src/labelbox/project_validation.py +++ b/libs/labelbox/src/labelbox/project_validation.py @@ -1,8 +1,10 @@ from typing import Optional, Set from pydantic import BaseModel, ConfigDict, Field, model_validator +from typing_extensions import Annotated from labelbox.schema.media_type import MediaType +from labelbox.schema.ontology_kind import EditorTaskType from labelbox.schema.quality_mode import ( BENCHMARK_AUTO_AUDIT_NUMBER_OF_LABELS, BENCHMARK_AUTO_AUDIT_PERCENTAGE, @@ -12,6 +14,8 @@ ) from labelbox.schema.queue_mode import QueueMode +PositiveInt = Annotated[int, Field(gt=0)] + class _CoreProjectInput(BaseModel): name: str @@ -27,6 +31,8 @@ class _CoreProjectInput(BaseModel): is_consensus_enabled: Optional[bool] = None dataset_name_or_id: Optional[str] = None append_to_existing_dataset: Optional[bool] = None + data_row_count: Optional[PositiveInt] = None + editor_task_type: Optional[EditorTaskType] = None model_config = ConfigDict(extra="forbid") diff --git a/libs/labelbox/src/labelbox/schema/ontology_kind.py b/libs/labelbox/src/labelbox/schema/ontology_kind.py index 3171b811e..79ef7d7a3 100644 --- a/libs/labelbox/src/labelbox/schema/ontology_kind.py +++ b/libs/labelbox/src/labelbox/schema/ontology_kind.py @@ -53,7 +53,7 @@ def evaluate_ontology_kind_with_media_type( return media_type -class EditorTaskType(Enum): +class EditorTaskType(str, Enum): ModelChatEvaluation = "MODEL_CHAT_EVALUATION" ResponseCreation = "RESPONSE_CREATION" OfflineModelChatEvaluation = "OFFLINE_MODEL_CHAT_EVALUATION" From 8a36b7b7f3bf2deacd2c78d864beb1d066e67bcb Mon Sep 17 00:00:00 2001 From: Val Brodsky Date: Tue, 24 Sep 2024 16:16:55 -0700 Subject: [PATCH 3/7] Added media_type to test projects --- libs/labelbox/tests/data/export/conftest.py | 11 ++++++++--- .../tests/integration/test_client_errors.py | 13 +++++++++---- libs/labelbox/tests/integration/test_filtering.py | 8 +++++--- .../tests/integration/test_legacy_project.py | 15 +++++++++++++-- 4 files changed, 35 insertions(+), 12 deletions(-) diff --git a/libs/labelbox/tests/data/export/conftest.py b/libs/labelbox/tests/data/export/conftest.py index 0a62f39c8..a6abe5742 100644 --- 
a/libs/labelbox/tests/data/export/conftest.py +++ b/libs/labelbox/tests/data/export/conftest.py @@ -1,9 +1,12 @@ -import uuid import time +import uuid + import pytest -from labelbox.schema.queue_mode import QueueMode + +from labelbox.schema.annotation_import import AnnotationImportState, LabelImport from labelbox.schema.labeling_frontend import LabelingFrontend -from labelbox.schema.annotation_import import LabelImport, AnnotationImportState +from labelbox.schema.media_type import MediaType +from labelbox.schema.queue_mode import QueueMode @pytest.fixture @@ -247,6 +250,7 @@ def configured_project_with_ontology( project = client.create_project( name=rand_gen(str), queue_mode=QueueMode.Batch, + media_type=MediaType.Image, ) editor = list( client.get_labeling_frontends(where=LabelingFrontend.name == "editor") @@ -274,6 +278,7 @@ def configured_project_without_data_rows( name=rand_gen(str), description=rand_gen(str), queue_mode=QueueMode.Batch, + media_type=MediaType.Image, ) editor = list( client.get_labeling_frontends(where=LabelingFrontend.name == "editor") diff --git a/libs/labelbox/tests/integration/test_client_errors.py b/libs/labelbox/tests/integration/test_client_errors.py index c8721dfc4..775a39d50 100644 --- a/libs/labelbox/tests/integration/test_client_errors.py +++ b/libs/labelbox/tests/integration/test_client_errors.py @@ -8,6 +8,7 @@ import labelbox.client from labelbox import Project, User +from labelbox.schema.media_type import MediaType def test_missing_api_key(): @@ -29,7 +30,7 @@ def test_bad_key(rand_gen): client = labelbox.client.Client(api_key=bad_key) with pytest.raises(lbox.exceptions.AuthenticationError) as excinfo: - client.create_project(name=rand_gen(str)) + client.create_project(name=rand_gen(str), media_type=MediaType.Image) def test_syntax_error(client): @@ -77,7 +78,7 @@ def test_network_error(client): ) with pytest.raises(lbox.exceptions.NetworkError) as excinfo: - client.create_project(name="Project name") + client.create_project(name="Project name", media_type=MediaType.Image) def test_invalid_attribute_error( @@ -86,12 +87,16 @@ def test_invalid_attribute_error( ): # Creation with pytest.raises(lbox.exceptions.InvalidAttributeError) as excinfo: - client.create_project(name="Name", invalid_field="Whatever") + client.create_project( + name="Name", invalid_field="Whatever", media_type=MediaType.Image + ) assert excinfo.value.db_object_type == Project assert excinfo.value.field == "invalid_field" # Update - project = client.create_project(name=rand_gen(str)) + project = client.create_project( + name=rand_gen(str), media_type=MediaType.Image + ) with pytest.raises(lbox.exceptions.InvalidAttributeError) as excinfo: project.update(invalid_field="Whatever") assert excinfo.value.db_object_type == Project diff --git a/libs/labelbox/tests/integration/test_filtering.py b/libs/labelbox/tests/integration/test_filtering.py index cb6f11baa..4e0f5f415 100644 --- a/libs/labelbox/tests/integration/test_filtering.py +++ b/libs/labelbox/tests/integration/test_filtering.py @@ -1,7 +1,9 @@ import pytest from lbox.exceptions import InvalidQueryError +from libs.labelbox.src.labelbox.schema.media_type import MediaType from labelbox import Project +from labelbox.schema import media_type from labelbox.schema.queue_mode import QueueMode @@ -11,9 +13,9 @@ def project_to_test_where(client, rand_gen): p_b_name = f"b-{rand_gen(str)}" p_c_name = f"c-{rand_gen(str)}" - p_a = client.create_project(name=p_a_name, queue_mode=QueueMode.Batch) - p_b = client.create_project(name=p_b_name, 
queue_mode=QueueMode.Batch) - p_c = client.create_project(name=p_c_name, queue_mode=QueueMode.Batch) + p_a = client.create_project(name=p_a_name, media_type=MediaType.Image) + p_b = client.create_project(name=p_b_name, media_type=MediaType.Image) + p_c = client.create_project(name=p_c_name, media_type=MediaType.Image) yield p_a, p_b, p_c diff --git a/libs/labelbox/tests/integration/test_legacy_project.py b/libs/labelbox/tests/integration/test_legacy_project.py index 320a2191d..88de329a6 100644 --- a/libs/labelbox/tests/integration/test_legacy_project.py +++ b/libs/labelbox/tests/integration/test_legacy_project.py @@ -1,5 +1,7 @@ import pytest +from labelbox.schema import media_type +from labelbox.schema.media_type import MediaType from labelbox.schema.queue_mode import QueueMode @@ -11,6 +13,7 @@ def test_project_dataset(client, rand_gen): client.create_project( name=rand_gen(str), queue_mode=QueueMode.Dataset, + media_type=MediaType.Image, ) @@ -19,13 +22,21 @@ def test_project_auto_audit_parameters(client, rand_gen): ValueError, match="quality_modes must be set instead of auto_audit_percentage or auto_audit_number_of_labels.", ): - client.create_project(name=rand_gen(str), auto_audit_percentage=0.5) + client.create_project( + name=rand_gen(str), + media_type=MediaType.Image, + auto_audit_percentage=0.5, + ) with pytest.raises( ValueError, match="quality_modes must be set instead of auto_audit_percentage or auto_audit_number_of_labels.", ): - client.create_project(name=rand_gen(str), auto_audit_number_of_labels=2) + client.create_project( + name=rand_gen(str), + media_type=MediaType.Image, + auto_audit_number_of_labels=2, + ) def test_project_name_parameter(client, rand_gen): From 1ada7be5f91c8ea17ab0983d61eed376c5b43512 Mon Sep 17 00:00:00 2001 From: Val Brodsky Date: Tue, 24 Sep 2024 16:36:21 -0700 Subject: [PATCH 4/7] Removed queue_mode --- .../src/labelbox/project_validation.py | 7 ++--- libs/labelbox/tests/conftest.py | 9 +----- libs/labelbox/tests/data/export/conftest.py | 20 +++++++++++-- .../test_export_video_streamable.py | 4 +-- libs/labelbox/tests/integration/conftest.py | 30 ++++++++----------- .../tests/integration/test_client_errors.py | 28 ----------------- .../tests/integration/test_filtering.py | 4 +-- .../tests/integration/test_legacy_project.py | 27 ++++------------- .../tests/integration/test_ontology.py | 11 +++---- .../tests/integration/test_project.py | 2 -- 10 files changed, 47 insertions(+), 95 deletions(-) diff --git a/libs/labelbox/src/labelbox/project_validation.py b/libs/labelbox/src/labelbox/project_validation.py index fde2ff217..0380e94f0 100644 --- a/libs/labelbox/src/labelbox/project_validation.py +++ b/libs/labelbox/src/labelbox/project_validation.py @@ -40,7 +40,7 @@ class _CoreProjectInput(BaseModel): def validate_fields(self): if ( self.auto_audit_percentage is not None - and self.auto_audit_number_of_labels is not None + or self.auto_audit_number_of_labels is not None ): raise ValueError( "quality_modes must be set instead of auto_audit_percentage or auto_audit_number_of_labels." 
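The flipped operator above is the substance of this hunk: with `and`, the guard fired only when both legacy parameters were supplied, so the tests added in the previous patch, which pass exactly one of them, never hit the ValueError. A standalone sketch of the corrected predicate (illustrative helper, not part of the patch):

def uses_legacy_audit_params(
    auto_audit_percentage=None,
    auto_audit_number_of_labels=None,
):
    # With `or`, either legacy parameter on its own now triggers the
    # "quality_modes must be set instead ..." ValueError.
    return (
        auto_audit_percentage is not None
        or auto_audit_number_of_labels is not None
    )


assert uses_legacy_audit_params(auto_audit_percentage=0.5)
assert uses_legacy_audit_params(auto_audit_number_of_labels=2)
assert not uses_legacy_audit_params()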
@@ -67,9 +67,8 @@ def validate_fields(self): ) elif self.quality_modes == {QualityMode.Consensus}: self._set_quality_mode_attributes( - data, - CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS, - CONSENSUS_AUTO_AUDIT_PERCENTAGE, + number_of_labels=CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS, + percentage=CONSENSUS_AUTO_AUDIT_PERCENTAGE, is_consensus_enabled=True, ) diff --git a/libs/labelbox/tests/conftest.py b/libs/labelbox/tests/conftest.py index d25544034..a57e6b842 100644 --- a/libs/labelbox/tests/conftest.py +++ b/libs/labelbox/tests/conftest.py @@ -32,7 +32,6 @@ from labelbox.schema.ontology import Ontology from labelbox.schema.project import Project from labelbox.schema.quality_mode import QualityMode -from labelbox.schema.queue_mode import QueueMode IMG_URL = "https://picsum.photos/200/300.jpg" MASKABLE_IMG_URL = "https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg" @@ -444,7 +443,6 @@ def conversation_entity_data_row(client, rand_gen): def project(client, rand_gen): project = client.create_project( name=rand_gen(str), - queue_mode=QueueMode.Batch, media_type=MediaType.Image, ) yield project @@ -455,8 +453,7 @@ def project(client, rand_gen): def consensus_project(client, rand_gen): project = client.create_project( name=rand_gen(str), - quality_mode=QualityMode.Consensus, - queue_mode=QueueMode.Batch, + quality_modes={QualityMode.Consensus}, media_type=MediaType.Image, ) yield project @@ -646,7 +643,6 @@ def configured_project_with_label( """ project = client.create_project( name=rand_gen(str), - queue_mode=QueueMode.Batch, media_type=MediaType.Image, ) project._wait_until_data_rows_are_processed( @@ -749,7 +745,6 @@ def configured_batch_project_with_label( """ project = client.create_project( name=rand_gen(str), - queue_mode=QueueMode.Batch, media_type=MediaType.Image, ) data_rows = [dr.uid for dr in list(dataset.data_rows())] @@ -784,7 +779,6 @@ def configured_batch_project_with_multiple_datarows( """ project = client.create_project( name=rand_gen(str), - queue_mode=QueueMode.Batch, media_type=MediaType.Image, ) global_keys = [dr.global_key for dr in data_rows] @@ -1065,7 +1059,6 @@ def configured_project_with_complex_ontology( ): project = client.create_project( name=rand_gen(str), - queue_mode=QueueMode.Batch, media_type=MediaType.Image, ) dataset = initial_dataset diff --git a/libs/labelbox/tests/data/export/conftest.py b/libs/labelbox/tests/data/export/conftest.py index a6abe5742..2bd00775f 100644 --- a/libs/labelbox/tests/data/export/conftest.py +++ b/libs/labelbox/tests/data/export/conftest.py @@ -6,7 +6,6 @@ from labelbox.schema.annotation_import import AnnotationImportState, LabelImport from labelbox.schema.labeling_frontend import LabelingFrontend from labelbox.schema.media_type import MediaType -from labelbox.schema.queue_mode import QueueMode @pytest.fixture @@ -249,7 +248,6 @@ def configured_project_with_ontology( dataset = initial_dataset project = client.create_project( name=rand_gen(str), - queue_mode=QueueMode.Batch, media_type=MediaType.Image, ) editor = list( @@ -277,7 +275,6 @@ def configured_project_without_data_rows( project = client.create_project( name=rand_gen(str), description=rand_gen(str), - queue_mode=QueueMode.Batch, media_type=MediaType.Image, ) editor = list( @@ -288,6 +285,23 @@ def configured_project_without_data_rows( teardown_helpers.teardown_project_labels_ontology_feature_schemas(project) +@pytest.fixture +def configured_video_project_without_data_rows( + client, ontology, rand_gen, teardown_helpers +): + 
project = client.create_project( + name=rand_gen(str), + description=rand_gen(str), + media_type=MediaType.Video, + ) + editor = list( + client.get_labeling_frontends(where=LabelingFrontend.name == "editor") + )[0] + project.setup(editor, ontology) + yield project + teardown_helpers.teardown_project_labels_ontology_feature_schemas(project) + + @pytest.fixture def model_run_with_data_rows( client, diff --git a/libs/labelbox/tests/data/export/streamable/test_export_video_streamable.py b/libs/labelbox/tests/data/export/streamable/test_export_video_streamable.py index 28ef6e0cf..7fa2bd6f6 100644 --- a/libs/labelbox/tests/data/export/streamable/test_export_video_streamable.py +++ b/libs/labelbox/tests/data/export/streamable/test_export_video_streamable.py @@ -21,13 +21,13 @@ def org_id(self, client): def test_export( self, client, - configured_project_without_data_rows, + configured_video_project_without_data_rows, video_data, video_data_row, bbox_video_annotation_objects, rand_gen, ): - project = configured_project_without_data_rows + project = configured_video_project_without_data_rows project_id = project.uid labels = [] diff --git a/libs/labelbox/tests/integration/conftest.py b/libs/labelbox/tests/integration/conftest.py index c917a6164..f16689950 100644 --- a/libs/labelbox/tests/integration/conftest.py +++ b/libs/labelbox/tests/integration/conftest.py @@ -1,30 +1,30 @@ -from collections import defaultdict -from itertools import islice import json import os -import sys import re +import sys import time import uuid -import requests -from types import SimpleNamespace -from typing import Type, List +from collections import defaultdict from enum import Enum -from typing import Tuple +from itertools import islice +from types import SimpleNamespace +from typing import List, Tuple, Type import pytest import requests -from labelbox import Dataset, DataRow -from labelbox import LabelingFrontend from labelbox import ( - OntologyBuilder, - Tool, - Option, Classification, + Client, + DataRow, + Dataset, + LabelingFrontend, MediaType, + OntologyBuilder, + Option, PromptResponseClassification, ResponseOption, + Tool, ) from labelbox.orm import query from labelbox.pagination import PaginatedCollection @@ -32,11 +32,9 @@ from labelbox.schema.catalog import Catalog from labelbox.schema.enums import AnnotationImportState from labelbox.schema.invite import Invite +from labelbox.schema.ontology_kind import OntologyKind from labelbox.schema.quality_mode import QualityMode -from labelbox.schema.queue_mode import QueueMode from labelbox.schema.user import User -from labelbox import Client -from labelbox.schema.ontology_kind import OntologyKind @pytest.fixture @@ -69,7 +67,6 @@ def project_pack(client): projects = [ client.create_project( name=f"user-proj-{idx}", - queue_mode=QueueMode.Batch, media_type=MediaType.Image, ) for idx in range(2) @@ -117,7 +114,6 @@ def configured_project_with_complex_ontology( ): project = client.create_project( name=rand_gen(str), - queue_mode=QueueMode.Batch, media_type=MediaType.Image, ) dataset = initial_dataset diff --git a/libs/labelbox/tests/integration/test_client_errors.py b/libs/labelbox/tests/integration/test_client_errors.py index 775a39d50..38022d1d2 100644 --- a/libs/labelbox/tests/integration/test_client_errors.py +++ b/libs/labelbox/tests/integration/test_client_errors.py @@ -81,34 +81,6 @@ def test_network_error(client): client.create_project(name="Project name", media_type=MediaType.Image) -def test_invalid_attribute_error( - client, - rand_gen, -): - # Creation - 
with pytest.raises(lbox.exceptions.InvalidAttributeError) as excinfo: - client.create_project( - name="Name", invalid_field="Whatever", media_type=MediaType.Image - ) - assert excinfo.value.db_object_type == Project - assert excinfo.value.field == "invalid_field" - - # Update - project = client.create_project( - name=rand_gen(str), media_type=MediaType.Image - ) - with pytest.raises(lbox.exceptions.InvalidAttributeError) as excinfo: - project.update(invalid_field="Whatever") - assert excinfo.value.db_object_type == Project - assert excinfo.value.field == "invalid_field" - - # Top-level-fetch - with pytest.raises(lbox.exceptions.InvalidAttributeError) as excinfo: - client.get_projects(where=User.email == "email") - assert excinfo.value.db_object_type == Project - assert excinfo.value.field == {User.email} - - @pytest.mark.skip("timeouts cause failure before rate limit") def test_api_limit_error(client): def get(arg): diff --git a/libs/labelbox/tests/integration/test_filtering.py b/libs/labelbox/tests/integration/test_filtering.py index 4e0f5f415..bba483b19 100644 --- a/libs/labelbox/tests/integration/test_filtering.py +++ b/libs/labelbox/tests/integration/test_filtering.py @@ -1,10 +1,8 @@ import pytest from lbox.exceptions import InvalidQueryError -from libs.labelbox.src.labelbox.schema.media_type import MediaType from labelbox import Project -from labelbox.schema import media_type -from labelbox.schema.queue_mode import QueueMode +from labelbox.schema.media_type import MediaType @pytest.fixture diff --git a/libs/labelbox/tests/integration/test_legacy_project.py b/libs/labelbox/tests/integration/test_legacy_project.py index 88de329a6..1f7b13847 100644 --- a/libs/labelbox/tests/integration/test_legacy_project.py +++ b/libs/labelbox/tests/integration/test_legacy_project.py @@ -1,20 +1,10 @@ +from os import name + import pytest +from pydantic import ValidationError from labelbox.schema import media_type from labelbox.schema.media_type import MediaType -from labelbox.schema.queue_mode import QueueMode - - -def test_project_dataset(client, rand_gen): - with pytest.raises( - ValueError, - match="Dataset queue mode is deprecated. Please prefer Batch queue mode.", - ): - client.create_project( - name=rand_gen(str), - queue_mode=QueueMode.Dataset, - media_type=MediaType.Image, - ) def test_project_auto_audit_parameters(client, rand_gen): @@ -29,7 +19,7 @@ def test_project_auto_audit_parameters(client, rand_gen): ) with pytest.raises( - ValueError, + ValidationError, match="quality_modes must be set instead of auto_audit_percentage or auto_audit_number_of_labels.", ): client.create_project( @@ -41,11 +31,6 @@ def test_project_auto_audit_parameters(client, rand_gen): def test_project_name_parameter(client, rand_gen): with pytest.raises( - ValueError, match="project name must be a valid string." - ): - client.create_project() - - with pytest.raises( - ValueError, match="project name must be a valid string." 
+ ValidationError, match="project name must be a valid string" ): - client.create_project(name=" ") + client.create_project(name=" ", media_type=MediaType.Image) diff --git a/libs/labelbox/tests/integration/test_ontology.py b/libs/labelbox/tests/integration/test_ontology.py index bf70536d0..c7c7c270c 100644 --- a/libs/labelbox/tests/integration/test_ontology.py +++ b/libs/labelbox/tests/integration/test_ontology.py @@ -1,11 +1,10 @@ -import pytest - -from labelbox import OntologyBuilder, MediaType, Tool -from labelbox.orm.model import Entity import json import time -from labelbox.schema.queue_mode import QueueMode +import pytest + +from labelbox import MediaType, OntologyBuilder, Tool +from labelbox.orm.model import Entity def test_feature_schema_is_not_archived(client, ontology): @@ -99,7 +98,6 @@ def test_deletes_an_ontology(client): def test_cant_delete_an_ontology_with_project(client): project = client.create_project( name="test project", - queue_mode=QueueMode.Batch, media_type=MediaType.Image, ) tool = client.upsert_feature_schema(point.asdict()) @@ -187,7 +185,6 @@ def test_does_not_include_used_ontologies(client): ) project = client.create_project( name="test project", - queue_mode=QueueMode.Batch, media_type=MediaType.Image, ) project.connect_ontology(ontology_with_project) diff --git a/libs/labelbox/tests/integration/test_project.py b/libs/labelbox/tests/integration/test_project.py index 12e9d3bda..12046eadd 100644 --- a/libs/labelbox/tests/integration/test_project.py +++ b/libs/labelbox/tests/integration/test_project.py @@ -10,14 +10,12 @@ from labelbox.schema import media_type from labelbox.schema.media_type import MediaType from labelbox.schema.quality_mode import QualityMode -from labelbox.schema.queue_mode import QueueMode def test_project(client, rand_gen): data = { "name": rand_gen(str), "description": rand_gen(str), - "queue_mode": QueueMode.Batch.Batch, "media_type": MediaType.Image, } project = client.create_project(**data) From cd5567f0ec3261ea482de539e93373a6f400cb30 Mon Sep 17 00:00:00 2001 From: Val Brodsky Date: Tue, 24 Sep 2024 17:55:28 -0700 Subject: [PATCH 5/7] Update rdoc --- libs/labelbox/src/labelbox/client.py | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/libs/labelbox/src/labelbox/client.py b/libs/labelbox/src/labelbox/client.py index d79cb1568..badc013de 100644 --- a/libs/labelbox/src/labelbox/client.py +++ b/libs/labelbox/src/labelbox/client.py @@ -619,7 +619,6 @@ def create_project( name="", description="", media_type=MediaType.Image, - queue_mode=QueueMode.Batch ) Args: @@ -627,19 +626,14 @@ def create_project( description (str): A short summary for the project media_type (MediaType): The type of assets that this project will accept queue_mode (Optional[QueueMode]): The queue mode to use - quality_mode (Optional[QualityMode]): The quality mode to use (e.g. Benchmark, Consensus). Defaults to - Benchmark quality_modes (Optional[List[QualityMode]]): The quality modes to use (e.g. Benchmark, Consensus). Defaults to Benchmark. + is_benchmark_enabled (Optional[bool]): Whether the project supports benchmark. Defaults to None. + is_consensus_enabled (Optional[bool]): Whether the project supports consensus. Defaults to None. Returns: A new Project object. Raises: - InvalidAttributeError: If the Project type does not contain - any of the attribute names given in kwargs. 
-
-        NOTE: the following attributes are used only in chat model evaluation projects:
-        dataset_name_or_id, append_to_existing_dataset, data_row_count, editor_task_type
-        They are not used for general projects and not supported in this method
+            ValueError: If inputs are invalid.
         """
         input = {
             "name": name,
@@ -675,12 +669,12 @@ def create_model_evaluation_project(
             dataset_name: When creating a new dataset, pass the name
             dataset_id: When using an existing dataset, pass the id
             data_row_count: The number of data row assets to use for the project
-            **kwargs: Additional parameters to pass to the the create_project method
+            See create_project for additional parameters
         Returns:
             Project: The created project
         Examples:
             >>> client.create_model_evaluation_project(name=project_name, dataset_name="new data set")
             >>> This creates a new dataset with a default number of rows (100), creates new project and assigns a batch of the newly created datarows to the project.
             >>> client.create_model_evaluation_project(name=project_name, dataset_name="new data set", data_row_count=10)
@@ -741,7 +735,7 @@ def create_offline_model_evaluation_project(
         """
         Creates a project for offline model evaluation.
         Args:
-            **kwargs: Additional parameters to pass see the create_project method
+            See create_project for parameters
         Returns:
             Project: The created project
         """
@@ -782,7 +776,8 @@ def create_prompt_response_generation_project(
             dataset_name: When creating a new dataset, pass the name
             dataset_id: When using an existing dataset, pass the id
             data_row_count: The number of data row assets to use for the project
-            **kwargs: Additional parameters to pass see the create_project method
+            media_type: The type of assets that this project will accept. Limited to LLMPromptCreation and LLMPromptResponseCreation
+            See create_project for additional parameters
         Returns:
             Project: The created project
@@ -858,7 +853,7 @@ def create_response_creation_project(
         """
         Creates a project for response creation.
Args: - **kwargs: Additional parameters to pass see the create_project method + See create_project for parameters Returns: Project: The created project """ From 6c16f432e2a7aca8915d0600ad335c80ef189675 Mon Sep 17 00:00:00 2001 From: Val Brodsky Date: Tue, 24 Sep 2024 17:57:14 -0700 Subject: [PATCH 6/7] Remove auto_audit inputs --- libs/labelbox/src/labelbox/client.py | 16 ------------- .../tests/integration/test_legacy_project.py | 23 ------------------- 2 files changed, 39 deletions(-) diff --git a/libs/labelbox/src/labelbox/client.py b/libs/labelbox/src/labelbox/client.py index badc013de..16f28148e 100644 --- a/libs/labelbox/src/labelbox/client.py +++ b/libs/labelbox/src/labelbox/client.py @@ -602,8 +602,6 @@ def create_project( name: str, media_type: MediaType, description: Optional[str] = None, - auto_audit_percentage: Optional[float] = None, - auto_audit_number_of_labels: Optional[int] = None, quality_modes: Optional[Set[QualityMode]] = { QualityMode.Benchmark, QualityMode.Consensus, @@ -639,8 +637,6 @@ def create_project( "name": name, "description": description, "media_type": media_type, - "auto_audit_percentage": auto_audit_percentage, - "auto_audit_number_of_labels": auto_audit_number_of_labels, "quality_modes": quality_modes, "is_benchmark_enabled": is_benchmark_enabled, "is_consensus_enabled": is_consensus_enabled, @@ -651,8 +647,6 @@ def create_model_evaluation_project( self, name: str, description: Optional[str] = None, - auto_audit_percentage: Optional[float] = None, - auto_audit_number_of_labels: Optional[int] = None, quality_modes: Optional[Set[QualityMode]] = { QualityMode.Benchmark, QualityMode.Consensus, @@ -707,8 +701,6 @@ def create_model_evaluation_project( "name": name, "description": description, "media_type": media_type, - "auto_audit_percentage": auto_audit_percentage, - "auto_audit_number_of_labels": auto_audit_number_of_labels, "quality_modes": quality_modes, "is_benchmark_enabled": is_benchmark_enabled, "is_consensus_enabled": is_consensus_enabled, @@ -723,8 +715,6 @@ def create_offline_model_evaluation_project( self, name: str, description: Optional[str] = None, - auto_audit_percentage: Optional[float] = None, - auto_audit_number_of_labels: Optional[int] = None, quality_modes: Optional[Set[QualityMode]] = { QualityMode.Benchmark, QualityMode.Consensus, @@ -743,8 +733,6 @@ def create_offline_model_evaluation_project( "name": name, "description": description, "media_type": MediaType.Conversational, - "auto_audit_percentage": auto_audit_percentage, - "auto_audit_number_of_labels": auto_audit_number_of_labels, "quality_modes": quality_modes, "is_benchmark_enabled": is_benchmark_enabled, "is_consensus_enabled": is_consensus_enabled, @@ -841,8 +829,6 @@ def create_response_creation_project( self, name: str, description: Optional[str] = None, - auto_audit_percentage: Optional[float] = None, - auto_audit_number_of_labels: Optional[int] = None, quality_modes: Optional[Set[QualityMode]] = { QualityMode.Benchmark, QualityMode.Consensus, @@ -861,8 +847,6 @@ def create_response_creation_project( "name": name, "description": description, "media_type": MediaType.Text, # Only Text is supported - "auto_audit_percentage": auto_audit_percentage, - "auto_audit_number_of_labels": auto_audit_number_of_labels, "quality_modes": quality_modes, "is_benchmark_enabled": is_benchmark_enabled, "is_consensus_enabled": is_consensus_enabled, diff --git a/libs/labelbox/tests/integration/test_legacy_project.py b/libs/labelbox/tests/integration/test_legacy_project.py index 1f7b13847..3e652f333 
100644 --- a/libs/labelbox/tests/integration/test_legacy_project.py +++ b/libs/labelbox/tests/integration/test_legacy_project.py @@ -3,32 +3,9 @@ import pytest from pydantic import ValidationError -from labelbox.schema import media_type from labelbox.schema.media_type import MediaType -def test_project_auto_audit_parameters(client, rand_gen): - with pytest.raises( - ValueError, - match="quality_modes must be set instead of auto_audit_percentage or auto_audit_number_of_labels.", - ): - client.create_project( - name=rand_gen(str), - media_type=MediaType.Image, - auto_audit_percentage=0.5, - ) - - with pytest.raises( - ValidationError, - match="quality_modes must be set instead of auto_audit_percentage or auto_audit_number_of_labels.", - ): - client.create_project( - name=rand_gen(str), - media_type=MediaType.Image, - auto_audit_number_of_labels=2, - ) - - def test_project_name_parameter(client, rand_gen): with pytest.raises( ValidationError, match="project name must be a valid string" From e1b7f666c4140fc343bb69f5522e037fb36fca1e Mon Sep 17 00:00:00 2001 From: Val Brodsky Date: Wed, 25 Sep 2024 15:12:51 -0700 Subject: [PATCH 7/7] Remove unnecessary test --- libs/labelbox/tests/unit/schema/test_user_group.py | 6 ------ pyproject.toml | 3 ++- requirements-dev.lock | 1 + requirements.lock | 7 +++++++ 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/libs/labelbox/tests/unit/schema/test_user_group.py b/libs/labelbox/tests/unit/schema/test_user_group.py index 4d78f096e..1df555a64 100644 --- a/libs/labelbox/tests/unit/schema/test_user_group.py +++ b/libs/labelbox/tests/unit/schema/test_user_group.py @@ -57,12 +57,6 @@ def setup_method(self): self.client.enable_experimental = True self.group = UserGroup(client=self.client) - def test_constructor_experimental_needed(self): - client = MagicMock(Client) - client.enable_experimental = False - with pytest.raises(RuntimeError): - group = UserGroup(client) - def test_constructor(self): group = UserGroup(self.client) diff --git a/pyproject.toml b/pyproject.toml index ebce059f5..1a4f19e04 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,7 @@ dev-dependencies = [ "pytest-timestamper>=0.0.10", "pytest-timeout>=2.3.1", "pytest-order>=1.2.1", + "pyjwt>=2.9.0", ] [tool.rye.workspace] @@ -35,7 +36,7 @@ members = ["libs/*", "examples"] [tool.pytest.ini_options] # https://github.com/pytest-dev/pytest-rerunfailures/issues/99 -addopts = "-rP -vvv --reruns 1 --reruns-delay 5 --durations=20 -n auto --cov=labelbox --import-mode=importlib --order-group-scope=module" +addopts = "-rP -vvv" markers = """ slow: marks tests as slow (deselect with '-m "not slow"') """ diff --git a/requirements-dev.lock b/requirements-dev.lock index a51fa0dcf..fcca5b15d 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -200,6 +200,7 @@ pygments==2.18.0 # via nbconvert # via rich # via sphinx +pyjwt==2.9.0 pyproj==3.5.0 # via labelbox pytest==8.2.2 diff --git a/requirements.lock b/requirements.lock index 16ed91c80..3f32f3286 100644 --- a/requirements.lock +++ b/requirements.lock @@ -50,6 +50,10 @@ jinja2==3.1.4 # via sphinx markupsafe==2.1.5 # via jinja2 +mypy==1.10.1 + # via labelbox +mypy-extensions==1.0.0 + # via mypy numpy==1.24.4 # via labelbox # via opencv-python-headless @@ -119,6 +123,8 @@ sphinxcontrib-serializinghtml==1.1.5 # via sphinx strenum==0.4.15 # via labelbox +tomli==2.0.1 + # via mypy tqdm==4.66.4 # via labelbox typeguard==4.3.0 @@ -126,6 +132,7 @@ typeguard==4.3.0 typing-extensions==4.12.2 # via annotated-types # via labelbox + # via 
mypy # via pydantic # via pydantic-core # via typeguard
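Taken together, the series replaces the **kwargs-based project creation with explicit, pydantic-validated parameters. A usage sketch of the resulting API (illustrative only; assumes a valid API key and network access, and the project name and description here are made up):

from labelbox import Client, MediaType
from labelbox.schema.quality_mode import QualityMode

client = Client(api_key="<YOUR_API_KEY>")

# media_type is now required, queue_mode is gone entirely, and
# quality_modes defaults to {QualityMode.Benchmark, QualityMode.Consensus}.
project = client.create_project(
    name="example-project",
    description="created through the refactored signature",
    media_type=MediaType.Image,
    quality_modes={QualityMode.Benchmark},
)

# Validation now lives in the pydantic model: for example, a blank name
# raises pydantic.ValidationError ("project name must be a valid string").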