
Commit 1f4a377

Author: Val Brodsky
Parent: f26397c

Use _CoreProjectInput in all create_project methods: create_model_evaluation_project, create_offline_model_evaluation_project, create_prompt_response_generation_project, create_response_creation_project

3 files changed: +136 -77 lines

libs/labelbox/src/labelbox/client.py

Lines changed: 129 additions & 76 deletions

@@ -8,7 +8,7 @@
 import urllib.parse
 from collections import defaultdict
 from types import MappingProxyType
-from typing import Any, Callable, Dict, List, Optional, Union, overload
+from typing import Any, Callable, Dict, List, Optional, Set, Union, overload
 
 import lbox.exceptions
 import requests
@@ -597,7 +597,20 @@ def create_dataset(
             raise e
         return dataset
 
-    def create_project(self, **kwargs) -> Project:
+    def create_project(
+        self,
+        name: str,
+        media_type: MediaType,
+        description: Optional[str] = None,
+        auto_audit_percentage: Optional[float] = None,
+        auto_audit_number_of_labels: Optional[int] = None,
+        quality_modes: Optional[Set[QualityMode]] = {
+            QualityMode.Benchmark,
+            QualityMode.Consensus,
+        },
+        is_benchmark_enabled: Optional[bool] = None,
+        is_consensus_enabled: Optional[bool] = None,
+    ) -> Project:
         """Creates a Project object on the server.
 
         Attribute values are passed as keyword arguments.
@@ -628,40 +641,33 @@ def create_project(self, **kwargs) -> Project:
            dataset_name_or_id, append_to_existing_dataset, data_row_count, editor_task_type
            They are not used for general projects and not supported in this method
        """
-        # The following arguments are not supported for general projects, only for chat model evaluation projects
-        kwargs.pop("dataset_name_or_id", None)
-        kwargs.pop("append_to_existing_dataset", None)
-        kwargs.pop("data_row_count", None)
-        kwargs.pop("editor_task_type", None)
-        input = _CoreProjectInput(**kwargs)
-        return self._create_project(input)
-
-    @overload
-    def create_model_evaluation_project(
-        self,
-        dataset_name: str,
-        dataset_id: str = None,
-        data_row_count: int = 100,
-        **kwargs,
-    ) -> Project:
-        pass
-
-    @overload
-    def create_model_evaluation_project(
-        self,
-        dataset_id: str,
-        dataset_name: str = None,
-        data_row_count: int = 100,
-        **kwargs,
-    ) -> Project:
-        pass
+        input = {
+            "name": name,
+            "description": description,
+            "media_type": media_type,
+            "auto_audit_percentage": auto_audit_percentage,
+            "auto_audit_number_of_labels": auto_audit_number_of_labels,
+            "quality_modes": quality_modes,
+            "is_benchmark_enabled": is_benchmark_enabled,
+            "is_consensus_enabled": is_consensus_enabled,
+        }
+        return self._create_project(_CoreProjectInput(**input))
 
     def create_model_evaluation_project(
         self,
+        name: str,
+        description: Optional[str] = None,
+        auto_audit_percentage: Optional[float] = None,
+        auto_audit_number_of_labels: Optional[int] = None,
+        quality_modes: Optional[Set[QualityMode]] = {
+            QualityMode.Benchmark,
+            QualityMode.Consensus,
+        },
+        is_benchmark_enabled: Optional[bool] = None,
+        is_consensus_enabled: Optional[bool] = None,
        dataset_id: Optional[str] = None,
        dataset_name: Optional[str] = None,
        data_row_count: int = 100,
-        **kwargs,
     ) -> Project:
         """
         Use this method exclusively to create a chat model evaluation project.
@@ -692,8 +698,6 @@ def create_model_evaluation_project(
             raise ValueError(
                 "dataset_name or data_set_id must be present and not be an empty string."
             )
-        if data_row_count <= 0:
-            raise ValueError("data_row_count must be a positive integer.")
 
         if dataset_id:
             append_to_existing_dataset = True
@@ -702,42 +706,74 @@ def create_model_evaluation_project(
             append_to_existing_dataset = False
             dataset_name_or_id = dataset_name
 
-        kwargs["media_type"] = MediaType.Conversational
-        kwargs["dataset_name_or_id"] = dataset_name_or_id
-        kwargs["append_to_existing_dataset"] = append_to_existing_dataset
-        kwargs["data_row_count"] = data_row_count
-        kwargs["editor_task_type"] = EditorTaskType.ModelChatEvaluation.value
+        media_type = MediaType.Conversational
+        editor_task_type = EditorTaskType.ModelChatEvaluation
 
-        return self._create_project(**kwargs)
+        input = {
+            "name": name,
+            "description": description,
+            "media_type": media_type,
+            "auto_audit_percentage": auto_audit_percentage,
+            "auto_audit_number_of_labels": auto_audit_number_of_labels,
+            "quality_modes": quality_modes,
+            "is_benchmark_enabled": is_benchmark_enabled,
+            "is_consensus_enabled": is_consensus_enabled,
+            "dataset_name_or_id": dataset_name_or_id,
+            "append_to_existing_dataset": append_to_existing_dataset,
+            "data_row_count": data_row_count,
+            "editor_task_type": editor_task_type,
+        }
+        return self._create_project(_CoreProjectInput(**input))
 
-    def create_offline_model_evaluation_project(self, **kwargs) -> Project:
+    def create_offline_model_evaluation_project(
+        self,
+        name: str,
+        description: Optional[str] = None,
+        auto_audit_percentage: Optional[float] = None,
+        auto_audit_number_of_labels: Optional[int] = None,
+        quality_modes: Optional[Set[QualityMode]] = {
+            QualityMode.Benchmark,
+            QualityMode.Consensus,
+        },
+        is_benchmark_enabled: Optional[bool] = None,
+        is_consensus_enabled: Optional[bool] = None,
+    ) -> Project:
         """
         Creates a project for offline model evaluation.
         Args:
             **kwargs: Additional parameters to pass see the create_project method
         Returns:
             Project: The created project
         """
-        kwargs["media_type"] = (
-            MediaType.Conversational
-        )  # Only Conversational is supported
-        kwargs["editor_task_type"] = (
-            EditorTaskType.OfflineModelChatEvaluation.value
-        )  # Special editor task type for offline model evaluation
-
-        # The following arguments are not supported for offline model evaluation
-        kwargs.pop("dataset_name_or_id", None)
-        kwargs.pop("append_to_existing_dataset", None)
-        kwargs.pop("data_row_count", None)
-
-        return self._create_project(**kwargs)
+        input = {
+            "name": name,
+            "description": description,
+            "media_type": MediaType.Conversational,
+            "auto_audit_percentage": auto_audit_percentage,
+            "auto_audit_number_of_labels": auto_audit_number_of_labels,
+            "quality_modes": quality_modes,
+            "is_benchmark_enabled": is_benchmark_enabled,
+            "is_consensus_enabled": is_consensus_enabled,
+            "editor_task_type": EditorTaskType.OfflineModelChatEvaluation,
+        }
+        return self._create_project(_CoreProjectInput(**input))
 
     def create_prompt_response_generation_project(
         self,
+        name: str,
+        media_type: MediaType,
+        description: Optional[str] = None,
+        auto_audit_percentage: Optional[float] = None,
+        auto_audit_number_of_labels: Optional[int] = None,
+        quality_modes: Optional[Set[QualityMode]] = {
+            QualityMode.Benchmark,
+            QualityMode.Consensus,
+        },
+        is_benchmark_enabled: Optional[bool] = None,
+        is_consensus_enabled: Optional[bool] = None,
         dataset_id: Optional[str] = None,
         dataset_name: Optional[str] = None,
         data_row_count: int = 100,
-        **kwargs,
     ) -> Project:
         """
         Use this method exclusively to create a prompt and response generation project.
@@ -776,51 +812,68 @@ def create_prompt_response_generation_project(
                 "Only provide a dataset_name or dataset_id, not both."
             )
 
-        if data_row_count <= 0:
-            raise ValueError("data_row_count must be a positive integer.")
-
         if dataset_id:
             append_to_existing_dataset = True
             dataset_name_or_id = dataset_id
         else:
             append_to_existing_dataset = False
             dataset_name_or_id = dataset_name
 
-        if "media_type" in kwargs and kwargs.get("media_type") not in [
+        if media_type not in [
             MediaType.LLMPromptCreation,
             MediaType.LLMPromptResponseCreation,
         ]:
             raise ValueError(
                 "media_type must be either LLMPromptCreation or LLMPromptResponseCreation"
             )
 
-        kwargs["dataset_name_or_id"] = dataset_name_or_id
-        kwargs["append_to_existing_dataset"] = append_to_existing_dataset
-        kwargs["data_row_count"] = data_row_count
-
-        kwargs.pop("editor_task_type", None)
-
-        return self._create_project(**kwargs)
+        input = {
+            "name": name,
+            "description": description,
+            "media_type": media_type,
+            "auto_audit_percentage": auto_audit_percentage,
+            "auto_audit_number_of_labels": auto_audit_number_of_labels,
+            "quality_modes": quality_modes,
+            "is_benchmark_enabled": is_benchmark_enabled,
+            "is_consensus_enabled": is_consensus_enabled,
+            "dataset_name_or_id": dataset_name_or_id,
+            "append_to_existing_dataset": append_to_existing_dataset,
+            "data_row_count": data_row_count,
+        }
+        return self._create_project(_CoreProjectInput(**input))
 
-    def create_response_creation_project(self, **kwargs) -> Project:
+    def create_response_creation_project(
+        self,
+        name: str,
+        description: Optional[str] = None,
+        auto_audit_percentage: Optional[float] = None,
+        auto_audit_number_of_labels: Optional[int] = None,
+        quality_modes: Optional[Set[QualityMode]] = {
+            QualityMode.Benchmark,
+            QualityMode.Consensus,
+        },
+        is_benchmark_enabled: Optional[bool] = None,
+        is_consensus_enabled: Optional[bool] = None,
+    ) -> Project:
         """
         Creates a project for response creation.
         Args:
             **kwargs: Additional parameters to pass see the create_project method
         Returns:
             Project: The created project
        """
-        kwargs["media_type"] = MediaType.Text  # Only Text is supported
-        kwargs["editor_task_type"] = (
-            EditorTaskType.ResponseCreation.value
-        )  # Special editor task type for response creation projects
-
-        # The following arguments are not supported for response creation projects
-        kwargs.pop("dataset_name_or_id", None)
-        kwargs.pop("append_to_existing_dataset", None)
-        kwargs.pop("data_row_count", None)
-
-        return self._create_project(**kwargs)
+        input = {
+            "name": name,
+            "description": description,
+            "media_type": MediaType.Text,  # Only Text is supported
+            "auto_audit_percentage": auto_audit_percentage,
+            "auto_audit_number_of_labels": auto_audit_number_of_labels,
+            "quality_modes": quality_modes,
+            "is_benchmark_enabled": is_benchmark_enabled,
+            "is_consensus_enabled": is_consensus_enabled,
+            "editor_task_type": EditorTaskType.ResponseCreation.value,  # Special editor task type for response creation projects
+        }
+        return self._create_project(_CoreProjectInput(**input))
 
     def _create_project(self, input: _CoreProjectInput) -> Project:
         media_type_value = input.media_type.value
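With the @overload stubs and **kwargs bags gone, every supported field is spelled out in the signature, so an unsupported or misspelled argument fails at call time instead of being silently popped or forwarded. A minimal sketch of the new call style, assuming a standard client setup (the API key and names below are illustrative placeholders):

    from labelbox import Client
    from labelbox.schema.media_type import MediaType
    from labelbox.schema.quality_mode import QualityMode

    client = Client(api_key="<YOUR_API_KEY>")  # placeholder key

    # Explicit keywords replace the old **kwargs bag; anything the method
    # does not accept now raises a TypeError before _CoreProjectInput's
    # extra="forbid" validation is ever reached.
    project = client.create_project(
        name="typed-create-project-demo",
        media_type=MediaType.Image,
        description="Created through the explicit signature",
        quality_modes={QualityMode.Benchmark},
    )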

libs/labelbox/src/labelbox/project_validation.py

Lines changed: 6 additions & 0 deletions

@@ -1,8 +1,10 @@
 from typing import Optional, Set
 
 from pydantic import BaseModel, ConfigDict, Field, model_validator
+from typing_extensions import Annotated
 
 from labelbox.schema.media_type import MediaType
+from labelbox.schema.ontology_kind import EditorTaskType
 from labelbox.schema.quality_mode import (
     BENCHMARK_AUTO_AUDIT_NUMBER_OF_LABELS,
     BENCHMARK_AUTO_AUDIT_PERCENTAGE,
@@ -12,6 +14,8 @@
 )
 from labelbox.schema.queue_mode import QueueMode
 
+PositiveInt = Annotated[int, Field(gt=0)]
+
 
 class _CoreProjectInput(BaseModel):
     name: str
@@ -27,6 +31,8 @@ class _CoreProjectInput(BaseModel):
     is_consensus_enabled: Optional[bool] = None
     dataset_name_or_id: Optional[str] = None
     append_to_existing_dataset: Optional[bool] = None
+    data_row_count: Optional[PositiveInt] = None
+    editor_task_type: Optional[EditorTaskType] = None
 
     model_config = ConfigDict(extra="forbid")
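The new PositiveInt alias replaces the imperative data_row_count <= 0 checks deleted from client.py: the constraint now lives on the model, so every caller that builds a _CoreProjectInput gets the same validation. A self-contained sketch of how the Annotated constraint behaves (Demo is a hypothetical stand-in for _CoreProjectInput):

    from typing import Optional

    from pydantic import BaseModel, Field, ValidationError
    from typing_extensions import Annotated

    # Same alias as in the diff: an int that must be strictly positive.
    PositiveInt = Annotated[int, Field(gt=0)]

    class Demo(BaseModel):  # hypothetical stand-in for _CoreProjectInput
        data_row_count: Optional[PositiveInt] = None

    Demo(data_row_count=100)  # accepted
    Demo()                    # accepted: the field remains optional
    try:
        Demo(data_row_count=0)  # rejected by the gt=0 constraint
    except ValidationError as err:
        print(err)  # pydantic reports the value must be greater than 0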

libs/labelbox/src/labelbox/schema/ontology_kind.py

Lines changed: 1 addition & 1 deletion

@@ -53,7 +53,7 @@ def evaluate_ontology_kind_with_media_type(
     return media_type
 
 
-class EditorTaskType(Enum):
+class EditorTaskType(str, Enum):
     ModelChatEvaluation = "MODEL_CHAT_EVALUATION"
     ResponseCreation = "RESPONSE_CREATION"
     OfflineModelChatEvaluation = "OFFLINE_MODEL_CHAT_EVALUATION"
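Mixing str into EditorTaskType is what lets _CoreProjectInput hold the enum member directly where the old kwargs code passed .value: a str-mixin enum member is itself a string. A quick standalone illustration of the difference (mirroring the class above):

    from enum import Enum

    class EditorTaskType(str, Enum):  # mirrors the change in this diff
        ModelChatEvaluation = "MODEL_CHAT_EVALUATION"

    # With the str mixin, members are str instances and compare equal to
    # their raw values, so the explicit .value hop becomes optional:
    assert isinstance(EditorTaskType.ModelChatEvaluation, str)
    assert EditorTaskType.ModelChatEvaluation == "MODEL_CHAT_EVALUATION"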
