@@ -145,7 +145,8 @@ def execute(self,
                 files=None,
                 timeout=60.0,
                 experimental=False,
-                error_log_key="message"):
+                error_log_key="message",
+                raise_return_resource_not_found=False):
         """ Sends a request to the server for the execution of the
         given query.
 
@@ -297,9 +298,13 @@ def get_error_status_code(error: dict) -> int:
         resource_not_found_error = check_errors(["RESOURCE_NOT_FOUND"],
                                                 "extensions", "code")
         if resource_not_found_error is not None:
-            # Return None and let the caller methods raise an exception
-            # as they already know which resource type and ID was requested
-            return None
+            if raise_return_resource_not_found:
+                raise labelbox.exceptions.ResourceNotFoundError(
+                    message=resource_not_found_error["message"])
+            else:
+                # Return None and let the caller methods raise an exception
+                # as they already know which resource type and ID was requested
+                return None
 
         resource_conflict_error = check_errors(["RESOURCE_CONFLICT"],
                                                "extensions", "code")
@@ -875,12 +880,12 @@ def create_offline_model_evaluation_project(self, **kwargs) -> Project:
 
         return self._create_project(**kwargs)
 
-
-    def create_prompt_response_generation_project(self,
-                                                  dataset_id: Optional[str] = None,
-                                                  dataset_name: Optional[str] = None,
-                                                  data_row_count: int = 100,
-                                                  **kwargs) -> Project:
+    def create_prompt_response_generation_project(
+            self,
+            dataset_id: Optional[str] = None,
+            dataset_name: Optional[str] = None,
+            data_row_count: int = 100,
+            **kwargs) -> Project:
         """
         Use this method exclusively to create a prompt and response generation project.
 
@@ -915,8 +920,7 @@ def create_prompt_response_generation_project(self,
 
         if dataset_id and dataset_name:
             raise ValueError(
-                "Only provide a dataset_name or dataset_id, not both."
-            )
+                "Only provide a dataset_name or dataset_id, not both.")
 
         if data_row_count <= 0:
             raise ValueError("data_row_count must be a positive integer.")
@@ -928,7 +932,9 @@ def create_prompt_response_generation_project(self,
             append_to_existing_dataset = False
             dataset_name_or_id = dataset_name
 
-        if "media_type" in kwargs and kwargs.get("media_type") not in [MediaType.LLMPromptCreation, MediaType.LLMPromptResponseCreation]:
+        if "media_type" in kwargs and kwargs.get("media_type") not in [
+                MediaType.LLMPromptCreation, MediaType.LLMPromptResponseCreation
+        ]:
             raise ValueError(
                 "media_type must be either LLMPromptCreation or LLMPromptResponseCreation"
             )
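The two hunks above only reformat the validation in create_prompt_response_generation_project, but they make its rules visible: pass either dataset_name or dataset_id (not both), a positive data_row_count, and, if a media_type is given, one of the two LLM prompt/response media types. A minimal sketch of a call that satisfies those checks; the project and dataset names are placeholders:

from labelbox import Client, MediaType

client = Client(api_key="<API_KEY>")  # placeholder key

# Either dataset_name or dataset_id may be supplied, never both.
project = client.create_prompt_response_generation_project(
    name="prompt-response-demo",          # placeholder project name
    dataset_name="prompt-response-rows",  # placeholder dataset name
    data_row_count=25,
    media_type=MediaType.LLMPromptResponseCreation)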
@@ -949,8 +955,7 @@ def create_response_creation_project(self, **kwargs) -> Project:
         Returns:
             Project: The created project
         """
-        kwargs[
-            "media_type"] = MediaType.Text  # Only Text is supported
+        kwargs["media_type"] = MediaType.Text  # Only Text is supported
         kwargs[
             "editor_task_type"] = EditorTaskType.ResponseCreation.value  # Special editor task type for response creation projects
 
@@ -1005,7 +1010,8 @@ def _create_project(self, **kwargs) -> Project:
 
         if quality_modes and quality_mode:
             raise ValueError(
-                "Cannot use both quality_modes and quality_mode at the same time. Use one or the other.")
+                "Cannot use both quality_modes and quality_mode at the same time. Use one or the other."
+            )
 
         if not quality_modes and not quality_mode:
             logger.info("Defaulting quality modes to Benchmark and Consensus.")
@@ -1021,12 +1027,11 @@ def _create_project(self, **kwargs) -> Project:
         if quality_mode:
             quality_modes_set = {quality_mode}
 
-        if (
-            quality_modes_set is None
-            or len(quality_modes_set) == 0
-            or quality_modes_set == {QualityMode.Benchmark, QualityMode.Consensus}
-        ):
-            data["auto_audit_number_of_labels"] = CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS
+        if (quality_modes_set is None or len(quality_modes_set) == 0 or
+                quality_modes_set
+                == {QualityMode.Benchmark, QualityMode.Consensus}):
+            data[
+                "auto_audit_number_of_labels"] = CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS
             data["auto_audit_percentage"] = CONSENSUS_AUTO_AUDIT_PERCENTAGE
             data["is_benchmark_enabled"] = True
             data["is_consensus_enabled"] = True
@@ -1297,10 +1302,12 @@ def create_ontology_from_feature_schemas(
                         f"Tool `{tool}` not in list of supported tools.")
             elif 'type' in feature_schema.normalized:
                 classification = feature_schema.normalized['type']
-                if classification in Classification.Type._value2member_map_.keys():
+                if classification in Classification.Type._value2member_map_.keys(
+                ):
                     Classification.Type(classification)
                     classifications.append(feature_schema.normalized)
-                elif classification in PromptResponseClassification.Type._value2member_map_.keys():
+                elif classification in PromptResponseClassification.Type._value2member_map_.keys(
+                ):
                     PromptResponseClassification.Type(classification)
                     classifications.append(feature_schema.normalized)
                 else:
@@ -1518,7 +1525,8 @@ def create_ontology(self,
             raise get_media_type_validation_error(media_type)
 
         if ontology_kind and OntologyKind.is_supported(ontology_kind):
-            media_type = OntologyKind.evaluate_ontology_kind_with_media_type(ontology_kind, media_type)
+            media_type = OntologyKind.evaluate_ontology_kind_with_media_type(
+                ontology_kind, media_type)
             editor_task_type_value = EditorTaskTypeMapper.to_editor_task_type(
                 ontology_kind, media_type).value
         elif ontology_kind: