From 15519dc55621e3f693674fd31c171f3613ab2bf2 Mon Sep 17 00:00:00 2001
From: Gabefire <33893811+Gabefire@users.noreply.github.com>
Date: Thu, 12 Sep 2024 12:22:54 -0500
Subject: [PATCH 1/5] added merge environment settings to prepared request

---
 libs/labelbox/src/labelbox/client.py | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/libs/labelbox/src/labelbox/client.py b/libs/labelbox/src/labelbox/client.py
index 431ddbdc4..7b37c7b94 100644
--- a/libs/labelbox/src/labelbox/client.py
+++ b/libs/labelbox/src/labelbox/client.py
@@ -214,15 +214,20 @@ def convert_value(value):
             if files:
                 del headers['Content-Type']
                 del headers['Accept']
+
             request = requests.Request('POST',
                                        endpoint,
                                        headers=headers,
                                        data=data,
                                        files=files if files else None)
 
-            prepped: requests.PreparedRequest = request.prepare()
-
-            response = self._connection.send(prepped, timeout=timeout)
+            prepped: requests.PreparedRequest = self._connection.prepare_request(
+                request
+            )
+
+            settings = self._connection.merge_environment_settings(prepped.url, {}, None, None, None)
+
+            response = self._connection.send(prepped, timeout=timeout, **settings)
             logger.debug("Response: %s", response.text)
         except requests.exceptions.Timeout as e:
             raise labelbox.exceptions.TimeoutError(str(e))

From 53197f854768fe9b30167638f3f12de630db0713 Mon Sep 17 00:00:00 2001
From: Gabefire <33893811+Gabefire@users.noreply.github.com>
Date: Thu, 12 Sep 2024 14:35:10 -0500
Subject: [PATCH 2/5] applied review feedback: set headers on the prepared
 request

---
 libs/labelbox/src/labelbox/client.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/libs/labelbox/src/labelbox/client.py b/libs/labelbox/src/labelbox/client.py
index 7b37c7b94..9c94fa1af 100644
--- a/libs/labelbox/src/labelbox/client.py
+++ b/libs/labelbox/src/labelbox/client.py
@@ -225,6 +225,8 @@ def convert_value(value):
                 request
             )
 
+            prepped.headers = headers
+
             settings = self._connection.merge_environment_settings(prepped.url, {}, None, None, None)
 
             response = self._connection.send(prepped, timeout=timeout, **settings)

From dd1a6e97db25587e104204ca468f0b6c52960313 Mon Sep 17 00:00:00 2001
From: Gabefire <33893811+Gabefire@users.noreply.github.com>
Date: Thu, 12 Sep 2024 14:39:06 -0500
Subject: [PATCH 3/5] removed headers from request since they get overwritten
 on the prepared request

---
 libs/labelbox/src/labelbox/client.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/libs/labelbox/src/labelbox/client.py b/libs/labelbox/src/labelbox/client.py
index 9c94fa1af..f6a4cadc5 100644
--- a/libs/labelbox/src/labelbox/client.py
+++ b/libs/labelbox/src/labelbox/client.py
@@ -217,7 +217,6 @@ def convert_value(value):
 
             request = requests.Request('POST',
                                        endpoint,
-                                       headers=headers,
                                        data=data,
                                        files=files if files else None)
 

From 7322bfd038d73b45a83d556ce4f6081a27a9221f Mon Sep 17 00:00:00 2001
From: Gabefire <33893811+Gabefire@users.noreply.github.com>
Date: Thu, 12 Sep 2024 14:51:41 -0500
Subject: [PATCH 4/5] swapped back to request.prepare()

---
 libs/labelbox/src/labelbox/client.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/libs/labelbox/src/labelbox/client.py b/libs/labelbox/src/labelbox/client.py
index f6a4cadc5..56c16a395 100644
--- a/libs/labelbox/src/labelbox/client.py
+++ b/libs/labelbox/src/labelbox/client.py
@@ -215,16 +215,16 @@ def convert_value(value):
             del headers['Content-Type']
             del headers['Accept']
 
-            request = requests.Request('POST',
-                                       endpoint,
-                                       data=data,
-                                       files=files if files else None)
-
-            prepped: requests.PreparedRequest = self._connection.prepare_request(
-                request
+            request = requests.Request(
+                "POST",
+                endpoint,
+                headers=headers,
+                data=data,
+                files=files if files else None,
             )
-
-            prepped.headers = headers
+
+            prepped: requests.PreparedRequest = request.prepare()
+
 
             settings = self._connection.merge_environment_settings(prepped.url, {}, None, None, None)
 

From 4eadc80e0343d43b8832a49982517d99aab53e95 Mon Sep 17 00:00:00 2001
From: Gabefire <33893811+Gabefire@users.noreply.github.com>
Date: Thu, 12 Sep 2024 20:08:03 -0500
Subject: [PATCH 5/5] formatted

---
 libs/labelbox/src/labelbox/client.py | 1130 ++++++++++++++------------
 1 file changed, 627 insertions(+), 503 deletions(-)

diff --git a/libs/labelbox/src/labelbox/client.py b/libs/labelbox/src/labelbox/client.py
index 56c16a395..003fe4bb2 100644
--- a/libs/labelbox/src/labelbox/client.py
+++ b/libs/labelbox/src/labelbox/client.py
@@ -43,21 +43,38 @@
 from labelbox.schema.model_config import ModelConfig
 from labelbox.schema.model_run import ModelRun
 from labelbox.schema.ontology import Ontology, DeleteFeatureFromOntologyResult
-from labelbox.schema.ontology import Tool, Classification, FeatureSchema, PromptResponseClassification
+from labelbox.schema.ontology import (
+    Tool,
+    Classification,
+    FeatureSchema,
+    PromptResponseClassification,
+)
 from labelbox.schema.organization import Organization
 from labelbox.schema.project import Project
-from labelbox.schema.quality_mode import QualityMode, BENCHMARK_AUTO_AUDIT_NUMBER_OF_LABELS, \
-    BENCHMARK_AUTO_AUDIT_PERCENTAGE, CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS, CONSENSUS_AUTO_AUDIT_PERCENTAGE
+from labelbox.schema.quality_mode import (
+    QualityMode,
+    BENCHMARK_AUTO_AUDIT_NUMBER_OF_LABELS,
+    BENCHMARK_AUTO_AUDIT_PERCENTAGE,
+    CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS,
+    CONSENSUS_AUTO_AUDIT_PERCENTAGE,
+)
 from labelbox.schema.queue_mode import QueueMode
 from labelbox.schema.role import Role
-from labelbox.schema.send_to_annotate_params import SendToAnnotateFromCatalogParams, build_destination_task_queue_input, \
-    build_predictions_input, build_annotations_input
+from labelbox.schema.send_to_annotate_params import (
+    SendToAnnotateFromCatalogParams,
+    build_destination_task_queue_input,
+    build_predictions_input,
+    build_annotations_input,
+)
 from labelbox.schema.slice import CatalogSlice, ModelSlice
 from labelbox.schema.task import Task, DataUpsertTask
 from labelbox.schema.user import User
 from labelbox.schema.label_score import LabelScore
-from labelbox.schema.ontology_kind import (OntologyKind, EditorTaskTypeMapper,
-                                           EditorTaskType)
+from labelbox.schema.ontology_kind import (
+    OntologyKind,
+    EditorTaskTypeMapper,
+    EditorTaskType,
+)
 from labelbox.schema.labeling_service_dashboard import LabelingServiceDashboard
 
 logger = logging.getLogger(__name__)
@@ -72,20 +89,22 @@ def python_version_info():
 
 
 class Client:
-    """ A Labelbox client.
+    """A Labelbox client.
 
     Contains info necessary for connecting to a Labelbox server (URL,
     authentication key). Provides functions for querying and creating
     top-level data objects (Projects, Datasets).
     """
 
-    def __init__(self,
-                 api_key=None,
-                 endpoint='https://api.labelbox.com/graphql',
-                 enable_experimental=False,
-                 app_url="https://app.labelbox.com",
-                 rest_endpoint="https://api.labelbox.com/api/v1"):
-        """ Creates and initializes a Labelbox Client.
+    def __init__(
+        self,
+        api_key=None,
+        endpoint="https://api.labelbox.com/graphql",
+        enable_experimental=False,
+        app_url="https://app.labelbox.com",
+        rest_endpoint="https://api.labelbox.com/api/v1",
+    ):
+        """Creates and initializes a Labelbox Client.
 
         Logging is defaulted to level WARNING. To receive more verbose
         output to console, update `logging.level` to the appropriate level.
@@ -106,7 +125,8 @@ def __init__(self,
         if api_key is None:
             if _LABELBOX_API_KEY not in os.environ:
                 raise labelbox.exceptions.AuthenticationError(
-                    "Labelbox API key not provided")
+                    "Labelbox API key not provided"
+                )
             api_key = os.environ[_LABELBOX_API_KEY]
         self.api_key = api_key
 
@@ -123,8 +143,7 @@ def __init__(self,
         self._connection: requests.Session = self._init_connection()
 
     def _init_connection(self) -> requests.Session:
-        connection = requests.Session(
-        )  # using default connection pool size of 10
+        connection = requests.Session()  # using default connection pool size of 10
         connection.headers.update(self._default_headers())
         return connection
 
@@ -135,26 +154,30 @@ def headers(self) -> MappingProxyType:
 
     def _default_headers(self):
         return {
-            'Authorization': 'Bearer %s' % self.api_key,
-            'Accept': 'application/json',
-            'Content-Type': 'application/json',
-            'X-User-Agent': f"python-sdk {SDK_VERSION}",
-            'X-Python-Version': f"{python_version_info()}",
+            "Authorization": "Bearer %s" % self.api_key,
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+            "X-User-Agent": f"python-sdk {SDK_VERSION}",
+            "X-Python-Version": f"{python_version_info()}",
         }
 
-    @retry.Retry(predicate=retry.if_exception_type(
-        labelbox.exceptions.InternalServerError,
-        labelbox.exceptions.TimeoutError))
-    def execute(self,
-                query=None,
-                params=None,
-                data=None,
-                files=None,
-                timeout=60.0,
-                experimental=False,
-                error_log_key="message",
-                raise_return_resource_not_found=False):
-        """ Sends a request to the server for the execution of the
+    @retry.Retry(
+        predicate=retry.if_exception_type(
+            labelbox.exceptions.InternalServerError, labelbox.exceptions.TimeoutError
+        )
+    )
+    def execute(
+        self,
+        query=None,
+        params=None,
+        data=None,
+        files=None,
+        timeout=60.0,
+        experimental=False,
+        error_log_key="message",
+        raise_return_resource_not_found=False,
+    ):
+        """Sends a request to the server for the execution of the
         given query.
 
         Checks the response for errors and wraps errors
@@ -196,25 +219,23 @@ def convert_value(value):
 
         if query is not None:
             if params is not None:
-                params = {
-                    key: convert_value(value) for key, value in params.items()
-                }
-            data = json.dumps({
-                'query': query,
-                'variables': params
-            }).encode('utf-8')
+                params = {key: convert_value(value) for key, value in params.items()}
+            data = json.dumps({"query": query, "variables": params}).encode("utf-8")
         elif data is None:
             raise ValueError("query and data cannot both be none")
 
-        endpoint = self.endpoint if not experimental else self.endpoint.replace(
-            "/graphql", "/_gql")
+        endpoint = (
+            self.endpoint
+            if not experimental
+            else self.endpoint.replace("/graphql", "/_gql")
+        )
 
         try:
             headers = self._connection.headers.copy()
             if files:
-                del headers['Content-Type']
-                del headers['Accept']
-
+                del headers["Content-Type"]
+                del headers["Accept"]
+
             request = requests.Request(
                 "POST",
                 endpoint,
                 headers=headers,
                 data=data,
                 files=files if files else None,
             )
 
             prepped: requests.PreparedRequest = request.prepare()
 
-
-            settings = self._connection.merge_environment_settings(prepped.url, {}, None, None, None)
-
+            settings = self._connection.merge_environment_settings(
+                prepped.url, {}, None, None, None
+            )
+
             response = self._connection.send(prepped, timeout=timeout, **settings)
             logger.debug("Response: %s", response.text)
         except requests.exceptions.Timeout as e:
             raise labelbox.exceptions.TimeoutError(str(e))
@@ -237,20 +259,28 @@ def convert_value(value):
             raise labelbox.exceptions.NetworkError(e)
         except Exception as e:
             raise labelbox.exceptions.LabelboxError(
-                "Unknown error during Client.query(): " + str(e), e)
+                "Unknown error during Client.query(): " + str(e), e
+            )
 
-        if 200 <= response.status_code < 300 or response.status_code < 500 or response.status_code >= 600:
+        if (
+            200 <= response.status_code < 300
+            or response.status_code < 500
+            or response.status_code >= 600
+        ):
             try:
                 r_json = response.json()
             except Exception:
                 raise labelbox.exceptions.LabelboxError(
-                    "Failed to parse response as JSON: %s" % response.text)
+                    "Failed to parse response as JSON: %s" % response.text
+                )
         else:
-            if "upstream connect error or disconnect/reset before headers" in response.text:
-                raise labelbox.exceptions.InternalServerError(
-                    "Connection reset")
+            if (
+                "upstream connect error or disconnect/reset before headers"
+                in response.text
+            ):
+                raise labelbox.exceptions.InternalServerError("Connection reset")
             elif response.status_code == 502:
-                error_502 = '502 Bad Gateway'
+                error_502 = "502 Bad Gateway"
                 raise labelbox.exceptions.InternalServerError(error_502)
             elif 500 <= response.status_code < 600:
                 error_500 = f"Internal server http error {response.status_code}"
@@ -259,7 +289,7 @@ def convert_value(value):
         errors = r_json.get("errors", [])
 
         def check_errors(keywords, *path):
-            """ Helper that looks for any of the given `keywords` in any of
+            """Helper that looks for any of the given `keywords` in any of
             current errors on paths (like error[path][component][to][keyword]).
""" for error in errors: @@ -276,18 +306,18 @@ def get_error_status_code(error: dict) -> int: except: return 500 - if check_errors(["AUTHENTICATION_ERROR"], "extensions", - "code") is not None: + if check_errors(["AUTHENTICATION_ERROR"], "extensions", "code") is not None: raise labelbox.exceptions.AuthenticationError("Invalid API key") - authorization_error = check_errors(["AUTHORIZATION_ERROR"], - "extensions", "code") + authorization_error = check_errors( + ["AUTHORIZATION_ERROR"], "extensions", "code" + ) if authorization_error is not None: - raise labelbox.exceptions.AuthorizationError( - authorization_error["message"]) + raise labelbox.exceptions.AuthorizationError(authorization_error["message"]) - validation_error = check_errors(["GRAPHQL_VALIDATION_FAILED"], - "extensions", "code") + validation_error = check_errors( + ["GRAPHQL_VALIDATION_FAILED"], "extensions", "code" + ) if validation_error is not None: message = validation_error["message"] @@ -296,11 +326,9 @@ def get_error_status_code(error: dict) -> int: else: raise labelbox.exceptions.InvalidQueryError(message) - graphql_error = check_errors(["GRAPHQL_PARSE_FAILED"], "extensions", - "code") + graphql_error = check_errors(["GRAPHQL_PARSE_FAILED"], "extensions", "code") if graphql_error is not None: - raise labelbox.exceptions.InvalidQueryError( - graphql_error["message"]) + raise labelbox.exceptions.InvalidQueryError(graphql_error["message"]) # Check if API limit was exceeded response_msg = r_json.get("message", "") @@ -308,34 +336,41 @@ def get_error_status_code(error: dict) -> int: if response_msg.startswith("You have exceeded"): raise labelbox.exceptions.ApiLimitError(response_msg) - resource_not_found_error = check_errors(["RESOURCE_NOT_FOUND"], - "extensions", "code") + resource_not_found_error = check_errors( + ["RESOURCE_NOT_FOUND"], "extensions", "code" + ) if resource_not_found_error is not None: if raise_return_resource_not_found: raise labelbox.exceptions.ResourceNotFoundError( - message=resource_not_found_error["message"]) + message=resource_not_found_error["message"] + ) else: # Return None and let the caller methods raise an exception # as they already know which resource type and ID was requested return None - resource_conflict_error = check_errors(["RESOURCE_CONFLICT"], - "extensions", "code") + resource_conflict_error = check_errors( + ["RESOURCE_CONFLICT"], "extensions", "code" + ) if resource_conflict_error is not None: raise labelbox.exceptions.ResourceConflict( - resource_conflict_error["message"]) + resource_conflict_error["message"] + ) - malformed_request_error = check_errors(["MALFORMED_REQUEST"], - "extensions", "code") + malformed_request_error = check_errors( + ["MALFORMED_REQUEST"], "extensions", "code" + ) if malformed_request_error is not None: raise labelbox.exceptions.MalformedQueryException( - malformed_request_error[error_log_key]) + malformed_request_error[error_log_key] + ) # A lot of different error situations are now labeled serverside # as INTERNAL_SERVER_ERROR, when they are actually client errors. 
# TODO: fix this in the server API - internal_server_error = check_errors(["INTERNAL_SERVER_ERROR"], - "extensions", "code") + internal_server_error = check_errors( + ["INTERNAL_SERVER_ERROR"], "extensions", "code" + ) if internal_server_error is not None: message = internal_server_error.get("message") error_status_code = get_error_status_code(internal_server_error) @@ -350,8 +385,9 @@ def get_error_status_code(error: dict) -> int: else: raise labelbox.exceptions.InternalServerError(message) - not_allowed_error = check_errors(["OPERATION_NOT_ALLOWED"], - "extensions", "code") + not_allowed_error = check_errors( + ["OPERATION_NOT_ALLOWED"], "extensions", "code" + ) if not_allowed_error is not None: message = not_allowed_error.get("message") raise labelbox.exceptions.OperationNotAllowedException(message) @@ -362,10 +398,12 @@ def get_error_status_code(error: dict) -> int: map( lambda x: { "message": x["message"], - "code": x["extensions"]["code"] - }, errors)) - raise labelbox.exceptions.LabelboxError("Unknown error: %s" % - str(messages)) + "code": x["extensions"]["code"], + }, + errors, + ) + ) + raise labelbox.exceptions.LabelboxError("Unknown error: %s" % str(messages)) # if we do return a proper error code, and didn't catch this above # reraise @@ -374,7 +412,7 @@ def get_error_status_code(error: dict) -> int: # in the SDK if response.status_code != requests.codes.ok: message = f"{response.status_code} {response.reason}" - cause = r_json.get('message') + cause = r_json.get("message") raise labelbox.exceptions.LabelboxError(message, cause) return r_json["data"] @@ -394,18 +432,21 @@ def upload_file(self, path: str) -> str: content_type, _ = mimetypes.guess_type(path) filename = os.path.basename(path) with open(path, "rb") as f: - return self.upload_data(content=f.read(), - filename=filename, - content_type=content_type) - - @retry.Retry(predicate=retry.if_exception_type( - labelbox.exceptions.InternalServerError)) - def upload_data(self, - content: bytes, - filename: str = None, - content_type: str = None, - sign: bool = False) -> str: - """ Uploads the given data (bytes) to Labelbox. + return self.upload_data( + content=f.read(), filename=filename, content_type=content_type + ) + + @retry.Retry( + predicate=retry.if_exception_type(labelbox.exceptions.InternalServerError) + ) + def upload_data( + self, + content: bytes, + filename: str = None, + content_type: str = None, + sign: bool = False, + ) -> str: + """Uploads the given data (bytes) to Labelbox. 
Args: content: bytestring to upload @@ -421,40 +462,39 @@ def upload_data(self, """ request_data = { - "operations": - json.dumps({ + "operations": json.dumps( + { "variables": { "file": None, "contentLength": len(content), - "sign": sign + "sign": sign, }, - "query": - """mutation UploadFile($file: Upload!, $contentLength: Int!, + "query": """mutation UploadFile($file: Upload!, $contentLength: Int!, $sign: Boolean) { uploadFile(file: $file, contentLength: $contentLength, sign: $sign) {url filename} } """, - }), + } + ), "map": (None, json.dumps({"1": ["variables.file"]})), } files = { - "1": (filename, content, content_type) if - (filename and content_type) else content + "1": (filename, content, content_type) + if (filename and content_type) + else content } headers = self._connection.headers.copy() headers.pop("Content-Type", None) - request = requests.Request('POST', - self.endpoint, - headers=headers, - data=request_data, - files=files) + request = requests.Request( + "POST", self.endpoint, headers=headers, data=request_data, files=files + ) prepped: requests.PreparedRequest = request.prepare() response = self._connection.send(prepped) if response.status_code == 502: - error_502 = '502 Bad Gateway' + error_502 = "502 Bad Gateway" raise labelbox.exceptions.InternalServerError(error_502) elif response.status_code == 503: raise labelbox.exceptions.InternalServerError(response.text) @@ -465,22 +505,23 @@ def upload_data(self, file_data = response.json().get("data", None) except ValueError as e: # response is not valid JSON raise labelbox.exceptions.LabelboxError( - "Failed to upload, unknown cause", e) + "Failed to upload, unknown cause", e + ) if not file_data or not file_data.get("uploadFile", None): try: errors = response.json().get("errors", []) - error_msg = next(iter(errors), {}).get("message", - "Unknown error") + error_msg = next(iter(errors), {}).get("message", "Unknown error") except Exception as e: error_msg = "Unknown error" raise labelbox.exceptions.LabelboxError( - "Failed to upload, message: %s" % error_msg) + "Failed to upload, message: %s" % error_msg + ) return file_data["uploadFile"]["url"] def _get_single(self, db_object_type, uid): - """ Fetches a single object of the given type, for the given ID. + """Fetches a single object of the given type, for the given ID. Args: db_object_type (type): DbObject subclass. @@ -496,13 +537,12 @@ def _get_single(self, db_object_type, uid): res = self.execute(query_str, params) res = res and res.get(utils.camel_case(db_object_type.type_name())) if res is None: - raise labelbox.exceptions.ResourceNotFoundError( - db_object_type, params) + raise labelbox.exceptions.ResourceNotFoundError(db_object_type, params) else: return db_object_type(self, res) def get_project(self, project_id) -> Project: - """ Gets a single Project with the given ID. + """Gets a single Project with the given ID. >>> project = client.get_project("") @@ -517,7 +557,7 @@ def get_project(self, project_id) -> Project: return self._get_single(Entity.Project, project_id) def get_dataset(self, dataset_id) -> Dataset: - """ Gets a single Dataset with the given ID. + """Gets a single Dataset with the given ID. >>> dataset = client.get_dataset("") @@ -532,21 +572,21 @@ def get_dataset(self, dataset_id) -> Dataset: return self._get_single(Entity.Dataset, dataset_id) def get_user(self) -> User: - """ Gets the current User database object. + """Gets the current User database object. 
>>> user = client.get_user() """ return self._get_single(Entity.User, None) def get_organization(self) -> Organization: - """ Gets the Organization DB object of the current user. + """Gets the Organization DB object of the current user. >>> organization = client.get_organization() """ return self._get_single(Entity.Organization, None) def _get_all(self, db_object_type, where, filter_deleted=True): - """ Fetches all the objects of the given type the user has access to. + """Fetches all the objects of the given type the user has access to. Args: db_object_type (type): DbObject subclass. @@ -561,12 +601,15 @@ def _get_all(self, db_object_type, where, filter_deleted=True): query_str, params = query.get_all(db_object_type, where) return PaginatedCollection( - self, query_str, params, + self, + query_str, + params, [utils.camel_case(db_object_type.type_name()) + "s"], - db_object_type) + db_object_type, + ) def get_projects(self, where=None) -> PaginatedCollection: - """ Fetches all the projects the user has access to. + """Fetches all the projects the user has access to. >>> projects = client.get_projects(where=(Project.name == "") & (Project.description == "")) @@ -579,7 +622,7 @@ def get_projects(self, where=None) -> PaginatedCollection: return self._get_all(Entity.Project, where) def get_users(self, where=None) -> PaginatedCollection: - """ Fetches all the users. + """Fetches all the users. >>> users = client.get_users(where=User.email == "") @@ -592,7 +635,7 @@ def get_users(self, where=None) -> PaginatedCollection: return self._get_all(Entity.User, where, filter_deleted=False) def get_datasets(self, where=None) -> PaginatedCollection: - """ Fetches one or more datasets. + """Fetches one or more datasets. >>> datasets = client.get_datasets(where=(Dataset.name == "") & (Dataset.description == "")) @@ -605,7 +648,7 @@ def get_datasets(self, where=None) -> PaginatedCollection: return self._get_all(Entity.Dataset, where) def get_labeling_frontends(self, where=None) -> List[LabelingFrontend]: - """ Fetches all the labeling frontends. + """Fetches all the labeling frontends. >>> frontend = client.get_labeling_frontends(where=LabelingFrontend.name == "Editor") @@ -618,7 +661,7 @@ def get_labeling_frontends(self, where=None) -> List[LabelingFrontend]: return self._get_all(Entity.LabelingFrontend, where) def _create(self, db_object_type, data, extra_params={}): - """ Creates an object on the server. Attribute values are + """Creates an object on the server. Attribute values are passed as keyword arguments: Args: @@ -636,8 +679,9 @@ def _create(self, db_object_type, data, extra_params={}): # Convert string attribute names to Field or Relationship objects. # Also convert Labelbox object values to their UIDs. 
data = { - db_object_type.attribute(attr) if isinstance(attr, str) else attr: - value.uid if isinstance(value, DbObject) else value + db_object_type.attribute(attr) if isinstance(attr, str) else attr: value.uid + if isinstance(value, DbObject) + else value for attr, value in data.items() } @@ -646,15 +690,17 @@ def _create(self, db_object_type, data, extra_params={}): res = self.execute(query_string, params) if not res: - raise labelbox.exceptions.LabelboxError("Failed to create %s" % - db_object_type.type_name()) + raise labelbox.exceptions.LabelboxError( + "Failed to create %s" % db_object_type.type_name() + ) res = res["create%s" % db_object_type.type_name()] return db_object_type(self, res) - def create_model_config(self, name: str, model_id: str, - inference_params: dict) -> ModelConfig: - """ Creates a new model config with the given params. + def create_model_config( + self, name: str, model_id: str, inference_params: dict + ) -> ModelConfig: + """Creates a new model config with the given params. Model configs are scoped to organizations, and can be reused between projects. Args: @@ -679,13 +725,13 @@ def create_model_config(self, name: str, model_id: str, params = { "modelId": model_id, "inferenceParams": inference_params, - "name": name + "name": name, } result = self.execute(query, params) - return ModelConfig(self, result['createModelConfig']) + return ModelConfig(self, result["createModelConfig"]) def delete_model_config(self, id: str) -> bool: - """ Deletes an existing model config with the given id + """Deletes an existing model config with the given id Args: id (str): ID of existing model config @@ -702,14 +748,13 @@ def delete_model_config(self, id: str) -> bool: params = {"id": id} result = self.execute(query, params) if not result: - raise labelbox.exceptions.ResourceNotFoundError( - Entity.ModelConfig, params) - return result['deleteModelConfig']['success'] + raise labelbox.exceptions.ResourceNotFoundError(Entity.ModelConfig, params) + return result["deleteModelConfig"]["success"] - def create_dataset(self, - iam_integration=IAMIntegration._DEFAULT, - **kwargs) -> Dataset: - """ Creates a Dataset object on the server. + def create_dataset( + self, iam_integration=IAMIntegration._DEFAULT, **kwargs + ) -> Dataset: + """Creates a Dataset object on the server. Attribute values are passed as keyword arguments. @@ -730,8 +775,7 @@ def create_dataset(self, """ dataset = self._create(Entity.Dataset, kwargs) if iam_integration == IAMIntegration._DEFAULT: - iam_integration = self.get_organization( - ).get_default_iam_integration() + iam_integration = self.get_organization().get_default_iam_integration() if iam_integration is None: return dataset @@ -743,22 +787,22 @@ def create_dataset(self, ) if not iam_integration.valid: - raise ValueError( - "Integration is not valid. Please select another.") + raise ValueError("Integration is not valid. Please select another.") self.execute( """mutation setSignerForDatasetPyApi($signerId: ID!, $datasetId: ID!) 
{ setSignerForDataset(data: { signerId: $signerId}, where: {id: $datasetId}){id}} - """, { - 'signerId': iam_integration.uid, - 'datasetId': dataset.uid - }) + """, + {"signerId": iam_integration.uid, "datasetId": dataset.uid}, + ) validation_result = self.execute( """mutation validateDatasetPyApi($id: ID!){validateDataset(where: {id : $id}){ valid checks{name, success}}} - """, {'id': dataset.uid}) + """, + {"id": dataset.uid}, + ) - if not validation_result['validateDataset']['valid']: + if not validation_result["validateDataset"]["valid"]: raise labelbox.exceptions.LabelboxError( f"IAMIntegration was not successfully added to the dataset." ) @@ -768,7 +812,7 @@ def create_dataset(self, return dataset def create_project(self, **kwargs) -> Project: - """ Creates a Project object on the server. + """Creates a Project object on the server. Attribute values are passed as keyword arguments. @@ -806,26 +850,32 @@ def create_project(self, **kwargs) -> Project: return self._create_project(**kwargs) @overload - def create_model_evaluation_project(self, - dataset_name: str, - dataset_id: str = None, - data_row_count: int = 100, - **kwargs) -> Project: + def create_model_evaluation_project( + self, + dataset_name: str, + dataset_id: str = None, + data_row_count: int = 100, + **kwargs, + ) -> Project: pass @overload - def create_model_evaluation_project(self, - dataset_id: str, - dataset_name: str = None, - data_row_count: int = 100, - **kwargs) -> Project: + def create_model_evaluation_project( + self, + dataset_id: str, + dataset_name: str = None, + data_row_count: int = 100, + **kwargs, + ) -> Project: pass - def create_model_evaluation_project(self, - dataset_id: Optional[str] = None, - dataset_name: Optional[str] = None, - data_row_count: int = 100, - **kwargs) -> Project: + def create_model_evaluation_project( + self, + dataset_id: Optional[str] = None, + dataset_name: Optional[str] = None, + data_row_count: int = 100, + **kwargs, + ) -> Project: """ Use this method exclusively to create a chat model evaluation project. Args: @@ -881,10 +931,12 @@ def create_offline_model_evaluation_project(self, **kwargs) -> Project: Returns: Project: The created project """ - kwargs[ - "media_type"] = MediaType.Conversational # Only Conversational is supported - kwargs[ - "editor_task_type"] = EditorTaskType.OfflineModelChatEvaluation.value # Special editor task type for offline model evaluation + kwargs["media_type"] = ( + MediaType.Conversational + ) # Only Conversational is supported + kwargs["editor_task_type"] = ( + EditorTaskType.OfflineModelChatEvaluation.value + ) # Special editor task type for offline model evaluation # The following arguments are not supported for offline model evaluation kwargs.pop("dataset_name_or_id", None) @@ -894,11 +946,12 @@ def create_offline_model_evaluation_project(self, **kwargs) -> Project: return self._create_project(**kwargs) def create_prompt_response_generation_project( - self, - dataset_id: Optional[str] = None, - dataset_name: Optional[str] = None, - data_row_count: int = 100, - **kwargs) -> Project: + self, + dataset_id: Optional[str] = None, + dataset_name: Optional[str] = None, + data_row_count: int = 100, + **kwargs, + ) -> Project: """ Use this method exclusively to create a prompt and response generation project. 
@@ -932,8 +985,7 @@ def create_prompt_response_generation_project( ) if dataset_id and dataset_name: - raise ValueError( - "Only provide a dataset_name or dataset_id, not both.") + raise ValueError("Only provide a dataset_name or dataset_id, not both.") if data_row_count <= 0: raise ValueError("data_row_count must be a positive integer.") @@ -946,7 +998,8 @@ def create_prompt_response_generation_project( dataset_name_or_id = dataset_name if "media_type" in kwargs and kwargs.get("media_type") not in [ - MediaType.LLMPromptCreation, MediaType.LLMPromptResponseCreation + MediaType.LLMPromptCreation, + MediaType.LLMPromptResponseCreation, ]: raise ValueError( "media_type must be either LLMPromptCreation or LLMPromptResponseCreation" @@ -969,8 +1022,9 @@ def create_response_creation_project(self, **kwargs) -> Project: Project: The created project """ kwargs["media_type"] = MediaType.Text # Only Text is supported - kwargs[ - "editor_task_type"] = EditorTaskType.ResponseCreation.value # Special editor task type for response creation projects + kwargs["editor_task_type"] = ( + EditorTaskType.ResponseCreation.value + ) # Special editor task type for response creation projects # The following arguments are not supported for response creation projects kwargs.pop("dataset_name_or_id", None) @@ -1005,13 +1059,16 @@ def _create_project(self, **kwargs) -> Project: if media_type and MediaType.is_supported(media_type): media_type_value = media_type.value elif media_type: - raise TypeError(f"{media_type} is not a valid media type. Use" - f" any of {MediaType.get_supported_members()}" - " from MediaType. Example: MediaType.Image.") + raise TypeError( + f"{media_type} is not a valid media type. Use" + f" any of {MediaType.get_supported_members()}" + " from MediaType. Example: MediaType.Image." + ) else: logger.warning( "Creating a project without specifying media_type" - " through this method will soon no longer be supported.") + " through this method will soon no longer be supported." 
+ ) media_type_value = None quality_modes = kwargs.get("quality_modes") @@ -1040,22 +1097,21 @@ def _create_project(self, **kwargs) -> Project: if quality_mode: quality_modes_set = {quality_mode} - if (quality_modes_set is None or len(quality_modes_set) == 0 or - quality_modes_set - == {QualityMode.Benchmark, QualityMode.Consensus}): - data[ - "auto_audit_number_of_labels"] = CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS + if ( + quality_modes_set is None + or len(quality_modes_set) == 0 + or quality_modes_set == {QualityMode.Benchmark, QualityMode.Consensus} + ): + data["auto_audit_number_of_labels"] = CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS data["auto_audit_percentage"] = CONSENSUS_AUTO_AUDIT_PERCENTAGE data["is_benchmark_enabled"] = True data["is_consensus_enabled"] = True elif quality_modes_set == {QualityMode.Benchmark}: - data[ - "auto_audit_number_of_labels"] = BENCHMARK_AUTO_AUDIT_NUMBER_OF_LABELS + data["auto_audit_number_of_labels"] = BENCHMARK_AUTO_AUDIT_NUMBER_OF_LABELS data["auto_audit_percentage"] = BENCHMARK_AUTO_AUDIT_PERCENTAGE data["is_benchmark_enabled"] = True elif quality_modes_set == {QualityMode.Consensus}: - data[ - "auto_audit_number_of_labels"] = CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS + data["auto_audit_number_of_labels"] = CONSENSUS_AUTO_AUDIT_NUMBER_OF_LABELS data["auto_audit_percentage"] = CONSENSUS_AUTO_AUDIT_PERCENTAGE data["is_consensus_enabled"] = True else: @@ -1068,10 +1124,10 @@ def _create_project(self, **kwargs) -> Project: params["media_type"] = media_type_value extra_params = { - Field.String("dataset_name_or_id"): - params.pop("dataset_name_or_id", None), - Field.Boolean("append_to_existing_dataset"): - params.pop("append_to_existing_dataset", None), + Field.String("dataset_name_or_id"): params.pop("dataset_name_or_id", None), + Field.Boolean("append_to_existing_dataset"): params.pop( + "append_to_existing_dataset", None + ), } extra_params = {k: v for k, v in extra_params.items() if v is not None} return self._create(Entity.Project, params, extra_params) @@ -1095,13 +1151,14 @@ def get_data_row(self, data_row_id): def get_data_row_by_global_key(self, global_key: str) -> DataRow: """ - Returns: DataRow: returns a single data row given the global key + Returns: DataRow: returns a single data row given the global key """ res = self.get_data_row_ids_for_global_keys([global_key]) - if res['status'] != "SUCCESS": + if res["status"] != "SUCCESS": raise labelbox.exceptions.ResourceNotFoundError( - Entity.DataRow, {global_key: global_key}) - data_row_id = res['results'][0] + Entity.DataRow, {global_key: global_key} + ) + data_row_id = res["results"][0] return self.get_data_row(data_row_id) @@ -1117,7 +1174,7 @@ def get_data_row_metadata_ontology(self) -> DataRowMetadataOntology: return self._data_row_metadata_ontology def get_model(self, model_id) -> Model: - """ Gets a single Model with the given ID. + """Gets a single Model with the given ID. >>> model = client.get_model("") @@ -1132,7 +1189,7 @@ def get_model(self, model_id) -> Model: return self._get_single(Entity.Model, model_id) def get_models(self, where=None) -> List[Model]: - """ Fetches all the models the user has access to. + """Fetches all the models the user has access to. >>> models = client.get_models(where=(Model.name == "")) @@ -1145,7 +1202,7 @@ def get_models(self, where=None) -> List[Model]: return self._get_all(Entity.Model, where, filter_deleted=False) def create_model(self, name, ontology_id) -> Model: - """ Creates a Model object on the server. + """Creates a Model object on the server. 
>>> model = client.create_model(, ) @@ -1164,14 +1221,12 @@ def create_model(self, name, ontology_id) -> Model: } }""" % query.results_query_part(Entity.Model) - result = self.execute(query_str, { - "name": name, - "ontologyId": ontology_id - }) - return Entity.Model(self, result['createModel']) + result = self.execute(query_str, {"name": name, "ontologyId": ontology_id}) + return Entity.Model(self, result["createModel"]) def get_data_row_ids_for_external_ids( - self, external_ids: List[str]) -> Dict[str, List[str]]: + self, external_ids: List[str] + ) -> Dict[str, List[str]]: """ Returns a list of data row ids for a list of external ids. There is a max of 1500 items returned at a time. @@ -1189,10 +1244,9 @@ def get_data_row_ids_for_external_ids( result = defaultdict(list) for i in range(0, len(external_ids), max_ids_per_request): for row in self.execute( - query_str, - {'externalId_in': external_ids[i:i + max_ids_per_request] - })['externalIdsToDataRowIds']: - result[row['externalId']].append(row['dataRowId']) + query_str, {"externalId_in": external_ids[i : i + max_ids_per_request]} + )["externalIdsToDataRowIds"]: + result[row["externalId"]].append(row["dataRowId"]) return result def get_ontology(self, ontology_id) -> Ontology: @@ -1222,10 +1276,15 @@ def get_ontologies(self, name_contains) -> PaginatedCollection: } } """ % query.results_query_part(Entity.Ontology) - params = {'search': name_contains, 'filter': {'status': 'ALL'}} - return PaginatedCollection(self, query_str, params, - ['ontologies', 'nodes'], Entity.Ontology, - ['ontologies', 'nextCursor']) + params = {"search": name_contains, "filter": {"status": "ALL"}} + return PaginatedCollection( + self, + query_str, + params, + ["ontologies", "nodes"], + Entity.Ontology, + ["ontologies", "nextCursor"], + ) def get_feature_schema(self, feature_schema_id): """ @@ -1242,11 +1301,9 @@ def get_feature_schema(self, feature_schema_id): }""" % query.results_query_part(Entity.FeatureSchema) res = self.execute( - query_str, - {'rootSchemaNodeWhere': { - 'featureSchemaId': feature_schema_id - }})['rootSchemaNode'] - res['id'] = res['normalized']['featureSchemaId'] + query_str, {"rootSchemaNodeWhere": {"featureSchemaId": feature_schema_id}} + )["rootSchemaNode"] + res["id"] = res["normalized"]["featureSchemaId"] return Entity.FeatureSchema(self, res) def get_feature_schemas(self, name_contains) -> PaginatedCollection: @@ -1267,25 +1324,30 @@ def get_feature_schemas(self, name_contains) -> PaginatedCollection: } } """ % query.results_query_part(Entity.FeatureSchema) - params = {'search': name_contains, 'filter': {'status': 'ALL'}} + params = {"search": name_contains, "filter": {"status": "ALL"}} def rootSchemaPayloadToFeatureSchema(client, payload): # Technically we are querying for a Schema Node. 
# But the features are the same so we just grab the feature schema id - payload['id'] = payload['normalized']['featureSchemaId'] + payload["id"] = payload["normalized"]["featureSchemaId"] return Entity.FeatureSchema(client, payload) - return PaginatedCollection(self, query_str, params, - ['rootSchemaNodes', 'nodes'], - rootSchemaPayloadToFeatureSchema, - ['rootSchemaNodes', 'nextCursor']) + return PaginatedCollection( + self, + query_str, + params, + ["rootSchemaNodes", "nodes"], + rootSchemaPayloadToFeatureSchema, + ["rootSchemaNodes", "nextCursor"], + ) def create_ontology_from_feature_schemas( - self, - name, - feature_schema_ids, - media_type: MediaType = None, - ontology_kind: OntologyKind = None) -> Ontology: + self, + name, + feature_schema_ids, + media_type: MediaType = None, + ontology_kind: OntologyKind = None, + ) -> Ontology: """ Creates an ontology from a list of feature schema ids @@ -1304,22 +1366,22 @@ def create_ontology_from_feature_schemas( tools, classifications = [], [] for feature_schema_id in feature_schema_ids: feature_schema = self.get_feature_schema(feature_schema_id) - tool = ['tool'] - if 'tool' in feature_schema.normalized: - tool = feature_schema.normalized['tool'] + tool = ["tool"] + if "tool" in feature_schema.normalized: + tool = feature_schema.normalized["tool"] try: Tool.Type(tool) tools.append(feature_schema.normalized) except ValueError: - raise ValueError( - f"Tool `{tool}` not in list of supported tools.") - elif 'type' in feature_schema.normalized: - classification = feature_schema.normalized['type'] - if classification in Classification.Type._value2member_map_.keys( - ): + raise ValueError(f"Tool `{tool}` not in list of supported tools.") + elif "type" in feature_schema.normalized: + classification = feature_schema.normalized["type"] + if classification in Classification.Type._value2member_map_.keys(): Classification.Type(classification) classifications.append(feature_schema.normalized) - elif classification in PromptResponseClassification.Type._value2member_map_.keys( + elif ( + classification + in PromptResponseClassification.Type._value2member_map_.keys() ): PromptResponseClassification.Type(classification) classifications.append(feature_schema.normalized) @@ -1331,13 +1393,15 @@ def create_ontology_from_feature_schemas( raise ValueError( "Neither `tool` or `classification` found in the normalized feature schema" ) - normalized = {'tools': tools, 'classifications': classifications} + normalized = {"tools": tools, "classifications": classifications} # validation for ontology_kind and media_type is done within self.create_ontology - return self.create_ontology(name=name, - normalized=normalized, - media_type=media_type, - ontology_kind=ontology_kind) + return self.create_ontology( + name=name, + normalized=normalized, + media_type=media_type, + ontology_kind=ontology_kind, + ) def delete_unused_feature_schema(self, feature_schema_id: str) -> None: """ @@ -1348,14 +1412,18 @@ def delete_unused_feature_schema(self, feature_schema_id: str) -> None: >>> client.delete_unused_feature_schema("cleabc1my012ioqvu5anyaabc") """ - endpoint = self.rest_endpoint + "/feature-schemas/" + urllib.parse.quote( - feature_schema_id) + endpoint = ( + self.rest_endpoint + + "/feature-schemas/" + + urllib.parse.quote(feature_schema_id) + ) response = self._connection.delete(endpoint) if response.status_code != requests.codes.no_content: raise labelbox.exceptions.LabelboxError( - "Failed to delete the feature schema, message: " + - str(response.json()['message'])) + 
"Failed to delete the feature schema, message: " + + str(response.json()["message"]) + ) def delete_unused_ontology(self, ontology_id: str) -> None: """ @@ -1365,17 +1433,18 @@ def delete_unused_ontology(self, ontology_id: str) -> None: Example: >>> client.delete_unused_ontology("cleabc1my012ioqvu5anyaabc") """ - endpoint = self.rest_endpoint + "/ontologies/" + urllib.parse.quote( - ontology_id) + endpoint = self.rest_endpoint + "/ontologies/" + urllib.parse.quote(ontology_id) response = self._connection.delete(endpoint) if response.status_code != requests.codes.no_content: raise labelbox.exceptions.LabelboxError( - "Failed to delete the ontology, message: " + - str(response.json()['message'])) + "Failed to delete the ontology, message: " + + str(response.json()["message"]) + ) - def update_feature_schema_title(self, feature_schema_id: str, - title: str) -> FeatureSchema: + def update_feature_schema_title( + self, feature_schema_id: str, title: str + ) -> FeatureSchema: """ Updates a title of a feature schema Args: @@ -1387,16 +1456,21 @@ def update_feature_schema_title(self, feature_schema_id: str, >>> client.update_feature_schema_title("cleabc1my012ioqvu5anyaabc", "New Title") """ - endpoint = self.rest_endpoint + "/feature-schemas/" + urllib.parse.quote( - feature_schema_id) + '/definition' + endpoint = ( + self.rest_endpoint + + "/feature-schemas/" + + urllib.parse.quote(feature_schema_id) + + "/definition" + ) response = self._connection.patch(endpoint, json={"title": title}) if response.status_code == requests.codes.ok: return self.get_feature_schema(feature_schema_id) else: raise labelbox.exceptions.LabelboxError( - "Failed to update the feature schema, message: " + - str(response.json()['message'])) + "Failed to update the feature schema, message: " + + str(response.json()["message"]) + ) def upsert_feature_schema(self, feature_schema: Dict) -> FeatureSchema: """ @@ -1414,23 +1488,29 @@ def upsert_feature_schema(self, feature_schema: Dict) -> FeatureSchema: >>> client.upsert_feature_schema(tool.asdict()) """ - feature_schema_id = feature_schema.get( - "featureSchemaId") or "new_feature_schema_id" - endpoint = self.rest_endpoint + "/feature-schemas/" + urllib.parse.quote( - feature_schema_id) + feature_schema_id = ( + feature_schema.get("featureSchemaId") or "new_feature_schema_id" + ) + endpoint = ( + self.rest_endpoint + + "/feature-schemas/" + + urllib.parse.quote(feature_schema_id) + ) response = self._connection.put( - endpoint, json={"normalized": json.dumps(feature_schema)}) + endpoint, json={"normalized": json.dumps(feature_schema)} + ) if response.status_code == requests.codes.ok: - return self.get_feature_schema(response.json()['schemaId']) + return self.get_feature_schema(response.json()["schemaId"]) else: raise labelbox.exceptions.LabelboxError( - "Failed to upsert the feature schema, message: " + - str(response.json()['message'])) + "Failed to upsert the feature schema, message: " + + str(response.json()["message"]) + ) - def insert_feature_schema_into_ontology(self, feature_schema_id: str, - ontology_id: str, - position: int) -> None: + def insert_feature_schema_into_ontology( + self, feature_schema_id: str, ontology_id: str, position: int + ) -> None: """ Inserts a feature schema into an ontology. If the feature schema is already in the ontology, it will be moved to the new position. 
@@ -1442,14 +1522,19 @@ def insert_feature_schema_into_ontology(self, feature_schema_id: str, >>> client.insert_feature_schema_into_ontology("cleabc1my012ioqvu5anyaabc", "clefdvwl7abcgefgu3lyvcde", 2) """ - endpoint = self.rest_endpoint + '/ontologies/' + urllib.parse.quote( - ontology_id) + "/feature-schemas/" + urllib.parse.quote( - feature_schema_id) + endpoint = ( + self.rest_endpoint + + "/ontologies/" + + urllib.parse.quote(ontology_id) + + "/feature-schemas/" + + urllib.parse.quote(feature_schema_id) + ) response = self._connection.post(endpoint, json={"position": position}) if response.status_code != requests.codes.created: raise labelbox.exceptions.LabelboxError( "Failed to insert the feature schema into the ontology, message: " - + str(response.json()['message'])) + + str(response.json()["message"]) + ) def get_unused_ontologies(self, after: str = None) -> List[str]: """ @@ -1472,8 +1557,9 @@ def get_unused_ontologies(self, after: str = None) -> List[str]: return response.json() else: raise labelbox.exceptions.LabelboxError( - "Failed to get unused ontologies, message: " + - str(response.json()['message'])) + "Failed to get unused ontologies, message: " + + str(response.json()["message"]) + ) def get_unused_feature_schemas(self, after: str = None) -> List[str]: """ @@ -1496,14 +1582,17 @@ def get_unused_feature_schemas(self, after: str = None) -> List[str]: return response.json() else: raise labelbox.exceptions.LabelboxError( - "Failed to get unused feature schemas, message: " + - str(response.json()['message'])) + "Failed to get unused feature schemas, message: " + + str(response.json()["message"]) + ) - def create_ontology(self, - name, - normalized, - media_type: MediaType = None, - ontology_kind: OntologyKind = None) -> Ontology: + def create_ontology( + self, + name, + normalized, + media_type: MediaType = None, + ontology_kind: OntologyKind = None, + ) -> Ontology: """ Creates an ontology from normalized data >>> normalized = {"tools" : [{'tool': 'polygon', 'name': 'cat', 'color': 'black'}], "classifications" : []} @@ -1521,7 +1610,7 @@ def create_ontology(self, name (str): Name of the ontology normalized (dict): A normalized ontology payload. See above for details. media_type (MediaType or None): Media type of a new ontology - ontology_kind (OntologyKind or None): set to OntologyKind.ModelEvaluation if the ontology is for chat evaluation or + ontology_kind (OntologyKind or None): set to OntologyKind.ModelEvaluation if the ontology is for chat evaluation or OntologyKind.ResponseCreation if ontology is for response creation, leave as None otherwise. 
Returns: @@ -1539,9 +1628,11 @@ def create_ontology(self, if ontology_kind and OntologyKind.is_supported(ontology_kind): media_type = OntologyKind.evaluate_ontology_kind_with_media_type( - ontology_kind, media_type) + ontology_kind, media_type + ) editor_task_type_value = EditorTaskTypeMapper.to_editor_task_type( - ontology_kind, media_type).value + ontology_kind, media_type + ).value elif ontology_kind: raise OntologyKind.get_ontology_kind_validation_error(ontology_kind) else: @@ -1551,17 +1642,17 @@ def create_ontology(self, upsertOntology(data: $data){ %s } } """ % query.results_query_part(Entity.Ontology) params = { - 'data': { - 'name': name, - 'normalized': json.dumps(normalized), - 'mediaType': media_type_value + "data": { + "name": name, + "normalized": json.dumps(normalized), + "mediaType": media_type_value, } } if editor_task_type_value: - params['data']['editorTaskType'] = editor_task_type_value + params["data"]["editorTaskType"] = editor_task_type_value res = self.execute(query_str, params) - return Entity.Ontology(self, res['upsertOntology']) + return Entity.Ontology(self, res["upsertOntology"]) def create_feature_schema(self, normalized): """ @@ -1598,15 +1689,15 @@ def create_feature_schema(self, normalized): upsertRootSchemaNode(data: $data){ %s } } """ % query.results_query_part(Entity.FeatureSchema) normalized = {k: v for k, v in normalized.items() if v} - params = {'data': {'normalized': json.dumps(normalized)}} - res = self.execute(query_str, params)['upsertRootSchemaNode'] + params = {"data": {"normalized": json.dumps(normalized)}} + res = self.execute(query_str, params)["upsertRootSchemaNode"] # Technically we are querying for a Schema Node. # But the features are the same so we just grab the feature schema id - res['id'] = res['normalized']['featureSchemaId'] + res["id"] = res["normalized"]["featureSchemaId"] return Entity.FeatureSchema(self, res) def get_model_run(self, model_run_id: str) -> ModelRun: - """ Gets a single ModelRun with the given ID. + """Gets a single ModelRun with the given ID. >>> model_run = client.get_model_run("") @@ -1618,9 +1709,8 @@ def get_model_run(self, model_run_id: str) -> ModelRun: return self._get_single(Entity.ModelRun, model_run_id) def assign_global_keys_to_data_rows( - self, - global_key_to_data_row_inputs: List[Dict[str, str]], - timeout_seconds=60) -> Dict[str, Union[str, List[Any]]]: + self, global_key_to_data_row_inputs: List[Dict[str, str]], timeout_seconds=60 + ) -> Dict[str, Union[str, List[Any]]]: """ Assigns global keys to data rows. 
@@ -1651,21 +1741,29 @@ def assign_global_keys_to_data_rows( [{'data_row_id': 'cl7tpjzw30031ka6g4evqdfoy', 'global_key': 'gk"', 'error': 'Invalid global key'}] """ - def _format_successful_rows(rows: Dict[str, str], - sanitized: bool) -> List[Dict[str, str]]: - return [{ - 'data_row_id': r['dataRowId'], - 'global_key': r['globalKey'], - 'sanitized': sanitized - } for r in rows] + def _format_successful_rows( + rows: Dict[str, str], sanitized: bool + ) -> List[Dict[str, str]]: + return [ + { + "data_row_id": r["dataRowId"], + "global_key": r["globalKey"], + "sanitized": sanitized, + } + for r in rows + ] - def _format_failed_rows(rows: Dict[str, str], - error_msg: str) -> List[Dict[str, str]]: - return [{ - 'data_row_id': r['dataRowId'], - 'global_key': r['globalKey'], - 'error': error_msg - } for r in rows] + def _format_failed_rows( + rows: Dict[str, str], error_msg: str + ) -> List[Dict[str, str]]: + return [ + { + "data_row_id": r["dataRowId"], + "global_key": r["globalKey"], + "error": error_msg, + } + for r in rows + ] # Validate input dict validation_errors = [] @@ -1685,9 +1783,10 @@ def _format_failed_rows(rows: Dict[str, str], } """ params = { - 'globalKeyDataRowLinks': [{ - utils.camel_case(key): value for key, value in input.items() - } for input in global_key_to_data_row_inputs] + "globalKeyDataRowLinks": [ + {utils.camel_case(key): value for key, value in input.items()} + for input in global_key_to_data_row_inputs + ] } assign_global_keys_to_data_rows_job = self.execute(query_str, params) @@ -1715,9 +1814,9 @@ def _format_failed_rows(rows: Dict[str, str], }}} """ result_params = { - "jobId": - assign_global_keys_to_data_rows_job["assignGlobalKeysToDataRows" - ]["jobId"] + "jobId": assign_global_keys_to_data_rows_job["assignGlobalKeysToDataRows"][ + "jobId" + ] } # Poll job status until finished, then retrieve results @@ -1725,27 +1824,33 @@ def _format_failed_rows(rows: Dict[str, str], start_time = time.time() while True: res = self.execute(result_query_str, result_params) - if res["assignGlobalKeysToDataRowsResult"][ - "jobStatus"] == "COMPLETE": + if res["assignGlobalKeysToDataRowsResult"]["jobStatus"] == "COMPLETE": results, errors = [], [] - res = res['assignGlobalKeysToDataRowsResult']['data'] + res = res["assignGlobalKeysToDataRowsResult"]["data"] # Successful assignments results.extend( - _format_successful_rows(rows=res['sanitizedAssignments'], - sanitized=True)) + _format_successful_rows( + rows=res["sanitizedAssignments"], sanitized=True + ) + ) results.extend( - _format_successful_rows(rows=res['unmodifiedAssignments'], - sanitized=False)) + _format_successful_rows( + rows=res["unmodifiedAssignments"], sanitized=False + ) + ) # Failed assignments errors.extend( _format_failed_rows( - rows=res['invalidGlobalKeyAssignments'], - error_msg= - "Invalid assignment. Either DataRow does not exist, or globalKey is invalid" - )) + rows=res["invalidGlobalKeyAssignments"], + error_msg="Invalid assignment. 
Either DataRow does not exist, or globalKey is invalid", + ) + ) errors.extend( - _format_failed_rows(rows=res['accessDeniedAssignments'], - error_msg="Access denied to Data Row")) + _format_failed_rows( + rows=res["accessDeniedAssignments"], + error_msg="Access denied to Data Row", + ) + ) if not errors: status = CollectionJobStatus.SUCCESS.value @@ -1764,10 +1869,10 @@ def _format_failed_rows(rows: Dict[str, str], "results": results, "errors": errors, } - elif res["assignGlobalKeysToDataRowsResult"][ - "jobStatus"] == "FAILED": + elif res["assignGlobalKeysToDataRowsResult"]["jobStatus"] == "FAILED": raise labelbox.exceptions.LabelboxError( - "Job assign_global_keys_to_data_rows failed.") + "Job assign_global_keys_to_data_rows failed." + ) current_time = time.time() if current_time - start_time > timeout_seconds: raise labelbox.exceptions.TimeoutError( @@ -1776,9 +1881,8 @@ def _format_failed_rows(rows: Dict[str, str], time.sleep(sleep_time) def get_data_row_ids_for_global_keys( - self, - global_keys: List[str], - timeout_seconds=60) -> Dict[str, Union[str, List[Any]]]: + self, global_keys: List[str], timeout_seconds=60 + ) -> Dict[str, Union[str, List[Any]]]: """ Gets data row ids for a list of global keys. @@ -1811,9 +1915,10 @@ def get_data_row_ids_for_global_keys( [{'global_key': 'asdf', 'error': 'Data Row not found'}] """ - def _format_failed_rows(rows: List[str], - error_msg: str) -> List[Dict[str, str]]: - return [{'global_key': r, 'error': error_msg} for r in rows] + def _format_failed_rows( + rows: List[str], error_msg: str + ) -> List[Dict[str, str]]: + return [{"global_key": r, "error": error_msg} for r in rows] # Start get data rows for global keys job query_str = """query getDataRowsForGlobalKeysPyApi($globalKeys: [ID!]!) { @@ -1831,8 +1936,7 @@ def _format_failed_rows(rows: List[str], } jobStatus}} """ result_params = { - "jobId": - data_rows_for_global_keys_job["dataRowsForGlobalKeys"]["jobId"] + "jobId": data_rows_for_global_keys_job["dataRowsForGlobalKeys"]["jobId"] } # Poll job status until finished, then retrieve results @@ -1840,20 +1944,24 @@ def _format_failed_rows(rows: List[str], start_time = time.time() while True: res = self.execute(result_query_str, result_params) - if res["dataRowsForGlobalKeysResult"]['jobStatus'] == "COMPLETE": - data = res["dataRowsForGlobalKeysResult"]['data'] + if res["dataRowsForGlobalKeysResult"]["jobStatus"] == "COMPLETE": + data = res["dataRowsForGlobalKeysResult"]["data"] results, errors = [], [] - results.extend([row['id'] for row in data['fetchedDataRows']]) + results.extend([row["id"] for row in data["fetchedDataRows"]]) errors.extend( - _format_failed_rows(data['notFoundGlobalKeys'], - "Data Row not found")) + _format_failed_rows( + data["notFoundGlobalKeys"], "Data Row not found" + ) + ) errors.extend( - _format_failed_rows(data['accessDeniedGlobalKeys'], - "Access denied to Data Row")) + _format_failed_rows( + data["accessDeniedGlobalKeys"], "Access denied to Data Row" + ) + ) # Invalid results may contain empty string, so we must filter # them prior to checking for PARTIAL_SUCCESS - filtered_results = list(filter(lambda r: r != '', results)) + filtered_results = list(filter(lambda r: r != "", results)) if not errors: status = CollectionJobStatus.SUCCESS.value elif errors and len(filtered_results) > 0: @@ -1867,9 +1975,10 @@ def _format_failed_rows(rows: List[str], ) return {"status": status, "results": results, "errors": errors} - elif res["dataRowsForGlobalKeysResult"]['jobStatus'] == "FAILED": + elif 
res["dataRowsForGlobalKeysResult"]["jobStatus"] == "FAILED": raise labelbox.exceptions.LabelboxError( - "Job dataRowsForGlobalKeys failed.") + "Job dataRowsForGlobalKeys failed." + ) current_time = time.time() if current_time - start_time > timeout_seconds: raise labelbox.exceptions.TimeoutError( @@ -1878,9 +1987,8 @@ def _format_failed_rows(rows: List[str], time.sleep(sleep_time) def clear_global_keys( - self, - global_keys: List[str], - timeout_seconds=60) -> Dict[str, Union[str, List[Any]]]: + self, global_keys: List[str], timeout_seconds=60 + ) -> Dict[str, Union[str, List[Any]]]: """ Clears global keys for the data rows tha correspond to the global keys provided. @@ -1906,9 +2014,10 @@ def clear_global_keys( [{'global_key': 'notfoundkey', 'error': 'Failed to find data row matching provided global key'}] """ - def _format_failed_rows(rows: List[str], - error_msg: str) -> List[Dict[str, str]]: - return [{'global_key': r, 'error': error_msg} for r in rows] + def _format_failed_rows( + rows: List[str], error_msg: str + ) -> List[Dict[str, str]]: + return [{"global_key": r, "error": error_msg} for r in rows] # Start get data rows for global keys job query_str = """mutation clearGlobalKeysPyApi($globalKeys: [ID!]!) { @@ -1926,30 +2035,33 @@ def _format_failed_rows(rows: List[str], accessDeniedGlobalKeys } jobStatus}} """ - result_params = { - "jobId": clear_global_keys_job["clearGlobalKeys"]["jobId"] - } + result_params = {"jobId": clear_global_keys_job["clearGlobalKeys"]["jobId"]} # Poll job status until finished, then retrieve results sleep_time = 2 start_time = time.time() while True: res = self.execute(result_query_str, result_params) - if res["clearGlobalKeysResult"]['jobStatus'] == "COMPLETE": - data = res["clearGlobalKeysResult"]['data'] + if res["clearGlobalKeysResult"]["jobStatus"] == "COMPLETE": + data = res["clearGlobalKeysResult"]["data"] results, errors = [], [] - results.extend(data['clearedGlobalKeys']) + results.extend(data["clearedGlobalKeys"]) errors.extend( - _format_failed_rows(data['failedToClearGlobalKeys'], - "Clearing global key failed")) + _format_failed_rows( + data["failedToClearGlobalKeys"], "Clearing global key failed" + ) + ) errors.extend( _format_failed_rows( - data['notFoundGlobalKeys'], - "Failed to find data row matching provided global key")) + data["notFoundGlobalKeys"], + "Failed to find data row matching provided global key", + ) + ) errors.extend( _format_failed_rows( - data['accessDeniedGlobalKeys'], - "Denied access to modify data row matching provided global key" - )) + data["accessDeniedGlobalKeys"], + "Denied access to modify data row matching provided global key", + ) + ) if not errors: status = CollectionJobStatus.SUCCESS.value @@ -1964,13 +2076,13 @@ def _format_failed_rows(rows: List[str], ) return {"status": status, "results": results, "errors": errors} - elif res["clearGlobalKeysResult"]['jobStatus'] == "FAILED": - raise labelbox.exceptions.LabelboxError( - "Job clearGlobalKeys failed.") + elif res["clearGlobalKeysResult"]["jobStatus"] == "FAILED": + raise labelbox.exceptions.LabelboxError("Job clearGlobalKeys failed.") current_time = time.time() if current_time - start_time > timeout_seconds: raise labelbox.exceptions.TimeoutError( - "Timed out waiting for clear_global_keys job to complete.") + "Timed out waiting for clear_global_keys job to complete." 
@@ -1996,11 +2108,12 @@ def get_catalog_slice(self, slice_id) -> CatalogSlice:
             }
         }
         """
-        res = self.execute(query_str, {'id': slice_id})
-        return Entity.CatalogSlice(self, res['getSavedQuery'])
+        res = self.execute(query_str, {"id": slice_id})
+        return Entity.CatalogSlice(self, res["getSavedQuery"])

-    def is_feature_schema_archived(self, ontology_id: str,
-                                   feature_schema_id: str) -> bool:
+    def is_feature_schema_archived(
+        self, ontology_id: str, feature_schema_id: str
+    ) -> bool:
         """
         Returns true if a feature schema is archived in the specified ontology, returns false otherwise.

@@ -2011,33 +2124,35 @@ def is_feature_schema_archived(self, ontology_id: str,
             bool
         """

-        ontology_endpoint = self.rest_endpoint + "/ontologies/" + urllib.parse.quote(
-            ontology_id)
+        ontology_endpoint = (
+            self.rest_endpoint + "/ontologies/" + urllib.parse.quote(ontology_id)
+        )
         response = self._connection.get(ontology_endpoint)

         if response.status_code == requests.codes.ok:
-            feature_schema_nodes = response.json()['featureSchemaNodes']
-            tools = feature_schema_nodes['tools']
-            classifications = feature_schema_nodes['classifications']
-            relationships = feature_schema_nodes['relationships']
+            feature_schema_nodes = response.json()["featureSchemaNodes"]
+            tools = feature_schema_nodes["tools"]
+            classifications = feature_schema_nodes["classifications"]
+            relationships = feature_schema_nodes["relationships"]
             feature_schema_node_list = tools + classifications + relationships

             filtered_feature_schema_nodes = [
                 feature_schema_node
                 for feature_schema_node in feature_schema_node_list
-                if feature_schema_node['featureSchemaId'] == feature_schema_id
+                if feature_schema_node["featureSchemaId"] == feature_schema_id
             ]
             if filtered_feature_schema_nodes:
-                return bool(filtered_feature_schema_nodes[0]['archived'])
+                return bool(filtered_feature_schema_nodes[0]["archived"])
             else:
                 raise labelbox.exceptions.LabelboxError(
-                    "The specified feature schema was not in the ontology.")
+                    "The specified feature schema was not in the ontology."
+                )

         elif response.status_code == 404:
-            raise labelbox.exceptions.ResourceNotFoundError(
-                Ontology, ontology_id)
+            raise labelbox.exceptions.ResourceNotFoundError(Ontology, ontology_id)
         else:
             raise labelbox.exceptions.LabelboxError(
-                "Failed to get the feature schema archived status.")
+                "Failed to get the feature schema archived status."
+            )

     def get_model_slice(self, slice_id) -> ModelSlice:
         """
@@ -2062,14 +2177,13 @@ def get_model_slice(self, slice_id) -> ModelSlice:
         """
         res = self.execute(query_str, {"id": slice_id})
         if res is None or res["getSavedQuery"] is None:
-            raise labelbox.exceptions.ResourceNotFoundError(
-                ModelSlice, slice_id)
+            raise labelbox.exceptions.ResourceNotFoundError(ModelSlice, slice_id)

         return Entity.ModelSlice(self, res["getSavedQuery"])

     def delete_feature_schema_from_ontology(
-            self, ontology_id: str,
-            feature_schema_id: str) -> DeleteFeatureFromOntologyResult:
+        self, ontology_id: str, feature_schema_id: str
+    ) -> DeleteFeatureFromOntologyResult:
         """
         Deletes or archives a feature schema from an ontology.
         If the feature schema is a root level node with associated labels, it will be archived.
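# A short sketch pairing is_feature_schema_archived (above) with the
# unarchive helper defined further below; `client` is the instance from
# the earlier sketch, the ids are hypothetical placeholders, and the
# feature schema is assumed to be a root-level node.
if client.is_feature_schema_archived("<ONTOLOGY_ID>", "<ROOT_FEATURE_SCHEMA_ID>"):
    client.unarchive_feature_schema_node("<ONTOLOGY_ID>", "<ROOT_FEATURE_SCHEMA_ID>")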
@@ -2086,31 +2200,36 @@ def delete_feature_schema_from_ontology(
         Example:
             >>> client.delete_feature_schema_from_ontology(<ontology_id>, <feature_schema_id>)
         """
-        ontology_endpoint = self.rest_endpoint + "/ontologies/" + urllib.parse.quote(
-            ontology_id) + "/feature-schemas/" + urllib.parse.quote(
-                feature_schema_id)
+        ontology_endpoint = (
+            self.rest_endpoint
+            + "/ontologies/"
+            + urllib.parse.quote(ontology_id)
+            + "/feature-schemas/"
+            + urllib.parse.quote(feature_schema_id)
+        )
         response = self._connection.delete(ontology_endpoint)

         if response.status_code == requests.codes.ok:
             response_json = response.json()
-            if response_json['archived'] == True:
+            if response_json["archived"] == True:
                 logger.info(
-                    'Feature schema was archived from the ontology because it had associated labels.'
+                    "Feature schema was archived from the ontology because it had associated labels."
                 )
-            elif response_json['deleted'] == True:
-                logger.info(
-                    'Feature schema was successfully removed from the ontology')
+            elif response_json["deleted"] == True:
+                logger.info("Feature schema was successfully removed from the ontology")
             result = DeleteFeatureFromOntologyResult()
-            result.archived = bool(response_json['archived'])
-            result.deleted = bool(response_json['deleted'])
+            result.archived = bool(response_json["archived"])
+            result.deleted = bool(response_json["deleted"])
             return result
         else:
             raise labelbox.exceptions.LabelboxError(
-                "Failed to remove feature schema from ontology, message: " +
-                str(response.json()['message']))
+                "Failed to remove feature schema from ontology, message: "
+                + str(response.json()["message"])
+            )

-    def unarchive_feature_schema_node(self, ontology_id: str,
-                                      root_feature_schema_id: str) -> None:
+    def unarchive_feature_schema_node(
+        self, ontology_id: str, root_feature_schema_id: str
+    ) -> None:
         """
         Unarchives a feature schema node in an ontology.
         Only root level feature schema nodes can be unarchived.
@@ -2120,18 +2239,24 @@ def unarchive_feature_schema_node(self, ontology_id: str,
         Returns:
             None
         """
-        ontology_endpoint = self.rest_endpoint + "/ontologies/" + urllib.parse.quote(
-            ontology_id) + '/feature-schemas/' + urllib.parse.quote(
-                root_feature_schema_id) + '/unarchive'
+        ontology_endpoint = (
+            self.rest_endpoint
+            + "/ontologies/"
+            + urllib.parse.quote(ontology_id)
+            + "/feature-schemas/"
+            + urllib.parse.quote(root_feature_schema_id)
+            + "/unarchive"
+        )
         response = self._connection.patch(ontology_endpoint)

         if response.status_code == requests.codes.ok:
-            if not bool(response.json()['unarchived']):
+            if not bool(response.json()["unarchived"]):
                 raise labelbox.exceptions.LabelboxError(
-                    "Failed unarchive the feature schema.")
+                    "Failed to unarchive the feature schema."
+                )
         else:
             raise labelbox.exceptions.LabelboxError(
-                "Failed unarchive the feature schema node, message: ",
-                response.text)
+                "Failed to unarchive the feature schema node, message: ", response.text
+            )

     def get_batch(self, project_id: str, batch_id: str) -> Entity.Batch:
         # obtain batch entity to return
@@ -2144,24 +2269,28 @@ def get_batch(self, project_id: str, batch_id: str) -> Entity.Batch:
             }
         }
-        """ % ("getProjectBatchPyApi",
-               query.results_query_part(Entity.Batch))
+        """ % (
+            "getProjectBatchPyApi",
+            query.results_query_part(Entity.Batch),
+        )

         batch = self.execute(
-            get_batch_str, {
-                "projectId": project_id,
-                "batchId": batch_id
-            },
+            get_batch_str,
+            {"projectId": project_id, "batchId": batch_id},
             timeout=180.0,
-            experimental=True)["project"]["batches"]["nodes"][0]
+            experimental=True,
+        )["project"]["batches"]["nodes"][0]

         return Entity.Batch(self, project_id, batch)

-    def send_to_annotate_from_catalog(self, destination_project_id: str,
-                                      task_queue_id: Optional[str],
-                                      batch_name: str,
-                                      data_rows: Union[DataRowIds, GlobalKeys],
-                                      params: Dict[str, Any]):
+    def send_to_annotate_from_catalog(
+        self,
+        destination_project_id: str,
+        task_queue_id: Optional[str],
+        batch_name: str,
+        data_rows: Union[DataRowIds, GlobalKeys],
+        params: Dict[str, Any],
+    ):
         """
         Sends data rows from catalog to a specified project for annotation.
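# A hedged sketch of calling send_to_annotate_from_catalog; `client` is
# the instance from the earlier sketch, the ids and keys are hypothetical
# placeholders, task_queue_id=None is assumed to route rows to the Done
# state, and the params keys mirror the SendToAnnotateFromCatalogParams
# fields referenced in the hunk below.
from labelbox.schema.identifiables import GlobalKeys

task = client.send_to_annotate_from_catalog(
    destination_project_id="<PROJECT_ID>",
    task_queue_id=None,
    batch_name="catalog-batch",
    data_rows=GlobalKeys(["key-1", "key-2"]),
    params={"batch_priority": 5},
)
task.wait_till_done()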
@@ -2201,57 +2330,54 @@ def send_to_annotate_from_catalog(self, destination_project_id: str,
             }
         """

-        destination_task_queue = build_destination_task_queue_input(
-            task_queue_id)
+        destination_task_queue = build_destination_task_queue_input(task_queue_id)
         data_rows_query = self.build_catalog_query(data_rows)

-        predictions_input = build_predictions_input(
-            validated_params.predictions_ontology_mapping,
-            validated_params.source_model_run_id
-        ) if validated_params.source_model_run_id else None
-
-        annotations_input = build_annotations_input(
-            validated_params.annotations_ontology_mapping, validated_params.
-            source_project_id) if validated_params.source_project_id else None
+        predictions_input = (
+            build_predictions_input(
+                validated_params.predictions_ontology_mapping,
+                validated_params.source_model_run_id,
+            )
+            if validated_params.source_model_run_id
+            else None
+        )
+
+        annotations_input = (
+            build_annotations_input(
+                validated_params.annotations_ontology_mapping,
+                validated_params.source_project_id,
+            )
+            if validated_params.source_project_id
+            else None
+        )

         res = self.execute(
-            mutation_str, {
+            mutation_str,
+            {
                 "input": {
-                    "destinationProjectId":
-                        destination_project_id,
+                    "destinationProjectId": destination_project_id,
                     "batchInput": {
                         "batchName": batch_name,
-                        "batchPriority": validated_params.batch_priority
-                    },
-                    "destinationTaskQueue":
-                        destination_task_queue,
-                    "excludeDataRowsInProject":
-                        validated_params.exclude_data_rows_in_project,
-                    "annotationsInput":
-                        annotations_input,
-                    "predictionsInput":
-                        predictions_input,
-                    "conflictLabelsResolutionStrategy":
-                        validated_params.override_existing_annotations_rule,
-                    "searchQuery": {
-                        "scope": None,
-                        "query": [data_rows_query]
+                        "batchPriority": validated_params.batch_priority,
                     },
+                    "destinationTaskQueue": destination_task_queue,
+                    "excludeDataRowsInProject": validated_params.exclude_data_rows_in_project,
+                    "annotationsInput": annotations_input,
+                    "predictionsInput": predictions_input,
+                    "conflictLabelsResolutionStrategy": validated_params.override_existing_annotations_rule,
+                    "searchQuery": {"scope": None, "query": [data_rows_query]},
                     "ordering": {
                         "type": "RANDOM",
-                        "random": {
-                            "seed": random.randint(0, 10000)
-                        },
-                        "sorting": None
+                        "random": {"seed": random.randint(0, 10000)},
+                        "sorting": None,
                     },
-                    "sorting":
-                        None,
-                    "limit":
-                        None
+                    "sorting": None,
+                    "limit": None,
                 }
-            })['sendToAnnotateFromCatalog']
+            },
+        )["sendToAnnotateFromCatalog"]

-        return Entity.Task.get_task(self, res['taskId'])
+        return Entity.Task.get_task(self, res["taskId"])

     @staticmethod
     def build_catalog_query(data_rows: Union[DataRowIds, GlobalKeys]):
@@ -2268,13 +2394,13 @@ def build_catalog_query(data_rows: Union[DataRowIds, GlobalKeys]):
             data_rows_query = {
                 "type": "data_row_id",
                 "operator": "is",
-                "ids": list(data_rows)
+                "ids": list(data_rows),
             }
         elif isinstance(data_rows, GlobalKeys):
             data_rows_query = {
                 "type": "global_key",
                 "operator": "is",
-                "ids": list(data_rows)
+                "ids": list(data_rows),
             }
         else:
             raise ValueError(
@@ -2282,9 +2408,9 @@ def build_catalog_query(data_rows: Union[DataRowIds, GlobalKeys]):
             )
         return data_rows_query

-    def run_foundry_app(self, model_run_name: str, data_rows: Union[DataRowIds,
-                                                                    GlobalKeys],
-                        app_id: str) -> Task:
+    def run_foundry_app(
+        self, model_run_name: str, data_rows: Union[DataRowIds, GlobalKeys], app_id: str
+    ) -> Task:
         """
         Run a foundry app

@@ -2351,11 +2477,11 @@ def get_embedding_by_name(self, name: str) -> Embedding:
         for e in embeddings:
             if e.name == name:
                 return e
-        raise labelbox.exceptions.ResourceNotFoundError(Embedding,
-                                                        dict(name=name))
+        raise labelbox.exceptions.ResourceNotFoundError(Embedding, dict(name=name))

-    def upsert_label_feedback(self, label_id: str, feedback: str,
-                              scores: Dict[str, float]) -> List[LabelScore]:
+    def upsert_label_feedback(
+        self, label_id: str, feedback: str, scores: Dict[str, float]
+    ) -> List[LabelScore]:
         """
         Submits the label feedback, which is free-form text and numeric label scores.
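# A sketch of the static catalog-query builder above, assuming the
# GlobalKeys identifiable from labelbox.schema.identifiables.
query = Client.build_catalog_query(GlobalKeys(["key-1", "key-2"]))
# Expected shape: {"type": "global_key", "operator": "is", "ids": ["key-1", "key-2"]}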
@@ -2391,16 +2517,13 @@ def upsert_label_feedback(self, label_id: str, feedback: str, } } """ - res = self.execute(mutation_str, { - "labelId": label_id, - "feedback": feedback, - "scores": scores - }) + res = self.execute( + mutation_str, {"labelId": label_id, "feedback": feedback, "scores": scores} + ) scores_raw = res["upsertAutoQaLabelFeedback"]["scores"] return [ - labelbox.LabelScore(name=x['name'], score=x['score']) - for x in scores_raw + labelbox.LabelScore(name=x["name"], score=x["score"]) for x in scores_raw ] def get_labeling_service_dashboards( @@ -2412,12 +2535,12 @@ def get_labeling_service_dashboards( Optional parameters: search_query: A list of search filters representing the search - + NOTE: - Retrieves all projects for the organization or as filtered by the search query - INCLUDING those not requesting labeling services - Sorted by project created date in ascending order. - + Examples: Retrieves all labeling service dashboards for a given workspace id: >>> workspace_filter = WorkspaceFilter( @@ -2448,7 +2571,7 @@ def get_task_by_id(self, task_id: str) -> Union[Task, DataUpsertTask]: Returns: Task or DataUpsertTask - + Throws: ResourceNotFoundError: If the task does not exist. @@ -2477,9 +2600,10 @@ def get_task_by_id(self, task_id: str) -> Union[Task, DataUpsertTask]: data = result.get("user", {}).get("createdTasks", []) if not data: raise labelbox.exceptions.ResourceNotFoundError( - message=f"The task {task_id} does not exist.") + message=f"The task {task_id} does not exist." + ) task_data = data[0] - if task_data["type"].lower() == 'adv-upsert-data-rows': + if task_data["type"].lower() == "adv-upsert-data-rows": task = DataUpsertTask(self, task_data) else: task = Task(self, task_data)