From efc921b875f520130ecc695b5615a06bde7588cc Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Wed, 30 Apr 2025 11:22:26 +0000 Subject: [PATCH 1/3] bump --- .codegen/_openapi_sha | 2 +- NEXT_CHANGELOG.md | 37 + databricks/sdk/__init__.py | 20 +- databricks/sdk/service/apps.py | 18 +- databricks/sdk/service/billing.py | 6 +- databricks/sdk/service/catalog.py | 55 +- databricks/sdk/service/cleanrooms.py | 23 +- databricks/sdk/service/compute.py | 137 ++- databricks/sdk/service/dashboards.py | 51 +- databricks/sdk/service/jobs.py | 91 +- databricks/sdk/service/oauth2.py | 21 +- databricks/sdk/service/serving.py | 25 +- databricks/sdk/service/settings.py | 339 ++++--- databricks/sdk/service/sql.py | 888 +++++++++++++++++- databricks/sdk/service/vectorsearch.py | 514 ++++++---- docs/account/billing/budget_policy.rst | 6 +- docs/account/iam/service_principals.rst | 5 +- docs/account/oauth2/federation_policy.rst | 8 +- .../service_principal_federation_policy.rst | 8 +- docs/account/provisioning/storage.rst | 6 +- .../account/settings/network_connectivity.rst | 73 +- docs/dbdataclasses/catalog.rst | 9 + docs/dbdataclasses/compute.rst | 10 + docs/dbdataclasses/jobs.rst | 5 +- docs/dbdataclasses/serving.rst | 11 - docs/dbdataclasses/settings.rst | 56 +- docs/dbdataclasses/sql.rst | 124 +++ docs/dbdataclasses/vectorsearch.rst | 32 +- docs/workspace/apps/apps.rst | 16 +- .../workspace/catalog/artifact_allowlists.rst | 8 +- docs/workspace/catalog/catalogs.rst | 7 +- docs/workspace/catalog/external_locations.rst | 26 +- docs/workspace/catalog/online_tables.rst | 6 +- .../workspace/catalog/storage_credentials.rst | 16 +- .../cleanrooms/clean_room_assets.rst | 8 +- docs/workspace/cleanrooms/clean_rooms.rst | 8 +- docs/workspace/compute/clusters.rst | 17 +- docs/workspace/compute/instance_pools.rst | 5 +- docs/workspace/dashboards/genie.rst | 27 +- docs/workspace/dashboards/lakeview.rst | 20 +- docs/workspace/iam/current_user.rst | 2 +- docs/workspace/iam/service_principals.rst | 10 +- docs/workspace/iam/users.rst | 7 +- docs/workspace/jobs/jobs.rst | 32 +- docs/workspace/ml/model_registry.rst | 2 +- .../settings/enable_export_notebook.rst | 16 +- .../enable_notebook_table_clipboard.rst | 12 +- .../settings/enable_results_downloading.rst | 14 +- docs/workspace/settings/settings.rst | 2 +- docs/workspace/sharing/providers.rst | 17 +- docs/workspace/sql/alerts.rst | 5 +- docs/workspace/sql/alerts_v2.rst | 77 ++ docs/workspace/sql/index.rst | 1 + docs/workspace/sql/queries.rst | 7 +- .../vectorsearch/vector_search_endpoints.rst | 44 +- .../vectorsearch/vector_search_indexes.rst | 26 +- docs/workspace/workspace/workspace.rst | 8 +- 57 files changed, 2373 insertions(+), 653 deletions(-) create mode 100644 docs/workspace/sql/alerts_v2.rst diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 26ece1bc5..e7f752fb5 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -05692f4dcf168be190bb7bcda725ee8b368b7ae3 \ No newline at end of file +ce962ccd0a078a5a9d89494fe38d237ce377d5f3 \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 650fd402d..20b9b56ea 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -18,3 +18,40 @@ ### Internal Changes ### API Changes +* Added [w.alerts_v2](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/sql/alerts_v2.html) workspace-level service. 
+* Added `update_ncc_azure_private_endpoint_rule_public()` method for [a.network_connectivity](https://databricks-sdk-py.readthedocs.io/en/latest/account/settings/network_connectivity.html) account-level service. +* Added `update_endpoint_budget_policy()` and `update_endpoint_custom_tags()` methods for [w.vector_search_endpoints](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/vectorsearch/vector_search_endpoints.html) workspace-level service. +* Added `created_at`, `created_by` and `metastore_id` fields for `databricks.sdk.service.catalog.SetArtifactAllowlist`. +* Added `node_type_flexibility` field for `databricks.sdk.service.compute.EditInstancePool`. +* Added `page_size` and `page_token` fields for `databricks.sdk.service.compute.GetEvents`. +* Added `next_page_token` and `prev_page_token` fields for `databricks.sdk.service.compute.GetEventsResponse`. +* Added `node_type_flexibility` field for `databricks.sdk.service.compute.GetInstancePool`. +* Added `node_type_flexibility` field for `databricks.sdk.service.compute.InstancePoolAndStats`. +* Added `effective_performance_target` field for `databricks.sdk.service.jobs.RepairHistoryItem`. +* Added `performance_target` field for `databricks.sdk.service.jobs.RepairRun`. +* [Breaking] Added `network_connectivity_config` field for `databricks.sdk.service.settings.CreateNetworkConnectivityConfigRequest`. +* [Breaking] Added `private_endpoint_rule` field for `databricks.sdk.service.settings.CreatePrivateEndpointRuleRequest`. +* Added `domain_names` field for `databricks.sdk.service.settings.NccAzurePrivateEndpointRule`. +* Added `auto_resolve_display_name` field for `databricks.sdk.service.sql.CreateAlertRequest`. +* Added `auto_resolve_display_name` field for `databricks.sdk.service.sql.CreateQueryRequest`. +* Added `budget_policy_id` field for `databricks.sdk.service.vectorsearch.CreateEndpoint`. +* Added `custom_tags` and `effective_budget_policy_id` fields for `databricks.sdk.service.vectorsearch.EndpointInfo`. +* Added `create_clean_room`, `execute_clean_room_task` and `modify_clean_room` enum values for `databricks.sdk.service.catalog.Privilege`. +* Added `dns_resolution_error` and `gcp_denied_by_org_policy` enum values for `databricks.sdk.service.compute.TerminationReasonCode`. +* Added `disabled` enum value for `databricks.sdk.service.jobs.TerminationCodeCode`. +* Added `expired` enum value for `databricks.sdk.service.settings.NccAzurePrivateEndpointRuleConnectionState`. +* [Breaking] Changed `create_network_connectivity_configuration()` and `create_private_endpoint_rule()` methods for [a.network_connectivity](https://databricks-sdk-py.readthedocs.io/en/latest/account/settings/network_connectivity.html) account-level service with new required argument order. +* [Breaking] Changed `create_index()` method for [w.vector_search_indexes](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/vectorsearch/vector_search_indexes.html) workspace-level service to return `databricks.sdk.service.vectorsearch.VectorIndex` dataclass. +* [Breaking] Changed `delete_data_vector_index()` method for [w.vector_search_indexes](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/vectorsearch/vector_search_indexes.html) workspace-level service . HTTP method/verb has changed. +* [Breaking] Changed `delete_data_vector_index()` method for [w.vector_search_indexes](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/vectorsearch/vector_search_indexes.html) workspace-level service with new required argument order. 
+* [Breaking] Changed `databricks.sdk.service.vectorsearch.List` dataclass to. +* [Breaking] Changed `workload_size` field for `databricks.sdk.service.serving.ServedModelInput` to type `str` dataclass. +* [Breaking] Changed `group_id` field for `databricks.sdk.service.settings.NccAzurePrivateEndpointRule` to type `str` dataclass. +* [Breaking] Changed `target_services` field for `databricks.sdk.service.settings.NccAzureServiceEndpointRule` to type `databricks.sdk.service.settings.EgressResourceTypeList` dataclass. +* [Breaking] Changed `data_array` field for `databricks.sdk.service.vectorsearch.ResultData` to type `databricks.sdk.service.vectorsearch.ListValueList` dataclass. +* [Breaking] Changed waiter for [VectorSearchEndpointsAPI.create_endpoint](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/vectorsearch/vector_search_endpoints.html#databricks.sdk.service.vectorsearch.VectorSearchEndpointsAPI.create_endpoint) method. +* [Breaking] Removed `name` and `region` fields for `databricks.sdk.service.settings.CreateNetworkConnectivityConfigRequest`. +* [Breaking] Removed `group_id` and `resource_id` fields for `databricks.sdk.service.settings.CreatePrivateEndpointRuleRequest`. +* [Breaking] Removed `null_value` field for `databricks.sdk.service.vectorsearch.Value`. +* [Breaking] Removed `large`, `medium` and `small` enum values for `databricks.sdk.service.serving.ServedModelInputWorkloadSize`. +* [Breaking] Removed `blob`, `dfs`, `mysql_server` and `sql_server` enum values for `databricks.sdk.service.settings.NccAzurePrivateEndpointRuleGroupId`. diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index 9189284dd..098fc457e 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -97,10 +97,10 @@ RecipientActivationAPI, RecipientsAPI, SharesAPI) from databricks.sdk.service.sql import (AlertsAPI, AlertsLegacyAPI, - DashboardsAPI, DashboardWidgetsAPI, - DataSourcesAPI, DbsqlPermissionsAPI, - QueriesAPI, QueriesLegacyAPI, - QueryHistoryAPI, + AlertsV2API, DashboardsAPI, + DashboardWidgetsAPI, DataSourcesAPI, + DbsqlPermissionsAPI, QueriesAPI, + QueriesLegacyAPI, QueryHistoryAPI, QueryVisualizationsAPI, QueryVisualizationsLegacyAPI, RedashConfigAPI, StatementExecutionAPI, @@ -170,7 +170,6 @@ def __init__( product_version="0.0.0", credentials_strategy: Optional[CredentialsStrategy] = None, credentials_provider: Optional[CredentialsStrategy] = None, - token_audience: Optional[str] = None, config: Optional[client.Config] = None, ): if not config: @@ -199,7 +198,6 @@ def __init__( debug_headers=debug_headers, product=product, product_version=product_version, - token_audience=token_audience, ) self._config = config.copy() self._dbutils = _make_dbutils(self._config) @@ -209,6 +207,7 @@ def __init__( self._account_access_control_proxy = service.iam.AccountAccessControlProxyAPI(self._api_client) self._alerts = service.sql.AlertsAPI(self._api_client) self._alerts_legacy = service.sql.AlertsLegacyAPI(self._api_client) + self._alerts_v2 = service.sql.AlertsV2API(self._api_client) self._apps = service.apps.AppsAPI(self._api_client) self._artifact_allowlists = service.catalog.ArtifactAllowlistsAPI(self._api_client) self._catalogs = service.catalog.CatalogsAPI(self._api_client) @@ -348,6 +347,11 @@ def alerts_legacy(self) -> service.sql.AlertsLegacyAPI: """The alerts API can be used to perform CRUD operations on alerts.""" return self._alerts_legacy + @property + def alerts_v2(self) -> service.sql.AlertsV2API: + """TODO: Add description.""" + return 
self._alerts_v2 + @property def apps(self) -> service.apps.AppsAPI: """Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.""" @@ -760,7 +764,7 @@ def table_constraints(self) -> service.catalog.TableConstraintsAPI: @property def tables(self) -> service.catalog.TablesAPI: - """A table resides in the third layer of Unity Catalog's three-level namespace.""" + """A table resides in the third layer of Unity Catalog’s three-level namespace.""" return self._tables @property @@ -864,7 +868,6 @@ def __init__( product_version="0.0.0", credentials_strategy: Optional[CredentialsStrategy] = None, credentials_provider: Optional[CredentialsStrategy] = None, - token_audience: Optional[str] = None, config: Optional[client.Config] = None, ): if not config: @@ -893,7 +896,6 @@ def __init__( debug_headers=debug_headers, product=product, product_version=product_version, - token_audience=token_audience, ) self._config = config.copy() self._api_client = client.ApiClient(self._config) diff --git a/databricks/sdk/service/apps.py b/databricks/sdk/service/apps.py index dc4a1c80f..6f645641d 100755 --- a/databricks/sdk/service/apps.py +++ b/databricks/sdk/service/apps.py @@ -1173,12 +1173,12 @@ def wait_get_app_stopped( attempt += 1 raise TimeoutError(f"timed out after {timeout}: {status_message}") - def create(self, *, app: Optional[App] = None, no_compute: Optional[bool] = None) -> Wait[App]: + def create(self, app: App, *, no_compute: Optional[bool] = None) -> Wait[App]: """Create an app. Creates a new app. - :param app: :class:`App` (optional) + :param app: :class:`App` :param no_compute: bool (optional) If true, the app will not be started after creation. @@ -1198,9 +1198,7 @@ def create(self, *, app: Optional[App] = None, no_compute: Optional[bool] = None op_response = self._api.do("POST", "/api/2.0/apps", query=query, body=body, headers=headers) return Wait(self.wait_get_app_active, response=App.from_dict(op_response), name=op_response["name"]) - def create_and_wait( - self, *, app: Optional[App] = None, no_compute: Optional[bool] = None, timeout=timedelta(minutes=20) - ) -> App: + def create_and_wait(self, app: App, *, no_compute: Optional[bool] = None, timeout=timedelta(minutes=20)) -> App: return self.create(app=app, no_compute=no_compute).result(timeout=timeout) def delete(self, name: str) -> App: @@ -1221,14 +1219,14 @@ def delete(self, name: str) -> App: res = self._api.do("DELETE", f"/api/2.0/apps/{name}", headers=headers) return App.from_dict(res) - def deploy(self, app_name: str, *, app_deployment: Optional[AppDeployment] = None) -> Wait[AppDeployment]: + def deploy(self, app_name: str, app_deployment: AppDeployment) -> Wait[AppDeployment]: """Create an app deployment. Creates an app deployment for the app with the supplied name. :param app_name: str The name of the app. - :param app_deployment: :class:`AppDeployment` (optional) + :param app_deployment: :class:`AppDeployment` :returns: Long-running operation waiter for :class:`AppDeployment`. 
@@ -1249,7 +1247,7 @@ def deploy(self, app_name: str, *, app_deployment: Optional[AppDeployment] = Non ) def deploy_and_wait( - self, app_name: str, *, app_deployment: Optional[AppDeployment] = None, timeout=timedelta(minutes=20) + self, app_name: str, app_deployment: AppDeployment, timeout=timedelta(minutes=20) ) -> AppDeployment: return self.deploy(app_deployment=app_deployment, app_name=app_name).result(timeout=timeout) @@ -1466,7 +1464,7 @@ def stop(self, name: str) -> Wait[App]: def stop_and_wait(self, name: str, timeout=timedelta(minutes=20)) -> App: return self.stop(name=name).result(timeout=timeout) - def update(self, name: str, *, app: Optional[App] = None) -> App: + def update(self, name: str, app: App) -> App: """Update an app. Updates the app with the supplied name. @@ -1474,7 +1472,7 @@ def update(self, name: str, *, app: Optional[App] = None) -> App: :param name: str The name of the app. The name must contain only lowercase alphanumeric characters and hyphens. It must be unique within the workspace. - :param app: :class:`App` (optional) + :param app: :class:`App` :returns: :class:`App` """ diff --git a/databricks/sdk/service/billing.py b/databricks/sdk/service/billing.py index 511756013..3595e4026 100755 --- a/databricks/sdk/service/billing.py +++ b/databricks/sdk/service/billing.py @@ -1873,7 +1873,7 @@ def list( query["page_token"] = json["next_page_token"] def update( - self, policy_id: str, *, limit_config: Optional[LimitConfig] = None, policy: Optional[BudgetPolicy] = None + self, policy_id: str, policy: BudgetPolicy, *, limit_config: Optional[LimitConfig] = None ) -> BudgetPolicy: """Update a budget policy. @@ -1881,10 +1881,10 @@ def update( :param policy_id: str The Id of the policy. This field is generated by Databricks and globally unique. + :param policy: :class:`BudgetPolicy` + Contains the BudgetPolicy details. :param limit_config: :class:`LimitConfig` (optional) DEPRECATED. This is redundant field as LimitConfig is part of the BudgetPolicy - :param policy: :class:`BudgetPolicy` (optional) - Contains the BudgetPolicy details. 
:returns: :class:`BudgetPolicy` """ diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index d73d53ea5..a77115edc 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -6675,6 +6675,7 @@ class Privilege(Enum): BROWSE = "BROWSE" CREATE = "CREATE" CREATE_CATALOG = "CREATE_CATALOG" + CREATE_CLEAN_ROOM = "CREATE_CLEAN_ROOM" CREATE_CONNECTION = "CREATE_CONNECTION" CREATE_EXTERNAL_LOCATION = "CREATE_EXTERNAL_LOCATION" CREATE_EXTERNAL_TABLE = "CREATE_EXTERNAL_TABLE" @@ -6695,9 +6696,11 @@ class Privilege(Enum): CREATE_VIEW = "CREATE_VIEW" CREATE_VOLUME = "CREATE_VOLUME" EXECUTE = "EXECUTE" + EXECUTE_CLEAN_ROOM_TASK = "EXECUTE_CLEAN_ROOM_TASK" MANAGE = "MANAGE" MANAGE_ALLOWLIST = "MANAGE_ALLOWLIST" MODIFY = "MODIFY" + MODIFY_CLEAN_ROOM = "MODIFY_CLEAN_ROOM" READ_FILES = "READ_FILES" READ_PRIVATE_FILES = "READ_PRIVATE_FILES" READ_VOLUME = "READ_VOLUME" @@ -7360,6 +7363,15 @@ class SetArtifactAllowlist: artifact_type: Optional[ArtifactType] = None """The artifact type of the allowlist.""" + created_at: Optional[int] = None + """Time at which this artifact allowlist was set, in epoch milliseconds.""" + + created_by: Optional[str] = None + """Username of the user who set the artifact allowlist.""" + + metastore_id: Optional[str] = None + """Unique identifier of parent metastore.""" + def as_dict(self) -> dict: """Serializes the SetArtifactAllowlist into a dictionary suitable for use as a JSON request body.""" body = {} @@ -7367,6 +7379,12 @@ def as_dict(self) -> dict: body["artifact_matchers"] = [v.as_dict() for v in self.artifact_matchers] if self.artifact_type is not None: body["artifact_type"] = self.artifact_type.value + if self.created_at is not None: + body["created_at"] = self.created_at + if self.created_by is not None: + body["created_by"] = self.created_by + if self.metastore_id is not None: + body["metastore_id"] = self.metastore_id return body def as_shallow_dict(self) -> dict: @@ -7376,6 +7394,12 @@ def as_shallow_dict(self) -> dict: body["artifact_matchers"] = self.artifact_matchers if self.artifact_type is not None: body["artifact_type"] = self.artifact_type + if self.created_at is not None: + body["created_at"] = self.created_at + if self.created_by is not None: + body["created_by"] = self.created_by + if self.metastore_id is not None: + body["metastore_id"] = self.metastore_id return body @classmethod @@ -7384,6 +7408,9 @@ def from_dict(cls, d: Dict[str, Any]) -> SetArtifactAllowlist: return cls( artifact_matchers=_repeated_dict(d, "artifact_matchers", ArtifactMatcher), artifact_type=_enum(d, "artifact_type", ArtifactType), + created_at=d.get("created_at", None), + created_by=d.get("created_by", None), + metastore_id=d.get("metastore_id", None), ) @@ -10467,7 +10494,15 @@ def get(self, artifact_type: ArtifactType) -> ArtifactAllowlistInfo: res = self._api.do("GET", f"/api/2.1/unity-catalog/artifact-allowlists/{artifact_type.value}", headers=headers) return ArtifactAllowlistInfo.from_dict(res) - def update(self, artifact_type: ArtifactType, artifact_matchers: List[ArtifactMatcher]) -> ArtifactAllowlistInfo: + def update( + self, + artifact_type: ArtifactType, + artifact_matchers: List[ArtifactMatcher], + *, + created_at: Optional[int] = None, + created_by: Optional[str] = None, + metastore_id: Optional[str] = None, + ) -> ArtifactAllowlistInfo: """Set an artifact allowlist. Set the artifact allowlist of a certain artifact type. 
The whole artifact allowlist is replaced with @@ -10478,12 +10513,24 @@ def update(self, artifact_type: ArtifactType, artifact_matchers: List[ArtifactMa The artifact type of the allowlist. :param artifact_matchers: List[:class:`ArtifactMatcher`] A list of allowed artifact match patterns. + :param created_at: int (optional) + Time at which this artifact allowlist was set, in epoch milliseconds. + :param created_by: str (optional) + Username of the user who set the artifact allowlist. + :param metastore_id: str (optional) + Unique identifier of parent metastore. :returns: :class:`ArtifactAllowlistInfo` """ body = {} if artifact_matchers is not None: body["artifact_matchers"] = [v.as_dict() for v in artifact_matchers] + if created_at is not None: + body["created_at"] = created_at + if created_by is not None: + body["created_by"] = created_by + if metastore_id is not None: + body["metastore_id"] = metastore_id headers = { "Accept": "application/json", "Content-Type": "application/json", @@ -12354,12 +12401,12 @@ def wait_get_online_table_active( attempt += 1 raise TimeoutError(f"timed out after {timeout}: {status_message}") - def create(self, *, table: Optional[OnlineTable] = None) -> Wait[OnlineTable]: + def create(self, table: OnlineTable) -> Wait[OnlineTable]: """Create an Online Table. Create a new Online Table. - :param table: :class:`OnlineTable` (optional) + :param table: :class:`OnlineTable` Online Table information. :returns: @@ -12377,7 +12424,7 @@ def create(self, *, table: Optional[OnlineTable] = None) -> Wait[OnlineTable]: self.wait_get_online_table_active, response=OnlineTable.from_dict(op_response), name=op_response["name"] ) - def create_and_wait(self, *, table: Optional[OnlineTable] = None, timeout=timedelta(minutes=20)) -> OnlineTable: + def create_and_wait(self, table: OnlineTable, timeout=timedelta(minutes=20)) -> OnlineTable: return self.create(table=table).result(timeout=timeout) def delete(self, name: str): diff --git a/databricks/sdk/service/cleanrooms.py b/databricks/sdk/service/cleanrooms.py index e551d82be..3f6d5a033 100755 --- a/databricks/sdk/service/cleanrooms.py +++ b/databricks/sdk/service/cleanrooms.py @@ -332,7 +332,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CleanRoomAssetForeignTableLocalDetails: @dataclass class CleanRoomAssetNotebook: etag: Optional[str] = None - """Server generated checksum that represents the notebook version.""" + """Server generated etag that represents the notebook version.""" notebook_content: Optional[str] = None """Base 64 representation of the notebook contents. This is the same format as returned by @@ -1097,7 +1097,7 @@ class CleanRoomAssetsAPI: def __init__(self, api_client): self._api = api_client - def create(self, clean_room_name: str, *, asset: Optional[CleanRoomAsset] = None) -> CleanRoomAsset: + def create(self, clean_room_name: str, asset: CleanRoomAsset) -> CleanRoomAsset: """Create an asset. Create a clean room asset —share an asset like a notebook or table into the clean room. For each UC @@ -1107,7 +1107,7 @@ def create(self, clean_room_name: str, *, asset: Optional[CleanRoomAsset] = None :param clean_room_name: str Name of the clean room. 
- :param asset: :class:`CleanRoomAsset` (optional) + :param asset: :class:`CleanRoomAsset` Metadata of the clean room asset :returns: :class:`CleanRoomAsset` @@ -1200,12 +1200,7 @@ def list(self, clean_room_name: str, *, page_token: Optional[str] = None) -> Ite query["page_token"] = json["next_page_token"] def update( - self, - clean_room_name: str, - asset_type: CleanRoomAssetAssetType, - name: str, - *, - asset: Optional[CleanRoomAsset] = None, + self, clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str, asset: CleanRoomAsset ) -> CleanRoomAsset: """Update an asset. @@ -1224,7 +1219,7 @@ def update( *shared_catalog*.*shared_schema*.*asset_name* For notebooks, the name is the notebook file name. - :param asset: :class:`CleanRoomAsset` (optional) + :param asset: :class:`CleanRoomAsset` Metadata of the clean room asset :returns: :class:`CleanRoomAsset` @@ -1303,7 +1298,7 @@ class CleanRoomsAPI: def __init__(self, api_client): self._api = api_client - def create(self, *, clean_room: Optional[CleanRoom] = None) -> CleanRoom: + def create(self, clean_room: CleanRoom) -> CleanRoom: """Create a clean room. Create a new clean room with the specified collaborators. This method is asynchronous; the returned @@ -1314,7 +1309,7 @@ def create(self, *, clean_room: Optional[CleanRoom] = None) -> CleanRoom: The caller must be a metastore admin or have the **CREATE_CLEAN_ROOM** privilege on the metastore. - :param clean_room: :class:`CleanRoom` (optional) + :param clean_room: :class:`CleanRoom` :returns: :class:`CleanRoom` """ @@ -1328,7 +1323,7 @@ def create(self, *, clean_room: Optional[CleanRoom] = None) -> CleanRoom: return CleanRoom.from_dict(res) def create_output_catalog( - self, clean_room_name: str, *, output_catalog: Optional[CleanRoomOutputCatalog] = None + self, clean_room_name: str, output_catalog: CleanRoomOutputCatalog ) -> CreateCleanRoomOutputCatalogResponse: """Create an output catalog. @@ -1336,7 +1331,7 @@ def create_output_catalog( :param clean_room_name: str Name of the clean room. 
- :param output_catalog: :class:`CleanRoomOutputCatalog` (optional) + :param output_catalog: :class:`CleanRoomOutputCatalog` :returns: :class:`CreateCleanRoomOutputCatalogResponse` """ diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index 088d4ed3e..9a60ebc87 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -4499,6 +4499,10 @@ class EditInstancePool: min_idle_instances: Optional[int] = None """Minimum number of idle instances to keep in the instance pool""" + node_type_flexibility: Optional[NodeTypeFlexibility] = None + """For Fleet-pool V2, this object contains the information about the alternate node type ids to use + when attempting to launch a cluster if the node type id is not available.""" + def as_dict(self) -> dict: """Serializes the EditInstancePool into a dictionary suitable for use as a JSON request body.""" body = {} @@ -4514,6 +4518,8 @@ def as_dict(self) -> dict: body["max_capacity"] = self.max_capacity if self.min_idle_instances is not None: body["min_idle_instances"] = self.min_idle_instances + if self.node_type_flexibility: + body["node_type_flexibility"] = self.node_type_flexibility.as_dict() if self.node_type_id is not None: body["node_type_id"] = self.node_type_id return body @@ -4533,6 +4539,8 @@ def as_shallow_dict(self) -> dict: body["max_capacity"] = self.max_capacity if self.min_idle_instances is not None: body["min_idle_instances"] = self.min_idle_instances + if self.node_type_flexibility: + body["node_type_flexibility"] = self.node_type_flexibility if self.node_type_id is not None: body["node_type_id"] = self.node_type_id return body @@ -4547,6 +4555,7 @@ def from_dict(cls, d: Dict[str, Any]) -> EditInstancePool: instance_pool_name=d.get("instance_pool_name", None), max_capacity=d.get("max_capacity", None), min_idle_instances=d.get("min_idle_instances", None), + node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility), node_type_id=d.get("node_type_id", None), ) @@ -4772,8 +4781,11 @@ def from_dict(cls, d: Dict[str, Any]) -> EnforceClusterComplianceResponse: @dataclass class Environment: - """The environment entity used to preserve serverless environment side panel and jobs' environment - for non-notebook task. In this minimal environment spec, only pip dependencies are supported.""" + """The environment entity used to preserve serverless environment side panel, jobs' environment for + non-notebook task, and DLT's environment for classic and serverless pipelines. (Note: DLT uses a + copied version of the Environment proto below, at + //spark/pipelines/api/protos/copied/libraries-environments-copy.proto) In this minimal + environment spec, only pip dependencies are supported.""" client: str """Client version used by the environment The client is the user-facing environment of the runtime. @@ -5261,16 +5273,30 @@ class GetEvents: """An optional set of event types to filter on. If empty, all event types are returned.""" limit: Optional[int] = None - """The maximum number of events to include in a page of events. Defaults to 50, and maximum allowed + """Deprecated: use page_token in combination with page_size instead. + + The maximum number of events to include in a page of events. Defaults to 50, and maximum allowed value is 500.""" offset: Optional[int] = None - """The offset in the result set. Defaults to 0 (no offset). When an offset is specified and the + """Deprecated: use page_token in combination with page_size instead. + + The offset in the result set. 
Defaults to 0 (no offset). When an offset is specified and the results are requested in descending order, the end_time field is required.""" order: Optional[GetEventsOrder] = None """The order to list events in; either "ASC" or "DESC". Defaults to "DESC".""" + page_size: Optional[int] = None + """The maximum number of events to include in a page of events. The server may further constrain + the maximum number of results returned in a single page. If the page_size is empty or 0, the + server will decide the number of results to be returned. The field has to be in the range + [0,500]. If the value is outside the range, the server enforces 0 or 500.""" + + page_token: Optional[str] = None + """Use next_page_token or prev_page_token returned from the previous request to list the next or + previous page of events respectively. If page_token is empty, the first page is returned.""" + start_time: Optional[int] = None """The start time in epoch milliseconds. If empty, returns events starting from the beginning of time.""" @@ -5290,6 +5316,10 @@ def as_dict(self) -> dict: body["offset"] = self.offset if self.order is not None: body["order"] = self.order.value + if self.page_size is not None: + body["page_size"] = self.page_size + if self.page_token is not None: + body["page_token"] = self.page_token if self.start_time is not None: body["start_time"] = self.start_time return body @@ -5309,6 +5339,10 @@ def as_shallow_dict(self) -> dict: body["offset"] = self.offset if self.order is not None: body["order"] = self.order + if self.page_size is not None: + body["page_size"] = self.page_size + if self.page_token is not None: + body["page_token"] = self.page_token if self.start_time is not None: body["start_time"] = self.start_time return body @@ -5323,6 +5357,8 @@ def from_dict(cls, d: Dict[str, Any]) -> GetEvents: limit=d.get("limit", None), offset=d.get("offset", None), order=_enum(d, "order", GetEventsOrder), + page_size=d.get("page_size", None), + page_token=d.get("page_token", None), start_time=d.get("start_time", None), ) @@ -5338,11 +5374,24 @@ class GetEventsResponse: events: Optional[List[ClusterEvent]] = None next_page: Optional[GetEvents] = None - """The parameters required to retrieve the next page of events. Omitted if there are no more events + """Deprecated: use next_page_token or prev_page_token instead. + + The parameters required to retrieve the next page of events. Omitted if there are no more events to read.""" + next_page_token: Optional[str] = None + """This field represents the pagination token to retrieve the next page of results. If the value is + "", it means no further results for the request.""" + + prev_page_token: Optional[str] = None + """This field represents the pagination token to retrieve the previous page of results. If the + value is "", it means no further results for the request.""" + total_count: Optional[int] = None - """The total number of events filtered by the start_time, end_time, and event_types.""" + """Deprecated: Returns 0 when request uses page_token. Will start returning zero when request uses + offset/limit soon. 
+ + The total number of events filtered by the start_time, end_time, and event_types.""" def as_dict(self) -> dict: """Serializes the GetEventsResponse into a dictionary suitable for use as a JSON request body.""" @@ -5351,6 +5400,10 @@ def as_dict(self) -> dict: body["events"] = [v.as_dict() for v in self.events] if self.next_page: body["next_page"] = self.next_page.as_dict() + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.prev_page_token is not None: + body["prev_page_token"] = self.prev_page_token if self.total_count is not None: body["total_count"] = self.total_count return body @@ -5362,6 +5415,10 @@ def as_shallow_dict(self) -> dict: body["events"] = self.events if self.next_page: body["next_page"] = self.next_page + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.prev_page_token is not None: + body["prev_page_token"] = self.prev_page_token if self.total_count is not None: body["total_count"] = self.total_count return body @@ -5372,6 +5429,8 @@ def from_dict(cls, d: Dict[str, Any]) -> GetEventsResponse: return cls( events=_repeated_dict(d, "events", ClusterEvent), next_page=_from_dict(d, "next_page", GetEvents), + next_page_token=d.get("next_page_token", None), + prev_page_token=d.get("prev_page_token", None), total_count=d.get("total_count", None), ) @@ -5438,6 +5497,10 @@ class GetInstancePool: min_idle_instances: Optional[int] = None """Minimum number of idle instances to keep in the instance pool""" + node_type_flexibility: Optional[NodeTypeFlexibility] = None + """For Fleet-pool V2, this object contains the information about the alternate node type ids to use + when attempting to launch a cluster if the node type id is not available.""" + node_type_id: Optional[str] = None """This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. 
For example, the Spark nodes can be provisioned and optimized for memory or @@ -5488,6 +5551,8 @@ def as_dict(self) -> dict: body["max_capacity"] = self.max_capacity if self.min_idle_instances is not None: body["min_idle_instances"] = self.min_idle_instances + if self.node_type_flexibility: + body["node_type_flexibility"] = self.node_type_flexibility.as_dict() if self.node_type_id is not None: body["node_type_id"] = self.node_type_id if self.preloaded_docker_images: @@ -5529,6 +5594,8 @@ def as_shallow_dict(self) -> dict: body["max_capacity"] = self.max_capacity if self.min_idle_instances is not None: body["min_idle_instances"] = self.min_idle_instances + if self.node_type_flexibility: + body["node_type_flexibility"] = self.node_type_flexibility if self.node_type_id is not None: body["node_type_id"] = self.node_type_id if self.preloaded_docker_images: @@ -5559,6 +5626,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GetInstancePool: instance_pool_name=d.get("instance_pool_name", None), max_capacity=d.get("max_capacity", None), min_idle_instances=d.get("min_idle_instances", None), + node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility), node_type_id=d.get("node_type_id", None), preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage), preloaded_spark_versions=d.get("preloaded_spark_versions", None), @@ -6393,6 +6461,10 @@ class InstancePoolAndStats: min_idle_instances: Optional[int] = None """Minimum number of idle instances to keep in the instance pool""" + node_type_flexibility: Optional[NodeTypeFlexibility] = None + """For Fleet-pool V2, this object contains the information about the alternate node type ids to use + when attempting to launch a cluster if the node type id is not available.""" + node_type_id: Optional[str] = None """This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. 
For example, the Spark nodes can be provisioned and optimized for memory or @@ -6443,6 +6515,8 @@ def as_dict(self) -> dict: body["max_capacity"] = self.max_capacity if self.min_idle_instances is not None: body["min_idle_instances"] = self.min_idle_instances + if self.node_type_flexibility: + body["node_type_flexibility"] = self.node_type_flexibility.as_dict() if self.node_type_id is not None: body["node_type_id"] = self.node_type_id if self.preloaded_docker_images: @@ -6484,6 +6558,8 @@ def as_shallow_dict(self) -> dict: body["max_capacity"] = self.max_capacity if self.min_idle_instances is not None: body["min_idle_instances"] = self.min_idle_instances + if self.node_type_flexibility: + body["node_type_flexibility"] = self.node_type_flexibility if self.node_type_id is not None: body["node_type_id"] = self.node_type_id if self.preloaded_docker_images: @@ -6514,6 +6590,7 @@ def from_dict(cls, d: Dict[str, Any]) -> InstancePoolAndStats: instance_pool_name=d.get("instance_pool_name", None), max_capacity=d.get("max_capacity", None), min_idle_instances=d.get("min_idle_instances", None), + node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility), node_type_id=d.get("node_type_id", None), preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage), preloaded_spark_versions=d.get("preloaded_spark_versions", None), @@ -7976,6 +8053,28 @@ def from_dict(cls, d: Dict[str, Any]) -> NodeType: ) +@dataclass +class NodeTypeFlexibility: + """For Fleet-V2 using classic clusters, this object contains the information about the alternate + node type ids to use when attempting to launch a cluster. It can be used with both the driver + and worker node types.""" + + def as_dict(self) -> dict: + """Serializes the NodeTypeFlexibility into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the NodeTypeFlexibility into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> NodeTypeFlexibility: + """Deserializes the NodeTypeFlexibility from a dictionary.""" + return cls() + + @dataclass class PendingInstanceError: """Error message of a failed pending instances""" @@ -9005,6 +9104,7 @@ class TerminationReasonCode(Enum): DATA_ACCESS_CONFIG_CHANGED = "DATA_ACCESS_CONFIG_CHANGED" DBFS_COMPONENT_UNHEALTHY = "DBFS_COMPONENT_UNHEALTHY" DISASTER_RECOVERY_REPLICATION = "DISASTER_RECOVERY_REPLICATION" + DNS_RESOLUTION_ERROR = "DNS_RESOLUTION_ERROR" DOCKER_CONTAINER_CREATION_EXCEPTION = "DOCKER_CONTAINER_CREATION_EXCEPTION" DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE" DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION" @@ -9023,6 +9123,7 @@ class TerminationReasonCode(Enum): EXECUTION_COMPONENT_UNHEALTHY = "EXECUTION_COMPONENT_UNHEALTHY" EXECUTOR_POD_UNSCHEDULED = "EXECUTOR_POD_UNSCHEDULED" GCP_API_RATE_QUOTA_EXCEEDED = "GCP_API_RATE_QUOTA_EXCEEDED" + GCP_DENIED_BY_ORG_POLICY = "GCP_DENIED_BY_ORG_POLICY" GCP_FORBIDDEN = "GCP_FORBIDDEN" GCP_IAM_TIMEOUT = "GCP_IAM_TIMEOUT" GCP_INACCESSIBLE_KMS_KEY_FAILURE = "GCP_INACCESSIBLE_KMS_KEY_FAILURE" @@ -10947,6 +11048,8 @@ def events( limit: Optional[int] = None, offset: Optional[int] = None, order: Optional[GetEventsOrder] = None, + page_size: Optional[int] = None, + page_token: Optional[str] = None, start_time: Optional[int] = None, ) -> Iterator[ClusterEvent]: """List cluster activity events. 
@@ -10961,13 +11064,25 @@ def events( :param event_types: List[:class:`EventType`] (optional) An optional set of event types to filter on. If empty, all event types are returned. :param limit: int (optional) + Deprecated: use page_token in combination with page_size instead. + The maximum number of events to include in a page of events. Defaults to 50, and maximum allowed value is 500. :param offset: int (optional) + Deprecated: use page_token in combination with page_size instead. + The offset in the result set. Defaults to 0 (no offset). When an offset is specified and the results are requested in descending order, the end_time field is required. :param order: :class:`GetEventsOrder` (optional) The order to list events in; either "ASC" or "DESC". Defaults to "DESC". + :param page_size: int (optional) + The maximum number of events to include in a page of events. The server may further constrain the + maximum number of results returned in a single page. If the page_size is empty or 0, the server will + decide the number of results to be returned. The field has to be in the range [0,500]. If the value + is outside the range, the server enforces 0 or 500. + :param page_token: str (optional) + Use next_page_token or prev_page_token returned from the previous request to list the next or + previous page of events respectively. If page_token is empty, the first page is returned. :param start_time: int (optional) The start time in epoch milliseconds. If empty, returns events starting from the beginning of time. @@ -10986,6 +11101,10 @@ def events( body["offset"] = offset if order is not None: body["order"] = order.value + if page_size is not None: + body["page_size"] = page_size + if page_token is not None: + body["page_token"] = page_token if start_time is not None: body["start_time"] = start_time headers = { @@ -12101,6 +12220,7 @@ def edit( idle_instance_autotermination_minutes: Optional[int] = None, max_capacity: Optional[int] = None, min_idle_instances: Optional[int] = None, + node_type_flexibility: Optional[NodeTypeFlexibility] = None, ): """Edit an existing instance pool. @@ -12133,6 +12253,9 @@ def edit( upsize requests. :param min_idle_instances: int (optional) Minimum number of idle instances to keep in the instance pool + :param node_type_flexibility: :class:`NodeTypeFlexibility` (optional) + For Fleet-pool V2, this object contains the information about the alternate node type ids to use + when attempting to launch a cluster if the node type id is not available. """ @@ -12149,6 +12272,8 @@ def edit( body["max_capacity"] = max_capacity if min_idle_instances is not None: body["min_idle_instances"] = min_idle_instances + if node_type_flexibility is not None: + body["node_type_flexibility"] = node_type_flexibility.as_dict() if node_type_id is not None: body["node_type_id"] = node_type_id headers = { diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index 5ba789eb4..1683ba1b1 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -926,7 +926,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GenieResultMetadata: @dataclass class GenieSpace: space_id: str - """Space ID""" + """Genie space ID""" title: str """Title of the Genie Space""" @@ -2172,15 +2172,14 @@ def generate_download_full_query_result( ) -> GenieGenerateDownloadFullQueryResultResponse: """Generate full query result download. - Initiate full SQL query result download and obtain a `download_id` to track the download progress. 
- This call initiates a new SQL execution to generate the query result. The result is stored in an - external link can be retrieved using the [Get Download Full Query - Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks strongly recommends that - you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. See [Execute - Statement](:method:statementexecution/executestatement) for more details. + Initiates a new SQL execution and returns a `download_id` that you can use to track the progress of + the download. The query result is stored in an external link and can be retrieved using the [Get + Download Full Query Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks + strongly recommends that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. + See [Execute Statement](:method:statementexecution/executestatement) for more details. :param space_id: str - Space ID + Genie space ID :param conversation_id: str Conversation ID :param message_id: str @@ -2208,17 +2207,15 @@ def get_download_full_query_result( """Get download full query result. After [Generating a Full Query Result Download](:method:genie/getdownloadfullqueryresult) and - successfully receiving a `download_id`, use this API to Poll download progress and retrieve the SQL - query result external link(s) upon completion. Warning: Databricks strongly recommends that you - protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. When you use the - `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated, which can be used to download - the results directly from Amazon S3. As a short-lived access credential is embedded in this presigned - URL, you should protect the URL. Because presigned URLs are already generated with embedded temporary - access credentials, you must not set an Authorization header in the download requests. See [Execute + successfully receiving a `download_id`, use this API to poll the download progress. When the download + is complete, the API returns one or more external links to the query result files. Warning: Databricks + strongly recommends that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. + You must not set an Authorization header in download requests. When using the `EXTERNAL_LINKS` + disposition, Databricks returns presigned URLs that grant temporary access to data. See [Execute Statement](:method:statementexecution/executestatement) for more details. :param space_id: str - Space ID + Genie space ID :param conversation_id: str Conversation ID :param message_id: str @@ -2422,12 +2419,12 @@ class LakeviewAPI: def __init__(self, api_client): self._api = api_client - def create(self, *, dashboard: Optional[Dashboard] = None) -> Dashboard: + def create(self, dashboard: Dashboard) -> Dashboard: """Create dashboard. Create a draft dashboard. - :param dashboard: :class:`Dashboard` (optional) + :param dashboard: :class:`Dashboard` :returns: :class:`Dashboard` """ @@ -2440,12 +2437,12 @@ def create(self, *, dashboard: Optional[Dashboard] = None) -> Dashboard: res = self._api.do("POST", "/api/2.0/lakeview/dashboards", body=body, headers=headers) return Dashboard.from_dict(res) - def create_schedule(self, dashboard_id: str, *, schedule: Optional[Schedule] = None) -> Schedule: + def create_schedule(self, dashboard_id: str, schedule: Schedule) -> Schedule: """Create dashboard schedule. :param dashboard_id: str UUID identifying the dashboard to which the schedule belongs. 
- :param schedule: :class:`Schedule` (optional) + :param schedule: :class:`Schedule` :returns: :class:`Schedule` """ @@ -2458,16 +2455,14 @@ def create_schedule(self, dashboard_id: str, *, schedule: Optional[Schedule] = N res = self._api.do("POST", f"/api/2.0/lakeview/dashboards/{dashboard_id}/schedules", body=body, headers=headers) return Schedule.from_dict(res) - def create_subscription( - self, dashboard_id: str, schedule_id: str, *, subscription: Optional[Subscription] = None - ) -> Subscription: + def create_subscription(self, dashboard_id: str, schedule_id: str, subscription: Subscription) -> Subscription: """Create schedule subscription. :param dashboard_id: str UUID identifying the dashboard to which the subscription belongs. :param schedule_id: str UUID identifying the schedule to which the subscription belongs. - :param subscription: :class:`Subscription` (optional) + :param subscription: :class:`Subscription` :returns: :class:`Subscription` """ @@ -2853,14 +2848,14 @@ def unpublish(self, dashboard_id: str): self._api.do("DELETE", f"/api/2.0/lakeview/dashboards/{dashboard_id}/published", headers=headers) - def update(self, dashboard_id: str, *, dashboard: Optional[Dashboard] = None) -> Dashboard: + def update(self, dashboard_id: str, dashboard: Dashboard) -> Dashboard: """Update dashboard. Update a draft dashboard. :param dashboard_id: str UUID identifying the dashboard. - :param dashboard: :class:`Dashboard` (optional) + :param dashboard: :class:`Dashboard` :returns: :class:`Dashboard` """ @@ -2873,14 +2868,14 @@ def update(self, dashboard_id: str, *, dashboard: Optional[Dashboard] = None) -> res = self._api.do("PATCH", f"/api/2.0/lakeview/dashboards/{dashboard_id}", body=body, headers=headers) return Dashboard.from_dict(res) - def update_schedule(self, dashboard_id: str, schedule_id: str, *, schedule: Optional[Schedule] = None) -> Schedule: + def update_schedule(self, dashboard_id: str, schedule_id: str, schedule: Schedule) -> Schedule: """Update dashboard schedule. :param dashboard_id: str UUID identifying the dashboard to which the schedule belongs. :param schedule_id: str UUID identifying the schedule. - :param schedule: :class:`Schedule` (optional) + :param schedule: :class:`Schedule` :returns: :class:`Schedule` """ diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index fcf2e487d..051b514c8 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -576,10 +576,11 @@ class CleanRoomTaskRunState: life_cycle_state: Optional[CleanRoomTaskRunLifeCycleState] = None """A value indicating the run's current lifecycle state. This field is always available in the - response.""" + response. Note: Additional states might be introduced in future releases.""" result_state: Optional[CleanRoomTaskRunResultState] = None - """A value indicating the run's result. This field is only available for terminal lifecycle states.""" + """A value indicating the run's result. This field is only available for terminal lifecycle states. + Note: Additional states might be introduced in future releases.""" def as_dict(self) -> dict: """Serializes the CleanRoomTaskRunState into a dictionary suitable for use as a JSON request body.""" @@ -1023,8 +1024,8 @@ class CreateJob: """Job-level parameter definitions""" performance_target: Optional[PerformanceTarget] = None - """The performance mode on a serverless job. The performance target determines the level of compute - performance or cost-efficiency for the run. + """The performance mode on a serverless job. 
This field determines the level of compute performance + or cost-efficiency for the run. * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and @@ -1322,11 +1323,14 @@ class DashboardTask: """Configures the Lakeview Dashboard job task type.""" dashboard_id: Optional[str] = None + """The identifier of the dashboard to refresh.""" subscription: Optional[Subscription] = None + """Optional: subscription configuration for sending the dashboard snapshot.""" warehouse_id: Optional[str] = None - """The warehouse id to execute the dashboard with for the schedule""" + """Optional: The warehouse id to execute the dashboard with for the schedule. If not specified, the + default warehouse of the dashboard will be used.""" def as_dict(self) -> dict: """Serializes the DashboardTask into a dictionary suitable for use as a JSON request body.""" @@ -2703,8 +2707,11 @@ class JobEnvironment: """The key of an environment. It has to be unique within a job.""" spec: Optional[compute.Environment] = None - """The environment entity used to preserve serverless environment side panel and jobs' environment - for non-notebook task. In this minimal environment spec, only pip dependencies are supported.""" + """The environment entity used to preserve serverless environment side panel, jobs' environment for + non-notebook task, and DLT's environment for classic and serverless pipelines. (Note: DLT uses a + copied version of the Environment proto below, at + //spark/pipelines/api/protos/copied/libraries-environments-copy.proto) In this minimal + environment spec, only pip dependencies are supported.""" def as_dict(self) -> dict: """Serializes the JobEnvironment into a dictionary suitable for use as a JSON request body.""" @@ -3111,8 +3118,8 @@ class JobSettings: """Job-level parameter definitions""" performance_target: Optional[PerformanceTarget] = None - """The performance mode on a serverless job. The performance target determines the level of compute - performance or cost-efficiency for the run. + """The performance mode on a serverless job. This field determines the level of compute performance + or cost-efficiency for the run. * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and @@ -4174,6 +4181,15 @@ def from_dict(cls, d: Dict[str, Any]) -> QueueSettings: @dataclass class RepairHistoryItem: + effective_performance_target: Optional[PerformanceTarget] = None + """The actual performance target used by the serverless run during execution. This can differ from + the client-set performance target on the request depending on whether the performance mode is + supported by the job type. + + * `STANDARD`: Enables cost-efficient execution of serverless workloads. 
* + `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and + optimized cluster performance.""" + end_time: Optional[int] = None """The end time of the (repaired) run.""" @@ -4198,6 +4214,8 @@ class RepairHistoryItem: def as_dict(self) -> dict: """Serializes the RepairHistoryItem into a dictionary suitable for use as a JSON request body.""" body = {} + if self.effective_performance_target is not None: + body["effective_performance_target"] = self.effective_performance_target.value if self.end_time is not None: body["end_time"] = self.end_time if self.id is not None: @@ -4217,6 +4235,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the RepairHistoryItem into a shallow dictionary of its immediate attributes.""" body = {} + if self.effective_performance_target is not None: + body["effective_performance_target"] = self.effective_performance_target if self.end_time is not None: body["end_time"] = self.end_time if self.id is not None: @@ -4237,6 +4257,7 @@ def as_shallow_dict(self) -> dict: def from_dict(cls, d: Dict[str, Any]) -> RepairHistoryItem: """Deserializes the RepairHistoryItem from a dictionary.""" return cls( + effective_performance_target=_enum(d, "effective_performance_target", PerformanceTarget), end_time=d.get("end_time", None), id=d.get("id", None), start_time=d.get("start_time", None), @@ -4298,6 +4319,15 @@ class RepairRun: [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html""" + performance_target: Optional[PerformanceTarget] = None + """The performance mode on a serverless job. The performance target determines the level of compute + performance or cost-efficiency for the run. This field overrides the performance target defined + on the job level. + + * `STANDARD`: Enables cost-efficient execution of serverless workloads. 
* + `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and + optimized cluster performance.""" + pipeline_params: Optional[PipelineParams] = None """Controls whether the pipeline should perform a full refresh""" @@ -4364,6 +4394,8 @@ def as_dict(self) -> dict: body["latest_repair_id"] = self.latest_repair_id if self.notebook_params: body["notebook_params"] = self.notebook_params + if self.performance_target is not None: + body["performance_target"] = self.performance_target.value if self.pipeline_params: body["pipeline_params"] = self.pipeline_params.as_dict() if self.python_named_params: @@ -4397,6 +4429,8 @@ def as_shallow_dict(self) -> dict: body["latest_repair_id"] = self.latest_repair_id if self.notebook_params: body["notebook_params"] = self.notebook_params + if self.performance_target is not None: + body["performance_target"] = self.performance_target if self.pipeline_params: body["pipeline_params"] = self.pipeline_params if self.python_named_params: @@ -4426,6 +4460,7 @@ def from_dict(cls, d: Dict[str, Any]) -> RepairRun: job_parameters=d.get("job_parameters", None), latest_repair_id=d.get("latest_repair_id", None), notebook_params=d.get("notebook_params", None), + performance_target=_enum(d, "performance_target", PerformanceTarget), pipeline_params=_from_dict(d, "pipeline_params", PipelineParams), python_named_params=d.get("python_named_params", None), python_params=d.get("python_params", None), @@ -6020,13 +6055,14 @@ class RunState: life_cycle_state: Optional[RunLifeCycleState] = None """A value indicating the run's current lifecycle state. This field is always available in the - response.""" + response. Note: Additional states might be introduced in future releases.""" queue_reason: Optional[str] = None """The reason indicating why the run was queued.""" result_state: Optional[RunResultState] = None - """A value indicating the run's result. This field is only available for terminal lifecycle states.""" + """A value indicating the run's result. This field is only available for terminal lifecycle states. + Note: Additional states might be introduced in future releases.""" state_message: Optional[str] = None """A descriptive message for the current state. This field is unstructured, and its exact format is @@ -6161,7 +6197,7 @@ class RunTask: does not support retries or notifications.""" dashboard_task: Optional[DashboardTask] = None - """The task runs a DashboardTask when the `dashboard_task` field is present.""" + """The task refreshes a dashboard and sends a snapshot to subscribers.""" dbt_task: Optional[DbtTask] = None """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task @@ -7549,7 +7585,7 @@ class SubmitTask: does not support retries or notifications.""" dashboard_task: Optional[DashboardTask] = None - """The task runs a DashboardTask when the `dashboard_task` field is present.""" + """The task refreshes a dashboard and sends a snapshot to subscribers.""" dbt_task: Optional[DbtTask] = None """The task runs one or more dbt commands when the `dbt_task` field is present. 
The dbt task @@ -7818,6 +7854,7 @@ class Subscription: """When true, the subscription will not send emails.""" subscribers: Optional[List[SubscriptionSubscriber]] = None + """The list of subscribers to send the snapshot of the dashboard to.""" def as_dict(self) -> dict: """Serializes the Subscription into a dictionary suitable for use as a JSON request body.""" @@ -7854,8 +7891,12 @@ def from_dict(cls, d: Dict[str, Any]) -> Subscription: @dataclass class SubscriptionSubscriber: destination_id: Optional[str] = None + """A snapshot of the dashboard will be sent to the destination when the `destination_id` field is + present.""" user_name: Optional[str] = None + """A snapshot of the dashboard will be sent to the user's email when the `user_name` field is + present.""" def as_dict(self) -> dict: """Serializes the SubscriptionSubscriber into a dictionary suitable for use as a JSON request body.""" @@ -7954,7 +7995,7 @@ class Task: does not support retries or notifications.""" dashboard_task: Optional[DashboardTask] = None - """The task runs a DashboardTask when the `dashboard_task` field is present.""" + """The task refreshes a dashboard and sends a snapshot to subscribers.""" dbt_task: Optional[DbtTask] = None """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task @@ -8459,7 +8500,7 @@ class TerminationCodeCode(Enum): invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size - limit. + limit. * `DISABLED`: The run was never executed because it was disabled explicitly by the user. [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now""" @@ -8468,6 +8509,7 @@ class TerminationCodeCode(Enum): CLOUD_FAILURE = "CLOUD_FAILURE" CLUSTER_ERROR = "CLUSTER_ERROR" CLUSTER_REQUEST_LIMIT_EXCEEDED = "CLUSTER_REQUEST_LIMIT_EXCEEDED" + DISABLED = "DISABLED" DRIVER_ERROR = "DRIVER_ERROR" FEATURE_DISABLED = "FEATURE_DISABLED" INTERNAL_ERROR = "INTERNAL_ERROR" @@ -8523,7 +8565,7 @@ class TerminationDetails: invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size - limit. + limit. * `DISABLED`: The run was never executed because it was disabled explicitly by the user. [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now""" @@ -9140,8 +9182,8 @@ def create( :param parameters: List[:class:`JobParameterDefinition`] (optional) Job-level parameter definitions :param performance_target: :class:`PerformanceTarget` (optional) - The performance mode on a serverless job. The performance target determines the level of compute - performance or cost-efficiency for the run. + The performance mode on a serverless job. This field determines the level of compute performance or + cost-efficiency for the run. * `STANDARD`: Enables cost-efficient execution of serverless workloads. 
* `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and optimized cluster @@ -9593,6 +9635,7 @@ def repair_run( job_parameters: Optional[Dict[str, str]] = None, latest_repair_id: Optional[int] = None, notebook_params: Optional[Dict[str, str]] = None, + performance_target: Optional[PerformanceTarget] = None, pipeline_params: Optional[PipelineParams] = None, python_named_params: Optional[Dict[str, str]] = None, python_params: Optional[List[str]] = None, @@ -9643,6 +9686,14 @@ def repair_run( [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + :param performance_target: :class:`PerformanceTarget` (optional) + The performance mode on a serverless job. The performance target determines the level of compute + performance or cost-efficiency for the run. This field overrides the performance target defined on + the job level. + + * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`: + Prioritizes fast startup and execution times through rapid scaling and optimized cluster + performance. :param pipeline_params: :class:`PipelineParams` (optional) Controls whether the pipeline should perform a full refresh :param python_named_params: Dict[str,str] (optional) @@ -9703,6 +9754,8 @@ def repair_run( body["latest_repair_id"] = latest_repair_id if notebook_params is not None: body["notebook_params"] = notebook_params + if performance_target is not None: + body["performance_target"] = performance_target.value if pipeline_params is not None: body["pipeline_params"] = pipeline_params.as_dict() if python_named_params is not None: @@ -9742,6 +9795,7 @@ def repair_run_and_wait( job_parameters: Optional[Dict[str, str]] = None, latest_repair_id: Optional[int] = None, notebook_params: Optional[Dict[str, str]] = None, + performance_target: Optional[PerformanceTarget] = None, pipeline_params: Optional[PipelineParams] = None, python_named_params: Optional[Dict[str, str]] = None, python_params: Optional[List[str]] = None, @@ -9758,6 +9812,7 @@ def repair_run_and_wait( job_parameters=job_parameters, latest_repair_id=latest_repair_id, notebook_params=notebook_params, + performance_target=performance_target, pipeline_params=pipeline_params, python_named_params=python_named_params, python_params=python_params, diff --git a/databricks/sdk/service/oauth2.py b/databricks/sdk/service/oauth2.py index 366f282f4..53d337ef4 100755 --- a/databricks/sdk/service/oauth2.py +++ b/databricks/sdk/service/oauth2.py @@ -1191,10 +1191,10 @@ class AccountFederationPolicyAPI: def __init__(self, api_client): self._api = api_client - def create(self, *, policy: Optional[FederationPolicy] = None, policy_id: Optional[str] = None) -> FederationPolicy: + def create(self, policy: FederationPolicy, *, policy_id: Optional[str] = None) -> FederationPolicy: """Create account federation policy. - :param policy: :class:`FederationPolicy` (optional) + :param policy: :class:`FederationPolicy` :param policy_id: str (optional) The identifier for the federation policy. The identifier must contain only lowercase alphanumeric characters, numbers, hyphens, and slashes. If unspecified, the id will be assigned by Databricks. 
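A hedged sketch of the new calling convention, where `policy` is now a required positional argument instead of an optional keyword; the account-level `federation_policy` accessor, the `description` field, and the policy ID are assumptions not shown in this hunk:

    from databricks.sdk import AccountClient
    from databricks.sdk.service.oauth2 import FederationPolicy

    a = AccountClient()

    # `policy` must now be supplied; `policy_id` remains optional.
    created = a.federation_policy.create(
        FederationPolicy(description="Workload identity federation for CI"),
        policy_id="ci-policy",
    )
    print(created)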
@@ -1284,13 +1284,13 @@ def list(self, *, page_size: Optional[int] = None, page_token: Optional[str] = N query["page_token"] = json["next_page_token"] def update( - self, policy_id: str, *, policy: Optional[FederationPolicy] = None, update_mask: Optional[str] = None + self, policy_id: str, policy: FederationPolicy, *, update_mask: Optional[str] = None ) -> FederationPolicy: """Update account federation policy. :param policy_id: str The identifier for the federation policy. - :param policy: :class:`FederationPolicy` (optional) + :param policy: :class:`FederationPolicy` :param update_mask: str (optional) The field mask specifies which fields of the policy to update. To specify multiple fields in the field mask, use comma as the separator (no space). The special value '*' indicates that all fields @@ -1758,13 +1758,13 @@ def __init__(self, api_client): self._api = api_client def create( - self, service_principal_id: int, *, policy: Optional[FederationPolicy] = None, policy_id: Optional[str] = None + self, service_principal_id: int, policy: FederationPolicy, *, policy_id: Optional[str] = None ) -> FederationPolicy: """Create service principal federation policy. :param service_principal_id: int The service principal id for the federation policy. - :param policy: :class:`FederationPolicy` (optional) + :param policy: :class:`FederationPolicy` :param policy_id: str (optional) The identifier for the federation policy. The identifier must contain only lowercase alphanumeric characters, numbers, hyphens, and slashes. If unspecified, the id will be assigned by Databricks. @@ -1869,12 +1869,7 @@ def list( query["page_token"] = json["next_page_token"] def update( - self, - service_principal_id: int, - policy_id: str, - *, - policy: Optional[FederationPolicy] = None, - update_mask: Optional[str] = None, + self, service_principal_id: int, policy_id: str, policy: FederationPolicy, *, update_mask: Optional[str] = None ) -> FederationPolicy: """Update service principal federation policy. @@ -1882,7 +1877,7 @@ def update( The service principal id for the federation policy. :param policy_id: str The identifier for the federation policy. - :param policy: :class:`FederationPolicy` (optional) + :param policy: :class:`FederationPolicy` :param update_mask: str (optional) The field mask specifies which fields of the policy to update. To specify multiple fields in the field mask, use comma as the separator (no space). The special value '*' indicates that all fields diff --git a/databricks/sdk/service/serving.py b/databricks/sdk/service/serving.py index ab0318195..cd8a4eb1d 100755 --- a/databricks/sdk/service/serving.py +++ b/databricks/sdk/service/serving.py @@ -2874,7 +2874,8 @@ class ServedEntityInput: """The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), - "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). If + "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.""" @@ -3014,7 +3015,8 @@ class ServedEntityOutput: """The workload size of the served entity. 
The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), - "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). If + "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.""" @@ -3204,11 +3206,12 @@ class ServedModelInput: model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.""" - workload_size: Optional[ServedModelInputWorkloadSize] = None + workload_size: Optional[str] = None """The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), - "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). If + "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.""" @@ -3240,7 +3243,7 @@ def as_dict(self) -> dict: if self.scale_to_zero_enabled is not None: body["scale_to_zero_enabled"] = self.scale_to_zero_enabled if self.workload_size is not None: - body["workload_size"] = self.workload_size.value + body["workload_size"] = self.workload_size if self.workload_type is not None: body["workload_type"] = self.workload_type.value return body @@ -3282,18 +3285,11 @@ def from_dict(cls, d: Dict[str, Any]) -> ServedModelInput: model_version=d.get("model_version", None), name=d.get("name", None), scale_to_zero_enabled=d.get("scale_to_zero_enabled", None), - workload_size=_enum(d, "workload_size", ServedModelInputWorkloadSize), + workload_size=d.get("workload_size", None), workload_type=_enum(d, "workload_type", ServedModelInputWorkloadType), ) -class ServedModelInputWorkloadSize(Enum): - - LARGE = "Large" - MEDIUM = "Medium" - SMALL = "Small" - - class ServedModelInputWorkloadType(Enum): """Please keep this in sync with with workload types in InferenceEndpointEntities.scala""" @@ -3338,7 +3334,8 @@ class ServedModelOutput: """The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), - "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). If + "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + Additional custom workload sizes can also be used when available in the workspace. 
If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.""" diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index e683209e9..673296a03 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -635,6 +635,7 @@ class ComplianceStandard(Enum): IRAP_PROTECTED = "IRAP_PROTECTED" ISMAP = "ISMAP" ITAR_EAR = "ITAR_EAR" + K_FSI = "K_FSI" NONE = "NONE" PCI_DSS = "PCI_DSS" @@ -768,18 +769,20 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateIpAccessListResponse: @dataclass -class CreateNetworkConnectivityConfigRequest: +class CreateNetworkConnectivityConfiguration: + """Properties of the new network connectivity configuration.""" + name: str """The name of the network connectivity configuration. The name can contain alphanumeric characters, hyphens, and underscores. The length must be between 3 and 30 characters. The name - must match the regular expression `^[0-9a-zA-Z-_]{3,30}$`.""" + must match the regular expression ^[0-9a-zA-Z-_]{3,30}$""" region: str """The region for the network connectivity configuration. Only workspaces in the same region can be attached to the network connectivity configuration.""" def as_dict(self) -> dict: - """Serializes the CreateNetworkConnectivityConfigRequest into a dictionary suitable for use as a JSON request body.""" + """Serializes the CreateNetworkConnectivityConfiguration into a dictionary suitable for use as a JSON request body.""" body = {} if self.name is not None: body["name"] = self.name @@ -788,7 +791,7 @@ def as_dict(self) -> dict: return body def as_shallow_dict(self) -> dict: - """Serializes the CreateNetworkConnectivityConfigRequest into a shallow dictionary of its immediate attributes.""" + """Serializes the CreateNetworkConnectivityConfiguration into a shallow dictionary of its immediate attributes.""" body = {} if self.name is not None: body["name"] = self.name @@ -797,8 +800,8 @@ def as_shallow_dict(self) -> dict: return body @classmethod - def from_dict(cls, d: Dict[str, Any]) -> CreateNetworkConnectivityConfigRequest: - """Deserializes the CreateNetworkConnectivityConfigRequest from a dictionary.""" + def from_dict(cls, d: Dict[str, Any]) -> CreateNetworkConnectivityConfiguration: + """Deserializes the CreateNetworkConnectivityConfiguration from a dictionary.""" return cls(name=d.get("name", None), region=d.get("region", None)) @@ -913,59 +916,58 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateOboTokenResponse: @dataclass -class CreatePrivateEndpointRuleRequest: +class CreatePrivateEndpointRule: + """Properties of the new private endpoint rule. Note that you must approve the endpoint in Azure + portal after initialization.""" + resource_id: str """The Azure resource ID of the target resource.""" - group_id: CreatePrivateEndpointRuleRequestGroupId - """The sub-resource type (group ID) of the target resource. Note that to connect to workspace root - storage (root DBFS), you need two endpoints, one for `blob` and one for `dfs`.""" + domain_names: Optional[List[str]] = None + """Only used by private endpoints to customer-managed resources. + + Domain names of target private link service. When updating this field, the full list of target + domain_names must be specified.""" - network_connectivity_config_id: Optional[str] = None - """Your Network Connectvity Configuration ID.""" + group_id: Optional[str] = None + """Only used by private endpoints to Azure first-party services. 
Enum: blob | dfs | sqlServer | + mysqlServer + + The sub-resource type (group ID) of the target resource. Note that to connect to workspace root + storage (root DBFS), you need two endpoints, one for blob and one for dfs.""" def as_dict(self) -> dict: - """Serializes the CreatePrivateEndpointRuleRequest into a dictionary suitable for use as a JSON request body.""" + """Serializes the CreatePrivateEndpointRule into a dictionary suitable for use as a JSON request body.""" body = {} + if self.domain_names: + body["domain_names"] = [v for v in self.domain_names] if self.group_id is not None: - body["group_id"] = self.group_id.value - if self.network_connectivity_config_id is not None: - body["network_connectivity_config_id"] = self.network_connectivity_config_id + body["group_id"] = self.group_id if self.resource_id is not None: body["resource_id"] = self.resource_id return body def as_shallow_dict(self) -> dict: - """Serializes the CreatePrivateEndpointRuleRequest into a shallow dictionary of its immediate attributes.""" + """Serializes the CreatePrivateEndpointRule into a shallow dictionary of its immediate attributes.""" body = {} + if self.domain_names: + body["domain_names"] = self.domain_names if self.group_id is not None: body["group_id"] = self.group_id - if self.network_connectivity_config_id is not None: - body["network_connectivity_config_id"] = self.network_connectivity_config_id if self.resource_id is not None: body["resource_id"] = self.resource_id return body @classmethod - def from_dict(cls, d: Dict[str, Any]) -> CreatePrivateEndpointRuleRequest: - """Deserializes the CreatePrivateEndpointRuleRequest from a dictionary.""" + def from_dict(cls, d: Dict[str, Any]) -> CreatePrivateEndpointRule: + """Deserializes the CreatePrivateEndpointRule from a dictionary.""" return cls( - group_id=_enum(d, "group_id", CreatePrivateEndpointRuleRequestGroupId), - network_connectivity_config_id=d.get("network_connectivity_config_id", None), + domain_names=d.get("domain_names", None), + group_id=d.get("group_id", None), resource_id=d.get("resource_id", None), ) -class CreatePrivateEndpointRuleRequestGroupId(Enum): - """The sub-resource type (group ID) of the target resource. Note that to connect to workspace root - storage (root DBFS), you need two endpoints, one for `blob` and one for `dfs`.""" - - BLOB = "blob" - DFS = "dfs" - MYSQL_SERVER = "mysqlServer" - SQL_SERVER = "sqlServer" - - @dataclass class CreateTokenRequest: comment: Optional[str] = None @@ -1961,6 +1963,14 @@ class EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinatio GOOGLE_CLOUD_STORAGE = "GOOGLE_CLOUD_STORAGE" +class EgressResourceType(Enum): + """The target resources that are supported by Network Connectivity Config. Note: some egress types + can support general types that are not defined in EgressResourceType. 
E.g.: Azure private + endpoint supports private link enabled Azure services.""" + + AZURE_BLOB_STORAGE = "AZURE_BLOB_STORAGE" + + @dataclass class EmailConfig: addresses: Optional[List[str]] = None @@ -2721,6 +2731,8 @@ def from_dict(cls, d: Dict[str, Any]) -> ListIpAccessListResponse: @dataclass class ListNccAzurePrivateEndpointRulesResponse: + """The private endpoint rule list was successfully retrieved.""" + items: Optional[List[NccAzurePrivateEndpointRule]] = None next_page_token: Optional[str] = None @@ -2756,6 +2768,8 @@ def from_dict(cls, d: Dict[str, Any]) -> ListNccAzurePrivateEndpointRulesRespons @dataclass class ListNetworkConnectivityConfigurationsResponse: + """The network connectivity configuration list was successfully retrieved.""" + items: Optional[List[NetworkConnectivityConfiguration]] = None next_page_token: Optional[str] = None @@ -2991,17 +3005,19 @@ def from_dict(cls, d: Dict[str, Any]) -> NccAwsStableIpRule: @dataclass class NccAzurePrivateEndpointRule: + """Properties of the new private endpoint rule. Note that you must approve the endpoint in Azure + portal after initialization.""" + connection_state: Optional[NccAzurePrivateEndpointRuleConnectionState] = None """The current status of this private endpoint. The private endpoint rules are effective only if - the connection state is `ESTABLISHED`. Remember that you must approve new endpoints on your - resources in the Azure portal before they take effect. - - The possible values are: - INIT: (deprecated) The endpoint has been created and pending - approval. - PENDING: The endpoint has been created and pending approval. - ESTABLISHED: The - endpoint has been approved and is ready to use in your serverless compute resources. - REJECTED: - Connection was rejected by the private link resource owner. - DISCONNECTED: Connection was - removed by the private link resource owner, the private endpoint becomes informative and should - be deleted for clean-up.""" + the connection state is ESTABLISHED. Remember that you must approve new endpoints on your + resources in the Azure portal before they take effect. The possible values are: - INIT: + (deprecated) The endpoint has been created and pending approval. - PENDING: The endpoint has + been created and pending approval. - ESTABLISHED: The endpoint has been approved and is ready to + use in your serverless compute resources. - REJECTED: Connection was rejected by the private + link resource owner. - DISCONNECTED: Connection was removed by the private link resource owner, + the private endpoint becomes informative and should be deleted for clean-up. - EXPIRED: If the + endpoint was created but not approved in 14 days, it will be EXPIRED.""" creation_time: Optional[int] = None """Time in epoch milliseconds when this object was created.""" @@ -3012,12 +3028,21 @@ class NccAzurePrivateEndpointRule: deactivated_at: Optional[int] = None """Time in epoch milliseconds when this object was deactivated.""" + domain_names: Optional[List[str]] = None + """Only used by private endpoints to customer-managed resources. + + Domain names of target private link service. When updating this field, the full list of target + domain_names must be specified.""" + endpoint_name: Optional[str] = None """The name of the Azure private endpoint resource.""" - group_id: Optional[NccAzurePrivateEndpointRuleGroupId] = None - """The sub-resource type (group ID) of the target resource. 
Note that to connect to workspace root - storage (root DBFS), you need two endpoints, one for `blob` and one for `dfs`.""" + group_id: Optional[str] = None + """Only used by private endpoints to Azure first-party services. Enum: blob | dfs | sqlServer | + mysqlServer + + The sub-resource type (group ID) of the target resource. Note that to connect to workspace root + storage (root DBFS), you need two endpoints, one for blob and one for dfs.""" network_connectivity_config_id: Optional[str] = None """The ID of a network connectivity configuration, which is the parent resource of this private @@ -3043,10 +3068,12 @@ def as_dict(self) -> dict: body["deactivated"] = self.deactivated if self.deactivated_at is not None: body["deactivated_at"] = self.deactivated_at + if self.domain_names: + body["domain_names"] = [v for v in self.domain_names] if self.endpoint_name is not None: body["endpoint_name"] = self.endpoint_name if self.group_id is not None: - body["group_id"] = self.group_id.value + body["group_id"] = self.group_id if self.network_connectivity_config_id is not None: body["network_connectivity_config_id"] = self.network_connectivity_config_id if self.resource_id is not None: @@ -3068,6 +3095,8 @@ def as_shallow_dict(self) -> dict: body["deactivated"] = self.deactivated if self.deactivated_at is not None: body["deactivated_at"] = self.deactivated_at + if self.domain_names: + body["domain_names"] = self.domain_names if self.endpoint_name is not None: body["endpoint_name"] = self.endpoint_name if self.group_id is not None: @@ -3090,8 +3119,9 @@ def from_dict(cls, d: Dict[str, Any]) -> NccAzurePrivateEndpointRule: creation_time=d.get("creation_time", None), deactivated=d.get("deactivated", None), deactivated_at=d.get("deactivated_at", None), + domain_names=d.get("domain_names", None), endpoint_name=d.get("endpoint_name", None), - group_id=_enum(d, "group_id", NccAzurePrivateEndpointRuleGroupId), + group_id=d.get("group_id", None), network_connectivity_config_id=d.get("network_connectivity_config_id", None), resource_id=d.get("resource_id", None), rule_id=d.get("rule_id", None), @@ -3100,34 +3130,15 @@ def from_dict(cls, d: Dict[str, Any]) -> NccAzurePrivateEndpointRule: class NccAzurePrivateEndpointRuleConnectionState(Enum): - """The current status of this private endpoint. The private endpoint rules are effective only if - the connection state is `ESTABLISHED`. Remember that you must approve new endpoints on your - resources in the Azure portal before they take effect. - - The possible values are: - INIT: (deprecated) The endpoint has been created and pending - approval. - PENDING: The endpoint has been created and pending approval. - ESTABLISHED: The - endpoint has been approved and is ready to use in your serverless compute resources. - REJECTED: - Connection was rejected by the private link resource owner. - DISCONNECTED: Connection was - removed by the private link resource owner, the private endpoint becomes informative and should - be deleted for clean-up.""" DISCONNECTED = "DISCONNECTED" ESTABLISHED = "ESTABLISHED" + EXPIRED = "EXPIRED" INIT = "INIT" PENDING = "PENDING" REJECTED = "REJECTED" -class NccAzurePrivateEndpointRuleGroupId(Enum): - """The sub-resource type (group ID) of the target resource. 
Note that to connect to workspace root - storage (root DBFS), you need two endpoints, one for `blob` and one for `dfs`.""" - - BLOB = "blob" - DFS = "dfs" - MYSQL_SERVER = "mysqlServer" - SQL_SERVER = "sqlServer" - - @dataclass class NccAzureServiceEndpointRule: """The stable Azure service endpoints. You can configure the firewall of your Azure resources to @@ -3138,9 +3149,9 @@ class NccAzureServiceEndpointRule: resources.""" target_region: Optional[str] = None - """The Azure region in which this service endpoint rule applies.""" + """The Azure region in which this service endpoint rule applies..""" - target_services: Optional[List[str]] = None + target_services: Optional[List[EgressResourceType]] = None """The Azure services to which this service endpoint rule applies to.""" def as_dict(self) -> dict: @@ -3151,7 +3162,7 @@ def as_dict(self) -> dict: if self.target_region is not None: body["target_region"] = self.target_region if self.target_services: - body["target_services"] = [v for v in self.target_services] + body["target_services"] = [v.value for v in self.target_services] return body def as_shallow_dict(self) -> dict: @@ -3171,15 +3182,12 @@ def from_dict(cls, d: Dict[str, Any]) -> NccAzureServiceEndpointRule: return cls( subnets=d.get("subnets", None), target_region=d.get("target_region", None), - target_services=d.get("target_services", None), + target_services=_repeated_enum(d, "target_services", EgressResourceType), ) @dataclass class NccEgressConfig: - """The network connectivity rules that apply to network traffic from your serverless compute - resources.""" - default_rules: Optional[NccEgressDefaultRules] = None """The network connectivity rules that are applied by default without resource specific configurations. You can find the stable network information of your serverless compute resources @@ -3218,9 +3226,7 @@ def from_dict(cls, d: Dict[str, Any]) -> NccEgressConfig: @dataclass class NccEgressDefaultRules: - """The network connectivity rules that are applied by default without resource specific - configurations. You can find the stable network information of your serverless compute resources - here.""" + """Default rules don't have specific targets.""" aws_stable_ip_rule: Optional[NccAwsStableIpRule] = None """The stable AWS IP CIDR blocks. You can use these to configure the firewall of your resources to @@ -3259,8 +3265,7 @@ def from_dict(cls, d: Dict[str, Any]) -> NccEgressDefaultRules: @dataclass class NccEgressTargetRules: - """The network connectivity rules that configured for each destinations. These rules override - default rules.""" + """Target rule controls the egress rules that are dedicated to specific resources.""" azure_private_endpoint_rules: Optional[List[NccAzurePrivateEndpointRule]] = None @@ -3288,6 +3293,8 @@ def from_dict(cls, d: Dict[str, Any]) -> NccEgressTargetRules: @dataclass class NetworkConnectivityConfiguration: + """Properties of the new network connectivity configuration.""" + account_id: Optional[str] = None """The Databricks account ID that hosts the credential.""" @@ -3301,7 +3308,7 @@ class NetworkConnectivityConfiguration: name: Optional[str] = None """The name of the network connectivity configuration. The name can contain alphanumeric characters, hyphens, and underscores. The length must be between 3 and 30 characters. 
The name - must match the regular expression `^[0-9a-zA-Z-_]{3,30}$`.""" + must match the regular expression ^[0-9a-zA-Z-_]{3,30}$""" network_connectivity_config_id: Optional[str] = None """Databricks network connectivity configuration ID.""" @@ -5198,6 +5205,37 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdatePersonalComputeSettingRequest: ) +@dataclass +class UpdatePrivateEndpointRule: + """Properties of the new private endpoint rule. Note that you must approve the endpoint in Azure + portal after initialization.""" + + domain_names: Optional[List[str]] = None + """Only used by private endpoints to customer-managed resources. + + Domain names of target private link service. When updating this field, the full list of target + domain_names must be specified.""" + + def as_dict(self) -> dict: + """Serializes the UpdatePrivateEndpointRule into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.domain_names: + body["domain_names"] = [v for v in self.domain_names] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdatePrivateEndpointRule into a shallow dictionary of its immediate attributes.""" + body = {} + if self.domain_names: + body["domain_names"] = self.domain_names + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdatePrivateEndpointRule: + """Deserializes the UpdatePrivateEndpointRule from a dictionary.""" + return cls(domain_names=d.get("domain_names", None)) + + @dataclass class UpdateResponse: def as_dict(self) -> dict: @@ -6490,16 +6528,16 @@ def update(self, allow_missing: bool, setting: DisableLegacyFeatures, field_mask class EnableExportNotebookAPI: - """Controls whether users can export notebooks and files from the Workspace. By default, this setting is + """Controls whether users can export notebooks and files from the Workspace UI. By default, this setting is enabled.""" def __init__(self, api_client): self._api = api_client def get_enable_export_notebook(self) -> EnableExportNotebook: - """Get the Enable Export Notebook setting. + """Get the Notebook and File exporting setting. - Gets the Enable Export Notebook setting. + Gets the Notebook and File exporting setting. :returns: :class:`EnableExportNotebook` """ @@ -6514,10 +6552,10 @@ def get_enable_export_notebook(self) -> EnableExportNotebook: def patch_enable_export_notebook( self, allow_missing: bool, setting: EnableExportNotebook, field_mask: str ) -> EnableExportNotebook: - """Update the Enable Export Notebook setting. + """Update the Notebook and File exporting setting. - Updates the Enable Export Notebook setting. The model follows eventual consistency, which means the - get after the update operation might receive stale values for some time. + Updates the Notebook and File exporting setting. The model follows eventual consistency, which means + the get after the update operation might receive stale values for some time. :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. @@ -6670,9 +6708,9 @@ def __init__(self, api_client): self._api = api_client def get_enable_notebook_table_clipboard(self) -> EnableNotebookTableClipboard: - """Get the Enable Notebook Table Clipboard setting. + """Get the Results Table Clipboard features setting. - Gets the Enable Notebook Table Clipboard setting. + Gets the Results Table Clipboard features setting. 
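A small usage sketch for the renamed setting surfaces, assuming the standard `w.settings` accessor, which is not shown in this hunk:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Read the workspace-level "Notebook and File exporting" setting.
    export_setting = w.settings.enable_export_notebook.get_enable_export_notebook()
    print(export_setting)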
:returns: :class:`EnableNotebookTableClipboard` """ @@ -6689,9 +6727,9 @@ def get_enable_notebook_table_clipboard(self) -> EnableNotebookTableClipboard: def patch_enable_notebook_table_clipboard( self, allow_missing: bool, setting: EnableNotebookTableClipboard, field_mask: str ) -> EnableNotebookTableClipboard: - """Update the Enable Notebook Table Clipboard setting. + """Update the Results Table Clipboard features setting. - Updates the Enable Notebook Table Clipboard setting. The model follows eventual consistency, which + Updates the Results Table Clipboard features setting. The model follows eventual consistency, which means the get after the update operation might receive stale values for some time. :param allow_missing: bool @@ -6735,9 +6773,9 @@ def __init__(self, api_client): self._api = api_client def get_enable_results_downloading(self) -> EnableResultsDownloading: - """Get the Enable Results Downloading setting. + """Get the Notebook results download setting. - Gets the Enable Results Downloading setting. + Gets the Notebook results download setting. :returns: :class:`EnableResultsDownloading` """ @@ -6752,10 +6790,10 @@ def get_enable_results_downloading(self) -> EnableResultsDownloading: def patch_enable_results_downloading( self, allow_missing: bool, setting: EnableResultsDownloading, field_mask: str ) -> EnableResultsDownloading: - """Update the Enable Results Downloading setting. + """Update the Notebook results download setting. - Updates the Enable Results Downloading setting. The model follows eventual consistency, which means - the get after the update operation might receive stale values for some time. + Updates the Notebook results download setting. The model follows eventual consistency, which means the + get after the update operation might receive stale values for some time. :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. @@ -7183,29 +7221,40 @@ def update( class NetworkConnectivityAPI: """These APIs provide configurations for the network connectivity of your workspaces for serverless compute - resources.""" + resources. This API provides stable subnets for your workspace so that you can configure your firewalls on + your Azure Storage accounts to allow access from Databricks. You can also use the API to provision private + endpoints for Databricks to privately connect serverless compute resources to your Azure resources using + Azure Private Link. See [configure serverless secure connectivity]. + + [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security + """ def __init__(self, api_client): self._api = api_client - def create_network_connectivity_configuration(self, name: str, region: str) -> NetworkConnectivityConfiguration: + def create_network_connectivity_configuration( + self, network_connectivity_config: CreateNetworkConnectivityConfiguration + ) -> NetworkConnectivityConfiguration: """Create a network connectivity configuration. - :param name: str - The name of the network connectivity configuration. The name can contain alphanumeric characters, - hyphens, and underscores. The length must be between 3 and 30 characters. The name must match the - regular expression `^[0-9a-zA-Z-_]{3,30}$`. - :param region: str - The region for the network connectivity configuration. Only workspaces in the same region can be - attached to the network connectivity configuration. 
+ Creates a network connectivity configuration (NCC), which provides stable Azure service subnets when + accessing your Azure Storage accounts. You can also use a network connectivity configuration to create + Databricks managed private endpoints so that Databricks serverless compute resources privately access + your resources. + + **IMPORTANT**: After you create the network connectivity configuration, you must assign one or more + workspaces to the new network connectivity configuration. You can share one network connectivity + configuration with multiple workspaces from the same Azure region within the same Databricks account. + See [configure serverless secure connectivity]. + + [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security + + :param network_connectivity_config: :class:`CreateNetworkConnectivityConfiguration` + Properties of the new network connectivity configuration. :returns: :class:`NetworkConnectivityConfiguration` """ - body = {} - if name is not None: - body["name"] = name - if region is not None: - body["region"] = region + body = network_connectivity_config.as_dict() headers = { "Accept": "application/json", "Content-Type": "application/json", @@ -7217,7 +7266,7 @@ def create_network_connectivity_configuration(self, name: str, region: str) -> N return NetworkConnectivityConfiguration.from_dict(res) def create_private_endpoint_rule( - self, network_connectivity_config_id: str, resource_id: str, group_id: CreatePrivateEndpointRuleRequestGroupId + self, network_connectivity_config_id: str, private_endpoint_rule: CreatePrivateEndpointRule ) -> NccAzurePrivateEndpointRule: """Create a private endpoint rule. @@ -7232,20 +7281,14 @@ def create_private_endpoint_rule( [serverless private link]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security/serverless-private-link :param network_connectivity_config_id: str - Your Network Connectvity Configuration ID. - :param resource_id: str - The Azure resource ID of the target resource. - :param group_id: :class:`CreatePrivateEndpointRuleRequestGroupId` - The sub-resource type (group ID) of the target resource. Note that to connect to workspace root - storage (root DBFS), you need two endpoints, one for `blob` and one for `dfs`. + Your Network Connectivity Configuration ID. + :param private_endpoint_rule: :class:`CreatePrivateEndpointRule` + Properties of the new private endpoint rule. Note that you must approve the endpoint in Azure portal + after initialization. :returns: :class:`NccAzurePrivateEndpointRule` """ - body = {} - if group_id is not None: - body["group_id"] = group_id.value - if resource_id is not None: - body["resource_id"] = resource_id + body = private_endpoint_rule.as_dict() headers = { "Accept": "application/json", "Content-Type": "application/json", @@ -7265,7 +7308,7 @@ def delete_network_connectivity_configuration(self, network_connectivity_config_ Deletes a network connectivity configuration. :param network_connectivity_config_id: str - Your Network Connectvity Configuration ID. + Your Network Connectivity Configuration ID. """ @@ -7317,7 +7360,7 @@ def get_network_connectivity_configuration( Gets a network connectivity configuration. :param network_connectivity_config_id: str - Your Network Connectvity Configuration ID. + Your Network Connectivity Configuration ID. 
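To make the new request-object style concrete, a hedged end-to-end sketch: create an NCC, attach private endpoint rules, and update a customer-managed rule's domain names. The subscription, resource IDs, region, and domain names are placeholders:

    from databricks.sdk import AccountClient
    from databricks.sdk.service.settings import (
        CreateNetworkConnectivityConfiguration,
        CreatePrivateEndpointRule,
        UpdatePrivateEndpointRule,
    )

    a = AccountClient()

    ncc = a.network_connectivity.create_network_connectivity_configuration(
        CreateNetworkConnectivityConfiguration(name="serverless-ncc", region="westeurope")
    )

    # Private endpoint to an Azure first-party service (e.g. workspace root storage).
    storage_rule = a.network_connectivity.create_private_endpoint_rule(
        ncc.network_connectivity_config_id,
        CreatePrivateEndpointRule(
            resource_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Storage/storageAccounts/<account>",
            group_id="blob",  # blob | dfs | sqlServer | mysqlServer
        ),
    )

    # Private endpoint to a customer-managed resource, addressed by domain names.
    pls_rule = a.network_connectivity.create_private_endpoint_rule(
        ncc.network_connectivity_config_id,
        CreatePrivateEndpointRule(
            resource_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/privateLinkServices/<service>",
            domain_names=["my-service.internal.example.com"],
        ),
    )

    # Only rules to customer-managed resources can be updated; the full domain name list is replaced.
    a.network_connectivity.update_ncc_azure_private_endpoint_rule_public(
        ncc.network_connectivity_config_id,
        pls_rule.rule_id,
        UpdatePrivateEndpointRule(domain_names=["my-service.internal.example.com", "alt.example.com"]),
        update_mask="domain_names",
    )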
:returns: :class:`NetworkConnectivityConfiguration` """ @@ -7336,7 +7379,7 @@ def get_network_connectivity_configuration( def get_private_endpoint_rule( self, network_connectivity_config_id: str, private_endpoint_rule_id: str ) -> NccAzurePrivateEndpointRule: - """Get a private endpoint rule. + """Gets a private endpoint rule. Gets the private endpoint rule. @@ -7429,6 +7472,52 @@ def list_private_endpoint_rules( return query["page_token"] = json["next_page_token"] + def update_ncc_azure_private_endpoint_rule_public( + self, + network_connectivity_config_id: str, + private_endpoint_rule_id: str, + private_endpoint_rule: UpdatePrivateEndpointRule, + update_mask: str, + ) -> NccAzurePrivateEndpointRule: + """Update a private endpoint rule. + + Updates a private endpoint rule. Currently only a private endpoint rule to customer-managed resources + is allowed to be updated. + + :param network_connectivity_config_id: str + Your Network Connectivity Configuration ID. + :param private_endpoint_rule_id: str + Your private endpoint rule ID. + :param private_endpoint_rule: :class:`UpdatePrivateEndpointRule` + Properties of the new private endpoint rule. Note that you must approve the endpoint in Azure portal + after initialization. + :param update_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + :returns: :class:`NccAzurePrivateEndpointRule` + """ + body = private_endpoint_rule.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.0/accounts/{self._api.account_id}/network-connectivity-configs/{network_connectivity_config_id}/private-endpoint-rules/{private_endpoint_rule_id}", + query=query, + body=body, + headers=headers, + ) + return NccAzurePrivateEndpointRule.from_dict(res) + class NotificationDestinationsAPI: """The notification destinations API lets you programmatically manage a workspace's notification @@ -7844,7 +7933,7 @@ def disable_legacy_dbfs(self) -> DisableLegacyDbfsAPI: @property def enable_export_notebook(self) -> EnableExportNotebookAPI: - """Controls whether users can export notebooks and files from the Workspace.""" + """Controls whether users can export notebooks and files from the Workspace UI.""" return self._enable_export_notebook @property diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index e5d91a8dd..a3746381c 100755 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -61,6 +61,18 @@ def from_dict(cls, d: Dict[str, Any]) -> AccessControl: ) +class Aggregation(Enum): + + AVG = "AVG" + COUNT = "COUNT" + COUNT_DISTINCT = "COUNT_DISTINCT" + MAX = "MAX" + MEDIAN = "MEDIAN" + MIN = "MIN" + STDDEV = "STDDEV" + SUM = "SUM" + + @dataclass class Alert: condition: Optional[AlertCondition] = None @@ -306,6 +318,17 @@ def from_dict(cls, d: Dict[str, Any]) -> AlertConditionThreshold: return cls(value=_from_dict(d, "value", AlertOperandValue)) +class AlertEvaluationState(Enum): + """UNSPECIFIED - default unspecify value for proto enum, do not use it in the code UNKNOWN - alert + not yet evaluated TRIGGERED - 
alert is triggered OK - alert is not triggered ERROR - alert + evaluation failed""" + + ERROR = "ERROR" + OK = "OK" + TRIGGERED = "TRIGGERED" + UNKNOWN = "UNKNOWN" + + @dataclass class AlertOperandColumn: name: Optional[str] = None @@ -609,6 +632,394 @@ class AlertState(Enum): UNKNOWN = "UNKNOWN" +@dataclass +class AlertV2: + create_time: Optional[str] = None + """The timestamp indicating when the alert was created.""" + + custom_description: Optional[str] = None + """Custom description for the alert. support mustache template.""" + + custom_summary: Optional[str] = None + """Custom summary for the alert. support mustache template.""" + + display_name: Optional[str] = None + """The display name of the alert.""" + + evaluation: Optional[AlertV2Evaluation] = None + + id: Optional[str] = None + """UUID identifying the alert.""" + + lifecycle_state: Optional[LifecycleState] = None + """Indicates whether the query is trashed.""" + + owner_user_name: Optional[str] = None + """The owner's username. This field is set to "Unavailable" if the user has been deleted.""" + + parent_path: Optional[str] = None + """The workspace path of the folder containing the alert. Can only be set on create, and cannot be + updated.""" + + query_text: Optional[str] = None + """Text of the query to be run.""" + + run_as_user_name: Optional[str] = None + """The run as username. This field is set to "Unavailable" if the user has been deleted.""" + + schedule: Optional[CronSchedule] = None + + update_time: Optional[str] = None + """The timestamp indicating when the alert was updated.""" + + warehouse_id: Optional[str] = None + """ID of the SQL warehouse attached to the alert.""" + + def as_dict(self) -> dict: + """Serializes the AlertV2 into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.create_time is not None: + body["create_time"] = self.create_time + if self.custom_description is not None: + body["custom_description"] = self.custom_description + if self.custom_summary is not None: + body["custom_summary"] = self.custom_summary + if self.display_name is not None: + body["display_name"] = self.display_name + if self.evaluation: + body["evaluation"] = self.evaluation.as_dict() + if self.id is not None: + body["id"] = self.id + if self.lifecycle_state is not None: + body["lifecycle_state"] = self.lifecycle_state.value + if self.owner_user_name is not None: + body["owner_user_name"] = self.owner_user_name + if self.parent_path is not None: + body["parent_path"] = self.parent_path + if self.query_text is not None: + body["query_text"] = self.query_text + if self.run_as_user_name is not None: + body["run_as_user_name"] = self.run_as_user_name + if self.schedule: + body["schedule"] = self.schedule.as_dict() + if self.update_time is not None: + body["update_time"] = self.update_time + if self.warehouse_id is not None: + body["warehouse_id"] = self.warehouse_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AlertV2 into a shallow dictionary of its immediate attributes.""" + body = {} + if self.create_time is not None: + body["create_time"] = self.create_time + if self.custom_description is not None: + body["custom_description"] = self.custom_description + if self.custom_summary is not None: + body["custom_summary"] = self.custom_summary + if self.display_name is not None: + body["display_name"] = self.display_name + if self.evaluation: + body["evaluation"] = self.evaluation + if self.id is not None: + body["id"] = self.id + if self.lifecycle_state is not None: + 
body["lifecycle_state"] = self.lifecycle_state + if self.owner_user_name is not None: + body["owner_user_name"] = self.owner_user_name + if self.parent_path is not None: + body["parent_path"] = self.parent_path + if self.query_text is not None: + body["query_text"] = self.query_text + if self.run_as_user_name is not None: + body["run_as_user_name"] = self.run_as_user_name + if self.schedule: + body["schedule"] = self.schedule + if self.update_time is not None: + body["update_time"] = self.update_time + if self.warehouse_id is not None: + body["warehouse_id"] = self.warehouse_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AlertV2: + """Deserializes the AlertV2 from a dictionary.""" + return cls( + create_time=d.get("create_time", None), + custom_description=d.get("custom_description", None), + custom_summary=d.get("custom_summary", None), + display_name=d.get("display_name", None), + evaluation=_from_dict(d, "evaluation", AlertV2Evaluation), + id=d.get("id", None), + lifecycle_state=_enum(d, "lifecycle_state", LifecycleState), + owner_user_name=d.get("owner_user_name", None), + parent_path=d.get("parent_path", None), + query_text=d.get("query_text", None), + run_as_user_name=d.get("run_as_user_name", None), + schedule=_from_dict(d, "schedule", CronSchedule), + update_time=d.get("update_time", None), + warehouse_id=d.get("warehouse_id", None), + ) + + +@dataclass +class AlertV2Evaluation: + comparison_operator: Optional[ComparisonOperator] = None + """Operator used for comparison in alert evaluation.""" + + empty_result_state: Optional[AlertEvaluationState] = None + """Alert state if result is empty.""" + + last_evaluated_at: Optional[str] = None + """Timestamp of the last evaluation.""" + + notification: Optional[AlertV2Notification] = None + """User or Notification Destination to notify when alert is triggered.""" + + source: Optional[AlertV2OperandColumn] = None + """Source column from result to use to evaluate alert""" + + state: Optional[AlertEvaluationState] = None + """Latest state of alert evaluation.""" + + threshold: Optional[AlertV2Operand] = None + """Threshold to user for alert evaluation, can be a column or a value.""" + + def as_dict(self) -> dict: + """Serializes the AlertV2Evaluation into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.comparison_operator is not None: + body["comparison_operator"] = self.comparison_operator.value + if self.empty_result_state is not None: + body["empty_result_state"] = self.empty_result_state.value + if self.last_evaluated_at is not None: + body["last_evaluated_at"] = self.last_evaluated_at + if self.notification: + body["notification"] = self.notification.as_dict() + if self.source: + body["source"] = self.source.as_dict() + if self.state is not None: + body["state"] = self.state.value + if self.threshold: + body["threshold"] = self.threshold.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AlertV2Evaluation into a shallow dictionary of its immediate attributes.""" + body = {} + if self.comparison_operator is not None: + body["comparison_operator"] = self.comparison_operator + if self.empty_result_state is not None: + body["empty_result_state"] = self.empty_result_state + if self.last_evaluated_at is not None: + body["last_evaluated_at"] = self.last_evaluated_at + if self.notification: + body["notification"] = self.notification + if self.source: + body["source"] = self.source + if self.state is not None: + body["state"] = self.state + if 
self.threshold: + body["threshold"] = self.threshold + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AlertV2Evaluation: + """Deserializes the AlertV2Evaluation from a dictionary.""" + return cls( + comparison_operator=_enum(d, "comparison_operator", ComparisonOperator), + empty_result_state=_enum(d, "empty_result_state", AlertEvaluationState), + last_evaluated_at=d.get("last_evaluated_at", None), + notification=_from_dict(d, "notification", AlertV2Notification), + source=_from_dict(d, "source", AlertV2OperandColumn), + state=_enum(d, "state", AlertEvaluationState), + threshold=_from_dict(d, "threshold", AlertV2Operand), + ) + + +@dataclass +class AlertV2Notification: + notify_on_ok: Optional[bool] = None + """Whether to notify alert subscribers when alert returns back to normal.""" + + retrigger_seconds: Optional[int] = None + """Number of seconds an alert must wait after being triggered to rearm itself. After rearming, it + can be triggered again. If 0 or not specified, the alert will not be triggered again.""" + + subscriptions: Optional[List[AlertV2Subscription]] = None + + def as_dict(self) -> dict: + """Serializes the AlertV2Notification into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.notify_on_ok is not None: + body["notify_on_ok"] = self.notify_on_ok + if self.retrigger_seconds is not None: + body["retrigger_seconds"] = self.retrigger_seconds + if self.subscriptions: + body["subscriptions"] = [v.as_dict() for v in self.subscriptions] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AlertV2Notification into a shallow dictionary of its immediate attributes.""" + body = {} + if self.notify_on_ok is not None: + body["notify_on_ok"] = self.notify_on_ok + if self.retrigger_seconds is not None: + body["retrigger_seconds"] = self.retrigger_seconds + if self.subscriptions: + body["subscriptions"] = self.subscriptions + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AlertV2Notification: + """Deserializes the AlertV2Notification from a dictionary.""" + return cls( + notify_on_ok=d.get("notify_on_ok", None), + retrigger_seconds=d.get("retrigger_seconds", None), + subscriptions=_repeated_dict(d, "subscriptions", AlertV2Subscription), + ) + + +@dataclass +class AlertV2Operand: + column: Optional[AlertV2OperandColumn] = None + + value: Optional[AlertV2OperandValue] = None + + def as_dict(self) -> dict: + """Serializes the AlertV2Operand into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.column: + body["column"] = self.column.as_dict() + if self.value: + body["value"] = self.value.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AlertV2Operand into a shallow dictionary of its immediate attributes.""" + body = {} + if self.column: + body["column"] = self.column + if self.value: + body["value"] = self.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AlertV2Operand: + """Deserializes the AlertV2Operand from a dictionary.""" + return cls( + column=_from_dict(d, "column", AlertV2OperandColumn), value=_from_dict(d, "value", AlertV2OperandValue) + ) + + +@dataclass +class AlertV2OperandColumn: + aggregation: Optional[Aggregation] = None + + display: Optional[str] = None + + name: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the AlertV2OperandColumn into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.aggregation is not None: + body["aggregation"] 
= self.aggregation.value + if self.display is not None: + body["display"] = self.display + if self.name is not None: + body["name"] = self.name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AlertV2OperandColumn into a shallow dictionary of its immediate attributes.""" + body = {} + if self.aggregation is not None: + body["aggregation"] = self.aggregation + if self.display is not None: + body["display"] = self.display + if self.name is not None: + body["name"] = self.name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AlertV2OperandColumn: + """Deserializes the AlertV2OperandColumn from a dictionary.""" + return cls( + aggregation=_enum(d, "aggregation", Aggregation), display=d.get("display", None), name=d.get("name", None) + ) + + +@dataclass +class AlertV2OperandValue: + bool_value: Optional[bool] = None + + double_value: Optional[float] = None + + string_value: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the AlertV2OperandValue into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.bool_value is not None: + body["bool_value"] = self.bool_value + if self.double_value is not None: + body["double_value"] = self.double_value + if self.string_value is not None: + body["string_value"] = self.string_value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AlertV2OperandValue into a shallow dictionary of its immediate attributes.""" + body = {} + if self.bool_value is not None: + body["bool_value"] = self.bool_value + if self.double_value is not None: + body["double_value"] = self.double_value + if self.string_value is not None: + body["string_value"] = self.string_value + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AlertV2OperandValue: + """Deserializes the AlertV2OperandValue from a dictionary.""" + return cls( + bool_value=d.get("bool_value", None), + double_value=d.get("double_value", None), + string_value=d.get("string_value", None), + ) + + +@dataclass +class AlertV2Subscription: + destination_id: Optional[str] = None + + user_email: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the AlertV2Subscription into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.destination_id is not None: + body["destination_id"] = self.destination_id + if self.user_email is not None: + body["user_email"] = self.user_email + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AlertV2Subscription into a shallow dictionary of its immediate attributes.""" + body = {} + if self.destination_id is not None: + body["destination_id"] = self.destination_id + if self.user_email is not None: + body["user_email"] = self.user_email + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AlertV2Subscription: + """Deserializes the AlertV2Subscription from a dictionary.""" + return cls(destination_id=d.get("destination_id", None), user_email=d.get("user_email", None)) + + @dataclass class BaseChunkInfo: """Describes metadata for a particular chunk, within a result set; this structure is used both @@ -948,6 +1359,18 @@ class ColumnInfoTypeName(Enum): USER_DEFINED_TYPE = "USER_DEFINED_TYPE" +class ComparisonOperator(Enum): + + EQUAL = "EQUAL" + GREATER_THAN = "GREATER_THAN" + GREATER_THAN_OR_EQUAL = "GREATER_THAN_OR_EQUAL" + IS_NOT_NULL = "IS_NOT_NULL" + IS_NULL = "IS_NULL" + LESS_THAN = "LESS_THAN" + LESS_THAN_OR_EQUAL = "LESS_THAN_OR_EQUAL" + NOT_EQUAL = "NOT_EQUAL" + + @dataclass class 
CreateAlert: name: str @@ -1012,11 +1435,17 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateAlert: class CreateAlertRequest: alert: Optional[CreateAlertRequestAlert] = None + auto_resolve_display_name: Optional[bool] = None + """If true, automatically resolve alert display name conflicts. Otherwise, fail the request if the + alert's display name conflicts with an existing alert's display name.""" + def as_dict(self) -> dict: """Serializes the CreateAlertRequest into a dictionary suitable for use as a JSON request body.""" body = {} if self.alert: body["alert"] = self.alert.as_dict() + if self.auto_resolve_display_name is not None: + body["auto_resolve_display_name"] = self.auto_resolve_display_name return body def as_shallow_dict(self) -> dict: @@ -1024,12 +1453,17 @@ def as_shallow_dict(self) -> dict: body = {} if self.alert: body["alert"] = self.alert + if self.auto_resolve_display_name is not None: + body["auto_resolve_display_name"] = self.auto_resolve_display_name return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> CreateAlertRequest: """Deserializes the CreateAlertRequest from a dictionary.""" - return cls(alert=_from_dict(d, "alert", CreateAlertRequestAlert)) + return cls( + alert=_from_dict(d, "alert", CreateAlertRequestAlert), + auto_resolve_display_name=d.get("auto_resolve_display_name", None), + ) @dataclass @@ -1121,13 +1555,43 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateAlertRequestAlert: ) +@dataclass +class CreateAlertV2Request: + alert: Optional[AlertV2] = None + + def as_dict(self) -> dict: + """Serializes the CreateAlertV2Request into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.alert: + body["alert"] = self.alert.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the CreateAlertV2Request into a shallow dictionary of its immediate attributes.""" + body = {} + if self.alert: + body["alert"] = self.alert + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> CreateAlertV2Request: + """Deserializes the CreateAlertV2Request from a dictionary.""" + return cls(alert=_from_dict(d, "alert", AlertV2)) + + @dataclass class CreateQueryRequest: + auto_resolve_display_name: Optional[bool] = None + """If true, automatically resolve query display name conflicts. 
Otherwise, fail the request if the + query's display name conflicts with an existing query's display name.""" + query: Optional[CreateQueryRequestQuery] = None def as_dict(self) -> dict: """Serializes the CreateQueryRequest into a dictionary suitable for use as a JSON request body.""" body = {} + if self.auto_resolve_display_name is not None: + body["auto_resolve_display_name"] = self.auto_resolve_display_name if self.query: body["query"] = self.query.as_dict() return body @@ -1135,6 +1599,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the CreateQueryRequest into a shallow dictionary of its immediate attributes.""" body = {} + if self.auto_resolve_display_name is not None: + body["auto_resolve_display_name"] = self.auto_resolve_display_name if self.query: body["query"] = self.query return body @@ -1142,7 +1608,10 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> CreateQueryRequest: """Deserializes the CreateQueryRequest from a dictionary.""" - return cls(query=_from_dict(d, "query", CreateQueryRequestQuery)) + return cls( + auto_resolve_display_name=d.get("auto_resolve_display_name", None), + query=_from_dict(d, "query", CreateQueryRequestQuery), + ) @dataclass @@ -1581,12 +2050,60 @@ def as_shallow_dict(self) -> dict: def from_dict(cls, d: Dict[str, Any]) -> CreateWidget: """Deserializes the CreateWidget from a dictionary.""" return cls( - dashboard_id=d.get("dashboard_id", None), - id=d.get("id", None), - options=_from_dict(d, "options", WidgetOptions), - text=d.get("text", None), - visualization_id=d.get("visualization_id", None), - width=d.get("width", None), + dashboard_id=d.get("dashboard_id", None), + id=d.get("id", None), + options=_from_dict(d, "options", WidgetOptions), + text=d.get("text", None), + visualization_id=d.get("visualization_id", None), + width=d.get("width", None), + ) + + +@dataclass +class CronSchedule: + pause_status: Optional[SchedulePauseStatus] = None + """Indicate whether this schedule is paused or not.""" + + quartz_cron_schedule: Optional[str] = None + """A cron expression using quartz syntax that specifies the schedule for this pipeline. Should use + the quartz format described here: + http://www.quartz-scheduler.org/documentation/quartz-2.1.7/tutorials/tutorial-lesson-06.html""" + + timezone_id: Optional[str] = None + """A Java timezone id. The schedule will be resolved using this timezone. This will be combined + with the quartz_cron_schedule to determine the schedule. 
See + https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html + for details.""" + + def as_dict(self) -> dict: + """Serializes the CronSchedule into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.pause_status is not None: + body["pause_status"] = self.pause_status.value + if self.quartz_cron_schedule is not None: + body["quartz_cron_schedule"] = self.quartz_cron_schedule + if self.timezone_id is not None: + body["timezone_id"] = self.timezone_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the CronSchedule into a shallow dictionary of its immediate attributes.""" + body = {} + if self.pause_status is not None: + body["pause_status"] = self.pause_status + if self.quartz_cron_schedule is not None: + body["quartz_cron_schedule"] = self.quartz_cron_schedule + if self.timezone_id is not None: + body["timezone_id"] = self.timezone_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> CronSchedule: + """Deserializes the CronSchedule from a dictionary.""" + return cls( + pause_status=_enum(d, "pause_status", SchedulePauseStatus), + quartz_cron_schedule=d.get("quartz_cron_schedule", None), + timezone_id=d.get("timezone_id", None), ) @@ -2614,7 +3131,7 @@ class EndpointInfo: Supported values: - Must be unique within an org. - Must be less than 100 characters.""" num_active_sessions: Optional[int] = None - """current number of active sessions for the warehouse""" + """Deprecated. current number of active sessions for the warehouse""" num_clusters: Optional[int] = None """current number of clusters running for the service""" @@ -3419,7 +3936,7 @@ class GetWarehouseResponse: Supported values: - Must be unique within an org. - Must be less than 100 characters.""" num_active_sessions: Optional[int] = None - """current number of active sessions for the warehouse""" + """Deprecated. current number of active sessions for the warehouse""" num_clusters: Optional[int] = None """current number of clusters running for the service""" @@ -4270,6 +4787,160 @@ def from_dict(cls, d: Dict[str, Any]) -> ListAlertsResponseAlert: ) +@dataclass +class ListAlertsV2Response: + next_page_token: Optional[str] = None + + results: Optional[List[ListAlertsV2ResponseAlert]] = None + + def as_dict(self) -> dict: + """Serializes the ListAlertsV2Response into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.results: + body["results"] = [v.as_dict() for v in self.results] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListAlertsV2Response into a shallow dictionary of its immediate attributes.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.results: + body["results"] = self.results + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListAlertsV2Response: + """Deserializes the ListAlertsV2Response from a dictionary.""" + return cls( + next_page_token=d.get("next_page_token", None), + results=_repeated_dict(d, "results", ListAlertsV2ResponseAlert), + ) + + +@dataclass +class ListAlertsV2ResponseAlert: + create_time: Optional[str] = None + """The timestamp indicating when the alert was created.""" + + custom_description: Optional[str] = None + """Custom description for the alert. support mustache template.""" + + custom_summary: Optional[str] = None + """Custom summary for the alert. 
support mustache template.""" + + display_name: Optional[str] = None + """The display name of the alert.""" + + evaluation: Optional[AlertV2Evaluation] = None + + id: Optional[str] = None + """UUID identifying the alert.""" + + lifecycle_state: Optional[LifecycleState] = None + """Indicates whether the query is trashed.""" + + owner_user_name: Optional[str] = None + """The owner's username. This field is set to "Unavailable" if the user has been deleted.""" + + query_text: Optional[str] = None + """Text of the query to be run.""" + + run_as_user_name: Optional[str] = None + """The run as username. This field is set to "Unavailable" if the user has been deleted.""" + + schedule: Optional[CronSchedule] = None + + update_time: Optional[str] = None + """The timestamp indicating when the alert was updated.""" + + warehouse_id: Optional[str] = None + """ID of the SQL warehouse attached to the alert.""" + + def as_dict(self) -> dict: + """Serializes the ListAlertsV2ResponseAlert into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.create_time is not None: + body["create_time"] = self.create_time + if self.custom_description is not None: + body["custom_description"] = self.custom_description + if self.custom_summary is not None: + body["custom_summary"] = self.custom_summary + if self.display_name is not None: + body["display_name"] = self.display_name + if self.evaluation: + body["evaluation"] = self.evaluation.as_dict() + if self.id is not None: + body["id"] = self.id + if self.lifecycle_state is not None: + body["lifecycle_state"] = self.lifecycle_state.value + if self.owner_user_name is not None: + body["owner_user_name"] = self.owner_user_name + if self.query_text is not None: + body["query_text"] = self.query_text + if self.run_as_user_name is not None: + body["run_as_user_name"] = self.run_as_user_name + if self.schedule: + body["schedule"] = self.schedule.as_dict() + if self.update_time is not None: + body["update_time"] = self.update_time + if self.warehouse_id is not None: + body["warehouse_id"] = self.warehouse_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListAlertsV2ResponseAlert into a shallow dictionary of its immediate attributes.""" + body = {} + if self.create_time is not None: + body["create_time"] = self.create_time + if self.custom_description is not None: + body["custom_description"] = self.custom_description + if self.custom_summary is not None: + body["custom_summary"] = self.custom_summary + if self.display_name is not None: + body["display_name"] = self.display_name + if self.evaluation: + body["evaluation"] = self.evaluation + if self.id is not None: + body["id"] = self.id + if self.lifecycle_state is not None: + body["lifecycle_state"] = self.lifecycle_state + if self.owner_user_name is not None: + body["owner_user_name"] = self.owner_user_name + if self.query_text is not None: + body["query_text"] = self.query_text + if self.run_as_user_name is not None: + body["run_as_user_name"] = self.run_as_user_name + if self.schedule: + body["schedule"] = self.schedule + if self.update_time is not None: + body["update_time"] = self.update_time + if self.warehouse_id is not None: + body["warehouse_id"] = self.warehouse_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListAlertsV2ResponseAlert: + """Deserializes the ListAlertsV2ResponseAlert from a dictionary.""" + return cls( + create_time=d.get("create_time", None), + custom_description=d.get("custom_description", None), + 
custom_summary=d.get("custom_summary", None), + display_name=d.get("display_name", None), + evaluation=_from_dict(d, "evaluation", AlertV2Evaluation), + id=d.get("id", None), + lifecycle_state=_enum(d, "lifecycle_state", LifecycleState), + owner_user_name=d.get("owner_user_name", None), + query_text=d.get("query_text", None), + run_as_user_name=d.get("run_as_user_name", None), + schedule=_from_dict(d, "schedule", CronSchedule), + update_time=d.get("update_time", None), + warehouse_id=d.get("warehouse_id", None), + ) + + class ListOrder(Enum): CREATED_AT = "created_at" @@ -6185,6 +6856,12 @@ class RunAsRole(Enum): VIEWER = "viewer" +class SchedulePauseStatus(Enum): + + PAUSED = "PAUSED" + UNPAUSED = "UNPAUSED" + + @dataclass class ServiceError: error_code: Optional[ServiceErrorCode] = None @@ -6993,6 +7670,52 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdateAlertRequestAlert: ) +@dataclass +class UpdateAlertV2Request: + update_mask: str + """The field mask must be a single string, with multiple fields separated by commas (no spaces). + The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields + (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed, + as only the entire collection field can be specified. Field names must exactly match the + resource field names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the + API changes in the future.""" + + alert: Optional[AlertV2] = None + + id: Optional[str] = None + """UUID identifying the alert.""" + + def as_dict(self) -> dict: + """Serializes the UpdateAlertV2Request into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.alert: + body["alert"] = self.alert.as_dict() + if self.id is not None: + body["id"] = self.id + if self.update_mask is not None: + body["update_mask"] = self.update_mask + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdateAlertV2Request into a shallow dictionary of its immediate attributes.""" + body = {} + if self.alert: + body["alert"] = self.alert + if self.id is not None: + body["id"] = self.id + if self.update_mask is not None: + body["update_mask"] = self.update_mask + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdateAlertV2Request: + """Deserializes the UpdateAlertV2Request from a dictionary.""" + return cls(alert=_from_dict(d, "alert", AlertV2), id=d.get("id", None), update_mask=d.get("update_mask", None)) + + @dataclass class UpdateQueryRequest: update_mask: str @@ -7902,18 +8625,25 @@ class AlertsAPI: def __init__(self, api_client): self._api = api_client - def create(self, *, alert: Optional[CreateAlertRequestAlert] = None) -> Alert: + def create( + self, *, alert: Optional[CreateAlertRequestAlert] = None, auto_resolve_display_name: Optional[bool] = None + ) -> Alert: """Create an alert. Creates an alert. :param alert: :class:`CreateAlertRequestAlert` (optional) + :param auto_resolve_display_name: bool (optional) + If true, automatically resolve alert display name conflicts. Otherwise, fail the request if the + alert's display name conflicts with an existing alert's display name. 
:returns: :class:`Alert` """ body = {} if alert is not None: body["alert"] = alert.as_dict() + if auto_resolve_display_name is not None: + body["auto_resolve_display_name"] = auto_resolve_display_name headers = { "Accept": "application/json", "Content-Type": "application/json", @@ -8193,6 +8923,133 @@ def update(self, alert_id: str, name: str, options: AlertOptions, query_id: str, self._api.do("PUT", f"/api/2.0/preview/sql/alerts/{alert_id}", body=body, headers=headers) +class AlertsV2API: + """TODO: Add description""" + + def __init__(self, api_client): + self._api = api_client + + def create_alert(self, *, alert: Optional[AlertV2] = None) -> AlertV2: + """Create an alert. + + Create Alert + + :param alert: :class:`AlertV2` (optional) + + :returns: :class:`AlertV2` + """ + body = {} + if alert is not None: + body["alert"] = alert.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/alerts", body=body, headers=headers) + return AlertV2.from_dict(res) + + def get_alert(self, id: str) -> AlertV2: + """Get an alert. + + Gets an alert. + + :param id: str + + :returns: :class:`AlertV2` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/alerts/{id}", headers=headers) + return AlertV2.from_dict(res) + + def list_alerts( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> Iterator[ListAlertsV2ResponseAlert]: + """List alerts. + + Gets a list of alerts accessible to the user, ordered by creation time. + + :param page_size: int (optional) + :param page_token: str (optional) + + :returns: Iterator over :class:`ListAlertsV2ResponseAlert` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do("GET", "/api/2.0/alerts", query=query, headers=headers) + if "results" in json: + for v in json["results"]: + yield ListAlertsV2ResponseAlert.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + + def trash_alert(self, id: str): + """Delete an alert. + + Moves an alert to the trash. Trashed alerts immediately disappear from list views, and can no longer + trigger. You can restore a trashed alert through the UI. A trashed alert is permanently deleted after + 30 days. + + :param id: str + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/alerts/{id}", headers=headers) + + def update_alert(self, id: str, update_mask: str, *, alert: Optional[AlertV2] = None) -> AlertV2: + """Update an alert. + + Update alert + + :param id: str + UUID identifying the alert. + :param update_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. 
+ :param alert: :class:`AlertV2` (optional) + + :returns: :class:`AlertV2` + """ + body = {} + if alert is not None: + body["alert"] = alert.as_dict() + if update_mask is not None: + body["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("PATCH", f"/api/2.0/alerts/{id}", body=body, headers=headers) + return AlertV2.from_dict(res) + + class DashboardWidgetsAPI: """This is an evolving API that facilitates the addition and removal of widgets from existing dashboards within the Databricks Workspace. Data structures may change over time.""" @@ -8678,16 +9535,23 @@ class QueriesAPI: def __init__(self, api_client): self._api = api_client - def create(self, *, query: Optional[CreateQueryRequestQuery] = None) -> Query: + def create( + self, *, auto_resolve_display_name: Optional[bool] = None, query: Optional[CreateQueryRequestQuery] = None + ) -> Query: """Create a query. Creates a query. + :param auto_resolve_display_name: bool (optional) + If true, automatically resolve query display name conflicts. Otherwise, fail the request if the + query's display name conflicts with an existing query's display name. :param query: :class:`CreateQueryRequestQuery` (optional) :returns: :class:`Query` """ body = {} + if auto_resolve_display_name is not None: + body["auto_resolve_display_name"] = auto_resolve_display_name if query is not None: body["query"] = query.as_dict() headers = { diff --git a/databricks/sdk/service/vectorsearch.py b/databricks/sdk/service/vectorsearch.py index e75578374..e40a64bf2 100755 --- a/databricks/sdk/service/vectorsearch.py +++ b/databricks/sdk/service/vectorsearch.py @@ -47,14 +47,19 @@ def from_dict(cls, d: Dict[str, Any]) -> ColumnInfo: @dataclass class CreateEndpoint: name: str - """Name of endpoint""" + """Name of the vector search endpoint""" endpoint_type: EndpointType - """Type of endpoint.""" + """Type of endpoint""" + + budget_policy_id: Optional[str] = None + """The budget policy id to be applied""" def as_dict(self) -> dict: """Serializes the CreateEndpoint into a dictionary suitable for use as a JSON request body.""" body = {} + if self.budget_policy_id is not None: + body["budget_policy_id"] = self.budget_policy_id if self.endpoint_type is not None: body["endpoint_type"] = self.endpoint_type.value if self.name is not None: @@ -64,6 +69,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the CreateEndpoint into a shallow dictionary of its immediate attributes.""" body = {} + if self.budget_policy_id is not None: + body["budget_policy_id"] = self.budget_policy_id if self.endpoint_type is not None: body["endpoint_type"] = self.endpoint_type if self.name is not None: @@ -73,7 +80,11 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> CreateEndpoint: """Deserializes the CreateEndpoint from a dictionary.""" - return cls(endpoint_type=_enum(d, "endpoint_type", EndpointType), name=d.get("name", None)) + return cls( + budget_policy_id=d.get("budget_policy_id", None), + endpoint_type=_enum(d, "endpoint_type", EndpointType), + name=d.get("name", None), + ) @dataclass @@ -88,12 +99,11 @@ class CreateVectorIndexRequest: """Primary key of the index""" index_type: VectorIndexType - """There are 2 types of Vector Search indexes: - - - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, automatically and - incrementally updating the index as the underlying data in the Delta Table changes. 
- - `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through - our REST and SDK APIs. With this model, the user manages index updates.""" + """There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that automatically syncs + with a source Delta Table, automatically and incrementally updating the index as the underlying + data in the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and write + of vectors and metadata through our REST and SDK APIs. With this model, the user manages index + updates.""" delta_sync_index_spec: Optional[DeltaSyncVectorIndexSpecRequest] = None """Specification for Delta Sync Index. Required if `index_type` is `DELTA_SYNC`.""" @@ -149,33 +159,39 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateVectorIndexRequest: @dataclass -class CreateVectorIndexResponse: - vector_index: Optional[VectorIndex] = None +class CustomTag: + key: str + """Key field for a vector search endpoint tag.""" + + value: Optional[str] = None + """[Optional] Value field for a vector search endpoint tag.""" def as_dict(self) -> dict: - """Serializes the CreateVectorIndexResponse into a dictionary suitable for use as a JSON request body.""" + """Serializes the CustomTag into a dictionary suitable for use as a JSON request body.""" body = {} - if self.vector_index: - body["vector_index"] = self.vector_index.as_dict() + if self.key is not None: + body["key"] = self.key + if self.value is not None: + body["value"] = self.value return body def as_shallow_dict(self) -> dict: - """Serializes the CreateVectorIndexResponse into a shallow dictionary of its immediate attributes.""" + """Serializes the CustomTag into a shallow dictionary of its immediate attributes.""" body = {} - if self.vector_index: - body["vector_index"] = self.vector_index + if self.key is not None: + body["key"] = self.key + if self.value is not None: + body["value"] = self.value return body @classmethod - def from_dict(cls, d: Dict[str, Any]) -> CreateVectorIndexResponse: - """Deserializes the CreateVectorIndexResponse from a dictionary.""" - return cls(vector_index=_from_dict(d, "vector_index", VectorIndex)) + def from_dict(cls, d: Dict[str, Any]) -> CustomTag: + """Deserializes the CustomTag from a dictionary.""" + return cls(key=d.get("key", None), value=d.get("value", None)) @dataclass class DeleteDataResult: - """Result of the upsert or delete operation.""" - failed_primary_keys: Optional[List[str]] = None """List of primary keys for rows that failed to process.""" @@ -209,51 +225,14 @@ def from_dict(cls, d: Dict[str, Any]) -> DeleteDataResult: class DeleteDataStatus(Enum): - """Status of the delete operation.""" FAILURE = "FAILURE" PARTIAL_SUCCESS = "PARTIAL_SUCCESS" SUCCESS = "SUCCESS" -@dataclass -class DeleteDataVectorIndexRequest: - """Request payload for deleting data from a vector index.""" - - primary_keys: List[str] - """List of primary keys for the data to be deleted.""" - - index_name: Optional[str] = None - """Name of the vector index where data is to be deleted. 
Must be a Direct Vector Access Index.""" - - def as_dict(self) -> dict: - """Serializes the DeleteDataVectorIndexRequest into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.index_name is not None: - body["index_name"] = self.index_name - if self.primary_keys: - body["primary_keys"] = [v for v in self.primary_keys] - return body - - def as_shallow_dict(self) -> dict: - """Serializes the DeleteDataVectorIndexRequest into a shallow dictionary of its immediate attributes.""" - body = {} - if self.index_name is not None: - body["index_name"] = self.index_name - if self.primary_keys: - body["primary_keys"] = self.primary_keys - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> DeleteDataVectorIndexRequest: - """Deserializes the DeleteDataVectorIndexRequest from a dictionary.""" - return cls(index_name=d.get("index_name", None), primary_keys=d.get("primary_keys", None)) - - @dataclass class DeleteDataVectorIndexResponse: - """Response to a delete data vector index request.""" - result: Optional[DeleteDataResult] = None """Result of the upsert or delete operation.""" @@ -331,20 +310,17 @@ class DeltaSyncVectorIndexSpecRequest: """The columns that contain the embedding source.""" embedding_vector_columns: Optional[List[EmbeddingVectorColumn]] = None - """The columns that contain the embedding vectors. The format should be array[double].""" + """The columns that contain the embedding vectors.""" embedding_writeback_table: Optional[str] = None - """[Optional] Automatically sync the vector index contents and computed embeddings to the specified - Delta table. The only supported table name is the index name with the suffix `_writeback_table`.""" + """[Optional] Name of the Delta table to sync the vector index contents and computed embeddings to.""" pipeline_type: Optional[PipelineType] = None - """Pipeline execution mode. - - - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system stops processing - after successfully refreshing the source table in the pipeline once, ensuring the table is - updated based on the data available when the update started. - `CONTINUOUS`: If the pipeline - uses continuous execution, the pipeline processes new data as it arrives in the source table to - keep vector index fresh.""" + """Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the triggered execution mode, the + system stops processing after successfully refreshing the source table in the pipeline once, + ensuring the table is updated based on the data available when the update started. - + `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline processes new data as it + arrives in the source table to keep vector index fresh.""" source_table: Optional[str] = None """The name of the source table.""" @@ -411,13 +387,11 @@ class DeltaSyncVectorIndexSpecResponse: """The ID of the pipeline that is used to sync the index.""" pipeline_type: Optional[PipelineType] = None - """Pipeline execution mode. - - - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system stops processing - after successfully refreshing the source table in the pipeline once, ensuring the table is - updated based on the data available when the update started. - `CONTINUOUS`: If the pipeline - uses continuous execution, the pipeline processes new data as it arrives in the source table to - keep vector index fresh.""" + """Pipeline execution mode. 
- `TRIGGERED`: If the pipeline uses the triggered execution mode, the + system stops processing after successfully refreshing the source table in the pipeline once, + ensuring the table is updated based on the data available when the update started. - + `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline processes new data as it + arrives in the source table to keep vector index fresh.""" source_table: Optional[str] = None """The name of the source table.""" @@ -472,17 +446,15 @@ def from_dict(cls, d: Dict[str, Any]) -> DeltaSyncVectorIndexSpecResponse: @dataclass class DirectAccessVectorIndexSpec: embedding_source_columns: Optional[List[EmbeddingSourceColumn]] = None - """Contains the optional model endpoint to use during query time.""" + """The columns that contain the embedding source. The format should be array[double].""" embedding_vector_columns: Optional[List[EmbeddingVectorColumn]] = None + """The columns that contain the embedding vectors. The format should be array[double].""" schema_json: Optional[str] = None - """The schema of the index in JSON format. - - Supported types are `integer`, `long`, `float`, `double`, `boolean`, `string`, `date`, - `timestamp`. - - Supported types for vector column: `array`, `array`,`.""" + """The schema of the index in JSON format. Supported types are `integer`, `long`, `float`, + `double`, `boolean`, `string`, `date`, `timestamp`. Supported types for vector column: + `array`, `array`,`.""" def as_dict(self) -> dict: """Serializes the DirectAccessVectorIndexSpec into a dictionary suitable for use as a JSON request body.""" @@ -588,11 +560,17 @@ class EndpointInfo: creator: Optional[str] = None """Creator of the endpoint""" + custom_tags: Optional[List[CustomTag]] = None + """The custom tags assigned to the endpoint""" + + effective_budget_policy_id: Optional[str] = None + """The budget policy id applied to the endpoint""" + endpoint_status: Optional[EndpointStatus] = None """Current status of the endpoint""" endpoint_type: Optional[EndpointType] = None - """Type of endpoint.""" + """Type of endpoint""" id: Optional[str] = None """Unique identifier of the endpoint""" @@ -604,7 +582,7 @@ class EndpointInfo: """User who last updated the endpoint""" name: Optional[str] = None - """Name of endpoint""" + """Name of the vector search endpoint""" num_indexes: Optional[int] = None """Number of indexes on the endpoint""" @@ -616,6 +594,10 @@ def as_dict(self) -> dict: body["creation_timestamp"] = self.creation_timestamp if self.creator is not None: body["creator"] = self.creator + if self.custom_tags: + body["custom_tags"] = [v.as_dict() for v in self.custom_tags] + if self.effective_budget_policy_id is not None: + body["effective_budget_policy_id"] = self.effective_budget_policy_id if self.endpoint_status: body["endpoint_status"] = self.endpoint_status.as_dict() if self.endpoint_type is not None: @@ -639,6 +621,10 @@ def as_shallow_dict(self) -> dict: body["creation_timestamp"] = self.creation_timestamp if self.creator is not None: body["creator"] = self.creator + if self.custom_tags: + body["custom_tags"] = self.custom_tags + if self.effective_budget_policy_id is not None: + body["effective_budget_policy_id"] = self.effective_budget_policy_id if self.endpoint_status: body["endpoint_status"] = self.endpoint_status if self.endpoint_type is not None: @@ -661,6 +647,8 @@ def from_dict(cls, d: Dict[str, Any]) -> EndpointInfo: return cls( creation_timestamp=d.get("creation_timestamp", None), creator=d.get("creator", None), + 
custom_tags=_repeated_dict(d, "custom_tags", CustomTag), + effective_budget_policy_id=d.get("effective_budget_policy_id", None), endpoint_status=_from_dict(d, "endpoint_status", EndpointStatus), endpoint_type=_enum(d, "endpoint_type", EndpointType), id=d.get("id", None), @@ -756,7 +744,14 @@ def from_dict(cls, d: Dict[str, Any]) -> ListEndpointResponse: @dataclass class ListValue: + """copied from proto3 / Google Well Known Types, source: + https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto + `ListValue` is a wrapper around a repeated field of values. + + The JSON representation for `ListValue` is JSON array.""" + values: Optional[List[Value]] = None + """Repeated field of dynamically typed values.""" def as_dict(self) -> dict: """Serializes the ListValue into a dictionary suitable for use as a JSON request body.""" @@ -856,12 +851,11 @@ class MiniVectorIndex: """Name of the endpoint associated with the index""" index_type: Optional[VectorIndexType] = None - """There are 2 types of Vector Search indexes: - - - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, automatically and - incrementally updating the index as the underlying data in the Delta Table changes. - - `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through - our REST and SDK APIs. With this model, the user manages index updates.""" + """There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that automatically syncs + with a source Delta Table, automatically and incrementally updating the index as the underlying + data in the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and write + of vectors and metadata through our REST and SDK APIs. With this model, the user manages index + updates.""" name: Optional[str] = None """Name of the index""" @@ -911,14 +905,69 @@ def from_dict(cls, d: Dict[str, Any]) -> MiniVectorIndex: ) -class PipelineType(Enum): - """Pipeline execution mode. +@dataclass +class PatchEndpointBudgetPolicyRequest: + budget_policy_id: str + """The budget policy id to be applied""" + + endpoint_name: Optional[str] = None + """Name of the vector search endpoint""" + + def as_dict(self) -> dict: + """Serializes the PatchEndpointBudgetPolicyRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.budget_policy_id is not None: + body["budget_policy_id"] = self.budget_policy_id + if self.endpoint_name is not None: + body["endpoint_name"] = self.endpoint_name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the PatchEndpointBudgetPolicyRequest into a shallow dictionary of its immediate attributes.""" + body = {} + if self.budget_policy_id is not None: + body["budget_policy_id"] = self.budget_policy_id + if self.endpoint_name is not None: + body["endpoint_name"] = self.endpoint_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> PatchEndpointBudgetPolicyRequest: + """Deserializes the PatchEndpointBudgetPolicyRequest from a dictionary.""" + return cls(budget_policy_id=d.get("budget_policy_id", None), endpoint_name=d.get("endpoint_name", None)) + - - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system stops processing - after successfully refreshing the source table in the pipeline once, ensuring the table is - updated based on the data available when the update started. 
- `CONTINUOUS`: If the pipeline - uses continuous execution, the pipeline processes new data as it arrives in the source table to - keep vector index fresh.""" +@dataclass +class PatchEndpointBudgetPolicyResponse: + effective_budget_policy_id: Optional[str] = None + """The budget policy applied to the vector search endpoint.""" + + def as_dict(self) -> dict: + """Serializes the PatchEndpointBudgetPolicyResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.effective_budget_policy_id is not None: + body["effective_budget_policy_id"] = self.effective_budget_policy_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the PatchEndpointBudgetPolicyResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.effective_budget_policy_id is not None: + body["effective_budget_policy_id"] = self.effective_budget_policy_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> PatchEndpointBudgetPolicyResponse: + """Deserializes the PatchEndpointBudgetPolicyResponse from a dictionary.""" + return cls(effective_budget_policy_id=d.get("effective_budget_policy_id", None)) + + +class PipelineType(Enum): + """Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the triggered execution mode, the + system stops processing after successfully refreshing the source table in the pipeline once, + ensuring the table is updated based on the data available when the update started. - + `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline processes new data as it + arrives in the source table to keep vector index fresh.""" CONTINUOUS = "CONTINUOUS" TRIGGERED = "TRIGGERED" @@ -980,9 +1029,11 @@ class QueryVectorIndexRequest: filters_json: Optional[str] = None """JSON string representing query filters. - Example filters: - `{"id <": 5}`: Filter for id less than 5. - `{"id >": 5}`: Filter for id - greater than 5. - `{"id <=": 5}`: Filter for id less than equal to 5. - `{"id >=": 5}`: Filter - for id greater than equal to 5. - `{"id": 5}`: Filter for id equal to 5.""" + Example filters: + + - `{"id <": 5}`: Filter for id less than 5. - `{"id >": 5}`: Filter for id greater than 5. - + `{"id <=": 5}`: Filter for id less than equal to 5. - `{"id >=": 5}`: Filter for id greater than + equal to 5. 
- `{"id": 5}`: Filter for id equal to 5.""" index_name: Optional[str] = None """Name of the vector index to query.""" @@ -1114,7 +1165,7 @@ def from_dict(cls, d: Dict[str, Any]) -> QueryVectorIndexResponse: class ResultData: """Data returned in the query result.""" - data_array: Optional[List[List[str]]] = None + data_array: Optional[List[ListValue]] = None """Data rows returned in the query.""" row_count: Optional[int] = None @@ -1124,7 +1175,7 @@ def as_dict(self) -> dict: """Serializes the ResultData into a dictionary suitable for use as a JSON request body.""" body = {} if self.data_array: - body["data_array"] = [v for v in self.data_array] + body["data_array"] = [v.as_dict() for v in self.data_array] if self.row_count is not None: body["row_count"] = self.row_count return body @@ -1141,7 +1192,7 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> ResultData: """Deserializes the ResultData from a dictionary.""" - return cls(data_array=d.get("data_array", None), row_count=d.get("row_count", None)) + return cls(data_array=_repeated_dict(d, "data_array", ListValue), row_count=d.get("row_count", None)) @dataclass @@ -1180,8 +1231,6 @@ def from_dict(cls, d: Dict[str, Any]) -> ResultManifest: @dataclass class ScanVectorIndexRequest: - """Request payload for scanning data from a vector index.""" - index_name: Optional[str] = None """Name of the vector index to scan.""" @@ -1259,6 +1308,15 @@ def from_dict(cls, d: Dict[str, Any]) -> ScanVectorIndexResponse: @dataclass class Struct: + """copied from proto3 / Google Well Known Types, source: + https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto + `Struct` represents a structured data value, consisting of fields which map to dynamically typed + values. In some languages, `Struct` might be supported by a native representation. For example, + in scripting languages like JS a struct is represented as an object. The details of that + representation are described together with the proto support for the language. 
+ + The JSON representation for `Struct` is JSON object.""" + fields: Optional[List[MapStringValueEntry]] = None """Data entry, corresponding to a row in a vector index.""" @@ -1301,9 +1359,71 @@ def from_dict(cls, d: Dict[str, Any]) -> SyncIndexResponse: @dataclass -class UpsertDataResult: - """Result of the upsert or delete operation.""" +class UpdateEndpointCustomTagsRequest: + custom_tags: List[CustomTag] + """The new custom tags for the vector search endpoint""" + + endpoint_name: Optional[str] = None + """Name of the vector search endpoint""" + + def as_dict(self) -> dict: + """Serializes the UpdateEndpointCustomTagsRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.custom_tags: + body["custom_tags"] = [v.as_dict() for v in self.custom_tags] + if self.endpoint_name is not None: + body["endpoint_name"] = self.endpoint_name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdateEndpointCustomTagsRequest into a shallow dictionary of its immediate attributes.""" + body = {} + if self.custom_tags: + body["custom_tags"] = self.custom_tags + if self.endpoint_name is not None: + body["endpoint_name"] = self.endpoint_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdateEndpointCustomTagsRequest: + """Deserializes the UpdateEndpointCustomTagsRequest from a dictionary.""" + return cls(custom_tags=_repeated_dict(d, "custom_tags", CustomTag), endpoint_name=d.get("endpoint_name", None)) + + +@dataclass +class UpdateEndpointCustomTagsResponse: + custom_tags: Optional[List[CustomTag]] = None + """All the custom tags that are applied to the vector search endpoint.""" + + name: Optional[str] = None + """The name of the vector search endpoint whose custom tags were updated.""" + + def as_dict(self) -> dict: + """Serializes the UpdateEndpointCustomTagsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.custom_tags: + body["custom_tags"] = [v.as_dict() for v in self.custom_tags] + if self.name is not None: + body["name"] = self.name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdateEndpointCustomTagsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.custom_tags: + body["custom_tags"] = self.custom_tags + if self.name is not None: + body["name"] = self.name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdateEndpointCustomTagsResponse: + """Deserializes the UpdateEndpointCustomTagsResponse from a dictionary.""" + return cls(custom_tags=_repeated_dict(d, "custom_tags", CustomTag), name=d.get("name", None)) + +@dataclass +class UpsertDataResult: failed_primary_keys: Optional[List[str]] = None """List of primary keys for rows that failed to process.""" @@ -1337,7 +1457,6 @@ def from_dict(cls, d: Dict[str, Any]) -> UpsertDataResult: class UpsertDataStatus(Enum): - """Status of the upsert operation.""" FAILURE = "FAILURE" PARTIAL_SUCCESS = "PARTIAL_SUCCESS" @@ -1346,8 +1465,6 @@ class UpsertDataStatus(Enum): @dataclass class UpsertDataVectorIndexRequest: - """Request payload for upserting data into a vector index.""" - inputs_json: str """JSON string representing the data to be upserted.""" @@ -1380,8 +1497,6 @@ def from_dict(cls, d: Dict[str, Any]) -> UpsertDataVectorIndexRequest: @dataclass class UpsertDataVectorIndexResponse: - """Response to an upsert data vector index request.""" - result: Optional[UpsertDataResult] = None """Result of the upsert or delete operation.""" @@ 
-1417,14 +1532,25 @@ class Value: bool_value: Optional[bool] = None list_value: Optional[ListValue] = None - - null_value: Optional[str] = None + """copied from proto3 / Google Well Known Types, source: + https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto + `ListValue` is a wrapper around a repeated field of values. + + The JSON representation for `ListValue` is JSON array.""" number_value: Optional[float] = None string_value: Optional[str] = None struct_value: Optional[Struct] = None + """copied from proto3 / Google Well Known Types, source: + https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto + `Struct` represents a structured data value, consisting of fields which map to dynamically typed + values. In some languages, `Struct` might be supported by a native representation. For example, + in scripting languages like JS a struct is represented as an object. The details of that + representation are described together with the proto support for the language. + + The JSON representation for `Struct` is JSON object.""" def as_dict(self) -> dict: """Serializes the Value into a dictionary suitable for use as a JSON request body.""" @@ -1433,8 +1559,6 @@ def as_dict(self) -> dict: body["bool_value"] = self.bool_value if self.list_value: body["list_value"] = self.list_value.as_dict() - if self.null_value is not None: - body["null_value"] = self.null_value if self.number_value is not None: body["number_value"] = self.number_value if self.string_value is not None: @@ -1450,8 +1574,6 @@ def as_shallow_dict(self) -> dict: body["bool_value"] = self.bool_value if self.list_value: body["list_value"] = self.list_value - if self.null_value is not None: - body["null_value"] = self.null_value if self.number_value is not None: body["number_value"] = self.number_value if self.string_value is not None: @@ -1466,7 +1588,6 @@ def from_dict(cls, d: Dict[str, Any]) -> Value: return cls( bool_value=d.get("bool_value", None), list_value=_from_dict(d, "list_value", ListValue), - null_value=d.get("null_value", None), number_value=d.get("number_value", None), string_value=d.get("string_value", None), struct_value=_from_dict(d, "struct_value", Struct), @@ -1486,12 +1607,11 @@ class VectorIndex: """Name of the endpoint associated with the index""" index_type: Optional[VectorIndexType] = None - """There are 2 types of Vector Search indexes: - - - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, automatically and - incrementally updating the index as the underlying data in the Delta Table changes. - - `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through - our REST and SDK APIs. With this model, the user manages index updates.""" + """There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that automatically syncs + with a source Delta Table, automatically and incrementally updating the index as the underlying + data in the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and write + of vectors and metadata through our REST and SDK APIs. 
With this model, the user manages index + updates.""" name: Optional[str] = None """Name of the index""" @@ -1610,12 +1730,11 @@ def from_dict(cls, d: Dict[str, Any]) -> VectorIndexStatus: class VectorIndexType(Enum): - """There are 2 types of Vector Search indexes: - - - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, automatically and - incrementally updating the index as the underlying data in the Delta Table changes. - - `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through - our REST and SDK APIs. With this model, the user manages index updates.""" + """There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that automatically syncs + with a source Delta Table, automatically and incrementally updating the index as the underlying + data in the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and write + of vectors and metadata through our REST and SDK APIs. With this model, the user manages index + updates.""" DELTA_SYNC = "DELTA_SYNC" DIRECT_ACCESS = "DIRECT_ACCESS" @@ -1661,21 +1780,27 @@ def wait_get_endpoint_vector_search_endpoint_online( attempt += 1 raise TimeoutError(f"timed out after {timeout}: {status_message}") - def create_endpoint(self, name: str, endpoint_type: EndpointType) -> Wait[EndpointInfo]: + def create_endpoint( + self, name: str, endpoint_type: EndpointType, *, budget_policy_id: Optional[str] = None + ) -> Wait[EndpointInfo]: """Create an endpoint. Create a new endpoint. :param name: str - Name of endpoint + Name of the vector search endpoint :param endpoint_type: :class:`EndpointType` - Type of endpoint. + Type of endpoint + :param budget_policy_id: str (optional) + The budget policy id to be applied :returns: Long-running operation waiter for :class:`EndpointInfo`. See :method:wait_get_endpoint_vector_search_endpoint_online for more details. """ body = {} + if budget_policy_id is not None: + body["budget_policy_id"] = budget_policy_id if endpoint_type is not None: body["endpoint_type"] = endpoint_type.value if name is not None: @@ -1693,26 +1818,39 @@ def create_endpoint(self, name: str, endpoint_type: EndpointType) -> Wait[Endpoi ) def create_endpoint_and_wait( - self, name: str, endpoint_type: EndpointType, timeout=timedelta(minutes=20) + self, + name: str, + endpoint_type: EndpointType, + *, + budget_policy_id: Optional[str] = None, + timeout=timedelta(minutes=20), ) -> EndpointInfo: - return self.create_endpoint(endpoint_type=endpoint_type, name=name).result(timeout=timeout) + return self.create_endpoint(budget_policy_id=budget_policy_id, endpoint_type=endpoint_type, name=name).result( + timeout=timeout + ) def delete_endpoint(self, endpoint_name: str): """Delete an endpoint. + Delete a vector search endpoint. + :param endpoint_name: str - Name of the endpoint + Name of the vector search endpoint """ - headers = {} + headers = { + "Accept": "application/json", + } self._api.do("DELETE", f"/api/2.0/vector-search/endpoints/{endpoint_name}", headers=headers) def get_endpoint(self, endpoint_name: str) -> EndpointInfo: """Get an endpoint. + Get details for a single vector search endpoint. + :param endpoint_name: str Name of the endpoint @@ -1729,6 +1867,8 @@ def get_endpoint(self, endpoint_name: str) -> EndpointInfo: def list_endpoints(self, *, page_token: Optional[str] = None) -> Iterator[EndpointInfo]: """List all endpoints. + List all vector search endpoints in the workspace. 
+ :param page_token: str (optional) Token for pagination @@ -1751,14 +1891,66 @@ def list_endpoints(self, *, page_token: Optional[str] = None) -> Iterator[Endpoi return query["page_token"] = json["next_page_token"] + def update_endpoint_budget_policy( + self, endpoint_name: str, budget_policy_id: str + ) -> PatchEndpointBudgetPolicyResponse: + """Update the budget policy of an endpoint. + + Update the budget policy of an endpoint + + :param endpoint_name: str + Name of the vector search endpoint + :param budget_policy_id: str + The budget policy id to be applied + + :returns: :class:`PatchEndpointBudgetPolicyResponse` + """ + body = {} + if budget_policy_id is not None: + body["budget_policy_id"] = budget_policy_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", f"/api/2.0/vector-search/endpoints/{endpoint_name}/budget-policy", body=body, headers=headers + ) + return PatchEndpointBudgetPolicyResponse.from_dict(res) + + def update_endpoint_custom_tags( + self, endpoint_name: str, custom_tags: List[CustomTag] + ) -> UpdateEndpointCustomTagsResponse: + """Update the custom tags of an endpoint. + + :param endpoint_name: str + Name of the vector search endpoint + :param custom_tags: List[:class:`CustomTag`] + The new custom tags for the vector search endpoint + + :returns: :class:`UpdateEndpointCustomTagsResponse` + """ + body = {} + if custom_tags is not None: + body["custom_tags"] = [v.as_dict() for v in custom_tags] + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", f"/api/2.0/vector-search/endpoints/{endpoint_name}/tags", body=body, headers=headers + ) + return UpdateEndpointCustomTagsResponse.from_dict(res) + class VectorSearchIndexesAPI: """**Index**: An efficient representation of your embedding vectors that supports real-time and efficient approximate nearest neighbor (ANN) search queries. - There are 2 types of Vector Search indexes: * **Delta Sync Index**: An index that automatically syncs with + There are 2 types of Vector Search indexes: - **Delta Sync Index**: An index that automatically syncs with a source Delta Table, automatically and incrementally updating the index as the underlying data in the - Delta Table changes. * **Direct Vector Access Index**: An index that supports direct read and write of + Delta Table changes. - **Direct Vector Access Index**: An index that supports direct read and write of vectors and metadata through our REST and SDK APIs. With this model, the user manages index updates.""" def __init__(self, api_client): @@ -1773,7 +1965,7 @@ def create_index( *, delta_sync_index_spec: Optional[DeltaSyncVectorIndexSpecRequest] = None, direct_access_index_spec: Optional[DirectAccessVectorIndexSpec] = None, - ) -> CreateVectorIndexResponse: + ) -> VectorIndex: """Create an index. Create a new index. @@ -1785,18 +1977,16 @@ def create_index( :param primary_key: str Primary key of the index :param index_type: :class:`VectorIndexType` - There are 2 types of Vector Search indexes: - - - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, automatically and - incrementally updating the index as the underlying data in the Delta Table changes. - - `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through our - REST and SDK APIs. With this model, the user manages index updates. 
+ There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that automatically syncs with a + source Delta Table, automatically and incrementally updating the index as the underlying data in the + Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and write of vectors and + metadata through our REST and SDK APIs. With this model, the user manages index updates. :param delta_sync_index_spec: :class:`DeltaSyncVectorIndexSpecRequest` (optional) Specification for Delta Sync Index. Required if `index_type` is `DELTA_SYNC`. :param direct_access_index_spec: :class:`DirectAccessVectorIndexSpec` (optional) Specification for Direct Vector Access Index. Required if `index_type` is `DIRECT_ACCESS`. - :returns: :class:`CreateVectorIndexResponse` + :returns: :class:`VectorIndex` """ body = {} if delta_sync_index_spec is not None: @@ -1817,7 +2007,7 @@ def create_index( } res = self._api.do("POST", "/api/2.0/vector-search/indexes", body=body, headers=headers) - return CreateVectorIndexResponse.from_dict(res) + return VectorIndex.from_dict(res) def delete_data_vector_index(self, index_name: str, primary_keys: List[str]) -> DeleteDataVectorIndexResponse: """Delete data from index. @@ -1831,16 +2021,16 @@ def delete_data_vector_index(self, index_name: str, primary_keys: List[str]) -> :returns: :class:`DeleteDataVectorIndexResponse` """ - body = {} + + query = {} if primary_keys is not None: - body["primary_keys"] = [v for v in primary_keys] + query["primary_keys"] = [v for v in primary_keys] headers = { "Accept": "application/json", - "Content-Type": "application/json", } res = self._api.do( - "POST", f"/api/2.0/vector-search/indexes/{index_name}/delete-data", body=body, headers=headers + "DELETE", f"/api/2.0/vector-search/indexes/{index_name}/delete-data", query=query, headers=headers ) return DeleteDataVectorIndexResponse.from_dict(res) @@ -1855,7 +2045,9 @@ def delete_index(self, index_name: str): """ - headers = {} + headers = { + "Accept": "application/json", + } self._api.do("DELETE", f"/api/2.0/vector-search/indexes/{index_name}", headers=headers) @@ -1934,9 +2126,11 @@ def query_index( :param filters_json: str (optional) JSON string representing query filters. - Example filters: - `{"id <": 5}`: Filter for id less than 5. - `{"id >": 5}`: Filter for id greater - than 5. - `{"id <=": 5}`: Filter for id less than equal to 5. - `{"id >=": 5}`: Filter for id - greater than equal to 5. - `{"id": 5}`: Filter for id equal to 5. + Example filters: + + - `{"id <": 5}`: Filter for id less than 5. - `{"id >": 5}`: Filter for id greater than 5. - `{"id + <=": 5}`: Filter for id less than equal to 5. - `{"id >=": 5}`: Filter for id greater than equal to + 5. - `{"id": 5}`: Filter for id equal to 5. :param num_results: int (optional) Number of results to return. Defaults to 10. :param query_text: str (optional) @@ -2049,7 +2243,9 @@ def sync_index(self, index_name: str): """ - headers = {} + headers = { + "Accept": "application/json", + } self._api.do("POST", f"/api/2.0/vector-search/indexes/{index_name}/sync", headers=headers) diff --git a/docs/account/billing/budget_policy.rst b/docs/account/billing/budget_policy.rst index e58364d07..d77eaa4a2 100644 --- a/docs/account/billing/budget_policy.rst +++ b/docs/account/billing/budget_policy.rst @@ -70,7 +70,7 @@ :returns: Iterator over :class:`BudgetPolicy` - .. py:method:: update(policy_id: str [, limit_config: Optional[LimitConfig], policy: Optional[BudgetPolicy]]) -> BudgetPolicy + .. 
py:method:: update(policy_id: str, policy: BudgetPolicy [, limit_config: Optional[LimitConfig]]) -> BudgetPolicy Update a budget policy. @@ -78,10 +78,10 @@ :param policy_id: str The Id of the policy. This field is generated by Databricks and globally unique. + :param policy: :class:`BudgetPolicy` + Contains the BudgetPolicy details. :param limit_config: :class:`LimitConfig` (optional) DEPRECATED. This is redundant field as LimitConfig is part of the BudgetPolicy - :param policy: :class:`BudgetPolicy` (optional) - Contains the BudgetPolicy details. :returns: :class:`BudgetPolicy` \ No newline at end of file diff --git a/docs/account/iam/service_principals.rst b/docs/account/iam/service_principals.rst index 302cf5f79..e0fd8577a 100644 --- a/docs/account/iam/service_principals.rst +++ b/docs/account/iam/service_principals.rst @@ -23,7 +23,10 @@ a = AccountClient() - spn = a.service_principals.create(display_name=f"sdk-{time.time_ns()}") + sp_create = a.service_principals.create(active=True, display_name=f"sdk-{time.time_ns()}") + + # cleanup + a.service_principals.delete(id=sp_create.id) Create a service principal. diff --git a/docs/account/oauth2/federation_policy.rst b/docs/account/oauth2/federation_policy.rst index 4f9db531d..a8957e5f2 100644 --- a/docs/account/oauth2/federation_policy.rst +++ b/docs/account/oauth2/federation_policy.rst @@ -45,11 +45,11 @@ [SCIM]: https://docs.databricks.com/admin/users-groups/scim/index.html - .. py:method:: create( [, policy: Optional[FederationPolicy], policy_id: Optional[str]]) -> FederationPolicy + .. py:method:: create(policy: FederationPolicy [, policy_id: Optional[str]]) -> FederationPolicy Create account federation policy. - :param policy: :class:`FederationPolicy` (optional) + :param policy: :class:`FederationPolicy` :param policy_id: str (optional) The identifier for the federation policy. The identifier must contain only lowercase alphanumeric characters, numbers, hyphens, and slashes. If unspecified, the id will be assigned by Databricks. @@ -87,13 +87,13 @@ :returns: Iterator over :class:`FederationPolicy` - .. py:method:: update(policy_id: str [, policy: Optional[FederationPolicy], update_mask: Optional[str]]) -> FederationPolicy + .. py:method:: update(policy_id: str, policy: FederationPolicy [, update_mask: Optional[str]]) -> FederationPolicy Update account federation policy. :param policy_id: str The identifier for the federation policy. - :param policy: :class:`FederationPolicy` (optional) + :param policy: :class:`FederationPolicy` :param update_mask: str (optional) The field mask specifies which fields of the policy to update. To specify multiple fields in the field mask, use comma as the separator (no space). The special value '*' indicates that all fields diff --git a/docs/account/oauth2/service_principal_federation_policy.rst b/docs/account/oauth2/service_principal_federation_policy.rst index be823b7a6..f3335d87a 100644 --- a/docs/account/oauth2/service_principal_federation_policy.rst +++ b/docs/account/oauth2/service_principal_federation_policy.rst @@ -45,13 +45,13 @@ You do not need to configure an OAuth application in Databricks to use token federation. - .. py:method:: create(service_principal_id: int [, policy: Optional[FederationPolicy], policy_id: Optional[str]]) -> FederationPolicy + .. py:method:: create(service_principal_id: int, policy: FederationPolicy [, policy_id: Optional[str]]) -> FederationPolicy Create service principal federation policy. 
:param service_principal_id: int The service principal id for the federation policy. - :param policy: :class:`FederationPolicy` (optional) + :param policy: :class:`FederationPolicy` :param policy_id: str (optional) The identifier for the federation policy. The identifier must contain only lowercase alphanumeric characters, numbers, hyphens, and slashes. If unspecified, the id will be assigned by Databricks. @@ -95,7 +95,7 @@ :returns: Iterator over :class:`FederationPolicy` - .. py:method:: update(service_principal_id: int, policy_id: str [, policy: Optional[FederationPolicy], update_mask: Optional[str]]) -> FederationPolicy + .. py:method:: update(service_principal_id: int, policy_id: str, policy: FederationPolicy [, update_mask: Optional[str]]) -> FederationPolicy Update service principal federation policy. @@ -103,7 +103,7 @@ The service principal id for the federation policy. :param policy_id: str The identifier for the federation policy. - :param policy: :class:`FederationPolicy` (optional) + :param policy: :class:`FederationPolicy` :param update_mask: str (optional) The field mask specifies which fields of the policy to update. To specify multiple fields in the field mask, use comma as the separator (no space). The special value '*' indicates that all fields diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index b8e144f8c..7b85e3be0 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -16,7 +16,6 @@ .. code-block:: - import os import time from databricks.sdk import AccountClient @@ -26,11 +25,8 @@ storage = a.storage.create( storage_configuration_name=f"sdk-{time.time_ns()}", - root_bucket_info=provisioning.RootBucketInfo(bucket_name=os.environ["TEST_ROOT_BUCKET"]), + root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"), ) - - # cleanup - a.storage.delete(storage_configuration_id=storage.storage_configuration_id) Create new storage configuration. diff --git a/docs/account/settings/network_connectivity.rst b/docs/account/settings/network_connectivity.rst index 179db3fef..2ed5c167a 100644 --- a/docs/account/settings/network_connectivity.rst +++ b/docs/account/settings/network_connectivity.rst @@ -5,24 +5,37 @@ .. py:class:: NetworkConnectivityAPI These APIs provide configurations for the network connectivity of your workspaces for serverless compute - resources. + resources. This API provides stable subnets for your workspace so that you can configure your firewalls on + your Azure Storage accounts to allow access from Databricks. You can also use the API to provision private + endpoints for Databricks to privately connect serverless compute resources to your Azure resources using + Azure Private Link. See [configure serverless secure connectivity]. - .. py:method:: create_network_connectivity_configuration(name: str, region: str) -> NetworkConnectivityConfiguration + [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security + + + .. py:method:: create_network_connectivity_configuration(network_connectivity_config: CreateNetworkConnectivityConfiguration) -> NetworkConnectivityConfiguration Create a network connectivity configuration. - :param name: str - The name of the network connectivity configuration. The name can contain alphanumeric characters, - hyphens, and underscores. The length must be between 3 and 30 characters. The name must match the - regular expression `^[0-9a-zA-Z-_]{3,30}$`. 
- :param region: str - The region for the network connectivity configuration. Only workspaces in the same region can be - attached to the network connectivity configuration. + Creates a network connectivity configuration (NCC), which provides stable Azure service subnets when + accessing your Azure Storage accounts. You can also use a network connectivity configuration to create + Databricks managed private endpoints so that Databricks serverless compute resources privately access + your resources. + + **IMPORTANT**: After you create the network connectivity configuration, you must assign one or more + workspaces to the new network connectivity configuration. You can share one network connectivity + configuration with multiple workspaces from the same Azure region within the same Databricks account. + See [configure serverless secure connectivity]. + + [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security + + :param network_connectivity_config: :class:`CreateNetworkConnectivityConfiguration` + Properties of the new network connectivity configuration. :returns: :class:`NetworkConnectivityConfiguration` - .. py:method:: create_private_endpoint_rule(network_connectivity_config_id: str, resource_id: str, group_id: CreatePrivateEndpointRuleRequestGroupId) -> NccAzurePrivateEndpointRule + .. py:method:: create_private_endpoint_rule(network_connectivity_config_id: str, private_endpoint_rule: CreatePrivateEndpointRule) -> NccAzurePrivateEndpointRule Create a private endpoint rule. @@ -37,12 +50,10 @@ [serverless private link]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security/serverless-private-link :param network_connectivity_config_id: str - Your Network Connectvity Configuration ID. - :param resource_id: str - The Azure resource ID of the target resource. - :param group_id: :class:`CreatePrivateEndpointRuleRequestGroupId` - The sub-resource type (group ID) of the target resource. Note that to connect to workspace root - storage (root DBFS), you need two endpoints, one for `blob` and one for `dfs`. + Your Network Connectivity Configuration ID. + :param private_endpoint_rule: :class:`CreatePrivateEndpointRule` + Properties of the new private endpoint rule. Note that you must approve the endpoint in Azure portal + after initialization. :returns: :class:`NccAzurePrivateEndpointRule` @@ -54,7 +65,7 @@ Deletes a network connectivity configuration. :param network_connectivity_config_id: str - Your Network Connectvity Configuration ID. + Your Network Connectivity Configuration ID. @@ -83,14 +94,14 @@ Gets a network connectivity configuration. :param network_connectivity_config_id: str - Your Network Connectvity Configuration ID. + Your Network Connectivity Configuration ID. :returns: :class:`NetworkConnectivityConfiguration` .. py:method:: get_private_endpoint_rule(network_connectivity_config_id: str, private_endpoint_rule_id: str) -> NccAzurePrivateEndpointRule - Get a private endpoint rule. + Gets a private endpoint rule. Gets the private endpoint rule. @@ -126,4 +137,28 @@ Pagination token to go to next page based on previous query. :returns: Iterator over :class:`NccAzurePrivateEndpointRule` + + + .. py:method:: update_ncc_azure_private_endpoint_rule_public(network_connectivity_config_id: str, private_endpoint_rule_id: str, private_endpoint_rule: UpdatePrivateEndpointRule, update_mask: str) -> NccAzurePrivateEndpointRule + + Update a private endpoint rule. 
+ + Updates a private endpoint rule. Currently only a private endpoint rule to customer-managed resources + is allowed to be updated. + + :param network_connectivity_config_id: str + Your Network Connectivity Configuration ID. + :param private_endpoint_rule_id: str + Your private endpoint rule ID. + :param private_endpoint_rule: :class:`UpdatePrivateEndpointRule` + Properties of the new private endpoint rule. Note that you must approve the endpoint in Azure portal + after initialization. + :param update_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + :returns: :class:`NccAzurePrivateEndpointRule` \ No newline at end of file diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index 3deb151b3..98873f7fa 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -1041,6 +1041,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CREATE_CATALOG :value: "CREATE_CATALOG" + .. py:attribute:: CREATE_CLEAN_ROOM + :value: "CREATE_CLEAN_ROOM" + .. py:attribute:: CREATE_CONNECTION :value: "CREATE_CONNECTION" @@ -1101,6 +1104,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: EXECUTE :value: "EXECUTE" + .. py:attribute:: EXECUTE_CLEAN_ROOM_TASK + :value: "EXECUTE_CLEAN_ROOM_TASK" + .. py:attribute:: MANAGE :value: "MANAGE" @@ -1110,6 +1116,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: MODIFY :value: "MODIFY" + .. py:attribute:: MODIFY_CLEAN_ROOM + :value: "MODIFY_CLEAN_ROOM" + .. py:attribute:: READ_FILES :value: "READ_FILES" diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index 9dee4c09e..f7b55523b 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -968,6 +968,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: NodeTypeFlexibility + :members: + :undoc-members: + .. autoclass:: PendingInstanceError :members: :undoc-members: @@ -1292,6 +1296,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DISASTER_RECOVERY_REPLICATION :value: "DISASTER_RECOVERY_REPLICATION" + .. py:attribute:: DNS_RESOLUTION_ERROR + :value: "DNS_RESOLUTION_ERROR" + .. py:attribute:: DOCKER_CONTAINER_CREATION_EXCEPTION :value: "DOCKER_CONTAINER_CREATION_EXCEPTION" @@ -1346,6 +1353,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: GCP_API_RATE_QUOTA_EXCEEDED :value: "GCP_API_RATE_QUOTA_EXCEEDED" + .. py:attribute:: GCP_DENIED_BY_ORG_POLICY + :value: "GCP_DENIED_BY_ORG_POLICY" + .. py:attribute:: GCP_FORBIDDEN :value: "GCP_FORBIDDEN" diff --git a/docs/dbdataclasses/jobs.rst b/docs/dbdataclasses/jobs.rst index 37b803d83..2c4f4c09e 100644 --- a/docs/dbdataclasses/jobs.rst +++ b/docs/dbdataclasses/jobs.rst @@ -964,7 +964,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: TerminationCodeCode - The code indicates why the run was terminated. 
Additional codes might be introduced in future releases. * `SUCCESS`: The run was completed successfully. * `USER_CANCELED`: The run was successfully canceled during execution by a user. * `CANCELED`: The run was canceled during execution by the Databricks platform; for example, if the maximum run duration was exceeded. * `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the dependency type condition was not met, or there were no material tasks to execute. * `INTERNAL_ERROR`: The run encountered an unexpected error. Refer to the state message for further details. * `DRIVER_ERROR`: The run encountered an error while communicating with the Spark Driver. * `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state message for further details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an error when communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because it issued an invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The workspace has reached the quota for the maximum number of concurrent active runs. Consider scheduling the runs over a larger time frame. * `FEATURE_DISABLED`: The run failed because it tried to access a feature unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number of cluster creation, start, and upsize requests have exceeded the allotted rate limit. Consider spreading the run execution over a larger time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when accessing the customer blob storage. Refer to the state message for further details. * `RUN_EXECUTION_ERROR`: The run was completed with task failures. For more details, refer to the state message or run output. * `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while accessing a resource. Refer to the state message for further details. * `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the user-requested library. Refer to the state message for further details. The causes might include, but are not limited to: The provided library is invalid, there are insufficient permissions to install the library, and so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has already reached the maximum number of contexts it is configured to create. See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to the state message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size limit. + The code indicates why the run was terminated. Additional codes might be introduced in future releases. * `SUCCESS`: The run was completed successfully. * `USER_CANCELED`: The run was successfully canceled during execution by a user. * `CANCELED`: The run was canceled during execution by the Databricks platform; for example, if the maximum run duration was exceeded. * `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the dependency type condition was not met, or there were no material tasks to execute. 
* `INTERNAL_ERROR`: The run encountered an unexpected error. Refer to the state message for further details. * `DRIVER_ERROR`: The run encountered an error while communicating with the Spark Driver. * `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state message for further details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an error when communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because it issued an invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The workspace has reached the quota for the maximum number of concurrent active runs. Consider scheduling the runs over a larger time frame. * `FEATURE_DISABLED`: The run failed because it tried to access a feature unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number of cluster creation, start, and upsize requests have exceeded the allotted rate limit. Consider spreading the run execution over a larger time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when accessing the customer blob storage. Refer to the state message for further details. * `RUN_EXECUTION_ERROR`: The run was completed with task failures. For more details, refer to the state message or run output. * `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while accessing a resource. Refer to the state message for further details. * `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the user-requested library. Refer to the state message for further details. The causes might include, but are not limited to: The provided library is invalid, there are insufficient permissions to install the library, and so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has already reached the maximum number of contexts it is configured to create. See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to the state message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size limit. * `DISABLED`: The run was never executed because it was disabled explicitly by the user. [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now .. py:attribute:: BUDGET_POLICY_LIMIT_EXCEEDED @@ -982,6 +982,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CLUSTER_REQUEST_LIMIT_EXCEEDED :value: "CLUSTER_REQUEST_LIMIT_EXCEEDED" + .. py:attribute:: DISABLED + :value: "DISABLED" + .. py:attribute:: DRIVER_ERROR :value: "DRIVER_ERROR" diff --git a/docs/dbdataclasses/serving.rst b/docs/dbdataclasses/serving.rst index ed6c73ffb..8284a86f0 100644 --- a/docs/dbdataclasses/serving.rst +++ b/docs/dbdataclasses/serving.rst @@ -393,17 +393,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: ServedModelInputWorkloadSize - - .. py:attribute:: LARGE - :value: "LARGE" - - .. py:attribute:: MEDIUM - :value: "MEDIUM" - - .. py:attribute:: SMALL - :value: "SMALL" - .. 
py:class:: ServedModelInputWorkloadType Please keep this in sync with with workload types in InferenceEndpointEntities.scala diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index 9804ef91a..91b0f4669 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -151,6 +151,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: ITAR_EAR :value: "ITAR_EAR" + .. py:attribute:: K_FSI + :value: "K_FSI" + .. py:attribute:: NONE :value: "NONE" @@ -169,7 +172,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateNetworkConnectivityConfigRequest +.. autoclass:: CreateNetworkConnectivityConfiguration :members: :undoc-members: @@ -185,26 +188,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreatePrivateEndpointRuleRequest +.. autoclass:: CreatePrivateEndpointRule :members: :undoc-members: -.. py:class:: CreatePrivateEndpointRuleRequestGroupId - - The sub-resource type (group ID) of the target resource. Note that to connect to workspace root storage (root DBFS), you need two endpoints, one for `blob` and one for `dfs`. - - .. py:attribute:: BLOB - :value: "BLOB" - - .. py:attribute:: DFS - :value: "DFS" - - .. py:attribute:: MYSQL_SERVER - :value: "MYSQL_SERVER" - - .. py:attribute:: SQL_SERVER - :value: "SQL_SERVER" - .. autoclass:: CreateTokenRequest :members: :undoc-members: @@ -375,6 +362,13 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: GOOGLE_CLOUD_STORAGE :value: "GOOGLE_CLOUD_STORAGE" +.. py:class:: EgressResourceType + + The target resources that are supported by Network Connectivity Config. Note: some egress types can support general types that are not defined in EgressResourceType. E.g.: Azure private endpoint supports private link enabled Azure services. + + .. py:attribute:: AZURE_BLOB_STORAGE + :value: "AZURE_BLOB_STORAGE" + .. autoclass:: EmailConfig :members: :undoc-members: @@ -504,15 +498,15 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: NccAzurePrivateEndpointRuleConnectionState - The current status of this private endpoint. The private endpoint rules are effective only if the connection state is `ESTABLISHED`. Remember that you must approve new endpoints on your resources in the Azure portal before they take effect. - The possible values are: - INIT: (deprecated) The endpoint has been created and pending approval. - PENDING: The endpoint has been created and pending approval. - ESTABLISHED: The endpoint has been approved and is ready to use in your serverless compute resources. - REJECTED: Connection was rejected by the private link resource owner. - DISCONNECTED: Connection was removed by the private link resource owner, the private endpoint becomes informative and should be deleted for clean-up. - .. py:attribute:: DISCONNECTED :value: "DISCONNECTED" .. py:attribute:: ESTABLISHED :value: "ESTABLISHED" + .. py:attribute:: EXPIRED + :value: "EXPIRED" + .. py:attribute:: INIT :value: "INIT" @@ -522,22 +516,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: REJECTED :value: "REJECTED" -.. py:class:: NccAzurePrivateEndpointRuleGroupId - - The sub-resource type (group ID) of the target resource. 
Note that to connect to workspace root storage (root DBFS), you need two endpoints, one for `blob` and one for `dfs`. - - .. py:attribute:: BLOB - :value: "BLOB" - - .. py:attribute:: DFS - :value: "DFS" - - .. py:attribute:: MYSQL_SERVER - :value: "MYSQL_SERVER" - - .. py:attribute:: SQL_SERVER - :value: "SQL_SERVER" - .. autoclass:: NccAzureServiceEndpointRule :members: :undoc-members: @@ -762,6 +740,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdatePrivateEndpointRule + :members: + :undoc-members: + .. autoclass:: UpdateResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index fe84a3b89..ce015cf42 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -8,6 +8,32 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: Aggregation + + .. py:attribute:: AVG + :value: "AVG" + + .. py:attribute:: COUNT + :value: "COUNT" + + .. py:attribute:: COUNT_DISTINCT + :value: "COUNT_DISTINCT" + + .. py:attribute:: MAX + :value: "MAX" + + .. py:attribute:: MEDIAN + :value: "MEDIAN" + + .. py:attribute:: MIN + :value: "MIN" + + .. py:attribute:: STDDEV + :value: "STDDEV" + + .. py:attribute:: SUM + :value: "SUM" + .. autoclass:: Alert :members: :undoc-members: @@ -24,6 +50,22 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: AlertEvaluationState + + UNSPECIFIED - default unspecify value for proto enum, do not use it in the code UNKNOWN - alert not yet evaluated TRIGGERED - alert is triggered OK - alert is not triggered ERROR - alert evaluation failed + + .. py:attribute:: ERROR + :value: "ERROR" + + .. py:attribute:: OK + :value: "OK" + + .. py:attribute:: TRIGGERED + :value: "TRIGGERED" + + .. py:attribute:: UNKNOWN + :value: "UNKNOWN" + .. autoclass:: AlertOperandColumn :members: :undoc-members: @@ -87,6 +129,34 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: UNKNOWN :value: "UNKNOWN" +.. autoclass:: AlertV2 + :members: + :undoc-members: + +.. autoclass:: AlertV2Evaluation + :members: + :undoc-members: + +.. autoclass:: AlertV2Notification + :members: + :undoc-members: + +.. autoclass:: AlertV2Operand + :members: + :undoc-members: + +.. autoclass:: AlertV2OperandColumn + :members: + :undoc-members: + +.. autoclass:: AlertV2OperandValue + :members: + :undoc-members: + +.. autoclass:: AlertV2Subscription + :members: + :undoc-members: + .. autoclass:: BaseChunkInfo :members: :undoc-members: @@ -186,6 +256,32 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: USER_DEFINED_TYPE :value: "USER_DEFINED_TYPE" +.. py:class:: ComparisonOperator + + .. py:attribute:: EQUAL + :value: "EQUAL" + + .. py:attribute:: GREATER_THAN + :value: "GREATER_THAN" + + .. py:attribute:: GREATER_THAN_OR_EQUAL + :value: "GREATER_THAN_OR_EQUAL" + + .. py:attribute:: IS_NOT_NULL + :value: "IS_NOT_NULL" + + .. py:attribute:: IS_NULL + :value: "IS_NULL" + + .. py:attribute:: LESS_THAN + :value: "LESS_THAN" + + .. py:attribute:: LESS_THAN_OR_EQUAL + :value: "LESS_THAN_OR_EQUAL" + + .. py:attribute:: NOT_EQUAL + :value: "NOT_EQUAL" + .. autoclass:: CreateAlert :members: :undoc-members: @@ -198,6 +294,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. 
autoclass:: CreateAlertV2Request + :members: + :undoc-members: + .. autoclass:: CreateQueryRequest :members: :undoc-members: @@ -239,6 +339,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CronSchedule + :members: + :undoc-members: + .. autoclass:: Dashboard :members: :undoc-members: @@ -545,6 +649,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListAlertsV2Response + :members: + :undoc-members: + +.. autoclass:: ListAlertsV2ResponseAlert + :members: + :undoc-members: + .. py:class:: ListOrder .. py:attribute:: CREATED_AT @@ -869,6 +981,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: VIEWER :value: "VIEWER" +.. py:class:: SchedulePauseStatus + + .. py:attribute:: PAUSED + :value: "PAUSED" + + .. py:attribute:: UNPAUSED + :value: "UNPAUSED" + .. autoclass:: ServiceError :members: :undoc-members: @@ -1325,6 +1445,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdateAlertV2Request + :members: + :undoc-members: + .. autoclass:: UpdateQueryRequest :members: :undoc-members: diff --git a/docs/dbdataclasses/vectorsearch.rst b/docs/dbdataclasses/vectorsearch.rst index 179c4b895..5433f2673 100644 --- a/docs/dbdataclasses/vectorsearch.rst +++ b/docs/dbdataclasses/vectorsearch.rst @@ -16,7 +16,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateVectorIndexResponse +.. autoclass:: CustomTag :members: :undoc-members: @@ -26,8 +26,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: DeleteDataStatus - Status of the delete operation. - .. py:attribute:: FAILURE :value: "FAILURE" @@ -37,10 +35,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SUCCESS :value: "SUCCESS" -.. autoclass:: DeleteDataVectorIndexRequest - :members: - :undoc-members: - .. autoclass:: DeleteDataVectorIndexResponse :members: :undoc-members: @@ -121,10 +115,17 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: PatchEndpointBudgetPolicyRequest + :members: + :undoc-members: + +.. autoclass:: PatchEndpointBudgetPolicyResponse + :members: + :undoc-members: + .. py:class:: PipelineType - Pipeline execution mode. - - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system stops processing after successfully refreshing the source table in the pipeline once, ensuring the table is updated based on the data available when the update started. - `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline processes new data as it arrives in the source table to keep vector index fresh. + Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system stops processing after successfully refreshing the source table in the pipeline once, ensuring the table is updated based on the data available when the update started. - `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline processes new data as it arrives in the source table to keep vector index fresh. .. 
py:attribute:: CONTINUOUS :value: "CONTINUOUS" @@ -168,14 +169,20 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdateEndpointCustomTagsRequest + :members: + :undoc-members: + +.. autoclass:: UpdateEndpointCustomTagsResponse + :members: + :undoc-members: + .. autoclass:: UpsertDataResult :members: :undoc-members: .. py:class:: UpsertDataStatus - Status of the upsert operation. - .. py:attribute:: FAILURE :value: "FAILURE" @@ -207,8 +214,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: VectorIndexType - There are 2 types of Vector Search indexes: - - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, automatically and incrementally updating the index as the underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through our REST and SDK APIs. With this model, the user manages index updates. + There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, automatically and incrementally updating the index as the underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through our REST and SDK APIs. With this model, the user manages index updates. .. py:attribute:: DELTA_SYNC :value: "DELTA_SYNC" diff --git a/docs/workspace/apps/apps.rst b/docs/workspace/apps/apps.rst index dc3c8a8ef..be094be30 100644 --- a/docs/workspace/apps/apps.rst +++ b/docs/workspace/apps/apps.rst @@ -7,13 +7,13 @@ Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. - .. py:method:: create( [, app: Optional[App], no_compute: Optional[bool]]) -> Wait[App] + .. py:method:: create(app: App [, no_compute: Optional[bool]]) -> Wait[App] Create an app. Creates a new app. - :param app: :class:`App` (optional) + :param app: :class:`App` :param no_compute: bool (optional) If true, the app will not be started after creation. @@ -22,7 +22,7 @@ See :method:wait_get_app_active for more details. - .. py:method:: create_and_wait( [, app: Optional[App], no_compute: Optional[bool], timeout: datetime.timedelta = 0:20:00]) -> App + .. py:method:: create_and_wait(app: App [, no_compute: Optional[bool], timeout: datetime.timedelta = 0:20:00]) -> App .. py:method:: delete(name: str) -> App @@ -37,7 +37,7 @@ :returns: :class:`App` - .. py:method:: deploy(app_name: str [, app_deployment: Optional[AppDeployment]]) -> Wait[AppDeployment] + .. py:method:: deploy(app_name: str, app_deployment: AppDeployment) -> Wait[AppDeployment] Create an app deployment. @@ -45,14 +45,14 @@ :param app_name: str The name of the app. - :param app_deployment: :class:`AppDeployment` (optional) + :param app_deployment: :class:`AppDeployment` :returns: Long-running operation waiter for :class:`AppDeployment`. See :method:wait_get_deployment_app_succeeded for more details. - .. py:method:: deploy_and_wait(app_name: str [, app_deployment: Optional[AppDeployment], timeout: datetime.timedelta = 0:20:00]) -> AppDeployment + .. py:method:: deploy_and_wait(app_name: str, app_deployment: AppDeployment, timeout: datetime.timedelta = 0:20:00) -> AppDeployment .. py:method:: get(name: str) -> App @@ -183,7 +183,7 @@ .. py:method:: stop_and_wait(name: str, timeout: datetime.timedelta = 0:20:00) -> App - .. 
py:method:: update(name: str [, app: Optional[App]]) -> App + .. py:method:: update(name: str, app: App) -> App Update an app. @@ -192,7 +192,7 @@ :param name: str The name of the app. The name must contain only lowercase alphanumeric characters and hyphens. It must be unique within the workspace. - :param app: :class:`App` (optional) + :param app: :class:`App` :returns: :class:`App` diff --git a/docs/workspace/catalog/artifact_allowlists.rst b/docs/workspace/catalog/artifact_allowlists.rst index b86ef3bc0..f153dee79 100644 --- a/docs/workspace/catalog/artifact_allowlists.rst +++ b/docs/workspace/catalog/artifact_allowlists.rst @@ -20,7 +20,7 @@ :returns: :class:`ArtifactAllowlistInfo` - .. py:method:: update(artifact_type: ArtifactType, artifact_matchers: List[ArtifactMatcher]) -> ArtifactAllowlistInfo + .. py:method:: update(artifact_type: ArtifactType, artifact_matchers: List[ArtifactMatcher] [, created_at: Optional[int], created_by: Optional[str], metastore_id: Optional[str]]) -> ArtifactAllowlistInfo Set an artifact allowlist. @@ -32,6 +32,12 @@ The artifact type of the allowlist. :param artifact_matchers: List[:class:`ArtifactMatcher`] A list of allowed artifact match patterns. + :param created_at: int (optional) + Time at which this artifact allowlist was set, in epoch milliseconds. + :param created_by: str (optional) + Username of the user who set the artifact allowlist. + :param metastore_id: str (optional) + Unique identifier of parent metastore. :returns: :class:`ArtifactAllowlistInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index 02f9b8ccb..60959cad4 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -24,10 +24,10 @@ w = WorkspaceClient() - created_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") + created = w.catalogs.create(name=f"sdk-{time.time_ns()}") # cleanup - w.catalogs.delete(name=created_catalog.name, force=True) + w.catalogs.delete(name=created.name, force=True) Create a catalog. 
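A short usage note on the breaking signature changes in the ``apps.rst`` hunks above: the ``App`` payload is now a required argument to ``create``, ``deploy``, and ``update`` rather than an optional keyword. The sketch below is a minimal illustration of the new calling convention, assuming a hypothetical app named ``my-app``:

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.apps import App

    w = WorkspaceClient()

    # The App payload is now required (previously an optional keyword argument).
    app = w.apps.create(app=App(name="my-app")).result()

    # cleanup
    w.apps.delete(name=app.name)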
@@ -153,13 +153,12 @@ import time from databricks.sdk import WorkspaceClient - from databricks.sdk.service import catalog w = WorkspaceClient() created = w.catalogs.create(name=f"sdk-{time.time_ns()}") - _ = w.catalogs.update(name=created.name, isolation_mode=catalog.CatalogIsolationMode.ISOLATED) + _ = w.catalogs.update(name=created.name, comment="updated") # cleanup w.catalogs.delete(name=created.name, force=True) diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index 74d6e6935..980467306 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -30,20 +30,22 @@ w = WorkspaceClient() - credential = w.storage_credentials.create( + storage_credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + comment="created via SDK", ) - created = w.external_locations.create( + external_location = w.external_locations.create( name=f"sdk-{time.time_ns()}", - credential_name=credential.name, - url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', + credential_name=storage_credential.name, + comment="created via SDK", + url="s3://" + os.environ["TEST_BUCKET"] + "/" + f"sdk-{time.time_ns()}", ) # cleanup - w.storage_credentials.delete(delete=credential.name) - w.external_locations.delete(delete=created.name) + w.storage_credentials.delete(name=storage_credential.name) + w.external_locations.delete(name=external_location.name) Create an external location. @@ -187,24 +189,24 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', + url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), ) _ = w.external_locations.update( name=created.name, credential_name=credential.name, - url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', + url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), ) # cleanup - w.storage_credentials.delete(delete=credential.name) - w.external_locations.delete(delete=created.name) + w.storage_credentials.delete(name=credential.name) + w.external_locations.delete(name=created.name) Update an external location. diff --git a/docs/workspace/catalog/online_tables.rst b/docs/workspace/catalog/online_tables.rst index ce6ba50c3..898d00eb3 100644 --- a/docs/workspace/catalog/online_tables.rst +++ b/docs/workspace/catalog/online_tables.rst @@ -6,13 +6,13 @@ Online tables provide lower latency and higher QPS access to data from Delta tables. - .. py:method:: create( [, table: Optional[OnlineTable]]) -> Wait[OnlineTable] + .. py:method:: create(table: OnlineTable) -> Wait[OnlineTable] Create an Online Table. Create a new Online Table. - :param table: :class:`OnlineTable` (optional) + :param table: :class:`OnlineTable` Online Table information. :returns: @@ -20,7 +20,7 @@ See :method:wait_get_online_table_active for more details. - .. py:method:: create_and_wait( [, table: Optional[OnlineTable], timeout: datetime.timedelta = 0:20:00]) -> OnlineTable + .. 
py:method:: create_and_wait(table: OnlineTable, timeout: datetime.timedelta = 0:20:00) -> OnlineTable .. py:method:: delete(name: str) diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index 787320918..e199f7739 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -32,11 +32,11 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(delete=created.name) + w.storage_credentials.delete(name=created.name) Create a storage credential. @@ -96,13 +96,13 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) - by_name = w.storage_credentials.get(get=created.name) + by_name = w.storage_credentials.get(name=created.name) # cleanup - w.storage_credentials.delete(delete=created.name) + w.storage_credentials.delete(name=created.name) Get a credential. @@ -165,17 +165,17 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) _ = w.storage_credentials.update( name=created.name, comment=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(name=created.name) + w.storage_credentials.delete(delete=created.name) Update a credential. diff --git a/docs/workspace/cleanrooms/clean_room_assets.rst b/docs/workspace/cleanrooms/clean_room_assets.rst index 1317fa002..5021629c8 100644 --- a/docs/workspace/cleanrooms/clean_room_assets.rst +++ b/docs/workspace/cleanrooms/clean_room_assets.rst @@ -7,7 +7,7 @@ Clean room assets are data and code objects — Tables, volumes, and notebooks that are shared with the clean room. - .. py:method:: create(clean_room_name: str [, asset: Optional[CleanRoomAsset]]) -> CleanRoomAsset + .. py:method:: create(clean_room_name: str, asset: CleanRoomAsset) -> CleanRoomAsset Create an asset. @@ -18,7 +18,7 @@ :param clean_room_name: str Name of the clean room. - :param asset: :class:`CleanRoomAsset` (optional) + :param asset: :class:`CleanRoomAsset` Metadata of the clean room asset :returns: :class:`CleanRoomAsset` @@ -68,7 +68,7 @@ :returns: Iterator over :class:`CleanRoomAsset` - .. py:method:: update(clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str [, asset: Optional[CleanRoomAsset]]) -> CleanRoomAsset + .. py:method:: update(clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str, asset: CleanRoomAsset) -> CleanRoomAsset Update an asset. @@ -87,7 +87,7 @@ *shared_catalog*.*shared_schema*.*asset_name* For notebooks, the name is the notebook file name. 
- :param asset: :class:`CleanRoomAsset` (optional) + :param asset: :class:`CleanRoomAsset` Metadata of the clean room asset :returns: :class:`CleanRoomAsset` diff --git a/docs/workspace/cleanrooms/clean_rooms.rst b/docs/workspace/cleanrooms/clean_rooms.rst index 1792cd48d..45981bd9c 100644 --- a/docs/workspace/cleanrooms/clean_rooms.rst +++ b/docs/workspace/cleanrooms/clean_rooms.rst @@ -8,7 +8,7 @@ environment where multiple parties can work together on sensitive enterprise data without direct access to each other’s data. - .. py:method:: create( [, clean_room: Optional[CleanRoom]]) -> CleanRoom + .. py:method:: create(clean_room: CleanRoom) -> CleanRoom Create a clean room. @@ -20,12 +20,12 @@ The caller must be a metastore admin or have the **CREATE_CLEAN_ROOM** privilege on the metastore. - :param clean_room: :class:`CleanRoom` (optional) + :param clean_room: :class:`CleanRoom` :returns: :class:`CleanRoom` - .. py:method:: create_output_catalog(clean_room_name: str [, output_catalog: Optional[CleanRoomOutputCatalog]]) -> CreateCleanRoomOutputCatalogResponse + .. py:method:: create_output_catalog(clean_room_name: str, output_catalog: CleanRoomOutputCatalog) -> CreateCleanRoomOutputCatalogResponse Create an output catalog. @@ -33,7 +33,7 @@ :param clean_room_name: str Name of the clean room. - :param output_catalog: :class:`CleanRoomOutputCatalog` (optional) + :param output_catalog: :class:`CleanRoomOutputCatalog` :returns: :class:`CreateCleanRoomOutputCatalogResponse` diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst index ca496d3a0..6bc5faf56 100644 --- a/docs/workspace/compute/clusters.rst +++ b/docs/workspace/compute/clusters.rst @@ -583,7 +583,7 @@ Ensures that given cluster is running, regardless of the current state - .. py:method:: events(cluster_id: str [, end_time: Optional[int], event_types: Optional[List[EventType]], limit: Optional[int], offset: Optional[int], order: Optional[GetEventsOrder], start_time: Optional[int]]) -> Iterator[ClusterEvent] + .. py:method:: events(cluster_id: str [, end_time: Optional[int], event_types: Optional[List[EventType]], limit: Optional[int], offset: Optional[int], order: Optional[GetEventsOrder], page_size: Optional[int], page_token: Optional[str], start_time: Optional[int]]) -> Iterator[ClusterEvent] Usage: @@ -626,13 +626,25 @@ :param event_types: List[:class:`EventType`] (optional) An optional set of event types to filter on. If empty, all event types are returned. :param limit: int (optional) + Deprecated: use page_token in combination with page_size instead. + The maximum number of events to include in a page of events. Defaults to 50, and maximum allowed value is 500. :param offset: int (optional) + Deprecated: use page_token in combination with page_size instead. + The offset in the result set. Defaults to 0 (no offset). When an offset is specified and the results are requested in descending order, the end_time field is required. :param order: :class:`GetEventsOrder` (optional) The order to list events in; either "ASC" or "DESC". Defaults to "DESC". + :param page_size: int (optional) + The maximum number of events to include in a page of events. The server may further constrain the + maximum number of results returned in a single page. If the page_size is empty or 0, the server will + decide the number of results to be returned. The field has to be in the range [0,500]. If the value + is outside the range, the server enforces 0 or 500. 
+ :param page_token: str (optional) + Use next_page_token or prev_page_token returned from the previous request to list the next or + previous page of events respectively. If page_token is empty, the first page is returned. :param start_time: int (optional) The start time in epoch milliseconds. If empty, returns events starting from the beginning of time. @@ -713,10 +725,11 @@ .. code-block:: from databricks.sdk import WorkspaceClient + from databricks.sdk.service import compute w = WorkspaceClient() - nodes = w.clusters.list_node_types() + all = w.clusters.list(compute.ListClustersRequest()) List clusters. diff --git a/docs/workspace/compute/instance_pools.rst b/docs/workspace/compute/instance_pools.rst index 0614f2101..38ccb8b37 100644 --- a/docs/workspace/compute/instance_pools.rst +++ b/docs/workspace/compute/instance_pools.rst @@ -105,7 +105,7 @@ - .. py:method:: edit(instance_pool_id: str, instance_pool_name: str, node_type_id: str [, custom_tags: Optional[Dict[str, str]], idle_instance_autotermination_minutes: Optional[int], max_capacity: Optional[int], min_idle_instances: Optional[int]]) + .. py:method:: edit(instance_pool_id: str, instance_pool_name: str, node_type_id: str [, custom_tags: Optional[Dict[str, str]], idle_instance_autotermination_minutes: Optional[int], max_capacity: Optional[int], min_idle_instances: Optional[int], node_type_flexibility: Optional[NodeTypeFlexibility]]) Usage: @@ -162,6 +162,9 @@ upsize requests. :param min_idle_instances: int (optional) Minimum number of idle instances to keep in the instance pool + :param node_type_flexibility: :class:`NodeTypeFlexibility` (optional) + For Fleet-pool V2, this object contains the information about the alternate node type ids to use + when attempting to launch a cluster if the node type id is not available. diff --git a/docs/workspace/dashboards/genie.rst b/docs/workspace/dashboards/genie.rst index d11f98d99..60a1389f7 100644 --- a/docs/workspace/dashboards/genie.rst +++ b/docs/workspace/dashboards/genie.rst @@ -70,15 +70,14 @@ Generate full query result download. - Initiate full SQL query result download and obtain a `download_id` to track the download progress. - This call initiates a new SQL execution to generate the query result. The result is stored in an - external link can be retrieved using the [Get Download Full Query - Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks strongly recommends that - you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. See [Execute - Statement](:method:statementexecution/executestatement) for more details. + Initiates a new SQL execution and returns a `download_id` that you can use to track the progress of + the download. The query result is stored in an external link and can be retrieved using the [Get + Download Full Query Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks + strongly recommends that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. + See [Execute Statement](:method:statementexecution/executestatement) for more details. :param space_id: str - Space ID + Genie space ID :param conversation_id: str Conversation ID :param message_id: str @@ -94,17 +93,15 @@ Get download full query result. After [Generating a Full Query Result Download](:method:genie/getdownloadfullqueryresult) and - successfully receiving a `download_id`, use this API to Poll download progress and retrieve the SQL - query result external link(s) upon completion. 
Warning: Databricks strongly recommends that you - protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. When you use the - `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated, which can be used to download - the results directly from Amazon S3. As a short-lived access credential is embedded in this presigned - URL, you should protect the URL. Because presigned URLs are already generated with embedded temporary - access credentials, you must not set an Authorization header in the download requests. See [Execute + successfully receiving a `download_id`, use this API to poll the download progress. When the download + is complete, the API returns one or more external links to the query result files. Warning: Databricks + strongly recommends that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. + You must not set an Authorization header in download requests. When using the `EXTERNAL_LINKS` + disposition, Databricks returns presigned URLs that grant temporary access to data. See [Execute Statement](:method:statementexecution/executestatement) for more details. :param space_id: str - Space ID + Genie space ID :param conversation_id: str Conversation ID :param message_id: str diff --git a/docs/workspace/dashboards/lakeview.rst b/docs/workspace/dashboards/lakeview.rst index d9cdc1742..4becea5a7 100644 --- a/docs/workspace/dashboards/lakeview.rst +++ b/docs/workspace/dashboards/lakeview.rst @@ -7,29 +7,29 @@ These APIs provide specific management operations for Lakeview dashboards. Generic resource management can be done with Workspace API (import, export, get-status, list, delete). - .. py:method:: create( [, dashboard: Optional[Dashboard]]) -> Dashboard + .. py:method:: create(dashboard: Dashboard) -> Dashboard Create dashboard. Create a draft dashboard. - :param dashboard: :class:`Dashboard` (optional) + :param dashboard: :class:`Dashboard` :returns: :class:`Dashboard` - .. py:method:: create_schedule(dashboard_id: str [, schedule: Optional[Schedule]]) -> Schedule + .. py:method:: create_schedule(dashboard_id: str, schedule: Schedule) -> Schedule Create dashboard schedule. :param dashboard_id: str UUID identifying the dashboard to which the schedule belongs. - :param schedule: :class:`Schedule` (optional) + :param schedule: :class:`Schedule` :returns: :class:`Schedule` - .. py:method:: create_subscription(dashboard_id: str, schedule_id: str [, subscription: Optional[Subscription]]) -> Subscription + .. py:method:: create_subscription(dashboard_id: str, schedule_id: str, subscription: Subscription) -> Subscription Create schedule subscription. @@ -37,7 +37,7 @@ UUID identifying the dashboard to which the subscription belongs. :param schedule_id: str UUID identifying the schedule to which the subscription belongs. - :param subscription: :class:`Subscription` (optional) + :param subscription: :class:`Subscription` :returns: :class:`Subscription` @@ -234,7 +234,7 @@ - .. py:method:: update(dashboard_id: str [, dashboard: Optional[Dashboard]]) -> Dashboard + .. py:method:: update(dashboard_id: str, dashboard: Dashboard) -> Dashboard Update dashboard. @@ -242,12 +242,12 @@ :param dashboard_id: str UUID identifying the dashboard. - :param dashboard: :class:`Dashboard` (optional) + :param dashboard: :class:`Dashboard` :returns: :class:`Dashboard` - .. py:method:: update_schedule(dashboard_id: str, schedule_id: str [, schedule: Optional[Schedule]]) -> Schedule + .. 
py:method:: update_schedule(dashboard_id: str, schedule_id: str, schedule: Schedule) -> Schedule Update dashboard schedule. @@ -255,7 +255,7 @@ UUID identifying the dashboard to which the schedule belongs. :param schedule_id: str UUID identifying the schedule. - :param schedule: :class:`Schedule` (optional) + :param schedule: :class:`Schedule` :returns: :class:`Schedule` \ No newline at end of file diff --git a/docs/workspace/iam/current_user.rst b/docs/workspace/iam/current_user.rst index bf739025c..1df3adf9f 100644 --- a/docs/workspace/iam/current_user.rst +++ b/docs/workspace/iam/current_user.rst @@ -17,7 +17,7 @@ w = WorkspaceClient() - me = w.current_user.me() + me2 = w.current_user.me() Get current user info. diff --git a/docs/workspace/iam/service_principals.rst b/docs/workspace/iam/service_principals.rst index 40b65f6d5..74a498b00 100644 --- a/docs/workspace/iam/service_principals.rst +++ b/docs/workspace/iam/service_principals.rst @@ -20,13 +20,19 @@ import time from databricks.sdk import WorkspaceClient + from databricks.sdk.service import iam w = WorkspaceClient() - created = w.service_principals.create(display_name=f"sdk-{time.time_ns()}") + groups = w.groups.group_display_name_to_id_map(iam.ListGroupsRequest()) + + spn = w.service_principals.create( + display_name=f"sdk-{time.time_ns()}", + groups=[iam.ComplexValue(value=groups["admins"])], + ) # cleanup - w.service_principals.delete(id=created.id) + w.service_principals.delete(id=spn.id) Create a service principal. diff --git a/docs/workspace/iam/users.rst b/docs/workspace/iam/users.rst index f7e032324..0028cafe8 100644 --- a/docs/workspace/iam/users.rst +++ b/docs/workspace/iam/users.rst @@ -80,9 +80,12 @@ w = WorkspaceClient() - other_owner = w.users.create(user_name=f"sdk-{time.time_ns()}@example.com") + user = w.users.create( + display_name=f"sdk-{time.time_ns()}", + user_name=f"sdk-{time.time_ns()}@example.com", + ) - w.users.delete(id=other_owner.id) + w.users.delete(id=user.id) Delete a user. diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index 779cac797..27eed0a54 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -231,8 +231,8 @@ :param parameters: List[:class:`JobParameterDefinition`] (optional) Job-level parameter definitions :param performance_target: :class:`PerformanceTarget` (optional) - The performance mode on a serverless job. The performance target determines the level of compute - performance or cost-efficiency for the run. + The performance mode on a serverless job. This field determines the level of compute performance or + cost-efficiency for the run. * `STANDARD`: Enables cost-efficient execution of serverless workloads. 
* `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and optimized cluster @@ -367,23 +367,21 @@ w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"] ) - created_job = w.jobs.create( - name=f"sdk-{time.time_ns()}", + run = w.jobs.submit( + run_name=f"sdk-{time.time_ns()}", tasks=[ - jobs.Task( - description="test", + jobs.SubmitTask( existing_cluster_id=cluster_id, notebook_task=jobs.NotebookTask(notebook_path=notebook_path), - task_key="test", - timeout_seconds=0, + task_key=f"sdk-{time.time_ns()}", ) ], - ) + ).result() - by_id = w.jobs.get(job_id=created_job.job_id) + output = w.jobs.get_run_output(run_id=run.tasks[0].run_id) # cleanup - w.jobs.delete(job_id=created_job.job_id) + w.jobs.delete_run(run_id=run.run_id) Get a single job. @@ -670,7 +668,7 @@ :returns: Iterator over :class:`BaseRun` - .. py:method:: repair_run(run_id: int [, dbt_commands: Optional[List[str]], jar_params: Optional[List[str]], job_parameters: Optional[Dict[str, str]], latest_repair_id: Optional[int], notebook_params: Optional[Dict[str, str]], pipeline_params: Optional[PipelineParams], python_named_params: Optional[Dict[str, str]], python_params: Optional[List[str]], rerun_all_failed_tasks: Optional[bool], rerun_dependent_tasks: Optional[bool], rerun_tasks: Optional[List[str]], spark_submit_params: Optional[List[str]], sql_params: Optional[Dict[str, str]]]) -> Wait[Run] + .. py:method:: repair_run(run_id: int [, dbt_commands: Optional[List[str]], jar_params: Optional[List[str]], job_parameters: Optional[Dict[str, str]], latest_repair_id: Optional[int], notebook_params: Optional[Dict[str, str]], performance_target: Optional[PerformanceTarget], pipeline_params: Optional[PipelineParams], python_named_params: Optional[Dict[str, str]], python_params: Optional[List[str]], rerun_all_failed_tasks: Optional[bool], rerun_dependent_tasks: Optional[bool], rerun_tasks: Optional[List[str]], spark_submit_params: Optional[List[str]], sql_params: Optional[Dict[str, str]]]) -> Wait[Run] Usage: @@ -757,6 +755,14 @@ [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + :param performance_target: :class:`PerformanceTarget` (optional) + The performance mode on a serverless job. The performance target determines the level of compute + performance or cost-efficiency for the run. This field overrides the performance target defined on + the job level. + + * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`: + Prioritizes fast startup and execution times through rapid scaling and optimized cluster + performance. :param pipeline_params: :class:`PipelineParams` (optional) Controls whether the pipeline should perform a full refresh :param python_named_params: Dict[str,str] (optional) @@ -807,7 +813,7 @@ See :method:wait_get_run_job_terminated_or_skipped for more details. - .. 
py:method:: repair_run_and_wait(run_id: int [, dbt_commands: Optional[List[str]], jar_params: Optional[List[str]], job_parameters: Optional[Dict[str, str]], latest_repair_id: Optional[int], notebook_params: Optional[Dict[str, str]], pipeline_params: Optional[PipelineParams], python_named_params: Optional[Dict[str, str]], python_params: Optional[List[str]], rerun_all_failed_tasks: Optional[bool], rerun_dependent_tasks: Optional[bool], rerun_tasks: Optional[List[str]], spark_submit_params: Optional[List[str]], sql_params: Optional[Dict[str, str]], timeout: datetime.timedelta = 0:20:00]) -> Run + .. py:method:: repair_run_and_wait(run_id: int [, dbt_commands: Optional[List[str]], jar_params: Optional[List[str]], job_parameters: Optional[Dict[str, str]], latest_repair_id: Optional[int], notebook_params: Optional[Dict[str, str]], performance_target: Optional[PerformanceTarget], pipeline_params: Optional[PipelineParams], python_named_params: Optional[Dict[str, str]], python_params: Optional[List[str]], rerun_all_failed_tasks: Optional[bool], rerun_dependent_tasks: Optional[bool], rerun_tasks: Optional[List[str]], spark_submit_params: Optional[List[str]], sql_params: Optional[Dict[str, str]], timeout: datetime.timedelta = 0:20:00]) -> Run .. py:method:: reset(job_id: int, new_settings: JobSettings) diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst index 983f07bc7..1f6dcf4d2 100644 --- a/docs/workspace/ml/model_registry.rst +++ b/docs/workspace/ml/model_registry.rst @@ -127,7 +127,7 @@ model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") + created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") Create a model version. diff --git a/docs/workspace/settings/enable_export_notebook.rst b/docs/workspace/settings/enable_export_notebook.rst index 683b71596..30d17e46a 100644 --- a/docs/workspace/settings/enable_export_notebook.rst +++ b/docs/workspace/settings/enable_export_notebook.rst @@ -1,27 +1,27 @@ -``w.settings.enable_export_notebook``: Enable Export Notebook -============================================================= +``w.settings.enable_export_notebook``: Notebook and File exporting +================================================================== .. currentmodule:: databricks.sdk.service.settings .. py:class:: EnableExportNotebookAPI - Controls whether users can export notebooks and files from the Workspace. By default, this setting is + Controls whether users can export notebooks and files from the Workspace UI. By default, this setting is enabled. .. py:method:: get_enable_export_notebook() -> EnableExportNotebook - Get the Enable Export Notebook setting. + Get the Notebook and File exporting setting. - Gets the Enable Export Notebook setting. + Gets the Notebook and File exporting setting. :returns: :class:`EnableExportNotebook` .. py:method:: patch_enable_export_notebook(allow_missing: bool, setting: EnableExportNotebook, field_mask: str) -> EnableExportNotebook - Update the Enable Export Notebook setting. + Update the Notebook and File exporting setting. - Updates the Enable Export Notebook setting. The model follows eventual consistency, which means the - get after the update operation might receive stale values for some time. + Updates the Notebook and File exporting setting. 
The model follows eventual consistency, which means + the get after the update operation might receive stale values for some time. :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. diff --git a/docs/workspace/settings/enable_notebook_table_clipboard.rst b/docs/workspace/settings/enable_notebook_table_clipboard.rst index c69e69238..2a9c394a0 100644 --- a/docs/workspace/settings/enable_notebook_table_clipboard.rst +++ b/docs/workspace/settings/enable_notebook_table_clipboard.rst @@ -1,5 +1,5 @@ -``w.settings.enable_notebook_table_clipboard``: Enable Notebook Table Clipboard -=============================================================================== +``w.settings.enable_notebook_table_clipboard``: Results Table Clipboard features +================================================================================ .. currentmodule:: databricks.sdk.service.settings .. py:class:: EnableNotebookTableClipboardAPI @@ -9,18 +9,18 @@ .. py:method:: get_enable_notebook_table_clipboard() -> EnableNotebookTableClipboard - Get the Enable Notebook Table Clipboard setting. + Get the Results Table Clipboard features setting. - Gets the Enable Notebook Table Clipboard setting. + Gets the Results Table Clipboard features setting. :returns: :class:`EnableNotebookTableClipboard` .. py:method:: patch_enable_notebook_table_clipboard(allow_missing: bool, setting: EnableNotebookTableClipboard, field_mask: str) -> EnableNotebookTableClipboard - Update the Enable Notebook Table Clipboard setting. + Update the Results Table Clipboard features setting. - Updates the Enable Notebook Table Clipboard setting. The model follows eventual consistency, which + Updates the Results Table Clipboard features setting. The model follows eventual consistency, which means the get after the update operation might receive stale values for some time. :param allow_missing: bool diff --git a/docs/workspace/settings/enable_results_downloading.rst b/docs/workspace/settings/enable_results_downloading.rst index aadbdbc67..0769eca22 100644 --- a/docs/workspace/settings/enable_results_downloading.rst +++ b/docs/workspace/settings/enable_results_downloading.rst @@ -1,5 +1,5 @@ -``w.settings.enable_results_downloading``: Enable Results Downloading -===================================================================== +``w.settings.enable_results_downloading``: Notebook results download +==================================================================== .. currentmodule:: databricks.sdk.service.settings .. py:class:: EnableResultsDownloadingAPI @@ -8,19 +8,19 @@ .. py:method:: get_enable_results_downloading() -> EnableResultsDownloading - Get the Enable Results Downloading setting. + Get the Notebook results download setting. - Gets the Enable Results Downloading setting. + Gets the Notebook results download setting. :returns: :class:`EnableResultsDownloading` .. py:method:: patch_enable_results_downloading(allow_missing: bool, setting: EnableResultsDownloading, field_mask: str) -> EnableResultsDownloading - Update the Enable Results Downloading setting. + Update the Notebook results download setting. - Updates the Enable Results Downloading setting. The model follows eventual consistency, which means - the get after the update operation might receive stale values for some time. + Updates the Notebook results download setting. The model follows eventual consistency, which means the + get after the update operation might receive stale values for some time. 
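For context, the read-modify-write flow that this eventual-consistency note describes looks roughly like the sketch below. It is illustrative only: the `boolean_val` field on the setting and the `boolean_val.value` field-mask path are assumptions about the generated `EnableResultsDownloading` dataclass, not something this patch confirms.

    .. code-block::

        from databricks.sdk import WorkspaceClient
        from databricks.sdk.service import settings

        w = WorkspaceClient()

        # Read the current value first; the patch call below may return stale
        # data for a short time after an update, per the note above.
        current = w.settings.enable_results_downloading.get_enable_results_downloading()

        # Assumed field layout: EnableResultsDownloading.boolean_val is a
        # BooleanMessage and "boolean_val.value" is the matching field-mask path.
        updated = w.settings.enable_results_downloading.patch_enable_results_downloading(
            allow_missing=True,
            setting=settings.EnableResultsDownloading(
                boolean_val=settings.BooleanMessage(value=False)
            ),
            field_mask="boolean_val.value",
        )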
:param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. diff --git a/docs/workspace/settings/settings.rst b/docs/workspace/settings/settings.rst index fe5823dc7..bc9cb026f 100644 --- a/docs/workspace/settings/settings.rst +++ b/docs/workspace/settings/settings.rst @@ -64,7 +64,7 @@ .. py:property:: enable_export_notebook :type: EnableExportNotebookAPI - Controls whether users can export notebooks and files from the Workspace. By default, this setting is + Controls whether users can export notebooks and files from the Workspace UI. By default, this setting is enabled. .. py:property:: enable_notebook_table_clipboard diff --git a/docs/workspace/sharing/providers.rst b/docs/workspace/sharing/providers.rst index 263545400..d78dd62a0 100644 --- a/docs/workspace/sharing/providers.rst +++ b/docs/workspace/sharing/providers.rst @@ -108,12 +108,25 @@ .. code-block:: + import time + from databricks.sdk import WorkspaceClient - from databricks.sdk.service import sharing w = WorkspaceClient() - all = w.providers.list(sharing.ListProvidersRequest()) + public_share_recipient = """{ + "shareCredentialsVersion":1, + "bearerToken":"dapiabcdefghijklmonpqrstuvwxyz", + "endpoint":"https://sharing.delta.io/delta-sharing/" + } + """ + + created = w.providers.create(name=f"sdk-{time.time_ns()}", recipient_profile_str=public_share_recipient) + + shares = w.providers.list_shares(name=created.name) + + # cleanup + w.providers.delete(name=created.name) List providers. diff --git a/docs/workspace/sql/alerts.rst b/docs/workspace/sql/alerts.rst index 30195a424..11b4d7d65 100644 --- a/docs/workspace/sql/alerts.rst +++ b/docs/workspace/sql/alerts.rst @@ -9,7 +9,7 @@ notification destinations if the condition was met. Alerts can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create. - .. py:method:: create( [, alert: Optional[CreateAlertRequestAlert]]) -> Alert + .. py:method:: create( [, alert: Optional[CreateAlertRequestAlert], auto_resolve_display_name: Optional[bool]]) -> Alert Usage: @@ -55,6 +55,9 @@ Creates an alert. :param alert: :class:`CreateAlertRequestAlert` (optional) + :param auto_resolve_display_name: bool (optional) + If true, automatically resolve alert display name conflicts. Otherwise, fail the request if the + alert's display name conflicts with an existing alert's display name. :returns: :class:`Alert` diff --git a/docs/workspace/sql/alerts_v2.rst b/docs/workspace/sql/alerts_v2.rst new file mode 100644 index 000000000..96b004a7d --- /dev/null +++ b/docs/workspace/sql/alerts_v2.rst @@ -0,0 +1,77 @@ +``w.alerts_v2``: Alerts V2 +========================== +.. currentmodule:: databricks.sdk.service.sql + +.. py:class:: AlertsV2API + + TODO: Add description + + .. py:method:: create_alert( [, alert: Optional[AlertV2]]) -> AlertV2 + + Create an alert. + + Create Alert + + :param alert: :class:`AlertV2` (optional) + + :returns: :class:`AlertV2` + + + .. py:method:: get_alert(id: str) -> AlertV2 + + Get an alert. + + Gets an alert. + + :param id: str + + :returns: :class:`AlertV2` + + + .. py:method:: list_alerts( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[ListAlertsV2ResponseAlert] + + List alerts. + + Gets a list of alerts accessible to the user, ordered by creation time. + + :param page_size: int (optional) + :param page_token: str (optional) + + :returns: Iterator over :class:`ListAlertsV2ResponseAlert` + + + .. py:method:: trash_alert(id: str) + + Delete an alert. + + Moves an alert to the trash. 
Trashed alerts immediately disappear from list views, and can no longer + trigger. You can restore a trashed alert through the UI. A trashed alert is permanently deleted after + 30 days. + + :param id: str + + + + + .. py:method:: update_alert(id: str, update_mask: str [, alert: Optional[AlertV2]]) -> AlertV2 + + Update an alert. + + Update alert + + :param id: str + UUID identifying the alert. + :param update_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + :param alert: :class:`AlertV2` (optional) + + :returns: :class:`AlertV2` + \ No newline at end of file diff --git a/docs/workspace/sql/index.rst b/docs/workspace/sql/index.rst index bddb6a827..fff01dff3 100644 --- a/docs/workspace/sql/index.rst +++ b/docs/workspace/sql/index.rst @@ -9,6 +9,7 @@ Manage Databricks SQL assets, including warehouses, dashboards, queries and quer alerts alerts_legacy + alerts_v2 dashboard_widgets dashboards data_sources diff --git a/docs/workspace/sql/queries.rst b/docs/workspace/sql/queries.rst index 4cc9b5b52..14c0ef0b6 100644 --- a/docs/workspace/sql/queries.rst +++ b/docs/workspace/sql/queries.rst @@ -8,7 +8,7 @@ includes the target SQL warehouse, query text, name, description, tags, and parameters. Queries can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create. - .. py:method:: create( [, query: Optional[CreateQueryRequestQuery]]) -> Query + .. py:method:: create( [, auto_resolve_display_name: Optional[bool], query: Optional[CreateQueryRequestQuery]]) -> Query Usage: @@ -29,7 +29,7 @@ display_name=f"sdk-{time.time_ns()}", warehouse_id=srcs[0].warehouse_id, description="test query from Go SDK", - query_text="SHOW TABLES", + query_text="SELECT 1", ) ) @@ -40,6 +40,9 @@ Creates a query. + :param auto_resolve_display_name: bool (optional) + If true, automatically resolve query display name conflicts. Otherwise, fail the request if the + query's display name conflicts with an existing query's display name. :param query: :class:`CreateQueryRequestQuery` (optional) :returns: :class:`Query` diff --git a/docs/workspace/vectorsearch/vector_search_endpoints.rst b/docs/workspace/vectorsearch/vector_search_endpoints.rst index ea81ef3c4..50c335064 100644 --- a/docs/workspace/vectorsearch/vector_search_endpoints.rst +++ b/docs/workspace/vectorsearch/vector_search_endpoints.rst @@ -6,31 +6,35 @@ **Endpoint**: Represents the compute resources to host vector search indexes. - .. py:method:: create_endpoint(name: str, endpoint_type: EndpointType) -> Wait[EndpointInfo] + .. py:method:: create_endpoint(name: str, endpoint_type: EndpointType [, budget_policy_id: Optional[str]]) -> Wait[EndpointInfo] Create an endpoint. Create a new endpoint. :param name: str - Name of endpoint + Name of the vector search endpoint :param endpoint_type: :class:`EndpointType` - Type of endpoint. 
+ Type of endpoint + :param budget_policy_id: str (optional) + The budget policy id to be applied :returns: Long-running operation waiter for :class:`EndpointInfo`. See :method:wait_get_endpoint_vector_search_endpoint_online for more details. - .. py:method:: create_endpoint_and_wait(name: str, endpoint_type: EndpointType, timeout: datetime.timedelta = 0:20:00) -> EndpointInfo + .. py:method:: create_endpoint_and_wait(name: str, endpoint_type: EndpointType [, budget_policy_id: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> EndpointInfo .. py:method:: delete_endpoint(endpoint_name: str) Delete an endpoint. + Delete a vector search endpoint. + :param endpoint_name: str - Name of the endpoint + Name of the vector search endpoint @@ -39,6 +43,8 @@ Get an endpoint. + Get details for a single vector search endpoint. + :param endpoint_name: str Name of the endpoint @@ -49,10 +55,38 @@ List all endpoints. + List all vector search endpoints in the workspace. + :param page_token: str (optional) Token for pagination :returns: Iterator over :class:`EndpointInfo` + .. py:method:: update_endpoint_budget_policy(endpoint_name: str, budget_policy_id: str) -> PatchEndpointBudgetPolicyResponse + + Update the budget policy of an endpoint. + + Update the budget policy of an endpoint + + :param endpoint_name: str + Name of the vector search endpoint + :param budget_policy_id: str + The budget policy id to be applied + + :returns: :class:`PatchEndpointBudgetPolicyResponse` + + + .. py:method:: update_endpoint_custom_tags(endpoint_name: str, custom_tags: List[CustomTag]) -> UpdateEndpointCustomTagsResponse + + Update the custom tags of an endpoint. + + :param endpoint_name: str + Name of the vector search endpoint + :param custom_tags: List[:class:`CustomTag`] + The new custom tags for the vector search endpoint + + :returns: :class:`UpdateEndpointCustomTagsResponse` + + .. py:method:: wait_get_endpoint_vector_search_endpoint_online(endpoint_name: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[EndpointInfo], None]]) -> EndpointInfo diff --git a/docs/workspace/vectorsearch/vector_search_indexes.rst b/docs/workspace/vectorsearch/vector_search_indexes.rst index 7828d0ef2..90762b275 100644 --- a/docs/workspace/vectorsearch/vector_search_indexes.rst +++ b/docs/workspace/vectorsearch/vector_search_indexes.rst @@ -7,12 +7,12 @@ **Index**: An efficient representation of your embedding vectors that supports real-time and efficient approximate nearest neighbor (ANN) search queries. - There are 2 types of Vector Search indexes: * **Delta Sync Index**: An index that automatically syncs with + There are 2 types of Vector Search indexes: - **Delta Sync Index**: An index that automatically syncs with a source Delta Table, automatically and incrementally updating the index as the underlying data in the - Delta Table changes. * **Direct Vector Access Index**: An index that supports direct read and write of + Delta Table changes. - **Direct Vector Access Index**: An index that supports direct read and write of vectors and metadata through our REST and SDK APIs. With this model, the user manages index updates. - .. py:method:: create_index(name: str, endpoint_name: str, primary_key: str, index_type: VectorIndexType [, delta_sync_index_spec: Optional[DeltaSyncVectorIndexSpecRequest], direct_access_index_spec: Optional[DirectAccessVectorIndexSpec]]) -> CreateVectorIndexResponse + .. 
py:method:: create_index(name: str, endpoint_name: str, primary_key: str, index_type: VectorIndexType [, delta_sync_index_spec: Optional[DeltaSyncVectorIndexSpecRequest], direct_access_index_spec: Optional[DirectAccessVectorIndexSpec]]) -> VectorIndex Create an index. @@ -25,18 +25,16 @@ :param primary_key: str Primary key of the index :param index_type: :class:`VectorIndexType` - There are 2 types of Vector Search indexes: - - - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, automatically and - incrementally updating the index as the underlying data in the Delta Table changes. - - `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through our - REST and SDK APIs. With this model, the user manages index updates. + There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that automatically syncs with a + source Delta Table, automatically and incrementally updating the index as the underlying data in the + Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and write of vectors and + metadata through our REST and SDK APIs. With this model, the user manages index updates. :param delta_sync_index_spec: :class:`DeltaSyncVectorIndexSpecRequest` (optional) Specification for Delta Sync Index. Required if `index_type` is `DELTA_SYNC`. :param direct_access_index_spec: :class:`DirectAccessVectorIndexSpec` (optional) Specification for Direct Vector Access Index. Required if `index_type` is `DIRECT_ACCESS`. - :returns: :class:`CreateVectorIndexResponse` + :returns: :class:`VectorIndex` .. py:method:: delete_data_vector_index(index_name: str, primary_keys: List[str]) -> DeleteDataVectorIndexResponse @@ -106,9 +104,11 @@ :param filters_json: str (optional) JSON string representing query filters. - Example filters: - `{"id <": 5}`: Filter for id less than 5. - `{"id >": 5}`: Filter for id greater - than 5. - `{"id <=": 5}`: Filter for id less than equal to 5. - `{"id >=": 5}`: Filter for id - greater than equal to 5. - `{"id": 5}`: Filter for id equal to 5. + Example filters: + + - `{"id <": 5}`: Filter for id less than 5. - `{"id >": 5}`: Filter for id greater than 5. - `{"id + <=": 5}`: Filter for id less than equal to 5. - `{"id >=": 5}`: Filter for id greater than equal to + 5. - `{"id": 5}`: Filter for id equal to 5. :param num_results: int (optional) Number of results to return. Defaults to 10. :param query_text: str (optional) diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index 0aba7e5f7..e31c53fac 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -152,9 +152,9 @@ w = WorkspaceClient() - notebook = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" + notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" - get_status_response = w.workspace.get_status(path=notebook) + obj = w.workspace.get_status(path=notebook_path) Get status. 
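The `filters_json` examples reformatted in the vector_search_indexes.rst hunk above are easier to read next to a query call. A minimal sketch, assuming a hypothetical index name and column set; only `index_name` and `columns` are required, and the filter string follows the operator syntax listed in that hunk.

    .. code-block::

        from databricks.sdk import WorkspaceClient

        w = WorkspaceClient()

        # "main.default.product_docs_index" and the column names are placeholders;
        # substitute an index that exists in your workspace.
        results = w.vector_search_indexes.query_index(
            index_name="main.default.product_docs_index",
            columns=["id", "text"],
            query_text="How do I rotate credentials?",
            filters_json='{"id >=": 5}',  # only rows with id greater than or equal to 5
            num_results=10,
        )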
@@ -191,7 +191,9 @@ language=workspace.Language.PYTHON, content=base64.b64encode( ( - """print(1) + """import time + time.sleep(10) + dbutils.notebook.exit('hello') """ ).encode() ).decode(), From 5e94b78a54d07547dbff2ab8fbdd5158ccf6f2fa Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Wed, 30 Apr 2025 11:31:50 +0000 Subject: [PATCH 2/3] fix --- databricks/sdk/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index 098fc457e..af5370a7e 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -868,6 +868,7 @@ def __init__( product_version="0.0.0", credentials_strategy: Optional[CredentialsStrategy] = None, credentials_provider: Optional[CredentialsStrategy] = None, + token_audience: Optional[str] = None, config: Optional[client.Config] = None, ): if not config: @@ -896,6 +897,7 @@ def __init__( debug_headers=debug_headers, product=product, product_version=product_version, + token_audience=token_audience, ) self._config = config.copy() self._api_client = client.ApiClient(self._config) From 516243949287e9668e59312a8701ea3eabc8daab Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Wed, 30 Apr 2025 11:34:06 +0000 Subject: [PATCH 3/3] fix --- databricks/sdk/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index af5370a7e..a51b21557 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -170,6 +170,7 @@ def __init__( product_version="0.0.0", credentials_strategy: Optional[CredentialsStrategy] = None, credentials_provider: Optional[CredentialsStrategy] = None, + token_audience: Optional[str] = None, config: Optional[client.Config] = None, ): if not config: @@ -198,6 +199,7 @@ def __init__( debug_headers=debug_headers, product=product, product_version=product_version, + token_audience=token_audience, ) self._config = config.copy() self._dbutils = _make_dbutils(self._config)
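The two small SDK hunks above add a `token_audience` keyword to the `WorkspaceClient` and `AccountClient` constructors and forward it to the underlying `Config`. A minimal sketch of passing it through; the host and audience values are placeholders, not real endpoints.

    .. code-block::

        from databricks.sdk import WorkspaceClient

        # token_audience is forwarded verbatim to client.Config, as the hunks above
        # show; authentication otherwise follows the usual Config resolution rules.
        w = WorkspaceClient(
            host="https://my-workspace.cloud.databricks.com",
            token_audience="https://example.com/oidc/audience",
        )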